Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
20 commits
Select commit Hold shift + click to select a range
7063c9d
add spot for Rceattle in converter
Schiano-NOAA Feb 23, 2026
db82026
add more info for rceattle into converter from previous effort
Schiano-NOAA Feb 23, 2026
8f921e9
add in changes for Rceattle that were stashed
Schiano-NOAA Feb 26, 2026
8e341a4
add navigation for development
Schiano-NOAA Feb 27, 2026
fb6e68c
lay foundation for extracting based on type within list
Schiano-NOAA Feb 27, 2026
ef74c12
work on converter for rceattle and clean
Schiano-NOAA Mar 2, 2026
1204ccd
start function to rework data once out of a list
Schiano-NOAA Mar 2, 2026
b398cb9
improve function so it does processing on final extracted list rather…
Schiano-NOAA Mar 2, 2026
1b913dc
adjust utility fxn for rceattle to work with multidimensional element…
Schiano-NOAA Mar 3, 2026
7b8cd6f
minor updates on supporting function to run rceattle through converter
Schiano-NOAA Mar 4, 2026
c252971
add note on where you left off
Schiano-NOAA Mar 4, 2026
87e4eef
additional progress and testing up through element 9
Schiano-NOAA Mar 6, 2026
8cc30ee
fix issues with some reps in loop and manually adjust values that are…
Schiano-NOAA Mar 9, 2026
27d2061
add change to standard naming conventions
Schiano-NOAA Mar 9, 2026
996ae41
adjustment to brackets and parentheses
Schiano-NOAA Mar 9, 2026
818a268
recognize model as rceattle object; make reading in ifelse for naming…
Schiano-NOAA Mar 13, 2026
80eee1e
update module names and expand naming conventions for other labels
Schiano-NOAA Mar 13, 2026
3fc2790
line that gets commented in and out for testing purposes
Schiano-NOAA Mar 17, 2026
4daf371
add era column for year
Schiano-NOAA Mar 17, 2026
a7c8632
comment out testing line to check tests
Schiano-NOAA Mar 17, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
305 changes: 264 additions & 41 deletions R/convert_output.R
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
#'
#' @param file Assessment model output file path
#' @param model Assessment model used in evaluation ("ss3", "bam",
#' "fims").
#' "fims", "rceattle").
#' @param fleet_names Names of fleets in the assessment model as
#' shortened in the output file. If fleet names are not properly read, then
#' indicate the fleets names as an acronym in a vector
Expand Down Expand Up @@ -97,42 +97,60 @@ convert_output <- function(
out_new <- out_new[-1, ]

# Check if path links to a valid file
url_pattern <- "^(https?|ftp|file):\\/\\/[-A-Za-z0-9+&@#\\/%?=~_|!:,.;]*[-A-Za-z0-9+&@#\\/%=~_|]$"
if (grepl(url_pattern, file)) {
check <- httr::HEAD(file)
url <- httr::status_code(check)
if (url == 404) cli::cli_abort(c(message = "Invalid URL."))
} else {
if (!file.exists(file)) {
cli::cli_abort(c(
message = "`file` not found.",
"i" = "`file` entered as {file}"
))
if (is.character(file)) {
url_pattern <- "^(https?|ftp|file):\\/\\/[-A-Za-z0-9+&@#\\/%?=~_|!:,.;]*[-A-Za-z0-9+&@#\\/%=~_|]$"
if (grepl(url_pattern, file)) {
check <- httr::HEAD(file)
url <- httr::status_code(check)
if (url == 404) cli::cli_abort(c(message = "Invalid URL."))
} else {
if (!file.exists(file)) {
cli::cli_abort(c(
message = "`file` not found.",
"i" = "`file` entered as {file}"
))
}
}
}

# Recognize model through file extension
if (is.null(model)) {
model <- switch(stringr::str_extract(file, "\\.([^.]+)$"),
".sso" = {
cli::cli_alert_info("Processing Stock Synthesis output file...")
"ss3"
},
".rdat" = {
cli::cli_alert_info("Processing BAM output file...")
"bam"
},
".rds" = {
cli::cli_alert_info("Processing WHAM output file...")
"wham"
},
".RDS" = {
if (is.character(file)) {
if (is.null(model)) {
model <- switch(stringr::str_extract(file, "\\.([^.]+)$"),
".sso" = {
cli::cli_alert_info("Processing Stock Synthesis output file...")
"ss3"
},
".rdat" = {
cli::cli_alert_info("Processing BAM output file...")
"bam"
},
".rds" = {
cli::cli_alert_info("Processing WHAM output file...")
"wham"
},
".RDS" = {
cli::cli_alert_info("Processing FIMS output file...")
"fims"
},

cli::cli_abort("Unknown file type. Please indicate model.")
)
}
} else {
model <- switch (class(file)[1],
"fims" = {
cli::cli_alert_info("Processing FIMS output file...")
"fims"
},
"Rceattle" = {
cli::cli_alert_info("Processing Rceattle output file...")
"rceattle"
},
cli::cli_abort("Unknown file type. Please indicate model.")
)
}


#### SS3 ####
# Convert SS3 output Report.sso file
Expand Down Expand Up @@ -1764,6 +1782,202 @@ convert_output <- function(
}
fims_output[setdiff(tolower(names(out_new)), tolower(names(fims_output)))] <- NA
out_new <- fims_output
#### Rceattle ####
} else if (model == "rceattle") {
# Want to extract and set values from:
# quantities, sdrep, and estimated_params
# take similar approach to SS3 when only some keywords were converted
# can later take an approach like BAM?
# TODO: Do we want users to input the saved file or already loaded into the R environment?
if (is.character(file)) {
dat <- readRDS(file)
} else {
dat <- file
}

# Extract or use fleet names
if (is.null(fleet_names)) {
# TODO: ask if there is a better way to identify fleet names
fleet_names <- names(dat$estimated_params$index_ln_q)
}

# Output fleet names in console
cli::cli_alert_info("Identified fleet names:")
cli::cli_alert_info("{fleet_names}")
# Create list for morphed dfs to go into (for rbind later)
out_list <- list()

factors <- c("year", "fleet", "fleet_name", "age", "sex", "area", "seas", "season", "time", "era", "subseas", "subseason", "platoon", "platoo", "growth_pattern", "gp", "nsim", "age_a")
errors <- c("StdDev", "sd", "se", "SE", "cv", "CV", "stddev")
# units <- c("mt", "lbs", "eggs")

for (p in (2:length(dat))[-c(6, 8, 9, 10)]) {
# might need to manually add the removed above
# last tested through p=9 (data_list)
extract <- dat[p]
module_name <- names(extract)
cli::cli_alert_info("Processing {module_name}")
if (module_name == "sdrep") {
# this does not include all elements from sdrep list
df <- extract[[1]]
# Extract values from the sdrep element in the list
values <- data.frame(
label = names(extract[[1]]$value),
estimate = extract[[1]]$value,
uncertainty = extract[[1]]$sd,
uncertainty_label = "sd"
)
values_count <- values |>
dplyr::group_by(label) |>
dplyr::count()
values <- values |>
dplyr::left_join(
{
values |> dplyr::group_by(label) |> dplyr::count()
},
by = "label"
)
# make year column
year_col <- rep(
file[["data_list"]]$styr:file[["data_list"]]$projyr,
length(unique(
dplyr::filter(values_count, n == length(file[["data_list"]]$styr:file[["data_list"]]$projyr)) |>
dplyr::pull(label)
))
)

df2 <- values |>
dplyr::filter(n == length(file[["data_list"]]$styr:file[["data_list"]]$projyr)) |>
dplyr::mutate(year = year_col) |>
rbind(
{
values |>
dplyr::filter(
n != length(file[["data_list"]]$styr:file[["data_list"]]$projyr)
) |>
dplyr::mutate(year = NA)
}
)

# Extract parameter values ts
par_fixes <- data.frame(
label = names(extract[[1]]$par.fixed),
estimate = extract[[1]]$par.fixed
)
par_fixes_count <- par_fixes |>
dplyr::group_by(label) |>
dplyr::count()
par_fixes <- par_fixes |>
dplyr::left_join(
{
par_fixes |>
dplyr::group_by(label) |>
dplyr::count()
},
by = "label"
)

year_col_par_fix <- rep(
file[["data_list"]]$styr:file[["data_list"]]$endyr,
length(unique(
dplyr::filter(par_fixes_count, n == length(file[["data_list"]]$styr:file[["data_list"]]$endyr)) |>
dplyr::pull(label)
))
)

df3 <- par_fixes |>
dplyr::filter(n == length(file[["data_list"]]$styr:file[["data_list"]]$endyr)) |>
dplyr::mutate(year = year_col_par_fix) |>
rbind(
{
par_fixes |>
dplyr::filter(
n != length(file[["data_list"]]$styr:file[["data_list"]]$endyr)
) |>
dplyr::mutate(year = NA)
}
) |>
dplyr::mutate(
uncertainty = NA,
uncertainty_label = NA
)
# not sure how pop_scalar is indexed
# not sure how log_index_hat is indexed
# Did not use r_sd for the error in rec bc used it from the other element in the list

df4 <- rbind(df2, df3) |>
dplyr::select(-n) |>
dplyr::mutate(
module_name = module_name
)

df4[setdiff(tolower(names(out_new)), tolower(names(df4)))] <- NA
out_list[[names(extract)]] <- df4
} else if (is.list(extract[[1]])) { # indicates vector and list
if (any(vapply(extract[[1]], is.matrix, FUN.VALUE = logical(1)))) {
##############################################################
df <- extract[[1]] |>
expand_element(fleet_names = fleet_names) |>
dplyr::mutate(
module_name = module_name
)
df[setdiff(tolower(names(out_new)), tolower(names(df)))] <- NA
out_list[[names(extract)]] <- df

} else if (any(vapply(extract[[1]], is.vector, FUN.VALUE = logical(1)))) { # all must be a vector to work - so there must be conditions for dfs with a mix
extract_list <- list()
# mod_name1 <- names(extract)
for (i in seq_along(extract[[1]])) {
# need to add a condition or something in expand_element to account for data that's formatted differently but is still a list, i.e. p=9
if (is.list(extract[[1]][i][[1]])) {
# mod_name2 <- glue::glue("{module_name}_{names(extract[[1]][i])}")
# comment out message once finished development
cli::cli_alert_info("Processing {names(extract[[1]][i])}")

df <- extract[[1]][i][[1]] |>
expand_element(fleet_names = fleet_names) |>
dplyr::mutate(
module_name = module_name # mod_name2
) # |>
# suppressWarnings()
} else {
df <- data.frame(
estimate = extract[[1]][[i]][[1]],
label = names(extract[[1]][i]),
module_name = module_name
)
}
df[setdiff(tolower(names(out_new)), tolower(names(df)))] <- NA
extract_list[[names(extract[[1]][i])]] <- df
}
new_df <- Reduce(rbind, extract_list)
out_list[[names(extract)]] <- new_df
} else {
cli::cli_alert_warning("Not compatible.")
}
} else {
cli::cli_alert_warning("Not compatible yet.")
}
# } else if (is.list(extract[[1]])) { # list only
# } else if (is.matrix(extract[[1]])) { # matrix only
# } else {
# cli::cli_alert_warning(paste(names(extract), " not compatible.", sep = ""))
# } # close if statement
} # close loop over objects listed in dat file
# Finish out df
out_new <- Reduce(rbind, out_list) |>
# Add era as a factor into the converted output (as done for BAM)
dplyr::mutate(
# TODO: replace all periods with underscore if naming convention is different
label = tolower(label),
# set era
era = dplyr::if_else(
year > dat$data_list$endyr,
"fore",
"time"
)
)

} else {
cli::cli_abort(c(
message = "Output file not compatible.",
Expand Down Expand Up @@ -1803,20 +2017,29 @@ convert_output <- function(
)
) |>
suppressWarnings()
if (tolower(model) == "ss3") {
con_file <- system.file("resources", "ss3_var_names.csv", package = "stockplotr", mustWork = TRUE)
var_names_sheet <- utils::read.csv(con_file, na.strings = "")
} else if (tolower(model) == "bam") {
con_file <- system.file("resources", "bam_var_names.csv", package = "stockplotr", mustWork = TRUE)
var_names_sheet <- utils::read.csv(con_file, na.strings = "") |>
dplyr::mutate(
label = tolower(label)
)
} else if (tolower(model) == "fims") {
con_file <- system.file("resources", "fims_var_names.csv", package = "stockplotr", mustWork = TRUE)
var_names_sheet <- utils::read.csv(con_file, na.strings = "")
}

# if (tolower(model) == "ss3") {
# con_file <- system.file("resources", "ss3_var_names.csv", package = "stockplotr", mustWork = TRUE)
# var_names_sheet <- utils::read.csv(con_file, na.strings = "")
# } else if (tolower(model) == "bam") {
# con_file <- system.file("resources", "bam_var_names.csv", package = "stockplotr", mustWork = TRUE)
# var_names_sheet <- utils::read.csv(con_file, na.strings = "") |>
# dplyr::mutate(
# label = tolower(label)
# )
# } else if (tolower(model) == "fims") {
# con_file <- system.file("resources", "fims_var_names.csv", package = "stockplotr", mustWork = TRUE)
# var_names_sheet <- utils::read.csv(con_file, na.strings = "")
# } else if (tolower(model) == "rceattle") {
# con_file <- system.file("resources", "rceattle_var_names.csv", package = "stockplotr", mustWork = TRUE)
# var_names_sheet <- utils::read.csv(con_file, na.strings = "")
# }

# edit: here is a different way of loading in the csv sheets
con_file <- system.file("resources", glue::glue("{model}_var_names.csv"), package = "stockplotr", mustWork = TRUE)
# temporarily add call to local csv so I can test
# con_file <- glue::glue("~/GitHub/stockplotr/inst/resources/{model}_var_names.csv")
var_names_sheet <- utils::read.csv(con_file, na.strings = "")

if (file.exists(con_file)) {
# Remove 'X' column if it exists
var_names_sheet <- var_names_sheet |>
Expand Down
Loading
Loading