From c1ecc49cba86649d8afa88c5b65b8b5e8a3ca595 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Fri, 13 May 2022 07:46:18 -0400 Subject: [PATCH 001/145] upload code --- .../R/beta_prior_estimation.R | 177 +++++++++++++ .../delphiBackfillCorrection/R/constants.R | 23 ++ .../delphiBackfillCorrection/R/main.R | 107 ++++++++ .../delphiBackfillCorrection/R/model.R | 105 ++++++++ .../R/preprocessing.R | 247 ++++++++++++++++++ .../delphiBackfillCorrection/R/utils.R | 82 ++++++ .../unit-tests/testthat.R | 4 + .../unit-tests/testthat/constants.R | 22 ++ .../testthat/test-beta_prior_estimation.R | 64 +++++ .../unit-tests/testthat/test-preprocessing.R | 113 ++++++++ .../unit-tests/testthat/test-training.R | 3 + .../unit-tests/testthat/test-utils.R | 212 +++++++++++++++ Backfill_Correction/params.json.template | 6 + 13 files changed, 1165 insertions(+) create mode 100644 Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/R/constants.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/R/main.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/R/model.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/R/utils.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/constants.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-training.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R create mode 100644 Backfill_Correction/params.json.template diff --git a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R new file mode 100644 index 000000000..366c0a060 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R @@ -0,0 +1,177 @@ +#' Functions for Beta Prior Approach. +#' This is used only for the ratio prediction e.g. fraction of Covid claims, +#' percentage of positive tests. We assume that the ratio follows a beta distribution +#' that is day-of-week dependent. A quantile regression model is used first with lasso +#' penalty for supporting quantile estimation and then a non-linear minimization is used +#' for prior estimation. +lp_solver <- "gurobi" + +#' Sum of squared error +#' +#' @param fit estimated values +#' @param actual actual values +#' +#' @export +delta <- function(fit, actual) sum((fit-actual)^2) + +#' Generate objection function +#' @param theta parameters for the distribution in log scale +#' @param prob the expected probabilities +#' +#' @importFrom stats pbeta +#' +#' @export +objective <- function(theta, x, prob, ...) 
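+# A minimal sketch of how this objective feeds the rest of the file (est_priors()
+# and ratio_adj_with_pseudo() below); quantiles, taus, num and denom here are
+# placeholders, not objects defined in this package:
+#   sol <- nlm(objective, c(0, log(10)), x = quantiles, prob = taus)
+#   ab  <- exp(sol$estimate)               # fitted alpha and beta of the Beta prior
+#   pseudo_num   <- ab[1]                  # pseudo counts added to the numerator
+#   pseudo_denom <- ab[1] + ab[2]          # pseudo counts added to the denominator
+#   adjusted_ratio <- (num + pseudo_num) / (denom + pseudo_denom)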
{ + ab <- exp(theta) # Parameters are the *logs* of alpha and beta + fit <- pbeta(x, ab[1], ab[2]) + return (delta(fit, prob)) +} + +#' Main function for the beta prior approach +#' Estimate the priors for the beta distribution based on data for +#' a certain day of a week +#' +#' @param train_data Data Frame for training +#' @param prior_test_data Data Frame for testing +#' @param dw column name to indicate which day of a week it is +#' @param taus vector of considered quantiles +#' @param params_list the list of parameters for training +#' @param response the column name of the response variable +#' @param lp_solver the lp solver used in Quantgen +#' @param labmda the level of lasso penalty +#' @param start the initialization of the the points in nlm +#' @param base_pseudo_denum the pseudo counts added to denominator if little data for training +#' @param base_pseudo_num the pseudo counts added to numerator if little data for training +#' +#' @import nlm +#' @import gurobi +#' @import Matrix +#' @import tidyverse +#' @import dplyr +#' @importFrom quantgen quantile_lasso +#' @importFrom constants lp_solver +est_priors <- function(train_data, prior_test_data, dw, taus, + params_list, response, lp_solver, lambda, + start=c(0, log(10)), + base_pseudo_denom=1000, base_pseudo_num=10){ + sub_train_data <- train_data %>% filter(train_data[dw] == 1) + sub_test_data <- prior_test_data %>% filter(prior_test_data[dw] == 1) + if (dim(sub_test_data)[1] == 0) { + pseudo_denom <- base_pseudo_denom + pseudo_num <- base_pseudo_num + } else { + # Using quantile regression to get estimated quantiles at log scale + quantiles <- list() + for (idx in 1:length(taus)){ + tau <- taus[idx] + obj <- quantile_lasso(as.matrix(sub_train_data[params_list]), + sub_train_data[response], tau = tau, + lambda = lambda, stand = FALSE, lp_solver = lp_solver) + y_hat_all <- as.numeric(predict(obj, newx = as.matrix(sub_test_data[params_list]))) + quantiles[idx] <- exp(mean(y_hat_all, na.rm=TRUE)) # back to the actual scale + } + quantiles <- as.vector(unlist(quantiles)) + # Using nlm to estimate priors + sol <- nlm(objective, start, x=quantiles, prob=taus, lower=0, upper=1, + typsize=c(1,1), fscale=1e-12, gradtol=1e-12) + parms <- exp(sol$estimate) + # Computing pseudo counts based on beta priors + pseudo_denom <- parms[1] + parms[2] + pseudo_num <- parms[1] + } + return (c(pseudo_denom, pseudo_num)) +} + +#' Update ratio based on the pseudo counts for numerators and denominators +#' +#' @param data Data Frame +#' @param dw character to indicate the day of a week. 
Can be NULL for all the days +#' @param pseudo_num the estimated counts to be added to numerators +#' @param pseudo_denom the estimated counts to be added to denominators +#' @param num_col the column name for the numerator +#' @param denom_col the column name for the denominator +#' +#' @export +ratio_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, denom_col){ + if (is.null(dw)){ + num_adj <- data[[num_col]] + pseudo_num + denom_adj <- data[[denom_col]] + pseudo_denom + } else { + num_adj <- data[data[dw] == 1, num_col] + pseudo_num + denom_adj <- data[data[dw] == 1, denom_col] + pseudo_denom + } + return (num_adj / denom_adj) +} + +#' Update ratio using beta prior approach +#' +#' @param train_data training data +#' @param test_data testing data +#' @param prior_test_data testing data for the lag -1 model +#' +#' @importFrom constants taus, dw, lp_solver +#' @export +ratio_adj <- function(train_data, test_data, prior_test_data){ + train_data$value_target <- ratio_adj_with_pseudo(train_data, NULL, 1, 100, "value_target_num", "value_target_denom") + train_data$value_7dav <- ratio_adj_with_pseudo(train_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") + test_data$value_target <- ratio_adj_with_pseudo(test_data, NULL, 1, 100, "value_target_num", "value_target_denom") + prior_test_data$value_7dav <- ratio_adj_with_pseudo(prior_test_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") + + train_data$log_value_target <- log(train_data$value_target) + train_data$log_value_7dav <- log(train_data$value_7dav) + test_data$log_value_target <- log(test_data$value_target) + prior_test_data$log_value_7dav <- log(prior_test_data$value_7dav) + + pre_params_list = c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref", + "log_value_7dav") + #For training + train_data$value_raw = NaN + train_data$value_7dav = NaN + train_data$value_prev_7dav = NaN + + #For testing + test_data$value_raw = NaN + test_data$value_7dav = NaN + test_data$value_prev_7dav = NaN + + test_data$pseudo_num = NaN + test_data$pseudo_denum = NaN + + for (cov in c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref", "Sun_ref")){ + pseudo_counts <- est_priors(train_data, prior_test_data, cov, taus, + pre_params_list, "value_target", lp_solver, lambda=0.1) + pseudo_denum = pseudo_counts[1] + pseudo_counts[2] + pseudo_num = pseudo_counts[1] + # update current data + # For training + train_data$value_raw[train_data[cov] == 1] <- ratio_adj_with_pseudo( + train_data, cov, pseudo_num, pseudo_denum, "value_raw_num", "value_raw_denom") + train_data$value_7dav[train_data[cov] == 1] <- ratio_adj_with_pseudo( + train_data, cov, pseudo_num, pseudo_denum, "value_7dav_num", "value_7dav_denom") + train_data$value_prev_7dav[train_data[cov] == 1] <- ratio_adj_with_pseudo( + train_data, cov, pseudo_num, pseudo_denum, "value_prev_7dav_num", "value_prev_7dav_denom") + + #For testing + test_data$value_raw[test_data[cov] == 1] <- ratio_adj_with_pseudo( + test_data, cov, pseudo_num, pseudo_denum, "value_raw_num", "value_raw_denom") + test_data$value_7dav[test_data[cov] == 1] <- ratio_adj_with_pseudo( + test_data, cov, pseudo_num, pseudo_denum, "value_7dav_num", "value_7dav_denom") + test_data$value_prev_7dav[test_data[cov] == 1] <- ratio_adj_with_pseudo( + test_data, cov, pseudo_num, pseudo_denum, "value_prev_7dav_num", "value_prev_7dav_denom") + + test_data$pseudo_num[test_data[cov] == 1] = pseudo_num + test_data$pseudo_denum[test_data[cov] == 1] = pseudo_denum + } + + train_data$log_value_raw = 
log(train_data$value_raw) + train_data$log_value_7dav = log(train_data$value_7dav) + train_data$log_value_prev_7dav = log(train_data$value_prev_7dav) + train_data$log_7dav_slope = train_data$log_value_7dav - train_data$log_value_prev_7dav + + test_data$log_value_raw = log(test_data$value_raw) + test_data$log_value_7dav = log(test_data$value_7dav) + test_data$log_value_prev_7dav = log(test_data$value_prev_7dav) + test_data$log_7dav_slope = test_data$log_value_7dav - test_data$log_value_prev_7dav + + return (list(train_data, test_data)) +} \ No newline at end of file diff --git a/Backfill_Correction/delphiBackfillCorrection/R/constants.R b/Backfill_Correction/delphiBackfillCorrection/R/constants.R new file mode 100644 index 000000000..47da159da --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/R/constants.R @@ -0,0 +1,23 @@ +# Constants for the backfill correction model + +taus <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) +ref_lag <- 60 +lag_knots <- c(1, 2, 3, 4, 5, 7, 10, 14) +upper_lag <- 15 +training_days <- 270 +testing_window <- 14 +lag_window <- 5 +lambda <- 0.1 + +ld_name = "01" +yitl = "log_value_raw" +slope = "log_7dav_slope" +y7dav = "log_value_7dav" +wd = c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat") +wd2 = c("Mon2", "Tue2", "Wed2", "Thurs2", "Fri2", "Sat2") +wm <- c("W1_issue", "W2_issue", "W3_issue") +#sqrtscale = c('sqrty0', 'sqrty1', 'sqrty2', 'sqrty3') +sqrtscale_covid = c('sqrty0_covid', 'sqrty1_covid', 'sqrty2_covid') +sqrtscale_total = c('sqrty0_total', 'sqrty1_total', 'sqrty2_total') +sqrtscale = c('sqrty0', 'sqrty1', "sqrty2") +log_lag = "inv_log_lag" \ No newline at end of file diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R new file mode 100644 index 000000000..5ebf660fc --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -0,0 +1,107 @@ +library(tidyverse) +library(dplyr) + +#' Tempt usage +#' params = list() +#' customize +#' params$reg_lag: reference lag, after x days, the update is considered to be the response +#' params$data_path: link to the input data file +#' params$testing_window: the testing window used for saving the runtime. 
Could set it to be 1 if time allows +#' params$test_dates: list of two elements, the first one is the start date and the second one is the end date + +#' Main function for getting backfill corrected estimates +#' +#' @param params +#' +#' @import constants +#' @import preprocessing +#' @import beta_prior_estimation +#' @import model +#' +#' @export +run_backfill <- function(params){ + # Get the input data + df <- read_data(params$data_path) + refd_col <- "time_value" + lag_col <- "lag" + testing_window <- params$testing_window + ref_lag <- params$ref_lag + min_refd <- test_date_list[1] + max_refd <- test_date_list[length(test_date_list)] + + for (geo_level in params$geo_levels){ + # Get full list of interested locations + geo_list <- unique(df$geo_value) + # Build model for each location + for (geo in geo_list) { + subdf <- df %>% filter(geo_value == geo) %>% filter(lag < ref_lag) + subdf <- fill_rows(subdf, refd_col, lag_col, min_refd, max_refd) + for (value_type in value_types){ + if (value_type == "count") { # For counts data only + combined_df <- fill_missing_updates(subdf, params$num_col, refd_col, lag_col) + combined_df <- add_7davs_and_target(combined_df, "value_raw", refd_col, lag_col) + } else if (value_type == "ratio"){ + combined_num_df <- fill_missing_updates(subdf, params$num_col, refd_col, lag_col) + combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", refd_col, lag_col) + + combined_denom_df <- fill_missing_updates(subdf, params$denom_col, refd_col, lag_col) + combined_denom_df <- add_7davs_and_target(combined_denom_df, "value_raw", refd_col, lag_col) + + combined_df <- merge(combined_num_df, combined_denom_df, + by=c(refd_col, "issue_date", lag_col, "target_date"), all.y=TRUE, + suffixes=c("_num", "_denom")) + } + combined_df <- add_params_for_dates(combined_df, refd_col, lag_col) + test_date_list <- get_test_dates(combined_df, params$test_dates) + for (test_date in test_date_list){ + geo_train_data = combined_df %>% + filter(issue_date < test_date) %>% + filter(target_date <= test_date) %>% + filter(target_date > test_date - training_days) %>% + drop_na() + geo_test_data = combined_df %>% + filter(issue_date >= test_date) %>% + filter(issue_date < test_date+testing_window) %>% + drop_na() + if (dim(geo_test_data)[1] == 0) next + if (dim(geo_train_data)[1] <= 200) next + if (value_type == "ratio"){ + geo_prior_test_data = combined_df %>% + filter(issue_date > test_date-7) %>% + filter(issue_date <= test_date) + + updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) + geo_train_data <- updated_data[[1]] + geo_test_data <- updated_data[[2]] + } + max_raw = sqrt(max(geo_train_data$value_raw)) + for (test_lag in c(1:14, 21, 35, 51)){ + filtered_data <- data_filteration(test_lag, geo_train_data, geo_test_data) + train_data <- filtered_data[[1]] + test_data <- filtered_data[[2]] + + updated_data <- add_sqrtscale(train_data, test_data, max_raw, "value_raw") + train_data <- updated_data[[1]] + test_data <- updated_data[[2]] + sqrtscale <- updated_data[[3]] + + covariates <- list(y7dav, wd, wd2, wm, slope, sqrtscale) + params_list <- c(yitl, as.vector(unlist(covariates))) + + # Model training and testing + prediction_results <- model_training_and_testing( + train_data, test_data, taus, params_list, lp_solver, lambda, test_date) + test_data <- prediction_results[[1]] + coefs <- prediction_results[[2]] + test_data <- evl(test_data, params$taus) + + export_test_result(test_data, coefs, params$export_dir, geo_level, + geo, test_lag) + }# End for 
test lags + }# End for test date list + }# End for value types + }# End for geo lsit + }# End for geo level + + +} \ No newline at end of file diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R new file mode 100644 index 000000000..a4079dbfb --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -0,0 +1,105 @@ +#' Filtration for training and testing data with different lags +#' +#' @param test_lag +#' @param geo_train_data training data for a certain location +#' @param geo_test_data testing data for a certain location +#' +#' @expert +data_filteration <- function(test_lag, geo_train_data, geo_test_data){ + if (test_lag <= 14){ + test_lag_pad=2 + test_lag_pad1=0 + test_lag_pad2=0 + }else if (test_lag < 51){ + test_lag_pad=7 + test_lag_pad1=6 + test_lag_pad2=7 + }else { + test_lag_pad=9 + test_lag_pad1=8 + test_lag_pad2=9 + } + train_data = geo_train_data %>% + filter(lag >= test_lag-test_lag_pad ) %>% + filter(lag <= test_lag+test_lag_pad ) + test_data = geo_test_data %>% + filter(lag >= test_lag-test_lag_pad1) %>% + filter(lag <= test_lag+test_lag_pad2) + return (list(train_data, test_data)) +} + + +#' Model training and prediction using quantile regression with Lasso penalty +#' The quantile regression uses the quantile_lasso function from quantgen package +#' +#' @param train_data Data frame for training +#' @param test_data Data frame for testing +#' @param taus vector of considered quantiles +#' @param params_list the list of column names serving as the covariates +#' @param lp_solver the lp solver used in Quantgen +#' @param lambda the level of lasso penalty +#' @param test_date as.Date +#' +#' @expert +model_training_and_testing <- function(train_data, test_data, taus, params_list, + lp_solver, lambda, test_date){ + success = 0 + coefs_result = list() + coef_list = c("intercept", paste(params_list, '_coef', sep='')) + for (tau in taus){ + #options(error=NULL) + tryCatch( + expr = { + # Quantile regression + obj = quantile_lasso(as.matrix(train_data[params_list]), + train_data$log_value_target, tau = tau, + lambda = lambda, stand = FALSE, lp_solver = lp_solver) + + y_hat_all = as.numeric(predict(obj, newx = as.matrix(test_data[params_list]))) + test_data[paste0("predicted_tau", as.character(tau))] = y_hat_all + + coefs_result[[success+1]] = coef(obj) + success = success + 1 + }, + error=function(e) {print(paste(geo, test_date, model_name, as.character(tau), sep="_"))} + ) + } + if (success < 9) next + coef_combined_result = data.frame(tau=taus, + issue_date=test_date) + coef_combined_result[coef_list] = as.matrix(do.call(rbind, coefs_result)) + + return (list(test_data, coef_combined_result)) +} + +#' Evaluation of the test results based on WIS score +#' The WIS score calculation is based on hte weighted_interval_score function +#' from the evalcast package from Delphi +#' +#' @param test_data multiple columns for the prediction results of difference +#' quantiles. 
Each row represents an update with certain (reference_date, +#' issue_date, location) +#' @param taus vector of quantiles interested +#' +#' @import covidcast +#' @importFrom evalcast import weighted_interval_score +#' +#' @export +evl <- function(test_data, taus){ + n_row = dim(test_data)[1] + taus_list = as.list(data.frame(matrix(replicate(n_row, taus), ncol=n_row))) + + # Calculate WIS + predicted_all = as.matrix(test_data[c("predicted_tau0.01", "predicted_tau0.025", + "predicted_tau0.1", "predicted_tau0.25", + "predicted_tau0.5", "predicted_tau0.75", + "predicted_tau0.9", "predicted_tau0.975", + "predicted_tau0.99")]) + predicted_all_exp = exp(predicted_all) + predicted_trans = as.list(data.frame(t(predicted_all - test_data$log_value_target))) + predicted_trans_exp = as.list(data.frame(t(predicted_all_exp - test_data$value_target))) + test_data$wis =mapply(weighted_interval_score, taus_list, predicted_trans, 0) + test_data$wis_exp =mapply(weighted_interval_score, taus_list, predicted_trans_exp, 0) + + return (test_data) +} \ No newline at end of file diff --git a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R new file mode 100644 index 000000000..5089e275d --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R @@ -0,0 +1,247 @@ +### Data Preprocessing +### The raw input data should have 4/5 basic columns: +### time_value: reference date +### issue_date: issue date/date of reporting +### geo_value: location +### lag: the number of days between issue date and the reference date +### counts: the number of counts used for estimation +### library(lubridate) +### library(stats) +### library(stats) +### library(dyplr) +### library(tidyverse) + + +#' Re-index, fill na, make sure all reference date have enough rows for updates +#' @param df Data Frame of aggregated counts within a single location +#' reported for each reference date and issue date. +#' @param refd_col column name for the column of reference date +#' @param lag_col column name for the column of lag +#' @param min_refd the earliest reference date considered in the data +#' @param max_refd the latest reference date considered in the data +#' +#' @importFrom constants ref_lag +#' +#' @return df_new Data Frame with filled rows for missing lags +#' +#' @export +fill_rows <- function(df, refd_col, lag_col, min_refd, max_refd){ + lags <- min(df[[lag_col]]): ref_lag # Full list of lags + refds <- seq(min_refd, max_refd, by="day") # Full list reference date + row_inds_df <- as.data.frame(crossing(refds, lags)) %>% + setNames(c(refd_col, lag_col)) + df_new = merge(x=df, y=row_inds_df, + by=c(refd_col, lag_col), all.y=TRUE) + return (df_new) +} + +#' Get pivot table, filling NANs. If there is no update on issue date D but +#' previous reports exist for issue date D_p < D, all the dates between +#' [D_p, D] are filled with with the reported value on date D_p. If there is +#' no update for any previous issue date, fill in with 0. +#' @param df Data Frame of aggregated counts within a single location +#' reported for each reference date and issue date. 
+#' @param value_col column name for the column of counts +#' @param refd_col column name for the column of reference date +#' @param lag_col column name for the column of lag +#' +#' @importFrom constants ref_lag +#' @importFrom tidyr fill +#' @importFrom dplyr everything, select +#' +#' @export +fill_missing_updates <- function(df, value_col, refd_col, lag_col) { + pivot_df <- df[order(df[[lag_col]], decreasing=FALSE), ] %>% + pivot_wider(id_cols=lag_col, names_from=refd_col, values_from=value_col) + if (any(diff(pivot_df[[lag_col]])!=1)){stop("Risk exists in forward fill")} + pivot_df <- pivot_df %>% fill(everything(), .direction="down") + pivot_df[is.na(pivot_df)] <- 0 # fill NAs with 0s + backfill_df <- pivot_df %>% + pivot_longer(-lag_col, values_to="value_raw", names_to=refd_col) + backfill_df[[refd_col]] = as.Date(backfill_df[[refd_col]]) + return (as.data.frame(backfill_df)) +} + +#' Calculate 7 day moving average for each issue date +#' The 7dav for date D reported on issue date D_i is the average from D-7 to D-1 +#' @param pivot_df Data Frame where the columns are issue dates and the rows are +#' reference dates +#' @param refd_col column name for the column of reference date +#' +#' @importFrom zoo rollmeanr +#' +#' @export +get_7dav <- function(pivot_df, refd_col){ + for (col in colnames(pivot_df)){ + if (col == refd_col) next + pivot_df[, col] <- rollmeanr(pivot_df[, col], 7, align="right", fill=NA) + } + backfill_df <- pivot_df %>% + pivot_longer(-refd_col, values_to="value_raw", names_to="issue_date") + backfill_df[[refd_col]] = as.Date(backfill_df[[refd_col]]) + backfill_df[["issue_date"]] = as.Date(backfill_df[["issue_date"]]) + return (as.data.frame(backfill_df)) +} + +#' Used for data shifting in terms of reference date +#' +#' @param df Data Frame of aggregated counts within a single location +#' reported for each reference date and issue date. +#' @param n_day number of days to be shifted +#' @param refd_col column name for the column of reference date +#' +#' @export +add_shift <- function(df, n_day, refd_col){ + df[, refd_col] <- as.Date(df[, refd_col]) + n_day + return (df) +} + +wd <- c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat") +#' Add one hot encoding for day of a week info in terms of reference +#' and issue date +#' +#' @param df Data Frame of aggregated counts within a single location +#' reported for each reference date and issue date. +#' @param wd vector of days of a week +#' @param time_col column used for the date, can be either reference date or +#' issue date +#' @param suffix suffix added to indicate which kind of date is used +#' +#' @importFrom constants wd +#' +#' @export +add_dayofweek <- function(df, wd, time_col, suffix){ + dayofweek <- as.numeric(format(df[[time_col]], format="%u")) + for (i in 1:6){ + df[, paste0(wd[i], suffix)] <- as.numeric(dayofweek == i) + } + if (suffix == "_ref"){ + df[, paste0("Sun", suffix)] <- as.numeric(dayofweek == 7) + } + return (df) +} + +#' Get week of a month info according to a date +#' All the dates on or before the ith Sunday but after the (i-1)th Sunday +#' is considered to be the ith week. Notice that the dates in the 5th week +#' this month are actually in the same week with the dates in the 1st week +#' next month and those dates are sparse. Thus, we assign the dates in the +#' 5th week to the 1st week. 
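+#' For example (these dates also appear in the unit tests): 2022-01-01 is in
+#' week 1, 2022-01-03 in week 2, 2022-01-10 in week 3, and 2022-01-31, which
+#' falls in the 5th week of January, is mapped back to week 1.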
+#' +#' @param date as.Date +#' +#' @importFrom lubridate make_date, year, month, day +#' @return a integer indicating which week it is in a month +#' @export +get_weekofmonth <- function(date){ + year <- year(date) + month <- month(date) + day <- day(date) + firstdayofmonth <- as.numeric(format(make_date(year, month, 1), format="%u")) + return (((day + firstdayofmonth - 1) %/% 7) %% 5 + 1) +} + +wm <- c("W1_issue", "W2_issue", "W3_issue") +#' Add one hot encoding for week of a month info in terms of issue date +#' +#' @param df Data Frame of aggregated counts within a single location +#' reported for each reference date and issue date. +#' @param wm vector of weeks of a month +#' @param time_col column used for the date, can be either reference date or +#' issue date +#' +#' @export +add_weekofmonth <- function(df, wm, time_col){ + weekofmonth <- get_weekofmonth(df[[time_col]]) + for (i in 1:3){ + df[, paste0(wm[i])] <- as.numeric(weekofmonth == i) + } + return (df) +} + +#' Add 7dav and target to the data +#' Target is the updates made ref_lag days after the first release +#' @param df Data Frame of aggregated counts within a single location +#' reported for each reference date and issue date. +#' @param value_col column name for the column of raw value +#' @param refd_col column name for the column of reference date +#' @param lag_col column name for the column of lag +#' +#' @export +add_7davs_and_target <- function(df, value_col, refd_col, lag_col){ + + df$issue_date <- df[[refd_col]] + df[[lag_col]] + pivot_df <- df[order(df$issue_date, decreasing=FALSE), ] %>% + pivot_wider(id_cols=refd_col, names_from="issue_date", + values_from=value_col) + + # Add 7dav avg + avg_df <- get_7dav(pivot_df, refd_col) + avg_df <- add_shift(avg_df, 1, refd_col) # 7dav until yesterday + names(avg_df)[names(avg_df) == 'value_raw'] <- 'value_7dav' + avg_df_prev7 <- add_shift(avg_df, 7, refd_col) + names(avg_df_prev7)[names(avg_df_prev7) == 'value_7dav'] <- 'value_prev_7dav' + + backfill_df <- Reduce(function(x, y) merge(x, y, all=TRUE), + list(df, avg_df, avg_df_prev7)) + + # Add target + target_df <- df[df$lag==ref_lag, ] %>% select(c(refd_col, "value_raw", "issue_date")) + names(target_df)[names(target_df) == 'value_raw'] <- 'value_target' + names(target_df)[names(target_df) == 'issue_date'] <- 'target_date' + + backfill_df <- merge(backfill_df, target_df, by=refd_col, all.x=TRUE) + + # Remove invalid rows + backfill_df <- backfill_df %>% drop_na(c(lag_col)) + + return (as.data.frame(backfill_df)) +} + +#' Add params related to date +#' Target is the updates made ref_lag days after the first release +#' @param df Data Frame of aggregated counts within a single location +#' reported for each reference date and issue date. 
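+#' Concretely, this adds the one-hot columns Mon_ref ... Sun_ref (day of week
+#' of the reference date), Mon_issue ... Sat_issue (day of week of the issue
+#' date) and W1_issue ... W3_issue (week of month of the issue date), using
+#' add_dayofweek() and add_weekofmonth() defined above.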
+#' @param refd_col column name for the column of reference date +#' @param lag_col column name for the column of lag +#' +#' @export +add_params_for_dates <- function(backfill_df, refd_col, lag_col){ + # Add columns for day-of-week effect + backfill_df <- add_dayofweek(backfill_df, wd, refd_col, "_ref") + backfill_df <- add_dayofweek(backfill_df, wd, "issue_date", "_issue") + + # Add columns for week-of-month effect + backfill_df <- add_weekofmonth(backfill_df, wm, "issue_date") + + return (as.data.frame(backfill_df)) +} + +#' Add columns to indicate the scale of value at square root level +#' +#' @param train_data Data Frame for training +#' @param test_data Data Frame for testing +#' @param value_col the column name of the considered value +#' @param the maximum value in the training data at square root level +#' @export +add_sqrtscale <- function(train_data, test_data, max_raw, value_col){ + sqrtscale = c() + sub_max_raw = sqrt(max(train_data$value_raw)) / 2 + + for (split in seq(0, 3)){ + if (sub_max_raw < (max_raw * (split+1) * 0.1)) break + train_data[paste0("sqrty", as.character(split))] = 0 + test_data[paste0("sqrty", as.character(split))] = 0 + qv_pre = max_raw * split * 0.2 + qv_next = max_raw * (split+1) * 0.2 + + train_data[(train_data$value_raw <= (qv_next)^2) + & (train_data$value_raw > (qv_pre)^2), paste0("sqrty", as.character(split))] = 1 + test_data[(test_data$value_raw <= (qv_next)^2) + & (test_data$value_raw > (qv_pre)^2), paste0("sqrty", as.character(split))] = 1 + sqrtscale[split+1] = paste0("sqrty", as.character(split)) + } + return (list(train_data, test_data, sqrtscale)) +} + + diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R new file mode 100644 index 000000000..a7df9b80e --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -0,0 +1,82 @@ +#' Return params file as an R list +#' +#' Reads a parameters file. If the file does not exist, the function will create a copy of +#' '"params.json.template" and read from that. 
+#' +#' @param path path to the parameters file; if not present, will try to copy the file +#' "params.json.template" +#' @param template_path path to the template parameters file +#' +#' @return a named list of parameters values +#' +#' @importFrom dplyr if_else +#' @importFrom jsonlite read_json +#' @importFrom lubridate ymd_hms +#' @export +read_params <- function(path = "params.json", template_path = "params.json.template") { + if (!file.exists(path)) file.copy(template_path, path) + params <- read_json(path, simplifyVector = TRUE) + + params$num_filter <- if_else(params$debug, 2L, 100L) + params$s_weight <- if_else(params$debug, 1.00, 0.01) + params$s_mix_coef <- if_else(params$debug, 0.05, 0.05) + + params$start_time <- ymd_hms( + sprintf("%s 00:00:00", params$start_date), tz = tz_to + ) + params$end_time <- ymd_hms( + sprintf("%s 23:59:59", params$end_date), tz = tz_to + ) + + params$parallel_max_cores <- if_else( + is.null(params$parallel_max_cores), + .Machine$integer.max, + params$parallel_max_cores + ) + + return(params) +} + +#' Create directory if not already existing +#' +#' @param path character vector giving the directory to create +#' +#' @export +create_dir_not_exist <- function(path) +{ + if (!dir.exists(path)) { dir.create(path) } +} + +#' Function to read input data +#' +#' @param path path to the input data +#' +#' @export +read_data <- function(path){ + df <- read_csv(path) + return (df) +} + +#‘ Export the result to customized directory + +#' @param test_data test data with prediction result +#' @param coef_data data frame with the estimated coefficients +#' @param export_dir export directory +#' @param geo_level geographical level, can be county or state +#' @param geo the geogrpahical location +#' @param test_lag +#' +#' @export +export_test_result <- function(test_data, coef_data, export_dir, + geo_level, geo, test_lag){ + pred_output_dir = paste("prediction", geo_level, geo, as.character(test_lag), sep="_") + write.csv(test_data, paste(export_dir, pred_output_dir , ".csv", sep=""), row.names = FALSE) + + coef_output_dir = paste("coefs", geo_level, geo, as.character(test_lag), sep="_") + write.csv(test_data, paste(export_dir, coef_output_dir , ".csv", sep=""), row.names = FALSE) + +} + + + + diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat.R new file mode 100644 index 000000000..83f3bb312 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat.R @@ -0,0 +1,4 @@ +library(testthat) +library(delphiBackfillCorrection) + +test_check("delphiBackfillCorrection", stop_on_warning = FALSE) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/constants.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/constants.R new file mode 100644 index 000000000..923749602 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/constants.R @@ -0,0 +1,22 @@ +# Constants for the backfill correction model + +taus <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) +ref_lag <- 60 +lag_knots <- c(1, 2, 3, 4, 5, 7, 10, 14) +upper_lag <- 15 +training_days <- 270 +testing_window <- 14 +lag_window <- 5 +lambda <- 0.1 + +yitl <- "log_value_raw" +slope <- "log_7dav_slope" +y7dav <- "log_value_7dav" +wd <- c("Mon_issue", "Tue_issue", "Wed_issue", "Thurs_issue", "Fri_issue", "Sat_issue") +wd2 <- c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref") +wm <- c("W1_issue", "W2_issue", 
"W3_issue") +#sqrtscale = c('sqrty0', 'sqrty1', 'sqrty2', 'sqrty3') +sqrtscale_num <- c('sqrty0_num', 'sqrty1_num', 'sqrty2_num') +sqrtscale_denom <- c('sqrty0_denom', 'sqrty1_denom', 'sqrty2_denom') +sqrtscale <- c('sqrty0', 'sqrty1', "sqrty2") +log_lag <- "inv_log_lag" \ No newline at end of file diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R new file mode 100644 index 000000000..aa6561719 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R @@ -0,0 +1,64 @@ +## Functions for Beta Prior Approach. +## This is used only for the ratio prediction e.g. fraction of Covid claims, +## percentage of positive tests. We assume that the ratio follows a beta distribution +## that is day-of-week dependent. A quantile regression model is used first with lasso +## penalty for supporting quantile estimation and then a non-linear minimization is used +## for prior estimation. +library(nlm) +library(quantgen) +library(gurobi) +library(tidyverse) +library(Matrix) +library(dplyr) + +lp_solver <- "gurobi" + +delta <- function(fit, actual) sum((fit-actual)^2) + +objective <- function(theta, x, prob, ...) { + ab <- exp(theta) # Parameters are the *logs* of alpha and beta + fit <- pbeta(x, ab[1], ab[2]) + return (delta(fit, prob)) +} + +## Main function for the beta prior approach +## Estimate the priors for the beta distribution based on data for +## a certain day of a week +est_priors <- function(train_data, prior_test_data, dw, taus, + params_list, response, lp_solver, lambda, + start=c(0, log(10), + base_pseudo_denum=1000, base_pseudo_num=10)){ + sub_train_data <- train_data %>% filter(train_data[dw] == 1) + sub_test_data <- prior_test_data %>% filter(prior_test_data[dw] == 1) + if (dim(sub_test_data)[1] == 0) { + pseudo_denum <- base_pseudo_denum + pseudo_num <- base_pseudo_num + } else { + # Using quantile regressison to get estimated quantiles at log scale + quantiles <- list() + for (idx in 1:length(taus)){ + tau <- taus[idx] + obj <- quantile_lasso(as.matrix(sub_train_data[params_list]), + sub_train_data[response], tau = tau, + lambda = lambda, stand = FALSE, lp_solver = lp_solver) + y_hat_all <- as.numeric(predict(obj, newx = as.matrix(sub_test_data[params_list]))) + quantiles[idx] <- exp(mean(y_hat_all, na.rm=TRUE)) # back to the actual scale + } + quantiles <- as.vector(unlist(quantiles)) + # Using nlm to estimate priors + sol <- nlm(objective, start, x=quantiles, prob=taus, lower=0, upper=1, + typsize=c(1,1), fscale=1e-12, gradtol=1e-12) + parms <- exp(sol$estimate) + # Computing pseudo counts based on beta priors + pseudo_denom <- parms[1] + parms[2] + pseudo_num <- parms[1] + } + return (pseudo_denom, pseudo_num) +} + +## Add pseudo counts for numerators and denominators +ratio_adj <- function(data, dw, pseudo_num, pseudo_denom, num_col, denom_col){ + num_adj <- data[data[dw] == 1, num_col] + pseudo_num + denom_adj <- data[data[dw] == 1, denom_col] + pseudo_denom + return (num_adj / denom_adj) +} diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R new file mode 100644 index 000000000..8c1c8bcd6 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R @@ -0,0 +1,113 @@ +library(jsonlite) 
+library(testthat) +library(dplyr) +library(tidyr) +library(zoo) +library(stats) +library(lubridate) +context("Testing preprocessing helper functions") + +refd_col <- "time_value" +lag_col <- "lag" +value_col <- "Counts_Products_Denom" +min_refd <- as.Date("2022-01-01") +max_refd <- as.Date("2022-01-07") +ref_lag <- 7 +fake_df <- data.frame(time_value = c(as.Date("2022-01-03"), as.Date("2022-01-03"), + as.Date("2022-01-03"), as.Date("2022-01-03"), + as.Date("2022-01-04"), as.Date("2022-01-04"), + as.Date("2022-01-04"), as.Date("2022-01-05"), + as.Date("2022-01-05")), + lag = c(0, 1, 3, 7, 0, 6, 7, 0, 7), + Counts_Products_Denom=c(100, 200, 500, 1000, 0, 200, 220, 50, 300)) +wd <- c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat") +wm <- c("W1_issue", "W2_issue", "W3_issue") + + +test_that("testing rows filling for missing lags", { + #Make sure all reference date have enough rows for updates + df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd) + n_refds <- as.numeric(max_refd - min_refd)+1 + + expect_equal(dim(df_new)[1], n_refds*(ref_lag+1)) + expect_equal(df_new %>% drop_na(), fake_df) +}) + + +test_that("testing NA filling for missing udpates", { + #Make sure all the updates are valid integers + + # Assuming the input data does not have enough rows for consecutive lags + expect_error(fill_missing_updates(fake_df, value_col, refd_col, lag_col), + "Risk exists in forward fill") + + # Assuming the input data is already prepared + df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd) + backfill_df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) + + expect_equal(dim(backfill_df)[1], n_refds*(ref_lag+1)) + + for (d in seq(min_refd, max_refd, by="day")){ + expect_true(all(diff(backfill_df[backfill_df[,refd_col]==d, "value_raw"])>=0 )) + } +}) + + +test_that("testing the caculation of 7-day moving average", { + df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd) + df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) + df$issue_date <- df[[refd_col]] + df[[lag_col]] + pivot_df <- df[order(df$issue_date, decreasing=FALSE), ] %>% + pivot_wider(id_cols=refd_col, names_from="issue_date", + values_from="value_raw") + pivot_df[is.na(pivot_df)] <- 0 + backfill_df <- get_7dav(pivot_df, refd_col) + + output <- backfill_df[backfill_df[[refd_col]] == as.Date("2022-01-07"), "value_raw"] + expected <- colSums(pivot_df[, -1]) / 7 + expect_true(all(output == expected)) +}) + +test_that("testing the data shifting", { + shifted_df <- add_shift(fake_df, 1, refd_col) + shifted_df[, refd_col] <- as.Date(shifted_df[, refd_col]) - 1 + + expect_equal(fake_df, shifted_df) +}) + + +test_that("testing adding columns for each day of a week", { + df_new <- add_dayofweek(fake_df, wd, refd_col, "_ref") + + expect_equal(dim(fake_df)[2] + 7, dim(df_new)[2]) + expect_true(all(rowSums(df_new[, -c(1:dim(fake_df)[2])]) == 1)) + expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-03"), "Mon_ref"] == 1)) + expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-05"), "Wed_ref"] == 1)) +}) + + +test_that("testing the calculation of week of a month", { + expect_equal(get_weekofmonth(as.Date("2022-01-01")), 1) + expect_equal(get_weekofmonth(as.Date("2022-01-03")), 2) + expect_equal(get_weekofmonth(as.Date("2022-01-10")), 3) + expect_equal(get_weekofmonth(as.Date("2022-01-31")), 1) + +}) + +test_that("testing the caculation of 7-day moving average", { + df_new <- add_weekofmonth(fake_df, wm, refd_col) + + expect_equal(dim(fake_df)[2] + 3, dim(df_new)[2]) 
+ expect_true(all(rowSums(df_new[, -c(1:dim(fake_df)[2])]) == 1)) + expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-03"), "W2_issue"] == 1)) +}) + +test_that("testing adding 7 day avg and target", { + df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd) + backfill_df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) + df_new <- add_7davs_and_target(backfill_df, "value_raw", refd_col, lag_col) + + expect_equal(dim(df_new)[2], 3 + 1 + 1 + 1 + 1) + expect_equal(dim(df_new)[1], 7 * 8) +}) + diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-training.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-training.R new file mode 100644 index 000000000..26b32ed11 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-training.R @@ -0,0 +1,3 @@ +library(quantgen) +library(gurobi) +lp_solver <- "gurobi" \ No newline at end of file diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R new file mode 100644 index 000000000..30b0a95bb --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R @@ -0,0 +1,212 @@ +#' Return params file as an R list +#' +#' Reads a parameters file. If the file does not exist, the function will create a copy of +#' '"params.json.template" and read from that. +#' +#' @param path path to the parameters file; if not present, will try to copy the file +#' "params.json.template" +#' @param template_path path to the template parameters file +#' +#' @return a named list of parameters values +#' +#' @importFrom dplyr if_else +#' @importFrom jsonlite read_json +#' @importFrom lubridate ymd_hms +#' @export +read_params <- function(path = "params.json", template_path = "params.json.template") { + if (!file.exists(path)) file.copy(template_path, path) + params <- read_json(path, simplifyVector = TRUE) + + params$num_filter <- if_else(params$debug, 2L, 100L) + params$s_weight <- if_else(params$debug, 1.00, 0.01) + params$s_mix_coef <- if_else(params$debug, 0.05, 0.05) + + params$start_time <- ymd_hms( + sprintf("%s 00:00:00", params$start_date), tz = tz_to + ) + params$end_time <- ymd_hms( + sprintf("%s 23:59:59", params$end_date), tz = tz_to + ) + + params$parallel_max_cores <- if_else( + is.null(params$parallel_max_cores), + .Machine$integer.max, + params$parallel_max_cores + ) + + return(params) +} + +#' Write a message to the console +#' +#' @param text the body of the message to display +#' @param df a data frame; the message will show the number of rows in the data frame +#' +#' @export +msg_df <- function(text, df) { + message(sprintf("%s --- %s: %d rows", format(Sys.time()), text, nrow(df))) +} + +#' Assert a logical value +#' +#' Will issue a \code{stop} command if the given statement is false. +#' +#' @param statement a logical value +#' @param msg a character string displayed as an additional message +#' +#' @export +assert <- function(statement, msg="") +{ + if (!statement) + { + stop(msg, call.=(msg=="")) + } +} + +#' Create directory if not already existing +#' +#' @param path character vector giving the directory to create +#' +#' @export +create_dir_not_exist <- function(path) +{ + if (!dir.exists(path)) { dir.create(path) } +} + +#' Adjust weights so no weight is too much of the final estimate. 
+#' +#' For privacy and estimation quality, we do not want to allow one survey +#' response to have such a high weight that it becomes most of an estimate. +#' +#' So, for a specific time and location: +#' +#' 1. Normalize weights so they sum to 1. +#' +#' 2. Determine a "mixing coefficient" based on the maximum weight. The mixing +#' coefficient is chosen to make the maximum weight smaller than +#' `params$s_weight`, subject to the constraint that the mixing coefficient must +#' be larger than `params$s_mix_coef`. +#' +#' 3. Replace weights with a weighted average of the original weights and +#' uniform weights (meaning 1/N for every observation), weighted by the mixing +#' coefficient. +#' +#' @param weights a vector of sample weights +#' @param s_mix_coef Minimum allowable mixing coefficient. +#' @param s_weight Maximum desired normalized mixing weight for any one observation. +#' @export +mix_weights <- function(weights, s_mix_coef, s_weight) +{ + N <- length(weights) + + ## Step 1: Normalize weights to sum to 1. + weights <- weights / sum(weights) + + ## Step 2: Choose a mixing coefficient to bring down the maximum weight. + max_weight <- max(weights) + + ## Choose the mix_coef to solve this problem: + ## + ## max_weight * (1 - mix_coef) + mix_coef / N <= s_weight + ## + ## TODO: Determine if the fudge factors are really necessary + mix_coef <- if (max_weight <= s_weight) { + 0 + } else if (1/N > s_weight*0.999) { + 1 + } else { + (max_weight * N - 0.999 * N * s_weight + 1e-6) / + (max_weight * N - 1 + 1e-6) + } + precoef <- mix_coef + + ## Enforce minimum and maximum. + if (mix_coef < s_mix_coef) { mix_coef <- s_mix_coef } + if (mix_coef > 1) { mix_coef <- 1 } + + ## Step 3: Replace weights. + new_weights <- mix_coef / N + (1 - mix_coef) * weights + + return(list( + weights=new_weights, + coef=mix_coef, + precoef=precoef, + maxp=max_weight, + normalized_preweights=weights + )) +} + + +#' Aggregates counties into megacounties that have low sample size values for a +#' given day. +#' +#' @param df_intr Input tibble that requires aggregation, with `geo_id`, `val`, +#' `sample_size`, `effective_sample_size`, and `se` columns. +#' @param threshold Sample size value below which counties should be grouped +#' into megacounties. +#' @param groupby_vars Character vector of column names to perform `group_by` +#' over +#' @return Tibble of megacounties. Counties that are not grouped are not +#' included in the output. +#' @importFrom dplyr group_by across all_of +megacounty <- function( + df_intr, threshold, groupby_vars=c("day", "geo_id") +) +{ + df_megacounties <- df_intr[df_intr$sample_size < threshold | + df_intr$effective_sample_size < threshold, ] + + df_megacounties <- mutate(df_megacounties, + geo_id = make_megacounty_fips(.data$geo_id)) + + df_megacounties <- group_by(df_megacounties, across(all_of(groupby_vars))) + df_megacounties <- mutate( + df_megacounties, + county_weight = .data$effective_sample_size / sum(.data$effective_sample_size)) + + df_megacounties <- summarize( + df_megacounties, + val = weighted.mean(.data$val, .data$effective_sample_size), + se = sqrt(sum(.data$se^2 * .data$county_weight^2)), + sample_size = sum(.data$sample_size), + effective_sample_size = sum(.data$effective_sample_size) + ) + + df_megacounties <- mutate(df_megacounties, county_weight = NULL) + df_megacounties <- ungroup(df_megacounties) + + return(df_megacounties) +} + +#' Converts county FIPS code to megacounty code. 
+#' +#' We designate megacounties with a special FIPS ending in 000; for example, the +#' megacounty for state 26 would be 26000 and would comprise counties with FIPS +#' codes 26XXX. +#' +#' @param fips Geo-id +#' @return Megacounty +make_megacounty_fips <- function(fips) { + paste0(substr(fips, 1, 2), "000") +} + +#' `any_true` returns TRUE if at least one is TRUE +#' Returns FALSE if at least one is FALSE and none are TRUE +#' Returns NA if all are NA +#' +#' @param ... One or more logical vectors of the same length. +#' @return A logical vector of the same length as the input vector(s). +#' @noRd +is_true <- function(x) x %in% TRUE +or <- function(a, b) ifelse(is.na(a) & is.na(b), NA, is_true(a) | is_true(b)) +any_true <- function(...) Reduce(or, list(...), NA) + +#' `all_true` returns TRUE if all are TRUE +#' Returns FALSE if at least one is FALSE and none are NA +#' Returns NA if at least one is NA +#' +#' @param ... One or more logical vectors of the same length. +#' @return A logical vector of the same length as the input vector(s). +#' @noRd +and <- function(a, b) ifelse(is.na(a) | is.na(b), NA, a & b) +all_true <- function(...) Reduce(and, list(...), TRUE) \ No newline at end of file diff --git a/Backfill_Correction/params.json.template b/Backfill_Correction/params.json.template new file mode 100644 index 000000000..b26306e1a --- /dev/null +++ b/Backfill_Correction/params.json.template @@ -0,0 +1,6 @@ +{ + "ref_lag": 60, + "data_path": + "test_dates":["", ""] , + "export_dir": "./receiving", +} From 51c22e19a38e603e691e393f5f186501fd7a1828 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Fri, 13 May 2022 07:49:44 -0400 Subject: [PATCH 002/145] update params template --- Backfill_Correction/params.json.template | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/Backfill_Correction/params.json.template b/Backfill_Correction/params.json.template index b26306e1a..d32c6c23c 100644 --- a/Backfill_Correction/params.json.template +++ b/Backfill_Correction/params.json.template @@ -1,6 +1,12 @@ { "ref_lag": 60, - "data_path": + "data_path": "", "test_dates":["", ""] , + "testing_window": 1, + "training_days": 270, "export_dir": "./receiving", + "geo_levels": ["state", "county"], + "value_type": ["count", "ratio"], + "num_col": "", + "denom_col": "", } From 2ec5c04da7a599e84a1e30316819d06b14bc2fa3 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Fri, 13 May 2022 07:57:29 -0400 Subject: [PATCH 003/145] add readme for the input data --- Backfill_Correction/README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 Backfill_Correction/README.md diff --git a/Backfill_Correction/README.md b/Backfill_Correction/README.md new file mode 100644 index 000000000..a4cbfe65f --- /dev/null +++ b/Backfill_Correction/README.md @@ -0,0 +1,12 @@ +Requirement for the input data + +Required columns with fixed column names +- geo_value: strings or floating numbers to indicate the location +- time_value: reference date. +- lag: the number of days between issue date and the reference date +- issue_date: issue date/report, required if lag is not available + +Required columns without fixed column names +- num_col: the column for the number of reported counts of the numerator. e.g. the number of COVID claims counts according to the insurance data. +- denom_col: the column for the number of reported counts of the denominator. e.g. the number of total claims counts according to the insurance data. Required if considering the backfill correction of ratios. 
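+
+For illustration only, one row of a valid input file could look like the
+following (the two count columns are user-specified through num_col and
+denom_col, so the names here are hypothetical):
+
+- geo_value: "pa"
+- time_value: 2022-01-03
+- issue_date: 2022-01-05
+- lag: 2
+- num_covid_claims: 100
+- num_total_claims: 1000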
+ From 57b70d3d963ec1785df86ee556328ed2b57ce00b Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Fri, 13 May 2022 08:05:42 -0400 Subject: [PATCH 004/145] add more instruction to the main --- Backfill_Correction/delphiBackfillCorrection/R/main.R | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 5ebf660fc..b5c1e0253 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -4,10 +4,14 @@ library(dplyr) #' Tempt usage #' params = list() #' customize -#' params$reg_lag: reference lag, after x days, the update is considered to be the response +#' params$ref_lag: reference lag, after x days, the update is considered to be the response. 60 is a reasonable choice for CHNG outpatient data #' params$data_path: link to the input data file #' params$testing_window: the testing window used for saving the runtime. Could set it to be 1 if time allows #' params$test_dates: list of two elements, the first one is the start date and the second one is the end date +#' params$training_days: set it to be 270 or larger if you have enough data +#' params$num_col: the column name for the counts of the numerator, e.g. the number of COVID claims +#' params$denom_col: the column name for the counts of the denominator, e.g. the number of total claims +#' params$geo_level: list("state", "county") #' Main function for getting backfill corrected estimates #' From 40133ce4a1b9310a03e8f9e6eb891efe84b074e4 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Fri, 13 May 2022 08:08:47 -0400 Subject: [PATCH 005/145] add more instruction of required packages to the main --- .../delphiBackfillCorrection/R/main.R | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index b5c1e0253..f4e50523a 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -1,5 +1,18 @@ -library(tidyverse) -library(dplyr) +#' library(tidyverse) +#' library(Matrix) +#' library(stats) +#' library(lubricate) +#' library(zoo) +#' library(dplyr) +#' library(ggplot2) +#' library(stringr) +#' library(plyr) +#' library(nlm) +#' library(covidcast) +#' library(evalcast) +#' library(quantgen) +#' library(gurobi) +#' lp_solver = "gurobi" # LP solver to use in quantile_lasso(); "gurobi" or "glpk" #' Tempt usage #' params = list() From 4cd3d45c0b547101e28e8d9974fea572333da521 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Fri, 13 May 2022 08:27:52 -0400 Subject: [PATCH 006/145] remove incorrect files --- .../unit-tests/testthat/constants.R | 22 -- .../testthat/test-beta_prior_estimation.R | 64 ------ .../unit-tests/testthat/test-training.R | 3 - .../unit-tests/testthat/test-utils.R | 212 ------------------ 4 files changed, 301 deletions(-) delete mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/constants.R delete mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R delete mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-training.R delete mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/constants.R 
b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/constants.R deleted file mode 100644 index 923749602..000000000 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/constants.R +++ /dev/null @@ -1,22 +0,0 @@ -# Constants for the backfill correction model - -taus <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) -ref_lag <- 60 -lag_knots <- c(1, 2, 3, 4, 5, 7, 10, 14) -upper_lag <- 15 -training_days <- 270 -testing_window <- 14 -lag_window <- 5 -lambda <- 0.1 - -yitl <- "log_value_raw" -slope <- "log_7dav_slope" -y7dav <- "log_value_7dav" -wd <- c("Mon_issue", "Tue_issue", "Wed_issue", "Thurs_issue", "Fri_issue", "Sat_issue") -wd2 <- c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref") -wm <- c("W1_issue", "W2_issue", "W3_issue") -#sqrtscale = c('sqrty0', 'sqrty1', 'sqrty2', 'sqrty3') -sqrtscale_num <- c('sqrty0_num', 'sqrty1_num', 'sqrty2_num') -sqrtscale_denom <- c('sqrty0_denom', 'sqrty1_denom', 'sqrty2_denom') -sqrtscale <- c('sqrty0', 'sqrty1', "sqrty2") -log_lag <- "inv_log_lag" \ No newline at end of file diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R deleted file mode 100644 index aa6561719..000000000 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R +++ /dev/null @@ -1,64 +0,0 @@ -## Functions for Beta Prior Approach. -## This is used only for the ratio prediction e.g. fraction of Covid claims, -## percentage of positive tests. We assume that the ratio follows a beta distribution -## that is day-of-week dependent. A quantile regression model is used first with lasso -## penalty for supporting quantile estimation and then a non-linear minimization is used -## for prior estimation. -library(nlm) -library(quantgen) -library(gurobi) -library(tidyverse) -library(Matrix) -library(dplyr) - -lp_solver <- "gurobi" - -delta <- function(fit, actual) sum((fit-actual)^2) - -objective <- function(theta, x, prob, ...) 
{ - ab <- exp(theta) # Parameters are the *logs* of alpha and beta - fit <- pbeta(x, ab[1], ab[2]) - return (delta(fit, prob)) -} - -## Main function for the beta prior approach -## Estimate the priors for the beta distribution based on data for -## a certain day of a week -est_priors <- function(train_data, prior_test_data, dw, taus, - params_list, response, lp_solver, lambda, - start=c(0, log(10), - base_pseudo_denum=1000, base_pseudo_num=10)){ - sub_train_data <- train_data %>% filter(train_data[dw] == 1) - sub_test_data <- prior_test_data %>% filter(prior_test_data[dw] == 1) - if (dim(sub_test_data)[1] == 0) { - pseudo_denum <- base_pseudo_denum - pseudo_num <- base_pseudo_num - } else { - # Using quantile regressison to get estimated quantiles at log scale - quantiles <- list() - for (idx in 1:length(taus)){ - tau <- taus[idx] - obj <- quantile_lasso(as.matrix(sub_train_data[params_list]), - sub_train_data[response], tau = tau, - lambda = lambda, stand = FALSE, lp_solver = lp_solver) - y_hat_all <- as.numeric(predict(obj, newx = as.matrix(sub_test_data[params_list]))) - quantiles[idx] <- exp(mean(y_hat_all, na.rm=TRUE)) # back to the actual scale - } - quantiles <- as.vector(unlist(quantiles)) - # Using nlm to estimate priors - sol <- nlm(objective, start, x=quantiles, prob=taus, lower=0, upper=1, - typsize=c(1,1), fscale=1e-12, gradtol=1e-12) - parms <- exp(sol$estimate) - # Computing pseudo counts based on beta priors - pseudo_denom <- parms[1] + parms[2] - pseudo_num <- parms[1] - } - return (pseudo_denom, pseudo_num) -} - -## Add pseudo counts for numerators and denominators -ratio_adj <- function(data, dw, pseudo_num, pseudo_denom, num_col, denom_col){ - num_adj <- data[data[dw] == 1, num_col] + pseudo_num - denom_adj <- data[data[dw] == 1, denom_col] + pseudo_denom - return (num_adj / denom_adj) -} diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-training.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-training.R deleted file mode 100644 index 26b32ed11..000000000 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-training.R +++ /dev/null @@ -1,3 +0,0 @@ -library(quantgen) -library(gurobi) -lp_solver <- "gurobi" \ No newline at end of file diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R deleted file mode 100644 index 30b0a95bb..000000000 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R +++ /dev/null @@ -1,212 +0,0 @@ -#' Return params file as an R list -#' -#' Reads a parameters file. If the file does not exist, the function will create a copy of -#' '"params.json.template" and read from that. 
-#' -#' @param path path to the parameters file; if not present, will try to copy the file -#' "params.json.template" -#' @param template_path path to the template parameters file -#' -#' @return a named list of parameters values -#' -#' @importFrom dplyr if_else -#' @importFrom jsonlite read_json -#' @importFrom lubridate ymd_hms -#' @export -read_params <- function(path = "params.json", template_path = "params.json.template") { - if (!file.exists(path)) file.copy(template_path, path) - params <- read_json(path, simplifyVector = TRUE) - - params$num_filter <- if_else(params$debug, 2L, 100L) - params$s_weight <- if_else(params$debug, 1.00, 0.01) - params$s_mix_coef <- if_else(params$debug, 0.05, 0.05) - - params$start_time <- ymd_hms( - sprintf("%s 00:00:00", params$start_date), tz = tz_to - ) - params$end_time <- ymd_hms( - sprintf("%s 23:59:59", params$end_date), tz = tz_to - ) - - params$parallel_max_cores <- if_else( - is.null(params$parallel_max_cores), - .Machine$integer.max, - params$parallel_max_cores - ) - - return(params) -} - -#' Write a message to the console -#' -#' @param text the body of the message to display -#' @param df a data frame; the message will show the number of rows in the data frame -#' -#' @export -msg_df <- function(text, df) { - message(sprintf("%s --- %s: %d rows", format(Sys.time()), text, nrow(df))) -} - -#' Assert a logical value -#' -#' Will issue a \code{stop} command if the given statement is false. -#' -#' @param statement a logical value -#' @param msg a character string displayed as an additional message -#' -#' @export -assert <- function(statement, msg="") -{ - if (!statement) - { - stop(msg, call.=(msg=="")) - } -} - -#' Create directory if not already existing -#' -#' @param path character vector giving the directory to create -#' -#' @export -create_dir_not_exist <- function(path) -{ - if (!dir.exists(path)) { dir.create(path) } -} - -#' Adjust weights so no weight is too much of the final estimate. -#' -#' For privacy and estimation quality, we do not want to allow one survey -#' response to have such a high weight that it becomes most of an estimate. -#' -#' So, for a specific time and location: -#' -#' 1. Normalize weights so they sum to 1. -#' -#' 2. Determine a "mixing coefficient" based on the maximum weight. The mixing -#' coefficient is chosen to make the maximum weight smaller than -#' `params$s_weight`, subject to the constraint that the mixing coefficient must -#' be larger than `params$s_mix_coef`. -#' -#' 3. Replace weights with a weighted average of the original weights and -#' uniform weights (meaning 1/N for every observation), weighted by the mixing -#' coefficient. -#' -#' @param weights a vector of sample weights -#' @param s_mix_coef Minimum allowable mixing coefficient. -#' @param s_weight Maximum desired normalized mixing weight for any one observation. -#' @export -mix_weights <- function(weights, s_mix_coef, s_weight) -{ - N <- length(weights) - - ## Step 1: Normalize weights to sum to 1. - weights <- weights / sum(weights) - - ## Step 2: Choose a mixing coefficient to bring down the maximum weight. 
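# ---------------------------------------------------------------------------
# Editor's aside (illustrative, not from the original file): ignoring the fudge
# factors used below, the mixing coefficient is the solution of
#   max_weight * (1 - mix_coef) + mix_coef / N = s_weight,
# i.e. mix_coef = (N * max_weight - N * s_weight) / (N * max_weight - 1).
# Worked example: weights c(0.7, 0.1, 0.1, 0.1) give N = 4 and max_weight = 0.7;
# with s_weight = 0.5, mix_coef = (2.8 - 2.0) / (2.8 - 1) ~ 0.444, and the
# largest mixed weight becomes 0.444 / 4 + (1 - 0.444) * 0.7 ~ 0.5 as required.
# ---------------------------------------------------------------------------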
- max_weight <- max(weights) - - ## Choose the mix_coef to solve this problem: - ## - ## max_weight * (1 - mix_coef) + mix_coef / N <= s_weight - ## - ## TODO: Determine if the fudge factors are really necessary - mix_coef <- if (max_weight <= s_weight) { - 0 - } else if (1/N > s_weight*0.999) { - 1 - } else { - (max_weight * N - 0.999 * N * s_weight + 1e-6) / - (max_weight * N - 1 + 1e-6) - } - precoef <- mix_coef - - ## Enforce minimum and maximum. - if (mix_coef < s_mix_coef) { mix_coef <- s_mix_coef } - if (mix_coef > 1) { mix_coef <- 1 } - - ## Step 3: Replace weights. - new_weights <- mix_coef / N + (1 - mix_coef) * weights - - return(list( - weights=new_weights, - coef=mix_coef, - precoef=precoef, - maxp=max_weight, - normalized_preweights=weights - )) -} - - -#' Aggregates counties into megacounties that have low sample size values for a -#' given day. -#' -#' @param df_intr Input tibble that requires aggregation, with `geo_id`, `val`, -#' `sample_size`, `effective_sample_size`, and `se` columns. -#' @param threshold Sample size value below which counties should be grouped -#' into megacounties. -#' @param groupby_vars Character vector of column names to perform `group_by` -#' over -#' @return Tibble of megacounties. Counties that are not grouped are not -#' included in the output. -#' @importFrom dplyr group_by across all_of -megacounty <- function( - df_intr, threshold, groupby_vars=c("day", "geo_id") -) -{ - df_megacounties <- df_intr[df_intr$sample_size < threshold | - df_intr$effective_sample_size < threshold, ] - - df_megacounties <- mutate(df_megacounties, - geo_id = make_megacounty_fips(.data$geo_id)) - - df_megacounties <- group_by(df_megacounties, across(all_of(groupby_vars))) - df_megacounties <- mutate( - df_megacounties, - county_weight = .data$effective_sample_size / sum(.data$effective_sample_size)) - - df_megacounties <- summarize( - df_megacounties, - val = weighted.mean(.data$val, .data$effective_sample_size), - se = sqrt(sum(.data$se^2 * .data$county_weight^2)), - sample_size = sum(.data$sample_size), - effective_sample_size = sum(.data$effective_sample_size) - ) - - df_megacounties <- mutate(df_megacounties, county_weight = NULL) - df_megacounties <- ungroup(df_megacounties) - - return(df_megacounties) -} - -#' Converts county FIPS code to megacounty code. -#' -#' We designate megacounties with a special FIPS ending in 000; for example, the -#' megacounty for state 26 would be 26000 and would comprise counties with FIPS -#' codes 26XXX. -#' -#' @param fips Geo-id -#' @return Megacounty -make_megacounty_fips <- function(fips) { - paste0(substr(fips, 1, 2), "000") -} - -#' `any_true` returns TRUE if at least one is TRUE -#' Returns FALSE if at least one is FALSE and none are TRUE -#' Returns NA if all are NA -#' -#' @param ... One or more logical vectors of the same length. -#' @return A logical vector of the same length as the input vector(s). -#' @noRd -is_true <- function(x) x %in% TRUE -or <- function(a, b) ifelse(is.na(a) & is.na(b), NA, is_true(a) | is_true(b)) -any_true <- function(...) Reduce(or, list(...), NA) - -#' `all_true` returns TRUE if all are TRUE -#' Returns FALSE if at least one is FALSE and none are NA -#' Returns NA if at least one is NA -#' -#' @param ... One or more logical vectors of the same length. -#' @return A logical vector of the same length as the input vector(s). -#' @noRd -and <- function(a, b) ifelse(is.na(a) | is.na(b), NA, a & b) -all_true <- function(...) 
Reduce(and, list(...), TRUE) \ No newline at end of file From 0e37e2f9801af94c55b5eb948610e4ad45a0f5a7 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Fri, 13 May 2022 09:05:11 -0400 Subject: [PATCH 007/145] add details for the required support from the engineering side --- ...Support needed from the engineering side.docx | Bin 0 -> 8365 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 Backfill_Correction/Support needed from the engineering side.docx diff --git a/Backfill_Correction/Support needed from the engineering side.docx b/Backfill_Correction/Support needed from the engineering side.docx new file mode 100644 index 0000000000000000000000000000000000000000..d023a3a09b9b139213e2074beebe3fe20c8ea930 GIT binary patch literal 8365 zcma)h1yo$yvMugG8#%ZW+zIaPZo%E5aR}~C2M-Y3-Q6{~1rH9v-66myIq%++bN;(` zyy`J}clRE(*IHe(R?SsYQ3etU00svK2L@y+R|Wgc&|g2h**lsr+PT;onL1k9nKQWC z+8k>Q*)21nJ_S4nX1A8lA_jfoVw0T8d5gey50;ukL8=247q`{PSg-90QgtQU>%rL2 zS{=;|y##wac}|Se@(=1u7fWh&DEq?*G?+s1@%qOV=b9#KF<2z{29nqun$)4s#(-%t^z^Y-u?9>gu zxhXpnZ;fMj4~&sKGeGB}okm5tW6d}$HFpeE%=@-~xE`1;j%djcy5SI6KE_7*;q-ER>k&dJ#iiAFW-i9X| ztQNT-Jq9tW;zoGhj-fn&8;xrvm|O(6SXba&9t`NU4BfQB|9JlgAzME-9JkC)4&Imd z#0CgKR^VUg;evXx0lv^>U@J_6Ls`HM!3btasH{SgL=ErS3MXa;Uu94>hW3T8#TwK^ z?MxGFW7f4^R%;t_mo<2UCv3oHRS3ar+QQmeg*D@L*-}(x)Z?CqFuKB;Jr|mU228&M zp_>N1g4jCN6GvfC2_N@4e7kFfhxrrudpw~>O3p|6VaQs|M`B-GOF7Y3Z#Ihc{)`i>k&%QK^zfZiTpRbPKKv`@zG| z!2#au)#~f)ziw1&Xs8#{9KN7?QC6FiAJbgOrQZ6a-U_9@_@u5W)P8u`uzi70l!1k7 zvvBJGLV$t6SG*z(W*VHL?q%HNN^#)0cxqN3 zS7l7|fsqY0pn40%ZS5RW4JOUz32vAbO8UkLOe4t&@IPUC0dodeC}a}KmFu$5X?sj` za|9{{lGY}BxIs`&ado2RR#P)4BgPt<2kEyphzg(v)F#nrrxr={NIaUZiiCn`JX%vI zPG5Ex(k}e>?S0Y~4o6*_PP^m^GxWI3C8FgY`@=L5LZWL*9K9v8OCk)ppM&67sZLQZ z${c8moRg!%v%k$}<=Lh*_*0(bdl+L77se89lDnTBUZvag3GU+2-+_u$LM(Eb>gP%) z0)eDv`%y4AVbn(Q@bg?E6c8!tOivEq{0hF&W`3|)g2p5wzv;Q)3PvheJ2L1>U(*10 zySlV(GpjAQ3t7+Au`cj$gZ0N_VgS42gQ__D7Y9_yE0!w`r)}J@SWj_|1tdiF{5Bt8D>l zO}IUa{}ArbL3ZI^x`?DFvPniXIJ!;0B}7pg3A`CNC9yzJdJk($P%k&hp|lKRyKFtC zrPBL}_kknGxKbm+X;$fic4K5fEhGJ@a^PpcbbO7_;F!b+D&&=M=S=G?U z#`G7666EB2nNWijx(G;5m-FCIBT`l2pzvI~z+ncZ%9aSNkJzqmn%D#s$rD1<5~9{_ z2ez;)-w|nx@d(N13@uoFs*=Qs31(9pSk)uKZlrT0lm$nK{<5IQ*)YU7XRDoa&Vb;! 
zr98%}{=t5R)3pr?_ZxaSQ?@3l<=IAJi6*2iUh8@hz56{w;??6Fib%(?dB&Ve=z z6?zyfnj|F5x&G|DZe`#(B#}(IP;;lrkG0|KAOs7jASHk%9(j4`2qX4ku_|7`w&0@* zmt0R*MB=8(a*|;bI%qeFtb}J*mORT@h3*Kdk7dW4M27P%Z!c77^fCAN0*#5LL4JTd zVt6f7Xn+)l#(?SYcPIRuO(oAT2B1?i4^{zu4o##5qNp=+)G&^dRtbgoth2dJMA>R; z9}US)6Z!}plDx@?a2!MUwl0c0rJzl!mLN(->odMv$!2Q6jdF1jLQSkFPM~5w*nX~{ zW|Qa?{UoMpKR!KoqLUnTPZcW2CQ`M-+B8(YTqnbL-J-OCQ~o&3fO;*dbxp$792QyP z6vIKrU*-uSO4B{`I|!XZPK7r-N2{)8kS|vnc}@jHGT`q*&$M@TX!+I+7cQ?Q3CC6T zoIP>YeOYoyV5tqFT}h|0?%QxnG?;DzhiLQ|LtOPC^@S(x%r@$#Y=A*+@k*s}sl^ZJ za1Ox|O_CABMO~8CZVDc%x}YV8<8h&shs|oGu@)au|7(i_<}|m24+pt#jb%xMcAPEv z-^PA!=Geg;g#+~Y7@<&j6AgDd|jYrMI!tn^F zNlItKZkRXWfLAB~hrH9|W zloS2tj`-;O1s-p{=NaRUQFA{{aVsWXOMvbAobdW~O-P*nt%O9jm%MpNqS@?uw!fzx zV-D4i!vJe)*9BC)5xHp|hj=>Qp^?s?dA8(Up+XA@1~&1!2mT+5tcks`i>;}h^Dk2P ze%W!E6a6XR2{s`kjz*#Zz=J)@Sxg>7RWU5w7B6-+AzCvb)-}C;CT17uZnMwgCVN?h#B)FK7vYN?YV5c{y_mvHC zda@WzJ@vYK6~sID`%Ojxda7cn5_|+kMd1xf4@cgV5Bl;qx4JB$IxX4M*na{^yCuNed-1HIo(;pj7HD1YWm3|+!IRsaLvs!T4YCU3edxnTSD8% znzBcv(ph4g?1i@QWjDT;IM>&CX;?0BRtD2`)Q5>x9Ns4F*ow=P>*R1d62NHVrX|gh zujBLc!AD2;O?A_AfJE8&>{x@ZnGZj0D`Q6EKn#XtnH`bD5WI^Pt4FlF?b%d@$L8+7p_!&nS5$GRhPGDY2VjR4iLxphfR@Z$!vnwxWm~Y_S9@N!M@8z*X=jSe2^(FloN4t5#7-Xvax(hnnj2L&8o#vg9V13K3 z4#aS>PDm+Le%vFKG`>dn75mlCxK2`0UK?oM3lz)`s#+59I6=#|HO;gpF_h1i@QdJ4 zSNT5t8PmpMmvEcbK>JAN_HG|*){4ctYl^{7m;HyHrgWa2mWJ=xfcdfxarw}&uHb># zoj8?PqZd~dD11FwSAxK;icOA0C%3d4w2j<4ET?9$jbw3IQ%9~`sv{N>%zPT5Qi$|3 zr~TkMVr2&!+Nv=;!&2cq<^5U3GTTQ-7X3tn>fQV*B-9T{{D1tA(1^@pZ_qonaCS~0 z2M{V4#Cw$W_^J7s<?LOI1AtdJ{XV43z)Wu3&0fYu7)9@MW;?(;=w3;H zj}@qAmyRuI_N7q%J|Ym$2<@it%lvlKCrQw_Uw}rx4XIff;ex(twxBAkkAMQdH>rovU-@Zw zY3^iKt$VGk8-rt`p=0f|$@&)dm68>XMy*8|OgD!1b$6WuiDLN~ohcWoM*-0BT+;TI zkQQX%blGUwh$1BNN@c8Eb=?pl2)#CMSKA)1cli#2A(9HFi&IDyp-3u zID%`WgR_8M9j0YV z{G+qxM6g^3`NKYE<_e@aBImnO9t-a8*h0OOj336zk(dzRQQ*iSS$E`EGsv91|7m($w1tEe4tvX&OX#4~rEoliYJ z^f3#;E|K(eVvg2Q$K6kitWtfFE>+x&U@w)g`UY@#*Zx)ST@3&IW5%6zFkJ<7>LI9> zzozY5o)s3v3rENX^S6h$;+*z$I>zgk{pY-q7L*Z|{%t|!U-Vj*TCZ8dy=pNUV_)o> z`2@|BMW`Iv3mvDa9WpTTQ7^N0#h-GQ$3^6E9u4xAw-u_R(h!2^gpNNBq-dk`m*TWL z%XF?>hEYa+Z(PbC*Z>rHsa=^0N7y@%4dQmSkv&FA-(Fk$~Pe8`J6?WGE^9GR72fI)Dkcx~;Zty^(ei2SvKef_@#H1|Q)2u=?%h#cCdvY+x zlJ=GTPP1NiXL!Z=NHckTUa4AxYOEm7MA3@I!|jJ_R1J~9 zd=nZ$SnF!<<{2SYYcDCJN-J7~SCf}?=otHyP00Ncx%U&4r?WFk(2Bo{I zNGj{QBAz-8k(s_(r)28+S9kl12JHj6#nm!wWqQ;5&n+|s(FS;Z6H({aw)h+KG%r;& z`(=w$;Es{!o+ppj0&K^PFW>Gr^?!0ZOeMs?&+uSiB;5av+r4&I{_ObaJDS=!G5*f* z7=Hdz9zXVa3`X+H^m8k6EYyOH@}E1dVrDjg6QW&Dp*)G5qf|@JaaB;jF*?jE-ki)O zxW6=i|7rAq)U89%xFAn~*G&)~q!TVn$qyg6bDzR#kpiI8K=+yki(9<1xj^7zZGf36 z6CD&Le;{WsXv+t9AV7;~BoZIXlsR`ty>Sti8F)tr&W(>GuG;vAiD#d)dJmN+1@9)5 z9Q=!6Dd#vR9iS2c^j#Fu%7$K(cm^IP^QdPnZ&w>9yO1Eno0}NNI5`qFx>Gi}M=TSe zI*!snX;r(Qf0hEcSQu0m0ay3=k+@pHMM~$|6KQ69lE0tf#7f|x*4s*odt&>$!C9c# z2xdfX+S|m1cn7sfWk~JeiWG!{eO?^?6))bD-CO9n%-CC{T7r497@KFTkkt{#6E!W* z_BXlg&>uf(1?R_7Tibh;2N7&naAZ*|%gM%eTwme+49;1s{ea5r0hbW!zXb>JH8_99 z7x1pluN`y^>Vo!9=Tm8p=Ef|f?|%~w3HX#(v}~1Z;N3l z=7Mdg;d_6qh5SdhMQ$5{$;zru!X)an@m#q^?0fU(%iAoi|~lB^2Wko)L@>iTv%O2neSb7y-;rl|gONna09FJL%Emrg{41FSsHSeNaP$LGhY`gL zbZodTV|nFj-lzMv5)@2|#mnxQ#CXM@KgVo|-%{=2W1)7SC1$&Fn z4fP%Zgl}cWRJegAI``A}gZ!~LU%uVWu)}N5l?;`O5wL~JE@gA@<)g;*@q|K{;n2vo zR`$lZvQ75qXy_kSg)=}6+PeXyHJz?TE84MWPw2JujTNyc9=w$kY3S4OGg`#E=lt13 zGt5kXVezeYXlL9B78VMVzD0JjHt4fCHgL1I??)=q`PD}p>3eRQ_8^H78PwUtt>Qpv zH(>MqQOMA8(Ch30^;Gw9RM<(%T9TdtlqI&_=>|n%e{Ap666y1Q2#7fm;A?pqnF@rJ zxb@{w)a?akskGlcM|Kiztle0iQR?|gd5|D(ux{j!qi<^1aW(w_h2 zGBgzCE~HC@fIkcE;~piKAP+A@FA~HT#q?++RHG6Vn?CR-Xy2*okiv3ARPgQp4(i|JMnw>X&)v5g5D1J668T-_Kt z+JO+wvT}5-B*jI|W5nB>#LP->y$5dLm_aM~b({0_2k;E{lG&{Q7bf-=6T#47S?6eR 
z)Z}m}p)j}pckkU4Ls>B%wsE(CmYFgzsjHRKJGU^*h>m|t`V=c7bCJrqz^apv6AxT95BGmYRq{B75 z+J;>i2I`gxI)qjgGP!h9P-$>|f}5*AzJXK2cX`G{zX7Yr$9qSSPMai~8ZBZMq3DEe z%V2hf_6#N}*bL>=tm~yMgXHs zoFw+g^Z~r{f)A7w&{WHjvwfQfNCb7|YKF4*cJ{{VzUiaSP7+6pi&g4uvK8w&F z-A~3%g}GY|4cln*ZW)X9+3gW!k>EuVAbGyKJh~_jlGo3ahuU_nm29)xz^Z3{87?UL zV>AvGdvSHzD6_W~SS-RZ^R+g4?>VA?ZF~xFyTVQ}ro)-Z@#AP3s?buX9e=q`< zb2fRNo_<$H<`6_XYJyk^y3s!3o~lNTz~OtY*tQ|)?4@jQbO85f1Za}wlB!E6GkM)L z!osKsew7m1_#bYPH}H6Qv#oJU_Sp_gatIP8P4`)g1qZk=PmOo$(_Z<}=9EPqNn@48 z?B2Gtn%;bv^0naBn+Il*hbPmY&3#Jc)|33F87;o4y4$$&5}Xl)@zdOTK!v+!t0m|+ zb3%Q7p9-Wd^*sc9)kZ(PSpPW}TV9R`+J0V%L*@%71-v(Rr zZo`w+>^?%)APJ>G%Ij&f&m#c)ddwB-p`gU@g zbXm=H^E62*9f40xc`2*s(wj$E=0;s+aZX-@J2hfS8Aj)aO-F#?G+ndN)tQzffWIen za&hmLuDbg$2L&OQDui31uddMZQf-3n3+&IrN}`<;MEBb6Uncl(|DIzWcn~y1 zabjw;>El@2tp~ZnF3)KMy;%*js+Dq#ntylSQixhUKXzpOhcC0;L%W-C)}^{3v#r;#&0J?NVPCj(cBuXsTRhwSV;1 z=Dx$4wg!T-;~W@bG8U^^+mR@*L4)~IWQT3hgn_sOh$Z*X%EA_7B?C50+P0@vmU0>Q z1%Q?|fLoTk=x#~fX6B`g1~-~Cir{%-%j$gC*ycmLnDo1ZG*UsnC9==}2kUGw{= z`|k?E--o|`o$N37zsWuS^#8px`cr54%PL;c_2=;aRvrH7{5$pj``ob*{>S-uqWw?z z-^t-me*TwTyaM;v*#Cj8|MN+|ldqp-@-K^hWn8~e%YQolo<={p*?^*>YiCsX_9 mBYp?R&wTsK_FfnDzhs}H4D{Oc9zJy<6tVa0r+y4VCg57KY literal 0 HcmV?d00001 From 28730c028ceef119efc83c7f1d56dccf22ed7f76 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Mon, 16 May 2022 08:58:01 -0400 Subject: [PATCH 008/145] fixed import errors --- .../delphiBackfillCorrection/R/beta_prior_estimation.R | 3 ++- Backfill_Correction/delphiBackfillCorrection/R/main.R | 8 ++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R index 366c0a060..36b96870a 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R @@ -43,7 +43,8 @@ objective <- function(theta, x, prob, ...) 
{ #' @param base_pseudo_denum the pseudo counts added to denominator if little data for training #' @param base_pseudo_num the pseudo counts added to numerator if little data for training #' -#' @import nlm +#' @import MASS +#' @import stats4 #' @import gurobi #' @import Matrix #' @import tidyverse diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index f4e50523a..95d37bb6f 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -1,13 +1,17 @@ #' library(tidyverse) #' library(Matrix) #' library(stats) -#' library(lubricate) +#' library(tidyverse) +#' library(dplyr) +#' library(lubridate) #' library(zoo) #' library(dplyr) #' library(ggplot2) #' library(stringr) #' library(plyr) -#' library(nlm) +#' library(MASS) +#' library(stats4) +#' #' library(covidcast) #' library(evalcast) #' library(quantgen) From a4cc2a05e872628ccc0876747768674122581a80 Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Thu, 16 Jun 2022 16:55:14 -0400 Subject: [PATCH 009/145] Add missed definition of n_refds --- .../unit-tests/testthat/test-preprocessing.R | 1 + 1 file changed, 1 insertion(+) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R index 8c1c8bcd6..bd9b1c9b9 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R @@ -43,6 +43,7 @@ test_that("testing NA filling for missing udpates", { # Assuming the input data is already prepared df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd) + n_refds <- as.numeric(max_refd - min_refd)+1 backfill_df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) expect_equal(dim(backfill_df)[1], n_refds*(ref_lag+1)) From ca6d76e0f8c5a62ba49b4bf25e3aebd593a4cd1e Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Thu, 16 Jun 2022 17:02:35 -0400 Subject: [PATCH 010/145] Fix an error in the unit test --- .../unit-tests/testthat/test-preprocessing.R | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R index bd9b1c9b9..c4e5982fd 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R @@ -108,7 +108,17 @@ test_that("testing adding 7 day avg and target", { backfill_df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) df_new <- add_7davs_and_target(backfill_df, "value_raw", refd_col, lag_col) - expect_equal(dim(df_new)[2], 3 + 1 + 1 + 1 + 1) + # Existing columns: + # time_value: reference date + # value_raw: raw counts + # lag: number of days between issue date and reference date + # Added columns + # issue_date: report/issue date + # value_7dav: 7day avg of the raw counts + # value_prev_7dav: 7day avg of the counts from -14 days to -8 days + # value_target: updated counts on the target date + # target_date: the date ref_lag days after the reference date + expect_equal(dim(df_new)[2], 3 + 1 + 1 + 1 + 1 + 1) 
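  # Illustrative sketch (editor's addition, not part of the original test): the
  # trailing 7-day average that add_7davs_and_target() attaches, shown on a toy
  # vector, assuming the zoo package already used elsewhere in this pipeline.
  toy_counts <- c(5, 7, 6, 8, 9, 10, 12, 11, 13, 14, 12, 15, 16, 14)
  toy_7dav <- zoo::rollapply(toy_counts, width = 7, FUN = mean, align = "right", fill = NA)
  toy_prev_7dav <- c(rep(NA, 7), head(toy_7dav, -7))   # same window, shifted back 7 days
  tail(cbind(toy_counts, toy_7dav, toy_prev_7dav), 3)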
expect_equal(dim(df_new)[1], 7 * 8) }) From 9b946d011245b708434049892fac363da92776a9 Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Thu, 18 Aug 2022 06:21:24 -0400 Subject: [PATCH 011/145] Fix errors, add tooling scripts --- .../R/beta_prior_estimation.R | 32 ++-- .../delphiBackfillCorrection/R/constants.R | 8 +- .../delphiBackfillCorrection/R/model.R | 5 +- .../R/preprocessing.R | 11 +- .../delphiBackfillCorrection/R/tooling.R | 147 ++++++++++++++++++ .../delphiBackfillCorrection/R/utils.R | 8 +- 6 files changed, 182 insertions(+), 29 deletions(-) create mode 100644 Backfill_Correction/delphiBackfillCorrection/R/tooling.R diff --git a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R index 36b96870a..0598ec960 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R @@ -51,12 +51,12 @@ objective <- function(theta, x, prob, ...) { #' @import dplyr #' @importFrom quantgen quantile_lasso #' @importFrom constants lp_solver -est_priors <- function(train_data, prior_test_data, dw, taus, +est_priors <- function(train_data, prior_test_data, cov, taus, params_list, response, lp_solver, lambda, start=c(0, log(10)), base_pseudo_denom=1000, base_pseudo_num=10){ - sub_train_data <- train_data %>% filter(train_data[dw] == 1) - sub_test_data <- prior_test_data %>% filter(prior_test_data[dw] == 1) + sub_train_data <- train_data %>% filter(train_data[[cov]] == 1) + sub_test_data <- prior_test_data %>% filter(prior_test_data[[cov]] == 1) if (dim(sub_test_data)[1] == 0) { pseudo_denom <- base_pseudo_denom pseudo_num <- base_pseudo_num @@ -93,13 +93,13 @@ est_priors <- function(train_data, prior_test_data, dw, taus, #' @param denom_col the column name for the denominator #' #' @export -ratio_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, denom_col){ - if (is.null(dw)){ +ratio_adj_with_pseudo <- function(data, cov, pseudo_num, pseudo_denom, num_col, denom_col){ + if (is.null(cov)){ num_adj <- data[[num_col]] + pseudo_num denom_adj <- data[[denom_col]] + pseudo_denom } else { - num_adj <- data[data[dw] == 1, num_col] + pseudo_num - denom_adj <- data[data[dw] == 1, denom_col] + pseudo_denom + num_adj <- data[[num_col]][data[[cov]] == 1] + pseudo_num + denom_adj <- data[data[[cov]] == 1, denom_col] + pseudo_denom } return (num_adj / denom_adj) } @@ -140,28 +140,28 @@ ratio_adj <- function(train_data, test_data, prior_test_data){ for (cov in c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref", "Sun_ref")){ pseudo_counts <- est_priors(train_data, prior_test_data, cov, taus, - pre_params_list, "value_target", lp_solver, lambda=0.1) + pre_params_list, "log_value_target", lp_solver, lambda=0.1) pseudo_denum = pseudo_counts[1] + pseudo_counts[2] pseudo_num = pseudo_counts[1] # update current data # For training - train_data$value_raw[train_data[cov] == 1] <- ratio_adj_with_pseudo( + train_data$value_raw[train_data[[cov]] == 1] <- ratio_adj_with_pseudo( train_data, cov, pseudo_num, pseudo_denum, "value_raw_num", "value_raw_denom") - train_data$value_7dav[train_data[cov] == 1] <- ratio_adj_with_pseudo( + train_data$value_7dav[train_data[[cov]] == 1] <- ratio_adj_with_pseudo( train_data, cov, pseudo_num, pseudo_denum, "value_7dav_num", "value_7dav_denom") - train_data$value_prev_7dav[train_data[cov] == 1] <- ratio_adj_with_pseudo( + 
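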
train_data$value_prev_7dav[train_data[[cov]] == 1] <- ratio_adj_with_pseudo( train_data, cov, pseudo_num, pseudo_denum, "value_prev_7dav_num", "value_prev_7dav_denom") #For testing - test_data$value_raw[test_data[cov] == 1] <- ratio_adj_with_pseudo( + test_data$value_raw[test_data[[cov]] == 1] <- ratio_adj_with_pseudo( test_data, cov, pseudo_num, pseudo_denum, "value_raw_num", "value_raw_denom") - test_data$value_7dav[test_data[cov] == 1] <- ratio_adj_with_pseudo( + test_data$value_7dav[test_data[[cov]] == 1] <- ratio_adj_with_pseudo( test_data, cov, pseudo_num, pseudo_denum, "value_7dav_num", "value_7dav_denom") - test_data$value_prev_7dav[test_data[cov] == 1] <- ratio_adj_with_pseudo( + test_data$value_prev_7dav[test_data[[cov]] == 1] <- ratio_adj_with_pseudo( test_data, cov, pseudo_num, pseudo_denum, "value_prev_7dav_num", "value_prev_7dav_denom") - test_data$pseudo_num[test_data[cov] == 1] = pseudo_num - test_data$pseudo_denum[test_data[cov] == 1] = pseudo_denum + test_data$pseudo_num[test_data[[cov]] == 1] = pseudo_num + test_data$pseudo_denum[test_data[[cov]] == 1] = pseudo_denum } train_data$log_value_raw = log(train_data$value_raw) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/constants.R b/Backfill_Correction/delphiBackfillCorrection/R/constants.R index 47da159da..d7733f9c8 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/constants.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/constants.R @@ -1,21 +1,19 @@ # Constants for the backfill correction model - taus <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) ref_lag <- 60 -lag_knots <- c(1, 2, 3, 4, 5, 7, 10, 14) -upper_lag <- 15 +test_lags <- c(1:14, 21, 35, 51) training_days <- 270 testing_window <- 14 lag_window <- 5 lambda <- 0.1 +lp_solver = "gurobi" # LP solver to use in quantile_lasso(); "gurobi" or "glpk" -ld_name = "01" yitl = "log_value_raw" slope = "log_7dav_slope" y7dav = "log_value_7dav" wd = c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat") -wd2 = c("Mon2", "Tue2", "Wed2", "Thurs2", "Fri2", "Sat2") wm <- c("W1_issue", "W2_issue", "W3_issue") + #sqrtscale = c('sqrty0', 'sqrty1', 'sqrty2', 'sqrty3') sqrtscale_covid = c('sqrty0_covid', 'sqrty1_covid', 'sqrty2_covid') sqrtscale_total = c('sqrty0_total', 'sqrty1_total', 'sqrty2_total') diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index a4079dbfb..6a0f9c61d 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -59,12 +59,13 @@ model_training_and_testing <- function(train_data, test_data, taus, params_list, test_data[paste0("predicted_tau", as.character(tau))] = y_hat_all coefs_result[[success+1]] = coef(obj) + coefs_result[[success+1]]$tau = tau success = success + 1 }, - error=function(e) {print(paste(geo, test_date, model_name, as.character(tau), sep="_"))} + error=function(e) {print(paste(geo, test_date, as.character(tau), sep="_"))} ) } - if (success < 9) next + if (success < 9){ return (NULL)} coef_combined_result = data.frame(tau=taus, issue_date=test_date) coef_combined_result[coef_list] = as.matrix(do.call(rbind, coefs_result)) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R index 5089e275d..e7c04fc87 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R @@ -168,7 +168,7 @@ add_weekofmonth <- 
function(df, wm, time_col){ #' @param lag_col column name for the column of lag #' #' @export -add_7davs_and_target <- function(df, value_col, refd_col, lag_col){ +add_7davs_and_target <- function(df, value_col, refd_col, lag_col, ref_lag){ df$issue_date <- df[[refd_col]] + df[[lag_col]] pivot_df <- df[order(df$issue_date, decreasing=FALSE), ] %>% @@ -186,12 +186,19 @@ add_7davs_and_target <- function(df, value_col, refd_col, lag_col){ list(df, avg_df, avg_df_prev7)) # Add target - target_df <- df[df$lag==ref_lag, ] %>% select(c(refd_col, "value_raw", "issue_date")) + target_df <- df[df$lag==ref_lag, c(refd_col, "value_raw", "issue_date")] names(target_df)[names(target_df) == 'value_raw'] <- 'value_target' names(target_df)[names(target_df) == 'issue_date'] <- 'target_date' backfill_df <- merge(backfill_df, target_df, by=refd_col, all.x=TRUE) + # Add log values + backfill_df$log_value_raw = log(backfill_df$value_raw + 1) + backfill_df$log_value_7dav = log(backfill_df$value_7dav + 1) + backfill_df$log_value_target = log(backfill_df$value_target + 1) + backfill_df$log_value_prev_7dav = log(backfill_df$value_prev_7dav + 1) + backfill_df$log_7dav_slope = backfill_df$log_value_7dav - backfill_df$log_value_prev_7dav + # Remove invalid rows backfill_df <- backfill_df %>% drop_na(c(lag_col)) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R new file mode 100644 index 000000000..69458a9eb --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R @@ -0,0 +1,147 @@ +library(tidyverse) +#' library(Matrix) +#' library(stats) +library(dplyr) +library(lubridate) +library(zoo) +#' library(ggplot2) +#' library(stringr) +#' library(plyr) +library(MASS) +library(stats4) +#' +#' library(covidcast) +library(evalcast) +library(quantgen) +library(gurobi) + +#' Main function for getting backfill corrected estimates +#' +#' @import utils +#' @import constants +#' @import preprocessing +#' @import beta_prior_estimation +#' @import model +#' +#' @export +run_backfill <- function(df, export_dir, taus, + test_date_list, test_lags, + value_cols, training_days, testing_window, + ref_lag, value_type, lambda){ + # Get all the locations that are considered + geo_list <- unique(df[df$time_value %in% test_date_list, "geo_value"]) + # Build model for each location + res_list = list() + res_indx = 1 + coef_df_list = list() + + for (geo in geo_list) { + subdf <- df %>% filter(geo_value == geo) %>% filter(lag < ref_lag) + min_refd <- min(subdf$time_value) + max_refd <- max(subdf$time_value) + subdf <- fill_rows(subdf, "time_value", "lag", min_refd, max_refd) + if (value_type == "count") { # For counts data only + combined_df <- fill_missing_updates(subdf, value_cols[1], "time_value", "lag") + combined_df <- add_7davs_and_target(combined_df, "value_raw", "time_value", "lag", ref_lag) + } else if (value_type == "ratio"){ + combined_num_df <- fill_missing_updates(subdf, value_cols[1], "time_value", "lag") + combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", "time_value", "lag", ref_lag) + + combined_denom_df <- fill_missing_updates(subdf, value_cols[2], "time_value", "lag") + combined_denom_df <- add_7davs_and_target(combined_denom_df, "value_raw", "time_value", "lag", ref_lag) + + combined_df <- merge(combined_num_df, combined_denom_df, + by=c("time_value", "issue_date", "lag", "target_date"), all.y=TRUE, + suffixes=c("_num", "_denom")) + } + combined_df <- add_params_for_dates(combined_df, "time_value", "lag") + + 
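  # Illustrative sketch (editor's addition, not part of the patch): the rolling
  # train/test split performed in the loop below, shown on a toy frame. Training
  # rows are issued before the test date and already matured (target date
  # reached); testing rows are issued within `testing_window` days of the test
  # date. Column names mirror those created above; dplyr is assumed.
  #   refd <- as.Date("2021-01-01") + 0:120
  #   toy  <- expand.grid(time_value = refd, lag = 0:60)
  #   toy$issue_date  <- toy$time_value + toy$lag
  #   toy$target_date <- toy$time_value + 60
  #   test_date <- as.Date("2021-04-01")
  #   train <- toy %>%
  #     filter(issue_date < test_date, target_date <= test_date,
  #            target_date > test_date - 270)
  #   test  <- toy %>%
  #     filter(issue_date >= test_date, issue_date < test_date + 1)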
for (test_date in test_date_list){ + geo_train_data = combined_df %>% + filter(issue_date < test_date) %>% + filter(target_date <= test_date) %>% + filter(target_date > test_date - training_days) %>% + drop_na() + geo_test_data = combined_df %>% + filter(issue_date >= test_date) %>% + filter(issue_date < test_date+testing_window) %>% + drop_na() + if (dim(geo_test_data)[1] == 0) next + if (dim(geo_train_data)[1] <= 200) next + if (value_type == "ratio"){ + geo_prior_test_data = combined_df %>% + filter(issue_date > test_date-7) %>% + filter(issue_date <= test_date) + + updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) + geo_train_data <- updated_data[[1]] + geo_test_data <- updated_data[[2]] + } + + max_raw = sqrt(max(geo_train_data$value_raw)) + for (test_lag in test_lags){ + filtered_data <- data_filteration(test_lag, geo_train_data, geo_test_data) + train_data <- filtered_data[[1]] + test_data <- filtered_data[[2]] + + updated_data <- add_sqrtscale(train_data, test_data, max_raw, "value_raw") + train_data <- updated_data[[1]] + test_data <- updated_data[[2]] + sqrtscale <- updated_data[[3]] + + covariates <- list(y7dav, paste0(wd, "_ref"), paste0(wd, "_issue"), wm, slope, sqrtscale) + params_list <- c(yitl, as.vector(unlist(covariates))) + + # Model training and testing + prediction_results <- model_training_and_testing( + train_data, test_data, taus, params_list, lp_solver, lambda, test_date) + test_data <- prediction_results[[1]] + coefs <- prediction_results[[2]] + test_data <- evl(test_data, taus) + test_data$test_date <- test_date + coefs$test_date <- test_date + coefs$test_lag <- test_lag + coefs$geo_value <- geo + + res_list[[res_indx]] = test_data + coef_df_list[[res_indx]] = coefs + res_indx = res_indx+1 + export_test_result(test_data, coefs, export_dir, + geo, test_lag) + }# End for test lags + }# End for test date list + result_df = do.call(rbind, res_list) + coefs_df = do.call(rbind.fill, coef_df_list) + export_test_result(result_df, coefs_df, export_dir, geo) + }# End for geo lsit +} + +#' Main function +#' Check the parameters and the input +#' +#' @import utils +#' @import constants +#' @import preprocessing +#' @import beta_prior_estimation +#' @import model +#' +#' @export +main <- function(){ + + + # Check input data + # Check data type and required columns + + # Get test date list according to the test start date + + # Check available training days + + run_backfill(df, export_dir, taus, + test_date_list, test_lags, + value_cols, training_days, testing_window, + ref_lag, value_type, lambda) + +} + +####### Run Main Function +main() \ No newline at end of file diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index a7df9b80e..323a6b8b9 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -57,6 +57,7 @@ read_data <- function(path){ return (df) } + #‘ Export the result to customized directory #' @param test_data test data with prediction result @@ -67,12 +68,11 @@ read_data <- function(path){ #' @param test_lag #' #' @export -export_test_result <- function(test_data, coef_data, export_dir, - geo_level, geo, test_lag){ - pred_output_dir = paste("prediction", geo_level, geo, as.character(test_lag), sep="_") +export_test_result <- function(test_data, coef_data, export_dir, geo){ + pred_output_dir = paste("prediction", geo, sep="_") write.csv(test_data, paste(export_dir, pred_output_dir , ".csv", 
sep=""), row.names = FALSE) - coef_output_dir = paste("coefs", geo_level, geo, as.character(test_lag), sep="_") + coef_output_dir = paste("coefs", geo, sep="_") write.csv(test_data, paste(export_dir, coef_output_dir , ".csv", sep=""), row.names = FALSE) } From c8a310e7150a5ec97bfa976b3e17e3c3edc9daae Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Thu, 18 Aug 2022 13:37:11 -0400 Subject: [PATCH 012/145] Add checks of the arguments --- .../delphiBackfillCorrection/R/tooling.R | 77 ++++++++++++++++--- 1 file changed, 68 insertions(+), 9 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R index 69458a9eb..4b89260ff 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R @@ -1,19 +1,15 @@ library(tidyverse) -#' library(Matrix) -#' library(stats) library(dplyr) library(lubridate) library(zoo) -#' library(ggplot2) #' library(stringr) #' library(plyr) library(MASS) library(stats4) -#' -#' library(covidcast) library(evalcast) library(quantgen) library(gurobi) +library(argparser) #' Main function for getting backfill corrected estimates #' @@ -43,7 +39,7 @@ run_backfill <- function(df, export_dir, taus, if (value_type == "count") { # For counts data only combined_df <- fill_missing_updates(subdf, value_cols[1], "time_value", "lag") combined_df <- add_7davs_and_target(combined_df, "value_raw", "time_value", "lag", ref_lag) - } else if (value_type == "ratio"){ + } else if (value_type == "fraction"){ combined_num_df <- fill_missing_updates(subdf, value_cols[1], "time_value", "lag") combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", "time_value", "lag", ref_lag) @@ -68,7 +64,7 @@ run_backfill <- function(df, export_dir, taus, drop_na() if (dim(geo_test_data)[1] == 0) next if (dim(geo_train_data)[1] <= 200) next - if (value_type == "ratio"){ + if (value_type == "fraction"){ geo_prior_test_data = combined_df %>% filter(issue_date > test_date-7) %>% filter(issue_date <= test_date) @@ -119,6 +115,7 @@ run_backfill <- function(df, export_dir, taus, #' Main function #' Check the parameters and the input #' +#' @import tidyverse #' @import utils #' @import constants #' @import preprocessing @@ -126,15 +123,60 @@ run_backfill <- function(df, export_dir, taus, #' @import model #' #' @export -main <- function(){ +main <- function(data_path, export_dir, + test_start_date, test_end_date, traning_days, testing_window, + value_type, num_col, denom_col, + lambda, ref_lag){ # Check input data + df = read_csv(data_path) + # Check data type and required columns + if (value_type == "count"){ + if (num_col %in% colnames(df)) {value_cols=c(num_col)} + else if (denom_col %in% colnames(df)) {value_cols=c(denom_col)} + else { + stop("No valid column name detected for the count values!") + } + } else if (value_type == "fraction"){ + value_cols = c(num_col, denom_col) + if ( any(!value_cols %in% colnames(df)) ){ + stop("No valid column name detected for the fraction values!") + } + } + + # time_value must exists in the dataset + if ( !"time_value" %in% colnames(df) ){stop("No column for the reference date")} + + # issue date of lag should exist in the dataset + if ( !"lag" %in% colnames(df) ){ + if ( "issue_date" %in% colnames(df) ){ + df$lag = as.integer(df$issue_date - df$time_value) + } + else {stop("No issue_date or lag exists!")} + } # Get test date list according to the test start date + if 
(is.null(test_start_date)){ + test_start_date = max(df$issue_date) + } else { + test_start_date = as.Date(test_start_date) + } + + if (is.null(test_end_date)){ + test_end_date = max(df$issue_date) + } else { + test_end_date = as.Date(test_end_date) + } + + test_date_list = seq(test_start_date, test_end_date, by="days") # Check available training days + valid_training_days = as.integer(test_start_date - min(df$issue_date)) + if (training_days > valid_training_days){ + warning(sprintf("Only %d days are available at most for training.", valid_training_days)) + } run_backfill(df, export_dir, taus, test_date_list, test_lags, @@ -144,4 +186,21 @@ main <- function(){ } ####### Run Main Function -main() \ No newline at end of file +parser <- arg_parser(description='Process commandline arguments') +parser <- add_argument(parser, arg="--data_path", type="character", help = "Path to the input file") +parser <- add_argument(parser, arg="--export_dir", type="character", default = "../export_dir", help = "Pth to the export directory") +parser <- add_argument(parser, arg="--test_start_date", type="character", help = "Should be in the format as '2020-01-01'") +parser <- add_argument(parser, arg="--test_end_date", type="character", help = "Should be in the format as '2020-01-01'") +parser <- add_argument(parser, arg="--testing_window", type="integer", default = 1, help = "The number of issue dates for testing per trained model") +parser <- add_argument(parser, arg="--value_type", type="character", default = "fraction", help = "Can be 'count' or 'fraction'") +parser <- add_argument(parser, arg="--num_col", type="character", default = "num", help = "The column name for the numerator") +parser <- add_argument(parser, arg="--denum_col", type="character", default = "den", help = "The column name for the denominator") +parser <- add_argument(parser, arg="--lambda", type="character", default = 0.1, help = "The parameter lambda for the lasso regression") +parser <- add_argument(parser, arg="--training_days", type="integer", default = 270, help = "The number of issue dates used for model training") +parser <- add_argument(parser, arg="--ref_lag", type="integer", default = 60, help = "The lag that is set to be the reference") +args = parse_args(parser) + +main(args.data_path, args.export_dir, + args.test_start_date, args.test_end_date, args.traning_days, args.testing_window, + args.value_type, args.num_col, args.denom_col, + args.lambda, args.ref_lag) From 4c3527c3191f146db4e3cba22c2f82cea8b6416c Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Thu, 18 Aug 2022 13:40:17 -0400 Subject: [PATCH 013/145] Add explanation for the scripts --- Backfill_Correction/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Backfill_Correction/README.md b/Backfill_Correction/README.md index a4cbfe65f..8784fcbc7 100644 --- a/Backfill_Correction/README.md +++ b/Backfill_Correction/README.md @@ -10,3 +10,6 @@ Required columns without fixed column names - num_col: the column for the number of reported counts of the numerator. e.g. the number of COVID claims counts according to the insurance data. - denom_col: the column for the number of reported counts of the denominator. e.g. the number of total claims counts according to the insurance data. Required if considering the backfill correction of ratios. +The scripts except for tooling.R is used to create a pipeline that can help create backfill correction for specified Delphi Covidcast indicators. 
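An input dataset with the required columns might look like the following
(illustrative example only; `num` and `den` are just the default numerator and
denominator column names assumed by the standalone tooling script):

```r
data.frame(
  geo_value  = "pa",
  time_value = as.Date("2021-06-01"),
  issue_date = as.Date("2021-06-01") + 0:3,
  lag        = 0:3,
  num        = c(10, 14, 15, 15),
  den        = c(100, 130, 135, 136)
)
```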
+
+The script tooling.R provides a user-friendly way for people to create backfill corrections for any dataset they have in hand, before the backfill correction is officially available in `epiprocess`.

From 86a98711ff0d76eaee605c8acf6ad2456de09fdc Mon Sep 17 00:00:00 2001
From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com>
Date: Thu, 18 Aug 2022 13:42:18 -0400
Subject: [PATCH 014/145] Fix a typo in the comment

---
 Backfill_Correction/delphiBackfillCorrection/R/tooling.R | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R
index 4b89260ff..a6cc23de2 100644
--- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R
+++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R
@@ -149,7 +149,7 @@ main <- function(data_path, export_dir,
   # time_value must exists in the dataset
-  # issue date of lag should exist in the dataset
+  # issue_date or lag should exist in the dataset
   if ( !"lag" %in% colnames(df) ){
     if ( "issue_date" %in% colnames(df) ){
       df$lag = as.integer(df$issue_date - df$time_value)

From 08572c6b246fa6ab8312d11c5db3265ba4da38e8 Mon Sep 17 00:00:00 2001
From: Nat DeFries <42820733+nmdefries@users.noreply.github.com>
Date: Fri, 19 Aug 2022 16:49:10 -0400
Subject: [PATCH 015/145] read parquet data

---
 Backfill_Correction/delphiBackfillCorrection/R/utils.R | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R
index 323a6b8b9..3a0f55924 100644
--- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R
+++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R
@@ -47,13 +47,17 @@ create_dir_not_exist <- function(path)
   if (!dir.exists(path)) { dir.create(path) }
 }
 
-#' Function to read input data
+#' Read a parquet file into a dataframe
 #'
 #' @param path path to the input data
-#'
+#'
+#' @importFrom arrow read_parquet
+#' @importFrom dplyr select %>%
+#'
 #' @export
 read_data <- function(path){
-  df <- read_csv(path)
+  df <- read_parquet(path, as_data_frame = TRUE) %>%
+    select(-`__index_level_0__`)
   return (df)
 }

From 8e1220db17265b3821f8978ca92fecc83ad19c4c Mon Sep 17 00:00:00 2001
From: Nat DeFries <42820733+nmdefries@users.noreply.github.com>
Date: Mon, 22 Aug 2022 18:00:05 -0400
Subject: [PATCH 016/145] disambiguate tooling function names; factor out df validity checks

---
 .../delphiBackfillCorrection/R/tooling.R | 35 +++++--------------
 .../delphiBackfillCorrection/R/utils.R   | 28 ++++++++++++++-
 2 files changed, 35 insertions(+), 28 deletions(-)

diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R
index a6cc23de2..aa423d148 100644
--- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R
+++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R
@@ -20,7 +20,7 @@ library(argparser)
 #' @import model
 #'
 #' @export
-run_backfill <- function(df, export_dir, taus,
+run_backfill_local <- function(df, export_dir, taus,
                         test_date_list, test_lags,
                         value_cols, training_days, testing_window,
                         ref_lag, value_type, lambda){
@@ -51,6 +51,9 @@ run_backfill <- function(df, export_dir, taus,
                          suffixes=c("_num", "_denom"))
   }
   combined_df <- add_params_for_dates(combined_df, "time_value", "lag")
+  if (missing(test_date_list) ||
is.null(test_date_list)) { + test_date_list <- get_test_dates(combined_df, params$test_dates) + } for (test_date in test_date_list){ geo_train_data = combined_df %>% @@ -123,7 +126,7 @@ run_backfill <- function(df, export_dir, taus, #' @import model #' #' @export -main <- function(data_path, export_dir, +main_local <- function(data_path, export_dir, test_start_date, test_end_date, traning_days, testing_window, value_type, num_col, denom_col, lambda, ref_lag){ @@ -133,29 +136,7 @@ main <- function(data_path, export_dir, df = read_csv(data_path) # Check data type and required columns - if (value_type == "count"){ - if (num_col %in% colnames(df)) {value_cols=c(num_col)} - else if (denom_col %in% colnames(df)) {value_cols=c(denom_col)} - else { - stop("No valid column name detected for the count values!") - } - } else if (value_type == "fraction"){ - value_cols = c(num_col, denom_col) - if ( any(!value_cols %in% colnames(df)) ){ - stop("No valid column name detected for the fraction values!") - } - } - - # time_value must exists in the dataset - if ( !"time_value" %in% colnames(df) ){stop("No column for the reference date")} - - # issue_date or lag should exist in the dataset - if ( !"lag" %in% colnames(df) ){ - if ( "issue_date" %in% colnames(df) ){ - df$lag = as.integer(df$issue_date - df$time_value) - } - else {stop("No issue_date or lag exists!")} - } + validity_checks(df, value_type) # Get test date list according to the test start date if (is.null(test_start_date)){ @@ -178,7 +159,7 @@ main <- function(data_path, export_dir, warning(sprintf("Only %d days are available at most for training.", valid_training_days)) } - run_backfill(df, export_dir, taus, + run_backfill_local(df, export_dir, taus, test_date_list, test_lags, value_cols, training_days, testing_window, ref_lag, value_type, lambda) @@ -200,7 +181,7 @@ parser <- add_argument(parser, arg="--training_days", type="integer", default = parser <- add_argument(parser, arg="--ref_lag", type="integer", default = 60, help = "The lag that is set to be the reference") args = parse_args(parser) -main(args.data_path, args.export_dir, +main_local(args.data_path, args.export_dir, args.test_start_date, args.test_end_date, args.traning_days, args.testing_window, args.value_type, args.num_col, args.denom_col, args.lambda, args.ref_lag) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index 3a0f55924..261b4fbd6 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -82,5 +82,31 @@ export_test_result <- function(test_data, coef_data, export_dir, geo){ } - +#' Check input data for validity +validity_checks <- function(df, value_type) { + # Check data type and required columns + if (value_type == "count"){ + if (num_col %in% colnames(df)) {value_cols=c(num_col)} + else if (denom_col %in% colnames(df)) {value_cols=c(denom_col)} + else { + stop("No valid column name detected for the count values!") + } + } else if (value_type == "fraction"){ + value_cols = c(num_col, denom_col) + if ( any(!value_cols %in% colnames(df)) ){ + stop("No valid column name detected for the fraction values!") + } + } + + # time_value must exists in the dataset + if ( !"time_value" %in% colnames(df) ){stop("No column for the reference date")} + + # issue_date or lag should exist in the dataset + if ( !"lag" %in% colnames(df) ){ + if ( "issue_date" %in% colnames(df) ){ + df$lag = as.integer(df$issue_date - df$time_value) + } + 
else {stop("No issue_date or lag exists!")} + } +} From 456c52b61efd9871d1c3e8cf2691bcf3c3868377 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Mon, 22 Aug 2022 18:01:45 -0400 Subject: [PATCH 017/145] new main func to take over signal/geo looping; docs --- .../delphiBackfillCorrection/R/main.R | 240 +++++++++++------- 1 file changed, 148 insertions(+), 92 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 95d37bb6f..3fae14a3a 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -21,18 +21,36 @@ #' Tempt usage #' params = list() #' customize -#' params$ref_lag: reference lag, after x days, the update is considered to be the response. 60 is a reasonable choice for CHNG outpatient data +#' params$ref_lag: reference lag, after x days, the update is considered to be +#' the response. 60 is a reasonable choice for CHNG outpatient data #' params$data_path: link to the input data file -#' params$testing_window: the testing window used for saving the runtime. Could set it to be 1 if time allows -#' params$test_dates: list of two elements, the first one is the start date and the second one is the end date +#' params$testing_window: the testing window used for saving the runtime. Could +#' set it to be 1 if time allows +#' params$test_dates: list of two elements, the first one is the start date and +#' the second one is the end date #' params$training_days: set it to be 270 or larger if you have enough data -#' params$num_col: the column name for the counts of the numerator, e.g. the number of COVID claims -#' params$denom_col: the column name for the counts of the denominator, e.g. the number of total claims +#' params$num_col: the column name for the counts of the numerator, e.g. the +#' number of COVID claims +#' params$denom_col: the column name for the counts of the denominator, e.g. the +#' number of total claims #' params$geo_level: list("state", "county") +#' params$taus: ?? +#' params$lambda: ?? +#' params$export_dir: ?? -#' Main function for getting backfill corrected estimates +#' Get backfill-corrected estimates for a single signal + geo combination #' -#' @param params +#' @param df dataframe of input data containing a single indicator + signal + +#' level of geographic coverage. +#' @param value_type string describing signal type of "count" and "ratio". +#' @param geo_level string describing geo coverage of input data. "state" or +#' "county". If "county" is selected, only data from the 200 most populous +#' counties in the US (*not* the dataset) will be used. +#' @param params named list containing modeling and data settings. Must include +#' the following elements: `ref_lag`, `testing_window`, `test_dates`, +#' `training_days`, `num_col`, `taus`, `lambda`, and `export_dir`. +#' @param refd_col string containing name of reference date field within `df`. +#' @param lag_col string containing name of lag field within `df`. 
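#' (Editor's illustrative note, not part of the patch:) a `params` list with the
#' elements this function expects might look like
#'   list(ref_lag = 60, testing_window = 1,
#'        test_dates = c(as.Date("2021-06-01"), as.Date("2021-06-14")),
#'        training_days = 270, num_col = "num", denom_col = "den",
#'        taus = c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99),
#'        lambda = 0.1, export_dir = "./export_dir")
#' where the dates, column names, and directory are placeholders.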
#' #' @import constants #' @import preprocessing @@ -40,89 +58,127 @@ #' @import model #' #' @export -run_backfill <- function(params){ - # Get the input data - df <- read_data(params$data_path) - refd_col <- "time_value" - lag_col <- "lag" - testing_window <- params$testing_window - ref_lag <- params$ref_lag - min_refd <- test_date_list[1] - max_refd <- test_date_list[length(test_date_list)] - - for (geo_level in params$geo_levels){ - # Get full list of interested locations - geo_list <- unique(df$geo_value) - # Build model for each location - for (geo in geo_list) { - subdf <- df %>% filter(geo_value == geo) %>% filter(lag < ref_lag) - subdf <- fill_rows(subdf, refd_col, lag_col, min_refd, max_refd) - for (value_type in value_types){ - if (value_type == "count") { # For counts data only - combined_df <- fill_missing_updates(subdf, params$num_col, refd_col, lag_col) - combined_df <- add_7davs_and_target(combined_df, "value_raw", refd_col, lag_col) - } else if (value_type == "ratio"){ - combined_num_df <- fill_missing_updates(subdf, params$num_col, refd_col, lag_col) - combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", refd_col, lag_col) - - combined_denom_df <- fill_missing_updates(subdf, params$denom_col, refd_col, lag_col) - combined_denom_df <- add_7davs_and_target(combined_denom_df, "value_raw", refd_col, lag_col) - - combined_df <- merge(combined_num_df, combined_denom_df, - by=c(refd_col, "issue_date", lag_col, "target_date"), all.y=TRUE, - suffixes=c("_num", "_denom")) - } - combined_df <- add_params_for_dates(combined_df, refd_col, lag_col) - test_date_list <- get_test_dates(combined_df, params$test_dates) - for (test_date in test_date_list){ - geo_train_data = combined_df %>% - filter(issue_date < test_date) %>% - filter(target_date <= test_date) %>% - filter(target_date > test_date - training_days) %>% - drop_na() - geo_test_data = combined_df %>% - filter(issue_date >= test_date) %>% - filter(issue_date < test_date+testing_window) %>% - drop_na() - if (dim(geo_test_data)[1] == 0) next - if (dim(geo_train_data)[1] <= 200) next - if (value_type == "ratio"){ - geo_prior_test_data = combined_df %>% - filter(issue_date > test_date-7) %>% - filter(issue_date <= test_date) - - updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) - geo_train_data <- updated_data[[1]] - geo_test_data <- updated_data[[2]] - } - max_raw = sqrt(max(geo_train_data$value_raw)) - for (test_lag in c(1:14, 21, 35, 51)){ - filtered_data <- data_filteration(test_lag, geo_train_data, geo_test_data) - train_data <- filtered_data[[1]] - test_data <- filtered_data[[2]] - - updated_data <- add_sqrtscale(train_data, test_data, max_raw, "value_raw") - train_data <- updated_data[[1]] - test_data <- updated_data[[2]] - sqrtscale <- updated_data[[3]] - - covariates <- list(y7dav, wd, wd2, wm, slope, sqrtscale) - params_list <- c(yitl, as.vector(unlist(covariates))) - - # Model training and testing - prediction_results <- model_training_and_testing( - train_data, test_data, taus, params_list, lp_solver, lambda, test_date) - test_data <- prediction_results[[1]] - coefs <- prediction_results[[2]] - test_data <- evl(test_data, params$taus) - - export_test_result(test_data, coefs, params$export_dir, geo_level, - geo, test_lag) - }# End for test lags - }# End for test date list - }# End for value types - }# End for geo lsit - }# End for geo level - +run_backfill <- function(df, value_type, geo_level, params, + refd_col = "time_value", lag_col = "lag") { + # Get full list of interested 
locations + geo_list <- unique(df$geo_value) + # Build model for each location + for (geo in geo_list) { + subdf <- df %>% filter(geo_value == geo) %>% filter(lag < params$ref_lag) + min_refd <- min(subdf[[refd_col]]) + max_refd <- max(subdf[[refd_col]]) + subdf <- fill_rows(subdf, refd_col, lag_col, min_refd, max_refd) + + # Handle different signals + if (value_type == "count") { # For counts data only + combined_df <- fill_missing_updates(subdf, params$num_col, refd_col, lag_col) + combined_df <- add_7davs_and_target(combined_df, "value_raw", refd_col, lag_col) + + } else if (value_type == "ratio"){ + combined_num_df <- fill_missing_updates(subdf, params$num_col, refd_col, lag_col) + combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", refd_col, lag_col) + + combined_denom_df <- fill_missing_updates(subdf, params$denom_col, refd_col, lag_col) + combined_denom_df <- add_7davs_and_target(combined_denom_df, "value_raw", refd_col, lag_col) + + combined_df <- merge(combined_num_df, combined_denom_df, + by=c(refd_col, "issue_date", lag_col, "target_date"), all.y=TRUE, + suffixes=c("_num", "_denom")) + } + combined_df <- add_params_for_dates(combined_df, refd_col, lag_col) + test_date_list <- get_test_dates(combined_df, params$test_dates) + + for (test_date in test_date_list){ + geo_train_data = combined_df %>% + filter(issue_date < test_date) %>% + filter(target_date <= test_date) %>% + filter(target_date > test_date - params$training_days) %>% + drop_na() + geo_test_data = combined_df %>% + filter(issue_date >= test_date) %>% + filter(issue_date < test_date + params$testing_window) %>% + drop_na() + if (dim(geo_test_data)[1] == 0) next + if (dim(geo_train_data)[1] <= 200) next + + if (value_type == "ratio"){ + geo_prior_test_data = combined_df %>% + filter(issue_date > test_date - 7) %>% + filter(issue_date <= test_date) + + updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) + geo_train_data <- updated_data[[1]] + geo_test_data <- updated_data[[2]] + } + max_raw = sqrt(max(geo_train_data$value_raw)) + for (test_lag in c(1:14, 21, 35, 51)){ + filtered_data <- data_filteration(test_lag, geo_train_data, geo_test_data) + train_data <- filtered_data[[1]] + test_data <- filtered_data[[2]] + + updated_data <- add_sqrtscale(train_data, test_data, max_raw, "value_raw") + train_data <- updated_data[[1]] + test_data <- updated_data[[2]] + sqrtscale <- updated_data[[3]] + + covariates <- list(y7dav, wd, wd2, wm, slope, sqrtscale) + params_list <- c(yitl, as.vector(unlist(covariates))) + + # Model training and testing + prediction_results <- model_training_and_testing( + train_data, test_data, params$taus, params_list, lp_solver, params$lambda, test_date) + test_data <- prediction_results[[1]] + coefs <- prediction_results[[2]] + test_data <- evl(test_data, params$taus) + + export_test_result(test_data, coefs, params$export_dir, geo_level, + geo, test_lag) + }# End for test lags + }# End for test date list + }# End for geo list +} + +#' Perform backfill correction on all desired signals and geo levels +#' +#' @import tidyverse +#' @import utils +#' @import constants +#' @import preprocessing +#' @import beta_prior_estimation +#' @import model +#' @importFrom dplyr bind_rows +#' +#' @export +main <- function(params, ...){ + # Create groups by indicator, signal, and geo type. Cover all params$geo_level + # values (should be state and county) + # Set associated value_type as well. 
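  # Editor's note (illustrative): `product()` used below is not a base R
  # function; a cross join would enumerate every indicator/signal x geo-level
  # combination, e.g., assuming INDICATORS_AND_SIGNALS is a data frame of
  # indicator/signal pairs:
  #   groups <- merge(INDICATORS_AND_SIGNALS, data.frame(geo_level = params$geo_level))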
+ groups <- product(INDICATORS_AND_SIGNALS, params$geo_level) -} \ No newline at end of file + # Loop over every indicator + signal + geo type combination. + for (input_group in groups) { + # Convert input_group into file names. + daily_pattern <- create_daily_name(input_group$indicator, input_group$signal, input_group$geo_level) + rollup_pattern <- create_rollup_name(input_group$indicator, input_group$signal, input_group$geo_level) + + # Make sure we're reading in both 4-week rollup and daily files. + daily_input_files <- list.files(params$data_path, pattern = daily_pattern) + rollup_input_files <- list.files(params$data_path, pattern = rollup_pattern) + + ## TODO: what filtering do we need to do on dates? + + # Read in all listed files and combine + input_data <- lapply(c(daily_input_files, rollup_input_files), function(file) { + input_data[[file]] <- read_data(file) + } + ) %>% bind_rows + + # Check data type and required columns + value_type <- get_value_type(input_group$indicator, input_group$signal) + validity_checks(input_data, value_type) + + # Perform backfill corrections and save result + run_backfill(input_data, value_type, input_group$geo_level, params) + } +} + From 8ab512c28dbadea9ad038673224cb325f8879c0a Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Mon, 22 Aug 2022 18:25:38 -0400 Subject: [PATCH 018/145] county filter; line wraps --- .../delphiBackfillCorrection/R/main.R | 33 ++++++++++++++----- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 3fae14a3a..1f10eeb27 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -62,6 +62,11 @@ run_backfill <- function(df, value_type, geo_level, params, refd_col = "time_value", lag_col = "lag") { # Get full list of interested locations geo_list <- unique(df$geo_value) + if (geo_level == "county") { + # Keep only 200 most populous (within the US) counties + geo_list <- filter_counties(geo_list) + } + # Build model for each location for (geo in geo_list) { subdf <- df %>% filter(geo_value == geo) %>% filter(lag < params$ref_lag) @@ -81,9 +86,11 @@ run_backfill <- function(df, value_type, geo_level, params, combined_denom_df <- fill_missing_updates(subdf, params$denom_col, refd_col, lag_col) combined_denom_df <- add_7davs_and_target(combined_denom_df, "value_raw", refd_col, lag_col) - combined_df <- merge(combined_num_df, combined_denom_df, - by=c(refd_col, "issue_date", lag_col, "target_date"), all.y=TRUE, - suffixes=c("_num", "_denom")) + combined_df <- merge( + combined_num_df, combined_denom_df, + by=c(refd_col, "issue_date", lag_col, "target_date"), all.y=TRUE, + suffixes=c("_num", "_denom") + ) } combined_df <- add_params_for_dates(combined_df, refd_col, lag_col) test_date_list <- get_test_dates(combined_df, params$test_dates) @@ -126,7 +133,9 @@ run_backfill <- function(df, value_type, geo_level, params, # Model training and testing prediction_results <- model_training_and_testing( - train_data, test_data, params$taus, params_list, lp_solver, params$lambda, test_date) + train_data, test_data, params$taus, params_list, + lp_solver, params$lambda, test_date + ) test_data <- prediction_results[[1]] coefs <- prediction_results[[2]] test_data <- evl(test_data, params$taus) @@ -158,8 +167,12 @@ main <- function(params, ...){ # Loop over every indicator + signal + geo type combination. 
for (input_group in groups) { # Convert input_group into file names. - daily_pattern <- create_daily_name(input_group$indicator, input_group$signal, input_group$geo_level) - rollup_pattern <- create_rollup_name(input_group$indicator, input_group$signal, input_group$geo_level) + daily_pattern <- create_daily_name( + input_group$indicator, input_group$signal, input_group$geo_level + ) + rollup_pattern <- create_rollup_name( + input_group$indicator, input_group$signal, input_group$geo_level + ) # Make sure we're reading in both 4-week rollup and daily files. daily_input_files <- list.files(params$data_path, pattern = daily_pattern) @@ -168,9 +181,11 @@ main <- function(params, ...){ ## TODO: what filtering do we need to do on dates? # Read in all listed files and combine - input_data <- lapply(c(daily_input_files, rollup_input_files), function(file) { - input_data[[file]] <- read_data(file) - } + input_data <- lapply( + c(daily_input_files, rollup_input_files), + function(file) { + input_data[[file]] <- read_data(file) + } ) %>% bind_rows # Check data type and required columns From 8282c5374c412bb1d72f5b8a8fcff3eb8025724e Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Mon, 22 Aug 2022 18:44:42 -0400 Subject: [PATCH 019/145] factor out valid training days check --- Backfill_Correction/delphiBackfillCorrection/R/main.R | 3 +++ Backfill_Correction/delphiBackfillCorrection/R/tooling.R | 5 +---- Backfill_Correction/delphiBackfillCorrection/R/utils.R | 7 +++++++ 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 1f10eeb27..6be6b1f75 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -192,6 +192,9 @@ main <- function(params, ...){ value_type <- get_value_type(input_group$indicator, input_group$signal) validity_checks(input_data, value_type) + # Check available training days + training_days_check(input_data$issue_date, params$training_days) + # Perform backfill corrections and save result run_backfill(input_data, value_type, input_group$geo_level, params) } diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R index aa423d148..8aab7a6ab 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R @@ -154,10 +154,7 @@ main_local <- function(data_path, export_dir, test_date_list = seq(test_start_date, test_end_date, by="days") # Check available training days - valid_training_days = as.integer(test_start_date - min(df$issue_date)) - if (training_days > valid_training_days){ - warning(sprintf("Only %d days are available at most for training.", valid_training_days)) - } + training_days_check(df$issue_date, training_days) run_backfill_local(df, export_dir, taus, test_date_list, test_lags, diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index 261b4fbd6..349934cb8 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -110,3 +110,10 @@ validity_checks <- function(df, value_type) { } } +#' Check available training days +training_days_check <- function(issue_date, training_days) { + valid_training_days = as.integer(max(issue_date) - min(issue_date)) + if 
(training_days > valid_training_days){ + warning(sprintf("Only %d days are available at most for training.", valid_training_days)) + } +} From f3e9baff687b72ae796c01e2503fedfa6c5f802a Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Tue, 23 Aug 2022 12:53:23 -0400 Subject: [PATCH 020/145] outline input filename-fetching funcs --- .../delphiBackfillCorrection/R/main.R | 16 ++---- .../delphiBackfillCorrection/R/utils.R | 50 +++++++++++++++++++ 2 files changed, 53 insertions(+), 13 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 6be6b1f75..1a8c8c1b8 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -166,23 +166,13 @@ main <- function(params, ...){ # Loop over every indicator + signal + geo type combination. for (input_group in groups) { - # Convert input_group into file names. - daily_pattern <- create_daily_name( - input_group$indicator, input_group$signal, input_group$geo_level + files_list <- get_files_list( + input_group$indicator, input_group$signal, input_group$geo_level, params ) - rollup_pattern <- create_rollup_name( - input_group$indicator, input_group$signal, input_group$geo_level - ) - - # Make sure we're reading in both 4-week rollup and daily files. - daily_input_files <- list.files(params$data_path, pattern = daily_pattern) - rollup_input_files <- list.files(params$data_path, pattern = rollup_pattern) - - ## TODO: what filtering do we need to do on dates? # Read in all listed files and combine input_data <- lapply( - c(daily_input_files, rollup_input_files), + files_list, function(file) { input_data[[file]] <- read_data(file) } diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index 349934cb8..da86b5038 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -117,3 +117,53 @@ training_days_check <- function(issue_date, training_days) { warning(sprintf("Only %d days are available at most for training.", valid_training_days)) } } + +#' List valid input files. +get_files_list(indicator, signal, geo_level, params) { + # Convert input_group into file names. + daily_pattern <- create_name_pattern( + indicator, signal, geo_level, "daily" + ) + rollup_pattern <- create_name_pattern( + indicator, signal, geo_level, "rollup" + ) + + # Make sure we're reading in both 4-week rollup and daily files. + daily_input_files <- list.files(params$data_path, pattern = daily_pattern) + rollup_input_files <- list.files(params$data_path, pattern = rollup_pattern) + + # Filter files lists to only include those containing dates we need for training + daily_input_files <- subset_valid_files(daily_input_files, "daily", params) + rollup_input_files <- subset_valid_files(rollup_input_files, "rollup", params) + + return(c(daily_input_files, rollup_input_files)) +} + +#' Return file names only if they contain data to be used in training +#' +#' Parse filenames to find included dates. Use different patterns if file +#' includes daily or rollup (multiple days) data. +subset_valid_files <- function(files_list, file_type = c("daily", "rollup"), params) { + file_type <- match.arg(file_type) + switch(file_type, + daily = { + ... + }, + rollup = { + ... 
+ } + ) +} + +#' Create pattern to match input files of a given type, signal, and geo level +#' +#' @importFrom stringr str_interp +create_name_pattern <- function(indicator, signal, geo_level, + file_type = c("daily", "rollup")) { + file_type <- match.arg(file_type) + switch(file_type, + daily = str_interp("{indicator}_{signal}_as_of_[0-9]{8}.parquet"), + rollup = str_interp("{indicator}_{signal}_from_[0-9]{8}_to_[0-9]{8}.parquet") + ) +} + From f073ef95de3ecd4427e494a844ce53996594692c Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Tue, 23 Aug 2022 12:59:04 -0400 Subject: [PATCH 021/145] move data funcs to io.R --- .../delphiBackfillCorrection/R/io.R | 81 ++++++++++++++++++ .../delphiBackfillCorrection/R/main.R | 9 +- .../delphiBackfillCorrection/R/utils.R | 85 ------------------- 3 files changed, 86 insertions(+), 89 deletions(-) create mode 100644 Backfill_Correction/delphiBackfillCorrection/R/io.R diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R new file mode 100644 index 000000000..f13d5153b --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -0,0 +1,81 @@ +#' Read a parquet file into a dataframe +#' +#' @param path path to the input data +#' +#' @importFrom arrow read_parquet +#' @importFrom dplyr select %>% +#' +#' @export +read_data <- function(path){ + df <- read_parquet(path, as_data_frame = TRUE) %>% + select(-`__index_level_0__`) + return (df) +} + +#‘ Export the result to customized directory + +#' @param test_data test data with prediction result +#' @param coef_data data frame with the estimated coefficients +#' @param export_dir export directory +#' @param geo_level geographical level, can be county or state +#' @param geo the geogrpahical location +#' @param test_lag +#' +#' @export +export_test_result <- function(test_data, coef_data, export_dir, geo){ + pred_output_dir = paste("prediction", geo, sep="_") + write.csv(test_data, paste(export_dir, pred_output_dir , ".csv", sep=""), row.names = FALSE) + + coef_output_dir = paste("coefs", geo, sep="_") + write.csv(test_data, paste(export_dir, coef_output_dir , ".csv", sep=""), row.names = FALSE) + +} + +#' List valid input files. +get_files_list(indicator, signal, geo_level, params) { + # Convert input_group into file names. + daily_pattern <- create_name_pattern( + indicator, signal, geo_level, "daily" + ) + rollup_pattern <- create_name_pattern( + indicator, signal, geo_level, "rollup" + ) + + # Make sure we're reading in both 4-week rollup and daily files. + daily_input_files <- list.files(params$data_path, pattern = daily_pattern) + rollup_input_files <- list.files(params$data_path, pattern = rollup_pattern) + + # Filter files lists to only include those containing dates we need for training + daily_input_files <- subset_valid_files(daily_input_files, "daily", params) + rollup_input_files <- subset_valid_files(rollup_input_files, "rollup", params) + + return(c(daily_input_files, rollup_input_files)) +} + +#' Return file names only if they contain data to be used in training +#' +#' Parse filenames to find included dates. Use different patterns if file +#' includes daily or rollup (multiple days) data. +subset_valid_files <- function(files_list, file_type = c("daily", "rollup"), params) { + file_type <- match.arg(file_type) + switch(file_type, + daily = { + ... + }, + rollup = { + ... 
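Note that stringr::str_interp() substitutes ${var} placeholders. A standalone sketch of how daily and rollup name patterns of this shape select matching files; the indicator, signal, and file names are hypothetical:

library(stringr)

indicator <- "changehc"
signal <- "covid"
daily_pattern  <- str_interp("${indicator}_${signal}_as_of_[0-9]{8}.parquet")
rollup_pattern <- str_interp("${indicator}_${signal}_from_[0-9]{8}_to_[0-9]{8}.parquet")

files <- c("changehc_covid_as_of_20220401.parquet",
           "changehc_covid_from_20220301_to_20220328.parquet")
grepl(daily_pattern, files)   # TRUE FALSE
grepl(rollup_pattern, files)  # FALSE TRUE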
+ } + ) +} + +#' Create pattern to match input files of a given type, signal, and geo level +#' +#' @importFrom stringr str_interp +create_name_pattern <- function(indicator, signal, geo_level, + file_type = c("daily", "rollup")) { + file_type <- match.arg(file_type) + switch(file_type, + daily = str_interp("{indicator}_{signal}_as_of_[0-9]{8}.parquet"), + rollup = str_interp("{indicator}_{signal}_from_[0-9]{8}_to_[0-9]{8}.parquet") + ) +} diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 1a8c8c1b8..c06130514 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -16,7 +16,7 @@ #' library(evalcast) #' library(quantgen) #' library(gurobi) -#' lp_solver = "gurobi" # LP solver to use in quantile_lasso(); "gurobi" or "glpk" + #' Tempt usage #' params = list() @@ -37,6 +37,7 @@ #' params$taus: ?? #' params$lambda: ?? #' params$export_dir: ?? +#' params$lp_solver: LP solver to use in quantile_lasso(); "gurobi" or "glpk" #' Get backfill-corrected estimates for a single signal + geo combination #' @@ -48,7 +49,8 @@ #' counties in the US (*not* the dataset) will be used. #' @param params named list containing modeling and data settings. Must include #' the following elements: `ref_lag`, `testing_window`, `test_dates`, -#' `training_days`, `num_col`, `taus`, `lambda`, and `export_dir`. +#' `training_days`, `num_col`, `taus`, `lambda`, `export_dir`, `lp_solver`, +#' and `data_path` (input dir). #' @param refd_col string containing name of reference date field within `df`. #' @param lag_col string containing name of lag field within `df`. #' @@ -134,7 +136,7 @@ run_backfill <- function(df, value_type, geo_level, params, # Model training and testing prediction_results <- model_training_and_testing( train_data, test_data, params$taus, params_list, - lp_solver, params$lambda, test_date + params$lp_solver, params$lambda, test_date ) test_data <- prediction_results[[1]] coefs <- prediction_results[[2]] @@ -189,4 +191,3 @@ main <- function(params, ...){ run_backfill(input_data, value_type, input_group$geo_level, params) } } - diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index da86b5038..7287fd775 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -47,41 +47,6 @@ create_dir_not_exist <- function(path) if (!dir.exists(path)) { dir.create(path) } } -#' Read a parquet file into a dataframe -#' -#' @param path path to the input data -#' -#' @importFrom arrow read_parquet -#' @importFrom dplyr select %>% -#' -#' @export -read_data <- function(path){ - df <- read_parquet(path, as_data_frame = TRUE) %>% - select(-`__index_level_0__`) - return (df) -} - - -#‘ Export the result to customized directory - -#' @param test_data test data with prediction result -#' @param coef_data data frame with the estimated coefficients -#' @param export_dir export directory -#' @param geo_level geographical level, can be county or state -#' @param geo the geogrpahical location -#' @param test_lag -#' -#' @export -export_test_result <- function(test_data, coef_data, export_dir, geo){ - pred_output_dir = paste("prediction", geo, sep="_") - write.csv(test_data, paste(export_dir, pred_output_dir , ".csv", sep=""), row.names = FALSE) - - coef_output_dir = paste("coefs", geo, sep="_") - write.csv(test_data, paste(export_dir, 
coef_output_dir , ".csv", sep=""), row.names = FALSE) - -} - - #' Check input data for validity validity_checks <- function(df, value_type) { # Check data type and required columns @@ -117,53 +82,3 @@ training_days_check <- function(issue_date, training_days) { warning(sprintf("Only %d days are available at most for training.", valid_training_days)) } } - -#' List valid input files. -get_files_list(indicator, signal, geo_level, params) { - # Convert input_group into file names. - daily_pattern <- create_name_pattern( - indicator, signal, geo_level, "daily" - ) - rollup_pattern <- create_name_pattern( - indicator, signal, geo_level, "rollup" - ) - - # Make sure we're reading in both 4-week rollup and daily files. - daily_input_files <- list.files(params$data_path, pattern = daily_pattern) - rollup_input_files <- list.files(params$data_path, pattern = rollup_pattern) - - # Filter files lists to only include those containing dates we need for training - daily_input_files <- subset_valid_files(daily_input_files, "daily", params) - rollup_input_files <- subset_valid_files(rollup_input_files, "rollup", params) - - return(c(daily_input_files, rollup_input_files)) -} - -#' Return file names only if they contain data to be used in training -#' -#' Parse filenames to find included dates. Use different patterns if file -#' includes daily or rollup (multiple days) data. -subset_valid_files <- function(files_list, file_type = c("daily", "rollup"), params) { - file_type <- match.arg(file_type) - switch(file_type, - daily = { - ... - }, - rollup = { - ... - } - ) -} - -#' Create pattern to match input files of a given type, signal, and geo level -#' -#' @importFrom stringr str_interp -create_name_pattern <- function(indicator, signal, geo_level, - file_type = c("daily", "rollup")) { - file_type <- match.arg(file_type) - switch(file_type, - daily = str_interp("{indicator}_{signal}_as_of_[0-9]{8}.parquet"), - rollup = str_interp("{indicator}_{signal}_from_[0-9]{8}_to_[0-9]{8}.parquet") - ) -} - From 3aec897e3e4e96afbd955e3b4479acfc54c16925 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Tue, 23 Aug 2022 15:55:29 -0400 Subject: [PATCH 022/145] finish subset_valid_files logic --- .../delphiBackfillCorrection/R/constants.R | 4 ++- .../delphiBackfillCorrection/R/io.R | 29 +++++++++++++++++-- 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/constants.R b/Backfill_Correction/delphiBackfillCorrection/R/constants.R index d7733f9c8..a1bf34324 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/constants.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/constants.R @@ -18,4 +18,6 @@ wm <- c("W1_issue", "W2_issue", "W3_issue") sqrtscale_covid = c('sqrty0_covid', 'sqrty1_covid', 'sqrty2_covid') sqrtscale_total = c('sqrty0_total', 'sqrty1_total', 'sqrty2_total') sqrtscale = c('sqrty0', 'sqrty1', "sqrty2") -log_lag = "inv_log_lag" \ No newline at end of file +log_lag = "inv_log_lag" + +today = Sys.Date() diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index f13d5153b..d3dad3a39 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -58,14 +58,39 @@ get_files_list(indicator, signal, geo_level, params) { #' includes daily or rollup (multiple days) data. 
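A hedged sketch of the kind of required-column check validity_checks() performs; the exact rules in the package may differ. The column set follows the input format described in preprocessing.R (time_value, issue_date, geo_value, lag, plus a count column):

df <- data.frame(time_value = as.Date("2022-08-01"),
                 issue_date = as.Date("2022-08-02"),
                 geo_value  = "pa",
                 lag        = 1,
                 num        = 10)
required <- c("time_value", "issue_date", "geo_value", "lag")
missing_cols <- setdiff(required, colnames(df))
if (length(missing_cols) > 0) {
  stop("input data is missing columns: ", paste(missing_cols, collapse = ", "))
}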
subset_valid_files <- function(files_list, file_type = c("daily", "rollup"), params) { file_type <- match.arg(file_type) + date_format = "%Y%m%d" switch(file_type, daily = { - ... + start_dates <- as.Date( + sub("^.*_as_of_([0-9]{8}).parquet$", "\\1", files_list), + format = date_format + ) + end_dates <- start_dates }, rollup = { - ... + rollup_pattern <- "^.*_from_([0-9]{8})_to_([0-9]{8}).parquet$" + start_dates <- as.Date( + sub(rollup_pattern, "\\1", files_list), + format = date_format + ) + end_dates <- as.Date( + sub(rollup_pattern, "\\2", files_list), + format = date_format + ) } ) + + ## TODO: start_date depends on if we're doing model training or just corrections. + start_date <- today - params$training_days - params$ref_lag + end_date <- today - 1 + + # Only keep files with data that falls at least somewhat between the desired + # start and end range dates. + files_list <- files_list[ + !(( start_dates < start_date & end_dates < start_date ) | + ( start_dates > end_date & end_dates > end_date ))] + + return(files_list) } #' Create pattern to match input files of a given type, signal, and geo level From a7f41655c2b33fbeff198d8b21a622ba8bbf242f Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Tue, 23 Aug 2022 17:54:19 -0400 Subject: [PATCH 023/145] create indicator-signal combos; loop over columns --- .../delphiBackfillCorrection/R/constants.R | 11 ++ .../delphiBackfillCorrection/R/io.R | 12 +- .../delphiBackfillCorrection/R/main.R | 166 ++++++++++-------- 3 files changed, 113 insertions(+), 76 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/constants.R b/Backfill_Correction/delphiBackfillCorrection/R/constants.R index a1bf34324..55d6d8f3c 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/constants.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/constants.R @@ -1,3 +1,5 @@ +import(tibble) + # Constants for the backfill correction model taus <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) ref_lag <- 60 @@ -21,3 +23,12 @@ sqrtscale = c('sqrty0', 'sqrty1', "sqrty2") log_lag = "inv_log_lag" today = Sys.Date() + +indicators_and_signals <- tribble( + ~indicator, ~signal, ~name_suffix, ~value_type, ~sub_dir, + "changehc", "covid", "", "count", "chng", + "changehc", "flu", "", "count", "chng", + "claims_hosp", "", "", "count", "claims_hosp", + # "dv",,, + "quidel", "covidtest", c("total", "age_0_4", "age_5_17", "age_18_49", "age_50_64", "age_65plus", "age_0_17"), "count", "quidel_covidtest" +) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index d3dad3a39..98496c83b 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -32,7 +32,7 @@ export_test_result <- function(test_data, coef_data, export_dir, geo){ } #' List valid input files. -get_files_list(indicator, signal, geo_level, params) { +get_files_list(indicator, signal, geo_level, params, sub_dir = "") { # Convert input_group into file names. daily_pattern <- create_name_pattern( indicator, signal, geo_level, "daily" @@ -42,8 +42,14 @@ get_files_list(indicator, signal, geo_level, params) { ) # Make sure we're reading in both 4-week rollup and daily files. 
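A self-contained sketch of the date handling above: pull the dates out of daily and rollup file names with sub(), then keep a file only if its date range overlaps the training window. The file names and window length are hypothetical:

files_list <- c("chng_covid_as_of_20220810.parquet",
                "chng_covid_from_20220601_to_20220628.parquet")
date_format <- "%Y%m%d"

as_of <- as.Date(sub("^.*_as_of_([0-9]{8}).parquet$", "\\1", files_list[1]),
                 format = date_format)
rollup_pattern <- "^.*_from_([0-9]{8})_to_([0-9]{8}).parquet$"
from <- as.Date(sub(rollup_pattern, "\\1", files_list[2]), format = date_format)
to   <- as.Date(sub(rollup_pattern, "\\2", files_list[2]), format = date_format)

# Keep a file unless its range lies entirely outside the training window.
start_date <- Sys.Date() - 270 - 60   # training_days + ref_lag
end_date   <- Sys.Date() - 1
keep <- !((from < start_date & to < start_date) | (from > end_date & to > end_date))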
- daily_input_files <- list.files(params$data_path, pattern = daily_pattern) - rollup_input_files <- list.files(params$data_path, pattern = rollup_pattern) + if (!is.null(sub_dir) && sub_dir != "") { + data_path <- paste(params$data_path, sub_dir, sep="_") + } else { + data_path <- params$data_path + } + + daily_input_files <- list.files(data_path, pattern = daily_pattern) + rollup_input_files <- list.files(data_path, pattern = rollup_pattern) # Filter files lists to only include those containing dates we need for training daily_input_files <- subset_valid_files(daily_input_files, "daily", params) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index c06130514..a6bd0c140 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -61,7 +61,8 @@ #' #' @export run_backfill <- function(df, value_type, geo_level, params, - refd_col = "time_value", lag_col = "lag") { + refd_col = "time_value", lag_col = "lag", + signal_suffixes = c("")) { # Get full list of interested locations geo_list <- unique(df$geo_value) if (geo_level == "county") { @@ -76,76 +77,89 @@ run_backfill <- function(df, value_type, geo_level, params, max_refd <- max(subdf[[refd_col]]) subdf <- fill_rows(subdf, refd_col, lag_col, min_refd, max_refd) - # Handle different signals - if (value_type == "count") { # For counts data only - combined_df <- fill_missing_updates(subdf, params$num_col, refd_col, lag_col) - combined_df <- add_7davs_and_target(combined_df, "value_raw", refd_col, lag_col) - - } else if (value_type == "ratio"){ - combined_num_df <- fill_missing_updates(subdf, params$num_col, refd_col, lag_col) - combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", refd_col, lag_col) - - combined_denom_df <- fill_missing_updates(subdf, params$denom_col, refd_col, lag_col) - combined_denom_df <- add_7davs_and_target(combined_denom_df, "value_raw", refd_col, lag_col) - - combined_df <- merge( - combined_num_df, combined_denom_df, - by=c(refd_col, "issue_date", lag_col, "target_date"), all.y=TRUE, - suffixes=c("_num", "_denom") - ) - } - combined_df <- add_params_for_dates(combined_df, refd_col, lag_col) - test_date_list <- get_test_dates(combined_df, params$test_dates) - - for (test_date in test_date_list){ - geo_train_data = combined_df %>% - filter(issue_date < test_date) %>% - filter(target_date <= test_date) %>% - filter(target_date > test_date - params$training_days) %>% - drop_na() - geo_test_data = combined_df %>% - filter(issue_date >= test_date) %>% - filter(issue_date < test_date + params$testing_window) %>% - drop_na() - if (dim(geo_test_data)[1] == 0) next - if (dim(geo_train_data)[1] <= 200) next - - if (value_type == "ratio"){ - geo_prior_test_data = combined_df %>% - filter(issue_date > test_date - 7) %>% - filter(issue_date <= test_date) - - updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) - geo_train_data <- updated_data[[1]] - geo_test_data <- updated_data[[2]] + for (suffix in signal_suffixes) { + # For each suffix listed in `signal_suffixes`, run training/testing + # process again. Main use case is for quidel which has overall and + # age-based signals. 
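A minimal sketch of how a signal suffix (for example one of the quidel age groups) modifies the numerator and denominator column names; the param values and suffix are hypothetical:

params <- list(num_col = "num", denom_col = "den")
suffix <- "age_0_4"
if (suffix != "") {
  num_col   <- paste(params$num_col, suffix, sep = "_")
  denom_col <- paste(params$denom_col, suffix, sep = "_")
} else {
  num_col   <- params$num_col
  denom_col <- params$denom_col
}
num_col    # "num_age_0_4"
denom_col  # "den_age_0_4"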
+ if (suffix != "") { + num_col <- paste(params$num_col, suffix, sep = "_") + denom_col <- paste(params$denom_col, suffix, sep = "_") + } else { + num_col <- params$num_col + denom_col <- params$denom_col } - max_raw = sqrt(max(geo_train_data$value_raw)) - for (test_lag in c(1:14, 21, 35, 51)){ - filtered_data <- data_filteration(test_lag, geo_train_data, geo_test_data) - train_data <- filtered_data[[1]] - test_data <- filtered_data[[2]] + + # Handle different signal types + if (value_type == "count") { # For counts data only + combined_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) + combined_df <- add_7davs_and_target(combined_df, "value_raw", refd_col, lag_col) - updated_data <- add_sqrtscale(train_data, test_data, max_raw, "value_raw") - train_data <- updated_data[[1]] - test_data <- updated_data[[2]] - sqrtscale <- updated_data[[3]] + } else if (value_type == "ratio"){ + combined_num_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) + combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", refd_col, lag_col) - covariates <- list(y7dav, wd, wd2, wm, slope, sqrtscale) - params_list <- c(yitl, as.vector(unlist(covariates))) + combined_denom_df <- fill_missing_updates(subdf, denom_col, refd_col, lag_col) + combined_denom_df <- add_7davs_and_target(combined_denom_df, "value_raw", refd_col, lag_col) - # Model training and testing - prediction_results <- model_training_and_testing( - train_data, test_data, params$taus, params_list, - params$lp_solver, params$lambda, test_date + combined_df <- merge( + combined_num_df, combined_denom_df, + by=c(refd_col, "issue_date", lag_col, "target_date"), all.y=TRUE, + suffixes=c("_num", "_denom") ) - test_data <- prediction_results[[1]] - coefs <- prediction_results[[2]] - test_data <- evl(test_data, params$taus) + } + combined_df <- add_params_for_dates(combined_df, refd_col, lag_col) + test_date_list <- get_test_dates(combined_df, params$test_dates) + + for (test_date in test_date_list){ + geo_train_data = combined_df %>% + filter(issue_date < test_date) %>% + filter(target_date <= test_date) %>% + filter(target_date > test_date - params$training_days) %>% + drop_na() + geo_test_data = combined_df %>% + filter(issue_date >= test_date) %>% + filter(issue_date < test_date + params$testing_window) %>% + drop_na() + if (dim(geo_test_data)[1] == 0) next + if (dim(geo_train_data)[1] <= 200) next - export_test_result(test_data, coefs, params$export_dir, geo_level, - geo, test_lag) - }# End for test lags - }# End for test date list + if (value_type == "ratio"){ + geo_prior_test_data = combined_df %>% + filter(issue_date > test_date - 7) %>% + filter(issue_date <= test_date) + + updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) + geo_train_data <- updated_data[[1]] + geo_test_data <- updated_data[[2]] + } + max_raw = sqrt(max(geo_train_data$value_raw)) + for (test_lag in c(1:14, 21, 35, 51)){ + filtered_data <- data_filteration(test_lag, geo_train_data, geo_test_data) + train_data <- filtered_data[[1]] + test_data <- filtered_data[[2]] + + updated_data <- add_sqrtscale(train_data, test_data, max_raw, "value_raw") + train_data <- updated_data[[1]] + test_data <- updated_data[[2]] + sqrtscale <- updated_data[[3]] + + covariates <- list(y7dav, wd, wd2, wm, slope, sqrtscale) + params_list <- c(yitl, as.vector(unlist(covariates))) + + # Model training and testing + prediction_results <- model_training_and_testing( + train_data, test_data, params$taus, params_list, + params$lp_solver, params$lambda, 
test_date + ) + test_data <- prediction_results[[1]] + coefs <- prediction_results[[2]] + test_data <- evl(test_data, params$taus) + + export_test_result(test_data, coefs, params$export_dir, geo_level, + geo, test_lag) + }# End for test lags + }# End for test date list + }# End for signal suffixes }# End for geo list } @@ -161,17 +175,22 @@ run_backfill <- function(df, value_type, geo_level, params, #' #' @export main <- function(params, ...){ - # Create groups by indicator, signal, and geo type. Cover all params$geo_level - # values (should be state and county) - # Set associated value_type as well. - groups <- product(INDICATORS_AND_SIGNALS, params$geo_level) + # Load indicator x signal groups. Combine with params$geo_level to get all + # possible geo x signal combinations. + groups <- merge(indicators_and_signals, data.frame(geo_level = params$geo_level)) # Loop over every indicator + signal + geo type combination. for (input_group in groups) { files_list <- get_files_list( - input_group$indicator, input_group$signal, input_group$geo_level, params + input_group$indicator, input_group$signal, input_group$geo_level, + params, input_group$sub_dir ) + if (length(files_list) == 0) { + warning(str_interp("No files found for {input_group$indicator} {input_group$signal}, skipping")) + next + } + # Read in all listed files and combine input_data <- lapply( files_list, @@ -181,13 +200,14 @@ main <- function(params, ...){ ) %>% bind_rows # Check data type and required columns - value_type <- get_value_type(input_group$indicator, input_group$signal) - validity_checks(input_data, value_type) + validity_checks(input_data, input_group$value_type) # Check available training days training_days_check(input_data$issue_date, params$training_days) # Perform backfill corrections and save result - run_backfill(input_data, value_type, input_group$geo_level, params) + run_backfill(input_data, input_group$value_type, input_group$geo_level, + params, signal_suffixes = input_group$name_suffix + ) } } From b1d57fa4e1522ff8d1ef3211a2ec9c7f17bf1c29 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Tue, 23 Aug 2022 18:39:09 -0400 Subject: [PATCH 024/145] make county filter funcs --- .../delphiBackfillCorrection/R/utils.R | 23 ++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index 7287fd775..32e4691d1 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -12,7 +12,6 @@ #' @importFrom dplyr if_else #' @importFrom jsonlite read_json #' @importFrom lubridate ymd_hms -#' @export read_params <- function(path = "params.json", template_path = "params.json.template") { if (!file.exists(path)) file.copy(template_path, path) params <- read_json(path, simplifyVector = TRUE) @@ -82,3 +81,25 @@ training_days_check <- function(issue_date, training_days) { warning(sprintf("Only %d days are available at most for training.", valid_training_days)) } } + +#' Subset list of counties to those included in the 200 most populous in the US +filter_counties <- function(geos) { + top_200_geos <- get_populous_counties() + return(intersect(geos, top_200_geos)) +} + +#' Subset list of counties to those included in the 200 most populous in the US +#' +#' @importFrom covidcast county_census +#' @importFrom dplyr select %>% arrange desc +get_populous_counties <- function() { + return( + 
county_census %>% + select(pop = POPESTIMATE2019, fips = FIPS) %>% + # Drop megacounties (states) + filter(!endsWith(fips, "000")) %>% + arrange(desc(pop)) %>% + pull(fips) %>% + head(n=200) + ) +} From e2d1c1d451d31718007a1592a5ec2ada2d0cdd39 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Tue, 23 Aug 2022 18:41:34 -0400 Subject: [PATCH 025/145] check existence input data --- Backfill_Correction/delphiBackfillCorrection/R/main.R | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index a6bd0c140..7e83baddb 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -199,6 +199,11 @@ main <- function(params, ...){ } ) %>% bind_rows + if (nrow(input_data) == 0) { + warning(str_interp("No data available for {input_group$indicator} {input_group$signal}, skipping")) + next + } + # Check data type and required columns validity_checks(input_data, input_group$value_type) From fa0c10714ff55cb884bf7bb8b67d2e5f1f5b5d3b Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Wed, 24 Aug 2022 13:50:09 -0400 Subject: [PATCH 026/145] formatting --- Backfill_Correction/delphiBackfillCorrection/R/constants.R | 2 +- Backfill_Correction/delphiBackfillCorrection/R/io.R | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/constants.R b/Backfill_Correction/delphiBackfillCorrection/R/constants.R index 55d6d8f3c..3749c7dc1 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/constants.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/constants.R @@ -1,4 +1,4 @@ -import(tibble) +library(tibble) # Constants for the backfill correction model taus <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index 98496c83b..ab2aec473 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -22,7 +22,7 @@ read_data <- function(path){ #' @param test_lag #' #' @export -export_test_result <- function(test_data, coef_data, export_dir, geo){ +export_test_result <- function(test_data, coef_data, export_dir, geo) { pred_output_dir = paste("prediction", geo, sep="_") write.csv(test_data, paste(export_dir, pred_output_dir , ".csv", sep=""), row.names = FALSE) @@ -32,7 +32,7 @@ export_test_result <- function(test_data, coef_data, export_dir, geo){ } #' List valid input files. -get_files_list(indicator, signal, geo_level, params, sub_dir = "") { +get_files_list <- function(indicator, signal, geo_level, params, sub_dir = "") { # Convert input_group into file names. 
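The county filter amounts to intersecting the geo values present in the data with the 200 most populous county FIPS codes from covidcast::county_census. A standalone sketch with hypothetical geo values:

library(dplyr)
library(covidcast)

top_200 <- county_census %>%
  select(pop = POPESTIMATE2019, fips = FIPS) %>%
  filter(!endsWith(fips, "000")) %>%   # drop the state-level "megacounty" rows
  arrange(desc(pop)) %>%
  pull(fips) %>%
  head(n = 200)

geo_list <- c("42003", "06037", "99999")
intersect(geo_list, top_200)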
daily_pattern <- create_name_pattern( indicator, signal, geo_level, "daily" From b1ed2fe287cc6c3eeaddeee1e52e7ffe4252a768 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Wed, 24 Aug 2022 14:09:21 -0400 Subject: [PATCH 027/145] move local arg parsing and call out of package --- Backfill_Correction/correct_local_signal.R | 31 +++++++++++++++++++ .../delphiBackfillCorrection/R/tooling.R | 20 ------------ 2 files changed, 31 insertions(+), 20 deletions(-) create mode 100644 Backfill_Correction/correct_local_signal.R diff --git a/Backfill_Correction/correct_local_signal.R b/Backfill_Correction/correct_local_signal.R new file mode 100644 index 000000000..bd8d30dc6 --- /dev/null +++ b/Backfill_Correction/correct_local_signal.R @@ -0,0 +1,31 @@ +#!/usr/bin/env Rscript + +## Run backfill corrections on a single signal + geo type combination of local data. +## +## Usage: +## +## Rscript correct_local_signal.R [options] + +suppressPackageStartupMessages({ + library(delphiBackfillCorrection) +}) + + +parser <- arg_parser(description='Process commandline arguments') +parser <- add_argument(parser, arg="--data_path", type="character", help = "Path to the input file") +parser <- add_argument(parser, arg="--export_dir", type="character", default = "../export_dir", help = "Pth to the export directory") +parser <- add_argument(parser, arg="--test_start_date", type="character", help = "Should be in the format as '2020-01-01'") +parser <- add_argument(parser, arg="--test_end_date", type="character", help = "Should be in the format as '2020-01-01'") +parser <- add_argument(parser, arg="--testing_window", type="integer", default = 1, help = "The number of issue dates for testing per trained model") +parser <- add_argument(parser, arg="--value_type", type="character", default = "fraction", help = "Can be 'count' or 'fraction'") +parser <- add_argument(parser, arg="--num_col", type="character", default = "num", help = "The column name for the numerator") +parser <- add_argument(parser, arg="--denum_col", type="character", default = "den", help = "The column name for the denominator") +parser <- add_argument(parser, arg="--lambda", type="character", default = 0.1, help = "The parameter lambda for the lasso regression") +parser <- add_argument(parser, arg="--training_days", type="integer", default = 270, help = "The number of issue dates used for model training") +parser <- add_argument(parser, arg="--ref_lag", type="integer", default = 60, help = "The lag that is set to be the reference") +args = parse_args(parser) + +main_local(args.data_path, args.export_dir, + args.test_start_date, args.test_end_date, args.traning_days, args.testing_window, + args.value_type, args.num_col, args.denom_col, + args.lambda, args.ref_lag) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R index 8aab7a6ab..104eac324 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R @@ -162,23 +162,3 @@ main_local <- function(data_path, export_dir, ref_lag, value_type, lambda) } - -####### Run Main Function -parser <- arg_parser(description='Process commandline arguments') -parser <- add_argument(parser, arg="--data_path", type="character", help = "Path to the input file") -parser <- add_argument(parser, arg="--export_dir", type="character", default = "../export_dir", help = "Pth to the export directory") -parser <- add_argument(parser, 
arg="--test_start_date", type="character", help = "Should be in the format as '2020-01-01'") -parser <- add_argument(parser, arg="--test_end_date", type="character", help = "Should be in the format as '2020-01-01'") -parser <- add_argument(parser, arg="--testing_window", type="integer", default = 1, help = "The number of issue dates for testing per trained model") -parser <- add_argument(parser, arg="--value_type", type="character", default = "fraction", help = "Can be 'count' or 'fraction'") -parser <- add_argument(parser, arg="--num_col", type="character", default = "num", help = "The column name for the numerator") -parser <- add_argument(parser, arg="--denum_col", type="character", default = "den", help = "The column name for the denominator") -parser <- add_argument(parser, arg="--lambda", type="character", default = 0.1, help = "The parameter lambda for the lasso regression") -parser <- add_argument(parser, arg="--training_days", type="integer", default = 270, help = "The number of issue dates used for model training") -parser <- add_argument(parser, arg="--ref_lag", type="integer", default = 60, help = "The lag that is set to be the reference") -args = parse_args(parser) - -main_local(args.data_path, args.export_dir, - args.test_start_date, args.test_end_date, args.traning_days, args.testing_window, - args.value_type, args.num_col, args.denom_col, - args.lambda, args.ref_lag) From 5c1de08b31af7de2927a8f42bd3f23daf25c5673 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Wed, 24 Aug 2022 15:26:12 -0400 Subject: [PATCH 028/145] initial roxygen build --- Backfill_Correction/correct_local_signal.R | 1 + .../delphiBackfillCorrection/DESCRIPTION | 33 +++++++++++ .../delphiBackfillCorrection/LICENSE | 2 + .../delphiBackfillCorrection/NAMESPACE | 46 +++++++++++++++ .../R/beta_prior_estimation.R | 13 +---- .../delphiBackfillCorrection/R/constants.R | 3 + .../delphiBackfillCorrection/R/io.R | 4 +- .../delphiBackfillCorrection/R/main.R | 54 +----------------- .../delphiBackfillCorrection/R/model.R | 19 +++---- .../R/preprocessing.R | 57 +++++++++---------- .../delphiBackfillCorrection/R/tooling.R | 38 ++----------- .../delphiBackfillCorrection/R/utils.R | 24 +++++++- .../man/add_7davs_and_target.Rd | 23 ++++++++ .../man/add_dayofweek.Rd | 24 ++++++++ .../man/add_params_for_dates.Rd | 21 +++++++ .../delphiBackfillCorrection/man/add_shift.Rd | 19 +++++++ .../man/add_sqrtscale.Rd | 20 +++++++ .../man/add_weekofmonth.Rd | 20 +++++++ .../man/create_dir_not_exist.Rd | 14 +++++ .../man/create_name_pattern.Rd | 16 ++++++ .../man/data_filteration.Rd | 18 ++++++ .../delphiBackfillCorrection/man/delta.Rd | 28 +++++++++ .../man/est_priors.Rd | 50 ++++++++++++++++ .../delphiBackfillCorrection/man/evl.Rd | 22 +++++++ .../man/export_test_result.Rd | 24 ++++++++ .../man/fill_missing_updates.Rd | 27 +++++++++ .../delphiBackfillCorrection/man/fill_rows.Rd | 26 +++++++++ .../man/filter_counties.Rd | 11 ++++ .../delphiBackfillCorrection/man/get_7dav.Rd | 19 +++++++ .../man/get_files_list.Rd | 11 ++++ .../man/get_populous_counties.Rd | 11 ++++ .../man/get_weekofmonth.Rd | 27 +++++++++ .../delphiBackfillCorrection/man/main.Rd | 11 ++++ .../man/main_local.Rd | 23 ++++++++ .../man/model_training_and_testing.Rd | 36 ++++++++++++ .../delphiBackfillCorrection/man/objective.Rd | 16 ++++++ .../delphiBackfillCorrection/man/ratio_adj.Rd | 18 ++++++ .../man/ratio_adj_with_pseudo.Rd | 24 ++++++++ .../delphiBackfillCorrection/man/read_data.Rd | 14 +++++ .../man/read_params.Rd | 43 
++++++++++++++ .../man/run_backfill.Rd | 38 +++++++++++++ .../man/run_backfill_local.Rd | 23 ++++++++ .../man/subset_valid_files.Rd | 12 ++++ .../man/training_days_check.Rd | 11 ++++ .../man/validity_checks.Rd | 11 ++++ Backfill_Correction/run.R | 5 ++ 46 files changed, 871 insertions(+), 139 deletions(-) create mode 100644 Backfill_Correction/delphiBackfillCorrection/DESCRIPTION create mode 100644 Backfill_Correction/delphiBackfillCorrection/LICENSE create mode 100644 Backfill_Correction/delphiBackfillCorrection/NAMESPACE create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/add_7davs_and_target.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/add_params_for_dates.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/add_shift.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/create_dir_not_exist.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/create_name_pattern.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/delta.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/evl.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/fill_missing_updates.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/fill_rows.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/filter_counties.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/get_7dav.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/get_populous_counties.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/get_weekofmonth.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/main.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/objective.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/read_data.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/subset_valid_files.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/training_days_check.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd create mode 100644 Backfill_Correction/run.R diff --git a/Backfill_Correction/correct_local_signal.R b/Backfill_Correction/correct_local_signal.R index bd8d30dc6..d5655bfdb 100644 --- 
a/Backfill_Correction/correct_local_signal.R +++ b/Backfill_Correction/correct_local_signal.R @@ -8,6 +8,7 @@ suppressPackageStartupMessages({ library(delphiBackfillCorrection) + library(argparser) }) diff --git a/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION b/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION new file mode 100644 index 000000000..8dc159939 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION @@ -0,0 +1,33 @@ +Package: delphiBackfillCorrection +Type: Package +Title: Correct signal outliers +Version: 1.0 +Date: 2022-08-24 +Author: Jingjing Tang +Maintainer: Jingjing Tang +Description: Takes formatted output from COVIDcast API data pipelines and + adjusted unusual values using a lasso-penalized quantile regression. + Output is used for research and model development. +License: file LICENSE +Depends: + R (>= 3.5.0), +Imports: + dplyr, + readr, + tibble, + stringr, + covidcast, + quantgen, + arrow, + evalcast, + jsonlite, + lubridate, + tidyr, + zoo +Suggests: + knitr (>= 1.15), + rmarkdown (>= 1.4), + testthat (>= 1.0.1), + covr (>= 2.2.2) +RoxygenNote: 7.2.0 +Encoding: UTF-8 diff --git a/Backfill_Correction/delphiBackfillCorrection/LICENSE b/Backfill_Correction/delphiBackfillCorrection/LICENSE new file mode 100644 index 000000000..2d1447e00 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/LICENSE @@ -0,0 +1,2 @@ +Currently approved for internal DELPHI use only. + diff --git a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE new file mode 100644 index 000000000..0b4ac3fb9 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE @@ -0,0 +1,46 @@ +# Generated by roxygen2: do not edit by hand + +export(add_7davs_and_target) +export(add_dayofweek) +export(add_shift) +export(add_weekofmonth) +export(create_dir_not_exist) +export(data_filteration) +export(evl) +export(export_test_result) +export(fill_missing_updates) +export(fill_rows) +export(get_7dav) +export(main) +export(main_local) +export(model_training_and_testing) +export(objective) +export(ratio_adj) +export(ratio_adj_with_pseudo) +export(read_data) +export(run_backfill) +importFrom(arrow,read_parquet) +importFrom(dplyr,"%>%") +importFrom(dplyr,arrange) +importFrom(dplyr,bind_rows) +importFrom(dplyr,desc) +importFrom(dplyr,everything) +importFrom(dplyr,filter) +importFrom(dplyr,if_else) +importFrom(dplyr,select) +importFrom(evalcast,weighted_interval_score) +importFrom(jsonlite,read_json) +importFrom(lubridate,day) +importFrom(lubridate,make_date) +importFrom(lubridate,month) +importFrom(lubridate,year) +importFrom(lubridate,ymd_hms) +importFrom(quantgen,quantile_lasso) +importFrom(readr,read_csv) +importFrom(stats,nlm) +importFrom(stats,pbeta) +importFrom(stringr,str_interp) +importFrom(tidyr,fill) +importFrom(tidyr,pivot_longer) +importFrom(tidyr,pivot_wider) +importFrom(zoo,rollmeanr) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R index 0598ec960..46f4b22ac 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R @@ -4,14 +4,11 @@ #' that is day-of-week dependent. A quantile regression model is used first with lasso #' penalty for supporting quantile estimation and then a non-linear minimization is used #' for prior estimation. 
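A minimal sketch of the prior-estimation step described above: recover beta parameters whose CDF best matches a handful of estimated quantiles, optimizing log-scale parameters with stats::nlm. The quantile values are made up:

taus <- c(0.25, 0.5, 0.75)
quantiles <- c(0.04, 0.06, 0.09)   # hypothetical estimated quantiles for one weekday

# Squared distance between the beta CDF at the estimated quantiles and taus;
# parameters stay positive because their logs are what gets optimized.
obj <- function(theta, x, prob) {
  ab <- exp(theta)
  sum((pbeta(x, ab[1], ab[2]) - prob)^2)
}
sol <- nlm(obj, p = c(0, log(10)), x = quantiles, prob = taus)
exp(sol$estimate)   # estimated (alpha, beta)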
-lp_solver <- "gurobi" #' Sum of squared error #' #' @param fit estimated values #' @param actual actual values -#' -#' @export delta <- function(fit, actual) sum((fit-actual)^2) #' Generate objection function @@ -43,14 +40,9 @@ objective <- function(theta, x, prob, ...) { #' @param base_pseudo_denum the pseudo counts added to denominator if little data for training #' @param base_pseudo_num the pseudo counts added to numerator if little data for training #' -#' @import MASS -#' @import stats4 -#' @import gurobi -#' @import Matrix -#' @import tidyverse -#' @import dplyr +#' @importFrom stats nlm +#' @importFrom dplyr %>% filter #' @importFrom quantgen quantile_lasso -#' @importFrom constants lp_solver est_priors <- function(train_data, prior_test_data, cov, taus, params_list, response, lp_solver, lambda, start=c(0, log(10)), @@ -110,7 +102,6 @@ ratio_adj_with_pseudo <- function(data, cov, pseudo_num, pseudo_denom, num_col, #' @param test_data testing data #' @param prior_test_data testing data for the lag -1 model #' -#' @importFrom constants taus, dw, lp_solver #' @export ratio_adj <- function(train_data, test_data, prior_test_data){ train_data$value_target <- ratio_adj_with_pseudo(train_data, NULL, 1, 100, "value_target_num", "value_target_denom") diff --git a/Backfill_Correction/delphiBackfillCorrection/R/constants.R b/Backfill_Correction/delphiBackfillCorrection/R/constants.R index 3749c7dc1..b4eefe903 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/constants.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/constants.R @@ -22,6 +22,9 @@ sqrtscale_total = c('sqrty0_total', 'sqrty1_total', 'sqrty2_total') sqrtscale = c('sqrty0', 'sqrty1', "sqrty2") log_lag = "inv_log_lag" +# Dates +weekdays_abbr <- c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat") +week_issues <- c("W1_issue", "W2_issue", "W3_issue") today = Sys.Date() indicators_and_signals <- tribble( diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index ab2aec473..4dd072d33 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -12,8 +12,8 @@ read_data <- function(path){ return (df) } -#‘ Export the result to customized directory - +#' Export the result to customized directory +#' #' @param test_data test data with prediction result #' @param coef_data data frame with the estimated coefficients #' @param export_dir export directory diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 7e83baddb..9b40596bc 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -1,44 +1,3 @@ -#' library(tidyverse) -#' library(Matrix) -#' library(stats) -#' library(tidyverse) -#' library(dplyr) -#' library(lubridate) -#' library(zoo) -#' library(dplyr) -#' library(ggplot2) -#' library(stringr) -#' library(plyr) -#' library(MASS) -#' library(stats4) -#' -#' library(covidcast) -#' library(evalcast) -#' library(quantgen) -#' library(gurobi) - - -#' Tempt usage -#' params = list() -#' customize -#' params$ref_lag: reference lag, after x days, the update is considered to be -#' the response. 60 is a reasonable choice for CHNG outpatient data -#' params$data_path: link to the input data file -#' params$testing_window: the testing window used for saving the runtime. 
Could -#' set it to be 1 if time allows -#' params$test_dates: list of two elements, the first one is the start date and -#' the second one is the end date -#' params$training_days: set it to be 270 or larger if you have enough data -#' params$num_col: the column name for the counts of the numerator, e.g. the -#' number of COVID claims -#' params$denom_col: the column name for the counts of the denominator, e.g. the -#' number of total claims -#' params$geo_level: list("state", "county") -#' params$taus: ?? -#' params$lambda: ?? -#' params$export_dir: ?? -#' params$lp_solver: LP solver to use in quantile_lasso(); "gurobi" or "glpk" - #' Get backfill-corrected estimates for a single signal + geo combination #' #' @param df dataframe of input data containing a single indicator + signal + @@ -54,10 +13,7 @@ #' @param refd_col string containing name of reference date field within `df`. #' @param lag_col string containing name of lag field within `df`. #' -#' @import constants -#' @import preprocessing -#' @import beta_prior_estimation -#' @import model +#' @importFrom dplyr %>% filter #' #' @export run_backfill <- function(df, value_type, geo_level, params, @@ -161,16 +117,12 @@ run_backfill <- function(df, value_type, geo_level, params, }# End for test date list }# End for signal suffixes }# End for geo list + + return(NULL) } #' Perform backfill correction on all desired signals and geo levels #' -#' @import tidyverse -#' @import utils -#' @import constants -#' @import preprocessing -#' @import beta_prior_estimation -#' @import model #' @importFrom dplyr bind_rows #' #' @export diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index 6a0f9c61d..587d076cc 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -4,7 +4,7 @@ #' @param geo_train_data training data for a certain location #' @param geo_test_data testing data for a certain location #' -#' @expert +#' @export data_filteration <- function(test_lag, geo_train_data, geo_test_data){ if (test_lag <= 14){ test_lag_pad=2 @@ -40,7 +40,7 @@ data_filteration <- function(test_lag, geo_train_data, geo_test_data){ #' @param lambda the level of lasso penalty #' @param test_date as.Date #' -#' @expert +#' @export model_training_and_testing <- function(train_data, test_data, taus, params_list, lp_solver, lambda, test_date){ success = 0 @@ -74,16 +74,15 @@ model_training_and_testing <- function(train_data, test_data, taus, params_list, } #' Evaluation of the test results based on WIS score -#' The WIS score calculation is based on hte weighted_interval_score function -#' from the evalcast package from Delphi +#' The WIS score calculation is based on the weighted_interval_score function +#' from the `evalcast` package from Delphi #' -#' @param test_data multiple columns for the prediction results of difference +#' @param test_data multiple columns for the prediction results of different #' quantiles. Each row represents an update with certain (reference_date, -#' issue_date, location) +#' issue_date, location) combination. 
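A sketch of scoring one row's predicted quantiles with evalcast::weighted_interval_score, following the call pattern used in evl(): the predictions are shifted by the observed target so the actual value passed to the scorer is 0. The numbers are hypothetical:

library(evalcast)

taus <- c(0.1, 0.25, 0.5, 0.75, 0.9)
predicted_minus_target <- c(-0.30, -0.12, 0.02, 0.18, 0.41)   # log scale
weighted_interval_score(taus, predicted_minus_target, 0)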
#' @param taus vector of quantiles interested #' -#' @import covidcast -#' @importFrom evalcast import weighted_interval_score +#' @importFrom evalcast weighted_interval_score #' #' @export evl <- function(test_data, taus){ @@ -99,8 +98,8 @@ evl <- function(test_data, taus){ predicted_all_exp = exp(predicted_all) predicted_trans = as.list(data.frame(t(predicted_all - test_data$log_value_target))) predicted_trans_exp = as.list(data.frame(t(predicted_all_exp - test_data$value_target))) - test_data$wis =mapply(weighted_interval_score, taus_list, predicted_trans, 0) - test_data$wis_exp =mapply(weighted_interval_score, taus_list, predicted_trans_exp, 0) + test_data$wis = mapply(weighted_interval_score, taus_list, predicted_trans, 0) + test_data$wis_exp = mapply(weighted_interval_score, taus_list, predicted_trans_exp, 0) return (test_data) } \ No newline at end of file diff --git a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R index e7c04fc87..ac986a419 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R @@ -1,15 +1,11 @@ -### Data Preprocessing -### The raw input data should have 4/5 basic columns: -### time_value: reference date -### issue_date: issue date/date of reporting -### geo_value: location -### lag: the number of days between issue date and the reference date -### counts: the number of counts used for estimation -### library(lubridate) -### library(stats) -### library(stats) -### library(dyplr) -### library(tidyverse) +## Data Preprocessing +## +## The raw input data should have 4/5 basic columns: +## time_value: reference date +## issue_date: issue date/date of reporting +## geo_value: location +## lag: the number of days between issue date and the reference date +## counts: the number of counts used for estimation #' Re-index, fill na, make sure all reference date have enough rows for updates @@ -20,8 +16,6 @@ #' @param min_refd the earliest reference date considered in the data #' @param max_refd the latest reference date considered in the data #' -#' @importFrom constants ref_lag -#' #' @return df_new Data Frame with filled rows for missing lags #' #' @export @@ -45,20 +39,26 @@ fill_rows <- function(df, refd_col, lag_col, min_refd, max_refd){ #' @param refd_col column name for the column of reference date #' @param lag_col column name for the column of lag #' -#' @importFrom constants ref_lag -#' @importFrom tidyr fill -#' @importFrom dplyr everything, select +#' @importFrom tidyr fill pivot_wider pivot_longer +#' @importFrom dplyr %>% everything select #' #' @export fill_missing_updates <- function(df, value_col, refd_col, lag_col) { pivot_df <- df[order(df[[lag_col]], decreasing=FALSE), ] %>% pivot_wider(id_cols=lag_col, names_from=refd_col, values_from=value_col) - if (any(diff(pivot_df[[lag_col]])!=1)){stop("Risk exists in forward fill")} + + if (any(diff(pivot_df[[lag_col]]) != 1)) { + stop("Risk exists in forward fill") + } pivot_df <- pivot_df %>% fill(everything(), .direction="down") - pivot_df[is.na(pivot_df)] <- 0 # fill NAs with 0s + + # Fill NAs with 0s + pivot_df[is.na(pivot_df)] <- 0 + backfill_df <- pivot_df %>% pivot_longer(-lag_col, values_to="value_raw", names_to=refd_col) backfill_df[[refd_col]] = as.Date(backfill_df[[refd_col]]) + return (as.data.frame(backfill_df)) } @@ -96,7 +96,6 @@ add_shift <- function(df, n_day, refd_col){ return (df) } -wd <- c("Mon", "Tue", "Wed", "Thurs", "Fri", 
"Sat") #' Add one hot encoding for day of a week info in terms of reference #' and issue date #' @@ -107,10 +106,8 @@ wd <- c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat") #' issue date #' @param suffix suffix added to indicate which kind of date is used #' -#' @importFrom constants wd -#' #' @export -add_dayofweek <- function(df, wd, time_col, suffix){ +add_dayofweek <- function(df, wd = weekdays_abbr, time_col, suffix){ dayofweek <- as.numeric(format(df[[time_col]], format="%u")) for (i in 1:6){ df[, paste0(wd[i], suffix)] <- as.numeric(dayofweek == i) @@ -130,9 +127,9 @@ add_dayofweek <- function(df, wd, time_col, suffix){ #' #' @param date as.Date #' -#' @importFrom lubridate make_date, year, month, day +#' @importFrom lubridate make_date year month day +#' #' @return a integer indicating which week it is in a month -#' @export get_weekofmonth <- function(date){ year <- year(date) month <- month(date) @@ -141,7 +138,6 @@ get_weekofmonth <- function(date){ return (((day + firstdayofmonth - 1) %/% 7) %% 5 + 1) } -wm <- c("W1_issue", "W2_issue", "W3_issue") #' Add one hot encoding for week of a month info in terms of issue date #' #' @param df Data Frame of aggregated counts within a single location @@ -151,7 +147,7 @@ wm <- c("W1_issue", "W2_issue", "W3_issue") #' issue date #' #' @export -add_weekofmonth <- function(df, wm, time_col){ +add_weekofmonth <- function(df, wm = week_issues, time_col){ weekofmonth <- get_weekofmonth(df[[time_col]]) for (i in 1:3){ df[, paste0(wm[i])] <- as.numeric(weekofmonth == i) @@ -167,6 +163,9 @@ add_weekofmonth <- function(df, wm, time_col){ #' @param refd_col column name for the column of reference date #' @param lag_col column name for the column of lag #' +#' @importFrom dplyr %>% +#' @importFrom tidyr pivot_wider +#' #' @export add_7davs_and_target <- function(df, value_col, refd_col, lag_col, ref_lag){ @@ -211,8 +210,6 @@ add_7davs_and_target <- function(df, value_col, refd_col, lag_col, ref_lag){ #' reported for each reference date and issue date. 
#' @param refd_col column name for the column of reference date #' @param lag_col column name for the column of lag -#' -#' @export add_params_for_dates <- function(backfill_df, refd_col, lag_col){ # Add columns for day-of-week effect backfill_df <- add_dayofweek(backfill_df, wd, refd_col, "_ref") @@ -230,7 +227,6 @@ add_params_for_dates <- function(backfill_df, refd_col, lag_col){ #' @param test_data Data Frame for testing #' @param value_col the column name of the considered value #' @param the maximum value in the training data at square root level -#' @export add_sqrtscale <- function(train_data, test_data, max_raw, value_col){ sqrtscale = c() sub_max_raw = sqrt(max(train_data$value_raw)) / 2 @@ -248,6 +244,7 @@ add_sqrtscale <- function(train_data, test_data, max_raw, value_col){ & (test_data$value_raw > (qv_pre)^2), paste0("sqrty", as.character(split))] = 1 sqrtscale[split+1] = paste0("sqrty", as.character(split)) } + return (list(train_data, test_data, sqrtscale)) } diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R index 104eac324..33abdc8e7 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R @@ -1,25 +1,6 @@ -library(tidyverse) -library(dplyr) -library(lubridate) -library(zoo) -#' library(stringr) -#' library(plyr) -library(MASS) -library(stats4) -library(evalcast) -library(quantgen) -library(gurobi) -library(argparser) - -#' Main function for getting backfill corrected estimates -#' -#' @import utils -#' @import constants -#' @import preprocessing -#' @import beta_prior_estimation -#' @import model +#' Corrected estimates from a single local signal #' -#' @export +#' @importFrom dplyr %>% filter run_backfill_local <- function(df, export_dir, taus, test_date_list, test_lags, value_cols, training_days, testing_window, @@ -112,26 +93,18 @@ run_backfill_local <- function(df, export_dir, taus, result_df = do.call(rbind, res_list) coefs_df = do.call(rbind.fill, coef_df_list) export_test_result(result_df, coefs_df, export_dir, geo) - }# End for geo lsit + }# End for geo list } -#' Main function -#' Check the parameters and the input +#' Main function to correct a single local signal #' -#' @import tidyverse -#' @import utils -#' @import constants -#' @import preprocessing -#' @import beta_prior_estimation -#' @import model +#' @importFrom readr read_csv #' #' @export main_local <- function(data_path, export_dir, test_start_date, test_end_date, traning_days, testing_window, value_type, num_col, denom_col, lambda, ref_lag){ - - # Check input data df = read_csv(data_path) @@ -160,5 +133,4 @@ main_local <- function(data_path, export_dir, test_date_list, test_lags, value_cols, training_days, testing_window, ref_lag, value_type, lambda) - } diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index 32e4691d1..fcdc0f3f0 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -3,6 +3,27 @@ #' Reads a parameters file. If the file does not exist, the function will create a copy of #' '"params.json.template" and read from that. #' +#' A params list should contain the following fields. If not included, +#' they will be filled with default values when possible. +#' +#' params$ref_lag: reference lag, after x days, the update is considered to be +#' the response. 
60 is a reasonable choice for CHNG outpatient data +#' params$data_path: link to the input data file +#' params$testing_window: the testing window used for saving the runtime. Could +#' set it to be 1 if time allows +#' params$test_dates: list of two elements, the first one is the start date and +#' the second one is the end date +#' params$training_days: set it to be 270 or larger if you have enough data +#' params$num_col: the column name for the counts of the numerator, e.g. the +#' number of COVID claims +#' params$denom_col: the column name for the counts of the denominator, e.g. the +#' number of total claims +#' params$geo_level: list("state", "county") +#' params$taus: ?? +#' params$lambda: ?? +#' params$export_dir: ?? +#' params$lp_solver: LP solver to use in quantile_lasso(); "gurobi" or "glpk" +#' #' @param path path to the parameters file; if not present, will try to copy the file #' "params.json.template" #' @param template_path path to the template parameters file @@ -90,11 +111,10 @@ filter_counties <- function(geos) { #' Subset list of counties to those included in the 200 most populous in the US #' -#' @importFrom covidcast county_census #' @importFrom dplyr select %>% arrange desc get_populous_counties <- function() { return( - county_census %>% + covidcast::county_census %>% select(pop = POPESTIMATE2019, fips = FIPS) %>% # Drop megacounties (states) filter(!endsWith(fips, "000")) %>% diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_7davs_and_target.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_7davs_and_target.Rd new file mode 100644 index 000000000..0dae9267c --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_7davs_and_target.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{add_7davs_and_target} +\alias{add_7davs_and_target} +\title{Add 7dav and target to the data +Target is the updates made ref_lag days after the first release} +\usage{ +add_7davs_and_target(df, value_col, refd_col, lag_col, ref_lag) +} +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{value_col}{column name for the column of raw value} + +\item{refd_col}{column name for the column of reference date} + +\item{lag_col}{column name for the column of lag} +} +\description{ +Add 7dav and target to the data +Target is the updates made ref_lag days after the first release +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd new file mode 100644 index 000000000..518480c88 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{add_dayofweek} +\alias{add_dayofweek} +\title{Add one hot encoding for day of a week info in terms of reference +and issue date} +\usage{ +add_dayofweek(df, wd = weekdays_abbr, time_col, suffix) +} +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{wd}{vector of days of a week} + +\item{time_col}{column used for the date, can be either reference date or +issue date} + +\item{suffix}{suffix added to indicate which kind of date is used} +} +\description{ +Add one hot encoding for day of a week info in terms of reference +and issue date +} diff 
--git a/Backfill_Correction/delphiBackfillCorrection/man/add_params_for_dates.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_params_for_dates.Rd new file mode 100644 index 000000000..b48d8b36a --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_params_for_dates.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{add_params_for_dates} +\alias{add_params_for_dates} +\title{Add params related to date +Target is the updates made ref_lag days after the first release} +\usage{ +add_params_for_dates(backfill_df, refd_col, lag_col) +} +\arguments{ +\item{refd_col}{column name for the column of reference date} + +\item{lag_col}{column name for the column of lag} + +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} +} +\description{ +Add params related to date +Target is the updates made ref_lag days after the first release +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_shift.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_shift.Rd new file mode 100644 index 000000000..a1947aa2e --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_shift.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{add_shift} +\alias{add_shift} +\title{Used for data shifting in terms of reference date} +\usage{ +add_shift(df, n_day, refd_col) +} +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{n_day}{number of days to be shifted} + +\item{refd_col}{column name for the column of reference date} +} +\description{ +Used for data shifting in terms of reference date +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd new file mode 100644 index 000000000..4b9b2d616 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{add_sqrtscale} +\alias{add_sqrtscale} +\title{Add columns to indicate the scale of value at square root level} +\usage{ +add_sqrtscale(train_data, test_data, max_raw, value_col) +} +\arguments{ +\item{train_data}{Data Frame for training} + +\item{test_data}{Data Frame for testing} + +\item{value_col}{the column name of the considered value} + +\item{the}{maximum value in the training data at square root level} +} +\description{ +Add columns to indicate the scale of value at square root level +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd new file mode 100644 index 000000000..0d7c80d5c --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{add_weekofmonth} +\alias{add_weekofmonth} +\title{Add one hot encoding for week of a month info in terms of issue date} +\usage{ +add_weekofmonth(df, wm = week_issues, time_col) +} +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{wm}{vector of weeks of a month} + +\item{time_col}{column used for the date, can be 
either reference date or +issue date} +} +\description{ +Add one hot encoding for week of a month info in terms of issue date +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/create_dir_not_exist.Rd b/Backfill_Correction/delphiBackfillCorrection/man/create_dir_not_exist.Rd new file mode 100644 index 000000000..245bb2084 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/create_dir_not_exist.Rd @@ -0,0 +1,14 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils.R +\name{create_dir_not_exist} +\alias{create_dir_not_exist} +\title{Create directory if not already existing} +\usage{ +create_dir_not_exist(path) +} +\arguments{ +\item{path}{character vector giving the directory to create} +} +\description{ +Create directory if not already existing +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/create_name_pattern.Rd b/Backfill_Correction/delphiBackfillCorrection/man/create_name_pattern.Rd new file mode 100644 index 000000000..54deb96d5 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/create_name_pattern.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/io.R +\name{create_name_pattern} +\alias{create_name_pattern} +\title{Create pattern to match input files of a given type, signal, and geo level} +\usage{ +create_name_pattern( + indicator, + signal, + geo_level, + file_type = c("daily", "rollup") +) +} +\description{ +Create pattern to match input files of a given type, signal, and geo level +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd b/Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd new file mode 100644 index 000000000..e224b9b7b --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{data_filteration} +\alias{data_filteration} +\title{Filtration for training and testing data with different lags} +\usage{ +data_filteration(test_lag, geo_train_data, geo_test_data) +} +\arguments{ +\item{test_lag}{} + +\item{geo_train_data}{training data for a certain location} + +\item{geo_test_data}{testing data for a certain location} +} +\description{ +Filtration for training and testing data with different lags +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/delta.Rd b/Backfill_Correction/delphiBackfillCorrection/man/delta.Rd new file mode 100644 index 000000000..e51f197ec --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/delta.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/beta_prior_estimation.R +\name{delta} +\alias{delta} +\title{Functions for Beta Prior Approach. +This is used only for the ratio prediction e.g. fraction of Covid claims, +percentage of positive tests. We assume that the ratio follows a beta distribution +that is day-of-week dependent. A quantile regression model is used first with lasso +penalty for supporting quantile estimation and then a non-linear minimization is used +for prior estimation. +Sum of squared error} +\usage{ +delta(fit, actual) +} +\arguments{ +\item{fit}{estimated values} + +\item{actual}{actual values} +} +\description{ +Functions for Beta Prior Approach. +This is used only for the ratio prediction e.g. fraction of Covid claims, +percentage of positive tests. 
We assume that the ratio follows a beta distribution +that is day-of-week dependent. A quantile regression model is used first with lasso +penalty for supporting quantile estimation and then a non-linear minimization is used +for prior estimation. +Sum of squared error +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd b/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd new file mode 100644 index 000000000..87c6a96f0 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd @@ -0,0 +1,50 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/beta_prior_estimation.R +\name{est_priors} +\alias{est_priors} +\title{Main function for the beta prior approach +Estimate the priors for the beta distribution based on data for +a certain day of a week} +\usage{ +est_priors( + train_data, + prior_test_data, + cov, + taus, + params_list, + response, + lp_solver, + lambda, + start = c(0, log(10)), + base_pseudo_denom = 1000, + base_pseudo_num = 10 +) +} +\arguments{ +\item{train_data}{Data Frame for training} + +\item{prior_test_data}{Data Frame for testing} + +\item{taus}{vector of considered quantiles} + +\item{params_list}{the list of parameters for training} + +\item{response}{the column name of the response variable} + +\item{lp_solver}{the lp solver used in Quantgen} + +\item{start}{the initialization of the the points in nlm} + +\item{base_pseudo_num}{the pseudo counts added to numerator if little data for training} + +\item{dw}{column name to indicate which day of a week it is} + +\item{labmda}{the level of lasso penalty} + +\item{base_pseudo_denum}{the pseudo counts added to denominator if little data for training} +} +\description{ +Main function for the beta prior approach +Estimate the priors for the beta distribution based on data for +a certain day of a week +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/evl.Rd b/Backfill_Correction/delphiBackfillCorrection/man/evl.Rd new file mode 100644 index 000000000..c18067c6e --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/evl.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{evl} +\alias{evl} +\title{Evaluation of the test results based on WIS score +The WIS score calculation is based on the weighted_interval_score function +from the `evalcast` package from Delphi} +\usage{ +evl(test_data, taus) +} +\arguments{ +\item{test_data}{multiple columns for the prediction results of different +quantiles. 
Each row represents an update with certain (reference_date, +issue_date, location) combination.} + +\item{taus}{vector of quantiles interested} +} +\description{ +Evaluation of the test results based on WIS score +The WIS score calculation is based on the weighted_interval_score function +from the `evalcast` package from Delphi +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd new file mode 100644 index 000000000..6dfea4b50 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/io.R +\name{export_test_result} +\alias{export_test_result} +\title{Export the result to customized directory} +\usage{ +export_test_result(test_data, coef_data, export_dir, geo) +} +\arguments{ +\item{test_data}{test data with prediction result} + +\item{coef_data}{data frame with the estimated coefficients} + +\item{export_dir}{export directory} + +\item{geo}{the geogrpahical location} + +\item{geo_level}{geographical level, can be county or state} + +\item{test_lag}{} +} +\description{ +Export the result to customized directory +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/fill_missing_updates.Rd b/Backfill_Correction/delphiBackfillCorrection/man/fill_missing_updates.Rd new file mode 100644 index 000000000..8b93cdeb0 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/fill_missing_updates.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{fill_missing_updates} +\alias{fill_missing_updates} +\title{Get pivot table, filling NANs. If there is no update on issue date D but +previous reports exist for issue date D_p < D, all the dates between +[D_p, D] are filled with with the reported value on date D_p. If there is +no update for any previous issue date, fill in with 0.} +\usage{ +fill_missing_updates(df, value_col, refd_col, lag_col) +} +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{value_col}{column name for the column of counts} + +\item{refd_col}{column name for the column of reference date} + +\item{lag_col}{column name for the column of lag} +} +\description{ +Get pivot table, filling NANs. If there is no update on issue date D but +previous reports exist for issue date D_p < D, all the dates between +[D_p, D] are filled with with the reported value on date D_p. If there is +no update for any previous issue date, fill in with 0. 
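The forward-fill rule in the description above is easiest to see on a toy example. A minimal sketch of the same idea (not part of the patch; toy is an invented single-reference-date data frame, while the real fill_missing_updates() works on the wide lag-by-reference-date pivot):

# Sketch: carry the last reported value forward across lags, then treat
# values that were never reported as 0, mirroring fill_missing_updates().
library(dplyr)
library(tidyr)
toy <- data.frame(
  time_value = as.Date("2022-01-01"),
  lag = 0:3,
  value_raw = c(5, NA, NA, 8)
)
toy %>%
  arrange(lag) %>%
  fill(value_raw, .direction = "down") %>%   # lags 1 and 2 inherit the 5 reported at lag 0
  mutate(value_raw = ifelse(is.na(value_raw), 0, value_raw))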
+} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/fill_rows.Rd b/Backfill_Correction/delphiBackfillCorrection/man/fill_rows.Rd new file mode 100644 index 000000000..bfd001907 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/fill_rows.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{fill_rows} +\alias{fill_rows} +\title{Re-index, fill na, make sure all reference date have enough rows for updates} +\usage{ +fill_rows(df, refd_col, lag_col, min_refd, max_refd) +} +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{refd_col}{column name for the column of reference date} + +\item{lag_col}{column name for the column of lag} + +\item{min_refd}{the earliest reference date considered in the data} + +\item{max_refd}{the latest reference date considered in the data} +} +\value{ +df_new Data Frame with filled rows for missing lags +} +\description{ +Re-index, fill na, make sure all reference date have enough rows for updates +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/filter_counties.Rd b/Backfill_Correction/delphiBackfillCorrection/man/filter_counties.Rd new file mode 100644 index 000000000..0998af0f0 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/filter_counties.Rd @@ -0,0 +1,11 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils.R +\name{filter_counties} +\alias{filter_counties} +\title{Subset list of counties to those included in the 200 most populous in the US} +\usage{ +filter_counties(geos) +} +\description{ +Subset list of counties to those included in the 200 most populous in the US +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/get_7dav.Rd b/Backfill_Correction/delphiBackfillCorrection/man/get_7dav.Rd new file mode 100644 index 000000000..84a9f1e2d --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/get_7dav.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{get_7dav} +\alias{get_7dav} +\title{Calculate 7 day moving average for each issue date +The 7dav for date D reported on issue date D_i is the average from D-7 to D-1} +\usage{ +get_7dav(pivot_df, refd_col) +} +\arguments{ +\item{pivot_df}{Data Frame where the columns are issue dates and the rows are +reference dates} + +\item{refd_col}{column name for the column of reference date} +} +\description{ +Calculate 7 day moving average for each issue date +The 7dav for date D reported on issue date D_i is the average from D-7 to D-1 +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd b/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd new file mode 100644 index 000000000..2ea29da80 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd @@ -0,0 +1,11 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/io.R +\name{get_files_list} +\alias{get_files_list} +\title{List valid input files.} +\usage{ +get_files_list(indicator, signal, geo_level, params, sub_dir = "") +} +\description{ +List valid input files. 
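The 7dav described for get_7dav() above is a trailing window that stops at the day before the reference point. A small sketch of that windowing on a plain numeric vector (illustrative only; x is made-up data, and the package applies the idea to the pivot table of reported values rather than a bare vector):

# Sketch: trailing 7-day mean, shifted one step so the value attached to day D
# covers D-7 through D-1, as the documentation above describes.
library(zoo)
x <- c(1, 3, 2, 6, 4, 8, 5, 9, 7, 10)
sevendav <- rollmeanr(x, k = 7, fill = NA)    # mean of day D and the 6 days before it
sevendav_lagged <- c(NA, head(sevendav, -1))  # re-index so day D sees only D-7..D-1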
+} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/get_populous_counties.Rd b/Backfill_Correction/delphiBackfillCorrection/man/get_populous_counties.Rd new file mode 100644 index 000000000..9f53bfe65 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/get_populous_counties.Rd @@ -0,0 +1,11 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils.R +\name{get_populous_counties} +\alias{get_populous_counties} +\title{Subset list of counties to those included in the 200 most populous in the US} +\usage{ +get_populous_counties() +} +\description{ +Subset list of counties to those included in the 200 most populous in the US +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/get_weekofmonth.Rd b/Backfill_Correction/delphiBackfillCorrection/man/get_weekofmonth.Rd new file mode 100644 index 000000000..3b53bcb0e --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/get_weekofmonth.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{get_weekofmonth} +\alias{get_weekofmonth} +\title{Get week of a month info according to a date +All the dates on or before the ith Sunday but after the (i-1)th Sunday +is considered to be the ith week. Notice that the dates in the 5th week +this month are actually in the same week with the dates in the 1st week +next month and those dates are sparse. Thus, we assign the dates in the +5th week to the 1st week.} +\usage{ +get_weekofmonth(date) +} +\arguments{ +\item{date}{as.Date} +} +\value{ +a integer indicating which week it is in a month +} +\description{ +Get week of a month info according to a date +All the dates on or before the ith Sunday but after the (i-1)th Sunday +is considered to be the ith week. Notice that the dates in the 5th week +this month are actually in the same week with the dates in the 1st week +next month and those dates are sparse. Thus, we assign the dates in the +5th week to the 1st week. +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/main.Rd b/Backfill_Correction/delphiBackfillCorrection/man/main.Rd new file mode 100644 index 000000000..0dce32a76 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/main.Rd @@ -0,0 +1,11 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/main.R +\name{main} +\alias{main} +\title{Perform backfill correction on all desired signals and geo levels} +\usage{ +main(params, ...) 
+} +\description{ +Perform backfill correction on all desired signals and geo levels +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd b/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd new file mode 100644 index 000000000..d9d745fcf --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tooling.R +\name{main_local} +\alias{main_local} +\title{Main function to correct a single local signal} +\usage{ +main_local( + data_path, + export_dir, + test_start_date, + test_end_date, + traning_days, + testing_window, + value_type, + num_col, + denom_col, + lambda, + ref_lag +) +} +\description{ +Main function to correct a single local signal +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd b/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd new file mode 100644 index 000000000..5468e1e75 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd @@ -0,0 +1,36 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{model_training_and_testing} +\alias{model_training_and_testing} +\title{Model training and prediction using quantile regression with Lasso penalty +The quantile regression uses the quantile_lasso function from quantgen package} +\usage{ +model_training_and_testing( + train_data, + test_data, + taus, + params_list, + lp_solver, + lambda, + test_date +) +} +\arguments{ +\item{train_data}{Data frame for training} + +\item{test_data}{Data frame for testing} + +\item{taus}{vector of considered quantiles} + +\item{params_list}{the list of column names serving as the covariates} + +\item{lp_solver}{the lp solver used in Quantgen} + +\item{lambda}{the level of lasso penalty} + +\item{test_date}{as.Date} +} +\description{ +Model training and prediction using quantile regression with Lasso penalty +The quantile regression uses the quantile_lasso function from quantgen package +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/objective.Rd b/Backfill_Correction/delphiBackfillCorrection/man/objective.Rd new file mode 100644 index 000000000..256d1ce4b --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/objective.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/beta_prior_estimation.R +\name{objective} +\alias{objective} +\title{Generate objection function} +\usage{ +objective(theta, x, prob, ...) 
+} +\arguments{ +\item{theta}{parameters for the distribution in log scale} + +\item{prob}{the expected probabilities} +} +\description{ +Generate objection function +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd new file mode 100644 index 000000000..f4d9f2c21 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/beta_prior_estimation.R +\name{ratio_adj} +\alias{ratio_adj} +\title{Update ratio using beta prior approach} +\usage{ +ratio_adj(train_data, test_data, prior_test_data) +} +\arguments{ +\item{train_data}{training data} + +\item{test_data}{testing data} + +\item{prior_test_data}{testing data for the lag -1 model} +} +\description{ +Update ratio using beta prior approach +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd new file mode 100644 index 000000000..185b78411 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/beta_prior_estimation.R +\name{ratio_adj_with_pseudo} +\alias{ratio_adj_with_pseudo} +\title{Update ratio based on the pseudo counts for numerators and denominators} +\usage{ +ratio_adj_with_pseudo(data, cov, pseudo_num, pseudo_denom, num_col, denom_col) +} +\arguments{ +\item{data}{Data Frame} + +\item{pseudo_num}{the estimated counts to be added to numerators} + +\item{pseudo_denom}{the estimated counts to be added to denominators} + +\item{num_col}{the column name for the numerator} + +\item{denom_col}{the column name for the denominator} + +\item{dw}{character to indicate the day of a week. Can be NULL for all the days} +} +\description{ +Update ratio based on the pseudo counts for numerators and denominators +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/read_data.Rd b/Backfill_Correction/delphiBackfillCorrection/man/read_data.Rd new file mode 100644 index 000000000..75ae00b8a --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/read_data.Rd @@ -0,0 +1,14 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/io.R +\name{read_data} +\alias{read_data} +\title{Read a parquet file into a dataframe} +\usage{ +read_data(path) +} +\arguments{ +\item{path}{path to the input data} +} +\description{ +Read a parquet file into a dataframe +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd b/Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd new file mode 100644 index 000000000..2a8e9e239 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd @@ -0,0 +1,43 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils.R +\name{read_params} +\alias{read_params} +\title{Return params file as an R list} +\usage{ +read_params(path = "params.json", template_path = "params.json.template") +} +\arguments{ +\item{path}{path to the parameters file; if not present, will try to copy the file +"params.json.template"} + +\item{template_path}{path to the template parameters file} +} +\value{ +a named list of parameters values +} +\description{ +Reads a parameters file. 
If the file does not exist, the function will create a copy of +'"params.json.template" and read from that. +} +\details{ +A params list should contain the following fields. If not included, +they will be filled with default values when possible. + +params$ref_lag: reference lag, after x days, the update is considered to be + the response. 60 is a reasonable choice for CHNG outpatient data +params$data_path: link to the input data file +params$testing_window: the testing window used for saving the runtime. Could + set it to be 1 if time allows +params$test_dates: list of two elements, the first one is the start date and + the second one is the end date +params$training_days: set it to be 270 or larger if you have enough data +params$num_col: the column name for the counts of the numerator, e.g. the + number of COVID claims +params$denom_col: the column name for the counts of the denominator, e.g. the + number of total claims +params$geo_level: list("state", "county") +params$taus: ?? +params$lambda: ?? +params$export_dir: ?? +params$lp_solver: LP solver to use in quantile_lasso(); "gurobi" or "glpk" +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd new file mode 100644 index 000000000..2f1f5acda --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd @@ -0,0 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/main.R +\name{run_backfill} +\alias{run_backfill} +\title{Get backfill-corrected estimates for a single signal + geo combination} +\usage{ +run_backfill( + df, + value_type, + geo_level, + params, + refd_col = "time_value", + lag_col = "lag", + signal_suffixes = c("") +) +} +\arguments{ +\item{df}{dataframe of input data containing a single indicator + signal + +level of geographic coverage.} + +\item{value_type}{string describing signal type of "count" and "ratio".} + +\item{geo_level}{string describing geo coverage of input data. "state" or +"county". If "county" is selected, only data from the 200 most populous +counties in the US (*not* the dataset) will be used.} + +\item{params}{named list containing modeling and data settings. 
Must include +the following elements: `ref_lag`, `testing_window`, `test_dates`, +`training_days`, `num_col`, `taus`, `lambda`, `export_dir`, `lp_solver`, +and `data_path` (input dir).} + +\item{refd_col}{string containing name of reference date field within `df`.} + +\item{lag_col}{string containing name of lag field within `df`.} +} +\description{ +Get backfill-corrected estimates for a single signal + geo combination +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd new file mode 100644 index 000000000..a3bf2d5fb --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tooling.R +\name{run_backfill_local} +\alias{run_backfill_local} +\title{Corrected estimates from a single local signal} +\usage{ +run_backfill_local( + df, + export_dir, + taus, + test_date_list, + test_lags, + value_cols, + training_days, + testing_window, + ref_lag, + value_type, + lambda +) +} +\description{ +Corrected estimates from a single local signal +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/subset_valid_files.Rd b/Backfill_Correction/delphiBackfillCorrection/man/subset_valid_files.Rd new file mode 100644 index 000000000..0a15eca0f --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/subset_valid_files.Rd @@ -0,0 +1,12 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/io.R +\name{subset_valid_files} +\alias{subset_valid_files} +\title{Return file names only if they contain data to be used in training} +\usage{ +subset_valid_files(files_list, file_type = c("daily", "rollup"), params) +} +\description{ +Parse filenames to find included dates. Use different patterns if file +includes daily or rollup (multiple days) data. 
+} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/training_days_check.Rd b/Backfill_Correction/delphiBackfillCorrection/man/training_days_check.Rd new file mode 100644 index 000000000..7309f5608 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/training_days_check.Rd @@ -0,0 +1,11 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils.R +\name{training_days_check} +\alias{training_days_check} +\title{Check available training days} +\usage{ +training_days_check(issue_date, training_days) +} +\description{ +Check available training days +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd b/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd new file mode 100644 index 000000000..54f7d66e9 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd @@ -0,0 +1,11 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils.R +\name{validity_checks} +\alias{validity_checks} +\title{Check input data for validity} +\usage{ +validity_checks(df, value_type) +} +\description{ +Check input data for validity +} diff --git a/Backfill_Correction/run.R b/Backfill_Correction/run.R new file mode 100644 index 000000000..85f1ece05 --- /dev/null +++ b/Backfill_Correction/run.R @@ -0,0 +1,5 @@ +library(delphiBackfillCorrection) + +params <- read_params("params.json") +delphiBackfillCorrection::main(params) +message("backfill correction completed successfully") From b1e3e69854db4f059b7bf16fdc1f8f5f76a69bb9 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Wed, 24 Aug 2022 18:30:44 -0400 Subject: [PATCH 029/145] get tests working; convert dim to ncol/nrow --- .../delphiBackfillCorrection/NAMESPACE | 3 ++ .../R/beta_prior_estimation.R | 4 +- .../delphiBackfillCorrection/R/constants.R | 44 +++++++++---------- .../delphiBackfillCorrection/R/io.R | 12 ++--- .../delphiBackfillCorrection/R/main.R | 8 ++-- .../delphiBackfillCorrection/R/model.R | 4 +- .../R/preprocessing.R | 9 ++-- .../delphiBackfillCorrection/R/tooling.R | 5 ++- .../man/export_test_result.Rd | 4 +- .../delphiBackfillCorrection/man/fill_rows.Rd | 2 +- .../unit-tests/testthat/test-preprocessing.R | 42 ++++++++---------- 11 files changed, 69 insertions(+), 68 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE index 0b4ac3fb9..2f057757e 100644 --- a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE +++ b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE @@ -39,7 +39,10 @@ importFrom(quantgen,quantile_lasso) importFrom(readr,read_csv) importFrom(stats,nlm) importFrom(stats,pbeta) +importFrom(stats,setNames) importFrom(stringr,str_interp) +importFrom(tidyr,crossing) +importFrom(tidyr,drop_na) importFrom(tidyr,fill) importFrom(tidyr,pivot_longer) importFrom(tidyr,pivot_wider) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R index 46f4b22ac..249c43f84 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R @@ -49,7 +49,7 @@ est_priors <- function(train_data, prior_test_data, cov, taus, base_pseudo_denom=1000, base_pseudo_num=10){ sub_train_data <- train_data %>% filter(train_data[[cov]] == 1) sub_test_data <- prior_test_data %>% 
filter(prior_test_data[[cov]] == 1) - if (dim(sub_test_data)[1] == 0) { + if (nrow(sub_test_data) == 0) { pseudo_denom <- base_pseudo_denom pseudo_num <- base_pseudo_num } else { @@ -59,7 +59,7 @@ est_priors <- function(train_data, prior_test_data, cov, taus, tau <- taus[idx] obj <- quantile_lasso(as.matrix(sub_train_data[params_list]), sub_train_data[response], tau = tau, - lambda = lambda, stand = FALSE, lp_solver = lp_solver) + lambda = lambda, standardize = FALSE, lp_solver = lp_solver) y_hat_all <- as.numeric(predict(obj, newx = as.matrix(sub_test_data[params_list]))) quantiles[idx] <- exp(mean(y_hat_all, na.rm=TRUE)) # back to the actual scale } diff --git a/Backfill_Correction/delphiBackfillCorrection/R/constants.R b/Backfill_Correction/delphiBackfillCorrection/R/constants.R index b4eefe903..717fb6312 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/constants.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/constants.R @@ -1,33 +1,33 @@ +## TODO not sure how to import roxygen-style outside of a function library(tibble) +## TODO convert all constant usages in package to uppercase # Constants for the backfill correction model -taus <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) -ref_lag <- 60 -test_lags <- c(1:14, 21, 35, 51) -training_days <- 270 -testing_window <- 14 -lag_window <- 5 -lambda <- 0.1 -lp_solver = "gurobi" # LP solver to use in quantile_lasso(); "gurobi" or "glpk" +TAUS <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) +REF_LAG <- 60 +TEST_LAGS <- c(1:14, 21, 35, 51) +TRAINING_DAYS <- 270 +TESTING_WINDOW <- 14 +LAG_WINDOW <- 5 +LAMBDA <- 0.1 +LP_SOLVER <-"gurobi" # LP solver to use in quantile_lasso(); "gurobi" or "glpk" -yitl = "log_value_raw" -slope = "log_7dav_slope" -y7dav = "log_value_7dav" -wd = c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat") -wm <- c("W1_issue", "W2_issue", "W3_issue") +YITL <-"log_value_raw" +SLOPE <-"log_7dav_slope" +Y7DAV <-"log_value_7dav" -#sqrtscale = c('sqrty0', 'sqrty1', 'sqrty2', 'sqrty3') -sqrtscale_covid = c('sqrty0_covid', 'sqrty1_covid', 'sqrty2_covid') -sqrtscale_total = c('sqrty0_total', 'sqrty1_total', 'sqrty2_total') -sqrtscale = c('sqrty0', 'sqrty1', "sqrty2") -log_lag = "inv_log_lag" +#SQRTSCALE <-c('sqrty0', 'sqrty1', 'sqrty2', 'sqrty3') +SQRTSCALE_COVID <-c('sqrty0_covid', 'sqrty1_covid', 'sqrty2_covid') +SQRTSCALE_TOTAL <-c('sqrty0_total', 'sqrty1_total', 'sqrty2_total') +SQRTSCALE <-c('sqrty0', 'sqrty1', "sqrty2") +LOG_LAG <-"inv_log_lag" # Dates -weekdays_abbr <- c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat") -week_issues <- c("W1_issue", "W2_issue", "W3_issue") -today = Sys.Date() +WEEKDAYS_ABBR <- c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat") +WEEK_ISSUES <- c("W1_issue", "W2_issue", "W3_issue") +TODAY <-Sys.Date() -indicators_and_signals <- tribble( +INDICATORS_AND_SIGNALS <- tribble( ~indicator, ~signal, ~name_suffix, ~value_type, ~sub_dir, "changehc", "covid", "", "count", "chng", "changehc", "flu", "", "count", "chng", diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index 4dd072d33..6190b0e12 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -18,15 +18,17 @@ read_data <- function(path){ #' @param coef_data data frame with the estimated coefficients #' @param export_dir export directory #' @param geo_level geographical level, can be county or state -#' @param geo the geogrpahical location -#' @param test_lag +#' @param test_lag #' #' 
@export -export_test_result <- function(test_data, coef_data, export_dir, geo) { - pred_output_dir = paste("prediction", geo, sep="_") +export_test_result <- function(test_data, coef_data, export_dir, + geo_level, test_lag) { + ## TODO + warning("test_lag arg not being used") + pred_output_dir = paste("prediction", geo_level, sep="_") write.csv(test_data, paste(export_dir, pred_output_dir , ".csv", sep=""), row.names = FALSE) - coef_output_dir = paste("coefs", geo, sep="_") + coef_output_dir = paste("coefs", geo_level, sep="_") write.csv(test_data, paste(export_dir, coef_output_dir , ".csv", sep=""), row.names = FALSE) } diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 9b40596bc..fb282b0c4 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -14,6 +14,7 @@ #' @param lag_col string containing name of lag field within `df`. #' #' @importFrom dplyr %>% filter +#' @importFrom tidyr drop_na #' #' @export run_backfill <- function(df, value_type, geo_level, params, @@ -76,8 +77,8 @@ run_backfill <- function(df, value_type, geo_level, params, filter(issue_date >= test_date) %>% filter(issue_date < test_date + params$testing_window) %>% drop_na() - if (dim(geo_test_data)[1] == 0) next - if (dim(geo_train_data)[1] <= 200) next + if (nrow(geo_test_data) == 0) next + if (nrow(geo_train_data) <= 200) next if (value_type == "ratio"){ geo_prior_test_data = combined_df %>% @@ -111,8 +112,7 @@ run_backfill <- function(df, value_type, geo_level, params, coefs <- prediction_results[[2]] test_data <- evl(test_data, params$taus) - export_test_result(test_data, coefs, params$export_dir, geo_level, - geo, test_lag) + export_test_result(test_data, coefs, params$export_dir, geo_level, test_lag) }# End for test lags }# End for test date list }# End for signal suffixes diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index 587d076cc..d668a0dd2 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -53,7 +53,7 @@ model_training_and_testing <- function(train_data, test_data, taus, params_list, # Quantile regression obj = quantile_lasso(as.matrix(train_data[params_list]), train_data$log_value_target, tau = tau, - lambda = lambda, stand = FALSE, lp_solver = lp_solver) + lambda = lambda, standardize = FALSE, lp_solver = lp_solver) y_hat_all = as.numeric(predict(obj, newx = as.matrix(test_data[params_list]))) test_data[paste0("predicted_tau", as.character(tau))] = y_hat_all @@ -86,7 +86,7 @@ model_training_and_testing <- function(train_data, test_data, taus, params_list, #' #' @export evl <- function(test_data, taus){ - n_row = dim(test_data)[1] + n_row = nrow(test_data) taus_list = as.list(data.frame(matrix(replicate(n_row, taus), ncol=n_row))) # Calculate WIS diff --git a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R index ac986a419..4e921c959 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R @@ -17,9 +17,12 @@ #' @param max_refd the latest reference date considered in the data #' #' @return df_new Data Frame with filled rows for missing lags -#' +#' +#' @importFrom tidyr crossing +#' @importFrom stats setNames +#' #' @export -fill_rows <- 
function(df, refd_col, lag_col, min_refd, max_refd){ +fill_rows <- function(df, refd_col, lag_col, min_refd, max_refd, ref_lag = REF_LAG){ lags <- min(df[[lag_col]]): ref_lag # Full list of lags refds <- seq(min_refd, max_refd, by="day") # Full list reference date row_inds_df <- as.data.frame(crossing(refds, lags)) %>% @@ -164,7 +167,7 @@ add_weekofmonth <- function(df, wm = week_issues, time_col){ #' @param lag_col column name for the column of lag #' #' @importFrom dplyr %>% -#' @importFrom tidyr pivot_wider +#' @importFrom tidyr pivot_wider drop_na #' #' @export add_7davs_and_target <- function(df, value_col, refd_col, lag_col, ref_lag){ diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R index 33abdc8e7..da9c4f849 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R @@ -1,6 +1,7 @@ #' Corrected estimates from a single local signal #' #' @importFrom dplyr %>% filter +#' @importFrom tidyr drop_na run_backfill_local <- function(df, export_dir, taus, test_date_list, test_lags, value_cols, training_days, testing_window, @@ -46,8 +47,8 @@ run_backfill_local <- function(df, export_dir, taus, filter(issue_date >= test_date) %>% filter(issue_date < test_date+testing_window) %>% drop_na() - if (dim(geo_test_data)[1] == 0) next - if (dim(geo_train_data)[1] <= 200) next + if (nrow(geo_test_data) == 0) next + if (nrow(geo_train_data) <= 200) next if (value_type == "fraction"){ geo_prior_test_data = combined_df %>% filter(issue_date > test_date-7) %>% diff --git a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd index 6dfea4b50..7eb775780 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd @@ -4,7 +4,7 @@ \alias{export_test_result} \title{Export the result to customized directory} \usage{ -export_test_result(test_data, coef_data, export_dir, geo) +export_test_result(test_data, coef_data, export_dir, geo_level, test_lag) } \arguments{ \item{test_data}{test data with prediction result} @@ -13,8 +13,6 @@ export_test_result(test_data, coef_data, export_dir, geo) \item{export_dir}{export directory} -\item{geo}{the geogrpahical location} - \item{geo_level}{geographical level, can be county or state} \item{test_lag}{} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/fill_rows.Rd b/Backfill_Correction/delphiBackfillCorrection/man/fill_rows.Rd index bfd001907..79ad1571e 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/fill_rows.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/fill_rows.Rd @@ -4,7 +4,7 @@ \alias{fill_rows} \title{Re-index, fill na, make sure all reference date have enough rows for updates} \usage{ -fill_rows(df, refd_col, lag_col, min_refd, max_refd) +fill_rows(df, refd_col, lag_col, min_refd, max_refd, ref_lag = REF_LAG) } \arguments{ \item{df}{Data Frame of aggregated counts within a single location diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R index c4e5982fd..eec0e0124 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R @@ 
-1,10 +1,3 @@ -library(jsonlite) -library(testthat) -library(dplyr) -library(tidyr) -library(zoo) -library(stats) -library(lubridate) context("Testing preprocessing helper functions") refd_col <- "time_value" @@ -25,28 +18,28 @@ wm <- c("W1_issue", "W2_issue", "W3_issue") test_that("testing rows filling for missing lags", { - #Make sure all reference date have enough rows for updates - df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd) + # Make sure all reference date have enough rows for updates + df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) n_refds <- as.numeric(max_refd - min_refd)+1 - expect_equal(dim(df_new)[1], n_refds*(ref_lag+1)) + expect_equal(nrow(df_new), n_refds*(ref_lag+1)) expect_equal(df_new %>% drop_na(), fake_df) }) test_that("testing NA filling for missing udpates", { - #Make sure all the updates are valid integers + # Make sure all the updates are valid integers # Assuming the input data does not have enough rows for consecutive lags expect_error(fill_missing_updates(fake_df, value_col, refd_col, lag_col), "Risk exists in forward fill") # Assuming the input data is already prepared - df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd) + df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) n_refds <- as.numeric(max_refd - min_refd)+1 backfill_df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) - expect_equal(dim(backfill_df)[1], n_refds*(ref_lag+1)) + expect_equal(nrow(backfill_df), n_refds*(ref_lag+1)) for (d in seq(min_refd, max_refd, by="day")){ expect_true(all(diff(backfill_df[backfill_df[,refd_col]==d, "value_raw"])>=0 )) @@ -54,8 +47,8 @@ test_that("testing NA filling for missing udpates", { }) -test_that("testing the caculation of 7-day moving average", { - df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd) +test_that("testing the calculation of 7-day moving average", { + df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) df$issue_date <- df[[refd_col]] + df[[lag_col]] pivot_df <- df[order(df$issue_date, decreasing=FALSE), ] %>% @@ -80,8 +73,8 @@ test_that("testing the data shifting", { test_that("testing adding columns for each day of a week", { df_new <- add_dayofweek(fake_df, wd, refd_col, "_ref") - expect_equal(dim(fake_df)[2] + 7, dim(df_new)[2]) - expect_true(all(rowSums(df_new[, -c(1:dim(fake_df)[2])]) == 1)) + expect_equal(ncol(fake_df) + 7, ncol(df_new)) + expect_true(all(rowSums(df_new[, -c(1:ncol(fake_df))]) == 1)) expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-03"), "Mon_ref"] == 1)) expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-05"), "Wed_ref"] == 1)) }) @@ -95,18 +88,18 @@ test_that("testing the calculation of week of a month", { }) -test_that("testing the caculation of 7-day moving average", { +test_that("testing the calculation of 7-day moving average", { df_new <- add_weekofmonth(fake_df, wm, refd_col) - expect_equal(dim(fake_df)[2] + 3, dim(df_new)[2]) - expect_true(all(rowSums(df_new[, -c(1:dim(fake_df)[2])]) == 1)) + expect_equal(ncol(fake_df) + 3, ncol(df_new)) + expect_true(all(rowSums(df_new[, -c(1:ncol(fake_df))]) == 1)) expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-03"), "W2_issue"] == 1)) }) test_that("testing adding 7 day avg and target", { - df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd) + df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) 
backfill_df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) - df_new <- add_7davs_and_target(backfill_df, "value_raw", refd_col, lag_col) + df_new <- add_7davs_and_target(backfill_df, "value_raw", refd_col, lag_col, ref_lag) # Existing columns: # time_value: reference date @@ -118,7 +111,8 @@ test_that("testing adding 7 day avg and target", { # value_prev_7dav: 7day avg of the counts from -14 days to -8 days # value_target: updated counts on the target date # target_date: the date ref_lag days after the reference date - expect_equal(dim(df_new)[2], 3 + 1 + 1 + 1 + 1 + 1) - expect_equal(dim(df_new)[1], 7 * 8) + # and 5 log columns + expect_equal(ncol(df_new), 3 + 10) + expect_equal(nrow(df_new), 7 * 8) }) From 8cfd7850516b990e542c99f6bc45e4c0eb0ce61f Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Wed, 24 Aug 2022 18:35:37 -0400 Subject: [PATCH 030/145] allow any name for value field --- .../delphiBackfillCorrection/R/preprocessing.R | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R index 4e921c959..1673b85bb 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R @@ -180,7 +180,7 @@ add_7davs_and_target <- function(df, value_col, refd_col, lag_col, ref_lag){ # Add 7dav avg avg_df <- get_7dav(pivot_df, refd_col) avg_df <- add_shift(avg_df, 1, refd_col) # 7dav until yesterday - names(avg_df)[names(avg_df) == 'value_raw'] <- 'value_7dav' + names(avg_df)[names(avg_df) == value_col] <- 'value_7dav' avg_df_prev7 <- add_shift(avg_df, 7, refd_col) names(avg_df_prev7)[names(avg_df_prev7) == 'value_7dav'] <- 'value_prev_7dav' @@ -189,7 +189,7 @@ add_7davs_and_target <- function(df, value_col, refd_col, lag_col, ref_lag){ # Add target target_df <- df[df$lag==ref_lag, c(refd_col, "value_raw", "issue_date")] - names(target_df)[names(target_df) == 'value_raw'] <- 'value_target' + names(target_df)[names(target_df) == value_col] <- 'value_target' names(target_df)[names(target_df) == 'issue_date'] <- 'target_date' backfill_df <- merge(backfill_df, target_df, by=refd_col, all.x=TRUE) From 30397618141f1492c1de7a334820d6cd809894ec Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 25 Aug 2022 16:07:02 -0400 Subject: [PATCH 031/145] get R CMD check passing --- .../delphiBackfillCorrection/DESCRIPTION | 5 +- .../delphiBackfillCorrection/NAMESPACE | 9 +- .../R/beta_prior_estimation.R | 57 ++++++------ .../delphiBackfillCorrection/R/constants.R | 6 +- .../delphiBackfillCorrection/R/io.R | 78 ++++++++++------ .../delphiBackfillCorrection/R/main.R | 65 ++++++++------ .../delphiBackfillCorrection/R/model.R | 42 +++++---- .../R/preprocessing.R | 79 ++++++++--------- .../delphiBackfillCorrection/R/tooling.R | 88 +++++++++++++------ .../delphiBackfillCorrection/R/utils.R | 59 +++++++------ .../man-roxygen/df-template.R | 2 + .../man-roxygen/file_type-template.R | 2 + .../man-roxygen/geo_level-template.R | 2 + .../man-roxygen/indicator-template.R | 3 + .../man-roxygen/lag_col-template.R | 2 + .../man-roxygen/params-template.R | 4 + .../man-roxygen/refd_col-template.R | 2 + .../man-roxygen/signal-template.R | 3 + .../man-roxygen/taus-template.R | 2 + .../man-roxygen/test_lag-template.R | 1 + .../man-roxygen/value_col-template.R | 2 + .../man-roxygen/value_type-template.R | 1 + 
.../man/add_7davs_and_target.Rd | 15 ++-- .../man/add_dayofweek.Rd | 4 +- .../man/add_params_for_dates.Rd | 16 ++-- .../delphiBackfillCorrection/man/add_shift.Rd | 5 +- .../man/add_sqrtscale.Rd | 5 +- .../man/add_weekofmonth.Rd | 8 +- .../man/create_dir_not_exist.Rd | 2 +- .../man/create_name_pattern.Rd | 15 ++++ .../man/data_filteration.Rd | 2 +- .../delphiBackfillCorrection/man/delta.Rd | 14 +-- .../man/est_priors.Rd | 21 ++--- .../delphiBackfillCorrection/man/evl.Rd | 9 +- .../man/export_test_result.Rd | 9 +- .../man/fill_missing_updates.Rd | 11 ++- .../delphiBackfillCorrection/man/fill_rows.Rd | 10 ++- .../man/filter_counties.Rd | 3 + .../delphiBackfillCorrection/man/get_7dav.Rd | 3 +- .../man/get_files_list.Rd | 20 +++++ .../man/get_weekofmonth.Rd | 10 +-- .../delphiBackfillCorrection/man/main.Rd | 8 +- .../man/main_local.Rd | 35 +++++++- .../man/model_training_and_testing.Rd | 15 ++-- .../delphiBackfillCorrection/man/objective.Rd | 4 + .../delphiBackfillCorrection/man/ratio_adj.Rd | 13 ++- .../man/ratio_adj_with_pseudo.Rd | 6 +- .../man/read_params.Rd | 6 +- .../man/run_backfill.Rd | 26 ++++-- .../man/run_backfill_local.Rd | 41 +++++++-- .../man/subset_valid_files.Rd | 11 +++ .../man/training_days_check.Rd | 7 +- .../man/validity_checks.Rd | 12 ++- 53 files changed, 572 insertions(+), 308 deletions(-) create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/df-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/file_type-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/geo_level-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/indicator-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/lag_col-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/params-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/refd_col-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/taus-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/test_lag-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/value_col-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/value_type-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION b/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION index 8dc159939..04bbae163 100644 --- a/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION +++ b/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION @@ -13,6 +13,7 @@ Depends: R (>= 3.5.0), Imports: dplyr, + plyr, readr, tibble, stringr, @@ -23,7 +24,9 @@ Imports: jsonlite, lubridate, tidyr, - zoo + zoo, + utils, + rlang Suggests: knitr (>= 1.15), rmarkdown (>= 1.4), diff --git a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE index 2f057757e..b05297020 100644 --- a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE +++ b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE @@ -14,11 +14,11 @@ export(get_7dav) export(main) export(main_local) export(model_training_and_testing) -export(objective) export(ratio_adj) export(ratio_adj_with_pseudo) export(read_data) export(run_backfill) +export(run_backfill_local) importFrom(arrow,read_parquet) 
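## Aside (illustrative, not part of this patch; function names are made up):
## why rlang's .data pronoun is imported below. Bare column references inside
## dplyr verbs trigger R CMD check's "no visible binding for global variable"
## NOTE in package code; the .data$ form makes the column lookup explicit.
library(dplyr)
flags_note  <- function(df) df %>% filter(lag < 60)         # NOTE under R CMD check
check_clean <- function(df) df %>% filter(.data$lag < 60)   # explicit column reference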
importFrom(dplyr,"%>%") importFrom(dplyr,arrange) @@ -27,6 +27,7 @@ importFrom(dplyr,desc) importFrom(dplyr,everything) importFrom(dplyr,filter) importFrom(dplyr,if_else) +importFrom(dplyr,pull) importFrom(dplyr,select) importFrom(evalcast,weighted_interval_score) importFrom(jsonlite,read_json) @@ -35,10 +36,15 @@ importFrom(lubridate,make_date) importFrom(lubridate,month) importFrom(lubridate,year) importFrom(lubridate,ymd_hms) +importFrom(plyr,rbind.fill) importFrom(quantgen,quantile_lasso) importFrom(readr,read_csv) +importFrom(readr,write_csv) +importFrom(rlang,.data) +importFrom(stats,coef) importFrom(stats,nlm) importFrom(stats,pbeta) +importFrom(stats,predict) importFrom(stats,setNames) importFrom(stringr,str_interp) importFrom(tidyr,crossing) @@ -46,4 +52,5 @@ importFrom(tidyr,drop_na) importFrom(tidyr,fill) importFrom(tidyr,pivot_longer) importFrom(tidyr,pivot_wider) +importFrom(utils,head) importFrom(zoo,rollmeanr) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R index 249c43f84..a81b421d4 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R @@ -1,9 +1,10 @@ -#' Functions for Beta Prior Approach. -#' This is used only for the ratio prediction e.g. fraction of Covid claims, -#' percentage of positive tests. We assume that the ratio follows a beta distribution -#' that is day-of-week dependent. A quantile regression model is used first with lasso -#' penalty for supporting quantile estimation and then a non-linear minimization is used -#' for prior estimation. +## Functions for Beta Prior Approach. +## +## This is used only for the ratio prediction e.g. fraction of Covid claims, +## percentage of positive tests. We assume that the ratio follows a beta distribution +## that is day-of-week dependent. A quantile regression model is used first with lasso +## penalty for supporting quantile estimation and then a non-linear minimization is used +## for prior estimation. #' Sum of squared error #' @@ -13,11 +14,11 @@ delta <- function(fit, actual) sum((fit-actual)^2) #' Generate objection function #' @param theta parameters for the distribution in log scale +#' @param x vector of quantiles #' @param prob the expected probabilities +#' @param ... additional arguments #' #' @importFrom stats pbeta -#' -#' @export objective <- function(theta, x, prob, ...) { ab <- exp(theta) # Parameters are the *logs* of alpha and beta fit <- pbeta(x, ab[1], ab[2]) @@ -31,24 +32,24 @@ objective <- function(theta, x, prob, ...) 
{ #' @param train_data Data Frame for training #' @param prior_test_data Data Frame for testing #' @param dw column name to indicate which day of a week it is -#' @param taus vector of considered quantiles -#' @param params_list the list of parameters for training +#' @template taus-template +#' @param covariates character vector of column names serving as the covariates for the model #' @param response the column name of the response variable #' @param lp_solver the lp solver used in Quantgen -#' @param labmda the level of lasso penalty +#' @param lambda the level of lasso penalty #' @param start the initialization of the the points in nlm -#' @param base_pseudo_denum the pseudo counts added to denominator if little data for training +#' @param base_pseudo_denom the pseudo counts added to denominator if little data for training #' @param base_pseudo_num the pseudo counts added to numerator if little data for training #' -#' @importFrom stats nlm +#' @importFrom stats nlm predict #' @importFrom dplyr %>% filter #' @importFrom quantgen quantile_lasso -est_priors <- function(train_data, prior_test_data, cov, taus, - params_list, response, lp_solver, lambda, +est_priors <- function(train_data, prior_test_data, dw, taus, + covariates, response, lp_solver, lambda, start=c(0, log(10)), base_pseudo_denom=1000, base_pseudo_num=10){ - sub_train_data <- train_data %>% filter(train_data[[cov]] == 1) - sub_test_data <- prior_test_data %>% filter(prior_test_data[[cov]] == 1) + sub_train_data <- train_data %>% filter(train_data[[dw]] == 1) + sub_test_data <- prior_test_data %>% filter(prior_test_data[[dw]] == 1) if (nrow(sub_test_data) == 0) { pseudo_denom <- base_pseudo_denom pseudo_num <- base_pseudo_num @@ -57,10 +58,10 @@ est_priors <- function(train_data, prior_test_data, cov, taus, quantiles <- list() for (idx in 1:length(taus)){ tau <- taus[idx] - obj <- quantile_lasso(as.matrix(sub_train_data[params_list]), + obj <- quantile_lasso(as.matrix(sub_train_data[covariates]), sub_train_data[response], tau = tau, lambda = lambda, standardize = FALSE, lp_solver = lp_solver) - y_hat_all <- as.numeric(predict(obj, newx = as.matrix(sub_test_data[params_list]))) + y_hat_all <- as.numeric(predict(obj, newx = as.matrix(sub_test_data[covariates]))) quantiles[idx] <- exp(mean(y_hat_all, na.rm=TRUE)) # back to the actual scale } quantiles <- as.vector(unlist(quantiles)) @@ -85,13 +86,13 @@ est_priors <- function(train_data, prior_test_data, cov, taus, #' @param denom_col the column name for the denominator #' #' @export -ratio_adj_with_pseudo <- function(data, cov, pseudo_num, pseudo_denom, num_col, denom_col){ - if (is.null(cov)){ +ratio_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, denom_col){ + if (is.null(dw)){ num_adj <- data[[num_col]] + pseudo_num denom_adj <- data[[denom_col]] + pseudo_denom } else { - num_adj <- data[[num_col]][data[[cov]] == 1] + pseudo_num - denom_adj <- data[data[[cov]] == 1, denom_col] + pseudo_denom + num_adj <- data[[num_col]][data[[dw]] == 1] + pseudo_num + denom_adj <- data[data[[dw]] == 1, denom_col] + pseudo_denom } return (num_adj / denom_adj) } @@ -101,9 +102,11 @@ ratio_adj_with_pseudo <- function(data, cov, pseudo_num, pseudo_denom, num_col, #' @param train_data training data #' @param test_data testing data #' @param prior_test_data testing data for the lag -1 model +#' @template taus-template +#' @param lp_solver the lp solver used in Quantgen #' #' @export -ratio_adj <- function(train_data, test_data, prior_test_data){ +ratio_adj <- 
function(train_data, test_data, prior_test_data, taus = TAUS, lp_solver = LP_SOLVER){ train_data$value_target <- ratio_adj_with_pseudo(train_data, NULL, 1, 100, "value_target_num", "value_target_denom") train_data$value_7dav <- ratio_adj_with_pseudo(train_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") test_data$value_target <- ratio_adj_with_pseudo(test_data, NULL, 1, 100, "value_target_num", "value_target_denom") @@ -114,7 +117,7 @@ ratio_adj <- function(train_data, test_data, prior_test_data){ test_data$log_value_target <- log(test_data$value_target) prior_test_data$log_value_7dav <- log(prior_test_data$value_7dav) - pre_params_list = c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref", + pre_covariates = c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref", "log_value_7dav") #For training train_data$value_raw = NaN @@ -131,7 +134,7 @@ ratio_adj <- function(train_data, test_data, prior_test_data){ for (cov in c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref", "Sun_ref")){ pseudo_counts <- est_priors(train_data, prior_test_data, cov, taus, - pre_params_list, "log_value_target", lp_solver, lambda=0.1) + pre_covariates, "log_value_target", lp_solver, lambda=0.1) pseudo_denum = pseudo_counts[1] + pseudo_counts[2] pseudo_num = pseudo_counts[1] # update current data @@ -166,4 +169,4 @@ ratio_adj <- function(train_data, test_data, prior_test_data){ test_data$log_7dav_slope = test_data$log_value_7dav - test_data$log_value_prev_7dav return (list(train_data, test_data)) -} \ No newline at end of file +} diff --git a/Backfill_Correction/delphiBackfillCorrection/R/constants.R b/Backfill_Correction/delphiBackfillCorrection/R/constants.R index 717fb6312..298bb18a1 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/constants.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/constants.R @@ -23,9 +23,9 @@ SQRTSCALE <-c('sqrty0', 'sqrty1', "sqrty2") LOG_LAG <-"inv_log_lag" # Dates -WEEKDAYS_ABBR <- c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat") -WEEK_ISSUES <- c("W1_issue", "W2_issue", "W3_issue") -TODAY <-Sys.Date() +WEEKDAYS_ABBR <- c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat") # wd +WEEK_ISSUES <- c("W1_issue", "W2_issue", "W3_issue") # wm +TODAY <- Sys.Date() INDICATORS_AND_SIGNALS <- tribble( ~indicator, ~signal, ~name_suffix, ~value_type, ~sub_dir, diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index 6190b0e12..c6a556f2e 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -4,37 +4,55 @@ #' #' @importFrom arrow read_parquet #' @importFrom dplyr select %>% +#' @importFrom rlang .data #' #' @export -read_data <- function(path){ - df <- read_parquet(path, as_data_frame = TRUE) %>% - select(-`__index_level_0__`) +read_data <- function(path) { + df <- read_parquet(path, as_data_frame = TRUE) %>% + ## TODO make this more robust + select(-.data$`__index_level_0__`) return (df) } #' Export the result to customized directory #' -#' @param test_data test data with prediction result -#' @param coef_data data frame with the estimated coefficients +#' @param test_data test data containing prediction results +#' @param coef_data data frame containing the estimated coefficients #' @param export_dir export directory -#' @param geo_level geographical level, can be county or state -#' @param test_lag -#' +#' @template geo_level-template +#' @template test_lag-template +#' +#' @importFrom readr 
write_csv +#' #' @export export_test_result <- function(test_data, coef_data, export_dir, geo_level, test_lag) { - ## TODO - warning("test_lag arg not being used") - pred_output_dir = paste("prediction", geo_level, sep="_") - write.csv(test_data, paste(export_dir, pred_output_dir , ".csv", sep=""), row.names = FALSE) - - coef_output_dir = paste("coefs", geo_level, sep="_") - write.csv(test_data, paste(export_dir, coef_output_dir , ".csv", sep=""), row.names = FALSE) + ## TODO why not being used? Probably want test_lag in output name + warning("test_lag arg ", test_lag, " not being used") + + pred_output_dir = paste("prediction", geo_level, ".csv", sep="_") + write_csv(test_data, file.path(export_dir, pred_output_dir)) + coef_output_dir = paste("coefs", geo_level, ".csv", sep="_") + write_csv(test_data, file.path(export_dir, coef_output_dir)) } #' List valid input files. +#' +#' @template indicator-template +#' @template signal-template +#' @template geo_level-template +#' @template params-template +#' @param sub_dir string specifying the indicator-specific directory within +#' the general input directory `params$data_path` get_files_list <- function(indicator, signal, geo_level, params, sub_dir = "") { + # Make sure we're reading in both 4-week rollup and daily files. + if (!is.null(sub_dir) && sub_dir != "") { + data_path <- paste(params$data_path, sub_dir, sep="_") + } else { + data_path <- params$data_path + } + # Convert input_group into file names. daily_pattern <- create_name_pattern( indicator, signal, geo_level, "daily" @@ -43,19 +61,12 @@ get_files_list <- function(indicator, signal, geo_level, params, sub_dir = "") { indicator, signal, geo_level, "rollup" ) - # Make sure we're reading in both 4-week rollup and daily files. - if (!is.null(sub_dir) && sub_dir != "") { - data_path <- paste(params$data_path, sub_dir, sep="_") - } else { - data_path <- params$data_path - } - - daily_input_files <- list.files(data_path, pattern = daily_pattern) - rollup_input_files <- list.files(data_path, pattern = rollup_pattern) - + ## TODO: decide whether to use full path or just file name (may not be able to read in) # Filter files lists to only include those containing dates we need for training - daily_input_files <- subset_valid_files(daily_input_files, "daily", params) - rollup_input_files <- subset_valid_files(rollup_input_files, "rollup", params) + daily_input_files <- list.files(data_path, pattern = daily_pattern) %>% + subset_valid_files("daily", params) + rollup_input_files <- list.files(data_path, pattern = rollup_pattern) %>% + subset_valid_files("rollup", params) return(c(daily_input_files, rollup_input_files)) } @@ -64,6 +75,10 @@ get_files_list <- function(indicator, signal, geo_level, params, sub_dir = "") { #' #' Parse filenames to find included dates. Use different patterns if file #' includes daily or rollup (multiple days) data. +#' +#' @param files_list character vector of input files of a given `file_type` +#' @template file_type-template +#' @template params-template subset_valid_files <- function(files_list, file_type = c("daily", "rollup"), params) { file_type <- match.arg(file_type) date_format = "%Y%m%d" @@ -89,8 +104,8 @@ subset_valid_files <- function(files_list, file_type = c("daily", "rollup"), par ) ## TODO: start_date depends on if we're doing model training or just corrections. 
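## Aside (standalone sketch of the same filtering idea; the helper name and the
## YYYYMMDD regex are assumptions, not package code): keep only files whose
## embedded issue date falls inside the training window.
keep_recent_files <- function(files, training_days, ref_lag, today = Sys.Date()) {
  date_str <- sub(".*?([0-9]{8}).*", "\\1", files, perl = TRUE)
  issue_dates <- as.Date(date_str, format = "%Y%m%d")
  start_date <- today - training_days - ref_lag
  files[!is.na(issue_dates) & issue_dates >= start_date & issue_dates <= today - 1]
}
## usage sketch: keep_recent_files(list.files(data_path), params$training_days, params$ref_lag)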
- start_date <- today - params$training_days - params$ref_lag - end_date <- today - 1 + start_date <- TODAY - params$training_days - params$ref_lag + end_date <- TODAY - 1 # Only keep files with data that falls at least somewhat between the desired # start and end range dates. @@ -103,6 +118,11 @@ subset_valid_files <- function(files_list, file_type = c("daily", "rollup"), par #' Create pattern to match input files of a given type, signal, and geo level #' +#' @template indicator-template +#' @template signal-template +#' @template geo_level-template +#' @template file_type-template +#' #' @importFrom stringr str_interp create_name_pattern <- function(indicator, signal, geo_level, file_type = c("daily", "rollup")) { diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index fb282b0c4..9271a44ad 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -1,20 +1,23 @@ #' Get backfill-corrected estimates for a single signal + geo combination #' -#' @param df dataframe of input data containing a single indicator + signal + -#' level of geographic coverage. -#' @param value_type string describing signal type of "count" and "ratio". -#' @param geo_level string describing geo coverage of input data. "state" or -#' "county". If "county" is selected, only data from the 200 most populous -#' counties in the US (*not* the dataset) will be used. -#' @param params named list containing modeling and data settings. Must include -#' the following elements: `ref_lag`, `testing_window`, `test_dates`, -#' `training_days`, `num_col`, `taus`, `lambda`, `export_dir`, `lp_solver`, -#' and `data_path` (input dir). -#' @param refd_col string containing name of reference date field within `df`. -#' @param lag_col string containing name of lag field within `df`. +#' If "county" is selected for `geo_level`, only data from the 200 most populous +#' counties in the US (*not* the dataset) will be used. +#' +#' @template df-template +#' @template value_type-template +#' @template geo_level-template +#' @template params-template +#' @template refd_col-template +#' @template lag_col-template +#' @param signal_suffixes character vector specifying value column name +#' endings to be appended to standard value column names from +#' `params$num_col` and `params$denom_col`. Used for non-standard +#' value column names and when processing multiple signals from a +#' single input dataframe, as with `quidel`'s age buckets. 
#' #' @importFrom dplyr %>% filter #' @importFrom tidyr drop_na +#' @importFrom rlang .data #' #' @export run_backfill <- function(df, value_type, geo_level, params, @@ -29,7 +32,7 @@ run_backfill <- function(df, value_type, geo_level, params, # Build model for each location for (geo in geo_list) { - subdf <- df %>% filter(geo_value == geo) %>% filter(lag < params$ref_lag) + subdf <- df %>% filter(.data$geo_value == geo) %>% filter(.data$lag < params$ref_lag) min_refd <- min(subdf[[refd_col]]) max_refd <- max(subdf[[refd_col]]) subdf <- fill_rows(subdf, refd_col, lag_col, min_refd, max_refd) @@ -65,25 +68,24 @@ run_backfill <- function(df, value_type, geo_level, params, ) } combined_df <- add_params_for_dates(combined_df, refd_col, lag_col) - test_date_list <- get_test_dates(combined_df, params$test_dates) - for (test_date in test_date_list){ + for (test_date in params$test_dates) { geo_train_data = combined_df %>% - filter(issue_date < test_date) %>% - filter(target_date <= test_date) %>% - filter(target_date > test_date - params$training_days) %>% + filter(.data$issue_date < test_date) %>% + filter(.data$target_date <= test_date) %>% + filter(.data$target_date > test_date - params$training_days) %>% drop_na() geo_test_data = combined_df %>% - filter(issue_date >= test_date) %>% - filter(issue_date < test_date + params$testing_window) %>% + filter(.data$issue_date >= test_date) %>% + filter(.data$issue_date < test_date + params$testing_window) %>% drop_na() if (nrow(geo_test_data) == 0) next if (nrow(geo_train_data) <= 200) next if (value_type == "ratio"){ geo_prior_test_data = combined_df %>% - filter(issue_date > test_date - 7) %>% - filter(issue_date <= test_date) + filter(.data$issue_date > test_date - 7) %>% + filter(.data$issue_date <= test_date) updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) geo_train_data <- updated_data[[1]] @@ -100,13 +102,14 @@ run_backfill <- function(df, value_type, geo_level, params, test_data <- updated_data[[2]] sqrtscale <- updated_data[[3]] - covariates <- list(y7dav, wd, wd2, wm, slope, sqrtscale) - params_list <- c(yitl, as.vector(unlist(covariates))) + ## TODO what is wd2? + covariates <- list(Y7DAV, WEEKDAYS_ABBR, wd2, WEEK_ISSUES, SLOPE, SQRTSCALE) + params_list <- c(YITL, as.vector(unlist(covariates))) # Model training and testing prediction_results <- model_training_and_testing( train_data, test_data, params$taus, params_list, - params$lp_solver, params$lambda, test_date + params$lp_solver, params$lambda, test_date, geo ) test_data <- prediction_results[[1]] coefs <- prediction_results[[2]] @@ -123,13 +126,17 @@ run_backfill <- function(df, value_type, geo_level, params, #' Perform backfill correction on all desired signals and geo levels #' +#' @template params-template +#' #' @importFrom dplyr bind_rows #' #' @export -main <- function(params, ...){ +main <- function(params){ + ## TODO may need more args, not sure + # Load indicator x signal groups. Combine with params$geo_level to get all # possible geo x signal combinations. - groups <- merge(indicators_and_signals, data.frame(geo_level = params$geo_level)) + groups <- merge(INDICATORS_AND_SIGNALS, data.frame(geo_level = params$geo_level)) # Loop over every indicator + signal + geo type combination. 
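## Aside (toy illustration, not part of the patch; indicator and signal values
## are made up): merge() on data frames with no shared columns returns their
## full cross product, which is how one row per indicator x signal x geo level
## is produced. Note that `for (x in a_data_frame)` loops over *columns*, so
## row-wise iteration is usually written with an index, e.g.
## `for (i in seq_len(nrow(groups))) { input_group <- groups[i, ] }`.
groups_demo <- merge(
  data.frame(indicator = "toy_indicator", signal = "toy_signal"),
  data.frame(geo_level = c("state", "county"))
)
groups_demo  # two rows: one per geo level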
for (input_group in groups) { @@ -157,7 +164,9 @@ main <- function(params, ...){ } # Check data type and required columns - validity_checks(input_data, input_group$value_type) + ## TODO num and denom names need suffixes to be check properly + result <- validity_checks(input_data, input_group$value_type, params$num_col, params$denom_col) + input_data <- result[["df"]] # Check available training days training_days_check(input_data$issue_date, params$training_days) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index d668a0dd2..0bdce5304 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -1,9 +1,11 @@ #' Filtration for training and testing data with different lags #' -#' @param test_lag +#' @template test_lag-template #' @param geo_train_data training data for a certain location #' @param geo_test_data testing data for a certain location #' +#' @importFrom rlang .data +#' #' @export data_filteration <- function(test_lag, geo_train_data, geo_test_data){ if (test_lag <= 14){ @@ -20,11 +22,11 @@ data_filteration <- function(test_lag, geo_train_data, geo_test_data){ test_lag_pad2=9 } train_data = geo_train_data %>% - filter(lag >= test_lag-test_lag_pad ) %>% - filter(lag <= test_lag+test_lag_pad ) + filter(.data$lag >= test_lag-test_lag_pad ) %>% + filter(.data$lag <= test_lag+test_lag_pad ) test_data = geo_test_data %>% - filter(lag >= test_lag-test_lag_pad1) %>% - filter(lag <= test_lag+test_lag_pad2) + filter(.data$lag >= test_lag-test_lag_pad1) %>% + filter(.data$lag <= test_lag+test_lag_pad2) return (list(train_data, test_data)) } @@ -34,28 +36,32 @@ data_filteration <- function(test_lag, geo_train_data, geo_test_data){ #' #' @param train_data Data frame for training #' @param test_data Data frame for testing -#' @param taus vector of considered quantiles -#' @param params_list the list of column names serving as the covariates +#' @template taus-template +#' @param covariates list of column names serving as the covariates for the model #' @param lp_solver the lp solver used in Quantgen #' @param lambda the level of lasso penalty -#' @param test_date as.Date +#' @param test_date Date object representing test date +#' @param geo string specifying the name of the geo region (e.g. 
FIPS +#' code for counties) +#' +#' @importFrom stats predict coef #' #' @export -model_training_and_testing <- function(train_data, test_data, taus, params_list, - lp_solver, lambda, test_date){ +model_training_and_testing <- function(train_data, test_data, taus, covariates, + lp_solver, lambda, test_date, geo) { success = 0 coefs_result = list() - coef_list = c("intercept", paste(params_list, '_coef', sep='')) + coef_list = c("intercept", paste(covariates, '_coef', sep='')) for (tau in taus){ #options(error=NULL) tryCatch( expr = { # Quantile regression - obj = quantile_lasso(as.matrix(train_data[params_list]), + obj = quantile_lasso(as.matrix(train_data[covariates]), train_data$log_value_target, tau = tau, lambda = lambda, standardize = FALSE, lp_solver = lp_solver) - y_hat_all = as.numeric(predict(obj, newx = as.matrix(test_data[params_list]))) + y_hat_all = as.numeric(predict(obj, newx = as.matrix(test_data[covariates]))) test_data[paste0("predicted_tau", as.character(tau))] = y_hat_all coefs_result[[success+1]] = coef(obj) @@ -77,10 +83,10 @@ model_training_and_testing <- function(train_data, test_data, taus, params_list, #' The WIS score calculation is based on the weighted_interval_score function #' from the `evalcast` package from Delphi #' -#' @param test_data multiple columns for the prediction results of different -#' quantiles. Each row represents an update with certain (reference_date, -#' issue_date, location) combination. -#' @param taus vector of quantiles interested +#' @param test_data dataframe with a column containing the prediction results of +#' each requested quantile. Each row represents an update with certain +#' (reference_date, issue_date, location) combination. +#' @template taus-template #' #' @importFrom evalcast weighted_interval_score #' @@ -102,4 +108,4 @@ evl <- function(test_data, taus){ test_data$wis_exp = mapply(weighted_interval_score, taus_list, predicted_trans_exp, 0) return (test_data) -} \ No newline at end of file +} diff --git a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R index 1673b85bb..9e9801afd 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R @@ -9,12 +9,12 @@ #' Re-index, fill na, make sure all reference date have enough rows for updates -#' @param df Data Frame of aggregated counts within a single location -#' reported for each reference date and issue date. -#' @param refd_col column name for the column of reference date -#' @param lag_col column name for the column of lag +#' @template df-template +#' @template refd_col-template +#' @template lag_col-template #' @param min_refd the earliest reference date considered in the data #' @param max_refd the latest reference date considered in the data +#' @param ref_lag max lag to use for training #' #' @return df_new Data Frame with filled rows for missing lags #' @@ -36,11 +36,10 @@ fill_rows <- function(df, refd_col, lag_col, min_refd, max_refd, ref_lag = REF_L #' previous reports exist for issue date D_p < D, all the dates between #' [D_p, D] are filled with with the reported value on date D_p. If there is #' no update for any previous issue date, fill in with 0. -#' @param df Data Frame of aggregated counts within a single location -#' reported for each reference date and issue date. 
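## Aside (toy sketch of the forward-fill rule described above, not the package
## function; column names follow the package conventions): within one reference
## date, a missing issue inherits the most recent earlier report, and 0 is used
## before the first report.
library(dplyr)
library(tidyr)
toy <- data.frame(
  time_value = as.Date("2022-01-01"),
  issue_date = as.Date("2022-01-01") + 0:4,
  value_raw  = c(NA, 3, NA, NA, 7)
)
toy %>%
  arrange(issue_date) %>%
  fill(value_raw, .direction = "down") %>%
  mutate(value_raw = replace_na(value_raw, 0))   # 0, 3, 3, 3, 7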
-#' @param value_col column name for the column of counts -#' @param refd_col column name for the column of reference date -#' @param lag_col column name for the column of lag +#' @template df-template +#' @template value_col-template +#' @template refd_col-template +#' @template lag_col-template #' #' @importFrom tidyr fill pivot_wider pivot_longer #' @importFrom dplyr %>% everything select @@ -69,7 +68,7 @@ fill_missing_updates <- function(df, value_col, refd_col, lag_col) { #' The 7dav for date D reported on issue date D_i is the average from D-7 to D-1 #' @param pivot_df Data Frame where the columns are issue dates and the rows are #' reference dates -#' @param refd_col column name for the column of reference date +#' @template refd_col-template #' #' @importFrom zoo rollmeanr #' @@ -88,10 +87,9 @@ get_7dav <- function(pivot_df, refd_col){ #' Used for data shifting in terms of reference date #' -#' @param df Data Frame of aggregated counts within a single location -#' reported for each reference date and issue date. +#' @template df-template #' @param n_day number of days to be shifted -#' @param refd_col column name for the column of reference date +#' @template refd_col-template #' #' @export add_shift <- function(df, n_day, refd_col){ @@ -102,15 +100,14 @@ add_shift <- function(df, n_day, refd_col){ #' Add one hot encoding for day of a week info in terms of reference #' and issue date #' -#' @param df Data Frame of aggregated counts within a single location -#' reported for each reference date and issue date. +#' @template df-template #' @param wd vector of days of a week #' @param time_col column used for the date, can be either reference date or #' issue date #' @param suffix suffix added to indicate which kind of date is used #' #' @export -add_dayofweek <- function(df, wd = weekdays_abbr, time_col, suffix){ +add_dayofweek <- function(df, wd = WEEKDAYS_ABBR, time_col, suffix){ dayofweek <- as.numeric(format(df[[time_col]], format="%u")) for (i in 1:6){ df[, paste0(wd[i], suffix)] <- as.numeric(dayofweek == i) @@ -122,13 +119,14 @@ add_dayofweek <- function(df, wd = weekdays_abbr, time_col, suffix){ } #' Get week of a month info according to a date +#' #' All the dates on or before the ith Sunday but after the (i-1)th Sunday #' is considered to be the ith week. Notice that the dates in the 5th week #' this month are actually in the same week with the dates in the 1st week #' next month and those dates are sparse. Thus, we assign the dates in the #' 5th week to the 1st week. #' -#' @param date as.Date +#' @param date Date object #' #' @importFrom lubridate make_date year month day #' @@ -143,14 +141,13 @@ get_weekofmonth <- function(date){ #' Add one hot encoding for week of a month info in terms of issue date #' -#' @param df Data Frame of aggregated counts within a single location -#' reported for each reference date and issue date. 
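## Aside (standalone sketch of the encoding idea, not the package function):
## one-hot day-of-week indicators from a Date column, Monday through Saturday,
## with Sunday left as the implicit reference level -- the same pattern used
## for the week-of-month indicators.
dates <- seq(as.Date("2022-01-03"), as.Date("2022-01-09"), by = "day")
dow <- as.numeric(format(dates, "%u"))              # 1 = Monday ... 7 = Sunday
onehot <- sapply(1:6, function(i) as.numeric(dow == i))
colnames(onehot) <- c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat")
cbind(data.frame(date = dates), onehot)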
+#' @template df-template #' @param wm vector of weeks of a month -#' @param time_col column used for the date, can be either reference date or -#' issue date +#' @param time_col string specifying name of column used for the date, +#' can be either reference date or issue date #' #' @export -add_weekofmonth <- function(df, wm = week_issues, time_col){ +add_weekofmonth <- function(df, wm = WEEK_ISSUES, time_col){ weekofmonth <- get_weekofmonth(df[[time_col]]) for (i in 1:3){ df[, paste0(wm[i])] <- as.numeric(weekofmonth == i) @@ -160,18 +157,17 @@ add_weekofmonth <- function(df, wm = week_issues, time_col){ #' Add 7dav and target to the data #' Target is the updates made ref_lag days after the first release -#' @param df Data Frame of aggregated counts within a single location -#' reported for each reference date and issue date. -#' @param value_col column name for the column of raw value -#' @param refd_col column name for the column of reference date -#' @param lag_col column name for the column of lag +#' @template df-template +#' @template value_col-template +#' @template refd_col-template +#' @template lag_col-template +#' @param ref_lag max lag to use for training #' #' @importFrom dplyr %>% #' @importFrom tidyr pivot_wider drop_na #' #' @export -add_7davs_and_target <- function(df, value_col, refd_col, lag_col, ref_lag){ - +add_7davs_and_target <- function(df, value_col, refd_col, lag_col, ref_lag = REF_LAG){ df$issue_date <- df[[refd_col]] + df[[lag_col]] pivot_df <- df[order(df$issue_date, decreasing=FALSE), ] %>% pivot_wider(id_cols=refd_col, names_from="issue_date", @@ -208,28 +204,29 @@ add_7davs_and_target <- function(df, value_col, refd_col, lag_col, ref_lag){ } #' Add params related to date +#' #' Target is the updates made ref_lag days after the first release -#' @param df Data Frame of aggregated counts within a single location -#' reported for each reference date and issue date. 
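## Aside (toy example of the reshaping step in add_7davs_and_target above, not
## package code; the data are made up): pivot the long (reference date, lag,
## value) records into one column per issue date before computing 7-day
## averages and the target.
library(dplyr)
library(tidyr)
long <- data.frame(
  time_value = as.Date("2022-01-01") + c(0, 0, 0, 1, 1, 2),
  lag        = c(0, 1, 2, 0, 1, 0),
  value_raw  = c(4, 6, 7, 3, 5, 8)
)
long$issue_date <- long$time_value + long$lag
pivot_wider(long, id_cols = time_value, names_from = issue_date, values_from = value_raw)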
-#' @param refd_col column name for the column of reference date -#' @param lag_col column name for the column of lag -add_params_for_dates <- function(backfill_df, refd_col, lag_col){ +#' +#' @template df-template +#' @template refd_col-template +#' @template lag_col-template +add_params_for_dates <- function(df, refd_col, lag_col){ # Add columns for day-of-week effect - backfill_df <- add_dayofweek(backfill_df, wd, refd_col, "_ref") - backfill_df <- add_dayofweek(backfill_df, wd, "issue_date", "_issue") + df <- add_dayofweek(df, WEEKDAYS_ABBR, refd_col, "_ref") + df <- add_dayofweek(df, WEEKDAYS_ABBR, "issue_date", "_issue") # Add columns for week-of-month effect - backfill_df <- add_weekofmonth(backfill_df, wm, "issue_date") + df <- add_weekofmonth(df, WEEK_ISSUES, "issue_date") - return (as.data.frame(backfill_df)) + return (as.data.frame(df)) } #' Add columns to indicate the scale of value at square root level #' #' @param train_data Data Frame for training #' @param test_data Data Frame for testing -#' @param value_col the column name of the considered value -#' @param the maximum value in the training data at square root level +#' @param max_raw the maximum value in the training data at square root level +#' @template value_col-template add_sqrtscale <- function(train_data, test_data, max_raw, value_col){ sqrtscale = c() sub_max_raw = sqrt(max(train_data$value_raw)) / 2 @@ -250,5 +247,3 @@ add_sqrtscale <- function(train_data, test_data, max_raw, value_col){ return (list(train_data, test_data, sqrtscale)) } - - diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R index da9c4f849..a500f2a1a 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R @@ -1,11 +1,29 @@ #' Corrected estimates from a single local signal #' +#' @template df-template +#' @param export_dir path to save output +#' @template taus-template +#' @param test_date_list Date vector of dates to make predictions for +#' @param test_lags integer vector of number of days ago to predict for +#' @param value_cols character vector of numerator and/or denominator field names +#' @param training_days integer number of days to use for training +#' @param testing_window the testing window used for saving the runtime. 
Could +#' set it to be 1 if time allows +#' @param ref_lag max lag to use for training +#' @template value_type-template +#' @param lambda the level of lasso penalty +#' @param lp_solver the lp solver used in Quantgen +#' #' @importFrom dplyr %>% filter +#' @importFrom plyr rbind.fill #' @importFrom tidyr drop_na -run_backfill_local <- function(df, export_dir, taus, - test_date_list, test_lags, - value_cols, training_days, testing_window, - ref_lag, value_type, lambda){ +#' @importFrom rlang .data +#' +#' @export +run_backfill_local <- function(df, export_dir, taus = TAUS, + test_date_list, test_lags = TEST_LAGS, + value_cols, training_days = TRAINING_DAYS, testing_window = TESTING_WINDOW, + ref_lag = REF_LAG, value_type, lambda = LAMBDA, lp_solver = LP_SOLVER) { # Get all the locations that are considered geo_list <- unique(df[df$time_value %in% test_date_list, "geo_value"]) # Build model for each location @@ -14,7 +32,7 @@ run_backfill_local <- function(df, export_dir, taus, coef_df_list = list() for (geo in geo_list) { - subdf <- df %>% filter(geo_value == geo) %>% filter(lag < ref_lag) + subdf <- df %>% filter(.data$geo_value == geo) %>% filter(.data$lag < ref_lag) min_refd <- min(subdf$time_value) max_refd <- max(subdf$time_value) subdf <- fill_rows(subdf, "time_value", "lag", min_refd, max_refd) @@ -33,26 +51,23 @@ run_backfill_local <- function(df, export_dir, taus, suffixes=c("_num", "_denom")) } combined_df <- add_params_for_dates(combined_df, "time_value", "lag") - if (missing(test_date_list) || is.null(test_date_list)) { - test_date_list <- get_test_dates(combined_df, params$test_dates) - } - for (test_date in test_date_list){ + for (test_date in test_date_list) { geo_train_data = combined_df %>% - filter(issue_date < test_date) %>% - filter(target_date <= test_date) %>% - filter(target_date > test_date - training_days) %>% + filter(.data$issue_date < test_date) %>% + filter(.data$target_date <= test_date) %>% + filter(.data$target_date > test_date - training_days) %>% drop_na() geo_test_data = combined_df %>% - filter(issue_date >= test_date) %>% - filter(issue_date < test_date+testing_window) %>% + filter(.data$issue_date >= test_date) %>% + filter(.data$issue_date < test_date+testing_window) %>% drop_na() if (nrow(geo_test_data) == 0) next if (nrow(geo_train_data) <= 200) next if (value_type == "fraction"){ geo_prior_test_data = combined_df %>% - filter(issue_date > test_date-7) %>% - filter(issue_date <= test_date) + filter(.data$issue_date > test_date-7) %>% + filter(.data$issue_date <= test_date) updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) geo_train_data <- updated_data[[1]] @@ -70,12 +85,17 @@ run_backfill_local <- function(df, export_dir, taus, test_data <- updated_data[[2]] sqrtscale <- updated_data[[3]] - covariates <- list(y7dav, paste0(wd, "_ref"), paste0(wd, "_issue"), wm, slope, sqrtscale) - params_list <- c(yitl, as.vector(unlist(covariates))) + covariates <- list( + Y7DAV, paste0(WEEKDAYS_ABBR, "_ref"), paste0(WEEKDAYS_ABBR, "_issue"), + WEEK_ISSUES, SLOPE, SQRTSCALE + ) + params_list <- c(YITL, as.vector(unlist(covariates))) # Model training and testing prediction_results <- model_training_and_testing( - train_data, test_data, taus, params_list, lp_solver, lambda, test_date) + train_data, test_data, taus, params_list, lp_solver, + lambda, test_date, geo + ) test_data <- prediction_results[[1]] coefs <- prediction_results[[2]] test_data <- evl(test_data, taus) @@ -98,19 +118,35 @@ run_backfill_local <- function(df, export_dir, 
taus, } #' Main function to correct a single local signal -#' +#' +#' @param data_path path to the input data files +#' @param export_dir path to save output +#' @param test_start_date Date to start making predictions on +#' @param test_end_date Date to stop making predictions on +#' @param training_days integer number of days to use for training +#' @param testing_window the testing window used for saving the runtime. Could +#' set it to be 1 if time allows +#' @template value_type-template +#' @param num_col name of numerator column in the input dataframe +#' @param denom_col name of denominator column in the input dataframe +#' @param lambda the level of lasso penalty +#' @param ref_lag max lag to use for training +#' @param lp_solver the lp solver used in Quantgen +#' #' @importFrom readr read_csv #' #' @export main_local <- function(data_path, export_dir, - test_start_date, test_end_date, traning_days, testing_window, + test_start_date, test_end_date, training_days = TRAINING_DAYS, testing_window = TESTING_WINDOW, value_type, num_col, denom_col, - lambda, ref_lag){ + lambda = LAMBDA, ref_lag = REF_LAG, lp_solver = LP_SOLVER){ # Check input data df = read_csv(data_path) # Check data type and required columns - validity_checks(df, value_type) + result <- validity_checks(df, value_type, num_col, denom_col) + df <- result[["df"]] + value_cols <- result[["value_cols"]] # Get test date list according to the test start date if (is.null(test_start_date)){ @@ -130,8 +166,8 @@ main_local <- function(data_path, export_dir, # Check available training days training_days_check(df$issue_date, training_days) - run_backfill_local(df, export_dir, taus, - test_date_list, test_lags, + run_backfill_local(df, export_dir, TAUS, + test_date_list, TEST_LAGS, value_cols, training_days, testing_window, - ref_lag, value_type, lambda) + ref_lag, value_type, lambda, lp_solver) } diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index fcdc0f3f0..7fef7977d 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -19,14 +19,14 @@ #' params$denom_col: the column name for the counts of the denominator, e.g. the #' number of total claims #' params$geo_level: list("state", "county") -#' params$taus: ?? -#' params$lambda: ?? -#' params$export_dir: ?? 
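## Aside (hypothetical values, for illustration only): the kind of named list
## read_params is expected to return after parsing params.json. Field names
## mirror those referenced throughout the package; the values are made up.
params_example <- list(
  ref_lag = 60,
  training_days = 270,
  testing_window = 1,
  test_dates = seq(as.Date("2022-01-01"), as.Date("2022-01-07"), by = "day"),
  num_col = "num",
  denom_col = "den",
  geo_level = c("state", "county"),
  taus = c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99),
  lambda = 0.1,
  lp_solver = "gurobi",
  data_path = "./input",
  export_dir = "./output"
)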
+#' params$taus: vector of considered quantiles +#' params$lambda: the level of lasso penalty +#' params$export_dir: directory to save corrected data to #' params$lp_solver: LP solver to use in quantile_lasso(); "gurobi" or "glpk" #' -#' @param path path to the parameters file; if not present, will try to copy the file -#' "params.json.template" -#' @param template_path path to the template parameters file +#' @param path path to the parameters file; if not present, will try to copy the file +#' "params.json.template" +#' @param template_path path to the template parameters file #' #' @return a named list of parameters values #' @@ -37,17 +37,8 @@ read_params <- function(path = "params.json", template_path = "params.json.templ if (!file.exists(path)) file.copy(template_path, path) params <- read_json(path, simplifyVector = TRUE) - params$num_filter <- if_else(params$debug, 2L, 100L) - params$s_weight <- if_else(params$debug, 1.00, 0.01) - params$s_mix_coef <- if_else(params$debug, 0.05, 0.05) - - params$start_time <- ymd_hms( - sprintf("%s 00:00:00", params$start_date), tz = tz_to - ) - params$end_time <- ymd_hms( - sprintf("%s 23:59:59", params$end_date), tz = tz_to - ) - + ## TODO set default parameter values if not specified + params$parallel_max_cores <- if_else( is.null(params$parallel_max_cores), .Machine$integer.max, @@ -59,7 +50,7 @@ read_params <- function(path = "params.json", template_path = "params.json.templ #' Create directory if not already existing #' -#' @param path character vector giving the directory to create +#' @param path string specifying a directory to create #' #' @export create_dir_not_exist <- function(path) @@ -68,7 +59,12 @@ create_dir_not_exist <- function(path) } #' Check input data for validity -validity_checks <- function(df, value_type) { +#' +#' @template df-template +#' @template value_type-template +#' @param num_col name of numerator column in the input dataframe +#' @param denom_col name of denominator column in the input dataframe +validity_checks <- function(df, value_type, num_col, denom_col) { # Check data type and required columns if (value_type == "count"){ if (num_col %in% colnames(df)) {value_cols=c(num_col)} @@ -78,12 +74,12 @@ validity_checks <- function(df, value_type) { } } else if (value_type == "fraction"){ value_cols = c(num_col, denom_col) - if ( any(!value_cols %in% colnames(df)) ){ + if ( !any(value_cols %in% colnames(df)) ){ stop("No valid column name detected for the fraction values!") } } - # time_value must exists in the dataset + # time_value must exist in the dataset if ( !"time_value" %in% colnames(df) ){stop("No column for the reference date")} # issue_date or lag should exist in the dataset @@ -93,10 +89,15 @@ validity_checks <- function(df, value_type) { } else {stop("No issue_date or lag exists!")} } + + return(list(df = df, value_cols = value_cols)) } #' Check available training days -training_days_check <- function(issue_date, training_days) { +#' +#' @param issue_date contents of input data's `issue_date` column +#' @param training_days integer number of days to use for training +training_days_check <- function(issue_date, training_days = TRAINING_DAYS) { valid_training_days = as.integer(max(issue_date) - min(issue_date)) if (training_days > valid_training_days){ warning(sprintf("Only %d days are available at most for training.", valid_training_days)) @@ -104,6 +105,8 @@ training_days_check <- function(issue_date, training_days) { } #' Subset list of counties to those included in the 200 most populous in the US +#' 
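## Aside (toy illustration of the column checks above; names and data are made
## up): what validity_checks guards against for the two value types.
df_counts   <- data.frame(time_value = as.Date("2022-01-01"), lag = 1, num = 5)
df_fraction <- data.frame(time_value = as.Date("2022-01-01"), lag = 1)
"num" %in% colnames(df_counts)                    # TRUE: counts can be modeled
all(c("num", "den") %in% colnames(df_fraction))   # FALSE: a fraction run should stop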
+#' @param geos character vector of county FIPS codes filter_counties <- function(geos) { top_200_geos <- get_populous_counties() return(intersect(geos, top_200_geos)) @@ -111,15 +114,17 @@ filter_counties <- function(geos) { #' Subset list of counties to those included in the 200 most populous in the US #' -#' @importFrom dplyr select %>% arrange desc +#' @importFrom dplyr select %>% arrange desc pull +#' @importFrom rlang .data +#' @importFrom utils head get_populous_counties <- function() { return( covidcast::county_census %>% - select(pop = POPESTIMATE2019, fips = FIPS) %>% + select(pop = .data$POPESTIMATE2019, fips = .data$FIPS) %>% # Drop megacounties (states) - filter(!endsWith(fips, "000")) %>% - arrange(desc(pop)) %>% - pull(fips) %>% + filter(!endsWith(.data$fips, "000")) %>% + arrange(desc(.data$pop)) %>% + pull(.data$fips) %>% head(n=200) ) } diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/df-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/df-template.R new file mode 100644 index 000000000..4aa746f51 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/df-template.R @@ -0,0 +1,2 @@ +#' @param df Data Frame of aggregated counts within a single location +#' reported for each reference date and issue date. diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/file_type-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/file_type-template.R new file mode 100644 index 000000000..36c241abd --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/file_type-template.R @@ -0,0 +1,2 @@ +#' @param file_type string specifying time period coverage of input files. +#' Either "daily" or "rollup" diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/geo_level-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/geo_level-template.R new file mode 100644 index 000000000..778da39a4 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/geo_level-template.R @@ -0,0 +1,2 @@ +#' @param geo_level string describing geo coverage of input data. Either "state" +#' or "county". diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/indicator-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/indicator-template.R new file mode 100644 index 000000000..964cada2d --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/indicator-template.R @@ -0,0 +1,3 @@ +#' @param indicator string specifying the name of the indicator as used in +#' `parquet` input data filenames. One indicator can be associated +#' with multiple signals. diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/lag_col-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/lag_col-template.R new file mode 100644 index 000000000..b3e79f0fa --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/lag_col-template.R @@ -0,0 +1,2 @@ +#' @param lag_col string specifying name of lag field within +#' the input dataframe. diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/params-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/params-template.R new file mode 100644 index 000000000..3106660a9 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/params-template.R @@ -0,0 +1,4 @@ +#' @param params named list containing modeling and data settings. 
Must include +#' the following elements: `ref_lag`, `testing_window`, `test_dates`, +#' `training_days`, `num_col`, `taus`, `lambda`, `export_dir`, `lp_solver`, +#' and `data_path` (input dir). diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/refd_col-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/refd_col-template.R new file mode 100644 index 000000000..09644a4aa --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/refd_col-template.R @@ -0,0 +1,2 @@ +#' @param refd_col string specifying name of reference date field within +#' the input dataframe. diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal-template.R new file mode 100644 index 000000000..d87790af7 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal-template.R @@ -0,0 +1,3 @@ +#' @param signal string specifying the name of the signal as used in +#' `parquet` input data filenames. One indicator can be associated +#' with multiple signals. diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/taus-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/taus-template.R new file mode 100644 index 000000000..b383e35f8 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/taus-template.R @@ -0,0 +1,2 @@ +#' @param taus numeric vector of quantiles to be predicted. Values +#' must be between 0 and 1. diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/test_lag-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/test_lag-template.R new file mode 100644 index 000000000..bd26b3386 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/test_lag-template.R @@ -0,0 +1 @@ +#' @param test_lag integer number of days ago to predict for diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/value_col-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/value_col-template.R new file mode 100644 index 000000000..0cc922d14 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/value_col-template.R @@ -0,0 +1,2 @@ +#' @param value_col string specifying name of value (counts) field within +#' the input dataframe. diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/value_type-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/value_type-template.R new file mode 100644 index 000000000..07939a96e --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/value_type-template.R @@ -0,0 +1 @@ +#' @param value_type string describing signal type. Either "count" or "ratio". 
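## Aside (illustrative, not from this patch): how the man-roxygen templates
## above are consumed. A documentation block pulls a shared @param in with
## @template, and roxygen2 expands it when regenerating the .Rd files that
## follow. The function below is hypothetical.
#' Order revisions of a single location by reference date and lag
#'
#' @template df-template
#' @template refd_col-template
#' @template lag_col-template
#'
#' @export
toy_order_revisions <- function(df, refd_col, lag_col) {
  df[order(df[[refd_col]], df[[lag_col]]), ]
}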
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_7davs_and_target.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_7davs_and_target.Rd index 0dae9267c..25a0dee23 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/add_7davs_and_target.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_7davs_and_target.Rd @@ -5,17 +5,22 @@ \title{Add 7dav and target to the data Target is the updates made ref_lag days after the first release} \usage{ -add_7davs_and_target(df, value_col, refd_col, lag_col, ref_lag) +add_7davs_and_target(df, value_col, refd_col, lag_col, ref_lag = REF_LAG) } \arguments{ -\item{df}{Data Frame of aggregated counts within a single location +\item{df}{Data Frame of aggregated counts within a single location reported for each reference date and issue date.} -\item{value_col}{column name for the column of raw value} +\item{value_col}{string specifying name of value (counts) field within +the input dataframe.} -\item{refd_col}{column name for the column of reference date} +\item{refd_col}{string specifying name of reference date field within +the input dataframe.} -\item{lag_col}{column name for the column of lag} +\item{lag_col}{string specifying name of lag field within +the input dataframe.} + +\item{ref_lag}{max lag to use for training} } \description{ Add 7dav and target to the data diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd index 518480c88..8e4b338eb 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd @@ -5,10 +5,10 @@ \title{Add one hot encoding for day of a week info in terms of reference and issue date} \usage{ -add_dayofweek(df, wd = weekdays_abbr, time_col, suffix) +add_dayofweek(df, wd = WEEKDAYS_ABBR, time_col, suffix) } \arguments{ -\item{df}{Data Frame of aggregated counts within a single location +\item{df}{Data Frame of aggregated counts within a single location reported for each reference date and issue date.} \item{wd}{vector of days of a week} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_params_for_dates.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_params_for_dates.Rd index b48d8b36a..d9303d7d6 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/add_params_for_dates.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_params_for_dates.Rd @@ -2,20 +2,20 @@ % Please edit documentation in R/preprocessing.R \name{add_params_for_dates} \alias{add_params_for_dates} -\title{Add params related to date -Target is the updates made ref_lag days after the first release} +\title{Add params related to date} \usage{ -add_params_for_dates(backfill_df, refd_col, lag_col) +add_params_for_dates(df, refd_col, lag_col) } \arguments{ -\item{refd_col}{column name for the column of reference date} +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} -\item{lag_col}{column name for the column of lag} +\item{refd_col}{string specifying name of reference date field within +the input dataframe.} -\item{df}{Data Frame of aggregated counts within a single location -reported for each reference date and issue date.} +\item{lag_col}{string specifying name of lag field within +the input dataframe.} } \description{ -Add params related to date Target is the updates made ref_lag days after the first release } diff --git 
a/Backfill_Correction/delphiBackfillCorrection/man/add_shift.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_shift.Rd index a1947aa2e..d4adc5823 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/add_shift.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_shift.Rd @@ -7,12 +7,13 @@ add_shift(df, n_day, refd_col) } \arguments{ -\item{df}{Data Frame of aggregated counts within a single location +\item{df}{Data Frame of aggregated counts within a single location reported for each reference date and issue date.} \item{n_day}{number of days to be shifted} -\item{refd_col}{column name for the column of reference date} +\item{refd_col}{string specifying name of reference date field within +the input dataframe.} } \description{ Used for data shifting in terms of reference date diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd index 4b9b2d616..2dcf7e147 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd @@ -11,9 +11,10 @@ add_sqrtscale(train_data, test_data, max_raw, value_col) \item{test_data}{Data Frame for testing} -\item{value_col}{the column name of the considered value} +\item{max_raw}{the maximum value in the training data at square root level} -\item{the}{maximum value in the training data at square root level} +\item{value_col}{string specifying name of value (counts) field within +the input dataframe.} } \description{ Add columns to indicate the scale of value at square root level diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd index 0d7c80d5c..8202e10f9 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd @@ -4,16 +4,16 @@ \alias{add_weekofmonth} \title{Add one hot encoding for week of a month info in terms of issue date} \usage{ -add_weekofmonth(df, wm = week_issues, time_col) +add_weekofmonth(df, wm = WEEK_ISSUES, time_col) } \arguments{ -\item{df}{Data Frame of aggregated counts within a single location +\item{df}{Data Frame of aggregated counts within a single location reported for each reference date and issue date.} \item{wm}{vector of weeks of a month} -\item{time_col}{column used for the date, can be either reference date or -issue date} +\item{time_col}{string specifying name of column used for the date, +can be either reference date or issue date} } \description{ Add one hot encoding for week of a month info in terms of issue date diff --git a/Backfill_Correction/delphiBackfillCorrection/man/create_dir_not_exist.Rd b/Backfill_Correction/delphiBackfillCorrection/man/create_dir_not_exist.Rd index 245bb2084..1a9b887a5 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/create_dir_not_exist.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/create_dir_not_exist.Rd @@ -7,7 +7,7 @@ create_dir_not_exist(path) } \arguments{ -\item{path}{character vector giving the directory to create} +\item{path}{string specifying a directory to create} } \description{ Create directory if not already existing diff --git a/Backfill_Correction/delphiBackfillCorrection/man/create_name_pattern.Rd b/Backfill_Correction/delphiBackfillCorrection/man/create_name_pattern.Rd index 54deb96d5..dad0d76bb 100644 --- 
a/Backfill_Correction/delphiBackfillCorrection/man/create_name_pattern.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/create_name_pattern.Rd @@ -11,6 +11,21 @@ create_name_pattern( file_type = c("daily", "rollup") ) } +\arguments{ +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{geo_level}{string describing geo coverage of input data. Either "state" +or "county".} + +\item{file_type}{string specifying time period coverage of input files. +Either "daily" or "rollup"} +} \description{ Create pattern to match input files of a given type, signal, and geo level } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd b/Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd index e224b9b7b..bdea55b36 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd @@ -7,7 +7,7 @@ data_filteration(test_lag, geo_train_data, geo_test_data) } \arguments{ -\item{test_lag}{} +\item{test_lag}{integer number of days ago to predict for} \item{geo_train_data}{training data for a certain location} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/delta.Rd b/Backfill_Correction/delphiBackfillCorrection/man/delta.Rd index e51f197ec..7d1af25ca 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/delta.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/delta.Rd @@ -2,13 +2,7 @@ % Please edit documentation in R/beta_prior_estimation.R \name{delta} \alias{delta} -\title{Functions for Beta Prior Approach. -This is used only for the ratio prediction e.g. fraction of Covid claims, -percentage of positive tests. We assume that the ratio follows a beta distribution -that is day-of-week dependent. A quantile regression model is used first with lasso -penalty for supporting quantile estimation and then a non-linear minimization is used -for prior estimation. -Sum of squared error} +\title{Sum of squared error} \usage{ delta(fit, actual) } @@ -18,11 +12,5 @@ delta(fit, actual) \item{actual}{actual values} } \description{ -Functions for Beta Prior Approach. -This is used only for the ratio prediction e.g. fraction of Covid claims, -percentage of positive tests. We assume that the ratio follows a beta distribution -that is day-of-week dependent. A quantile regression model is used first with lasso -penalty for supporting quantile estimation and then a non-linear minimization is used -for prior estimation. Sum of squared error } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd b/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd index 87c6a96f0..87f7155c2 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd @@ -9,9 +9,9 @@ a certain day of a week} est_priors( train_data, prior_test_data, - cov, + dw, taus, - params_list, + covariates, response, lp_solver, lambda, @@ -25,23 +25,24 @@ est_priors( \item{prior_test_data}{Data Frame for testing} -\item{taus}{vector of considered quantiles} +\item{dw}{column name to indicate which day of a week it is} + +\item{taus}{numeric vector of quantiles to be predicted. 
Values +must be between 0 and 1.} -\item{params_list}{the list of parameters for training} +\item{covariates}{character vector of column names serving as the covariates for the model} \item{response}{the column name of the response variable} \item{lp_solver}{the lp solver used in Quantgen} -\item{start}{the initialization of the the points in nlm} - -\item{base_pseudo_num}{the pseudo counts added to numerator if little data for training} +\item{lambda}{the level of lasso penalty} -\item{dw}{column name to indicate which day of a week it is} +\item{start}{the initialization of the the points in nlm} -\item{labmda}{the level of lasso penalty} +\item{base_pseudo_denom}{the pseudo counts added to denominator if little data for training} -\item{base_pseudo_denum}{the pseudo counts added to denominator if little data for training} +\item{base_pseudo_num}{the pseudo counts added to numerator if little data for training} } \description{ Main function for the beta prior approach diff --git a/Backfill_Correction/delphiBackfillCorrection/man/evl.Rd b/Backfill_Correction/delphiBackfillCorrection/man/evl.Rd index c18067c6e..7da0bb7ad 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/evl.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/evl.Rd @@ -9,11 +9,12 @@ from the `evalcast` package from Delphi} evl(test_data, taus) } \arguments{ -\item{test_data}{multiple columns for the prediction results of different -quantiles. Each row represents an update with certain (reference_date, -issue_date, location) combination.} +\item{test_data}{dataframe with a column containing the prediction results of +each requested quantile. Each row represents an update with certain +(reference_date, issue_date, location) combination.} -\item{taus}{vector of quantiles interested} +\item{taus}{numeric vector of quantiles to be predicted. Values +must be between 0 and 1.} } \description{ Evaluation of the test results based on WIS score diff --git a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd index 7eb775780..47988a0c4 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd @@ -7,15 +7,16 @@ export_test_result(test_data, coef_data, export_dir, geo_level, test_lag) } \arguments{ -\item{test_data}{test data with prediction result} +\item{test_data}{test data containing prediction results} -\item{coef_data}{data frame with the estimated coefficients} +\item{coef_data}{data frame containing the estimated coefficients} \item{export_dir}{export directory} -\item{geo_level}{geographical level, can be county or state} +\item{geo_level}{string describing geo coverage of input data. 
Either "state" +or "county".} -\item{test_lag}{} +\item{test_lag}{integer number of days ago to predict for} } \description{ Export the result to customized directory diff --git a/Backfill_Correction/delphiBackfillCorrection/man/fill_missing_updates.Rd b/Backfill_Correction/delphiBackfillCorrection/man/fill_missing_updates.Rd index 8b93cdeb0..6318730ee 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/fill_missing_updates.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/fill_missing_updates.Rd @@ -10,14 +10,17 @@ no update for any previous issue date, fill in with 0.} fill_missing_updates(df, value_col, refd_col, lag_col) } \arguments{ -\item{df}{Data Frame of aggregated counts within a single location +\item{df}{Data Frame of aggregated counts within a single location reported for each reference date and issue date.} -\item{value_col}{column name for the column of counts} +\item{value_col}{string specifying name of value (counts) field within +the input dataframe.} -\item{refd_col}{column name for the column of reference date} +\item{refd_col}{string specifying name of reference date field within +the input dataframe.} -\item{lag_col}{column name for the column of lag} +\item{lag_col}{string specifying name of lag field within +the input dataframe.} } \description{ Get pivot table, filling NANs. If there is no update on issue date D but diff --git a/Backfill_Correction/delphiBackfillCorrection/man/fill_rows.Rd b/Backfill_Correction/delphiBackfillCorrection/man/fill_rows.Rd index 79ad1571e..e446e6e1d 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/fill_rows.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/fill_rows.Rd @@ -7,16 +7,20 @@ fill_rows(df, refd_col, lag_col, min_refd, max_refd, ref_lag = REF_LAG) } \arguments{ -\item{df}{Data Frame of aggregated counts within a single location +\item{df}{Data Frame of aggregated counts within a single location reported for each reference date and issue date.} -\item{refd_col}{column name for the column of reference date} +\item{refd_col}{string specifying name of reference date field within +the input dataframe.} -\item{lag_col}{column name for the column of lag} +\item{lag_col}{string specifying name of lag field within +the input dataframe.} \item{min_refd}{the earliest reference date considered in the data} \item{max_refd}{the latest reference date considered in the data} + +\item{ref_lag}{max lag to use for training} } \value{ df_new Data Frame with filled rows for missing lags diff --git a/Backfill_Correction/delphiBackfillCorrection/man/filter_counties.Rd b/Backfill_Correction/delphiBackfillCorrection/man/filter_counties.Rd index 0998af0f0..c4a731de5 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/filter_counties.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/filter_counties.Rd @@ -6,6 +6,9 @@ \usage{ filter_counties(geos) } +\arguments{ +\item{geos}{character vector of county FIPS codes} +} \description{ Subset list of counties to those included in the 200 most populous in the US } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/get_7dav.Rd b/Backfill_Correction/delphiBackfillCorrection/man/get_7dav.Rd index 84a9f1e2d..b328bfb2b 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/get_7dav.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/get_7dav.Rd @@ -11,7 +11,8 @@ get_7dav(pivot_df, refd_col) \item{pivot_df}{Data Frame where the columns are issue dates and the rows are reference dates} -\item{refd_col}{column name for the column of 
reference date} +\item{refd_col}{string specifying name of reference date field within +the input dataframe.} } \description{ Calculate 7 day moving average for each issue date diff --git a/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd b/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd index 2ea29da80..b6e8b83cf 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd @@ -6,6 +6,26 @@ \usage{ get_files_list(indicator, signal, geo_level, params, sub_dir = "") } +\arguments{ +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{geo_level}{string describing geo coverage of input data. Either "state" +or "county".} + +\item{params}{named list containing modeling and data settings. Must include +the following elements: `ref_lag`, `testing_window`, `test_dates`, +`training_days`, `num_col`, `taus`, `lambda`, `export_dir`, `lp_solver`, +and `data_path` (input dir).} + +\item{sub_dir}{string specifying the indicator-specific directory within +the general input directory `params$data_path`} +} \description{ List valid input files. } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/get_weekofmonth.Rd b/Backfill_Correction/delphiBackfillCorrection/man/get_weekofmonth.Rd index 3b53bcb0e..c9307d25e 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/get_weekofmonth.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/get_weekofmonth.Rd @@ -2,23 +2,17 @@ % Please edit documentation in R/preprocessing.R \name{get_weekofmonth} \alias{get_weekofmonth} -\title{Get week of a month info according to a date -All the dates on or before the ith Sunday but after the (i-1)th Sunday -is considered to be the ith week. Notice that the dates in the 5th week -this month are actually in the same week with the dates in the 1st week -next month and those dates are sparse. Thus, we assign the dates in the -5th week to the 1st week.} +\title{Get week of a month info according to a date} \usage{ get_weekofmonth(date) } \arguments{ -\item{date}{as.Date} +\item{date}{Date object} } \value{ a integer indicating which week it is in a month } \description{ -Get week of a month info according to a date All the dates on or before the ith Sunday but after the (i-1)th Sunday is considered to be the ith week. Notice that the dates in the 5th week this month are actually in the same week with the dates in the 1st week diff --git a/Backfill_Correction/delphiBackfillCorrection/man/main.Rd b/Backfill_Correction/delphiBackfillCorrection/man/main.Rd index 0dce32a76..7eb810d03 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/main.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/main.Rd @@ -4,7 +4,13 @@ \alias{main} \title{Perform backfill correction on all desired signals and geo levels} \usage{ -main(params, ...) +main(params) +} +\arguments{ +\item{params}{named list containing modeling and data settings. 
Must include +the following elements: `ref_lag`, `testing_window`, `test_dates`, +`training_days`, `num_col`, `taus`, `lambda`, `export_dir`, `lp_solver`, +and `data_path` (input dir).} } \description{ Perform backfill correction on all desired signals and geo levels diff --git a/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd b/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd index d9d745fcf..2ec970161 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd @@ -9,15 +9,42 @@ main_local( export_dir, test_start_date, test_end_date, - traning_days, - testing_window, + training_days = TRAINING_DAYS, + testing_window = TESTING_WINDOW, value_type, num_col, denom_col, - lambda, - ref_lag + lambda = LAMBDA, + ref_lag = REF_LAG, + lp_solver = LP_SOLVER ) } +\arguments{ +\item{data_path}{path to the input data files} + +\item{export_dir}{path to save output} + +\item{test_start_date}{Date to start making predictions on} + +\item{test_end_date}{Date to stop making predictions on} + +\item{training_days}{integer number of days to use for training} + +\item{testing_window}{the testing window used for saving the runtime. Could +set it to be 1 if time allows} + +\item{value_type}{string describing signal type. Either "count" or "ratio".} + +\item{num_col}{name of numerator column in the input dataframe} + +\item{denom_col}{name of denominator column in the input dataframe} + +\item{lambda}{the level of lasso penalty} + +\item{ref_lag}{max lag to use for training} + +\item{lp_solver}{the lp solver used in Quantgen} +} \description{ Main function to correct a single local signal } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd b/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd index 5468e1e75..97cebcbc5 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd @@ -9,10 +9,11 @@ model_training_and_testing( train_data, test_data, taus, - params_list, + covariates, lp_solver, lambda, - test_date + test_date, + geo ) } \arguments{ @@ -20,15 +21,19 @@ model_training_and_testing( \item{test_data}{Data frame for testing} -\item{taus}{vector of considered quantiles} +\item{taus}{numeric vector of quantiles to be predicted. Values +must be between 0 and 1.} -\item{params_list}{the list of column names serving as the covariates} +\item{covariates}{list of column names serving as the covariates for the model} \item{lp_solver}{the lp solver used in Quantgen} \item{lambda}{the level of lasso penalty} -\item{test_date}{as.Date} +\item{test_date}{Date object representing test date} + +\item{geo}{string specifying the name of the geo region (e.g. FIPS +code for counties)} } \description{ Model training and prediction using quantile regression with Lasso penalty diff --git a/Backfill_Correction/delphiBackfillCorrection/man/objective.Rd b/Backfill_Correction/delphiBackfillCorrection/man/objective.Rd index 256d1ce4b..375b69c2e 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/objective.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/objective.Rd @@ -9,7 +9,11 @@ objective(theta, x, prob, ...) 
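For readers of the `objective(theta, x, prob, ...)` interface shown above, the following is a minimal, self-contained sketch of the beta-prior fit it supports: `theta` carries log-scale alpha/beta so the parameters stay positive, `x` is a vector of estimated quantiles, and `prob` the matching quantile levels. All values and names below (taus, q_hat, sq_err) are made up for illustration and are not part of the package; only base R is assumed.

# Illustrative quantile levels and hypothetical estimated ratio quantiles
taus  <- c(0.1, 0.25, 0.5, 0.75, 0.9)
q_hat <- c(0.02, 0.03, 0.05, 0.08, 0.12)

# Squared error between the beta CDF and the target probabilities
sq_err <- function(theta, x, prob) {
  ab <- exp(theta)                  # back-transform to alpha, beta
  sum((pbeta(x, ab[1], ab[2]) - prob)^2)
}

# Extra named arguments to nlm() are forwarded to the objective function
fit <- nlm(sq_err, p = c(0, log(10)), x = q_hat, prob = taus)
exp(fit$estimate)                   # fitted alpha and beta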
\arguments{ \item{theta}{parameters for the distribution in log scale} +\item{x}{vector of quantiles} + \item{prob}{the expected probabilities} + +\item{...}{additional arguments} } \description{ Generate objection function diff --git a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd index f4d9f2c21..c23602f42 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd @@ -4,7 +4,13 @@ \alias{ratio_adj} \title{Update ratio using beta prior approach} \usage{ -ratio_adj(train_data, test_data, prior_test_data) +ratio_adj( + train_data, + test_data, + prior_test_data, + taus = TAUS, + lp_solver = LP_SOLVER +) } \arguments{ \item{train_data}{training data} @@ -12,6 +18,11 @@ ratio_adj(train_data, test_data, prior_test_data) \item{test_data}{testing data} \item{prior_test_data}{testing data for the lag -1 model} + +\item{taus}{numeric vector of quantiles to be predicted. Values +must be between 0 and 1.} + +\item{lp_solver}{the lp solver used in Quantgen} } \description{ Update ratio using beta prior approach diff --git a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd index 185b78411..fe1d6c652 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd @@ -4,11 +4,13 @@ \alias{ratio_adj_with_pseudo} \title{Update ratio based on the pseudo counts for numerators and denominators} \usage{ -ratio_adj_with_pseudo(data, cov, pseudo_num, pseudo_denom, num_col, denom_col) +ratio_adj_with_pseudo(data, dw, pseudo_num, pseudo_denom, num_col, denom_col) } \arguments{ \item{data}{Data Frame} +\item{dw}{character to indicate the day of a week. Can be NULL for all the days} + \item{pseudo_num}{the estimated counts to be added to numerators} \item{pseudo_denom}{the estimated counts to be added to denominators} @@ -16,8 +18,6 @@ ratio_adj_with_pseudo(data, cov, pseudo_num, pseudo_denom, num_col, denom_col) \item{num_col}{the column name for the numerator} \item{denom_col}{the column name for the denominator} - -\item{dw}{character to indicate the day of a week. Can be NULL for all the days} } \description{ Update ratio based on the pseudo counts for numerators and denominators diff --git a/Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd b/Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd index 2a8e9e239..22fda14e4 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd @@ -36,8 +36,8 @@ params$num_col: the column name for the counts of the numerator, e.g. the params$denom_col: the column name for the counts of the denominator, e.g. the number of total claims params$geo_level: list("state", "county") -params$taus: ?? -params$lambda: ?? -params$export_dir: ?? 
+params$taus: vector of considered quantiles +params$lambda: the level of lasso penalty +params$export_dir: directory to save corrected data to params$lp_solver: LP solver to use in quantile_lasso(); "gurobi" or "glpk" } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd index 2f1f5acda..f0401215a 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd @@ -15,24 +15,32 @@ run_backfill( ) } \arguments{ -\item{df}{dataframe of input data containing a single indicator + signal + -level of geographic coverage.} +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} -\item{value_type}{string describing signal type of "count" and "ratio".} +\item{value_type}{string describing signal type. Either "count" or "ratio".} -\item{geo_level}{string describing geo coverage of input data. "state" or -"county". If "county" is selected, only data from the 200 most populous -counties in the US (*not* the dataset) will be used.} +\item{geo_level}{string describing geo coverage of input data. Either "state" +or "county".} \item{params}{named list containing modeling and data settings. Must include the following elements: `ref_lag`, `testing_window`, `test_dates`, `training_days`, `num_col`, `taus`, `lambda`, `export_dir`, `lp_solver`, and `data_path` (input dir).} -\item{refd_col}{string containing name of reference date field within `df`.} +\item{refd_col}{string specifying name of reference date field within +the input dataframe.} -\item{lag_col}{string containing name of lag field within `df`.} +\item{lag_col}{string specifying name of lag field within +the input dataframe.} + +\item{signal_suffixes}{character vector specifying value column name +endings to be appended to standard value column names from +`params$num_col` and `params$denom_col`. Used for non-standard +value column names and when processing multiple signals from a +single input dataframe, as with `quidel`'s age buckets.} } \description{ -Get backfill-corrected estimates for a single signal + geo combination +If "county" is selected for `geo_level`, only data from the 200 most populous +counties in the US (*not* the dataset) will be used. } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd index a3bf2d5fb..4fac25eec 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd @@ -7,17 +7,46 @@ run_backfill_local( df, export_dir, - taus, + taus = TAUS, test_date_list, - test_lags, + test_lags = TEST_LAGS, value_cols, - training_days, - testing_window, - ref_lag, + training_days = TRAINING_DAYS, + testing_window = TESTING_WINDOW, + ref_lag = REF_LAG, value_type, - lambda + lambda = LAMBDA, + lp_solver = LP_SOLVER ) } +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{export_dir}{path to save output} + +\item{taus}{numeric vector of quantiles to be predicted. 
Values +must be between 0 and 1.} + +\item{test_date_list}{Date vector of dates to make predictions for} + +\item{test_lags}{integer vector of number of days ago to predict for} + +\item{value_cols}{character vector of numerator and/or denominator field names} + +\item{training_days}{integer number of days to use for training} + +\item{testing_window}{the testing window used for saving the runtime. Could +set it to be 1 if time allows} + +\item{ref_lag}{max lag to use for training} + +\item{value_type}{string describing signal type. Either "count" or "ratio".} + +\item{lambda}{the level of lasso penalty} + +\item{lp_solver}{the lp solver used in Quantgen} +} \description{ Corrected estimates from a single local signal } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/subset_valid_files.Rd b/Backfill_Correction/delphiBackfillCorrection/man/subset_valid_files.Rd index 0a15eca0f..847aae881 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/subset_valid_files.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/subset_valid_files.Rd @@ -6,6 +6,17 @@ \usage{ subset_valid_files(files_list, file_type = c("daily", "rollup"), params) } +\arguments{ +\item{files_list}{character vector of input files of a given `file_type`} + +\item{file_type}{string specifying time period coverage of input files. +Either "daily" or "rollup"} + +\item{params}{named list containing modeling and data settings. Must include +the following elements: `ref_lag`, `testing_window`, `test_dates`, +`training_days`, `num_col`, `taus`, `lambda`, `export_dir`, `lp_solver`, +and `data_path` (input dir).} +} \description{ Parse filenames to find included dates. Use different patterns if file includes daily or rollup (multiple days) data. diff --git a/Backfill_Correction/delphiBackfillCorrection/man/training_days_check.Rd b/Backfill_Correction/delphiBackfillCorrection/man/training_days_check.Rd index 7309f5608..1692da955 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/training_days_check.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/training_days_check.Rd @@ -4,7 +4,12 @@ \alias{training_days_check} \title{Check available training days} \usage{ -training_days_check(issue_date, training_days) +training_days_check(issue_date, training_days = TRAINING_DAYS) +} +\arguments{ +\item{issue_date}{contents of input data's `issue_date` column} + +\item{training_days}{integer number of days to use for training} } \description{ Check available training days diff --git a/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd b/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd index 54f7d66e9..78f463506 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd @@ -4,7 +4,17 @@ \alias{validity_checks} \title{Check input data for validity} \usage{ -validity_checks(df, value_type) +validity_checks(df, value_type, num_col, denom_col) +} +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{value_type}{string describing signal type. 
Either "count" or "ratio".} + +\item{num_col}{name of numerator column in the input dataframe} + +\item{denom_col}{name of denominator column in the input dataframe} } \description{ Check input data for validity From 79ce886f3e01c6b9586495f863f42704ccf28d7e Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 25 Aug 2022 16:07:28 -0400 Subject: [PATCH 032/145] add Makefile --- Backfill_Correction/Makefile | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 Backfill_Correction/Makefile diff --git a/Backfill_Correction/Makefile b/Backfill_Correction/Makefile new file mode 100644 index 000000000..ebd308367 --- /dev/null +++ b/Backfill_Correction/Makefile @@ -0,0 +1,34 @@ +SHELL:=/bin/bash --rcfile bash-init.sh + +default: + @echo No default implemented yet + +install: dev + +dev: delphiBackfillCorrection_1.0.tar.gz + R CMD INSTALL delphiBackfillCorrection_1.0.tar.gz + +lib: + R -e 'roxygen2::roxygenise("delphiBackfillCorrection")' + +run-R: + rm -rf tmp + time Rscript run.R 2>&1 | tee tmp + grep "run_facebook completed successfully" tmp + grep "scheduled core" tmp ; \ + [ "$$?" -eq 1 ] + +coverage: + Rscript -e 'covr::package_coverage("delphiBackfillCorrection")' + +# best we can do +lint: coverage + +test: delphiBackfillCorrection_1.0.tar.gz + R CMD check --test-dir=unit-tests $< + +delphiBackfillCorrection_1.0.tar.gz: $(wildcard delphiBackfillCorrection/R/*.R) + R CMD build delphiBackfillCorrection + +validate-covidcast: + @echo validate-covidcast not yet implemented From 9d538e1ebc8cac83b35ad0931eb555889195a366 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 25 Aug 2022 16:08:16 -0400 Subject: [PATCH 033/145] remove wd2 -- not defined --- Backfill_Correction/delphiBackfillCorrection/R/main.R | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 9271a44ad..c56b93cb2 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -102,8 +102,7 @@ run_backfill <- function(df, value_type, geo_level, params, test_data <- updated_data[[2]] sqrtscale <- updated_data[[3]] - ## TODO what is wd2? 
- covariates <- list(Y7DAV, WEEKDAYS_ABBR, wd2, WEEK_ISSUES, SLOPE, SQRTSCALE) + covariates <- list(Y7DAV, WEEKDAYS_ABBR, WEEK_ISSUES, SLOPE, SQRTSCALE) params_list <- c(YITL, as.vector(unlist(covariates))) # Model training and testing From e6ce2a7646b567e987959644679119cf8bea4ba9 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 25 Aug 2022 16:13:07 -0400 Subject: [PATCH 034/145] rename evl to evaluate --- Backfill_Correction/delphiBackfillCorrection/NAMESPACE | 2 +- Backfill_Correction/delphiBackfillCorrection/R/main.R | 2 +- Backfill_Correction/delphiBackfillCorrection/R/model.R | 2 +- Backfill_Correction/delphiBackfillCorrection/R/tooling.R | 2 +- .../delphiBackfillCorrection/man/{evl.Rd => evaluate.Rd} | 6 +++--- 5 files changed, 7 insertions(+), 7 deletions(-) rename Backfill_Correction/delphiBackfillCorrection/man/{evl.Rd => evaluate.Rd} (92%) diff --git a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE index b05297020..a378deaee 100644 --- a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE +++ b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE @@ -6,7 +6,7 @@ export(add_shift) export(add_weekofmonth) export(create_dir_not_exist) export(data_filteration) -export(evl) +export(evaluate) export(export_test_result) export(fill_missing_updates) export(fill_rows) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index c56b93cb2..85f32c54a 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -112,7 +112,7 @@ run_backfill <- function(df, value_type, geo_level, params, ) test_data <- prediction_results[[1]] coefs <- prediction_results[[2]] - test_data <- evl(test_data, params$taus) + test_data <- evaluate(test_data, params$taus) export_test_result(test_data, coefs, params$export_dir, geo_level, test_lag) }# End for test lags diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index 0bdce5304..d499ca9c0 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -91,7 +91,7 @@ model_training_and_testing <- function(train_data, test_data, taus, covariates, #' @importFrom evalcast weighted_interval_score #' #' @export -evl <- function(test_data, taus){ +evaluate <- function(test_data, taus){ n_row = nrow(test_data) taus_list = as.list(data.frame(matrix(replicate(n_row, taus), ncol=n_row))) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R index a500f2a1a..1d5a2e2ac 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R @@ -98,7 +98,7 @@ run_backfill_local <- function(df, export_dir, taus = TAUS, ) test_data <- prediction_results[[1]] coefs <- prediction_results[[2]] - test_data <- evl(test_data, taus) + test_data <- evaluate(test_data, taus) test_data$test_date <- test_date coefs$test_date <- test_date coefs$test_lag <- test_lag diff --git a/Backfill_Correction/delphiBackfillCorrection/man/evl.Rd b/Backfill_Correction/delphiBackfillCorrection/man/evaluate.Rd similarity index 92% rename from Backfill_Correction/delphiBackfillCorrection/man/evl.Rd rename to Backfill_Correction/delphiBackfillCorrection/man/evaluate.Rd index 
7da0bb7ad..fc4d3c347 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/evl.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/evaluate.Rd @@ -1,12 +1,12 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/model.R -\name{evl} -\alias{evl} +\name{evaluate} +\alias{evaluate} \title{Evaluation of the test results based on WIS score The WIS score calculation is based on the weighted_interval_score function from the `evalcast` package from Delphi} \usage{ -evl(test_data, taus) +evaluate(test_data, taus) } \arguments{ \item{test_data}{dataframe with a column containing the prediction results of From 07177f11e97f74da33644b568c78ddf9c360b597 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 25 Aug 2022 16:19:22 -0400 Subject: [PATCH 035/145] mark environment vars in dplyr logic --- .../delphiBackfillCorrection/NAMESPACE | 1 + .../delphiBackfillCorrection/R/main.R | 18 +++++++++--------- .../delphiBackfillCorrection/R/model.R | 18 ++++++++++-------- .../delphiBackfillCorrection/R/tooling.R | 18 +++++++++--------- 4 files changed, 29 insertions(+), 26 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE index a378deaee..d339b8f07 100644 --- a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE +++ b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE @@ -41,6 +41,7 @@ importFrom(quantgen,quantile_lasso) importFrom(readr,read_csv) importFrom(readr,write_csv) importFrom(rlang,.data) +importFrom(rlang,.env) importFrom(stats,coef) importFrom(stats,nlm) importFrom(stats,pbeta) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 85f32c54a..a9b98e09b 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -17,7 +17,7 @@ #' #' @importFrom dplyr %>% filter #' @importFrom tidyr drop_na -#' @importFrom rlang .data +#' @importFrom rlang .data .env #' #' @export run_backfill <- function(df, value_type, geo_level, params, @@ -32,7 +32,7 @@ run_backfill <- function(df, value_type, geo_level, params, # Build model for each location for (geo in geo_list) { - subdf <- df %>% filter(.data$geo_value == geo) %>% filter(.data$lag < params$ref_lag) + subdf <- df %>% filter(.data$geo_value == .env$geo) %>% filter(.data$lag < params$ref_lag) min_refd <- min(subdf[[refd_col]]) max_refd <- max(subdf[[refd_col]]) subdf <- fill_rows(subdf, refd_col, lag_col, min_refd, max_refd) @@ -71,21 +71,21 @@ run_backfill <- function(df, value_type, geo_level, params, for (test_date in params$test_dates) { geo_train_data = combined_df %>% - filter(.data$issue_date < test_date) %>% - filter(.data$target_date <= test_date) %>% - filter(.data$target_date > test_date - params$training_days) %>% + filter(.data$issue_date < .env$test_date) %>% + filter(.data$target_date <= .env$test_date) %>% + filter(.data$target_date > .env$test_date - params$training_days) %>% drop_na() geo_test_data = combined_df %>% - filter(.data$issue_date >= test_date) %>% - filter(.data$issue_date < test_date + params$testing_window) %>% + filter(.data$issue_date >= .env$test_date) %>% + filter(.data$issue_date < .env$test_date + params$testing_window) %>% drop_na() if (nrow(geo_test_data) == 0) next if (nrow(geo_train_data) <= 200) next if (value_type == "ratio"){ geo_prior_test_data = combined_df %>% - filter(.data$issue_date > 
test_date - 7) %>% - filter(.data$issue_date <= test_date) + filter(.data$issue_date > .env$test_date - 7) %>% + filter(.data$issue_date <= .env$test_date) updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) geo_train_data <- updated_data[[1]] diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index d499ca9c0..7d3cb53c1 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -4,29 +4,31 @@ #' @param geo_train_data training data for a certain location #' @param geo_test_data testing data for a certain location #' -#' @importFrom rlang .data +#' @importFrom rlang .data .env #' #' @export data_filteration <- function(test_lag, geo_train_data, geo_test_data){ - if (test_lag <= 14){ + if (test_lag <= 14) { test_lag_pad=2 test_lag_pad1=0 test_lag_pad2=0 - }else if (test_lag < 51){ + } else if (test_lag < 51) { test_lag_pad=7 test_lag_pad1=6 test_lag_pad2=7 - }else { + } else { test_lag_pad=9 test_lag_pad1=8 test_lag_pad2=9 } + train_data = geo_train_data %>% - filter(.data$lag >= test_lag-test_lag_pad ) %>% - filter(.data$lag <= test_lag+test_lag_pad ) + filter(.data$lag >= .env$test_lag - .env$test_lag_pad ) %>% + filter(.data$lag <= .env$test_lag + .env$test_lag_pad ) test_data = geo_test_data %>% - filter(.data$lag >= test_lag-test_lag_pad1) %>% - filter(.data$lag <= test_lag+test_lag_pad2) + filter(.data$lag >= .env$test_lag - .env$test_lag_pad1) %>% + filter(.data$lag <= .env$test_lag + .env$test_lag_pad2) + return (list(train_data, test_data)) } diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R index 1d5a2e2ac..7ac13cc6b 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R @@ -17,7 +17,7 @@ #' @importFrom dplyr %>% filter #' @importFrom plyr rbind.fill #' @importFrom tidyr drop_na -#' @importFrom rlang .data +#' @importFrom rlang .data .env #' #' @export run_backfill_local <- function(df, export_dir, taus = TAUS, @@ -32,7 +32,7 @@ run_backfill_local <- function(df, export_dir, taus = TAUS, coef_df_list = list() for (geo in geo_list) { - subdf <- df %>% filter(.data$geo_value == geo) %>% filter(.data$lag < ref_lag) + subdf <- df %>% filter(.data$geo_value == .env$geo) %>% filter(.data$lag < .env$ref_lag) min_refd <- min(subdf$time_value) max_refd <- max(subdf$time_value) subdf <- fill_rows(subdf, "time_value", "lag", min_refd, max_refd) @@ -54,20 +54,20 @@ run_backfill_local <- function(df, export_dir, taus = TAUS, for (test_date in test_date_list) { geo_train_data = combined_df %>% - filter(.data$issue_date < test_date) %>% - filter(.data$target_date <= test_date) %>% - filter(.data$target_date > test_date - training_days) %>% + filter(.data$issue_date < .env$test_date) %>% + filter(.data$target_date <= .env$test_date) %>% + filter(.data$target_date > .env$test_date - .env$training_days) %>% drop_na() geo_test_data = combined_df %>% - filter(.data$issue_date >= test_date) %>% - filter(.data$issue_date < test_date+testing_window) %>% + filter(.data$issue_date >= .env$test_date) %>% + filter(.data$issue_date < .env$test_date + .env$testing_window) %>% drop_na() if (nrow(geo_test_data) == 0) next if (nrow(geo_train_data) <= 200) next if (value_type == "fraction"){ geo_prior_test_data = combined_df %>% - filter(.data$issue_date > test_date-7) %>% - 
filter(.data$issue_date <= test_date) + filter(.data$issue_date > .env$test_date - 7) %>% + filter(.data$issue_date <= .env$test_date) updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) geo_train_data <- updated_data[[1]] From 6953078ba3286d30d4091d50d6eb410158a8dcd8 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 25 Aug 2022 17:15:46 -0400 Subject: [PATCH 036/145] set default params values; change data_path to input_dir --- .../delphiBackfillCorrection/R/constants.R | 2 -- .../delphiBackfillCorrection/R/io.R | 10 +++---- .../delphiBackfillCorrection/R/main.R | 4 +-- .../delphiBackfillCorrection/R/tooling.R | 6 ++--- .../delphiBackfillCorrection/R/utils.R | 26 ++++++++++++++++--- Backfill_Correction/params.json.template | 6 ++--- 6 files changed, 35 insertions(+), 19 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/constants.R b/Backfill_Correction/delphiBackfillCorrection/R/constants.R index 298bb18a1..217432106 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/constants.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/constants.R @@ -1,7 +1,6 @@ ## TODO not sure how to import roxygen-style outside of a function library(tibble) -## TODO convert all constant usages in package to uppercase # Constants for the backfill correction model TAUS <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) REF_LAG <- 60 @@ -16,7 +15,6 @@ YITL <-"log_value_raw" SLOPE <-"log_7dav_slope" Y7DAV <-"log_value_7dav" -#SQRTSCALE <-c('sqrty0', 'sqrty1', 'sqrty2', 'sqrty3') SQRTSCALE_COVID <-c('sqrty0_covid', 'sqrty1_covid', 'sqrty2_covid') SQRTSCALE_TOTAL <-c('sqrty0_total', 'sqrty1_total', 'sqrty2_total') SQRTSCALE <-c('sqrty0', 'sqrty1', "sqrty2") diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index c6a556f2e..859119ee4 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -44,13 +44,13 @@ export_test_result <- function(test_data, coef_data, export_dir, #' @template geo_level-template #' @template params-template #' @param sub_dir string specifying the indicator-specific directory within -#' the general input directory `params$data_path` +#' the general input directory `params$input_dir` get_files_list <- function(indicator, signal, geo_level, params, sub_dir = "") { # Make sure we're reading in both 4-week rollup and daily files. if (!is.null(sub_dir) && sub_dir != "") { - data_path <- paste(params$data_path, sub_dir, sep="_") + input_dir <- paste(params$input_dir, sub_dir, sep="_") } else { - data_path <- params$data_path + input_dir <- params$input_dir } # Convert input_group into file names. 
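As a side note on the `.data`/`.env` pronouns introduced in the dplyr filters above: `.data$col` always refers to a column of the piped data frame, while `.env$var` always refers to a variable in the calling environment, so a column that happens to share a variable's name can no longer shadow it. A minimal standalone illustration (toy column names and dates, not the package's own data):

library(dplyr)

example_df <- data.frame(
  issue_date = as.Date("2022-05-01") + 0:9,
  value      = 1:10
)
test_date <- as.Date("2022-05-05")

# Keep rows issued strictly before the locally defined test_date
filtered <- example_df %>%
  filter(.data$issue_date < .env$test_date)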
@@ -63,9 +63,9 @@ get_files_list <- function(indicator, signal, geo_level, params, sub_dir = "") { ## TODO: decide whether to use full path or just file name (may not be able to read in) # Filter files lists to only include those containing dates we need for training - daily_input_files <- list.files(data_path, pattern = daily_pattern) %>% + daily_input_files <- list.files(input_dir, pattern = daily_pattern) %>% subset_valid_files("daily", params) - rollup_input_files <- list.files(data_path, pattern = rollup_pattern) %>% + rollup_input_files <- list.files(input_dir, pattern = rollup_pattern) %>% subset_valid_files("rollup", params) return(c(daily_input_files, rollup_input_files)) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index a9b98e09b..4ce72affb 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -131,8 +131,6 @@ run_backfill <- function(df, value_type, geo_level, params, #' #' @export main <- function(params){ - ## TODO may need more args, not sure - # Load indicator x signal groups. Combine with params$geo_level to get all # possible geo x signal combinations. groups <- merge(INDICATORS_AND_SIGNALS, data.frame(geo_level = params$geo_level)) @@ -163,7 +161,7 @@ main <- function(params){ } # Check data type and required columns - ## TODO num and denom names need suffixes to be check properly + ## TODO num and denom names need suffixes to be checked properly result <- validity_checks(input_data, input_group$value_type, params$num_col, params$denom_col) input_data <- result[["df"]] diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R index 7ac13cc6b..f7bb1ecb6 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R @@ -119,7 +119,7 @@ run_backfill_local <- function(df, export_dir, taus = TAUS, #' Main function to correct a single local signal #' -#' @param data_path path to the input data files +#' @param input_dir path to the input data files #' @param export_dir path to save output #' @param test_start_date Date to start making predictions on #' @param test_end_date Date to stop making predictions on @@ -136,12 +136,12 @@ run_backfill_local <- function(df, export_dir, taus = TAUS, #' @importFrom readr read_csv #' #' @export -main_local <- function(data_path, export_dir, +main_local <- function(input_dir, export_dir, test_start_date, test_end_date, training_days = TRAINING_DAYS, testing_window = TESTING_WINDOW, value_type, num_col, denom_col, lambda = LAMBDA, ref_lag = REF_LAG, lp_solver = LP_SOLVER){ # Check input data - df = read_csv(data_path) + df = read_csv(input_dir) # Check data type and required columns result <- validity_checks(df, value_type, num_col, denom_col) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index 7fef7977d..93113dea9 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -8,7 +8,7 @@ #' #' params$ref_lag: reference lag, after x days, the update is considered to be #' the response. 60 is a reasonable choice for CHNG outpatient data -#' params$data_path: link to the input data file +#' params$input_dir: link to the input data file #' params$testing_window: the testing window used for saving the runtime. 
Could #' set it to be 1 if time allows #' params$test_dates: list of two elements, the first one is the start date and @@ -18,7 +18,7 @@ #' number of COVID claims #' params$denom_col: the column name for the counts of the denominator, e.g. the #' number of total claims -#' params$geo_level: list("state", "county") +#' params$geo_level: character vector of "state" and "county", by default #' params$taus: vector of considered quantiles #' params$lambda: the level of lasso penalty #' params$export_dir: directory to save corrected data to @@ -36,8 +36,28 @@ read_params <- function(path = "params.json", template_path = "params.json.template") { if (!file.exists(path)) file.copy(template_path, path) params <- read_json(path, simplifyVector = TRUE) + + # Required parameters + if (!(input_dir %in% names(params)) || dir.exists(params$input_dir)) { + stop("input_dir must be set in `params` and exist") + } + if (!(export_dir %in% names(params))) { + stop("export_dir must be set in `params`") + } - ## TODO set default parameter values if not specified + # Set default parameter values if not specified + if (!(ref_lag %in% names(params))) {params$ref_lag <- REF_LAG} + if (!(testing_window %in% names(params))) {params$testing_window <- TESTING_WINDOW} + if (!(test_dates %in% names(params))) {params$test_dates <- ...} + if (!(training_days %in% names(params))) {params$training_days <- TRAINING_DAYS} + if (!(num_col %in% names(params))) {params$num_col <- "num"} + if (!(denom_col %in% names(params))) {params$denom_col <- "denom"} + if (!(geo_level %in% names(params))) {params$geo_level <- c("state", "county")} + if (!(taus %in% names(params))) {params$taus <- TAUS} + if (!(lambda %in% names(params))) {params$lambda <- LAMBDA} + if (!(lp_solver %in% names(params))) {params$lp_solver <- LP_SOLVER} + + ## TODO what to do with `value_type` parameter? 
params$parallel_max_cores <- if_else( is.null(params$parallel_max_cores), diff --git a/Backfill_Correction/params.json.template b/Backfill_Correction/params.json.template index d32c6c23c..a4c2dbff0 100644 --- a/Backfill_Correction/params.json.template +++ b/Backfill_Correction/params.json.template @@ -1,12 +1,12 @@ { "ref_lag": 60, - "data_path": "", + "input_dir": "", "test_dates":["", ""] , "testing_window": 1, "training_days": 270, "export_dir": "./receiving", "geo_levels": ["state", "county"], "value_type": ["count", "ratio"], - "num_col": "", - "denom_col": "", + "num_col": "num", + "denom_col": "den", } From 65a9e6029fb30919708d5b863a50224ced2cc2e1 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 25 Aug 2022 17:27:30 -0400 Subject: [PATCH 037/145] process all value_type combos in groups --- .../delphiBackfillCorrection/R/constants.R | 10 +++++----- Backfill_Correction/delphiBackfillCorrection/R/main.R | 5 +++-- Backfill_Correction/delphiBackfillCorrection/R/utils.R | 5 ++--- Backfill_Correction/params.json.template | 2 +- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/constants.R b/Backfill_Correction/delphiBackfillCorrection/R/constants.R index 217432106..eb64003e1 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/constants.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/constants.R @@ -26,10 +26,10 @@ WEEK_ISSUES <- c("W1_issue", "W2_issue", "W3_issue") # wm TODAY <- Sys.Date() INDICATORS_AND_SIGNALS <- tribble( - ~indicator, ~signal, ~name_suffix, ~value_type, ~sub_dir, - "changehc", "covid", "", "count", "chng", - "changehc", "flu", "", "count", "chng", - "claims_hosp", "", "", "count", "claims_hosp", + ~indicator, ~signal, ~name_suffix, ~sub_dir, + "changehc", "covid", "", "chng", + "changehc", "flu", "", "chng", + "claims_hosp", "", "", "claims_hosp", # "dv",,, - "quidel", "covidtest", c("total", "age_0_4", "age_5_17", "age_18_49", "age_50_64", "age_65plus", "age_0_17"), "count", "quidel_covidtest" + "quidel", "covidtest", c("total", "age_0_4", "age_5_17", "age_18_49", "age_50_64", "age_65plus", "age_0_17"), "quidel_covidtest" ) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 4ce72affb..7dfc1372e 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -133,9 +133,10 @@ run_backfill <- function(df, value_type, geo_level, params, main <- function(params){ # Load indicator x signal groups. Combine with params$geo_level to get all # possible geo x signal combinations. - groups <- merge(INDICATORS_AND_SIGNALS, data.frame(geo_level = params$geo_level)) + groups <- merge(INDICATORS_AND_SIGNALS, data.frame(geo_level = params$geo_level)) %>% + merge(data.frame(value_type = params$value_types)) - # Loop over every indicator + signal + geo type combination. + # Loop over every indicator + signal + geo type + value_type combination. 
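The successive merge() calls above rely on base R's behaviour of returning the Cartesian product when the two data frames share no columns, so `groups` gains one row per indicator/signal x geo_level x value_type combination. A small sketch with made-up values:

signals <- data.frame(indicator = c("changehc", "quidel"),
                      signal    = c("covid", "covidtest"))

# merge() with no common columns returns every pairwise combination
groups <- merge(signals, data.frame(geo_level = c("state", "county")))
groups <- merge(groups, data.frame(value_type = c("count", "ratio")))

nrow(groups)  # 2 x 2 x 2 = 8 rows, one per combination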
for (input_group in groups) { files_list <- get_files_list( input_group$indicator, input_group$signal, input_group$geo_level, diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index 93113dea9..da7063481 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -56,12 +56,11 @@ read_params <- function(path = "params.json", template_path = "params.json.templ if (!(taus %in% names(params))) {params$taus <- TAUS} if (!(lambda %in% names(params))) {params$lambda <- LAMBDA} if (!(lp_solver %in% names(params))) {params$lp_solver <- LP_SOLVER} - - ## TODO what to do with `value_type` parameter? + if (!(value_types %in% names(params))) {params$lp_solver <- c("count", "ratio")} params$parallel_max_cores <- if_else( is.null(params$parallel_max_cores), - .Machine$integer.max, + .Machine$integer.max - 1, params$parallel_max_cores ) diff --git a/Backfill_Correction/params.json.template b/Backfill_Correction/params.json.template index a4c2dbff0..4c167d616 100644 --- a/Backfill_Correction/params.json.template +++ b/Backfill_Correction/params.json.template @@ -6,7 +6,7 @@ "training_days": 270, "export_dir": "./receiving", "geo_levels": ["state", "county"], - "value_type": ["count", "ratio"], + "value_types": ["count", "ratio"], "num_col": "num", "denom_col": "den", } From 1206576498102075d6d03eb3b6da4297311ca4c6 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 25 Aug 2022 17:48:17 -0400 Subject: [PATCH 038/145] set parallel core params --- .../delphiBackfillCorrection/DESCRIPTION | 3 +- .../delphiBackfillCorrection/NAMESPACE | 2 +- .../delphiBackfillCorrection/R/main.R | 15 ++++++- .../delphiBackfillCorrection/R/utils.R | 42 ++++++++++--------- .../man/get_files_list.Rd | 2 +- .../man/main_local.Rd | 4 +- .../man/read_params.Rd | 4 +- 7 files changed, 43 insertions(+), 29 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION b/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION index 04bbae163..a550a521f 100644 --- a/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION +++ b/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION @@ -26,7 +26,8 @@ Imports: tidyr, zoo, utils, - rlang + rlang, + parallel Suggests: knitr (>= 1.15), rmarkdown (>= 1.4), diff --git a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE index d339b8f07..03914d072 100644 --- a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE +++ b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE @@ -35,7 +35,7 @@ importFrom(lubridate,day) importFrom(lubridate,make_date) importFrom(lubridate,month) importFrom(lubridate,year) -importFrom(lubridate,ymd_hms) +importFrom(parallel,detectCores) importFrom(plyr,rbind.fill) importFrom(quantgen,quantile_lasso) importFrom(readr,read_csv) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 7dfc1372e..64f43800f 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -119,8 +119,6 @@ run_backfill <- function(df, value_type, geo_level, params, }# End for test date list }# End for signal suffixes }# End for geo list - - return(NULL) } #' Perform backfill correction on all desired signals and geo levels @@ -128,9 +126,22 @@ run_backfill <- function(df, 
value_type, geo_level, params, #' @template params-template #' #' @importFrom dplyr bind_rows +#' @importFrom parallel detectCores #' #' @export main <- function(params){ + ## Set default number of cores for mclapply to the half of the total available number. + if (params$parallel) { + cores <- detectCores() + + if (is.na(cores)) { + warning("Could not detect the number of CPU cores; parallel mode disabled") + params$parallel <- FALSE + } else { + options(mc.cores = min(params$parallel_max_cores, floor(cores / 2))) + } + } + # Load indicator x signal groups. Combine with params$geo_level to get all # possible geo x signal combinations. groups <- merge(INDICATORS_AND_SIGNALS, data.frame(geo_level = params$geo_level)) %>% diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index da7063481..3462ef3ab 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -32,37 +32,39 @@ #' #' @importFrom dplyr if_else #' @importFrom jsonlite read_json -#' @importFrom lubridate ymd_hms read_params <- function(path = "params.json", template_path = "params.json.template") { if (!file.exists(path)) file.copy(template_path, path) params <- read_json(path, simplifyVector = TRUE) # Required parameters - if (!(input_dir %in% names(params)) || dir.exists(params$input_dir)) { + if (!("input_dir" %in% names(params)) || dir.exists(params$input_dir)) { stop("input_dir must be set in `params` and exist") } - if (!(export_dir %in% names(params))) { + if (!("export_dir" %in% names(params))) { stop("export_dir must be set in `params`") } - # Set default parameter values if not specified - if (!(ref_lag %in% names(params))) {params$ref_lag <- REF_LAG} - if (!(testing_window %in% names(params))) {params$testing_window <- TESTING_WINDOW} - if (!(test_dates %in% names(params))) {params$test_dates <- ...} - if (!(training_days %in% names(params))) {params$training_days <- TRAINING_DAYS} - if (!(num_col %in% names(params))) {params$num_col <- "num"} - if (!(denom_col %in% names(params))) {params$denom_col <- "denom"} - if (!(geo_level %in% names(params))) {params$geo_level <- c("state", "county")} - if (!(taus %in% names(params))) {params$taus <- TAUS} - if (!(lambda %in% names(params))) {params$lambda <- LAMBDA} - if (!(lp_solver %in% names(params))) {params$lp_solver <- LP_SOLVER} - if (!(value_types %in% names(params))) {params$lp_solver <- c("count", "ratio")} + ## Set default parameter values if not specified + # Model parameters + if (!("taus" %in% names(params))) {params$taus <- TAUS} + if (!("lambda" %in% names(params))) {params$lambda <- LAMBDA} + if (!("lp_solver" %in% names(params))) {params$lp_solver <- LP_SOLVER} - params$parallel_max_cores <- if_else( - is.null(params$parallel_max_cores), - .Machine$integer.max - 1, - params$parallel_max_cores - ) + # Date parameters + if (!("training_days" %in% names(params))) {params$training_days <- TRAINING_DAYS} + if (!("ref_lag" %in% names(params))) {params$ref_lag <- REF_LAG} + if (!("testing_window" %in% names(params))) {params$testing_window <- TESTING_WINDOW} + if (!("test_dates" %in% names(params))) {params$test_dates <- ...} + + # Data parameters + if (!("num_col" %in% names(params))) {params$num_col <- "num"} + if (!("denom_col" %in% names(params))) {params$denom_col <- "denom"} + if (!("geo_level" %in% names(params))) {params$geo_level <- c("state", "county")} + if (!("value_types" %in% names(params))) {params$lp_solver <- 
c("count", "ratio")} + + # Parallel parameters + if (!("parallel" %in% names(params))) {params$parallel <- FALSE} + if (!("parallel_max_cores" %in% names(params))) {params$parallel_max_cores <- .Machine$integer.max} return(params) } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd b/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd index b6e8b83cf..ec68ff7d5 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd @@ -24,7 +24,7 @@ the following elements: `ref_lag`, `testing_window`, `test_dates`, and `data_path` (input dir).} \item{sub_dir}{string specifying the indicator-specific directory within -the general input directory `params$data_path`} +the general input directory `params$input_dir`} } \description{ List valid input files. diff --git a/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd b/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd index 2ec970161..f6f1d185b 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd @@ -5,7 +5,7 @@ \title{Main function to correct a single local signal} \usage{ main_local( - data_path, + input_dir, export_dir, test_start_date, test_end_date, @@ -20,7 +20,7 @@ main_local( ) } \arguments{ -\item{data_path}{path to the input data files} +\item{input_dir}{path to the input data files} \item{export_dir}{path to save output} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd b/Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd index 22fda14e4..713eed281 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd @@ -25,7 +25,7 @@ they will be filled with default values when possible. params$ref_lag: reference lag, after x days, the update is considered to be the response. 60 is a reasonable choice for CHNG outpatient data -params$data_path: link to the input data file +params$input_dir: link to the input data file params$testing_window: the testing window used for saving the runtime. Could set it to be 1 if time allows params$test_dates: list of two elements, the first one is the start date and @@ -35,7 +35,7 @@ params$num_col: the column name for the counts of the numerator, e.g. the number of COVID claims params$denom_col: the column name for the counts of the denominator, e.g. 
the number of total claims -params$geo_level: list("state", "county") +params$geo_level: character vector of "state" and "county", by default params$taus: vector of considered quantiles params$lambda: the level of lasso penalty params$export_dir: directory to save corrected data to From c2625780d9a62b79e79fa90eafcdda3ce96b49c1 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 25 Aug 2022 17:55:22 -0400 Subject: [PATCH 039/145] define test dates if not specified --- .../delphiBackfillCorrection/R/utils.R | 22 +++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index 3462ef3ab..f5970b28b 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -45,26 +45,30 @@ read_params <- function(path = "params.json", template_path = "params.json.templ } ## Set default parameter values if not specified + # Parallel parameters + if (!("parallel" %in% names(params))) {params$parallel <- FALSE} + if (!("parallel_max_cores" %in% names(params))) {params$parallel_max_cores <- .Machine$integer.max} + # Model parameters if (!("taus" %in% names(params))) {params$taus <- TAUS} if (!("lambda" %in% names(params))) {params$lambda <- LAMBDA} if (!("lp_solver" %in% names(params))) {params$lp_solver <- LP_SOLVER} - # Date parameters - if (!("training_days" %in% names(params))) {params$training_days <- TRAINING_DAYS} - if (!("ref_lag" %in% names(params))) {params$ref_lag <- REF_LAG} - if (!("testing_window" %in% names(params))) {params$testing_window <- TESTING_WINDOW} - if (!("test_dates" %in% names(params))) {params$test_dates <- ...} - # Data parameters if (!("num_col" %in% names(params))) {params$num_col <- "num"} if (!("denom_col" %in% names(params))) {params$denom_col <- "denom"} if (!("geo_level" %in% names(params))) {params$geo_level <- c("state", "county")} if (!("value_types" %in% names(params))) {params$lp_solver <- c("count", "ratio")} - # Parallel parameters - if (!("parallel" %in% names(params))) {params$parallel <- FALSE} - if (!("parallel_max_cores" %in% names(params))) {params$parallel_max_cores <- .Machine$integer.max} + # Date parameters + if (!("training_days" %in% names(params))) {params$training_days <- TRAINING_DAYS} + if (!("ref_lag" %in% names(params))) {params$ref_lag <- REF_LAG} + if (!("testing_window" %in% names(params))) {params$testing_window <- TESTING_WINDOW} + if (!("test_dates" %in% names(params))) { + start_date <- TODAY - params$testing_window + end_date <- TODAY - 1 + params$test_dates <- seq(start_date, end_date, by="days") + } return(params) } From e52931ee2e07720b1330822463e11023afb2e520 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 25 Aug 2022 18:03:20 -0400 Subject: [PATCH 040/145] get full path to input files --- .../delphiBackfillCorrection/R/io.R | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index 859119ee4..474c045fe 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -61,11 +61,14 @@ get_files_list <- function(indicator, signal, geo_level, params, sub_dir = "") { indicator, signal, geo_level, "rollup" ) - ## TODO: decide whether to use full 
path or just file name (may not be able to read in) # Filter files lists to only include those containing dates we need for training - daily_input_files <- list.files(input_dir, pattern = daily_pattern) %>% + daily_input_files <- list.files( + input_dir, pattern = daily_pattern, full.names = TRUE + ) %>% subset_valid_files("daily", params) - rollup_input_files <- list.files(input_dir, pattern = rollup_pattern) %>% + rollup_input_files <- list.files( + input_dir, pattern = rollup_pattern, full.names = TRUE + ) %>% subset_valid_files("rollup", params) return(c(daily_input_files, rollup_input_files)) @@ -85,13 +88,13 @@ subset_valid_files <- function(files_list, file_type = c("daily", "rollup"), par switch(file_type, daily = { start_dates <- as.Date( - sub("^.*_as_of_([0-9]{8}).parquet$", "\\1", files_list), + sub("^.*/.*_as_of_([0-9]{8}).parquet$", "\\1", files_list), format = date_format ) end_dates <- start_dates }, rollup = { - rollup_pattern <- "^.*_from_([0-9]{8})_to_([0-9]{8}).parquet$" + rollup_pattern <- "^.*/.*_from_([0-9]{8})_to_([0-9]{8}).parquet$" start_dates <- as.Date( sub(rollup_pattern, "\\1", files_list), format = date_format @@ -128,7 +131,7 @@ create_name_pattern <- function(indicator, signal, geo_level, file_type = c("daily", "rollup")) { file_type <- match.arg(file_type) switch(file_type, - daily = str_interp("{indicator}_{signal}_as_of_[0-9]{8}.parquet"), - rollup = str_interp("{indicator}_{signal}_from_[0-9]{8}_to_[0-9]{8}.parquet") + daily = str_interp("{indicator}_{signal}_as_of_[0-9]{8}.parquet$"), + rollup = str_interp("{indicator}_{signal}_from_[0-9]{8}_to_[0-9]{8}.parquet$") ) } From 54010cddd525915334a0ba5d31426eb7d3ea91cf Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Fri, 26 Aug 2022 10:26:11 -0400 Subject: [PATCH 041/145] suppress tibble import note --- Backfill_Correction/delphiBackfillCorrection/NAMESPACE | 1 + Backfill_Correction/delphiBackfillCorrection/R/constants.R | 5 +---- .../delphiBackfillCorrection/R/delphiBackfillCorrection.R | 3 +++ 3 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 Backfill_Correction/delphiBackfillCorrection/R/delphiBackfillCorrection.R diff --git a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE index 03914d072..dc9244334 100644 --- a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE +++ b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE @@ -48,6 +48,7 @@ importFrom(stats,pbeta) importFrom(stats,predict) importFrom(stats,setNames) importFrom(stringr,str_interp) +importFrom(tibble,tribble) importFrom(tidyr,crossing) importFrom(tidyr,drop_na) importFrom(tidyr,fill) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/constants.R b/Backfill_Correction/delphiBackfillCorrection/R/constants.R index eb64003e1..466b7136b 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/constants.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/constants.R @@ -1,6 +1,3 @@ -## TODO not sure how to import roxygen-style outside of a function -library(tibble) - # Constants for the backfill correction model TAUS <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) REF_LAG <- 60 @@ -25,7 +22,7 @@ WEEKDAYS_ABBR <- c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat") # wd WEEK_ISSUES <- c("W1_issue", "W2_issue", "W3_issue") # wm TODAY <- Sys.Date() -INDICATORS_AND_SIGNALS <- tribble( +INDICATORS_AND_SIGNALS <- tibble::tribble( ~indicator, ~signal, ~name_suffix, ~sub_dir, "changehc", "covid", "", 
"chng", "changehc", "flu", "", "chng", diff --git a/Backfill_Correction/delphiBackfillCorrection/R/delphiBackfillCorrection.R b/Backfill_Correction/delphiBackfillCorrection/R/delphiBackfillCorrection.R new file mode 100644 index 000000000..5661265f7 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/R/delphiBackfillCorrection.R @@ -0,0 +1,3 @@ +# Suppress R CMD check note +#' @importFrom tibble tribble +NULL From 2b6dea76996ec57dceb68f6e2bd2bc68cd5d8fe2 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Fri, 26 Aug 2022 11:15:07 -0400 Subject: [PATCH 042/145] formalize tooling funcs --- Backfill_Correction/correct_local_signal.R | 6 ++- .../delphiBackfillCorrection/R/io.R | 2 +- .../delphiBackfillCorrection/R/tooling.R | 43 +++++++++++-------- .../delphiBackfillCorrection/R/utils.R | 18 +++++--- .../man/export_test_result.Rd | 8 +++- .../man/main_local.Rd | 22 +++++----- .../man/run_backfill_local.Rd | 18 ++++---- .../man/validity_checks.Rd | 5 +++ 8 files changed, 73 insertions(+), 49 deletions(-) diff --git a/Backfill_Correction/correct_local_signal.R b/Backfill_Correction/correct_local_signal.R index d5655bfdb..55319c53e 100644 --- a/Backfill_Correction/correct_local_signal.R +++ b/Backfill_Correction/correct_local_signal.R @@ -27,6 +27,8 @@ parser <- add_argument(parser, arg="--ref_lag", type="integer", default = 60, he args = parse_args(parser) main_local(args.data_path, args.export_dir, - args.test_start_date, args.test_end_date, args.traning_days, args.testing_window, - args.value_type, args.num_col, args.denom_col, + args.test_start_date, args.test_end_date, + args.num_col, args.denom_col, + args.value_type, + args.training_days, args.testing_window, args.lambda, args.ref_lag) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index 474c045fe..383370f29 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -26,7 +26,7 @@ read_data <- function(path) { #' #' @export export_test_result <- function(test_data, coef_data, export_dir, - geo_level, test_lag) { + geo_level, test_lag = NULL) { ## TODO why not being used? Probably want test_lag in output name warning("test_lag arg ", test_lag, " not being used") diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R index f7bb1ecb6..e41c7be7f 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R @@ -2,15 +2,15 @@ #' #' @template df-template #' @param export_dir path to save output -#' @template taus-template #' @param test_date_list Date vector of dates to make predictions for -#' @param test_lags integer vector of number of days ago to predict for #' @param value_cols character vector of numerator and/or denominator field names +#' @template value_type-template +#' @template taus-template +#' @param test_lags integer vector of number of days ago to predict for #' @param training_days integer number of days to use for training #' @param testing_window the testing window used for saving the runtime. 
Could #' set it to be 1 if time allows #' @param ref_lag max lag to use for training -#' @template value_type-template #' @param lambda the level of lasso penalty #' @param lp_solver the lp solver used in Quantgen #' @@ -20,10 +20,10 @@ #' @importFrom rlang .data .env #' #' @export -run_backfill_local <- function(df, export_dir, taus = TAUS, - test_date_list, test_lags = TEST_LAGS, - value_cols, training_days = TRAINING_DAYS, testing_window = TESTING_WINDOW, - ref_lag = REF_LAG, value_type, lambda = LAMBDA, lp_solver = LP_SOLVER) { +run_backfill_local <- function(df, export_dir, test_date_list, value_cols, value_type, + taus = TAUS, test_lags = TEST_LAGS, + training_days = TRAINING_DAYS, testing_window = TESTING_WINDOW, + ref_lag = REF_LAG, lambda = LAMBDA, lp_solver = LP_SOLVER) { # Get all the locations that are considered geo_list <- unique(df[df$time_value %in% test_date_list, "geo_value"]) # Build model for each location @@ -64,7 +64,7 @@ run_backfill_local <- function(df, export_dir, taus = TAUS, drop_na() if (nrow(geo_test_data) == 0) next if (nrow(geo_train_data) <= 200) next - if (value_type == "fraction"){ + if (value_type == "ratio"){ geo_prior_test_data = combined_df %>% filter(.data$issue_date > .env$test_date - 7) %>% filter(.data$issue_date <= .env$test_date) @@ -121,14 +121,16 @@ run_backfill_local <- function(df, export_dir, taus = TAUS, #' #' @param input_dir path to the input data files #' @param export_dir path to save output -#' @param test_start_date Date to start making predictions on -#' @param test_end_date Date to stop making predictions on +#' @param test_start_date Date or string in the format "YYYY-MM-DD" to start +#' making predictions on +#' @param test_end_date Date or string in the format "YYYY-MM-DD" to stop +#' making predictions on +#' @param num_col name of numerator column in the input dataframe +#' @param denom_col name of denominator column in the input dataframe +#' @template value_type-template #' @param training_days integer number of days to use for training #' @param testing_window the testing window used for saving the runtime. 
Could #' set it to be 1 if time allows -#' @template value_type-template -#' @param num_col name of numerator column in the input dataframe -#' @param denom_col name of denominator column in the input dataframe #' @param lambda the level of lasso penalty #' @param ref_lag max lag to use for training #' @param lp_solver the lp solver used in Quantgen @@ -137,9 +139,12 @@ run_backfill_local <- function(df, export_dir, taus = TAUS, #' #' @export main_local <- function(input_dir, export_dir, - test_start_date, test_end_date, training_days = TRAINING_DAYS, testing_window = TESTING_WINDOW, - value_type, num_col, denom_col, + test_start_date, test_end_date, + num_col, denom_col,value_type = c("count", "ratio"), + training_days = TRAINING_DAYS, testing_window = TESTING_WINDOW, lambda = LAMBDA, ref_lag = REF_LAG, lp_solver = LP_SOLVER){ + value_type <- match.arg(value_type) + # Check input data df = read_csv(input_dir) @@ -166,8 +171,8 @@ main_local <- function(input_dir, export_dir, # Check available training days training_days_check(df$issue_date, training_days) - run_backfill_local(df, export_dir, TAUS, - test_date_list, TEST_LAGS, - value_cols, training_days, testing_window, - ref_lag, value_type, lambda, lp_solver) + run_backfill_local(df, export_dir, + test_date_list, value_cols, value_type, + TAUS, TEST_LAGS, training_days, testing_window, + ref_lag, lambda, lp_solver) } diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index f5970b28b..ac37f701d 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -89,23 +89,27 @@ create_dir_not_exist <- function(path) #' @template value_type-template #' @param num_col name of numerator column in the input dataframe #' @param denom_col name of denominator column in the input dataframe +#' +#' @return list of input dataframe augmented with lag column, if it +#' didn't already exist, and character vector of one or two value +#' column names, depending on requested `value_type` validity_checks <- function(df, value_type, num_col, denom_col) { # Check data type and required columns if (value_type == "count"){ if (num_col %in% colnames(df)) {value_cols=c(num_col)} else if (denom_col %in% colnames(df)) {value_cols=c(denom_col)} - else { - stop("No valid column name detected for the count values!") - } - } else if (value_type == "fraction"){ + else {stop("No valid column name detected for the count values!")} + } else if (value_type == "ratio"){ value_cols = c(num_col, denom_col) - if ( !any(value_cols %in% colnames(df)) ){ - stop("No valid column name detected for the fraction values!") + if ( any(!(value_cols %in% colnames(df))) ){ + stop("No valid column name detected for the ratio values!") } } # time_value must exist in the dataset - if ( !"time_value" %in% colnames(df) ){stop("No column for the reference date")} + if ( !"time_value" %in% colnames(df) ) { + stop("No 'time_value' column detected for the reference date!") + } # issue_date or lag should exist in the dataset if ( !"lag" %in% colnames(df) ){ diff --git a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd index 47988a0c4..4a8c76241 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd @@ -4,7 +4,13 @@ \alias{export_test_result} \title{Export the 
result to customized directory} \usage{ -export_test_result(test_data, coef_data, export_dir, geo_level, test_lag) +export_test_result( + test_data, + coef_data, + export_dir, + geo_level, + test_lag = NULL +) } \arguments{ \item{test_data}{test data containing prediction results} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd b/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd index f6f1d185b..077274914 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd @@ -9,11 +9,11 @@ main_local( export_dir, test_start_date, test_end_date, - training_days = TRAINING_DAYS, - testing_window = TESTING_WINDOW, - value_type, num_col, denom_col, + value_type = c("count", "ratio"), + training_days = TRAINING_DAYS, + testing_window = TESTING_WINDOW, lambda = LAMBDA, ref_lag = REF_LAG, lp_solver = LP_SOLVER @@ -24,20 +24,22 @@ main_local( \item{export_dir}{path to save output} -\item{test_start_date}{Date to start making predictions on} +\item{test_start_date}{Date or string in the format "YYYY-MM-DD" to start +making predictions on} -\item{test_end_date}{Date to stop making predictions on} +\item{test_end_date}{Date or string in the format "YYYY-MM-DD" to stop +making predictions on} -\item{training_days}{integer number of days to use for training} +\item{num_col}{name of numerator column in the input dataframe} -\item{testing_window}{the testing window used for saving the runtime. Could -set it to be 1 if time allows} +\item{denom_col}{name of denominator column in the input dataframe} \item{value_type}{string describing signal type. Either "count" or "ratio".} -\item{num_col}{name of numerator column in the input dataframe} +\item{training_days}{integer number of days to use for training} -\item{denom_col}{name of denominator column in the input dataframe} +\item{testing_window}{the testing window used for saving the runtime. Could +set it to be 1 if time allows} \item{lambda}{the level of lasso penalty} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd index 4fac25eec..42902abe4 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd @@ -7,14 +7,14 @@ run_backfill_local( df, export_dir, - taus = TAUS, test_date_list, - test_lags = TEST_LAGS, value_cols, + value_type, + taus = TAUS, + test_lags = TEST_LAGS, training_days = TRAINING_DAYS, testing_window = TESTING_WINDOW, ref_lag = REF_LAG, - value_type, lambda = LAMBDA, lp_solver = LP_SOLVER ) @@ -25,15 +25,17 @@ reported for each reference date and issue date.} \item{export_dir}{path to save output} +\item{test_date_list}{Date vector of dates to make predictions for} + +\item{value_cols}{character vector of numerator and/or denominator field names} + +\item{value_type}{string describing signal type. Either "count" or "ratio".} + \item{taus}{numeric vector of quantiles to be predicted. Values must be between 0 and 1.} -\item{test_date_list}{Date vector of dates to make predictions for} - \item{test_lags}{integer vector of number of days ago to predict for} -\item{value_cols}{character vector of numerator and/or denominator field names} - \item{training_days}{integer number of days to use for training} \item{testing_window}{the testing window used for saving the runtime. 
Could @@ -41,8 +43,6 @@ set it to be 1 if time allows} \item{ref_lag}{max lag to use for training} -\item{value_type}{string describing signal type. Either "count" or "ratio".} - \item{lambda}{the level of lasso penalty} \item{lp_solver}{the lp solver used in Quantgen} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd b/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd index 78f463506..865126f97 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd @@ -16,6 +16,11 @@ reported for each reference date and issue date.} \item{denom_col}{name of denominator column in the input dataframe} } +\value{ +list of input dataframe augmented with lag column, if it + didn't already exist, and character vector of one or two value + column names, depending on requested `value_type` +} \description{ Check input data for validity } From 0eb4757599cafcc78637ca394c78bd7cb817d9aa Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Fri, 26 Aug 2022 11:28:09 -0400 Subject: [PATCH 043/145] make validity checks work for multiple suffixes --- .../delphiBackfillCorrection/R/main.R | 12 +++++------- .../delphiBackfillCorrection/R/utils.R | 12 +++++++++--- .../delphiBackfillCorrection/man/validity_checks.Rd | 8 +++++++- 3 files changed, 21 insertions(+), 11 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 64f43800f..641272a6b 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -9,11 +9,7 @@ #' @template params-template #' @template refd_col-template #' @template lag_col-template -#' @param signal_suffixes character vector specifying value column name -#' endings to be appended to standard value column names from -#' `params$num_col` and `params$denom_col`. Used for non-standard -#' value column names and when processing multiple signals from a -#' single input dataframe, as with `quidel`'s age buckets. 
+#' @template signal_suffixes-template #' #' @importFrom dplyr %>% filter #' @importFrom tidyr drop_na @@ -173,8 +169,10 @@ main <- function(params){ } # Check data type and required columns - ## TODO num and denom names need suffixes to be checked properly - result <- validity_checks(input_data, input_group$value_type, params$num_col, params$denom_col) + result <- validity_checks( + input_data, input_group$value_type, + params$num_col, params$denom_col, input_group$name_suffix + ) input_data <- result[["df"]] # Check available training days diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index ac37f701d..00c7f0a9d 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -89,15 +89,21 @@ create_dir_not_exist <- function(path) #' @template value_type-template #' @param num_col name of numerator column in the input dataframe #' @param denom_col name of denominator column in the input dataframe +#' @template signal_suffixes-template #' #' @return list of input dataframe augmented with lag column, if it #' didn't already exist, and character vector of one or two value #' column names, depending on requested `value_type` -validity_checks <- function(df, value_type, num_col, denom_col) { +validity_checks <- function(df, value_type, num_col, denom_col, signal_suffixes) { + if (!missing(signal_suffixes)) { + num_col <- paste(num_col, signal_suffixes, sep = "_") + denom_col <- paste(num_col, signal_suffixes, sep = "_") + } + # Check data type and required columns if (value_type == "count"){ - if (num_col %in% colnames(df)) {value_cols=c(num_col)} - else if (denom_col %in% colnames(df)) {value_cols=c(denom_col)} + if (all(num_col %in% colnames(df))) {value_cols=c(num_col)} + else if (all(denom_col %in% colnames(df))) {value_cols=c(denom_col)} else {stop("No valid column name detected for the count values!")} } else if (value_type == "ratio"){ value_cols = c(num_col, denom_col) diff --git a/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd b/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd index 865126f97..1abff193a 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd @@ -4,7 +4,7 @@ \alias{validity_checks} \title{Check input data for validity} \usage{ -validity_checks(df, value_type, num_col, denom_col) +validity_checks(df, value_type, num_col, denom_col, signal_suffixes) } \arguments{ \item{df}{Data Frame of aggregated counts within a single location @@ -15,6 +15,12 @@ reported for each reference date and issue date.} \item{num_col}{name of numerator column in the input dataframe} \item{denom_col}{name of denominator column in the input dataframe} + +\item{signal_suffixes}{character vector specifying value column name +endings to be appended to standard value column names from +`params$num_col` and `params$denom_col`. 
Used for non-standard +value column names and when processing multiple signals from a +single input dataframe, as with `quidel`'s age buckets.} } \value{ list of input dataframe augmented with lag column, if it From cd511265613b5a28c73289242fc11c1d9322afcf Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Fri, 26 Aug 2022 11:38:37 -0400 Subject: [PATCH 044/145] include test_lag in output filename --- .../delphiBackfillCorrection/R/io.R | 20 +++++++++++-------- .../man/export_test_result.Rd | 8 +------- .../man/get_files_list.Rd | 2 +- 3 files changed, 14 insertions(+), 16 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index 383370f29..b721c4678 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -23,17 +23,21 @@ read_data <- function(path) { #' @template test_lag-template #' #' @importFrom readr write_csv +#' @importFrom stringr str_interp #' #' @export export_test_result <- function(test_data, coef_data, export_dir, - geo_level, test_lag = NULL) { - ## TODO why not being used? Probably want test_lag in output name - warning("test_lag arg ", test_lag, " not being used") + geo_level, test_lag) { + if (!missing(test_lag)) { + base_name = str_interp("{geo_level}_lag{test_lag}.csv") + } else { + base_name = str_interp("{geo_level}.csv") + } - pred_output_dir = paste("prediction", geo_level, ".csv", sep="_") + pred_output_dir = str_interp("prediction_{base_name}") write_csv(test_data, file.path(export_dir, pred_output_dir)) - coef_output_dir = paste("coefs", geo_level, ".csv", sep="_") + coef_output_dir = str_interp("coefs_{base_name}") write_csv(test_data, file.path(export_dir, coef_output_dir)) } @@ -45,10 +49,10 @@ export_test_result <- function(test_data, coef_data, export_dir, #' @template params-template #' @param sub_dir string specifying the indicator-specific directory within #' the general input directory `params$input_dir` -get_files_list <- function(indicator, signal, geo_level, params, sub_dir = "") { +get_files_list <- function(indicator, signal, geo_level, params, sub_dir) { # Make sure we're reading in both 4-week rollup and daily files. 
- if (!is.null(sub_dir) && sub_dir != "") { - input_dir <- paste(params$input_dir, sub_dir, sep="_") + if (!missing(sub_dir)) { + input_dir <- file.path(params$input_dir, sub_dir) } else { input_dir <- params$input_dir } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd index 4a8c76241..47988a0c4 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd @@ -4,13 +4,7 @@ \alias{export_test_result} \title{Export the result to customized directory} \usage{ -export_test_result( - test_data, - coef_data, - export_dir, - geo_level, - test_lag = NULL -) +export_test_result(test_data, coef_data, export_dir, geo_level, test_lag) } \arguments{ \item{test_data}{test data containing prediction results} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd b/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd index ec68ff7d5..c78417835 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd @@ -4,7 +4,7 @@ \alias{get_files_list} \title{List valid input files.} \usage{ -get_files_list(indicator, signal, geo_level, params, sub_dir = "") +get_files_list(indicator, signal, geo_level, params, sub_dir) } \arguments{ \item{indicator}{string specifying the name of the indicator as used in From 540a3a377dfc3a2053b558a6879d92d9017293ab Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Fri, 26 Aug 2022 12:19:39 -0400 Subject: [PATCH 045/145] add more templating to docs --- .../R/beta_prior_estimation.R | 16 +++++----- .../delphiBackfillCorrection/R/io.R | 8 ++--- .../delphiBackfillCorrection/R/model.R | 8 ++--- .../R/preprocessing.R | 12 +++---- .../delphiBackfillCorrection/R/tooling.R | 32 +++++++++---------- .../delphiBackfillCorrection/R/utils.R | 6 ++-- .../man-roxygen/covariates-template.R | 1 + .../man-roxygen/denom_col-template.R | 1 + .../man-roxygen/export_dir-template.R | 1 + .../man-roxygen/input_dir-template.R | 1 + .../man-roxygen/lambda-template.R | 1 + .../man-roxygen/lp_solver-template.R | 4 +++ .../man-roxygen/num_col-template.R | 1 + .../man-roxygen/ref_lag-template.R | 1 + .../man-roxygen/signal_suffixes-template.R | 5 +++ .../man-roxygen/testing_window-template.R | 2 ++ .../man-roxygen/time_col-template.R | 2 ++ .../man-roxygen/train_data-template.R | 1 + .../man-roxygen/training_days-template.R | 1 + .../man/add_dayofweek.Rd | 4 +-- .../man/add_sqrtscale.Rd | 2 +- .../man/add_weekofmonth.Rd | 4 +-- .../man/est_priors.Rd | 7 ++-- .../man/export_test_result.Rd | 2 +- .../man/main_local.Rd | 9 ++++-- .../man/model_training_and_testing.Rd | 9 ++++-- .../delphiBackfillCorrection/man/ratio_adj.Rd | 7 ++-- .../man/ratio_adj_with_pseudo.Rd | 4 +-- .../delphiBackfillCorrection/man/read_data.Rd | 4 +-- .../man/run_backfill_local.Rd | 7 ++-- 30 files changed, 98 insertions(+), 65 deletions(-) create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/covariates-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/denom_col-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/export_dir-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/input_dir-template.R create mode 100644 
Backfill_Correction/delphiBackfillCorrection/man-roxygen/lambda-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/lp_solver-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/num_col-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/ref_lag-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal_suffixes-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/testing_window-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/time_col-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/train_data-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/training_days-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R index a81b421d4..cac47f477 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R @@ -29,14 +29,14 @@ objective <- function(theta, x, prob, ...) { #' Estimate the priors for the beta distribution based on data for #' a certain day of a week #' -#' @param train_data Data Frame for training +#' @template train_data-template #' @param prior_test_data Data Frame for testing #' @param dw column name to indicate which day of a week it is #' @template taus-template -#' @param covariates character vector of column names serving as the covariates for the model +#' @template covariates-template #' @param response the column name of the response variable -#' @param lp_solver the lp solver used in Quantgen -#' @param lambda the level of lasso penalty +#' @template lp_solver-template +#' @template lambda-template #' @param start the initialization of the the points in nlm #' @param base_pseudo_denom the pseudo counts added to denominator if little data for training #' @param base_pseudo_num the pseudo counts added to numerator if little data for training @@ -82,8 +82,8 @@ est_priors <- function(train_data, prior_test_data, dw, taus, #' @param dw character to indicate the day of a week. 
Can be NULL for all the days #' @param pseudo_num the estimated counts to be added to numerators #' @param pseudo_denom the estimated counts to be added to denominators -#' @param num_col the column name for the numerator -#' @param denom_col the column name for the denominator +#' @template num_col-template +#' @template denom_col-template #' #' @export ratio_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, denom_col){ @@ -99,11 +99,11 @@ ratio_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, d #' Update ratio using beta prior approach #' -#' @param train_data training data +#' @template train_data-template #' @param test_data testing data #' @param prior_test_data testing data for the lag -1 model #' @template taus-template -#' @param lp_solver the lp solver used in Quantgen +#' @template lp_solver-template #' #' @export ratio_adj <- function(train_data, test_data, prior_test_data, taus = TAUS, lp_solver = LP_SOLVER){ diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index b721c4678..10583fc55 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -1,14 +1,14 @@ #' Read a parquet file into a dataframe #' -#' @param path path to the input data +#' @template input_dir-template #' #' @importFrom arrow read_parquet #' @importFrom dplyr select %>% #' @importFrom rlang .data #' #' @export -read_data <- function(path) { - df <- read_parquet(path, as_data_frame = TRUE) %>% +read_data <- function(input_dir) { + df <- read_parquet(input_dir, as_data_frame = TRUE) %>% ## TODO make this more robust select(-.data$`__index_level_0__`) return (df) @@ -18,7 +18,7 @@ read_data <- function(path) { #' #' @param test_data test data containing prediction results #' @param coef_data data frame containing the estimated coefficients -#' @param export_dir export directory +#' @template export_dir-template #' @template geo_level-template #' @template test_lag-template #' diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index 7d3cb53c1..cf5d0d96d 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -36,12 +36,12 @@ data_filteration <- function(test_lag, geo_train_data, geo_test_data){ #' Model training and prediction using quantile regression with Lasso penalty #' The quantile regression uses the quantile_lasso function from quantgen package #' -#' @param train_data Data frame for training +#' @template train_data-template #' @param test_data Data frame for testing #' @template taus-template -#' @param covariates list of column names serving as the covariates for the model -#' @param lp_solver the lp solver used in Quantgen -#' @param lambda the level of lasso penalty +#' @template covariates-template +#' @template lp_solver-template +#' @template lambda-template #' @param test_date Date object representing test date #' @param geo string specifying the name of the geo region (e.g. 
FIPS #' code for counties) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R index 9e9801afd..bd4c365b3 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R @@ -14,7 +14,7 @@ #' @template lag_col-template #' @param min_refd the earliest reference date considered in the data #' @param max_refd the latest reference date considered in the data -#' @param ref_lag max lag to use for training +#' @template ref_lag-template #' #' @return df_new Data Frame with filled rows for missing lags #' @@ -102,8 +102,7 @@ add_shift <- function(df, n_day, refd_col){ #' #' @template df-template #' @param wd vector of days of a week -#' @param time_col column used for the date, can be either reference date or -#' issue date +#' @template time_col-template #' @param suffix suffix added to indicate which kind of date is used #' #' @export @@ -143,8 +142,7 @@ get_weekofmonth <- function(date){ #' #' @template df-template #' @param wm vector of weeks of a month -#' @param time_col string specifying name of column used for the date, -#' can be either reference date or issue date +#' @template time_col-template #' #' @export add_weekofmonth <- function(df, wm = WEEK_ISSUES, time_col){ @@ -161,7 +159,7 @@ add_weekofmonth <- function(df, wm = WEEK_ISSUES, time_col){ #' @template value_col-template #' @template refd_col-template #' @template lag_col-template -#' @param ref_lag max lag to use for training +#' @template ref_lag-template #' #' @importFrom dplyr %>% #' @importFrom tidyr pivot_wider drop_na @@ -223,7 +221,7 @@ add_params_for_dates <- function(df, refd_col, lag_col){ #' Add columns to indicate the scale of value at square root level #' -#' @param train_data Data Frame for training +#' @template train_data-template #' @param test_data Data Frame for testing #' @param max_raw the maximum value in the training data at square root level #' @template value_col-template diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R index e41c7be7f..8d4754fea 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R @@ -1,18 +1,17 @@ #' Corrected estimates from a single local signal #' #' @template df-template -#' @param export_dir path to save output +#' @template export_dir-template #' @param test_date_list Date vector of dates to make predictions for #' @param value_cols character vector of numerator and/or denominator field names #' @template value_type-template #' @template taus-template #' @param test_lags integer vector of number of days ago to predict for -#' @param training_days integer number of days to use for training -#' @param testing_window the testing window used for saving the runtime. 
Could -#' set it to be 1 if time allows -#' @param ref_lag max lag to use for training -#' @param lambda the level of lasso penalty -#' @param lp_solver the lp solver used in Quantgen +#' @template training_days-template +#' @template testing_window-template +#' @template ref_lag-template +#' @template lambda-template +#' @template lp_solver-template #' #' @importFrom dplyr %>% filter #' @importFrom plyr rbind.fill @@ -119,21 +118,20 @@ run_backfill_local <- function(df, export_dir, test_date_list, value_cols, value #' Main function to correct a single local signal #' -#' @param input_dir path to the input data files -#' @param export_dir path to save output +#' @template input_dir-template +#' @template export_dir-template #' @param test_start_date Date or string in the format "YYYY-MM-DD" to start #' making predictions on #' @param test_end_date Date or string in the format "YYYY-MM-DD" to stop #' making predictions on -#' @param num_col name of numerator column in the input dataframe -#' @param denom_col name of denominator column in the input dataframe +#' @template num_col-template +#' @template denom_col-template #' @template value_type-template -#' @param training_days integer number of days to use for training -#' @param testing_window the testing window used for saving the runtime. Could -#' set it to be 1 if time allows -#' @param lambda the level of lasso penalty -#' @param ref_lag max lag to use for training -#' @param lp_solver the lp solver used in Quantgen +#' @template training_days-template +#' @template testing_window-template +#' @template lambda-template +#' @template ref_lag-template +#' @template lp_solver-template #' #' @importFrom readr read_csv #' diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index 00c7f0a9d..9d33a1c70 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -87,8 +87,8 @@ create_dir_not_exist <- function(path) #' #' @template df-template #' @template value_type-template -#' @param num_col name of numerator column in the input dataframe -#' @param denom_col name of denominator column in the input dataframe +#' @template num_col-template +#' @template denom_col-template #' @template signal_suffixes-template #' #' @return list of input dataframe augmented with lag column, if it @@ -131,7 +131,7 @@ validity_checks <- function(df, value_type, num_col, denom_col, signal_suffixes) #' Check available training days #' #' @param issue_date contents of input data's `issue_date` column -#' @param training_days integer number of days to use for training +#' @template training_days-template training_days_check <- function(issue_date, training_days = TRAINING_DAYS) { valid_training_days = as.integer(max(issue_date) - min(issue_date)) if (training_days > valid_training_days){ diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/covariates-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/covariates-template.R new file mode 100644 index 000000000..b343ffea6 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/covariates-template.R @@ -0,0 +1 @@ +#' @param covariates character vector of column names serving as the covariates for the model diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/denom_col-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/denom_col-template.R new file mode 100644 index 000000000..8b16d87bb 
--- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/denom_col-template.R @@ -0,0 +1 @@ +#' @param denom_col name of denominator column in the input dataframe diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/export_dir-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/export_dir-template.R new file mode 100644 index 000000000..4d933cada --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/export_dir-template.R @@ -0,0 +1 @@ +#' @param export_dir path to directory to save output to diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/input_dir-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/input_dir-template.R new file mode 100644 index 000000000..a17583499 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/input_dir-template.R @@ -0,0 +1 @@ +#' @param input_dir path to the directory containing input data diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/lambda-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/lambda-template.R new file mode 100644 index 000000000..aacbb3865 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/lambda-template.R @@ -0,0 +1 @@ +#' @param lambda the level of lasso penalty diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/lp_solver-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/lp_solver-template.R new file mode 100644 index 000000000..d42a4435b --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/lp_solver-template.R @@ -0,0 +1,4 @@ +#' @param lp_solver string specifying the lp solver to use in +#' Quantgen fitting. Either "glpk" or "gurobi". For faster +#' optimization, use Gurobi (requires separate installation +#' of the `gurobi` package). diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/num_col-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/num_col-template.R new file mode 100644 index 000000000..76b0aa148 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/num_col-template.R @@ -0,0 +1 @@ +#' @param num_col name of numerator column in the input dataframe diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/ref_lag-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/ref_lag-template.R new file mode 100644 index 000000000..b10e188c4 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/ref_lag-template.R @@ -0,0 +1 @@ +#' @param ref_lag max lag to use for training diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal_suffixes-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal_suffixes-template.R new file mode 100644 index 000000000..e58e6cc4e --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal_suffixes-template.R @@ -0,0 +1,5 @@ +#' @param signal_suffixes character vector specifying value column name +#' endings to be appended to standard value column names from +#' `params$num_col` and `params$denom_col`. Used for non-standard +#' value column names and when processing multiple signals from a +#' single input dataframe, as with `quidel`'s age buckets. 
diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/testing_window-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/testing_window-template.R new file mode 100644 index 000000000..60b6c847b --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/testing_window-template.R @@ -0,0 +1,2 @@ +#' @param testing_window the testing window used for saving the runtime. Could +#' set it to be 1 if time allows diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/time_col-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/time_col-template.R new file mode 100644 index 000000000..3be84de74 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/time_col-template.R @@ -0,0 +1,2 @@ +#' @param time_col string specifying name of column used for the +#' date, can be either reference date or issue date diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/train_data-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/train_data-template.R new file mode 100644 index 000000000..2c8fd3de6 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/train_data-template.R @@ -0,0 +1 @@ +#' @param train_data Data Frame containing training data diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/training_days-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/training_days-template.R new file mode 100644 index 000000000..32f6c3a9d --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/training_days-template.R @@ -0,0 +1 @@ +#' @param training_days integer number of days to use for training diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd index 8e4b338eb..478d571a3 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd @@ -13,8 +13,8 @@ reported for each reference date and issue date.} \item{wd}{vector of days of a week} -\item{time_col}{column used for the date, can be either reference date or -issue date} +\item{time_col}{string specifying name of column used for the +date, can be either reference date or issue date} \item{suffix}{suffix added to indicate which kind of date is used} } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd index 2dcf7e147..8b4d55222 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd @@ -7,7 +7,7 @@ add_sqrtscale(train_data, test_data, max_raw, value_col) } \arguments{ -\item{train_data}{Data Frame for training} +\item{train_data}{Data Frame containing training data} \item{test_data}{Data Frame for testing} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd index 8202e10f9..a6cc97d85 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd @@ -12,8 +12,8 @@ reported for each reference date and issue date.} \item{wm}{vector of weeks of a month} -\item{time_col}{string specifying name of column used for the date, -can be either reference date or issue date} +\item{time_col}{string specifying 
name of column used for the +date, can be either reference date or issue date} } \description{ Add one hot encoding for week of a month info in terms of issue date diff --git a/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd b/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd index 87f7155c2..1a95c8dcb 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd @@ -21,7 +21,7 @@ est_priors( ) } \arguments{ -\item{train_data}{Data Frame for training} +\item{train_data}{Data Frame containing training data} \item{prior_test_data}{Data Frame for testing} @@ -34,7 +34,10 @@ must be between 0 and 1.} \item{response}{the column name of the response variable} -\item{lp_solver}{the lp solver used in Quantgen} +\item{lp_solver}{string specifying the lp solver to use in +Quantgen fitting. Either "glpk" or "gurobi". For faster +optimization, use Gurobi (requires separate installation +of the `gurobi` package).} \item{lambda}{the level of lasso penalty} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd index 47988a0c4..c24dfdbbb 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd @@ -11,7 +11,7 @@ export_test_result(test_data, coef_data, export_dir, geo_level, test_lag) \item{coef_data}{data frame containing the estimated coefficients} -\item{export_dir}{export directory} +\item{export_dir}{path to directory to save output to} \item{geo_level}{string describing geo coverage of input data. Either "state" or "county".} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd b/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd index 077274914..ee9d41d49 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd @@ -20,9 +20,9 @@ main_local( ) } \arguments{ -\item{input_dir}{path to the input data files} +\item{input_dir}{path to the directory containing input data} -\item{export_dir}{path to save output} +\item{export_dir}{path to directory to save output to} \item{test_start_date}{Date or string in the format "YYYY-MM-DD" to start making predictions on} @@ -45,7 +45,10 @@ set it to be 1 if time allows} \item{ref_lag}{max lag to use for training} -\item{lp_solver}{the lp solver used in Quantgen} +\item{lp_solver}{string specifying the lp solver to use in +Quantgen fitting. Either "glpk" or "gurobi". For faster +optimization, use Gurobi (requires separate installation +of the `gurobi` package).} } \description{ Main function to correct a single local signal diff --git a/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd b/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd index 97cebcbc5..156ed8484 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd @@ -17,16 +17,19 @@ model_training_and_testing( ) } \arguments{ -\item{train_data}{Data frame for training} +\item{train_data}{Data Frame containing training data} \item{test_data}{Data frame for testing} \item{taus}{numeric vector of quantiles to be predicted. 
Values must be between 0 and 1.} -\item{covariates}{list of column names serving as the covariates for the model} +\item{covariates}{character vector of column names serving as the covariates for the model} -\item{lp_solver}{the lp solver used in Quantgen} +\item{lp_solver}{string specifying the lp solver to use in +Quantgen fitting. Either "glpk" or "gurobi". For faster +optimization, use Gurobi (requires separate installation +of the `gurobi` package).} \item{lambda}{the level of lasso penalty} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd index c23602f42..e13db45d5 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd @@ -13,7 +13,7 @@ ratio_adj( ) } \arguments{ -\item{train_data}{training data} +\item{train_data}{Data Frame containing training data} \item{test_data}{testing data} @@ -22,7 +22,10 @@ ratio_adj( \item{taus}{numeric vector of quantiles to be predicted. Values must be between 0 and 1.} -\item{lp_solver}{the lp solver used in Quantgen} +\item{lp_solver}{string specifying the lp solver to use in +Quantgen fitting. Either "glpk" or "gurobi". For faster +optimization, use Gurobi (requires separate installation +of the `gurobi` package).} } \description{ Update ratio using beta prior approach diff --git a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd index fe1d6c652..7f41eca6d 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd @@ -15,9 +15,9 @@ ratio_adj_with_pseudo(data, dw, pseudo_num, pseudo_denom, num_col, denom_col) \item{pseudo_denom}{the estimated counts to be added to denominators} -\item{num_col}{the column name for the numerator} +\item{num_col}{name of numerator column in the input dataframe} -\item{denom_col}{the column name for the denominator} +\item{denom_col}{name of denominator column in the input dataframe} } \description{ Update ratio based on the pseudo counts for numerators and denominators diff --git a/Backfill_Correction/delphiBackfillCorrection/man/read_data.Rd b/Backfill_Correction/delphiBackfillCorrection/man/read_data.Rd index 75ae00b8a..1b5f24726 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/read_data.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/read_data.Rd @@ -4,10 +4,10 @@ \alias{read_data} \title{Read a parquet file into a dataframe} \usage{ -read_data(path) +read_data(input_dir) } \arguments{ -\item{path}{path to the input data} +\item{input_dir}{path to the directory containing input data} } \description{ Read a parquet file into a dataframe diff --git a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd index 42902abe4..5b6a28871 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd @@ -23,7 +23,7 @@ run_backfill_local( \item{df}{Data Frame of aggregated counts within a single location reported for each reference date and issue date.} -\item{export_dir}{path to save output} +\item{export_dir}{path to directory to save output to} \item{test_date_list}{Date vector of dates to make predictions for} @@ -45,7 +45,10 @@ set it to 
be 1 if time allows}

\item{lambda}{the level of lasso penalty}

-\item{lp_solver}{the lp solver used in Quantgen}
+\item{lp_solver}{string specifying the lp solver to use in
+Quantgen fitting. Either "glpk" or "gurobi". For faster
+optimization, use Gurobi (requires separate installation
+of the `gurobi` package).}
 }
 \description{
 Corrected estimates from a single local signal

From 788f3c910d8d7ee63c3d72044fef363b0e26bf52 Mon Sep 17 00:00:00 2001
From: Nat DeFries <42820733+nmdefries@users.noreply.github.com>
Date: Fri, 26 Aug 2022 13:06:23 -0400
Subject: [PATCH 046/145] more info in readme

---
 Backfill_Correction/Makefile               |   3 -
 Backfill_Correction/README.md              | 123 ++++++++++++++++--
 Backfill_Correction/correct_local_signal.R |   6 +-
 .../delphiBackfillCorrection/DESCRIPTION    |   2 +-
 .../delphiBackfillCorrection/R/utils.R      |   7 +-
 .../unit-tests/testthat/helper-relativize.R |  13 ++
 Backfill_Correction/params.json.template    |   1 +
 7 files changed, 137 insertions(+), 18 deletions(-)
 create mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/helper-relativize.R

diff --git a/Backfill_Correction/Makefile b/Backfill_Correction/Makefile
index ebd308367..76b4d1fd1 100644
--- a/Backfill_Correction/Makefile
+++ b/Backfill_Correction/Makefile
@@ -29,6 +29,3 @@ test: delphiBackfillCorrection_1.0.tar.gz

 delphiBackfillCorrection_1.0.tar.gz: $(wildcard delphiBackfillCorrection/R/*.R)
 	R CMD build delphiBackfillCorrection
-
-validate-covidcast:
-	@echo validate-covidcast not yet implemented
diff --git a/Backfill_Correction/README.md b/Backfill_Correction/README.md
index 8784fcbc7..1f9d4af90 100644
--- a/Backfill_Correction/README.md
+++ b/Backfill_Correction/README.md
@@ -1,15 +1,122 @@
-Requirement for the input data
+# Backfill Correction
+
+## Running the Pipeline
+
+The indicator is run by installing the package `delphiBackfillCorrection` and
+running the script "run.R". To install the package, run the following code
+from this directory:
+
+```
+make install
+```
+
+All of the user-changeable parameters are stored in `params.json`. A basic
+template is included as `params.json.template`. Default values are provided
+for most parameters; `input_dir` is the only required parameter.
+
+To execute the module and produce the output datasets (by default, in
+`receiving`), run the following:
+
+```
+Rscript run.R
+```
+
+The functions in `tooling.R` are provided as a user-friendly way to run
+backfill corrections on any dataset that the user has on hand. This local
+processing can be done by running the following from this directory:
+
+```
+Rscript correct_local_signal.R
+```
+
+Default values are provided for most parameters; `input_dir`,
+`test_start_date`, and `test_end_date` must be provided as command line
+arguments.
+
+## Building and testing the code
+
+The documentation for the package is written using the **roxygen2** package. To
+(re)-create this documentation for the package, run the following from the package
+directory:
+
+```
+make lib
+```
+
+Testing the package is done with the built-in R package checks (which include
+both static and dynamic checks), as well as unit tests written with
+**testthat**. To run all of these, use the following from within this
+directory:
+
+```
+make test
+```
+
+None of the tests should fail; notes and warnings should be manually
+checked for issues. To see the code coverage from the tests and examples, run
+the following:
+
+```
+make coverage
+```
+
+There should be good coverage of all the core functions in the package.
+ +### Writing tests + +Because the package tests involve reading and writing files, we must be +careful with working directories to ensure the tests are portable. + +For reading and writing to files contained in the `tests/testthat/` directory, +use the `testthat::test_path` function. It works much like `file.path` but +automatically provides paths relative to `tests/testthat/`, so e.g. +`test_path("input")` becomes `tests/testthat/input/` or whatever relative path +is needed to get there. + +`params.json` files contain paths, so `tests/testthat/helper-relativize.R` +contains `relativize_params`, which takes a `params` list and applies +`test_path` to all of its path components. This object can then be passed to +anything that needs it to read or write files. + +### Testing during development + +Repeatedly building the package and running the full check suite is tedious if +you are working on fixing a failing test. A faster workflow is this: + +1. Set your R working directory to `delphiFacebook/tests/testthat`. +2. Run `testthat::test_dir('.')` + +This will test the live code without having to rebuild the package. + +## Outline of the Indicator + +TODO + +### Data requirements + +Required columns with fixed column names: -Required columns with fixed column names - geo_value: strings or floating numbers to indicate the location -- time_value: reference date. +- time_value: reference date - lag: the number of days between issue date and the reference date - issue_date: issue date/report, required if lag is not available -Required columns without fixed column names -- num_col: the column for the number of reported counts of the numerator. e.g. the number of COVID claims counts according to the insurance data. -- denom_col: the column for the number of reported counts of the denominator. e.g. the number of total claims counts according to the insurance data. Required if considering the backfill correction of ratios. +Required columns without fixed column names: + +- num_col: the column for the number of reported counts of the numerator. e.g. + the number of COVID claims counts according to insurance data. +- denom_col: the column for the number of reported counts of the denominator. + e.g. the number of total claims counts according to insurance data. Required + if correcting ratios. + +## Output Files -The scripts except for tooling.R is used to create a pipeline that can help create backfill correction for specified Delphi Covidcast indicators. +The pipeline produces two output types: -The script tooling.R is used to provide a user-friendly way people to crate backfill correction for any dataset that they have in hand before we have the backfill correction officially available in `epiprocess`. +1. Predictions +2. Model objects. In production, models are trained on the last year of + versions (as-of dates) and the last year of reference (report) dates. For + one signal at the state level, a model takes about 30 minutes to train. Due + to resource limitations in production, we only train models once a month + and save the model objects between runs. By default, these are saved to the + `cache` directory. 
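For reference, a toy slice of input data meeting the "Data requirements" listed in the README above might look like the following R sketch. All values are invented, and the `num`/`den` column names are simply the `num_col`/`denom_col` defaults from `params.json.template`; only `geo_value`, `time_value`, `lag`, and `issue_date` are fixed names.

```
# Hypothetical example: one county, one reference date, reported on three
# consecutive issue dates (all numbers invented for illustration).
toy_input <- data.frame(
  geo_value  = "42003",                        # location identifier (e.g. county FIPS)
  time_value = as.Date("2022-01-01"),          # reference date
  issue_date = as.Date(c("2022-01-02", "2022-01-03", "2022-01-04")),
  num        = c(10, 14, 15),                  # numerator counts reported so far
  den        = c(100, 130, 140)                # denominator counts reported so far
)
# lag = number of days between issue date and reference date
toy_input$lag <- as.integer(toy_input$issue_date - toy_input$time_value)
```

Each later issue date revises the counts for the same reference date upward; those revisions are what the pipeline models.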
diff --git a/Backfill_Correction/correct_local_signal.R b/Backfill_Correction/correct_local_signal.R index 55319c53e..ed40e4a11 100644 --- a/Backfill_Correction/correct_local_signal.R +++ b/Backfill_Correction/correct_local_signal.R @@ -13,12 +13,12 @@ suppressPackageStartupMessages({ parser <- arg_parser(description='Process commandline arguments') -parser <- add_argument(parser, arg="--data_path", type="character", help = "Path to the input file") +parser <- add_argument(parser, arg="--input_dir", type="character", help = "Path to the input file") parser <- add_argument(parser, arg="--export_dir", type="character", default = "../export_dir", help = "Pth to the export directory") parser <- add_argument(parser, arg="--test_start_date", type="character", help = "Should be in the format as '2020-01-01'") parser <- add_argument(parser, arg="--test_end_date", type="character", help = "Should be in the format as '2020-01-01'") parser <- add_argument(parser, arg="--testing_window", type="integer", default = 1, help = "The number of issue dates for testing per trained model") -parser <- add_argument(parser, arg="--value_type", type="character", default = "fraction", help = "Can be 'count' or 'fraction'") +parser <- add_argument(parser, arg="--value_type", type="character", default = "ratio", help = "Can be 'count' or 'fraction'") parser <- add_argument(parser, arg="--num_col", type="character", default = "num", help = "The column name for the numerator") parser <- add_argument(parser, arg="--denum_col", type="character", default = "den", help = "The column name for the denominator") parser <- add_argument(parser, arg="--lambda", type="character", default = 0.1, help = "The parameter lambda for the lasso regression") @@ -26,7 +26,7 @@ parser <- add_argument(parser, arg="--training_days", type="integer", default = parser <- add_argument(parser, arg="--ref_lag", type="integer", default = 60, help = "The lag that is set to be the reference") args = parse_args(parser) -main_local(args.data_path, args.export_dir, +main_local(args.input_dir, args.export_dir, args.test_start_date, args.test_end_date, args.num_col, args.denom_col, args.value_type, diff --git a/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION b/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION index a550a521f..d776a88c5 100644 --- a/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION +++ b/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION @@ -6,7 +6,7 @@ Date: 2022-08-24 Author: Jingjing Tang Maintainer: Jingjing Tang Description: Takes formatted output from COVIDcast API data pipelines and - adjusted unusual values using a lasso-penalized quantile regression. + adjusts unusual values using a lasso-penalized quantile regression. Output is used for research and model development. 
License: file LICENSE Depends: diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index 9d33a1c70..4d1672733 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -40,11 +40,12 @@ read_params <- function(path = "params.json", template_path = "params.json.templ if (!("input_dir" %in% names(params)) || dir.exists(params$input_dir)) { stop("input_dir must be set in `params` and exist") } - if (!("export_dir" %in% names(params))) { - stop("export_dir must be set in `params`") - } ## Set default parameter values if not specified + # Paths + if (!("export_dir" %in% names(params))) {params$export_dir <- "./receiving"} + if (!("cache_dir" %in% names(params))) {params$cache_dir <- "./cache"} + # Parallel parameters if (!("parallel" %in% names(params))) {params$parallel <- FALSE} if (!("parallel_max_cores" %in% names(params))) {params$parallel_max_cores <- .Machine$integer.max} diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/helper-relativize.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/helper-relativize.R new file mode 100644 index 000000000..3d62d6a7f --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/helper-relativize.R @@ -0,0 +1,13 @@ +## Helper functions to relativize paths to the testing directory, so tests can +## be run via R CMD CHECK and do not depend on the current working directory +## being tests/testthat/. + +library(testthat) + +relativize_params <- function(params) { + params$export_dir <- test_path(params$export_dir) + params$cache_dir <- test_path(params$cache_dir) + params$input_dir <- test_path(params$input_dir) + + return(params) +} diff --git a/Backfill_Correction/params.json.template b/Backfill_Correction/params.json.template index 4c167d616..b22f2de12 100644 --- a/Backfill_Correction/params.json.template +++ b/Backfill_Correction/params.json.template @@ -1,6 +1,7 @@ { "ref_lag": 60, "input_dir": "", + "cache_dir": "./cache", "test_dates":["", ""] , "testing_window": 1, "training_days": 270, From 948726255846bdb79ecabf9b1546c1b8fea14ed9 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Fri, 26 Aug 2022 13:36:10 -0400 Subject: [PATCH 047/145] explicitly import covidcast for county_census data --- Backfill_Correction/delphiBackfillCorrection/NAMESPACE | 1 + .../delphiBackfillCorrection/R/delphiBackfillCorrection.R | 1 + 2 files changed, 2 insertions(+) diff --git a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE index dc9244334..5cb60c75d 100644 --- a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE +++ b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE @@ -19,6 +19,7 @@ export(ratio_adj_with_pseudo) export(read_data) export(run_backfill) export(run_backfill_local) +import(covidcast) importFrom(arrow,read_parquet) importFrom(dplyr,"%>%") importFrom(dplyr,arrange) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/delphiBackfillCorrection.R b/Backfill_Correction/delphiBackfillCorrection/R/delphiBackfillCorrection.R index 5661265f7..ff49ab375 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/delphiBackfillCorrection.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/delphiBackfillCorrection.R @@ -1,3 +1,4 @@ # Suppress R CMD check note #' @importFrom tibble tribble +#' @import covidcast NULL From 
0078d878fcc7c577a9b5f281aef3528e5a8c6026 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Fri, 26 Aug 2022 13:46:38 -0400 Subject: [PATCH 048/145] move args with defaults to end of list --- .../delphiBackfillCorrection/R/preprocessing.R | 10 +++++----- .../delphiBackfillCorrection/man/add_dayofweek.Rd | 6 +++--- .../delphiBackfillCorrection/man/add_weekofmonth.Rd | 6 +++--- .../unit-tests/testthat/test-preprocessing.R | 4 ++-- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R index bd4c365b3..abba9be4b 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R @@ -106,7 +106,7 @@ add_shift <- function(df, n_day, refd_col){ #' @param suffix suffix added to indicate which kind of date is used #' #' @export -add_dayofweek <- function(df, wd = WEEKDAYS_ABBR, time_col, suffix){ +add_dayofweek <- function(df, time_col, suffix, wd = WEEKDAYS_ABBR){ dayofweek <- as.numeric(format(df[[time_col]], format="%u")) for (i in 1:6){ df[, paste0(wd[i], suffix)] <- as.numeric(dayofweek == i) @@ -145,7 +145,7 @@ get_weekofmonth <- function(date){ #' @template time_col-template #' #' @export -add_weekofmonth <- function(df, wm = WEEK_ISSUES, time_col){ +add_weekofmonth <- function(df, time_col, wm = WEEK_ISSUES){ weekofmonth <- get_weekofmonth(df[[time_col]]) for (i in 1:3){ df[, paste0(wm[i])] <- as.numeric(weekofmonth == i) @@ -210,11 +210,11 @@ add_7davs_and_target <- function(df, value_col, refd_col, lag_col, ref_lag = REF #' @template lag_col-template add_params_for_dates <- function(df, refd_col, lag_col){ # Add columns for day-of-week effect - df <- add_dayofweek(df, WEEKDAYS_ABBR, refd_col, "_ref") - df <- add_dayofweek(df, WEEKDAYS_ABBR, "issue_date", "_issue") + df <- add_dayofweek(df, refd_col, "_ref", WEEKDAYS_ABBR) + df <- add_dayofweek(df, "issue_date", "_issue", WEEKDAYS_ABBR) # Add columns for week-of-month effect - df <- add_weekofmonth(df, WEEK_ISSUES, "issue_date") + df <- add_weekofmonth(df, "issue_date", WEEK_ISSUES) return (as.data.frame(df)) } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd index 478d571a3..02cc129a0 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd @@ -5,18 +5,18 @@ \title{Add one hot encoding for day of a week info in terms of reference and issue date} \usage{ -add_dayofweek(df, wd = WEEKDAYS_ABBR, time_col, suffix) +add_dayofweek(df, time_col, suffix, wd = WEEKDAYS_ABBR) } \arguments{ \item{df}{Data Frame of aggregated counts within a single location reported for each reference date and issue date.} -\item{wd}{vector of days of a week} - \item{time_col}{string specifying name of column used for the date, can be either reference date or issue date} \item{suffix}{suffix added to indicate which kind of date is used} + +\item{wd}{vector of days of a week} } \description{ Add one hot encoding for day of a week info in terms of reference diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd index a6cc97d85..260efb519 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd +++ 
b/Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd @@ -4,16 +4,16 @@ \alias{add_weekofmonth} \title{Add one hot encoding for week of a month info in terms of issue date} \usage{ -add_weekofmonth(df, wm = WEEK_ISSUES, time_col) +add_weekofmonth(df, time_col, wm = WEEK_ISSUES) } \arguments{ \item{df}{Data Frame of aggregated counts within a single location reported for each reference date and issue date.} -\item{wm}{vector of weeks of a month} - \item{time_col}{string specifying name of column used for the date, can be either reference date or issue date} + +\item{wm}{vector of weeks of a month} } \description{ Add one hot encoding for week of a month info in terms of issue date diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R index eec0e0124..61018de44 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R @@ -71,7 +71,7 @@ test_that("testing the data shifting", { test_that("testing adding columns for each day of a week", { - df_new <- add_dayofweek(fake_df, wd, refd_col, "_ref") + df_new <- add_dayofweek(fake_df, refd_col, "_ref", wd) expect_equal(ncol(fake_df) + 7, ncol(df_new)) expect_true(all(rowSums(df_new[, -c(1:ncol(fake_df))]) == 1)) @@ -89,7 +89,7 @@ test_that("testing the calculation of week of a month", { }) test_that("testing the calculation of 7-day moving average", { - df_new <- add_weekofmonth(fake_df, wm, refd_col) + df_new <- add_weekofmonth(fake_df, refd_col, wm) expect_equal(ncol(fake_df) + 3, ncol(df_new)) expect_true(all(rowSums(df_new[, -c(1:ncol(fake_df))]) == 1)) From 999b9f49d1d8e6233f3b5abcacdfcc4f6a92c55f Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Tue, 30 Aug 2022 17:35:40 -0400 Subject: [PATCH 049/145] local script docs --- Backfill_Correction/correct_local_signal.R | 6 +++--- Backfill_Correction/delphiBackfillCorrection/R/tooling.R | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Backfill_Correction/correct_local_signal.R b/Backfill_Correction/correct_local_signal.R index ed40e4a11..1cd846014 100644 --- a/Backfill_Correction/correct_local_signal.R +++ b/Backfill_Correction/correct_local_signal.R @@ -1,6 +1,6 @@ #!/usr/bin/env Rscript -## Run backfill corrections on a single signal + geo type combination of local data. +## Run backfill corrections on a single signal + geo type combination from local data. 
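As a concrete illustration of the local-correction script documented in this commit, an invocation supplying the three arguments the README marks as required (`input_dir`, `test_start_date`, `test_end_date`) might look like the following; the directory path and dates are placeholders, and all other arguments fall back to their defaults:

```
Rscript correct_local_signal.R --input_dir ./local_data \
    --test_start_date 2020-01-01 --test_end_date 2020-01-31
```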
## ## Usage: ## @@ -12,13 +12,13 @@ suppressPackageStartupMessages({ }) -parser <- arg_parser(description='Process commandline arguments') +parser <- arg_parser(description='Run backfill corrections on a single signal + geo type combination from local data') parser <- add_argument(parser, arg="--input_dir", type="character", help = "Path to the input file") parser <- add_argument(parser, arg="--export_dir", type="character", default = "../export_dir", help = "Pth to the export directory") parser <- add_argument(parser, arg="--test_start_date", type="character", help = "Should be in the format as '2020-01-01'") parser <- add_argument(parser, arg="--test_end_date", type="character", help = "Should be in the format as '2020-01-01'") parser <- add_argument(parser, arg="--testing_window", type="integer", default = 1, help = "The number of issue dates for testing per trained model") -parser <- add_argument(parser, arg="--value_type", type="character", default = "ratio", help = "Can be 'count' or 'fraction'") +parser <- add_argument(parser, arg="--value_type", type="character", default = "ratio", help = "Can be 'count' or 'ratio'") parser <- add_argument(parser, arg="--num_col", type="character", default = "num", help = "The column name for the numerator") parser <- add_argument(parser, arg="--denum_col", type="character", default = "den", help = "The column name for the denominator") parser <- add_argument(parser, arg="--lambda", type="character", default = 0.1, help = "The parameter lambda for the lasso regression") diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R index 8d4754fea..5f5a3a666 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R @@ -38,7 +38,7 @@ run_backfill_local <- function(df, export_dir, test_date_list, value_cols, value if (value_type == "count") { # For counts data only combined_df <- fill_missing_updates(subdf, value_cols[1], "time_value", "lag") combined_df <- add_7davs_and_target(combined_df, "value_raw", "time_value", "lag", ref_lag) - } else if (value_type == "fraction"){ + } else if (value_type == "ratio"){ combined_num_df <- fill_missing_updates(subdf, value_cols[1], "time_value", "lag") combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", "time_value", "lag", ref_lag) From 2a048795026d5a5f2c8240aceba9a138a1bd455f Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Wed, 31 Aug 2022 14:38:34 -0400 Subject: [PATCH 050/145] add arg parsing to main run script Added flags for training models and for making predictions/corrections using the models. Both features are off (FALSE) by default. --- Backfill_Correction/run.R | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/Backfill_Correction/run.R b/Backfill_Correction/run.R index 85f1ece05..b53dc8180 100644 --- a/Backfill_Correction/run.R +++ b/Backfill_Correction/run.R @@ -1,5 +1,26 @@ -library(delphiBackfillCorrection) +#!/usr/bin/env Rscript + +## Run backfill corrections pipeline. +## +## Usage: +## +## Rscript run.R [options] + +suppressPackageStartupMessages({ + library(delphiBackfillCorrection) + library(argparser) +}) + + +parser <- arg_parser(description='Run backfill corrections pipeline') +# Default to FALSE if not specified. 
+parser <- add_argument(parser, arg="--train_models", flag=TRUE)
+parser <- add_argument(parser, arg="--make_predictions", flag=TRUE)
+args = parse_args(parser)

 params <- read_params("params.json")
+params$train_models <- args$train_models
+params$make_predictions <- args$make_predictions
+
 delphiBackfillCorrection::main(params)
 message("backfill correction completed successfully")

From 14509e396aff836dea7abe3f968b894bbfa0e24e Mon Sep 17 00:00:00 2001
From: Nat DeFries <42820733+nmdefries@users.noreply.github.com>
Date: Wed, 31 Aug 2022 16:05:01 -0400
Subject: [PATCH 051/145] add NOT to params setup check

---
 Backfill_Correction/delphiBackfillCorrection/R/utils.R | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R
index 4d1672733..a1d9a412e 100644
--- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R
+++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R
@@ -37,7 +37,7 @@ read_params <- function(path = "params.json", template_path = "params.json.templ
   params <- read_json(path, simplifyVector = TRUE)

   # Required parameters
-  if (!("input_dir" %in% names(params)) || dir.exists(params$input_dir)) {
+  if (!("input_dir" %in% names(params)) || !dir.exists(params$input_dir)) {
     stop("input_dir must be set in `params` and exist")
   }

From e7a2f2804329bee1e3a1d9de46d2b56fe3fbcc10 Mon Sep 17 00:00:00 2001
From: Nat DeFries <42820733+nmdefries@users.noreply.github.com>
Date: Wed, 31 Aug 2022 17:25:35 -0400
Subject: [PATCH 052/145] validate train/predict flags; set in params file

more params comments
---
 .../delphiBackfillCorrection/R/utils.R   | 14 +++++++++++++-
 Backfill_Correction/params.json.template |  7 ++++---
 Backfill_Correction/run.R                |  2 +-
 3 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R
index a1d9a412e..7d17c0b70 100644
--- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R
+++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R
@@ -23,6 +23,10 @@
 #' params$lambda: the level of lasso penalty
 #' params$export_dir: directory to save corrected data to
 #' params$lp_solver: LP solver to use in quantile_lasso(); "gurobi" or "glpk"
+#' params$train_models: boolean indicating whether to train models (TRUE). If
+#' FALSE previously trained models (stored locally) will be used instead.
+#' params$make_predictions: boolean indicating whether to generate and save
+#' corrections.
#' #' @param path path to the parameters file; if not present, will try to copy the file #' "params.json.template" @@ -40,6 +44,14 @@ read_params <- function(path = "params.json", template_path = "params.json.templ if (!("input_dir" %in% names(params)) || !dir.exists(params$input_dir)) { stop("input_dir must be set in `params` and exist") } + if (!("train_models" %in% names(params))) { + stop("train_models flag must be set in `params`") + } + if (!("make_predictions" %in% names(params))) { + stop("make_predictions flag must be set in `params`") + } + +params$train_models params$make_predictions ## Set default parameter values if not specified # Paths @@ -65,7 +77,7 @@ read_params <- function(path = "params.json", template_path = "params.json.templ if (!("training_days" %in% names(params))) {params$training_days <- TRAINING_DAYS} if (!("ref_lag" %in% names(params))) {params$ref_lag <- REF_LAG} if (!("testing_window" %in% names(params))) {params$testing_window <- TESTING_WINDOW} - if (!("test_dates" %in% names(params))) { + if (!("test_dates" %in% names(params)) || length(params$test_dates) == 0) { start_date <- TODAY - params$testing_window end_date <- TODAY - 1 params$test_dates <- seq(start_date, end_date, by="days") diff --git a/Backfill_Correction/params.json.template b/Backfill_Correction/params.json.template index b22f2de12..7bf914e22 100644 --- a/Backfill_Correction/params.json.template +++ b/Backfill_Correction/params.json.template @@ -1,13 +1,14 @@ { - "ref_lag": 60, + "ref_lag": 7, "input_dir": "", "cache_dir": "./cache", - "test_dates":["", ""] , "testing_window": 1, - "training_days": 270, + "training_days": 30, "export_dir": "./receiving", "geo_levels": ["state", "county"], "value_types": ["count", "ratio"], "num_col": "num", "denom_col": "den", + "train_models": true, + "make_predictions": true } diff --git a/Backfill_Correction/run.R b/Backfill_Correction/run.R index b53dc8180..e4fac8646 100644 --- a/Backfill_Correction/run.R +++ b/Backfill_Correction/run.R @@ -13,7 +13,7 @@ suppressPackageStartupMessages({ parser <- arg_parser(description='Run backfill corrections pipeline') -# Default to FALSE if not specified. +# Both flags default to FALSE (do not train/predict) if not specified. 
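To make the new flags concrete: with the `argparser` setup in `run.R`, a run that both retrains models and generates corrections would be launched roughly as follows (omitting either flag leaves that step turned off, matching the FALSE defaults noted above):

```
Rscript run.R --train_models --make_predictions
```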
 parser <- add_argument(parser, arg="--train_models", flag=TRUE)
 parser <- add_argument(parser, arg="--make_predictions", flag=TRUE)
 args = parse_args(parser)

From eedf690345a634cbcd2aceebcd223b60161baa69 Mon Sep 17 00:00:00 2001
From: Nat DeFries <42820733+nmdefries@users.noreply.github.com>
Date: Wed, 31 Aug 2022 17:26:00 -0400
Subject: [PATCH 053/145] add production params file with different defaults

---
 .../params.json.production.template | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
 create mode 100644 Backfill_Correction/params.json.production.template

diff --git a/Backfill_Correction/params.json.production.template b/Backfill_Correction/params.json.production.template
new file mode 100644
index 000000000..4dbda656d
--- /dev/null
+++ b/Backfill_Correction/params.json.production.template
@@ -0,0 +1,14 @@
+{
+    "ref_lag": 60,
+    "input_dir": "",
+    "cache_dir": "./cache",
+    "testing_window": 1,
+    "training_days": 270,
+    "export_dir": "./receiving",
+    "geo_levels": ["state", "county"],
+    "value_types": ["count", "ratio"],
+    "num_col": "num",
+    "denom_col": "den",
+    "train_models": false,
+    "make_predictions": false
+}

From 892c74eceee2f486b390f01fde4daf57bf361759 Mon Sep 17 00:00:00 2001
From: Nat DeFries <42820733+nmdefries@users.noreply.github.com>
Date: Wed, 31 Aug 2022 18:07:40 -0400
Subject: [PATCH 054/145] exit if neither flag on

---
 Backfill_Correction/delphiBackfillCorrection/R/main.R  | 5 +++++
 Backfill_Correction/delphiBackfillCorrection/R/utils.R | 2 --
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R
index 641272a6b..d80ff8b99 100644
--- a/Backfill_Correction/delphiBackfillCorrection/R/main.R
+++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R
@@ -126,6 +126,11 @@ run_backfill <- function(df, value_type, geo_level, params,
 #'
 #' @export
 main <- function(params){
+  if (!params$train_models && !params$make_predictions) {
+    message("both model training and prediction generation are turned off; exiting")
+    return()
+  }
+
   ## Set default number of cores for mclapply to the half of the total available number.
if (params$parallel) { cores <- detectCores() diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index 7d17c0b70..d4674eeca 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -50,8 +50,6 @@ read_params <- function(path = "params.json", template_path = "params.json.templ if (!("make_predictions" %in% names(params))) { stop("make_predictions flag must be set in `params`") } - -params$train_models params$make_predictions ## Set default parameter values if not specified # Paths From 42498b0ce93eadf00fb7ddfec8ccac4c0ee9ead4 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 1 Sep 2022 13:22:58 -0400 Subject: [PATCH 055/145] don't need __index_level__ drop; no longer in parquet files --- Backfill_Correction/delphiBackfillCorrection/R/io.R | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index ab2aec473..1bb5c36ce 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -1,14 +1,12 @@ #' Read a parquet file into a dataframe #' -#' @param path path to the input data +#' @param path path to a single parquet file to read in #' #' @importFrom arrow read_parquet -#' @importFrom dplyr select %>% #' #' @export read_data <- function(path){ - df <- read_parquet(path, as_data_frame = TRUE) %>% - select(-`__index_level_0__`) + df <- read_parquet(path, as_data_frame = TRUE) return (df) } From 169a6da3a9debff665d34bb492aead1c488c8391 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 1 Sep 2022 16:52:37 -0400 Subject: [PATCH 056/145] move geo_level loop to run_backfill Since we don't have separate state data, we need to create it by aggregating up county data. Thus, state and county predictions are made from the same base dataset. This means that we only need to read in the data once for both geo types to save time. --- .../delphiBackfillCorrection/R/io.R | 14 +- .../delphiBackfillCorrection/R/main.R | 223 +++++++++--------- 2 files changed, 123 insertions(+), 114 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index 1bb5c36ce..cd93c588b 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -30,14 +30,10 @@ export_test_result <- function(test_data, coef_data, export_dir, geo) { } #' List valid input files. -get_files_list <- function(indicator, signal, geo_level, params, sub_dir = "") { +get_files_list <- function(indicator, signal, params, sub_dir = "") { # Convert input_group into file names. - daily_pattern <- create_name_pattern( - indicator, signal, geo_level, "daily" - ) - rollup_pattern <- create_name_pattern( - indicator, signal, geo_level, "rollup" - ) + daily_pattern <- create_name_pattern(indicator, signal, "daily") + rollup_pattern <- create_name_pattern(indicator, signal, "rollup") # Make sure we're reading in both 4-week rollup and daily files. 
if (!is.null(sub_dir) && sub_dir != "") { @@ -97,10 +93,10 @@ subset_valid_files <- function(files_list, file_type = c("daily", "rollup"), par return(files_list) } -#' Create pattern to match input files of a given type, signal, and geo level +#' Create pattern to match input files of a given type and signal #' #' @importFrom stringr str_interp -create_name_pattern <- function(indicator, signal, geo_level, +create_name_pattern <- function(indicator, signal, file_type = c("daily", "rollup")) { file_type <- match.arg(file_type) switch(file_type, diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 7e83baddb..04b6d3d82 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -44,9 +44,6 @@ #' @param df dataframe of input data containing a single indicator + signal + #' level of geographic coverage. #' @param value_type string describing signal type of "count" and "ratio". -#' @param geo_level string describing geo coverage of input data. "state" or -#' "county". If "county" is selected, only data from the 200 most populous -#' counties in the US (*not* the dataset) will be used. #' @param params named list containing modeling and data settings. Must include #' the following elements: `ref_lag`, `testing_window`, `test_dates`, #' `training_days`, `num_col`, `taus`, `lambda`, `export_dir`, `lp_solver`, @@ -58,109 +55,127 @@ #' @import preprocessing #' @import beta_prior_estimation #' @import model +#' @importFrom dplyr select %>% group_by summarize across everything #' #' @export -run_backfill <- function(df, value_type, geo_level, params, +run_backfill <- function(df, value_type, params, refd_col = "time_value", lag_col = "lag", signal_suffixes = c("")) { - # Get full list of interested locations - geo_list <- unique(df$geo_value) - if (geo_level == "county") { - # Keep only 200 most populous (within the US) counties - geo_list <- filter_counties(geo_list) + # If county included, do county first since state processing modifies + # `df` object. + geo_levels <- params$geo_level + if ("county" in ) { + geo_levels <- c("county", setdiff(geo_levels, c("county"))) } - - # Build model for each location - for (geo in geo_list) { - subdf <- df %>% filter(geo_value == geo) %>% filter(lag < params$ref_lag) - min_refd <- min(subdf[[refd_col]]) - max_refd <- max(subdf[[refd_col]]) - subdf <- fill_rows(subdf, refd_col, lag_col, min_refd, max_refd) - - for (suffix in signal_suffixes) { - # For each suffix listed in `signal_suffixes`, run training/testing - # process again. Main use case is for quidel which has overall and - # age-based signals. 
- if (suffix != "") { - num_col <- paste(params$num_col, suffix, sep = "_") - denom_col <- paste(params$denom_col, suffix, sep = "_") - } else { - num_col <- params$num_col - denom_col <- params$denom_col - } - - # Handle different signal types - if (value_type == "count") { # For counts data only - combined_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) - combined_df <- add_7davs_and_target(combined_df, "value_raw", refd_col, lag_col) - - } else if (value_type == "ratio"){ - combined_num_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) - combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", refd_col, lag_col) - - combined_denom_df <- fill_missing_updates(subdf, denom_col, refd_col, lag_col) - combined_denom_df <- add_7davs_and_target(combined_denom_df, "value_raw", refd_col, lag_col) - - combined_df <- merge( - combined_num_df, combined_denom_df, - by=c(refd_col, "issue_date", lag_col, "target_date"), all.y=TRUE, - suffixes=c("_num", "_denom") - ) - } - combined_df <- add_params_for_dates(combined_df, refd_col, lag_col) - test_date_list <- get_test_dates(combined_df, params$test_dates) - - for (test_date in test_date_list){ - geo_train_data = combined_df %>% - filter(issue_date < test_date) %>% - filter(target_date <= test_date) %>% - filter(target_date > test_date - params$training_days) %>% - drop_na() - geo_test_data = combined_df %>% - filter(issue_date >= test_date) %>% - filter(issue_date < test_date + params$testing_window) %>% - drop_na() - if (dim(geo_test_data)[1] == 0) next - if (dim(geo_train_data)[1] <= 200) next - - if (value_type == "ratio"){ - geo_prior_test_data = combined_df %>% - filter(issue_date > test_date - 7) %>% - filter(issue_date <= test_date) - - updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) - geo_train_data <- updated_data[[1]] - geo_test_data <- updated_data[[2]] + for (geo_level in geo_levels) { + # Get full list of interested locations + if (geo_level == "state") { + # Drop county field and make new "geo_value" field from "state_id". + # Aggregate counties up to state level + df <- df %>% + select(-geo_value, geo_value = state_id) %>% + group_by(across(c("geo_value", refd_col, lag_col))) %>% + # Summarized columns keep original names + summarize(across(everything(), sum)) + } + geo_list <- unique(df$geo_value) + if (geo_level == "county") { + # Keep only 200 most populous (within the US) counties + geo_list <- filter_counties(geo_list) + } + + # Build model for each location + for (geo in geo_list) { + subdf <- df %>% filter(geo_value == geo) %>% filter(lag < params$ref_lag) + min_refd <- min(subdf[[refd_col]]) + max_refd <- max(subdf[[refd_col]]) + subdf <- fill_rows(subdf, refd_col, lag_col, min_refd, max_refd) + + for (suffix in signal_suffixes) { + # For each suffix listed in `signal_suffixes`, run training/testing + # process again. Main use case is for quidel which has overall and + # age-based signals. 
+ if (suffix != "") { + num_col <- paste(params$num_col, suffix, sep = "_") + denom_col <- paste(params$denom_col, suffix, sep = "_") + } else { + num_col <- params$num_col + denom_col <- params$denom_col } - max_raw = sqrt(max(geo_train_data$value_raw)) - for (test_lag in c(1:14, 21, 35, 51)){ - filtered_data <- data_filteration(test_lag, geo_train_data, geo_test_data) - train_data <- filtered_data[[1]] - test_data <- filtered_data[[2]] - - updated_data <- add_sqrtscale(train_data, test_data, max_raw, "value_raw") - train_data <- updated_data[[1]] - test_data <- updated_data[[2]] - sqrtscale <- updated_data[[3]] - - covariates <- list(y7dav, wd, wd2, wm, slope, sqrtscale) - params_list <- c(yitl, as.vector(unlist(covariates))) - - # Model training and testing - prediction_results <- model_training_and_testing( - train_data, test_data, params$taus, params_list, - params$lp_solver, params$lambda, test_date + + # Handle different signal types + if (value_type == "count") { # For counts data only + combined_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) + combined_df <- add_7davs_and_target(combined_df, "value_raw", refd_col, lag_col) + + } else if (value_type == "ratio"){ + combined_num_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) + combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", refd_col, lag_col) + + combined_denom_df <- fill_missing_updates(subdf, denom_col, refd_col, lag_col) + combined_denom_df <- add_7davs_and_target(combined_denom_df, "value_raw", refd_col, lag_col) + + combined_df <- merge( + combined_num_df, combined_denom_df, + by=c(refd_col, "issue_date", lag_col, "target_date"), all.y=TRUE, + suffixes=c("_num", "_denom") ) - test_data <- prediction_results[[1]] - coefs <- prediction_results[[2]] - test_data <- evl(test_data, params$taus) - - export_test_result(test_data, coefs, params$export_dir, geo_level, - geo, test_lag) - }# End for test lags - }# End for test date list - }# End for signal suffixes - }# End for geo list + } + combined_df <- add_params_for_dates(combined_df, refd_col, lag_col) + test_date_list <- get_test_dates(combined_df, params$test_dates) + + for (test_date in test_date_list){ + geo_train_data = combined_df %>% + filter(issue_date < test_date) %>% + filter(target_date <= test_date) %>% + filter(target_date > test_date - params$training_days) %>% + drop_na() + geo_test_data = combined_df %>% + filter(issue_date >= test_date) %>% + filter(issue_date < test_date + params$testing_window) %>% + drop_na() + if (dim(geo_test_data)[1] == 0) next + if (dim(geo_train_data)[1] <= 200) next + + if (value_type == "ratio"){ + geo_prior_test_data = combined_df %>% + filter(issue_date > test_date - 7) %>% + filter(issue_date <= test_date) + + updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) + geo_train_data <- updated_data[[1]] + geo_test_data <- updated_data[[2]] + } + max_raw = sqrt(max(geo_train_data$value_raw)) + for (test_lag in c(1:14, 21, 35, 51)){ + filtered_data <- data_filteration(test_lag, geo_train_data, geo_test_data) + train_data <- filtered_data[[1]] + test_data <- filtered_data[[2]] + + updated_data <- add_sqrtscale(train_data, test_data, max_raw, "value_raw") + train_data <- updated_data[[1]] + test_data <- updated_data[[2]] + sqrtscale <- updated_data[[3]] + + covariates <- list(y7dav, wd, wd2, wm, slope, sqrtscale) + params_list <- c(yitl, as.vector(unlist(covariates))) + + # Model training and testing + prediction_results <- model_training_and_testing( + train_data, 
test_data, params$taus, params_list, + params$lp_solver, params$lambda, test_date + ) + test_data <- prediction_results[[1]] + coefs <- prediction_results[[2]] + test_data <- evl(test_data, params$taus) + + export_test_result(test_data, coefs, params$export_dir, geo_level, + geo, test_lag) + }# End for test lags + }# End for test date list + }# End for signal suffixes + }# End for geo list + }# End geo type } #' Perform backfill correction on all desired signals and geo levels @@ -175,15 +190,13 @@ run_backfill <- function(df, value_type, geo_level, params, #' #' @export main <- function(params, ...){ - # Load indicator x signal groups. Combine with params$geo_level to get all - # possible geo x signal combinations. - groups <- merge(indicators_and_signals, data.frame(geo_level = params$geo_level)) + # Load indicator x signal groups. + groups <- indicators_and_signals # Loop over every indicator + signal + geo type combination. for (input_group in groups) { files_list <- get_files_list( - input_group$indicator, input_group$signal, input_group$geo_level, - params, input_group$sub_dir + input_group$indicator, input_group$signal, params, input_group$sub_dir ) if (length(files_list) == 0) { @@ -211,7 +224,7 @@ main <- function(params, ...){ training_days_check(input_data$issue_date, params$training_days) # Perform backfill corrections and save result - run_backfill(input_data, input_group$value_type, input_group$geo_level, + run_backfill(input_data, input_group$value_type, params, signal_suffixes = input_group$name_suffix ) } From fbd24434d4216053173673e0ed68fd4a8933f068 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 1 Sep 2022 17:30:47 -0400 Subject: [PATCH 057/145] move value_type loop to run_backfill to avoid reading same files --- .../delphiBackfillCorrection/R/main.R | 153 +++++++++--------- Backfill_Correction/params.json.template | 2 +- 2 files changed, 78 insertions(+), 77 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 04b6d3d82..d79b8ff5f 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -103,76 +103,78 @@ run_backfill <- function(df, value_type, params, denom_col <- params$denom_col } - # Handle different signal types - if (value_type == "count") { # For counts data only - combined_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) - combined_df <- add_7davs_and_target(combined_df, "value_raw", refd_col, lag_col) - - } else if (value_type == "ratio"){ - combined_num_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) - combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", refd_col, lag_col) - - combined_denom_df <- fill_missing_updates(subdf, denom_col, refd_col, lag_col) - combined_denom_df <- add_7davs_and_target(combined_denom_df, "value_raw", refd_col, lag_col) - - combined_df <- merge( - combined_num_df, combined_denom_df, - by=c(refd_col, "issue_date", lag_col, "target_date"), all.y=TRUE, - suffixes=c("_num", "_denom") - ) - } - combined_df <- add_params_for_dates(combined_df, refd_col, lag_col) - test_date_list <- get_test_dates(combined_df, params$test_dates) - - for (test_date in test_date_list){ - geo_train_data = combined_df %>% - filter(issue_date < test_date) %>% - filter(target_date <= test_date) %>% - filter(target_date > test_date - params$training_days) %>% - drop_na() - geo_test_data = combined_df %>% - 
filter(issue_date >= test_date) %>% - filter(issue_date < test_date + params$testing_window) %>% - drop_na() - if (dim(geo_test_data)[1] == 0) next - if (dim(geo_train_data)[1] <= 200) next - - if (value_type == "ratio"){ - geo_prior_test_data = combined_df %>% - filter(issue_date > test_date - 7) %>% - filter(issue_date <= test_date) - - updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) - geo_train_data <- updated_data[[1]] - geo_test_data <- updated_data[[2]] - } - max_raw = sqrt(max(geo_train_data$value_raw)) - for (test_lag in c(1:14, 21, 35, 51)){ - filtered_data <- data_filteration(test_lag, geo_train_data, geo_test_data) - train_data <- filtered_data[[1]] - test_data <- filtered_data[[2]] - - updated_data <- add_sqrtscale(train_data, test_data, max_raw, "value_raw") - train_data <- updated_data[[1]] - test_data <- updated_data[[2]] - sqrtscale <- updated_data[[3]] - - covariates <- list(y7dav, wd, wd2, wm, slope, sqrtscale) - params_list <- c(yitl, as.vector(unlist(covariates))) - - # Model training and testing - prediction_results <- model_training_and_testing( - train_data, test_data, params$taus, params_list, - params$lp_solver, params$lambda, test_date + for (value_type in params$value_types) { + # Handle different signal types + if (value_type == "count") { # For counts data only + combined_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) + combined_df <- add_7davs_and_target(combined_df, "value_raw", refd_col, lag_col) + + } else if (value_type == "ratio"){ + combined_num_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) + combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", refd_col, lag_col) + + combined_denom_df <- fill_missing_updates(subdf, denom_col, refd_col, lag_col) + combined_denom_df <- add_7davs_and_target(combined_denom_df, "value_raw", refd_col, lag_col) + + combined_df <- merge( + combined_num_df, combined_denom_df, + by=c(refd_col, "issue_date", lag_col, "target_date"), all.y=TRUE, + suffixes=c("_num", "_denom") ) - test_data <- prediction_results[[1]] - coefs <- prediction_results[[2]] - test_data <- evl(test_data, params$taus) - - export_test_result(test_data, coefs, params$export_dir, geo_level, - geo, test_lag) - }# End for test lags - }# End for test date list + } + combined_df <- add_params_for_dates(combined_df, refd_col, lag_col) + test_date_list <- get_test_dates(combined_df, params$test_dates) + + for (test_date in test_date_list){ + geo_train_data = combined_df %>% + filter(issue_date < test_date) %>% + filter(target_date <= test_date) %>% + filter(target_date > test_date - params$training_days) %>% + drop_na() + geo_test_data = combined_df %>% + filter(issue_date >= test_date) %>% + filter(issue_date < test_date + params$testing_window) %>% + drop_na() + if (dim(geo_test_data)[1] == 0) next + if (dim(geo_train_data)[1] <= 200) next + + if (value_type == "ratio"){ + geo_prior_test_data = combined_df %>% + filter(issue_date > test_date - 7) %>% + filter(issue_date <= test_date) + + updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) + geo_train_data <- updated_data[[1]] + geo_test_data <- updated_data[[2]] + } + max_raw = sqrt(max(geo_train_data$value_raw)) + for (test_lag in c(1:14, 21, 35, 51)){ + filtered_data <- data_filteration(test_lag, geo_train_data, geo_test_data) + train_data <- filtered_data[[1]] + test_data <- filtered_data[[2]] + + updated_data <- add_sqrtscale(train_data, test_data, max_raw, "value_raw") + train_data <- updated_data[[1]] + 
test_data <- updated_data[[2]] + sqrtscale <- updated_data[[3]] + + covariates <- list(y7dav, wd, wd2, wm, slope, sqrtscale) + params_list <- c(yitl, as.vector(unlist(covariates))) + + # Model training and testing + prediction_results <- model_training_and_testing( + train_data, test_data, params$taus, params_list, + params$lp_solver, params$lambda, test_date + ) + test_data <- prediction_results[[1]] + coefs <- prediction_results[[2]] + test_data <- evl(test_data, params$taus) + + export_test_result(test_data, coefs, params$export_dir, geo_level, + geo, test_lag) + }# End for test lags + }# End for test date list + }# End for value types }# End for signal suffixes }# End for geo list }# End geo type @@ -190,11 +192,8 @@ run_backfill <- function(df, value_type, params, #' #' @export main <- function(params, ...){ - # Load indicator x signal groups. - groups <- indicators_and_signals - - # Loop over every indicator + signal + geo type combination. - for (input_group in groups) { + # Loop over every indicator + signal combination. + for (input_group in indicators_and_signals) { files_list <- get_files_list( input_group$indicator, input_group$signal, params, input_group$sub_dir ) @@ -218,13 +217,15 @@ main <- function(params, ...){ } # Check data type and required columns - validity_checks(input_data, input_group$value_type) + for (value_type in params$value_types) { + validity_checks(input_data, value_type) + } # Check available training days training_days_check(input_data$issue_date, params$training_days) # Perform backfill corrections and save result - run_backfill(input_data, input_group$value_type, + run_backfill(input_data, params, signal_suffixes = input_group$name_suffix ) } diff --git a/Backfill_Correction/params.json.template b/Backfill_Correction/params.json.template index d32c6c23c..fa00b0e5b 100644 --- a/Backfill_Correction/params.json.template +++ b/Backfill_Correction/params.json.template @@ -6,7 +6,7 @@ "training_days": 270, "export_dir": "./receiving", "geo_levels": ["state", "county"], - "value_type": ["count", "ratio"], + "value_types": ["count", "ratio"], "num_col": "", "denom_col": "", } From b6bded7097cbf3b8f39b674b894442a28939c447 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 1 Sep 2022 19:52:16 -0400 Subject: [PATCH 058/145] create weekday ref and issue fields --- Backfill_Correction/delphiBackfillCorrection/R/main.R | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 62bba43c0..beb3768ae 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -111,7 +111,10 @@ run_backfill <- function(df, params, test_data <- updated_data[[2]] sqrtscale <- updated_data[[3]] - covariates <- list(Y7DAV, WEEKDAYS_ABBR, WEEK_ISSUES, SLOPE, SQRTSCALE) + covariates <- list( + Y7DAV, paste0(WEEKDAYS_ABBR, "_issue"), + paste0(WEEKDAYS_ABBR, "_ref"), WEEK_ISSUES, SLOPE, SQRTSCALE + ) params_list <- c(YITL, as.vector(unlist(covariates))) # Model training and testing From 5d835a4968cc086a8a63c6eb0ecd765a9b336dc5 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 1 Sep 2022 19:52:34 -0400 Subject: [PATCH 059/145] use value_col arg instead of fixed name --- Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R index abba9be4b..b718f8085 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R @@ -182,7 +182,7 @@ add_7davs_and_target <- function(df, value_col, refd_col, lag_col, ref_lag = REF list(df, avg_df, avg_df_prev7)) # Add target - target_df <- df[df$lag==ref_lag, c(refd_col, "value_raw", "issue_date")] + target_df <- df[df$lag==ref_lag, c(refd_col, value_col, "issue_date")] names(target_df)[names(target_df) == value_col] <- 'value_target' names(target_df)[names(target_df) == 'issue_date'] <- 'target_date' From 34ade08a4ab53891a44188e917a86f948af8f32e Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Fri, 2 Sep 2022 10:31:47 -0400 Subject: [PATCH 060/145] replace "ratio" with "fraction" --- Backfill_Correction/correct_local_signal.R | 2 +- .../delphiBackfillCorrection/R/beta_prior_estimation.R | 4 ++-- Backfill_Correction/delphiBackfillCorrection/R/main.R | 4 ++-- Backfill_Correction/delphiBackfillCorrection/R/tooling.R | 6 +++--- Backfill_Correction/delphiBackfillCorrection/R/utils.R | 6 +++--- .../man-roxygen/value_type-template.R | 2 +- .../delphiBackfillCorrection/man/main_local.Rd | 4 ++-- .../delphiBackfillCorrection/man/ratio_adj.Rd | 4 ++-- .../delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd | 4 ++-- .../delphiBackfillCorrection/man/run_backfill.Rd | 2 +- .../delphiBackfillCorrection/man/run_backfill_local.Rd | 2 +- .../delphiBackfillCorrection/man/validity_checks.Rd | 2 +- Backfill_Correction/params.json.template | 2 +- 13 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Backfill_Correction/correct_local_signal.R b/Backfill_Correction/correct_local_signal.R index 1cd846014..15a5caa3b 100644 --- a/Backfill_Correction/correct_local_signal.R +++ b/Backfill_Correction/correct_local_signal.R @@ -18,7 +18,7 @@ parser <- add_argument(parser, arg="--export_dir", type="character", default = " parser <- add_argument(parser, arg="--test_start_date", type="character", help = "Should be in the format as '2020-01-01'") parser <- add_argument(parser, arg="--test_end_date", type="character", help = "Should be in the format as '2020-01-01'") parser <- add_argument(parser, arg="--testing_window", type="integer", default = 1, help = "The number of issue dates for testing per trained model") -parser <- add_argument(parser, arg="--value_type", type="character", default = "ratio", help = "Can be 'count' or 'ratio'") +parser <- add_argument(parser, arg="--value_type", type="character", default = "fraction", help = "Can be 'count' or 'fraction'") parser <- add_argument(parser, arg="--num_col", type="character", default = "num", help = "The column name for the numerator") parser <- add_argument(parser, arg="--denum_col", type="character", default = "den", help = "The column name for the denominator") parser <- add_argument(parser, arg="--lambda", type="character", default = 0.1, help = "The parameter lambda for the lasso regression") diff --git a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R index cac47f477..65bf660e0 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R @@ -76,7 +76,7 @@ est_priors <- function(train_data, 
prior_test_data, dw, taus, return (c(pseudo_denom, pseudo_num)) } -#' Update ratio based on the pseudo counts for numerators and denominators +#' Update fraction based on the pseudo counts for numerators and denominators #' #' @param data Data Frame #' @param dw character to indicate the day of a week. Can be NULL for all the days @@ -97,7 +97,7 @@ ratio_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, d return (num_adj / denom_adj) } -#' Update ratio using beta prior approach +#' Update fraction using beta prior approach #' #' @template train_data-template #' @param test_data testing data diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index beb3768ae..96c8cc4a9 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -63,7 +63,7 @@ run_backfill <- function(df, params, combined_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) combined_df <- add_7davs_and_target(combined_df, "value_raw", refd_col, lag_col) - } else if (value_type == "ratio"){ + } else if (value_type == "fraction"){ combined_num_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", refd_col, lag_col) @@ -91,7 +91,7 @@ run_backfill <- function(df, params, if (nrow(geo_test_data) == 0) next if (nrow(geo_train_data) <= 200) next - if (value_type == "ratio"){ + if (value_type == "fraction"){ geo_prior_test_data = combined_df %>% filter(.data$issue_date > .env$test_date - 7) %>% filter(.data$issue_date <= .env$test_date) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R index 5f5a3a666..88f87ae7d 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R @@ -38,7 +38,7 @@ run_backfill_local <- function(df, export_dir, test_date_list, value_cols, value if (value_type == "count") { # For counts data only combined_df <- fill_missing_updates(subdf, value_cols[1], "time_value", "lag") combined_df <- add_7davs_and_target(combined_df, "value_raw", "time_value", "lag", ref_lag) - } else if (value_type == "ratio"){ + } else if (value_type == "fraction"){ combined_num_df <- fill_missing_updates(subdf, value_cols[1], "time_value", "lag") combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", "time_value", "lag", ref_lag) @@ -63,7 +63,7 @@ run_backfill_local <- function(df, export_dir, test_date_list, value_cols, value drop_na() if (nrow(geo_test_data) == 0) next if (nrow(geo_train_data) <= 200) next - if (value_type == "ratio"){ + if (value_type == "fraction"){ geo_prior_test_data = combined_df %>% filter(.data$issue_date > .env$test_date - 7) %>% filter(.data$issue_date <= .env$test_date) @@ -138,7 +138,7 @@ run_backfill_local <- function(df, export_dir, test_date_list, value_cols, value #' @export main_local <- function(input_dir, export_dir, test_start_date, test_end_date, - num_col, denom_col,value_type = c("count", "ratio"), + num_col, denom_col,value_type = c("count", "fraction"), training_days = TRAINING_DAYS, testing_window = TESTING_WINDOW, lambda = LAMBDA, ref_lag = REF_LAG, lp_solver = LP_SOLVER){ value_type <- match.arg(value_type) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index 4d1672733..780248c76 100644 --- 
a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -59,7 +59,7 @@ read_params <- function(path = "params.json", template_path = "params.json.templ if (!("num_col" %in% names(params))) {params$num_col <- "num"} if (!("denom_col" %in% names(params))) {params$denom_col <- "denom"} if (!("geo_level" %in% names(params))) {params$geo_level <- c("state", "county")} - if (!("value_types" %in% names(params))) {params$lp_solver <- c("count", "ratio")} + if (!("value_types" %in% names(params))) {params$lp_solver <- c("count", "fraction")} # Date parameters if (!("training_days" %in% names(params))) {params$training_days <- TRAINING_DAYS} @@ -106,10 +106,10 @@ validity_checks <- function(df, value_type, num_col, denom_col, signal_suffixes) if (all(num_col %in% colnames(df))) {value_cols=c(num_col)} else if (all(denom_col %in% colnames(df))) {value_cols=c(denom_col)} else {stop("No valid column name detected for the count values!")} - } else if (value_type == "ratio"){ + } else if (value_type == "fraction"){ value_cols = c(num_col, denom_col) if ( any(!(value_cols %in% colnames(df))) ){ - stop("No valid column name detected for the ratio values!") + stop("No valid column name detected for the fraction values!") } } diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/value_type-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/value_type-template.R index 07939a96e..c49b7e84b 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/value_type-template.R +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/value_type-template.R @@ -1 +1 @@ -#' @param value_type string describing signal type. Either "count" or "ratio". +#' @param value_type string describing signal type. Either "count" or "fraction". diff --git a/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd b/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd index ee9d41d49..ae6ef023f 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd @@ -11,7 +11,7 @@ main_local( test_end_date, num_col, denom_col, - value_type = c("count", "ratio"), + value_type = c("count", "fraction"), training_days = TRAINING_DAYS, testing_window = TESTING_WINDOW, lambda = LAMBDA, @@ -34,7 +34,7 @@ making predictions on} \item{denom_col}{name of denominator column in the input dataframe} -\item{value_type}{string describing signal type. Either "count" or "ratio".} +\item{value_type}{string describing signal type. 
Either "count" or "fraction".} \item{training_days}{integer number of days to use for training} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd index e13db45d5..1b4e53005 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/beta_prior_estimation.R \name{ratio_adj} \alias{ratio_adj} -\title{Update ratio using beta prior approach} +\title{Update fraction using beta prior approach} \usage{ ratio_adj( train_data, @@ -28,5 +28,5 @@ optimization, use Gurobi (requires separate installation of the `gurobi` package).} } \description{ -Update ratio using beta prior approach +Update fraction using beta prior approach } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd index 7f41eca6d..3363d684c 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/beta_prior_estimation.R \name{ratio_adj_with_pseudo} \alias{ratio_adj_with_pseudo} -\title{Update ratio based on the pseudo counts for numerators and denominators} +\title{Update fraction based on the pseudo counts for numerators and denominators} \usage{ ratio_adj_with_pseudo(data, dw, pseudo_num, pseudo_denom, num_col, denom_col) } @@ -20,5 +20,5 @@ ratio_adj_with_pseudo(data, dw, pseudo_num, pseudo_denom, num_col, denom_col) \item{denom_col}{name of denominator column in the input dataframe} } \description{ -Update ratio based on the pseudo counts for numerators and denominators +Update fraction based on the pseudo counts for numerators and denominators } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd index f0401215a..f0b87fa01 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd @@ -18,7 +18,7 @@ run_backfill( \item{df}{Data Frame of aggregated counts within a single location reported for each reference date and issue date.} -\item{value_type}{string describing signal type. Either "count" or "ratio".} +\item{value_type}{string describing signal type. Either "count" or "fraction".} \item{geo_level}{string describing geo coverage of input data. Either "state" or "county".} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd index 5b6a28871..6ee6bce71 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd @@ -29,7 +29,7 @@ reported for each reference date and issue date.} \item{value_cols}{character vector of numerator and/or denominator field names} -\item{value_type}{string describing signal type. Either "count" or "ratio".} +\item{value_type}{string describing signal type. Either "count" or "fraction".} \item{taus}{numeric vector of quantiles to be predicted. 
Values must be between 0 and 1.} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd b/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd index 1abff193a..1e55d8d8e 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd @@ -10,7 +10,7 @@ validity_checks(df, value_type, num_col, denom_col, signal_suffixes) \item{df}{Data Frame of aggregated counts within a single location reported for each reference date and issue date.} -\item{value_type}{string describing signal type. Either "count" or "ratio".} +\item{value_type}{string describing signal type. Either "count" or "fraction".} \item{num_col}{name of numerator column in the input dataframe} diff --git a/Backfill_Correction/params.json.template b/Backfill_Correction/params.json.template index 166c4040e..71b82c41b 100644 --- a/Backfill_Correction/params.json.template +++ b/Backfill_Correction/params.json.template @@ -7,7 +7,7 @@ "training_days": 270, "export_dir": "./receiving", "geo_levels": ["state", "county"], - "value_types": ["count", "ratio"], + "value_types": ["count", "fraction"], "num_col": "num", "denom_col": "den" } From 606f45301770b5e8599bcdfdbbe3e875b83cba6f Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Fri, 2 Sep 2022 11:21:17 -0400 Subject: [PATCH 061/145] rebuild --- .../delphiBackfillCorrection/NAMESPACE | 3 +++ .../delphiBackfillCorrection/R/io.R | 1 - .../delphiBackfillCorrection/R/main.R | 23 ++++++++++--------- .../man-roxygen/params-template.R | 4 ++-- .../man/create_name_pattern.Rd | 14 +++-------- .../man/get_files_list.Rd | 9 +++----- .../delphiBackfillCorrection/man/main.Rd | 4 ++-- .../man/run_backfill.Rd | 14 +++-------- .../man/subset_valid_files.Rd | 4 ++-- 9 files changed, 30 insertions(+), 46 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE index 5cb60c75d..ba63ea765 100644 --- a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE +++ b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE @@ -22,14 +22,17 @@ export(run_backfill_local) import(covidcast) importFrom(arrow,read_parquet) importFrom(dplyr,"%>%") +importFrom(dplyr,across) importFrom(dplyr,arrange) importFrom(dplyr,bind_rows) importFrom(dplyr,desc) importFrom(dplyr,everything) importFrom(dplyr,filter) +importFrom(dplyr,group_by) importFrom(dplyr,if_else) importFrom(dplyr,pull) importFrom(dplyr,select) +importFrom(dplyr,summarize) importFrom(evalcast,weighted_interval_score) importFrom(jsonlite,read_json) importFrom(lubridate,day) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index d2773bb14..bfef16bec 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -118,7 +118,6 @@ subset_valid_files <- function(files_list, file_type = c("daily", "rollup"), par #' #' @template indicator-template #' @template signal-template -#' @template geo_level-template #' @template file_type-template #' #' @importFrom stringr str_interp diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 96c8cc4a9..f37dd2d38 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -11,11 +11,10 @@ #' @importFrom rlang 
.data .env #' #' @export -run_backfill <- function(df, params, - refd_col = "time_value", lag_col = "lag", - signal_suffixes = c("")) { +run_backfill <- function(df, params, refd_col = "time_value", + lag_col = "lag", signal_suffixes = c("")) { geo_levels <- params$geo_level - if ("state" in geo_levels) { + if ("state" %in% geo_levels) { # If state included, do it last since state processing modifies the # `df` object. geo_levels <- c(setdiff(geo_levels, c("state")), "state") @@ -143,8 +142,8 @@ run_backfill <- function(df, params, #' @importFrom parallel detectCores #' #' @export -main <- function(params){ - ## Set default number of cores for mclapply to the half of the total available number. +main <- function(params) { + ## Set default number of cores for mclapply to half of the total available number. if (params$parallel) { cores <- detectCores() @@ -163,7 +162,9 @@ main <- function(params){ ) if (length(files_list) == 0) { - warning(str_interp("No files found for {input_group$indicator} {input_group$signal}, skipping")) + warning(str_interp( + "No files found for indicator {input_group$indicator} signal {input_group$signal}, skipping" + )) next } @@ -176,7 +177,9 @@ main <- function(params){ ) %>% bind_rows if (nrow(input_data) == 0) { - warning(str_interp("No data available for {input_group$indicator} {input_group$signal}, skipping")) + warning(str_interp( + "No data available for indicator {input_group$indicator} signal {input_group$signal}, skipping" + )) next } @@ -193,8 +196,6 @@ main <- function(params){ training_days_check(input_data$issue_date, params$training_days) # Perform backfill corrections and save result - run_backfill(input_data, - params, signal_suffixes = input_group$name_suffix - ) + run_backfill(input_data, params, signal_suffixes = input_group$name_suffix) } } diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/params-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/params-template.R index 3106660a9..3af9823f3 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/params-template.R +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/params-template.R @@ -1,4 +1,4 @@ #' @param params named list containing modeling and data settings. Must include #' the following elements: `ref_lag`, `testing_window`, `test_dates`, -#' `training_days`, `num_col`, `taus`, `lambda`, `export_dir`, `lp_solver`, -#' and `data_path` (input dir). +#' `training_days`, `num_col`, `denom_col`, `taus`, `lambda`, `export_dir`, +#' `lp_solver`, `input_dir`, `cache_dir`, `geo_levels`, and `value_types`. 
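The params list documented above is plain JSON, so the documented elements can be checked directly against the shipped template. A minimal sketch, assuming it is run from the Backfill_Correction/ directory and that only the fields present in params.json.template are of interest (read_params() fills in defaults for anything omitted):

library(jsonlite)
params <- read_json("params.json.template", simplifyVector = TRUE)
params$geo_levels   # c("state", "county")
params$value_types  # c("count", "fraction")
params$num_col      # "num"
params$denom_col    # "den"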
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/create_name_pattern.Rd b/Backfill_Correction/delphiBackfillCorrection/man/create_name_pattern.Rd index dad0d76bb..603e25627 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/create_name_pattern.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/create_name_pattern.Rd @@ -2,14 +2,9 @@ % Please edit documentation in R/io.R \name{create_name_pattern} \alias{create_name_pattern} -\title{Create pattern to match input files of a given type, signal, and geo level} +\title{Create pattern to match input files of a given type and signal} \usage{ -create_name_pattern( - indicator, - signal, - geo_level, - file_type = c("daily", "rollup") -) +create_name_pattern(indicator, signal, file_type = c("daily", "rollup")) } \arguments{ \item{indicator}{string specifying the name of the indicator as used in @@ -20,12 +15,9 @@ with multiple signals.} `parquet` input data filenames. One indicator can be associated with multiple signals.} -\item{geo_level}{string describing geo coverage of input data. Either "state" -or "county".} - \item{file_type}{string specifying time period coverage of input files. Either "daily" or "rollup"} } \description{ -Create pattern to match input files of a given type, signal, and geo level +Create pattern to match input files of a given type and signal } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd b/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd index c78417835..6b193bba5 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd @@ -4,7 +4,7 @@ \alias{get_files_list} \title{List valid input files.} \usage{ -get_files_list(indicator, signal, geo_level, params, sub_dir) +get_files_list(indicator, signal, params, sub_dir) } \arguments{ \item{indicator}{string specifying the name of the indicator as used in @@ -15,13 +15,10 @@ with multiple signals.} `parquet` input data filenames. One indicator can be associated with multiple signals.} -\item{geo_level}{string describing geo coverage of input data. Either "state" -or "county".} - \item{params}{named list containing modeling and data settings. Must include the following elements: `ref_lag`, `testing_window`, `test_dates`, -`training_days`, `num_col`, `taus`, `lambda`, `export_dir`, `lp_solver`, -and `data_path` (input dir).} +`training_days`, `num_col`, `denom_col`, `taus`, `lambda`, `export_dir`, +`lp_solver`, `input_dir`, `cache_dir`, `geo_levels`, and `value_types`.} \item{sub_dir}{string specifying the indicator-specific directory within the general input directory `params$input_dir`} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/main.Rd b/Backfill_Correction/delphiBackfillCorrection/man/main.Rd index 7eb810d03..ae211b289 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/main.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/main.Rd @@ -9,8 +9,8 @@ main(params) \arguments{ \item{params}{named list containing modeling and data settings. 
Must include the following elements: `ref_lag`, `testing_window`, `test_dates`, -`training_days`, `num_col`, `taus`, `lambda`, `export_dir`, `lp_solver`, -and `data_path` (input dir).} +`training_days`, `num_col`, `denom_col`, `taus`, `lambda`, `export_dir`, +`lp_solver`, `input_dir`, `cache_dir`, `geo_levels`, and `value_types`.} } \description{ Perform backfill correction on all desired signals and geo levels diff --git a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd index f0b87fa01..6e2f2dd99 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd @@ -6,8 +6,6 @@ \usage{ run_backfill( df, - value_type, - geo_level, params, refd_col = "time_value", lag_col = "lag", @@ -18,15 +16,10 @@ run_backfill( \item{df}{Data Frame of aggregated counts within a single location reported for each reference date and issue date.} -\item{value_type}{string describing signal type. Either "count" or "fraction".} - -\item{geo_level}{string describing geo coverage of input data. Either "state" -or "county".} - \item{params}{named list containing modeling and data settings. Must include the following elements: `ref_lag`, `testing_window`, `test_dates`, -`training_days`, `num_col`, `taus`, `lambda`, `export_dir`, `lp_solver`, -and `data_path` (input dir).} +`training_days`, `num_col`, `denom_col`, `taus`, `lambda`, `export_dir`, +`lp_solver`, `input_dir`, `cache_dir`, `geo_levels`, and `value_types`.} \item{refd_col}{string specifying name of reference date field within the input dataframe.} @@ -41,6 +34,5 @@ value column names and when processing multiple signals from a single input dataframe, as with `quidel`'s age buckets.} } \description{ -If "county" is selected for `geo_level`, only data from the 200 most populous -counties in the US (*not* the dataset) will be used. +Get backfill-corrected estimates for a single signal + geo combination } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/subset_valid_files.Rd b/Backfill_Correction/delphiBackfillCorrection/man/subset_valid_files.Rd index 847aae881..0fde2714c 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/subset_valid_files.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/subset_valid_files.Rd @@ -14,8 +14,8 @@ Either "daily" or "rollup"} \item{params}{named list containing modeling and data settings. Must include the following elements: `ref_lag`, `testing_window`, `test_dates`, -`training_days`, `num_col`, `taus`, `lambda`, `export_dir`, `lp_solver`, -and `data_path` (input dir).} +`training_days`, `num_col`, `denom_col`, `taus`, `lambda`, `export_dir`, +`lp_solver`, `input_dir`, `cache_dir`, `geo_levels`, and `value_types`.} } \description{ Parse filenames to find included dates. 
Use different patterns if file From f843672bc35b3004e6a9603d8be25630e77c2455 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Fri, 2 Sep 2022 13:24:30 -0400 Subject: [PATCH 062/145] allow train/test flags to be set only via CL --- .../delphiBackfillCorrection/R/utils.R | 22 +++++++++---------- .../man/read_params.Rd | 14 +++++++++++- .../params.json.production.template | 4 +--- Backfill_Correction/params.json.template | 4 +--- Backfill_Correction/run.R | 7 +++--- 5 files changed, 29 insertions(+), 22 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index 97f2bb8f2..d5a6765bb 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -23,33 +23,31 @@ #' params$lambda: the level of lasso penalty #' params$export_dir: directory to save corrected data to #' params$lp_solver: LP solver to use in quantile_lasso(); "gurobi" or "glpk" -#' params$train_models: boolean indicating whether to train models (TRUE). If -#' FALSE previously trained models (stored locally) will be used instead. -#' params$make_predictions: boolean indicating whether to generate and save -#' corrections. #' #' @param path path to the parameters file; if not present, will try to copy the file #' "params.json.template" #' @param template_path path to the template parameters file +#' @param train_models boolean indicating whether to train models (TRUE). If +#' FALSE previously trained models (stored locally) will be used instead. +#' Default is TRUE. +#' @param make_predictions boolean indicating whether to generate and save +#' corrections (TRUE) or not. Default is TRUE. #' #' @return a named list of parameters values #' #' @importFrom dplyr if_else #' @importFrom jsonlite read_json -read_params <- function(path = "params.json", template_path = "params.json.template") { - if (!file.exists(path)) file.copy(template_path, path) +read_params <- function(path = "params.json", template_path = "params.json.template", + train_models = TRUE, make_predictions = TRUE) { + if (!file.exists(path)) {file.copy(template_path, path)} params <- read_json(path, simplifyVector = TRUE) # Required parameters if (!("input_dir" %in% names(params)) || !dir.exists(params$input_dir)) { stop("input_dir must be set in `params` and exist") } - if (!("train_models" %in% names(params))) { - stop("train_models flag must be set in `params`") - } - if (!("make_predictions" %in% names(params))) { - stop("make_predictions flag must be set in `params`") - } + params$train_models <- train_models + params$make_predictions <- make_predictions ## Set default parameter values if not specified # Paths diff --git a/Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd b/Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd index 713eed281..426db62f9 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd @@ -4,13 +4,25 @@ \alias{read_params} \title{Return params file as an R list} \usage{ -read_params(path = "params.json", template_path = "params.json.template") +read_params( + path = "params.json", + template_path = "params.json.template", + train_models = TRUE, + make_predictions = TRUE +) } \arguments{ \item{path}{path to the parameters file; if not present, will try to copy the file "params.json.template"} \item{template_path}{path to the template 
parameters file} + +\item{train_models}{boolean indicating whether to train models (TRUE). If +FALSE previously trained models (stored locally) will be used instead. +Default is TRUE.} + +\item{make_predictions}{boolean indicating whether to generate and save +corrections (TRUE) or not. Default is TRUE.} } \value{ a named list of parameters values diff --git a/Backfill_Correction/params.json.production.template b/Backfill_Correction/params.json.production.template index 3c720af02..27c8b411c 100644 --- a/Backfill_Correction/params.json.production.template +++ b/Backfill_Correction/params.json.production.template @@ -8,7 +8,5 @@ "geo_levels": ["state", "county"], "value_types": ["count", "fraction"], "num_col": "num", - "denom_col": "den", - "train_models": false, - "make_predictions": false + "denom_col": "den" } diff --git a/Backfill_Correction/params.json.template b/Backfill_Correction/params.json.template index f889a956f..b6317e084 100644 --- a/Backfill_Correction/params.json.template +++ b/Backfill_Correction/params.json.template @@ -8,7 +8,5 @@ "geo_levels": ["state", "county"], "value_types": ["count", "fraction"], "num_col": "num", - "denom_col": "den", - "train_models": true, - "make_predictions": true + "denom_col": "den" } diff --git a/Backfill_Correction/run.R b/Backfill_Correction/run.R index e4fac8646..7f951d8a4 100644 --- a/Backfill_Correction/run.R +++ b/Backfill_Correction/run.R @@ -18,9 +18,10 @@ parser <- add_argument(parser, arg="--train_models", flag=TRUE) parser <- add_argument(parser, arg="--make_predictions", flag=TRUE) args = parse_args(parser) -params <- read_params("params.json") -params$train_models <- args.train_models -params$make_predictions <- args.make_predictions +params <- read_params( + "params.json", + train_models = args.train_models, make_predictions = args.make_predictions +) delphiBackfillCorrection::main(params) message("backfill correction completed successfully") From 7d8eb13c4e382e1471764a4181da380cff346579 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Fri, 2 Sep 2022 16:50:45 -0400 Subject: [PATCH 063/145] add train_models flag logic If `train_models` is TRUE, model is trained and saved to specified dir. Model filename is generated automatically based on combo of parameters (value_type, etc). If `train_models` is FALSE, we load the model from the specified path. Also add space between all `){` --- .../R/beta_prior_estimation.R | 12 +-- .../delphiBackfillCorrection/R/main.R | 24 +++-- .../delphiBackfillCorrection/R/model.R | 90 ++++++++++++++++--- .../R/preprocessing.R | 28 +++--- .../delphiBackfillCorrection/R/tooling.R | 14 +-- .../delphiBackfillCorrection/R/utils.R | 19 ++-- .../man/model_training_and_testing.Rd | 40 +++++++-- .../man/run_backfill.Rd | 12 ++- .../unit-tests/testthat/test-preprocessing.R | 2 +- 9 files changed, 175 insertions(+), 66 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R index 65bf660e0..bf71666e1 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R @@ -47,7 +47,7 @@ objective <- function(theta, x, prob, ...) 
{ est_priors <- function(train_data, prior_test_data, dw, taus, covariates, response, lp_solver, lambda, start=c(0, log(10)), - base_pseudo_denom=1000, base_pseudo_num=10){ + base_pseudo_denom=1000, base_pseudo_num=10) { sub_train_data <- train_data %>% filter(train_data[[dw]] == 1) sub_test_data <- prior_test_data %>% filter(prior_test_data[[dw]] == 1) if (nrow(sub_test_data) == 0) { @@ -56,7 +56,7 @@ est_priors <- function(train_data, prior_test_data, dw, taus, } else { # Using quantile regression to get estimated quantiles at log scale quantiles <- list() - for (idx in 1:length(taus)){ + for (idx in 1:length(taus)) { tau <- taus[idx] obj <- quantile_lasso(as.matrix(sub_train_data[covariates]), sub_train_data[response], tau = tau, @@ -86,8 +86,8 @@ est_priors <- function(train_data, prior_test_data, dw, taus, #' @template denom_col-template #' #' @export -ratio_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, denom_col){ - if (is.null(dw)){ +ratio_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, denom_col) { + if (is.null(dw)) { num_adj <- data[[num_col]] + pseudo_num denom_adj <- data[[denom_col]] + pseudo_denom } else { @@ -106,7 +106,7 @@ ratio_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, d #' @template lp_solver-template #' #' @export -ratio_adj <- function(train_data, test_data, prior_test_data, taus = TAUS, lp_solver = LP_SOLVER){ +ratio_adj <- function(train_data, test_data, prior_test_data, taus = TAUS, lp_solver = LP_SOLVER) { train_data$value_target <- ratio_adj_with_pseudo(train_data, NULL, 1, 100, "value_target_num", "value_target_denom") train_data$value_7dav <- ratio_adj_with_pseudo(train_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") test_data$value_target <- ratio_adj_with_pseudo(test_data, NULL, 1, 100, "value_target_num", "value_target_denom") @@ -132,7 +132,7 @@ ratio_adj <- function(train_data, test_data, prior_test_data, taus = TAUS, lp_so test_data$pseudo_num = NaN test_data$pseudo_denum = NaN - for (cov in c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref", "Sun_ref")){ + for (cov in c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref", "Sun_ref")) { pseudo_counts <- est_priors(train_data, prior_test_data, cov, taus, pre_covariates, "log_value_target", lp_solver, lambda=0.1) pseudo_denum = pseudo_counts[1] + pseudo_counts[2] diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 1c6cf362a..36339095d 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -5,6 +5,8 @@ #' @template refd_col-template #' @template lag_col-template #' @template signal_suffixes-template +#' @template indicator-template +#' @template signal-template #' #' @importFrom dplyr %>% filter select group_by summarize across everything #' @importFrom tidyr drop_na @@ -12,7 +14,8 @@ #' #' @export run_backfill <- function(df, params, refd_col = "time_value", - lag_col = "lag", signal_suffixes = c("")) { + lag_col = "lag", signal_suffixes = c(""), + indicator = "", signal = "") { geo_levels <- params$geo_level if ("state" %in% geo_levels) { # If state included, do it last since state processing modifies the @@ -62,7 +65,7 @@ run_backfill <- function(df, params, refd_col = "time_value", combined_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) combined_df <- add_7davs_and_target(combined_df, "value_raw", refd_col, lag_col) - 
} else if (value_type == "fraction"){ + } else if (value_type == "fraction") { combined_num_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", refd_col, lag_col) @@ -90,7 +93,7 @@ run_backfill <- function(df, params, refd_col = "time_value", if (nrow(geo_test_data) == 0) next if (nrow(geo_train_data) <= 200) next - if (value_type == "fraction"){ + if (value_type == "fraction") { geo_prior_test_data = combined_df %>% filter(.data$issue_date > .env$test_date - 7) %>% filter(.data$issue_date <= .env$test_date) @@ -100,7 +103,7 @@ run_backfill <- function(df, params, refd_col = "time_value", geo_test_data <- updated_data[[2]] } max_raw = sqrt(max(geo_train_data$value_raw)) - for (test_lag in c(1:14, 21, 35, 51)){ + for (test_lag in c(1:14, 21, 35, 51)) { filtered_data <- data_filteration(test_lag, geo_train_data, geo_test_data) train_data <- filtered_data[[1]] test_data <- filtered_data[[2]] @@ -119,8 +122,13 @@ run_backfill <- function(df, params, refd_col = "time_value", # Model training and testing prediction_results <- model_training_and_testing( train_data, test_data, params$taus, params_list, - params$lp_solver, params$lambda, test_date, geo + params$lp_solver, params$lambda, test_date, geo, + indicator = indicator, signal = signal, signal_suffix = suffix, + value_type = value_type, test_lag = test_lag, + train_models = params$train_models, + make_predictions = params$make_predictions ) + test_data <- prediction_results[[1]] coefs <- prediction_results[[2]] test_data <- evaluate(test_data, params$taus) @@ -142,7 +150,7 @@ run_backfill <- function(df, params, refd_col = "time_value", #' @importFrom parallel detectCores #' #' @export -main <- function(params){ +main <- function(params) { if (!params$train_models && !params$make_predictions) { message("both model training and prediction generation are turned off; exiting") return @@ -201,6 +209,8 @@ main <- function(params){ training_days_check(input_data$issue_date, params$training_days) # Perform backfill corrections and save result - run_backfill(input_data, params, signal_suffixes = input_group$name_suffix) + run_backfill(input_data, params, + indicator = input_group$indicator, signal = input_group$signal, + signal_suffixes = input_group$name_suffix) } } diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index cf5d0d96d..9e2988528 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -7,7 +7,7 @@ #' @importFrom rlang .data .env #' #' @export -data_filteration <- function(test_lag, geo_train_data, geo_test_data){ +data_filteration <- function(test_lag, geo_train_data, geo_test_data) { if (test_lag <= 14) { test_lag_pad=2 test_lag_pad1=0 @@ -33,8 +33,7 @@ data_filteration <- function(test_lag, geo_train_data, geo_test_data){ } -#' Model training and prediction using quantile regression with Lasso penalty -#' The quantile regression uses the quantile_lasso function from quantgen package +#' Fetch model and use to generate predictions/perform corrections #' #' @template train_data-template #' @param test_data Data frame for testing @@ -43,25 +42,32 @@ data_filteration <- function(test_lag, geo_train_data, geo_test_data){ #' @template lp_solver-template #' @template lambda-template #' @param test_date Date object representing test date -#' @param geo string specifying the name of the geo region (e.g. 
FIPS -#' code for counties) +#' @template geo-template +#' @template indicator-template +#' @template signal-template +#' @template signal_suffix-template +#' @template value_type-template +#' @template test_lag-template +#' @template train_models-template +#' @template make_predictions-template #' #' @importFrom stats predict coef #' #' @export model_training_and_testing <- function(train_data, test_data, taus, covariates, - lp_solver, lambda, test_date, geo) { + lp_solver, lambda, test_date, geo, indicator = "", signal = "", + train_models = TRUE, make_predictions = TRUE, + signal_suffix = "", value_type = "", test_lag = "") { success = 0 coefs_result = list() coef_list = c("intercept", paste(covariates, '_coef', sep='')) - for (tau in taus){ - #options(error=NULL) + for (tau in taus) { tryCatch( expr = { - # Quantile regression - obj = quantile_lasso(as.matrix(train_data[covariates]), - train_data$log_value_target, tau = tau, - lambda = lambda, standardize = FALSE, lp_solver = lp_solver) + model_path <- generate_model_filename(indicator, signal, geo, signal_suffix, + value_type, test_lag, tau, lambda) + obj = get_model(train_data, covariates, tau = tau, + lambda = lambda, lp_solver = lp_solver, model_path, train_models) y_hat_all = as.numeric(predict(obj, newx = as.matrix(test_data[covariates]))) test_data[paste0("predicted_tau", as.character(tau))] = y_hat_all @@ -73,7 +79,7 @@ model_training_and_testing <- function(train_data, test_data, taus, covariates, error=function(e) {print(paste(geo, test_date, as.character(tau), sep="_"))} ) } - if (success < 9){ return (NULL)} + if (success < 9) { return (NULL)} coef_combined_result = data.frame(tau=taus, issue_date=test_date) coef_combined_result[coef_list] = as.matrix(do.call(rbind, coefs_result)) @@ -93,7 +99,7 @@ model_training_and_testing <- function(train_data, test_data, taus, covariates, #' @importFrom evalcast weighted_interval_score #' #' @export -evaluate <- function(test_data, taus){ +evaluate <- function(test_data, taus) { n_row = nrow(test_data) taus_list = as.list(data.frame(matrix(replicate(n_row, taus), ncol=n_row))) @@ -111,3 +117,59 @@ evaluate <- function(test_data, taus){ return (test_data) } + +#' Train model using quantile regression with Lasso penalty, or load from disk +#' +#' @param model_path path to read model from or to save model to +#' @template train_data-template +#' @template covariates-template +#' @param tau decimal quantile to be predicted. Values must be between 0 and 1. +#' @template lp_solver-template +#' @template lambda-template +#' @template train_models-template +#' +#' @importFrom quantgen quantile_lasso +get_model <- function(model_path, train_data, covariates, tau, + lambda, lp_solver, train_models) { + if (train_models) { + # Quantile regression + obj <- quantile_lasso(as.matrix(train_data[covariates]), + train_data$log_value_target, tau = tau, + lambda = lambda, standardize = FALSE, lp_solver = lp_solver) + + # Save model to cache. + create_dir_not_exist(dirname(model_path)) + save(obj, file=model_path) + } else { + # Load model from cache. + obj <- load(model_path) + } + + return(obj) +} + +#' Construct filename for model with given parameters +#' +#' @template indicator-template +#' @template signal-template +#' @template geo-template +#' @template signal_suffix-template +#' @template value_type-template +#' @template test_lag-template +#' @param tau decimal quantile to be predicted. Values must be between 0 and 1. 
+#' @template lambda-template +#' +#' @return path to file containing model object +#' +#' @importFrom stringr str_interp +generate_model_filename <- function(indicator, signal, geo, signal_suffix, + value_type, test_lag, tau, lambda) { + prefix_components <- c(indicator, signal, signal_suffix) + filename = paste0( + # Drop any empty strings. + paste(prefix_components[prefix_components != ""], sep="_"), + str_interp("_{value_type}_{geo}_lag{test_lag}_tau{tau}_lambda{lambda}.model") + ) + + return(filename) +} diff --git a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R index b718f8085..4253fd9c3 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R @@ -22,7 +22,7 @@ #' @importFrom stats setNames #' #' @export -fill_rows <- function(df, refd_col, lag_col, min_refd, max_refd, ref_lag = REF_LAG){ +fill_rows <- function(df, refd_col, lag_col, min_refd, max_refd, ref_lag = REF_LAG) { lags <- min(df[[lag_col]]): ref_lag # Full list of lags refds <- seq(min_refd, max_refd, by="day") # Full list reference date row_inds_df <- as.data.frame(crossing(refds, lags)) %>% @@ -73,8 +73,8 @@ fill_missing_updates <- function(df, value_col, refd_col, lag_col) { #' @importFrom zoo rollmeanr #' #' @export -get_7dav <- function(pivot_df, refd_col){ - for (col in colnames(pivot_df)){ +get_7dav <- function(pivot_df, refd_col) { + for (col in colnames(pivot_df)) { if (col == refd_col) next pivot_df[, col] <- rollmeanr(pivot_df[, col], 7, align="right", fill=NA) } @@ -92,7 +92,7 @@ get_7dav <- function(pivot_df, refd_col){ #' @template refd_col-template #' #' @export -add_shift <- function(df, n_day, refd_col){ +add_shift <- function(df, n_day, refd_col) { df[, refd_col] <- as.Date(df[, refd_col]) + n_day return (df) } @@ -106,12 +106,12 @@ add_shift <- function(df, n_day, refd_col){ #' @param suffix suffix added to indicate which kind of date is used #' #' @export -add_dayofweek <- function(df, time_col, suffix, wd = WEEKDAYS_ABBR){ +add_dayofweek <- function(df, time_col, suffix, wd = WEEKDAYS_ABBR) { dayofweek <- as.numeric(format(df[[time_col]], format="%u")) - for (i in 1:6){ + for (i in 1:6) { df[, paste0(wd[i], suffix)] <- as.numeric(dayofweek == i) } - if (suffix == "_ref"){ + if (suffix == "_ref") { df[, paste0("Sun", suffix)] <- as.numeric(dayofweek == 7) } return (df) @@ -130,7 +130,7 @@ add_dayofweek <- function(df, time_col, suffix, wd = WEEKDAYS_ABBR){ #' @importFrom lubridate make_date year month day #' #' @return a integer indicating which week it is in a month -get_weekofmonth <- function(date){ +get_weekofmonth <- function(date) { year <- year(date) month <- month(date) day <- day(date) @@ -145,9 +145,9 @@ get_weekofmonth <- function(date){ #' @template time_col-template #' #' @export -add_weekofmonth <- function(df, time_col, wm = WEEK_ISSUES){ +add_weekofmonth <- function(df, time_col, wm = WEEK_ISSUES) { weekofmonth <- get_weekofmonth(df[[time_col]]) - for (i in 1:3){ + for (i in 1:3) { df[, paste0(wm[i])] <- as.numeric(weekofmonth == i) } return (df) @@ -165,7 +165,7 @@ add_weekofmonth <- function(df, time_col, wm = WEEK_ISSUES){ #' @importFrom tidyr pivot_wider drop_na #' #' @export -add_7davs_and_target <- function(df, value_col, refd_col, lag_col, ref_lag = REF_LAG){ +add_7davs_and_target <- function(df, value_col, refd_col, lag_col, ref_lag = REF_LAG) { df$issue_date <- df[[refd_col]] + df[[lag_col]] pivot_df <- 
df[order(df$issue_date, decreasing=FALSE), ] %>% pivot_wider(id_cols=refd_col, names_from="issue_date", @@ -208,7 +208,7 @@ add_7davs_and_target <- function(df, value_col, refd_col, lag_col, ref_lag = REF #' @template df-template #' @template refd_col-template #' @template lag_col-template -add_params_for_dates <- function(df, refd_col, lag_col){ +add_params_for_dates <- function(df, refd_col, lag_col) { # Add columns for day-of-week effect df <- add_dayofweek(df, refd_col, "_ref", WEEKDAYS_ABBR) df <- add_dayofweek(df, "issue_date", "_issue", WEEKDAYS_ABBR) @@ -225,11 +225,11 @@ add_params_for_dates <- function(df, refd_col, lag_col){ #' @param test_data Data Frame for testing #' @param max_raw the maximum value in the training data at square root level #' @template value_col-template -add_sqrtscale <- function(train_data, test_data, max_raw, value_col){ +add_sqrtscale <- function(train_data, test_data, max_raw, value_col) { sqrtscale = c() sub_max_raw = sqrt(max(train_data$value_raw)) / 2 - for (split in seq(0, 3)){ + for (split in seq(0, 3)) { if (sub_max_raw < (max_raw * (split+1) * 0.1)) break train_data[paste0("sqrty", as.character(split))] = 0 test_data[paste0("sqrty", as.character(split))] = 0 diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R index 88f87ae7d..aba60f19c 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R @@ -38,7 +38,7 @@ run_backfill_local <- function(df, export_dir, test_date_list, value_cols, value if (value_type == "count") { # For counts data only combined_df <- fill_missing_updates(subdf, value_cols[1], "time_value", "lag") combined_df <- add_7davs_and_target(combined_df, "value_raw", "time_value", "lag", ref_lag) - } else if (value_type == "fraction"){ + } else if (value_type == "fraction") { combined_num_df <- fill_missing_updates(subdf, value_cols[1], "time_value", "lag") combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", "time_value", "lag", ref_lag) @@ -63,7 +63,7 @@ run_backfill_local <- function(df, export_dir, test_date_list, value_cols, value drop_na() if (nrow(geo_test_data) == 0) next if (nrow(geo_train_data) <= 200) next - if (value_type == "fraction"){ + if (value_type == "fraction") { geo_prior_test_data = combined_df %>% filter(.data$issue_date > .env$test_date - 7) %>% filter(.data$issue_date <= .env$test_date) @@ -74,7 +74,7 @@ run_backfill_local <- function(df, export_dir, test_date_list, value_cols, value } max_raw = sqrt(max(geo_train_data$value_raw)) - for (test_lag in test_lags){ + for (test_lag in test_lags) { filtered_data <- data_filteration(test_lag, geo_train_data, geo_test_data) train_data <- filtered_data[[1]] test_data <- filtered_data[[2]] @@ -93,7 +93,7 @@ run_backfill_local <- function(df, export_dir, test_date_list, value_cols, value # Model training and testing prediction_results <- model_training_and_testing( train_data, test_data, taus, params_list, lp_solver, - lambda, test_date, geo + lambda, test_date, geo, value_type = value_type, test_lag = test_lag ) test_data <- prediction_results[[1]] coefs <- prediction_results[[2]] @@ -140,7 +140,7 @@ main_local <- function(input_dir, export_dir, test_start_date, test_end_date, num_col, denom_col,value_type = c("count", "fraction"), training_days = TRAINING_DAYS, testing_window = TESTING_WINDOW, - lambda = LAMBDA, ref_lag = REF_LAG, lp_solver = LP_SOLVER){ + lambda = LAMBDA, ref_lag = REF_LAG, lp_solver = 
LP_SOLVER) { value_type <- match.arg(value_type) # Check input data @@ -152,13 +152,13 @@ main_local <- function(input_dir, export_dir, value_cols <- result[["value_cols"]] # Get test date list according to the test start date - if (is.null(test_start_date)){ + if (is.null(test_start_date)) { test_start_date = max(df$issue_date) } else { test_start_date = as.Date(test_start_date) } - if (is.null(test_end_date)){ + if (is.null(test_end_date)) { test_end_date = max(df$issue_date) } else { test_end_date = as.Date(test_end_date) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index d5a6765bb..b5d22eeb7 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -27,11 +27,8 @@ #' @param path path to the parameters file; if not present, will try to copy the file #' "params.json.template" #' @param template_path path to the template parameters file -#' @param train_models boolean indicating whether to train models (TRUE). If -#' FALSE previously trained models (stored locally) will be used instead. -#' Default is TRUE. -#' @param make_predictions boolean indicating whether to generate and save -#' corrections (TRUE) or not. Default is TRUE. +#' @template train_models-template +#' @template make_predictions-template #' #' @return a named list of parameters values #' @@ -110,13 +107,13 @@ validity_checks <- function(df, value_type, num_col, denom_col, signal_suffixes) } # Check data type and required columns - if (value_type == "count"){ + if (value_type == "count") { if (all(num_col %in% colnames(df))) {value_cols=c(num_col)} else if (all(denom_col %in% colnames(df))) {value_cols=c(denom_col)} else {stop("No valid column name detected for the count values!")} - } else if (value_type == "fraction"){ + } else if (value_type == "fraction") { value_cols = c(num_col, denom_col) - if ( any(!(value_cols %in% colnames(df))) ){ + if ( any(!(value_cols %in% colnames(df))) ) { stop("No valid column name detected for the fraction values!") } } @@ -127,8 +124,8 @@ validity_checks <- function(df, value_type, num_col, denom_col, signal_suffixes) } # issue_date or lag should exist in the dataset - if ( !"lag" %in% colnames(df) ){ - if ( "issue_date" %in% colnames(df) ){ + if ( !"lag" %in% colnames(df) ) { + if ( "issue_date" %in% colnames(df) ) { df$lag = as.integer(df$issue_date - df$time_value) } else {stop("No issue_date or lag exists!")} @@ -143,7 +140,7 @@ validity_checks <- function(df, value_type, num_col, denom_col, signal_suffixes) #' @template training_days-template training_days_check <- function(issue_date, training_days = TRAINING_DAYS) { valid_training_days = as.integer(max(issue_date) - min(issue_date)) - if (training_days > valid_training_days){ + if (training_days > valid_training_days) { warning(sprintf("Only %d days are available at most for training.", valid_training_days)) } } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd b/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd index 156ed8484..c255c1cea 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd @@ -2,8 +2,7 @@ % Please edit documentation in R/model.R \name{model_training_and_testing} \alias{model_training_and_testing} -\title{Model training and prediction using quantile regression with Lasso 
penalty -The quantile regression uses the quantile_lasso function from quantgen package} +\title{Fetch model and use to generate predictions/perform corrections} \usage{ model_training_and_testing( train_data, @@ -13,7 +12,14 @@ model_training_and_testing( lp_solver, lambda, test_date, - geo + geo, + indicator = "", + signal = "", + train_models = TRUE, + make_predictions = TRUE, + signal_suffix = "", + value_type = "", + test_lag = "" ) } \arguments{ @@ -37,8 +43,32 @@ of the `gurobi` package).} \item{geo}{string specifying the name of the geo region (e.g. FIPS code for counties)} + +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{train_models}{boolean indicating whether to train models (TRUE). If +FALSE previously trained models (stored locally) will be used instead. +Default is TRUE.} + +\item{make_predictions}{boolean indicating whether to generate and save +corrections (TRUE) or not. Default is TRUE.} + +\item{signal_suffix}{string specifying value column name +ending to be appended to standard value column names from +`params$num_col` and `params$denom_col`. Used for non-standard +value column names and when processing multiple signals from a +single input dataframe, as with `quidel`'s age buckets.} + +\item{value_type}{string describing signal type. Either "count" or "fraction".} + +\item{test_lag}{integer number of days ago to predict for} } \description{ -Model training and prediction using quantile regression with Lasso penalty -The quantile regression uses the quantile_lasso function from quantgen package +Fetch model and use to generate predictions/perform corrections } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd index 6e2f2dd99..d8dd8498d 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd @@ -9,7 +9,9 @@ run_backfill( params, refd_col = "time_value", lag_col = "lag", - signal_suffixes = c("") + signal_suffixes = c(""), + indicator = "", + signal = "" ) } \arguments{ @@ -32,6 +34,14 @@ endings to be appended to standard value column names from `params$num_col` and `params$denom_col`. Used for non-standard value column names and when processing multiple signals from a single input dataframe, as with `quidel`'s age buckets.} + +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. 
One indicator can be associated +with multiple signals.} } \description{ Get backfill-corrected estimates for a single signal + geo combination diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R index 61018de44..148e77202 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R @@ -41,7 +41,7 @@ test_that("testing NA filling for missing udpates", { expect_equal(nrow(backfill_df), n_refds*(ref_lag+1)) - for (d in seq(min_refd, max_refd, by="day")){ + for (d in seq(min_refd, max_refd, by="day")) { expect_true(all(diff(backfill_df[backfill_df[,refd_col]==d, "value_raw"])>=0 )) } }) From 1e6294e6ab85ac1581f53dac63c589ab9feeea24 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Fri, 2 Sep 2022 16:53:36 -0400 Subject: [PATCH 064/145] doc files --- .../man-roxygen/geo-template.R | 2 + .../man-roxygen/make_predictions-template.R | 2 + .../man-roxygen/signal_suffix-template.R | 5 ++ .../man-roxygen/train_models-template.R | 3 ++ .../man/generate_model_filename.Rd | 49 +++++++++++++++++++ .../delphiBackfillCorrection/man/get_model.Rd | 39 +++++++++++++++ 6 files changed, 100 insertions(+) create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/geo-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/make_predictions-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal_suffix-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man-roxygen/train_models-template.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/generate_model_filename.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/get_model.Rd diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/geo-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/geo-template.R new file mode 100644 index 000000000..ae9dfeef0 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/geo-template.R @@ -0,0 +1,2 @@ +#' @param geo string specifying the name of the geo region (e.g. FIPS +#' code for counties) diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/make_predictions-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/make_predictions-template.R new file mode 100644 index 000000000..ff57c25f6 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/make_predictions-template.R @@ -0,0 +1,2 @@ +#' @param make_predictions boolean indicating whether to generate and save +#' corrections (TRUE) or not. Default is TRUE. diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal_suffix-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal_suffix-template.R new file mode 100644 index 000000000..eb3819558 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal_suffix-template.R @@ -0,0 +1,5 @@ +#' @param signal_suffix string specifying value column name +#' ending to be appended to standard value column names from +#' `params$num_col` and `params$denom_col`. Used for non-standard +#' value column names and when processing multiple signals from a +#' single input dataframe, as with `quidel`'s age buckets. 
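These man-roxygen files are pulled into the generated documentation by roxygen2's @template tag, which substitutes the contents of man-roxygen/<name>.R wherever `@template <name>` appears when the .Rd files are rebuilt. A small sketch using a hypothetical function, only to show how the shared parameter text is reused:

#' Hypothetical example of reusing the shared templates
#'
#' @template geo-template
#' @template train_models-template
#' @template make_predictions-template
illustrative_fn <- function(geo, train_models = TRUE, make_predictions = TRUE) {
  # placeholder body; real functions in the package document their parameters
  # the same way via these templates
  invisible(NULL)
}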
diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/train_models-template.R b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/train_models-template.R new file mode 100644 index 000000000..3048087af --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man-roxygen/train_models-template.R @@ -0,0 +1,3 @@ +#' @param train_models boolean indicating whether to train models (TRUE). If +#' FALSE previously trained models (stored locally) will be used instead. +#' Default is TRUE. diff --git a/Backfill_Correction/delphiBackfillCorrection/man/generate_model_filename.Rd b/Backfill_Correction/delphiBackfillCorrection/man/generate_model_filename.Rd new file mode 100644 index 000000000..14c20b2fb --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/generate_model_filename.Rd @@ -0,0 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{generate_model_filename} +\alias{generate_model_filename} +\title{Construct filename for model with given parameters} +\usage{ +generate_model_filename( + indicator, + signal, + geo, + signal_suffix, + value_type, + test_lag, + tau, + lambda +) +} +\arguments{ +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{geo}{string specifying the name of the geo region (e.g. FIPS +code for counties)} + +\item{signal_suffix}{string specifying value column name +ending to be appended to standard value column names from +`params$num_col` and `params$denom_col`. Used for non-standard +value column names and when processing multiple signals from a +single input dataframe, as with `quidel`'s age buckets.} + +\item{value_type}{string describing signal type. Either "count" or "fraction".} + +\item{test_lag}{integer number of days ago to predict for} + +\item{tau}{decimal quantile to be predicted. Values must be between 0 and 1.} + +\item{lambda}{the level of lasso penalty} +} +\value{ +path to file containing model object +} +\description{ +Construct filename for model with given parameters +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/get_model.Rd b/Backfill_Correction/delphiBackfillCorrection/man/get_model.Rd new file mode 100644 index 000000000..5eeb43213 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/get_model.Rd @@ -0,0 +1,39 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{get_model} +\alias{get_model} +\title{Train model using quantile regression with Lasso penalty, or load from disk} +\usage{ +get_model( + model_path, + train_data, + covariates, + tau, + lambda, + lp_solver, + train_models +) +} +\arguments{ +\item{model_path}{path to read model from or to save model to} + +\item{train_data}{Data Frame containing training data} + +\item{covariates}{character vector of column names serving as the covariates for the model} + +\item{tau}{decimal quantile to be predicted. Values must be between 0 and 1.} + +\item{lambda}{the level of lasso penalty} + +\item{lp_solver}{string specifying the lp solver to use in +Quantgen fitting. Either "glpk" or "gurobi". 
For faster +optimization, use Gurobi (requires separate installation +of the `gurobi` package).} + +\item{train_models}{boolean indicating whether to train models (TRUE). If +FALSE previously trained models (stored locally) will be used instead. +Default is TRUE.} +} +\description{ +Train model using quantile regression with Lasso penalty, or load from disk +} From 6bf446eb5240a93411d32d97121ad411c9a1a57a Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Fri, 2 Sep 2022 17:12:47 -0400 Subject: [PATCH 065/145] add make_predictions flag logic When `make_predictions` is TRUE, model is trained/loaded and used to make predictions/corrections as usual. Predictions and coefs from model are saved. If `make_predictions` is FALSE, no predictions or coefs are saved. --- .../delphiBackfillCorrection/R/main.R | 14 +++++++++----- .../delphiBackfillCorrection/R/model.R | 19 ++++++++++++------- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 36339095d..d7a088155 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -129,11 +129,15 @@ run_backfill <- function(df, params, refd_col = "time_value", make_predictions = params$make_predictions ) - test_data <- prediction_results[[1]] - coefs <- prediction_results[[2]] - test_data <- evaluate(test_data, params$taus) - - export_test_result(test_data, coefs, params$export_dir, geo_level, test_lag) + # Model objects are saved during training, so only need to export + # output if making predictions/corrections + if (params$make_predictions) { + test_data <- prediction_results[[1]] + coefs <- prediction_results[[2]] + test_data <- evaluate(test_data, params$taus) + + export_test_result(test_data, coefs, params$export_dir, geo_level, test_lag) + } }# End for test lags }# End for test date list }# End for value types diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index 9e2988528..83c5a737c 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -68,18 +68,23 @@ model_training_and_testing <- function(train_data, test_data, taus, covariates, value_type, test_lag, tau, lambda) obj = get_model(train_data, covariates, tau = tau, lambda = lambda, lp_solver = lp_solver, model_path, train_models) - - y_hat_all = as.numeric(predict(obj, newx = as.matrix(test_data[covariates]))) - test_data[paste0("predicted_tau", as.character(tau))] = y_hat_all - - coefs_result[[success+1]] = coef(obj) - coefs_result[[success+1]]$tau = tau + + if (make_predictions) { + y_hat_all = as.numeric(predict(obj, newx = as.matrix(test_data[covariates]))) + test_data[paste0("predicted_tau", as.character(tau))] = y_hat_all + + coefs_result[[success+1]] = coef(obj) + coefs_result[[success+1]]$tau = tau + } + success = success + 1 }, error=function(e) {print(paste(geo, test_date, as.character(tau), sep="_"))} ) } - if (success < 9) { return (NULL)} + if (success < 9) {return (NULL)} + if (!make_predictions) {return (list())} + coef_combined_result = data.frame(tau=taus, issue_date=test_date) coef_combined_result[coef_list] = as.matrix(do.call(rbind, coefs_result)) From 9bbe7d11284b1bdd49a954276e4c64bb9573ec86 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Fri, 2 
Sep 2022 17:25:35 -0400 Subject: [PATCH 066/145] train model if cached file not found --- Backfill_Correction/delphiBackfillCorrection/R/model.R | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index 83c5a737c..3255edaf7 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -134,9 +134,14 @@ evaluate <- function(test_data, taus) { #' @template train_models-template #' #' @importFrom quantgen quantile_lasso +#' @importFrom stringr str_interp get_model <- function(model_path, train_data, covariates, tau, lambda, lp_solver, train_models) { - if (train_models) { + if (train_models || !file.exists(model_path)) { + if (!train_models && !file.exists(model_path)) { + warning(str_interp("user requested use of cached model but file {model_path}"), + " does not exist; training new model") + } # Quantile regression obj <- quantile_lasso(as.matrix(train_data[covariates]), train_data$log_value_target, tau = tau, From ce597c836955c090cb7fa1495562d541efcdf041 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Mon, 19 Sep 2022 20:12:15 -0400 Subject: [PATCH 067/145] fixed errors and added unit tests --- .../R/beta_prior_estimation.R | 53 ++++--- .../delphiBackfillCorrection/R/model.R | 86 ++++++---- .../R/preprocessing.R | 8 +- .../delphiBackfillCorrection/R/utils.R | 10 +- .../testthat/test-beta_prior_estimation.R | 114 +++++++++++++ .../unit-tests/testthat/test-model.R | 150 ++++++++++++++++++ .../unit-tests/testthat/test-preprocessing.R | 22 +-- .../unit-tests/testthat/test-utils.R | 142 +++++++++++++++++ 8 files changed, 515 insertions(+), 70 deletions(-) create mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R create mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R diff --git a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R index bf71666e1..a2e9888fc 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R @@ -1,7 +1,7 @@ ## Functions for Beta Prior Approach. ## -## This is used only for the ratio prediction e.g. fraction of Covid claims, -## percentage of positive tests. We assume that the ratio follows a beta distribution +## This is used only for the fraction prediction e.g. fraction of Covid claims, +## percentage of positive tests. We assume that the fraction follows a beta distribution ## that is day-of-week dependent. A quantile regression model is used first with lasso ## penalty for supporting quantile estimation and then a non-linear minimization is used ## for prior estimation. @@ -21,7 +21,7 @@ delta <- function(fit, actual) sum((fit-actual)^2) #' @importFrom stats pbeta objective <- function(theta, x, prob, ...) { ab <- exp(theta) # Parameters are the *logs* of alpha and beta - fit <- pbeta(x, ab[1], ab[2]) + fit <- qbeta(x, ab[1], ab[2]) return (delta(fit, prob)) } @@ -44,10 +44,12 @@ objective <- function(theta, x, prob, ...) 
{ #' @importFrom stats nlm predict #' @importFrom dplyr %>% filter #' @importFrom quantgen quantile_lasso -est_priors <- function(train_data, prior_test_data, dw, taus, - covariates, response, lp_solver, lambda, +#' +est_priors <- function(train_data, prior_test_data, geo, value_type, dw, taus, + covariates, response, lp_solver, lambda, model_path_prefix, start=c(0, log(10)), - base_pseudo_denom=1000, base_pseudo_num=10) { + base_pseudo_denom=1000, base_pseudo_num=10, + train_models = TRUE, make_predictions = TRUE) { sub_train_data <- train_data %>% filter(train_data[[dw]] == 1) sub_test_data <- prior_test_data %>% filter(prior_test_data[[dw]] == 1) if (nrow(sub_test_data) == 0) { @@ -58,9 +60,11 @@ est_priors <- function(train_data, prior_test_data, dw, taus, quantiles <- list() for (idx in 1:length(taus)) { tau <- taus[idx] - obj <- quantile_lasso(as.matrix(sub_train_data[covariates]), - sub_train_data[response], tau = tau, - lambda = lambda, standardize = FALSE, lp_solver = lp_solver) + model_path <- paste0(model_path_prefix, "_beta_prior", + str_interp("_${value_type}_${geo}_${dw}_tau${tau}"), ".model") + obj = get_model(model_path, sub_train_data, covariates, tau = tau, + lambda = lambda, lp_solver = lp_solver, train_models) + y_hat_all <- as.numeric(predict(obj, newx = as.matrix(sub_test_data[covariates]))) quantiles[idx] <- exp(mean(y_hat_all, na.rm=TRUE)) # back to the actual scale } @@ -86,7 +90,7 @@ est_priors <- function(train_data, prior_test_data, dw, taus, #' @template denom_col-template #' #' @export -ratio_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, denom_col) { +frac_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, denom_col) { if (is.null(dw)) { num_adj <- data[[num_col]] + pseudo_num denom_adj <- data[[denom_col]] + pseudo_denom @@ -106,15 +110,14 @@ ratio_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, d #' @template lp_solver-template #' #' @export -ratio_adj <- function(train_data, test_data, prior_test_data, taus = TAUS, lp_solver = LP_SOLVER) { - train_data$value_target <- ratio_adj_with_pseudo(train_data, NULL, 1, 100, "value_target_num", "value_target_denom") - train_data$value_7dav <- ratio_adj_with_pseudo(train_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") - test_data$value_target <- ratio_adj_with_pseudo(test_data, NULL, 1, 100, "value_target_num", "value_target_denom") - prior_test_data$value_7dav <- ratio_adj_with_pseudo(prior_test_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") +frac_adj <- function(train_data, test_data, prior_test_data, model_path_prefix, + geo, value_type, taus = TAUS, lp_solver = LP_SOLVER) { + train_data$value_target <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_target_num", "value_target_denom") + train_data$value_7dav <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") + prior_test_data$value_7dav <- frac_adj_with_pseudo(prior_test_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") train_data$log_value_target <- log(train_data$value_target) train_data$log_value_7dav <- log(train_data$value_7dav) - test_data$log_value_target <- log(test_data$value_target) prior_test_data$log_value_7dav <- log(prior_test_data$value_7dav) pre_covariates = c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref", @@ -133,25 +136,27 @@ ratio_adj <- function(train_data, test_data, prior_test_data, taus = TAUS, lp_so test_data$pseudo_denum = NaN for (cov in c("Mon_ref", "Tue_ref", "Wed_ref", 
"Thurs_ref", "Fri_ref", "Sat_ref", "Sun_ref")) { - pseudo_counts <- est_priors(train_data, prior_test_data, cov, taus, - pre_covariates, "log_value_target", lp_solver, lambda=0.1) + pseudo_counts <- est_priors(train_data, prior_test_data, geo, value_type, + cov, taus, pre_covariates, "log_value_target", + lp_solver, lambda=0.1, + model_path_prefix=model_path_prefix) pseudo_denum = pseudo_counts[1] + pseudo_counts[2] pseudo_num = pseudo_counts[1] # update current data # For training - train_data$value_raw[train_data[[cov]] == 1] <- ratio_adj_with_pseudo( + train_data$value_raw[train_data[[cov]] == 1] <- frac_adj_with_pseudo( train_data, cov, pseudo_num, pseudo_denum, "value_raw_num", "value_raw_denom") - train_data$value_7dav[train_data[[cov]] == 1] <- ratio_adj_with_pseudo( + train_data$value_7dav[train_data[[cov]] == 1] <- frac_adj_with_pseudo( train_data, cov, pseudo_num, pseudo_denum, "value_7dav_num", "value_7dav_denom") - train_data$value_prev_7dav[train_data[[cov]] == 1] <- ratio_adj_with_pseudo( + train_data$value_prev_7dav[train_data[[cov]] == 1] <- frac_adj_with_pseudo( train_data, cov, pseudo_num, pseudo_denum, "value_prev_7dav_num", "value_prev_7dav_denom") #For testing - test_data$value_raw[test_data[[cov]] == 1] <- ratio_adj_with_pseudo( + test_data$value_raw[test_data[[cov]] == 1] <- frac_adj_with_pseudo( test_data, cov, pseudo_num, pseudo_denum, "value_raw_num", "value_raw_denom") - test_data$value_7dav[test_data[[cov]] == 1] <- ratio_adj_with_pseudo( + test_data$value_7dav[test_data[[cov]] == 1] <- frac_adj_with_pseudo( test_data, cov, pseudo_num, pseudo_denum, "value_7dav_num", "value_7dav_denom") - test_data$value_prev_7dav[test_data[[cov]] == 1] <- ratio_adj_with_pseudo( + test_data$value_prev_7dav[test_data[[cov]] == 1] <- frac_adj_with_pseudo( test_data, cov, pseudo_num, pseudo_denum, "value_prev_7dav_num", "value_prev_7dav_denom") test_data$pseudo_num[test_data[[cov]] == 1] = pseudo_num diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index 3255edaf7..afe5ef0e7 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -10,28 +10,57 @@ data_filteration <- function(test_lag, geo_train_data, geo_test_data) { if (test_lag <= 14) { test_lag_pad=2 - test_lag_pad1=0 - test_lag_pad2=0 } else if (test_lag < 51) { - test_lag_pad=7 - test_lag_pad1=6 - test_lag_pad2=7 + test_lag_pad=3 } else { - test_lag_pad=9 - test_lag_pad1=8 - test_lag_pad2=9 + test_lag_pad=7 } train_data = geo_train_data %>% filter(.data$lag >= .env$test_lag - .env$test_lag_pad ) %>% filter(.data$lag <= .env$test_lag + .env$test_lag_pad ) test_data = geo_test_data %>% - filter(.data$lag >= .env$test_lag - .env$test_lag_pad1) %>% - filter(.data$lag <= .env$test_lag + .env$test_lag_pad2) + filter(.data$lag == .env$test_lag) return (list(train_data, test_data)) } +#' Add square root scale indicator +#' +#' @param train_data training data for a certain location and a certain test lag +#' @param test_data testing data for a certain location and a certain test lag +#' @param max_raw the value raw maximum for a certain location +#' +#' @export +add_sqrtscale<- function(train_data, test_data, max_raw, value_col) { + if (!(value_col %in% colnames(train_data))){ + stop("value raw does not exist in training data!") + } + + if (!(value_col %in% colnames(test_data))){ + stop("value raw does not exist in testing data!") + } + + sqrtscale = c() + sub_max_raw = 
sqrt(max(train_data[[value_col]])) / 2 + + for (split in seq(0, 3)){ + if (sub_max_raw < (max_raw * (split+1) * 0.1)) break + train_data[paste0("sqrty", as.character(split))] = 0 + test_data[paste0("sqrty", as.character(split))] = 0 + qv_pre = max_raw * split * 0.2 + qv_next = max_raw * (split+1) * 0.2 + + train_data[(train_data[[value_col]] <= (qv_next)^2) + & (train_data[[value_col]] > (qv_pre)^2), + paste0("sqrty", as.character(split))] = 1 + test_data[(test_data[[value_col]] <= (qv_next)^2) + & (test_data[[value_col]] > (qv_pre)^2), + paste0("sqrty", as.character(split))] = 1 + sqrtscale[split+1] = paste0("sqrty", as.character(split)) + } + return (list(train_data, test_data, sqrtscale)) +} #' Fetch model and use to generate predictions/perform corrections #' @@ -55,38 +84,38 @@ data_filteration <- function(test_lag, geo_train_data, geo_test_data) { #' #' @export model_training_and_testing <- function(train_data, test_data, taus, covariates, - lp_solver, lambda, test_date, geo, indicator = "", signal = "", - train_models = TRUE, make_predictions = TRUE, - signal_suffix = "", value_type = "", test_lag = "") { + lp_solver, lambda, test_date, test_lag, + geo, value_type, model_path_prefix, + train_models = TRUE, + make_predictions = TRUE) { success = 0 coefs_result = list() coef_list = c("intercept", paste(covariates, '_coef', sep='')) for (tau in taus) { tryCatch( expr = { - model_path <- generate_model_filename(indicator, signal, geo, signal_suffix, - value_type, test_lag, tau, lambda) - obj = get_model(train_data, covariates, tau = tau, - lambda = lambda, lp_solver = lp_solver, model_path, train_models) + model_path <- paste(model_path_prefix, + str_interp("_${geo}_lag${test_lag}_tau${tau}"), ".model", sep="") + obj <- get_model(model_path, train_data, covariates, tau, + lambda, lp_solver, train_models=TRUE) if (make_predictions) { y_hat_all = as.numeric(predict(obj, newx = as.matrix(test_data[covariates]))) - test_data[paste0("predicted_tau", as.character(tau))] = y_hat_all + test_data[[paste0("predicted_tau", as.character(tau))]] = y_hat_all coefs_result[[success+1]] = coef(obj) - coefs_result[[success+1]]$tau = tau } success = success + 1 }, - error=function(e) {print(paste(geo, test_date, as.character(tau), sep="_"))} + error=function(e) {print(paste("Training failed for", model_path, sep=" "))} ) } if (success < 9) {return (NULL)} if (!make_predictions) {return (list())} - coef_combined_result = data.frame(tau=taus, - issue_date=test_date) + coef_combined_result = data.frame(tau=taus, issue_date=test_date, + geo=geo, test_lag=test_lag) coef_combined_result[coef_list] = as.matrix(do.call(rbind, coefs_result)) return (list(test_data, coef_combined_result)) @@ -116,9 +145,9 @@ evaluate <- function(test_data, taus) { "predicted_tau0.99")]) predicted_all_exp = exp(predicted_all) predicted_trans = as.list(data.frame(t(predicted_all - test_data$log_value_target))) - predicted_trans_exp = as.list(data.frame(t(predicted_all_exp - test_data$value_target))) + #predicted_trans_exp = as.list(data.frame(t(predicted_all_exp - test_data$value_target))) test_data$wis = mapply(weighted_interval_score, taus_list, predicted_trans, 0) - test_data$wis_exp = mapply(weighted_interval_score, taus_list, predicted_trans_exp, 0) + #test_data$wis_exp = mapply(weighted_interval_score, taus_list, predicted_trans_exp, 0) return (test_data) } @@ -172,13 +201,14 @@ get_model <- function(model_path, train_data, covariates, tau, #' @return path to file containing model object #' #' @importFrom stringr str_interp 
-generate_model_filename <- function(indicator, signal, geo, signal_suffix, - value_type, test_lag, tau, lambda) { +#' +generate_model_filename_prefix <- function(indicator, signal, geo_level, + signal_suffix, lambda) { prefix_components <- c(indicator, signal, signal_suffix) filename = paste0( # Drop any empty strings. - paste(prefix_components[prefix_components != ""], sep="_"), - str_interp("_{value_type}_{geo}_lag{test_lag}_tau{tau}_lambda{lambda}.model") + paste(prefix_components[prefix_components != ""], collapse="_"), + str_interp("_${geo_level}_lambda${lambda}") ) return(filename) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R index 4253fd9c3..fa134cbbc 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R @@ -23,7 +23,9 @@ #' #' @export fill_rows <- function(df, refd_col, lag_col, min_refd, max_refd, ref_lag = REF_LAG) { - lags <- min(df[[lag_col]]): ref_lag # Full list of lags + # Full list of lags + # +30 to have values for calculating 7-day averages + lags <- min(df[[lag_col]]): (ref_lag + 30) refds <- seq(min_refd, max_refd, by="day") # Full list reference date row_inds_df <- as.data.frame(crossing(refds, lags)) %>% setNames(c(refd_col, lag_col)) @@ -50,7 +52,7 @@ fill_missing_updates <- function(df, value_col, refd_col, lag_col) { pivot_wider(id_cols=lag_col, names_from=refd_col, values_from=value_col) if (any(diff(pivot_df[[lag_col]]) != 1)) { - stop("Risk exists in forward fill") + stop("Risk exists in forward filling") } pivot_df <- pivot_df %>% fill(everything(), .direction="down") @@ -135,7 +137,7 @@ get_weekofmonth <- function(date) { month <- month(date) day <- day(date) firstdayofmonth <- as.numeric(format(make_date(year, month, 1), format="%u")) - return (((day + firstdayofmonth - 1) %/% 7) %% 5 + 1) + return (((day + firstdayofmonth - 1) %/% 7) %% 4 + 1) } #' Add one hot encoding for week of a month info in terms of issue date diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index b5d22eeb7..439b6825a 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -63,8 +63,8 @@ read_params <- function(path = "params.json", template_path = "params.json.templ # Data parameters if (!("num_col" %in% names(params))) {params$num_col <- "num"} if (!("denom_col" %in% names(params))) {params$denom_col <- "denom"} - if (!("geo_level" %in% names(params))) {params$geo_level <- c("state", "county")} - if (!("value_types" %in% names(params))) {params$lp_solver <- c("count", "fraction")} + if (!("geo_levels" %in% names(params))) {params$geo_levels <- c("state", "county")} + if (!("value_types" %in% names(params))) {params$value_types <- c("count", "fraction")} # Date parameters if (!("training_days" %in% names(params))) {params$training_days <- TRAINING_DAYS} @@ -139,7 +139,7 @@ validity_checks <- function(df, value_type, num_col, denom_col, signal_suffixes) #' @param issue_date contents of input data's `issue_date` column #' @template training_days-template training_days_check <- function(issue_date, training_days = TRAINING_DAYS) { - valid_training_days = as.integer(max(issue_date) - min(issue_date)) + valid_training_days = as.integer(max(issue_date) - min(issue_date)) + 1 if (training_days > valid_training_days) { warning(sprintf("Only %d days are 
available at most for training.", valid_training_days)) } @@ -161,10 +161,10 @@ filter_counties <- function(geos) { get_populous_counties <- function() { return( covidcast::county_census %>% - select(pop = .data$POPESTIMATE2019, fips = .data$FIPS) %>% + dplyr::select(pop = .data$POPESTIMATE2019, fips = .data$FIPS) %>% # Drop megacounties (states) filter(!endsWith(.data$fips, "000")) %>% - arrange(desc(.data$pop)) %>% + arrange(desc(pop)) %>% pull(.data$fips) %>% head(n=200) ) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R new file mode 100644 index 000000000..dbd111543 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R @@ -0,0 +1,114 @@ +context("Testing helper functions for beta prior estimation") + +# Generate Test Data +prior <- c(1, 2) +main_covariate <- c("log_value_7dav") +null_covariates <- c("value_raw_num", "value_raw_denom", + "value_7dav_num", "value_7dav_denom", + "value_prev_7dav_num", "value_prev_7dav_denom") +dayofweek_covariates <- c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", + "Fri_ref", "Sat_ref") +response <- "log_value_target" +lp_solver <- "gurobi" +lambda <- 0.1 +model_path_prefix <- "model/test" +geo <- "pa" +value_type <- "fraction" +taus <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) +train_beta_vs <- log(rbeta(1000, 2, 5)) +test_beta_vs <- log(rbeta(50, 2, 5)) +train_data <- data.frame(log_value_7dav = train_beta_vs, + log_value_target = train_beta_vs) +train_data$value_target_num <- exp(train_beta_vs) * 100 +train_data$value_target_denom <- 100 +test_data <- data.frame(log_value_7dav = test_beta_vs, + log_value_target = test_beta_vs) +for (cov in null_covariates){ + train_data[[cov]] <- 0 + test_data[[cov]] <- 0 +} +for (cov in c(dayofweek_covariates, "Sun_ref")){ + train_data[[cov]] <- 1 + test_data[[cov]] <- 1 +} +prior_test_data <- test_data +covariates <- c(main_covariate, dayofweek_covariates) + + + +test_that("testing the sum of squared error", { + fit <- c(0, 1, 0) + actual <- c(1, 1, 1) + + expected <- 1^2 + 1^2 + computed <- delta(fit, actual) + expect_equal(expected, computed) +}) + + +test_that("testing the squared error objection function given the beta prior", { + theta <- c(log(1), log(2)) + x <- c(0.1, 0.25, 0.5, 0.75, 0.9) + prob <- qbeta(x, 1, 2) + + expected <-0 + computed <- objective(theta, x, prob) + expect_equal(expected, computed) +}) + + +test_that("testing the prior estimation", { + set.seed(1) + dw <- "Sat_ref" + priors <- est_priors(train_data, test_data, geo, value_type, dw, taus, + main_covariate, response, lp_solver, lambda, model_path_prefix, + start=c(0, log(10)), + base_pseudo_denom=1000, base_pseudo_num=10, + train_models = TRUE, make_predictions = TRUE) + beta <- priors[2] + alpha <- priors[1] - beta + expect_true((alpha > 0)& (alpha < 4)) + expect_true((beta > 4)& (beta < 6)) + + for (idx in 1:length(taus)) { + tau <- taus[idx] + model_path <- paste0(model_path_prefix, "_beta_prior", + str_interp("_${value_type}_${geo}_${dw}_tau${tau}"), ".model") + expect_true(file.exists(model_path)) + file.remove(model_path) + } +}) + + +test_that("testing the fraction adjustment with pseudo counts", { + value_raw <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_raw_num", "value_raw_denom") + expect_true(all(value_raw == 1/100)) + + dw <- "Sat_ref" + value_raw <- frac_adj_with_pseudo(train_data, dw, 1, 
100, "value_raw_num", "value_raw_denom") + expect_true(all(value_raw == 1/100)) +}) + + +test_that("testing the main beta prior adjustment function", { + set.seed(1) + updated_data <- frac_adj(train_data, test_data, prior_test_data, model_path_prefix, + geo, value_type, taus = taus, lp_solver = lp_solver) + updated_train_data <- updated_data[[1]] + updated_test_data <- updated_data[[2]] + + for (dw in c(dayofweek_covariates, "Sun_ref")){ + for (idx in 1:length(taus)) { + tau <- taus[idx] + model_path <- paste0(model_path_prefix, "_beta_prior", + str_interp("_${value_type}_${geo}_${dw}_tau${tau}"), ".model") + expect_true(file.exists(model_path)) + file.remove(model_path) + } + } + + expect_true(unique(updated_train_data$value_raw) == unique(updated_test_data$value_raw)) + expect_true(all(updated_train_data$value_raw < 6/(6+1))) + expect_true(all(updated_train_data$value_raw > 4/(4+4))) +}) + diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R new file mode 100644 index 000000000..dc207230e --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R @@ -0,0 +1,150 @@ +context("Testing the helper functions for modeling") + +# Constants +indicator <- "chng" +signal <- "outpatient" +geo_level <- "state" +signal_suffix <- "" +lambda <- 0.1 +lp_solver <- "gurobi" +lambda <- 0.1 +model_path_prefix <- "model/test" +geo <- "pa" +value_type <- "fraction" +taus <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) + +# Generate Test Data +main_covariate <- c("log_value_7dav") +null_covariates <- c("value_raw_num", "value_raw_denom", + "value_7dav_num", "value_7dav_denom", + "value_prev_7dav_num", "value_prev_7dav_denom") +dayofweek_covariates <- c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", + "Fri_ref", "Sat_ref") +response <- "log_value_target" +train_beta_vs <- log(rbeta(1000, 2, 5)) +test_beta_vs <- log(rbeta(50, 2, 5)) +train_data <- data.frame(log_value_7dav = train_beta_vs, + log_value_target = train_beta_vs) +train_data$value_target_num <- exp(train_beta_vs) * 100 +train_data$value_target_denom <- 100 +test_data <- data.frame(log_value_7dav = test_beta_vs, + log_value_target = test_beta_vs) +for (cov in null_covariates){ + train_data[[cov]] <- 0 + test_data[[cov]] <- 0 +} +for (cov in c(dayofweek_covariates, "Sun_ref")){ + train_data[[cov]] <- 1 + test_data[[cov]] <- 1 +} + + +test_that("testing the generation of model filename prefix", { + model_prefix <- generate_model_filename_prefix(indicator, signal, geo_level, + signal_suffix, lambda) + expected <- "chng_outpatient_state_lambda0.1" + expect_equal(model_prefix, expected) +}) + +test_that("testing the evaluation", { + for (tau in taus){ + test_data[[paste0("predicted_tau", as.character(tau))]] <- log(quantile(exp(train_beta_vs), tau)) + } + result <- evaluate(test_data, taus) + expect_true(mean(result$wis) < 0.2) +}) + +test_that("testing generating or loading the model", { + # Check the model that does not exist + tau = 0.5 + model_path <- paste(model_path_prefix, + str_interp("_${geo}_lag${test_lag}_tau${tau}"), ".model", sep="") + expect_true(!file.exists(model_path)) + + # Generate the model and check again + obj <- get_model(model_path, train_data, covariates, tau, + lambda, lp_solver, train_models=TRUE) + expect_true(file.exists(model_path)) + + expect_silent(file.remove(model_path)) +}) + +test_that("testing model training and testing", { + result <- 
model_training_and_testing(train_data, test_data, taus, covariates, + lp_solver, lambda, test_date, test_lag, + geo, value_type, model_path_prefix, + train_models = TRUE, make_predictions = TRUE) + test_result <- result[[1]] + coef_df <- result[[2]] + + for (tau in taus){ + cov <- paste0("predicted_tau", as.character(tau)) + expect_true(cov %in% colnames(test_result)) + + model_path <- paste(model_path_prefix, + str_interp("_${geo}_lag${test_lag}_tau${tau}"), ".model", sep="") + expect_true(file.exists(model_path)) + + expect_silent(file.remove(model_path)) + } + + for (cov in covariates){ + cov <- paste(cov, "coef", sep="_") + expect_true(cov %in% colnames(coef_df)) + } +}) + +test_that("testing adding square root scale", { + expect_error(result <- add_sqrtscale(train_data, test_data, 1, "value_raw"), + "value raw does not exist in training data!") + + train_data$value_raw <- rbeta(nrow(train_data), 2, 5) + expect_error(result <- add_sqrtscale(train_data, test_data, 1, "value_raw"), + "value raw does not exist in testing data!") + + test_data$value_raw <- rbeta(nrow(test_data), 2, 5) + expect_silent(result <- add_sqrtscale(train_data, test_data, 1, "value_raw")) + + new_train_data <- result[[1]] + new_test_data <- result[[2]] + sqrtscales <- result[[3]] + expect_true(length(sqrtscales) == 4) + for (cov in sqrtscales){ + expect_true(cov %in% colnames(new_train_data)) + expect_true(cov %in% colnames(new_test_data)) + } + expect_true(all(rowSums(new_train_data[sqrtscales]) %in% c(0, 1))) + expect_true(all(rowSums(new_test_data[sqrtscales]) %in% c(0, 1))) + + for (i in 0:2){ + m_l <- max(new_train_data[new_train_data[[paste0("sqrty", as.character(i))]] == 1, "value_raw"]) + m_r <- min(new_train_data[new_train_data[[paste0("sqrty", as.character(i+1))]] == 1, "value_raw"]) + expect_true(m_l <= m_r) + } + +}) + +test_that("testing data filteration", { + train_data$lag <- rep(0:60, nrow(train_data))[1:nrow(train_data)] + test_data$lag <- rep(0:60, nrow(test_data))[1:nrow(test_data)] + + # When test lag is smal0 + test_lag <- 5 + result <- data_filteration(test_lag, train_data, test_data) + train_df <- result[[1]] + test_df <- result[[2]] + expect_true(max(train_df$lag) == test_lag+2) + expect_true(min(train_df$lag) == test_lag-2) + expect_true(all(test_df$lag == test_lag)) + + # When test lag is large + test_lag <- 50 + result <- data_filteration(test_lag, train_data, test_data) + train_df <- result[[1]] + test_df <- result[[2]] + expect_true(max(train_df$lag) == test_lag+3) + expect_true(min(train_df$lag) == test_lag-3) + expect_true(all(test_df$lag == test_lag)) +}) + + diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R index 148e77202..a5c25fdf0 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R @@ -22,7 +22,7 @@ test_that("testing rows filling for missing lags", { df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) n_refds <- as.numeric(max_refd - min_refd)+1 - expect_equal(nrow(df_new), n_refds*(ref_lag+1)) + expect_equal(nrow(df_new), n_refds*(ref_lag+31)) expect_equal(df_new %>% drop_na(), fake_df) }) @@ -32,14 +32,14 @@ test_that("testing NA filling for missing udpates", { # Assuming the input data does not have enough rows for consecutive lags expect_error(fill_missing_updates(fake_df, 
value_col, refd_col, lag_col), - "Risk exists in forward fill") + "Risk exists in forward filling") # Assuming the input data is already prepared df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) n_refds <- as.numeric(max_refd - min_refd)+1 backfill_df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) - expect_equal(nrow(backfill_df), n_refds*(ref_lag+1)) + expect_equal(nrow(backfill_df), n_refds*(ref_lag+31)) for (d in seq(min_refd, max_refd, by="day")) { expect_true(all(diff(backfill_df[backfill_df[,refd_col]==d, "value_raw"])>=0 )) @@ -54,9 +54,10 @@ test_that("testing the calculation of 7-day moving average", { pivot_df <- df[order(df$issue_date, decreasing=FALSE), ] %>% pivot_wider(id_cols=refd_col, names_from="issue_date", values_from="value_raw") - pivot_df[is.na(pivot_df)] <- 0 + pivot_df[is.na(pivot_df)] = 0 backfill_df <- get_7dav(pivot_df, refd_col) + output <- backfill_df[backfill_df[[refd_col]] == as.Date("2022-01-07"), "value_raw"] expected <- colSums(pivot_df[, -1]) / 7 expect_true(all(output == expected)) @@ -81,14 +82,14 @@ test_that("testing adding columns for each day of a week", { test_that("testing the calculation of week of a month", { - expect_equal(get_weekofmonth(as.Date("2022-01-01")), 1) - expect_equal(get_weekofmonth(as.Date("2022-01-03")), 2) - expect_equal(get_weekofmonth(as.Date("2022-01-10")), 3) - expect_equal(get_weekofmonth(as.Date("2022-01-31")), 1) + expect_equal(get_weekofmonth(as.Date("2022-09-01")), 1) + expect_equal(get_weekofmonth(as.Date("2022-09-04")), 2) + expect_equal(get_weekofmonth(as.Date("2022-09-24")), 4) + expect_equal(get_weekofmonth(as.Date("2022-09-25")), 1) }) -test_that("testing the calculation of 7-day moving average", { +test_that("testing adding columns for each week of a month", { df_new <- add_weekofmonth(fake_df, refd_col, wm) expect_equal(ncol(fake_df) + 3, ncol(df_new)) @@ -96,6 +97,7 @@ test_that("testing the calculation of 7-day moving average", { expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-03"), "W2_issue"] == 1)) }) + test_that("testing adding 7 day avg and target", { df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) backfill_df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) @@ -113,6 +115,6 @@ test_that("testing adding 7 day avg and target", { # target_date: the date ref_lag days after the reference date # and 5 log columns expect_equal(ncol(df_new), 3 + 10) - expect_equal(nrow(df_new), 7 * 8) + expect_equal(nrow(df_new), 7 * (ref_lag + 30 + 1)) }) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R new file mode 100644 index 000000000..302488f1f --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R @@ -0,0 +1,142 @@ +context("Testing utils helper functions") + +TRAINING_DAYS = 10 + +test_that("testing create directory if not exist", { + # If not exists + path = "test.test" + create_dir_not_exist(path) + expect_true(file.exists(path)) + + # If already exists + create_dir_not_exist(path) + expect_true(file.exists(path)) + + # Remove + unlink(path, recursive = TRUE) + expect_true(!file.exists(path)) +}) + + +test_that("testing number of available issue dates for training", { + start_date <- as.Date("2022-01-01") + end_date <- as.Date("2022-01-09") + training_days = 10 + issue_date <- seq(start_date, end_date, by = "days") + 
expect_warning(training_days_check(issue_date, training_days = training_days), + "Only 9 days are available at most for training.") + + end_date <- as.Date("2022-01-10") + training_days = 10 + issue_date <- seq(start_date, end_date, by = "days") + expect_silent(training_days_check(issue_date, training_days = training_days)) +}) + +test_that("testing get the top200 populous counties", { + counties <- get_populous_counties() + + expect_true(length(counties) == 200) + expect_true("06037" %in% counties) +}) + + +test_that("testing the filteration of top200 populous counties", { + geos = c("06037", "58001") + expect_true(filter_counties(geos) == "06037") +}) + + +test_that("testing read parameters", { + # No input file + expect_error(read_params(path = "params.json", template_path = "params.json.template", + train_models = TRUE, make_predictions = TRUE), + "input_dir must be set in `params` and exist") + + # Check parameters + params <- read_json("params.json", simplifyVector = TRUE) + # Check initialization + expect_true(!("export_dir" %in% names(params))) + expect_true(!("cache_dir" %in% names(params))) + + expect_true(!("parallel" %in% names(params))) + expect_true(!("parallel_max_cores" %in% names(params))) + + + expect_true(!("taus" %in% names(params))) + expect_true(!("lambda" %in% names(params))) + expect_true(!("lp_solver" %in% names(params))) + + expect_true(!("taus" %in% names(params))) + expect_true(!("lambda" %in% names(params))) + expect_true(!("lp_solver" %in% names(params))) + + expect_true(!("num_col" %in% names(params))) + expect_true(!("denom_col" %in% names(params))) + expect_true(!("geo_levels" %in% names(params))) + expect_true(!("value_types" %in% names(params))) + + expect_true(!("training_days" %in% names(params))) + expect_true(!("ref_lag" %in% names(params))) + expect_true(!("testing_window" %in% names(params))) + expect_true(!("test_dates" %in% names(params))) + + # Create input file + path = "test.tempt" + create_dir_not_exist(path) + expect_silent(params <- read_params(path = "params.json", + template_path = "params.json.template", + train_models = TRUE, make_predictions = TRUE)) + unlink(path, recursive = TRUE) + + + expect_true("export_dir" %in% names(params)) + expect_true("cache_dir" %in% names(params)) + + expect_true("parallel" %in% names(params)) + expect_true("parallel_max_cores" %in% names(params)) + + + expect_true("taus" %in% names(params)) + expect_true("lambda" %in% names(params)) + expect_true("lp_solver" %in% names(params)) + + expect_true("taus" %in% names(params)) + expect_true("lambda" %in% names(params)) + expect_true("lp_solver" %in% names(params)) + + expect_true("num_col" %in% names(params)) + expect_true("denom_col" %in% names(params)) + expect_true("geo_levels" %in% names(params)) + expect_true("value_types" %in% names(params)) + + expect_true("training_days" %in% names(params)) + expect_true("ref_lag" %in% names(params)) + expect_true("testing_window" %in% names(params)) + expect_true("test_dates" %in% names(params)) + + expect_true(params$export_dir == "./receiving") + expect_true(params$cache_dir == "./cache") + + expect_true(params$parallel == FALSE) + expect_true(params$parallel_max_cores == .Machine$integer.max) + + expect_true(all(params$taus == TAUS)) + expect_true(params$lambda == LAMBDA) + expect_true(params$lp_solver == LP_SOLVER) + + expect_true(params$num_col == "num") + expect_true(params$denom_col == "denom") + expect_true(all(params$geo_levels == c("state", "county"))) + expect_true(all(params$value_types == c("count", 
"fraction"))) + + expect_true(params$training_days == TRAINING_DAYS) + expect_true(params$ref_lag == REF_LAG) + expect_true(params$testing_window == TESTING_WINDOW) + start_date <- TODAY - params$testing_window + end_date <- TODAY - 1 + expect_true(all(params$test_dates == seq(start_date, end_date, by="days"))) + + expect_silent(file.remove("params.json")) +}) + + From 0a8837a80a494aa1da04bea511c57841732e3fb8 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Mon, 19 Sep 2022 20:15:30 -0400 Subject: [PATCH 068/145] add files to support unit tests --- .../unit-tests/testthat/model/.gitignore | 7 +++++++ .../unit-tests/testthat/params.json.template | 3 +++ 2 files changed, 10 insertions(+) create mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/model/.gitignore create mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/params.json.template diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/model/.gitignore b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/model/.gitignore new file mode 100644 index 000000000..4231947de --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/model/.gitignore @@ -0,0 +1,7 @@ +.DS_Store +nowcast/code/fusion/exploration/.Rhistory +nowcast/code/fusion/unsup-meta_generation/.Rhistory +nowcast/code/fusion/data-eda/.Rhistory +nowcast/code/fusion/h2o-glrm/.Rhistory +nowcast/code/fusion/unsup-meta_generation/extra/.Rhistory +nowcast/code/fusion/exploration/.Rhistory diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/params.json.template b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/params.json.template new file mode 100644 index 000000000..fb8309e94 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/params.json.template @@ -0,0 +1,3 @@ +{ + "input_dir": "./test.tempt" +} From 45a9155225fc34b32023c35620c4d7f1f9941b95 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Mon, 19 Sep 2022 22:47:38 -0400 Subject: [PATCH 069/145] back to the previous version of data filteration --- .../delphiBackfillCorrection/R/model.R | 20 ++++++++++++------- .../unit-tests/testthat/test-model.R | 20 +++++++++++++------ 2 files changed, 27 insertions(+), 13 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index afe5ef0e7..6269200ea 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -8,19 +8,25 @@ #' #' @export data_filteration <- function(test_lag, geo_train_data, geo_test_data) { - if (test_lag <= 14) { - test_lag_pad=2 - } else if (test_lag < 51) { - test_lag_pad=3 - } else { + if (test_lag <= 14){ + test_lag_pad=lag_pad + test_lag_pad1=0 + test_lag_pad2=0 + }else if (test_lag < 51){ test_lag_pad=7 + test_lag_pad1=6 + test_lag_pad2=7 + }else { + test_lag_pad=9 + test_lag_pad1=8 + test_lag_pad2=9 } - train_data = geo_train_data %>% filter(.data$lag >= .env$test_lag - .env$test_lag_pad ) %>% filter(.data$lag <= .env$test_lag + .env$test_lag_pad ) test_data = geo_test_data %>% - filter(.data$lag == .env$test_lag) + filter(.data$lag >= .env$test_lag - .env$test_lag_pad1 ) %>% + filter(.data$lag <= .env$test_lag + .env$test_lag_pad2) return (list(train_data, test_data)) } diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R 
b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R index dc207230e..8c9ae5439 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R @@ -22,7 +22,7 @@ dayofweek_covariates <- c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref") response <- "log_value_target" train_beta_vs <- log(rbeta(1000, 2, 5)) -test_beta_vs <- log(rbeta(50, 2, 5)) +test_beta_vs <- log(rbeta(61, 2, 5)) train_data <- data.frame(log_value_7dav = train_beta_vs, log_value_target = train_beta_vs) train_data$value_target_num <- exp(train_beta_vs) * 100 @@ -128,7 +128,7 @@ test_that("testing data filteration", { train_data$lag <- rep(0:60, nrow(train_data))[1:nrow(train_data)] test_data$lag <- rep(0:60, nrow(test_data))[1:nrow(test_data)] - # When test lag is smal0 + # When test lag is small test_lag <- 5 result <- data_filteration(test_lag, train_data, test_data) train_df <- result[[1]] @@ -138,13 +138,21 @@ test_that("testing data filteration", { expect_true(all(test_df$lag == test_lag)) # When test lag is large - test_lag <- 50 + test_lag <- 48 result <- data_filteration(test_lag, train_data, test_data) train_df <- result[[1]] test_df <- result[[2]] - expect_true(max(train_df$lag) == test_lag+3) - expect_true(min(train_df$lag) == test_lag-3) - expect_true(all(test_df$lag == test_lag)) + expect_true(max(test_df$lag) == test_lag+7) + expect_true(min(test_df$lag) == test_lag-6) + + # Make sure that all lags are tested + included_lags = c() + for (test_lag in c(1:14, 21, 35, 51)){ + result <- data_filteration(test_lag, train_data, test_data) + test_df <- result[[2]] + included_lags <- c(included_lags, unique(test_df$lag)) + } + expect_true(all(1:60 %in% included_lags)) }) From 311e0783ab18bf67439bfaa39ef83060ea73ea06 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Mon, 19 Sep 2022 23:37:32 -0400 Subject: [PATCH 070/145] update main --- .../delphiBackfillCorrection/R/main.R | 134 +++++++++++------- .../delphiBackfillCorrection/R/model.R | 8 +- .../delphiBackfillCorrection/R/utils.R | 1 + .../unit-tests/testthat/test-model.R | 10 +- .../unit-tests/testthat/test-utils.R | 3 + 5 files changed, 94 insertions(+), 62 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index d7a088155..cec6874f0 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -16,62 +16,72 @@ run_backfill <- function(df, params, refd_col = "time_value", lag_col = "lag", signal_suffixes = c(""), indicator = "", signal = "") { - geo_levels <- params$geo_level - if ("state" %in% geo_levels) { - # If state included, do it last since state processing modifies the - # `df` object. - geo_levels <- c(setdiff(geo_levels, c("state")), "state") - } - - for (geo_level in geo_levels) { - # Get full list of interested locations - if (geo_level == "state") { - # Drop county field and make new "geo_value" field from "state_id". - # Aggregate counties up to state level - df <- df %>% - select(-.data$geo_value, geo_value = .data$state_id) %>% - group_by(across(c("geo_value", refd_col, lag_col))) %>% - # Summarized columns keep original names - summarize(across(everything(), sum)) + for (suffix in signal_suffixes) { + # For each suffix listed in `signal_suffixes`, run training/testing + # process again. 
Main use case is for quidel which has overall and + # age-based signals. + if (suffix != "") { + num_col <- paste(params$num_col, suffix, sep = "_") + denom_col <- paste(params$denom_col, suffix, sep = "_") + } else { + num_col <- params$num_col + denom_col <- params$denom_col } - geo_list <- unique(df$geo_value) - if (geo_level == "county") { - # Keep only 200 most populous (within the US) counties - geo_list <- filter_counties(geo_list) + + geo_levels <- params$geo_levels + if ("state" %in% geo_levels) { + # If state included, do it last since state processing modifies the + # `df` object. + geo_levels <- c(setdiff(geo_levels, c("state")), "state") } - - # Build model for each location - for (geo in geo_list) { - subdf <- df %>% filter(.data$geo_value == .env$geo) %>% filter(.data$lag < params$ref_lag) - min_refd <- min(subdf[[refd_col]]) - max_refd <- max(subdf[[refd_col]]) - subdf <- fill_rows(subdf, refd_col, lag_col, min_refd, max_refd) + + for (geo_level in geo_levels) { + # Get full list of interested locations + if (geo_level == "state") { + # Drop county field and make new "geo_value" field from "state_id". + # Aggregate counties up to state level + df <- df %>% + dplyr::select(-.data$geo_value, geo_value = .data$state_id) %>% + dplyr::group_by(across(c("geo_value", refd_col, lag_col))) %>% + # Summarized columns keep original names + dplyr::summarize(across(everything(), sum)) + } + geo_list <- unique(df$geo_value) + if (geo_level == "county") { + # Keep only 200 most populous (within the US) counties + geo_list <- filter_counties(geo_list) + } - for (suffix in signal_suffixes) { - # For each suffix listed in `signal_suffixes`, run training/testing - # process again. Main use case is for quidel which has overall and - # age-based signals. 
- if (suffix != "") { - num_col <- paste(params$num_col, suffix, sep = "_") - denom_col <- paste(params$denom_col, suffix, sep = "_") - } else { - num_col <- params$num_col - denom_col <- params$denom_col - } - + model_path_prefix <- generate_model_filename_prefix( + params$model_save_dir, indicator, signal, + geo_level, signal_suffix, lambda) + + test_data_list <- list() + coef_list <- list() + for (value_type in params$value_types) { + test_data_list[[value_type]] <- list() + coef_list[[value_type]] <- list() + } + # Build model for each location + for (geo in geo_list) { + subdf <- df %>% filter(.data$geo_value == .env$geo) %>% filter(.data$lag < params$ref_lag) + min_refd <- min(subdf[[refd_col]]) + max_refd <- max(subdf[[refd_col]]) + subdf <- fill_rows(subdf, refd_col, lag_col, min_refd, max_refd) + for (value_type in params$value_types) { # Handle different signal types if (value_type == "count") { # For counts data only combined_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) combined_df <- add_7davs_and_target(combined_df, "value_raw", refd_col, lag_col) - + } else if (value_type == "fraction") { combined_num_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", refd_col, lag_col) - + combined_denom_df <- fill_missing_updates(subdf, denom_col, refd_col, lag_col) combined_denom_df <- add_7davs_and_target(combined_denom_df, "value_raw", refd_col, lag_col) - + combined_df <- merge( combined_num_df, combined_denom_df, by=c(refd_col, "issue_date", lag_col, "target_date"), all.y=TRUE, @@ -79,7 +89,7 @@ run_backfill <- function(df, params, refd_col = "time_value", ) } combined_df <- add_params_for_dates(combined_df, refd_col, lag_col) - + for (test_date in params$test_dates) { geo_train_data = combined_df %>% filter(.data$issue_date < .env$test_date) %>% @@ -94,11 +104,14 @@ run_backfill <- function(df, params, refd_col = "time_value", if (nrow(geo_train_data) <= 200) next if (value_type == "fraction") { + # Use beta prior approach to adjust fractions geo_prior_test_data = combined_df %>% filter(.data$issue_date > .env$test_date - 7) %>% filter(.data$issue_date <= .env$test_date) - updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) + updated_data <- frac_adj(geo_train_data, geo_test_data, + geo_prior_test_data, model_path_prefix, + geo, value_type) geo_train_data <- updated_data[[1]] geo_test_data <- updated_data[[2]] } @@ -115,16 +128,16 @@ run_backfill <- function(df, params, refd_col = "time_value", covariates <- list( Y7DAV, paste0(WEEKDAYS_ABBR, "_issue"), - paste0(WEEKDAYS_ABBR, "_ref"), WEEK_ISSUES, SLOPE, SQRTSCALE + paste0(WEEKDAYS_ABBR, "_ref"), WEEK_ISSUES, SLOPE, sqrtscale ) params_list <- c(YITL, as.vector(unlist(covariates))) # Model training and testing prediction_results <- model_training_and_testing( - train_data, test_data, params$taus, params_list, - params$lp_solver, params$lambda, test_date, geo, - indicator = indicator, signal = signal, signal_suffix = suffix, - value_type = value_type, test_lag = test_lag, + train_data, test_data, params$taus, params_list, params$lp_solver, + params$lambda, test_date, test_lag = test_lag, + geo = geo, value_type = value_type, + model_path_prefix=model_path_prefix, train_models = params$train_models, make_predictions = params$make_predictions ) @@ -135,15 +148,26 @@ run_backfill <- function(df, params, refd_col = "time_value", test_data <- prediction_results[[1]] coefs <- prediction_results[[2]] test_data <- 
evaluate(test_data, params$taus) - - export_test_result(test_data, coefs, params$export_dir, geo_level, test_lag) + + idx <- length(test_data_list[[value_type]]) + 1 + test_data_list[[value_type]][[idx]] <- test_data + coef_list[[value_type]][[idx]] <- coefs } }# End for test lags }# End for test date list }# End for value types - }# End for signal suffixes - }# End for geo list - }# End for geo type + }# End for geo list + if (params$make_predictions) { + for (value_type in params$value_types) { + test_combined <- do.call(plyr::rbind.fill, test_data_list[[value_type]]) + coef_combined <- do.call(plyr::rbind.fill, coef_list[[value_type]]) + export_test_result(test_combined, coef_combined, value_type + params$export_dir, model_path_prefix) + } + + } + }# End for geo type + }# End for signal suffixes } #' Perform backfill correction on all desired signals and geo levels diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index 6269200ea..0f9761988 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -119,7 +119,7 @@ model_training_and_testing <- function(train_data, test_data, taus, covariates, } if (success < 9) {return (NULL)} if (!make_predictions) {return (list())} - + coef_combined_result = data.frame(tau=taus, issue_date=test_date, geo=geo, test_lag=test_lag) coef_combined_result[coef_list] = as.matrix(do.call(rbind, coefs_result)) @@ -208,11 +208,13 @@ get_model <- function(model_path, train_data, covariates, tau, #' #' @importFrom stringr str_interp #' -generate_model_filename_prefix <- function(indicator, signal, geo_level, - signal_suffix, lambda) { +generate_model_filename_prefix <- function(model_save_dir, indicator, signal, + geo_level, signal_suffix, lambda) { prefix_components <- c(indicator, signal, signal_suffix) filename = paste0( # Drop any empty strings. 
+ model_save_dir, + "/", paste(prefix_components[prefix_components != ""], collapse="_"), str_interp("_${geo_level}_lambda${lambda}") ) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index 439b6825a..87d87682c 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -50,6 +50,7 @@ read_params <- function(path = "params.json", template_path = "params.json.templ # Paths if (!("export_dir" %in% names(params))) {params$export_dir <- "./receiving"} if (!("cache_dir" %in% names(params))) {params$cache_dir <- "./cache"} + if (!("model_save_dir" %in% names(params))) {params$model_save_dir <- "./model"} # Parallel parameters if (!("parallel" %in% names(params))) {params$parallel <- FALSE} diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R index 8c9ae5439..0202f7e84 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R @@ -8,6 +8,7 @@ signal_suffix <- "" lambda <- 0.1 lp_solver <- "gurobi" lambda <- 0.1 +model_save_dir <- "./model" model_path_prefix <- "model/test" geo <- "pa" value_type <- "fraction" @@ -37,12 +38,13 @@ for (cov in c(dayofweek_covariates, "Sun_ref")){ train_data[[cov]] <- 1 test_data[[cov]] <- 1 } +covariates <- c(main_covariate, dayofweek_covariates) test_that("testing the generation of model filename prefix", { - model_prefix <- generate_model_filename_prefix(indicator, signal, geo_level, - signal_suffix, lambda) - expected <- "chng_outpatient_state_lambda0.1" + model_prefix <- generate_model_filename_prefix(model_save_dir, indicator, signal, + geo_level, signal_suffix, lambda) + expected <- "./model/chng_outpatient_state_lambda0.1" expect_equal(model_prefix, expected) }) @@ -51,7 +53,7 @@ test_that("testing the evaluation", { test_data[[paste0("predicted_tau", as.character(tau))]] <- log(quantile(exp(train_beta_vs), tau)) } result <- evaluate(test_data, taus) - expect_true(mean(result$wis) < 0.2) + expect_true(mean(result$wis) < 0.3) }) test_that("testing generating or loading the model", { diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R index 302488f1f..429628b03 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R @@ -57,6 +57,7 @@ test_that("testing read parameters", { # Check initialization expect_true(!("export_dir" %in% names(params))) expect_true(!("cache_dir" %in% names(params))) + expect_true(!("model_save_dir" %in% names(params))) expect_true(!("parallel" %in% names(params))) expect_true(!("parallel_max_cores" %in% names(params))) @@ -91,6 +92,7 @@ test_that("testing read parameters", { expect_true("export_dir" %in% names(params)) expect_true("cache_dir" %in% names(params)) + expect_true("model_save_dir" %in% names(params)) expect_true("parallel" %in% names(params)) expect_true("parallel_max_cores" %in% names(params)) @@ -116,6 +118,7 @@ test_that("testing read parameters", { expect_true(params$export_dir == "./receiving") expect_true(params$cache_dir == "./cache") + expect_true(params$model_save_dir == "./model") expect_true(params$parallel 
== FALSE) expect_true(params$parallel_max_cores == .Machine$integer.max) From 410952b43b0c103ac4758f01ae863e369b016792 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Mon, 19 Sep 2022 23:38:05 -0400 Subject: [PATCH 071/145] add model save dir to params --- Backfill_Correction/params.json.production.template | 1 + Backfill_Correction/params.json.template | 1 + 2 files changed, 2 insertions(+) diff --git a/Backfill_Correction/params.json.production.template b/Backfill_Correction/params.json.production.template index 27c8b411c..2c38d3fb1 100644 --- a/Backfill_Correction/params.json.production.template +++ b/Backfill_Correction/params.json.production.template @@ -1,6 +1,7 @@ { "ref_lag": 60, "input_dir": "", + "model_save_dir": "./model", "cache_dir": "./cache", "testing_window": 1, "training_days": 270, diff --git a/Backfill_Correction/params.json.template b/Backfill_Correction/params.json.template index b6317e084..2f8c70f2a 100644 --- a/Backfill_Correction/params.json.template +++ b/Backfill_Correction/params.json.template @@ -1,6 +1,7 @@ { "ref_lag": 7, "input_dir": "", + "model_save_dir": "./model", "cache_dir": "./cache", "testing_window": 1, "training_days": 30, From b2e2b3662f9295b34eec483354045d0ce91be32f Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Mon, 19 Sep 2022 23:38:44 -0400 Subject: [PATCH 072/145] add output dir --- .../unit-tests/testthat/output/.gitignore | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/output/.gitignore diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/output/.gitignore b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/output/.gitignore new file mode 100644 index 000000000..4231947de --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/output/.gitignore @@ -0,0 +1,7 @@ +.DS_Store +nowcast/code/fusion/exploration/.Rhistory +nowcast/code/fusion/unsup-meta_generation/.Rhistory +nowcast/code/fusion/data-eda/.Rhistory +nowcast/code/fusion/h2o-glrm/.Rhistory +nowcast/code/fusion/unsup-meta_generation/extra/.Rhistory +nowcast/code/fusion/exploration/.Rhistory From 6ec8f3bd304bab0a86c543f9dee6113f0399fb40 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Tue, 20 Sep 2022 00:52:13 -0400 Subject: [PATCH 073/145] change the format of the model file names --- .../R/beta_prior_estimation.R | 15 ++++++---- .../delphiBackfillCorrection/R/model.R | 13 ++++----- .../testthat/test-beta_prior_estimation.R | 29 ++++++++++++------- .../unit-tests/testthat/test-model.R | 16 +++++----- 4 files changed, 43 insertions(+), 30 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R index a2e9888fc..6a7fb54cd 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R @@ -46,8 +46,8 @@ objective <- function(theta, x, prob, ...) 
{ #' @importFrom quantgen quantile_lasso #' est_priors <- function(train_data, prior_test_data, geo, value_type, dw, taus, - covariates, response, lp_solver, lambda, model_path_prefix, - start=c(0, log(10)), + covariates, response, lp_solver, lambda, training_end_date, + model_save_dir, model_path_prefix, start=c(0, log(10)), base_pseudo_denom=1000, base_pseudo_num=10, train_models = TRUE, make_predictions = TRUE) { sub_train_data <- train_data %>% filter(train_data[[dw]] == 1) @@ -60,8 +60,10 @@ est_priors <- function(train_data, prior_test_data, geo, value_type, dw, taus, quantiles <- list() for (idx in 1:length(taus)) { tau <- taus[idx] - model_path <- paste0(model_path_prefix, "_beta_prior", - str_interp("_${value_type}_${geo}_${dw}_tau${tau}"), ".model") + model_path <- paste0( + model_save_dir, + str_interp("/${training_end_date}_beta_prior_${model_path_prefix}_${value_type}_${geo}_${dw}_tau${tau}"), + ".model") obj = get_model(model_path, sub_train_data, covariates, tau = tau, lambda = lambda, lp_solver = lp_solver, train_models) @@ -110,7 +112,8 @@ frac_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, de #' @template lp_solver-template #' #' @export -frac_adj <- function(train_data, test_data, prior_test_data, model_path_prefix, +frac_adj <- function(train_data, test_data, prior_test_data, traning_end_date, + model_save_dir, model_path_prefix, geo, value_type, taus = TAUS, lp_solver = LP_SOLVER) { train_data$value_target <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_target_num", "value_target_denom") train_data$value_7dav <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") @@ -138,7 +141,7 @@ frac_adj <- function(train_data, test_data, prior_test_data, model_path_prefix, for (cov in c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref", "Sun_ref")) { pseudo_counts <- est_priors(train_data, prior_test_data, geo, value_type, cov, taus, pre_covariates, "log_value_target", - lp_solver, lambda=0.1, + lp_solver, 0.1, training_end_date, model_save_dir, model_path_prefix=model_path_prefix) pseudo_denum = pseudo_counts[1] + pseudo_counts[2] pseudo_num = pseudo_counts[1] diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index 0f9761988..3262d5e57 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -90,8 +90,9 @@ add_sqrtscale<- function(train_data, test_data, max_raw, value_col) { #' #' @export model_training_and_testing <- function(train_data, test_data, taus, covariates, - lp_solver, lambda, test_date, test_lag, - geo, value_type, model_path_prefix, + lp_solver, lambda, test_lag, + geo, value_type, model_save_dir, + training_end_date, model_path_prefix, train_models = TRUE, make_predictions = TRUE) { success = 0 @@ -100,8 +101,8 @@ model_training_and_testing <- function(train_data, test_data, taus, covariates, for (tau in taus) { tryCatch( expr = { - model_path <- paste(model_path_prefix, - str_interp("_${geo}_lag${test_lag}_tau${tau}"), ".model", sep="") + model_path <- paste(model_save_dir, + str_interp("/${training_end_date}_${model_path_prefix}_${geo}_lag${test_lag}_tau${tau}"), ".model", sep="") obj <- get_model(model_path, train_data, covariates, tau, lambda, lp_solver, train_models=TRUE) @@ -208,13 +209,11 @@ get_model <- function(model_path, train_data, covariates, tau, #' #' @importFrom stringr str_interp #' -generate_model_filename_prefix <- 
function(model_save_dir, indicator, signal, +generate_model_filename_prefix <- function(indicator, signal, geo_level, signal_suffix, lambda) { prefix_components <- c(indicator, signal, signal_suffix) filename = paste0( # Drop any empty strings. - model_save_dir, - "/", paste(prefix_components[prefix_components != ""], collapse="_"), str_interp("_${geo_level}_lambda${lambda}") ) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R index dbd111543..7fa5b87e6 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R @@ -11,10 +11,14 @@ dayofweek_covariates <- c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", response <- "log_value_target" lp_solver <- "gurobi" lambda <- 0.1 -model_path_prefix <- "model/test" +model_save_dir <- "./model" +model_path_prefix <- "test" geo <- "pa" value_type <- "fraction" +training_end_date <- as.Date("2022-01-01") taus <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) + +set.seed(2022) train_beta_vs <- log(rbeta(1000, 2, 5)) test_beta_vs <- log(rbeta(50, 2, 5)) train_data <- data.frame(log_value_7dav = train_beta_vs, @@ -58,22 +62,24 @@ test_that("testing the squared error objection function given the beta prior", { test_that("testing the prior estimation", { - set.seed(1) dw <- "Sat_ref" priors <- est_priors(train_data, test_data, geo, value_type, dw, taus, - main_covariate, response, lp_solver, lambda, model_path_prefix, + main_covariate, response, lp_solver, lambda, + training_end_date, model_save_dir, model_path_prefix, start=c(0, log(10)), base_pseudo_denom=1000, base_pseudo_num=10, train_models = TRUE, make_predictions = TRUE) beta <- priors[2] alpha <- priors[1] - beta - expect_true((alpha > 0)& (alpha < 4)) - expect_true((beta > 4)& (beta < 6)) + expect_true((alpha > 0) & (alpha < 4)) + expect_true((beta > 4) & (beta < 8)) for (idx in 1:length(taus)) { tau <- taus[idx] - model_path <- paste0(model_path_prefix, "_beta_prior", - str_interp("_${value_type}_${geo}_${dw}_tau${tau}"), ".model") + model_path <- paste0( + model_save_dir, + str_interp("/${training_end_date}_beta_prior_${model_path_prefix}_${value_type}_${geo}_${dw}_tau${tau}"), + ".model") expect_true(file.exists(model_path)) file.remove(model_path) } @@ -92,7 +98,8 @@ test_that("testing the fraction adjustment with pseudo counts", { test_that("testing the main beta prior adjustment function", { set.seed(1) - updated_data <- frac_adj(train_data, test_data, prior_test_data, model_path_prefix, + updated_data <- frac_adj(train_data, test_data, prior_test_data, + training_end_date, model_save_dir, model_path_prefix, geo, value_type, taus = taus, lp_solver = lp_solver) updated_train_data <- updated_data[[1]] updated_test_data <- updated_data[[2]] @@ -100,8 +107,10 @@ test_that("testing the main beta prior adjustment function", { for (dw in c(dayofweek_covariates, "Sun_ref")){ for (idx in 1:length(taus)) { tau <- taus[idx] - model_path <- paste0(model_path_prefix, "_beta_prior", - str_interp("_${value_type}_${geo}_${dw}_tau${tau}"), ".model") + model_path <- paste0( + model_save_dir, + str_interp("/${training_end_date}_beta_prior_${model_path_prefix}_${value_type}_${geo}_${dw}_tau${tau}"), + ".model") expect_true(file.exists(model_path)) file.remove(model_path) } diff --git 
a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R index 0202f7e84..c1a64679b 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R @@ -9,9 +9,10 @@ lambda <- 0.1 lp_solver <- "gurobi" lambda <- 0.1 model_save_dir <- "./model" -model_path_prefix <- "model/test" +model_path_prefix <- "test" geo <- "pa" value_type <- "fraction" +training_end_date <- as.Date("2022-01-01") taus <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) # Generate Test Data @@ -42,9 +43,9 @@ covariates <- c(main_covariate, dayofweek_covariates) test_that("testing the generation of model filename prefix", { - model_prefix <- generate_model_filename_prefix(model_save_dir, indicator, signal, + model_prefix <- generate_model_filename_prefix(indicator, signal, geo_level, signal_suffix, lambda) - expected <- "./model/chng_outpatient_state_lambda0.1" + expected <- "chng_outpatient_state_lambda0.1" expect_equal(model_prefix, expected) }) @@ -73,8 +74,9 @@ test_that("testing generating or loading the model", { test_that("testing model training and testing", { result <- model_training_and_testing(train_data, test_data, taus, covariates, - lp_solver, lambda, test_date, test_lag, - geo, value_type, model_path_prefix, + lp_solver, lambda, test_lag, + geo, value_type, model_save_dir, + training_end_date, model_path_prefix, train_models = TRUE, make_predictions = TRUE) test_result <- result[[1]] coef_df <- result[[2]] @@ -83,8 +85,8 @@ test_that("testing model training and testing", { cov <- paste0("predicted_tau", as.character(tau)) expect_true(cov %in% colnames(test_result)) - model_path <- paste(model_path_prefix, - str_interp("_${geo}_lag${test_lag}_tau${tau}"), ".model", sep="") + model_path <- paste(model_save_dir, + str_interp("/${training_end_date}_${model_path_prefix}_${geo}_lag${test_lag}_tau${tau}"), ".model", sep="") expect_true(file.exists(model_path)) expect_silent(file.remove(model_path)) From 7316c323c0f52aa5fd6dfe8fc1cbf678bba2e88d Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Tue, 20 Sep 2022 01:09:00 -0400 Subject: [PATCH 074/145] add folders --- .../delphiBackfillCorrection/R/model/.gitignore | 7 +++++++ .../delphiBackfillCorrection/R/receiving/.gitignore | 7 +++++++ 2 files changed, 14 insertions(+) create mode 100644 Backfill_Correction/delphiBackfillCorrection/R/model/.gitignore create mode 100644 Backfill_Correction/delphiBackfillCorrection/R/receiving/.gitignore diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model/.gitignore b/Backfill_Correction/delphiBackfillCorrection/R/model/.gitignore new file mode 100644 index 000000000..4231947de --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/R/model/.gitignore @@ -0,0 +1,7 @@ +.DS_Store +nowcast/code/fusion/exploration/.Rhistory +nowcast/code/fusion/unsup-meta_generation/.Rhistory +nowcast/code/fusion/data-eda/.Rhistory +nowcast/code/fusion/h2o-glrm/.Rhistory +nowcast/code/fusion/unsup-meta_generation/extra/.Rhistory +nowcast/code/fusion/exploration/.Rhistory diff --git a/Backfill_Correction/delphiBackfillCorrection/R/receiving/.gitignore b/Backfill_Correction/delphiBackfillCorrection/R/receiving/.gitignore new file mode 100644 index 000000000..4231947de --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/R/receiving/.gitignore @@ -0,0 +1,7 @@ +.DS_Store 
+nowcast/code/fusion/exploration/.Rhistory +nowcast/code/fusion/unsup-meta_generation/.Rhistory +nowcast/code/fusion/data-eda/.Rhistory +nowcast/code/fusion/h2o-glrm/.Rhistory +nowcast/code/fusion/unsup-meta_generation/extra/.Rhistory +nowcast/code/fusion/exploration/.Rhistory From 5a8d35a58df2a52ca6e9f33a149ffc74b1ad66ac Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Tue, 20 Sep 2022 01:11:12 -0400 Subject: [PATCH 075/145] update main and io --- .../delphiBackfillCorrection/R/io.R | 28 ++-- .../delphiBackfillCorrection/R/main.R | 128 +++++++++--------- .../delphiBackfillCorrection/R/tooling.R | 2 + .../unit-tests/testthat/test-io.R | 35 +++++ 4 files changed, 113 insertions(+), 80 deletions(-) create mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index bfef16bec..3b6d07f16 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -14,26 +14,24 @@ read_data <- function(input_dir) { #' #' @param test_data test data containing prediction results #' @param coef_data data frame containing the estimated coefficients +#' @param model_path_prefix string containing necessary information #' @template export_dir-template -#' @template geo_level-template -#' @template test_lag-template +#' @template value_type-template #' #' @importFrom readr write_csv -#' @importFrom stringr str_interp +#' @importFrom stringr str_interp, str_split #' #' @export -export_test_result <- function(test_data, coef_data, export_dir, - geo_level, test_lag) { - if (!missing(test_lag)) { - base_name = str_interp("{geo_level}_lag{test_lag}.csv") - } else { - base_name = str_interp("{geo_level}.csv") - } +export_test_result <- function(test_data, coef_data, training_end_date, + value_type, export_dir, + model_path_prefix) { + base_name <- paste(as.character(training_end_date), + model_path_prefix, str_interp("${value_type}.csv"), sep="_") - pred_output_dir = str_interp("prediction_{base_name}") + pred_output_dir <- str_interp("prediction_${base_name}") write_csv(test_data, file.path(export_dir, pred_output_dir)) - coef_output_dir = str_interp("coefs_{base_name}") + coef_output_dir <- str_interp("coefs_${base_name}") write_csv(test_data, file.path(export_dir, coef_output_dir)) } @@ -103,7 +101,7 @@ subset_valid_files <- function(files_list, file_type = c("daily", "rollup"), par ## TODO: start_date depends on if we're doing model training or just corrections. start_date <- TODAY - params$training_days - params$ref_lag - end_date <- TODAY - 1 + end_date <- TODAY # Only keep files with data that falls at least somewhat between the desired # start and end range dates. 
@@ -125,7 +123,7 @@ create_name_pattern <- function(indicator, signal, file_type = c("daily", "rollup")) { file_type <- match.arg(file_type) switch(file_type, - daily = str_interp("{indicator}_{signal}_as_of_[0-9]{8}.parquet$"), - rollup = str_interp("{indicator}_{signal}_from_[0-9]{8}_to_[0-9]{8}.parquet$") + daily = str_interp("${indicator}_${signal}_as_of_[0-9]{8}.parquet$"), + rollup = str_interp("${indicator}_${signal}_from_[0-9]{8}_to_[0-9]{8}.parquet$") ) } diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index cec6874f0..b57dec61a 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -53,8 +53,7 @@ run_backfill <- function(df, params, refd_col = "time_value", } model_path_prefix <- generate_model_filename_prefix( - params$model_save_dir, indicator, signal, - geo_level, signal_suffix, lambda) + indicator, signal, geo_level, signal_suffix, lambda) test_data_list <- list() coef_list <- list() @@ -89,82 +88,81 @@ run_backfill <- function(df, params, refd_col = "time_value", ) } combined_df <- add_params_for_dates(combined_df, refd_col, lag_col) + combined_df <- combined_df %>% filter(.data$lag < params$ref_lag) - for (test_date in params$test_dates) { - geo_train_data = combined_df %>% - filter(.data$issue_date < .env$test_date) %>% - filter(.data$target_date <= .env$test_date) %>% - filter(.data$target_date > .env$test_date - params$training_days) %>% - drop_na() - geo_test_data = combined_df %>% - filter(.data$issue_date >= .env$test_date) %>% - filter(.data$issue_date < .env$test_date + params$testing_window) %>% - drop_na() - if (nrow(geo_test_data) == 0) next - if (nrow(geo_train_data) <= 200) next + test_date <- min(params$test_dates) + geo_train_data <- combined_df %>% + filter(.data$issue_date < .env$test_date) %>% + filter(.data$target_date <= .env$test_date) %>% + filter(.data$target_date > .env$test_date - params$training_days) %>% + drop_na() + geo_test_data <- combined_df %>% + filter(.data$issue_date %in% params$test_dates) %>% + drop_na() + if (nrow(geo_test_data) == 0) next + if (nrow(geo_train_data) <= 200) next - if (value_type == "fraction") { - # Use beta prior approach to adjust fractions - geo_prior_test_data = combined_df %>% - filter(.data$issue_date > .env$test_date - 7) %>% - filter(.data$issue_date <= .env$test_date) - - updated_data <- frac_adj(geo_train_data, geo_test_data, - geo_prior_test_data, model_path_prefix, - geo, value_type) - geo_train_data <- updated_data[[1]] - geo_test_data <- updated_data[[2]] - } - max_raw = sqrt(max(geo_train_data$value_raw)) - for (test_lag in c(1:14, 21, 35, 51)) { - filtered_data <- data_filteration(test_lag, geo_train_data, geo_test_data) - train_data <- filtered_data[[1]] - test_data <- filtered_data[[2]] + if (value_type == "fraction") { + # Use beta prior approach to adjust fractions + geo_prior_test_data = combined_df %>% + filter(.data$issue_date > .env$test_date - 7) %>% + filter(.data$issue_date <= .env$test_date) + updated_data <- frac_adj(geo_train_data, geo_test_data, + geo_prior_test_data, test_date-1, + model_save_dir, model_path_prefix, + geo, value_type) + geo_train_data <- updated_data[[1]] + geo_test_data <- updated_data[[2]] + } + max_raw = sqrt(max(geo_train_data$value_raw)) + for (test_lag in c(1:14, 21, 35, 51)) { + filtered_data <- data_filteration(test_lag, geo_train_data, geo_test_data) + train_data <- filtered_data[[1]] + test_data <- 
filtered_data[[2]] - updated_data <- add_sqrtscale(train_data, test_data, max_raw, "value_raw") - train_data <- updated_data[[1]] - test_data <- updated_data[[2]] - sqrtscale <- updated_data[[3]] + updated_data <- add_sqrtscale(train_data, test_data, max_raw, "value_raw") + train_data <- updated_data[[1]] + test_data <- updated_data[[2]] + sqrtscale <- updated_data[[3]] - covariates <- list( - Y7DAV, paste0(WEEKDAYS_ABBR, "_issue"), - paste0(WEEKDAYS_ABBR, "_ref"), WEEK_ISSUES, SLOPE, sqrtscale - ) - params_list <- c(YITL, as.vector(unlist(covariates))) + covariates <- list( + Y7DAV, paste0(WEEKDAYS_ABBR, "_issue"), + paste0(WEEKDAYS_ABBR, "_ref"), WEEK_ISSUES, SLOPE, sqrtscale + ) + params_list <- c(YITL, as.vector(unlist(covariates))) - # Model training and testing - prediction_results <- model_training_and_testing( - train_data, test_data, params$taus, params_list, params$lp_solver, - params$lambda, test_date, test_lag = test_lag, - geo = geo, value_type = value_type, - model_path_prefix=model_path_prefix, - train_models = params$train_models, - make_predictions = params$make_predictions - ) + # Model training and testing + prediction_results <- model_training_and_testing( + train_data, test_data, params$taus, params_list, params$lp_solver, + params$lambda, model_save_dir = params$model_save_dir, + training_end_date = test_date - 1, test_lag = test_lag, + geo = geo, value_type = value_type, + model_path_prefix=model_path_prefix, + train_models = params$train_models, + make_predictions = params$make_predictions + ) - # Model objects are saved during training, so only need to export - # output if making predictions/corrections - if (params$make_predictions) { - test_data <- prediction_results[[1]] - coefs <- prediction_results[[2]] - test_data <- evaluate(test_data, params$taus) - - idx <- length(test_data_list[[value_type]]) + 1 - test_data_list[[value_type]][[idx]] <- test_data - coef_list[[value_type]][[idx]] <- coefs - } - }# End for test lags - }# End for test date list + # Model objects are saved during training, so only need to export + # output if making predictions/corrections + if (params$make_predictions) { + test_data <- prediction_results[[1]] + coefs <- prediction_results[[2]] + test_data <- evaluate(test_data, params$taus) + + idx <- length(test_data_list[[value_type]]) + 1 + test_data_list[[value_type]][[idx]] <- test_data + coef_list[[value_type]][[idx]] <- coefs + } + }# End for test lags }# End for value types }# End for geo list if (params$make_predictions) { for (value_type in params$value_types) { test_combined <- do.call(plyr::rbind.fill, test_data_list[[value_type]]) coef_combined <- do.call(plyr::rbind.fill, coef_list[[value_type]]) - export_test_result(test_combined, coef_combined, value_type - params$export_dir, model_path_prefix) + export_test_result(test_combined, coef_combined, training_end_date, + value_type, params$export_dir, model_path_prefix) } - } }# End for geo type }# End for signal suffixes diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R index aba60f19c..d7cf36ff8 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R @@ -91,6 +91,8 @@ run_backfill_local <- function(df, export_dir, test_date_list, value_cols, value params_list <- c(YITL, as.vector(unlist(covariates))) # Model training and testing + model_path_prefix <- generate_model_filename_prefix( + indicator, signal, geo, signal_suffix, 
value_type, test_lag, tau, lambda) prediction_results <- model_training_and_testing( train_data, test_data, taus, params_list, lp_solver, lambda, test_date, geo, value_type = value_type, test_lag = test_lag diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R new file mode 100644 index 000000000..59a4dc3c8 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R @@ -0,0 +1,35 @@ +context("Testing io helper functions") + +# Constants +params <- list() +params$input_dir <- "./input" +params$taus <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) + +model_path_prefix <- "test" +geo <- "pa" +value_type <- "fraction" + + + +test_that("testing exporting the output file", { + test_data <- data.frame() + coef_data <- data.frame() + export_dir <- "./output" + value_type <- "fraction" + geo_level <- "state" + training_end_date <- as.Date("2022-01-01'") + + export_test_result(test_data, coef_data, training_end_date, value_type, export_dir, + model_path_prefix) + prediction_file <- "./output/prediction_2022-01-01_test_fraction.csv" + coefs_file <- "./output/coefs_2022-01-01_test_fraction.csv" + expect_true(file.exists(prediction_file)) + expect_true(file.exists(coefs_file)) + + # Remove + file.remove(prediction_file) + file.remove(coefs_file) +}) + + + From fd4d814d9c19789344370f190630486f70781684 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Tue, 20 Sep 2022 01:21:54 -0400 Subject: [PATCH 076/145] update data filteration to allow test-only mode --- .../delphiBackfillCorrection/R/main.R | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index b57dec61a..52dc6af87 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -13,7 +13,7 @@ #' @importFrom rlang .data .env #' #' @export -run_backfill <- function(df, params, refd_col = "time_value", +run_backfill <- function(df, params, training_end_date, refd_col = "time_value", lag_col = "lag", signal_suffixes = c(""), indicator = "", signal = "") { for (suffix in signal_suffixes) { @@ -90,11 +90,10 @@ run_backfill <- function(df, params, refd_col = "time_value", combined_df <- add_params_for_dates(combined_df, refd_col, lag_col) combined_df <- combined_df %>% filter(.data$lag < params$ref_lag) - test_date <- min(params$test_dates) geo_train_data <- combined_df %>% - filter(.data$issue_date < .env$test_date) %>% - filter(.data$target_date <= .env$test_date) %>% - filter(.data$target_date > .env$test_date - params$training_days) %>% + filter(.data$issue_date < training_end_date) %>% + filter(.data$target_date <= training_end_date) %>% + filter(.data$target_date > training_end_date - params$training_days) %>% drop_na() geo_test_data <- combined_df %>% filter(.data$issue_date %in% params$test_dates) %>% @@ -105,10 +104,10 @@ run_backfill <- function(df, params, refd_col = "time_value", if (value_type == "fraction") { # Use beta prior approach to adjust fractions geo_prior_test_data = combined_df %>% - filter(.data$issue_date > .env$test_date - 7) %>% - filter(.data$issue_date <= .env$test_date) + filter(.data$issue_date > min(params$test_dates) - 7) %>% + filter(.data$issue_date <= max(params$test_dates)) updated_data <- frac_adj(geo_train_data, geo_test_data, - geo_prior_test_data, 
test_date-1, + geo_prior_test_data, training_end_date, model_save_dir, model_path_prefix, geo, value_type) geo_train_data <- updated_data[[1]] @@ -135,7 +134,7 @@ run_backfill <- function(df, params, refd_col = "time_value", prediction_results <- model_training_and_testing( train_data, test_data, params$taus, params_list, params$lp_solver, params$lambda, model_save_dir = params$model_save_dir, - training_end_date = test_date - 1, test_lag = test_lag, + training_end_date = training_end_date, test_lag = test_lag, geo = geo, value_type = value_type, model_path_prefix=model_path_prefix, train_models = params$train_models, From 66dd670ba9f16f54efa8f0ffea0084a0f0b3f023 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Tue, 20 Sep 2022 01:24:25 -0400 Subject: [PATCH 077/145] add comments for TODOs --- Backfill_Correction/delphiBackfillCorrection/R/main.R | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 52dc6af87..4e109eadf 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -192,7 +192,11 @@ main <- function(params) { options(mc.cores = min(params$parallel_max_cores, floor(cores / 2))) } } - + + #TO-DO + # Get the training end date according to the current date which is the date + # when the newest models were trained + # Loop over every indicator + signal combination. for (input_group in INDICATORS_AND_SIGNALS) { files_list <- get_files_list( @@ -234,7 +238,7 @@ main <- function(params) { training_days_check(input_data$issue_date, params$training_days) # Perform backfill corrections and save result - run_backfill(input_data, params, + run_backfill(input_data, params, training_end_date, indicator = input_group$indicator, signal = input_group$signal, signal_suffixes = input_group$name_suffix) } From a3cffc3c07e5ee9104195b4b022deff363cdef3e Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Tue, 20 Sep 2022 13:20:37 -0400 Subject: [PATCH 078/145] Update Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R Co-authored-by: nmdefries <42820733+nmdefries@users.noreply.github.com> --- .../delphiBackfillCorrection/R/beta_prior_estimation.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R index 6a7fb54cd..4e4d02d00 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R @@ -18,7 +18,7 @@ delta <- function(fit, actual) sum((fit-actual)^2) #' @param prob the expected probabilities #' @param ... additional arguments #' -#' @importFrom stats pbeta +#' @importFrom stats qbeta objective <- function(theta, x, prob, ...) 
{ ab <- exp(theta) # Parameters are the *logs* of alpha and beta fit <- qbeta(x, ab[1], ab[2]) From ed07a2387872119a3f11e988eba0d3a0f3b916c0 Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Tue, 20 Sep 2022 13:21:31 -0400 Subject: [PATCH 079/145] Update Backfill_Correction/delphiBackfillCorrection/R/io.R Co-authored-by: nmdefries <42820733+nmdefries@users.noreply.github.com> --- Backfill_Correction/delphiBackfillCorrection/R/io.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index 3b6d07f16..47ea5a202 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -19,7 +19,7 @@ read_data <- function(input_dir) { #' @template value_type-template #' #' @importFrom readr write_csv -#' @importFrom stringr str_interp, str_split +#' @importFrom stringr str_interp str_split #' #' @export export_test_result <- function(test_data, coef_data, training_end_date, From b91830e8da6c96706e16ded0a5959bde03317ddb Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Tue, 20 Sep 2022 13:21:58 -0400 Subject: [PATCH 080/145] Update Backfill_Correction/delphiBackfillCorrection/R/receiving/.gitignore Co-authored-by: nmdefries <42820733+nmdefries@users.noreply.github.com> --- .../delphiBackfillCorrection/R/receiving/.gitignore | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/receiving/.gitignore b/Backfill_Correction/delphiBackfillCorrection/R/receiving/.gitignore index 4231947de..afed0735d 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/receiving/.gitignore +++ b/Backfill_Correction/delphiBackfillCorrection/R/receiving/.gitignore @@ -1,7 +1 @@ -.DS_Store -nowcast/code/fusion/exploration/.Rhistory -nowcast/code/fusion/unsup-meta_generation/.Rhistory -nowcast/code/fusion/data-eda/.Rhistory -nowcast/code/fusion/h2o-glrm/.Rhistory -nowcast/code/fusion/unsup-meta_generation/extra/.Rhistory -nowcast/code/fusion/exploration/.Rhistory +*.csv From 4f5d84de90e54d001133b7a2188b2208f16dfe96 Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Tue, 20 Sep 2022 13:22:23 -0400 Subject: [PATCH 081/145] Update Backfill_Correction/delphiBackfillCorrection/R/main.R Co-authored-by: nmdefries <42820733+nmdefries@users.noreply.github.com> --- Backfill_Correction/delphiBackfillCorrection/R/main.R | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 4e109eadf..5d19edbd5 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -157,8 +157,8 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", }# End for geo list if (params$make_predictions) { for (value_type in params$value_types) { - test_combined <- do.call(plyr::rbind.fill, test_data_list[[value_type]]) - coef_combined <- do.call(plyr::rbind.fill, coef_list[[value_type]]) + test_combined <- bind_rows(test_data_list[[value_type]]) + coef_combined <- bind_rows(coef_list[[value_type]]) export_test_result(test_combined, coef_combined, training_end_date, value_type, params$export_dir, model_path_prefix) } From e1a06e035a2cf40ec429a714a8fa28fdab3b09b5 Mon 
Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Tue, 20 Sep 2022 13:41:12 -0400 Subject: [PATCH 082/145] Update Backfill_Correction/delphiBackfillCorrection/R/model/.gitignore Co-authored-by: nmdefries <42820733+nmdefries@users.noreply.github.com> --- .../delphiBackfillCorrection/R/model/.gitignore | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model/.gitignore b/Backfill_Correction/delphiBackfillCorrection/R/model/.gitignore index 4231947de..0149797d2 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model/.gitignore +++ b/Backfill_Correction/delphiBackfillCorrection/R/model/.gitignore @@ -1,7 +1 @@ -.DS_Store -nowcast/code/fusion/exploration/.Rhistory -nowcast/code/fusion/unsup-meta_generation/.Rhistory -nowcast/code/fusion/data-eda/.Rhistory -nowcast/code/fusion/h2o-glrm/.Rhistory -nowcast/code/fusion/unsup-meta_generation/extra/.Rhistory -nowcast/code/fusion/exploration/.Rhistory +*.model From dbb20419962ceef4ab7f7a08f731d6fec8369292 Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Tue, 20 Sep 2022 13:41:28 -0400 Subject: [PATCH 083/145] Update Backfill_Correction/delphiBackfillCorrection/R/model.R Co-authored-by: nmdefries <42820733+nmdefries@users.noreply.github.com> --- Backfill_Correction/delphiBackfillCorrection/R/model.R | 1 - 1 file changed, 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index 3262d5e57..da6067032 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -152,7 +152,6 @@ evaluate <- function(test_data, taus) { "predicted_tau0.99")]) predicted_all_exp = exp(predicted_all) predicted_trans = as.list(data.frame(t(predicted_all - test_data$log_value_target))) - #predicted_trans_exp = as.list(data.frame(t(predicted_all_exp - test_data$value_target))) test_data$wis = mapply(weighted_interval_score, taus_list, predicted_trans, 0) #test_data$wis_exp = mapply(weighted_interval_score, taus_list, predicted_trans_exp, 0) From a28ec445b15dd0c5a1b68ba317735c83eea0bbf4 Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Tue, 20 Sep 2022 13:41:40 -0400 Subject: [PATCH 084/145] Update Backfill_Correction/delphiBackfillCorrection/R/model.R Co-authored-by: nmdefries <42820733+nmdefries@users.noreply.github.com> --- Backfill_Correction/delphiBackfillCorrection/R/model.R | 1 - 1 file changed, 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index da6067032..5eba52040 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -153,7 +153,6 @@ evaluate <- function(test_data, taus) { predicted_all_exp = exp(predicted_all) predicted_trans = as.list(data.frame(t(predicted_all - test_data$log_value_target))) test_data$wis = mapply(weighted_interval_score, taus_list, predicted_trans, 0) - #test_data$wis_exp = mapply(weighted_interval_score, taus_list, predicted_trans_exp, 0) return (test_data) } From b91f6594c6b535dbb68b4326d8890027240a2685 Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Tue, 20 Sep 2022 13:43:41 -0400 Subject: [PATCH 085/145] Update 
Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/output/.gitignore Co-authored-by: nmdefries <42820733+nmdefries@users.noreply.github.com> --- .../unit-tests/testthat/output/.gitignore | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/output/.gitignore b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/output/.gitignore index 4231947de..afed0735d 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/output/.gitignore +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/output/.gitignore @@ -1,7 +1 @@ -.DS_Store -nowcast/code/fusion/exploration/.Rhistory -nowcast/code/fusion/unsup-meta_generation/.Rhistory -nowcast/code/fusion/data-eda/.Rhistory -nowcast/code/fusion/h2o-glrm/.Rhistory -nowcast/code/fusion/unsup-meta_generation/extra/.Rhistory -nowcast/code/fusion/exploration/.Rhistory +*.csv From 4e59b5f55e5d06bdaf236aab5d2208bdcfd679ee Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Tue, 20 Sep 2022 13:43:56 -0400 Subject: [PATCH 086/145] Update Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/model/.gitignore Co-authored-by: nmdefries <42820733+nmdefries@users.noreply.github.com> --- .../unit-tests/testthat/model/.gitignore | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/model/.gitignore b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/model/.gitignore index 4231947de..0149797d2 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/model/.gitignore +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/model/.gitignore @@ -1,7 +1 @@ -.DS_Store -nowcast/code/fusion/exploration/.Rhistory -nowcast/code/fusion/unsup-meta_generation/.Rhistory -nowcast/code/fusion/data-eda/.Rhistory -nowcast/code/fusion/h2o-glrm/.Rhistory -nowcast/code/fusion/unsup-meta_generation/extra/.Rhistory -nowcast/code/fusion/exploration/.Rhistory +*.model From a3b5f5ebe282a581d6c6dad9aa805f2be2de53f0 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Wed, 21 Sep 2022 01:34:09 -0400 Subject: [PATCH 087/145] add files that have been created or changed in man --- .../delphiBackfillCorrection/NAMESPACE | 8 ++-- .../man/add_sqrtscale.Rd | 8 +++- .../man/est_priors.Rd | 9 ++++- .../man/export_test_result.Rd | 16 +++++--- .../man/model_training_and_testing.Rd | 37 +++++++++---------- .../man/run_backfill.Rd | 1 + 6 files changed, 49 insertions(+), 30 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE index ba63ea765..4cfaf41e9 100644 --- a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE +++ b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE @@ -3,6 +3,7 @@ export(add_7davs_and_target) export(add_dayofweek) export(add_shift) +export(add_sqrtscale) export(add_weekofmonth) export(create_dir_not_exist) export(data_filteration) @@ -10,12 +11,12 @@ export(evaluate) export(export_test_result) export(fill_missing_updates) export(fill_rows) +export(frac_adj) +export(frac_adj_with_pseudo) export(get_7dav) export(main) export(main_local) export(model_training_and_testing) -export(ratio_adj) -export(ratio_adj_with_pseudo) export(read_data) export(run_backfill) export(run_backfill_local) @@ -48,10 +49,11 @@ importFrom(rlang,.data) importFrom(rlang,.env) 
importFrom(stats,coef) importFrom(stats,nlm) -importFrom(stats,pbeta) importFrom(stats,predict) +importFrom(stats,qbeta) importFrom(stats,setNames) importFrom(stringr,str_interp) +importFrom(stringr,str_split) importFrom(tibble,tribble) importFrom(tidyr,crossing) importFrom(tidyr,drop_na) diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd index 8b4d55222..d526c1461 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd @@ -1,9 +1,11 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/preprocessing.R +% Please edit documentation in R/model.R, R/preprocessing.R \name{add_sqrtscale} \alias{add_sqrtscale} -\title{Add columns to indicate the scale of value at square root level} +\title{Add square root scale indicator} \usage{ +add_sqrtscale(train_data, test_data, max_raw, value_col) + add_sqrtscale(train_data, test_data, max_raw, value_col) } \arguments{ @@ -17,5 +19,7 @@ add_sqrtscale(train_data, test_data, max_raw, value_col) the input dataframe.} } \description{ +Add square root scale indicator + Add columns to indicate the scale of value at square root level } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd b/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd index 1a95c8dcb..94b92df70 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd @@ -9,15 +9,22 @@ a certain day of a week} est_priors( train_data, prior_test_data, + geo, + value_type, dw, taus, covariates, response, lp_solver, lambda, + training_end_date, + model_save_dir, + model_path_prefix, start = c(0, log(10)), base_pseudo_denom = 1000, - base_pseudo_num = 10 + base_pseudo_num = 10, + train_models = TRUE, + make_predictions = TRUE ) } \arguments{ diff --git a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd index c24dfdbbb..5cbc5e44c 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd @@ -4,19 +4,25 @@ \alias{export_test_result} \title{Export the result to customized directory} \usage{ -export_test_result(test_data, coef_data, export_dir, geo_level, test_lag) +export_test_result( + test_data, + coef_data, + training_end_date, + value_type, + export_dir, + model_path_prefix +) } \arguments{ \item{test_data}{test data containing prediction results} \item{coef_data}{data frame containing the estimated coefficients} -\item{export_dir}{path to directory to save output to} +\item{value_type}{string describing signal type. Either "count" or "fraction".} -\item{geo_level}{string describing geo coverage of input data. 
Either "state" -or "county".} +\item{export_dir}{path to directory to save output to} -\item{test_lag}{integer number of days ago to predict for} +\item{model_path_prefix}{string containing necessary information} } \description{ Export the result to customized directory diff --git a/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd b/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd index c255c1cea..92fe3e45a 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd @@ -11,15 +11,14 @@ model_training_and_testing( covariates, lp_solver, lambda, - test_date, + test_lag, geo, - indicator = "", - signal = "", + value_type, + model_save_dir, + training_end_date, + model_path_prefix, train_models = TRUE, - make_predictions = TRUE, - signal_suffix = "", - value_type = "", - test_lag = "" + make_predictions = TRUE ) } \arguments{ @@ -39,18 +38,12 @@ of the `gurobi` package).} \item{lambda}{the level of lasso penalty} -\item{test_date}{Date object representing test date} +\item{test_lag}{integer number of days ago to predict for} \item{geo}{string specifying the name of the geo region (e.g. FIPS code for counties)} -\item{indicator}{string specifying the name of the indicator as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} - -\item{signal}{string specifying the name of the signal as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} +\item{value_type}{string describing signal type. Either "count" or "fraction".} \item{train_models}{boolean indicating whether to train models (TRUE). If FALSE previously trained models (stored locally) will be used instead. @@ -59,15 +52,21 @@ Default is TRUE.} \item{make_predictions}{boolean indicating whether to generate and save corrections (TRUE) or not. Default is TRUE.} +\item{test_date}{Date object representing test date} + +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + \item{signal_suffix}{string specifying value column name ending to be appended to standard value column names from `params$num_col` and `params$denom_col`. Used for non-standard value column names and when processing multiple signals from a single input dataframe, as with `quidel`'s age buckets.} - -\item{value_type}{string describing signal type. 
Either "count" or "fraction".} - -\item{test_lag}{integer number of days ago to predict for} } \description{ Fetch model and use to generate predictions/perform corrections diff --git a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd index d8dd8498d..12947e3f3 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd @@ -7,6 +7,7 @@ run_backfill( df, params, + training_end_date, refd_col = "time_value", lag_col = "lag", signal_suffixes = c(""), From 239ae0d8354afa0271bbd4cd508b84ff570385b6 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Wed, 21 Sep 2022 01:38:37 -0400 Subject: [PATCH 088/145] delete duplicated add sqrtscale function --- .../delphiBackfillCorrection/R/model.R | 11 ++++---- .../R/preprocessing.R | 27 ------------------- 2 files changed, 6 insertions(+), 32 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index 5eba52040..acc6bc34e 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -31,12 +31,13 @@ data_filteration <- function(test_lag, geo_train_data, geo_test_data) { return (list(train_data, test_data)) } -#' Add square root scale indicator +#' Add columns to indicate the scale of value at square root level +#' +#' @template train_data-template +#' @param test_data Data Frame for testing +#' @param max_raw the maximum value in the training data at square root level +#' @template value_col-template #' -#' @param train_data training data for a certain location and a certain test lag -#' @param test_data testing data for a certain location and a certain test lag -#' @param max_raw the value raw maximum for a certain location -#' #' @export add_sqrtscale<- function(train_data, test_data, max_raw, value_col) { if (!(value_col %in% colnames(train_data))){ diff --git a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R index fa134cbbc..c2143cc76 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R @@ -220,30 +220,3 @@ add_params_for_dates <- function(df, refd_col, lag_col) { return (as.data.frame(df)) } - -#' Add columns to indicate the scale of value at square root level -#' -#' @template train_data-template -#' @param test_data Data Frame for testing -#' @param max_raw the maximum value in the training data at square root level -#' @template value_col-template -add_sqrtscale <- function(train_data, test_data, max_raw, value_col) { - sqrtscale = c() - sub_max_raw = sqrt(max(train_data$value_raw)) / 2 - - for (split in seq(0, 3)) { - if (sub_max_raw < (max_raw * (split+1) * 0.1)) break - train_data[paste0("sqrty", as.character(split))] = 0 - test_data[paste0("sqrty", as.character(split))] = 0 - qv_pre = max_raw * split * 0.2 - qv_next = max_raw * (split+1) * 0.2 - - train_data[(train_data$value_raw <= (qv_next)^2) - & (train_data$value_raw > (qv_pre)^2), paste0("sqrty", as.character(split))] = 1 - test_data[(test_data$value_raw <= (qv_next)^2) - & (test_data$value_raw > (qv_pre)^2), paste0("sqrty", as.character(split))] = 1 - sqrtscale[split+1] = paste0("sqrty", as.character(split)) - } - - return (list(train_data, test_data, sqrtscale)) -} From 
cc20e45ad8b8af7d1c79a9e8b936c555d1ecc4c7 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Wed, 21 Sep 2022 01:39:37 -0400 Subject: [PATCH 089/145] remove man files --- .../man/generate_model_filename.Rd | 49 ------------------- .../delphiBackfillCorrection/man/ratio_adj.Rd | 32 ------------ .../man/ratio_adj_with_pseudo.Rd | 24 --------- 3 files changed, 105 deletions(-) delete mode 100644 Backfill_Correction/delphiBackfillCorrection/man/generate_model_filename.Rd delete mode 100644 Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd delete mode 100644 Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd diff --git a/Backfill_Correction/delphiBackfillCorrection/man/generate_model_filename.Rd b/Backfill_Correction/delphiBackfillCorrection/man/generate_model_filename.Rd deleted file mode 100644 index 14c20b2fb..000000000 --- a/Backfill_Correction/delphiBackfillCorrection/man/generate_model_filename.Rd +++ /dev/null @@ -1,49 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model.R -\name{generate_model_filename} -\alias{generate_model_filename} -\title{Construct filename for model with given parameters} -\usage{ -generate_model_filename( - indicator, - signal, - geo, - signal_suffix, - value_type, - test_lag, - tau, - lambda -) -} -\arguments{ -\item{indicator}{string specifying the name of the indicator as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} - -\item{signal}{string specifying the name of the signal as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} - -\item{geo}{string specifying the name of the geo region (e.g. FIPS -code for counties)} - -\item{signal_suffix}{string specifying value column name -ending to be appended to standard value column names from -`params$num_col` and `params$denom_col`. Used for non-standard -value column names and when processing multiple signals from a -single input dataframe, as with `quidel`'s age buckets.} - -\item{value_type}{string describing signal type. Either "count" or "fraction".} - -\item{test_lag}{integer number of days ago to predict for} - -\item{tau}{decimal quantile to be predicted. Values must be between 0 and 1.} - -\item{lambda}{the level of lasso penalty} -} -\value{ -path to file containing model object -} -\description{ -Construct filename for model with given parameters -} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd deleted file mode 100644 index 1b4e53005..000000000 --- a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj.Rd +++ /dev/null @@ -1,32 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/beta_prior_estimation.R -\name{ratio_adj} -\alias{ratio_adj} -\title{Update fraction using beta prior approach} -\usage{ -ratio_adj( - train_data, - test_data, - prior_test_data, - taus = TAUS, - lp_solver = LP_SOLVER -) -} -\arguments{ -\item{train_data}{Data Frame containing training data} - -\item{test_data}{testing data} - -\item{prior_test_data}{testing data for the lag -1 model} - -\item{taus}{numeric vector of quantiles to be predicted. Values -must be between 0 and 1.} - -\item{lp_solver}{string specifying the lp solver to use in -Quantgen fitting. Either "glpk" or "gurobi". 
For faster -optimization, use Gurobi (requires separate installation -of the `gurobi` package).} -} -\description{ -Update fraction using beta prior approach -} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd b/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd deleted file mode 100644 index 3363d684c..000000000 --- a/Backfill_Correction/delphiBackfillCorrection/man/ratio_adj_with_pseudo.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/beta_prior_estimation.R -\name{ratio_adj_with_pseudo} -\alias{ratio_adj_with_pseudo} -\title{Update fraction based on the pseudo counts for numerators and denominators} -\usage{ -ratio_adj_with_pseudo(data, dw, pseudo_num, pseudo_denom, num_col, denom_col) -} -\arguments{ -\item{data}{Data Frame} - -\item{dw}{character to indicate the day of a week. Can be NULL for all the days} - -\item{pseudo_num}{the estimated counts to be added to numerators} - -\item{pseudo_denom}{the estimated counts to be added to denominators} - -\item{num_col}{name of numerator column in the input dataframe} - -\item{denom_col}{name of denominator column in the input dataframe} -} -\description{ -Update fraction based on the pseudo counts for numerators and denominators -} From 84532eebee2d775e25948a442e03fc5e27505a5b Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Wed, 21 Sep 2022 01:40:23 -0400 Subject: [PATCH 090/145] update the man file for add sqrtscale --- .../delphiBackfillCorrection/man/add_sqrtscale.Rd | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd b/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd index d526c1461..47af18a24 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd @@ -1,11 +1,9 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model.R, R/preprocessing.R +% Please edit documentation in R/model.R \name{add_sqrtscale} \alias{add_sqrtscale} -\title{Add square root scale indicator} +\title{Add columns to indicate the scale of value at square root level} \usage{ -add_sqrtscale(train_data, test_data, max_raw, value_col) - add_sqrtscale(train_data, test_data, max_raw, value_col) } \arguments{ @@ -19,7 +17,5 @@ add_sqrtscale(train_data, test_data, max_raw, value_col) the input dataframe.} } \description{ -Add square root scale indicator - Add columns to indicate the scale of value at square root level } From d7fb08301ecaaccb2b7fa4f769fdd8f988e9ee12 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Wed, 21 Sep 2022 01:47:29 -0400 Subject: [PATCH 091/145] add lag pad --- Backfill_Correction/delphiBackfillCorrection/R/constants.R | 1 + Backfill_Correction/delphiBackfillCorrection/R/main.R | 3 ++- Backfill_Correction/delphiBackfillCorrection/R/model.R | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/constants.R b/Backfill_Correction/delphiBackfillCorrection/R/constants.R index 466b7136b..e5a9cfd35 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/constants.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/constants.R @@ -6,6 +6,7 @@ TRAINING_DAYS <- 270 TESTING_WINDOW <- 14 LAG_WINDOW <- 5 LAMBDA <- 0.1 +LAG_PAD <- 2 LP_SOLVER <-"gurobi" # LP solver to use in quantile_lasso(); "gurobi" or "glpk" YITL <-"log_value_raw" diff --git 
a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 5d19edbd5..1542fbf22 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -115,7 +115,8 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", } max_raw = sqrt(max(geo_train_data$value_raw)) for (test_lag in c(1:14, 21, 35, 51)) { - filtered_data <- data_filteration(test_lag, geo_train_data, geo_test_data) + filtered_data <- data_filteration(test_lag, geo_train_data, + geo_test_data, params$lag_pad) train_data <- filtered_data[[1]] test_data <- filtered_data[[2]] diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index acc6bc34e..805380b39 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -7,7 +7,7 @@ #' @importFrom rlang .data .env #' #' @export -data_filteration <- function(test_lag, geo_train_data, geo_test_data) { +data_filteration <- function(test_lag, geo_train_data, geo_test_data, lag_pad) { if (test_lag <= 14){ test_lag_pad=lag_pad test_lag_pad1=0 From 68a68fd7df59b86adbff427eea3979b281e55ab9 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Wed, 21 Sep 2022 01:48:00 -0400 Subject: [PATCH 092/145] remove model_save_dir and add lag_pad --- Backfill_Correction/delphiBackfillCorrection/R/utils.R | 2 +- .../unit-tests/testthat/test-utils.R | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index 87d87682c..64f94cdce 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -50,7 +50,6 @@ read_params <- function(path = "params.json", template_path = "params.json.templ # Paths if (!("export_dir" %in% names(params))) {params$export_dir <- "./receiving"} if (!("cache_dir" %in% names(params))) {params$cache_dir <- "./cache"} - if (!("model_save_dir" %in% names(params))) {params$model_save_dir <- "./model"} # Parallel parameters if (!("parallel" %in% names(params))) {params$parallel <- FALSE} @@ -60,6 +59,7 @@ read_params <- function(path = "params.json", template_path = "params.json.templ if (!("taus" %in% names(params))) {params$taus <- TAUS} if (!("lambda" %in% names(params))) {params$lambda <- LAMBDA} if (!("lp_solver" %in% names(params))) {params$lp_solver <- LP_SOLVER} + if (!("lag_pad" %in% names(params))) {params$lag_pad <- LAG_PAD} # Data parameters if (!("num_col" %in% names(params))) {params$num_col <- "num"} diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R index 429628b03..dae94697a 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R @@ -57,7 +57,6 @@ test_that("testing read parameters", { # Check initialization expect_true(!("export_dir" %in% names(params))) expect_true(!("cache_dir" %in% names(params))) - expect_true(!("model_save_dir" %in% names(params))) expect_true(!("parallel" %in% names(params))) expect_true(!("parallel_max_cores" %in% names(params))) @@ -66,6 +65,7 @@ test_that("testing read parameters", { expect_true(!("taus" %in% names(params))) 
expect_true(!("lambda" %in% names(params))) expect_true(!("lp_solver" %in% names(params))) + expect_true(!("lag_pad" %in% names(params))) expect_true(!("taus" %in% names(params))) expect_true(!("lambda" %in% names(params))) @@ -92,7 +92,6 @@ test_that("testing read parameters", { expect_true("export_dir" %in% names(params)) expect_true("cache_dir" %in% names(params)) - expect_true("model_save_dir" %in% names(params)) expect_true("parallel" %in% names(params)) expect_true("parallel_max_cores" %in% names(params)) @@ -105,6 +104,7 @@ test_that("testing read parameters", { expect_true("taus" %in% names(params)) expect_true("lambda" %in% names(params)) expect_true("lp_solver" %in% names(params)) + expect_true("lag_pad" %in% names(params)) expect_true("num_col" %in% names(params)) expect_true("denom_col" %in% names(params)) @@ -118,7 +118,6 @@ test_that("testing read parameters", { expect_true(params$export_dir == "./receiving") expect_true(params$cache_dir == "./cache") - expect_true(params$model_save_dir == "./model") expect_true(params$parallel == FALSE) expect_true(params$parallel_max_cores == .Machine$integer.max) @@ -126,6 +125,7 @@ test_that("testing read parameters", { expect_true(all(params$taus == TAUS)) expect_true(params$lambda == LAMBDA) expect_true(params$lp_solver == LP_SOLVER) + expect_true(params$lag_pad == LAG_PAD) expect_true(params$num_col == "num") expect_true(params$denom_col == "denom") From 81c82facbdee946239ff695a0265b33a7ebabf09 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Wed, 21 Sep 2022 01:52:16 -0400 Subject: [PATCH 093/145] remove model_save_dir --- Backfill_Correction/params.json.production.template | 1 - Backfill_Correction/params.json.template | 1 - 2 files changed, 2 deletions(-) diff --git a/Backfill_Correction/params.json.production.template b/Backfill_Correction/params.json.production.template index 2c38d3fb1..27c8b411c 100644 --- a/Backfill_Correction/params.json.production.template +++ b/Backfill_Correction/params.json.production.template @@ -1,7 +1,6 @@ { "ref_lag": 60, "input_dir": "", - "model_save_dir": "./model", "cache_dir": "./cache", "testing_window": 1, "training_days": 270, diff --git a/Backfill_Correction/params.json.template b/Backfill_Correction/params.json.template index 2f8c70f2a..b6317e084 100644 --- a/Backfill_Correction/params.json.template +++ b/Backfill_Correction/params.json.template @@ -1,7 +1,6 @@ { "ref_lag": 7, "input_dir": "", - "model_save_dir": "./model", "cache_dir": "./cache", "testing_window": 1, "training_days": 30, From 5981346398e50fc9c16fd7266d0d7a26d77e6f42 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Wed, 21 Sep 2022 02:41:13 -0400 Subject: [PATCH 094/145] update the function to get model file names --- .../R/beta_prior_estimation.R | 26 ++++++---- .../delphiBackfillCorrection/R/model.R | 50 +++++++++++++++---- .../testthat/test-beta_prior_estimation.R | 35 +++++++------ .../unit-tests/testthat/test-model.R | 36 +++++++------ 4 files changed, 98 insertions(+), 49 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R index 4e4d02d00..3dda64038 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R @@ -46,8 +46,9 @@ objective <- function(theta, x, prob, ...) 
{ #' @importFrom quantgen quantile_lasso #' est_priors <- function(train_data, prior_test_data, geo, value_type, dw, taus, - covariates, response, lp_solver, lambda, training_end_date, - model_save_dir, model_path_prefix, start=c(0, log(10)), + covariates, response, lp_solver, lambda, + indicator, signal, geo_level, signal_suffix, + training_end_date, model_save_dir, start=c(0, log(10)), base_pseudo_denom=1000, base_pseudo_num=10, train_models = TRUE, make_predictions = TRUE) { sub_train_data <- train_data %>% filter(train_data[[dw]] == 1) @@ -60,10 +61,14 @@ est_priors <- function(train_data, prior_test_data, geo, value_type, dw, taus, quantiles <- list() for (idx in 1:length(taus)) { tau <- taus[idx] - model_path <- paste0( - model_save_dir, - str_interp("/${training_end_date}_beta_prior_${model_path_prefix}_${value_type}_${geo}_${dw}_tau${tau}"), - ".model") + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda, + geo=geo, dw=dw, tau=tau, + value_type=value_type, + training_end_date=training_end_date, + beta_prior_mode=TRUE) + model_path <- file.path(model_save_dir, model_file_name) + obj = get_model(model_path, sub_train_data, covariates, tau = tau, lambda = lambda, lp_solver = lp_solver, train_models) @@ -112,8 +117,9 @@ frac_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, de #' @template lp_solver-template #' #' @export -frac_adj <- function(train_data, test_data, prior_test_data, traning_end_date, - model_save_dir, model_path_prefix, +frac_adj <- function(train_data, test_data, prior_test_data, + indicator, signal, geo_level, signal_suffix, + traning_end_date, model_save_dir, geo, value_type, taus = TAUS, lp_solver = LP_SOLVER) { train_data$value_target <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_target_num", "value_target_denom") train_data$value_7dav <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") @@ -141,8 +147,8 @@ frac_adj <- function(train_data, test_data, prior_test_data, traning_end_date, for (cov in c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref", "Sun_ref")) { pseudo_counts <- est_priors(train_data, prior_test_data, geo, value_type, cov, taus, pre_covariates, "log_value_target", - lp_solver, 0.1, training_end_date, model_save_dir, - model_path_prefix=model_path_prefix) + lp_solver, 0.1, indicator, signal, geo_level, + signal_suffix, training_end_date, model_save_dir) pseudo_denum = pseudo_counts[1] + pseudo_counts[2] pseudo_num = pseudo_counts[1] # update current data diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index 805380b39..a55edcb65 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -92,8 +92,10 @@ add_sqrtscale<- function(train_data, test_data, max_raw, value_col) { #' @export model_training_and_testing <- function(train_data, test_data, taus, covariates, lp_solver, lambda, test_lag, - geo, value_type, model_save_dir, - training_end_date, model_path_prefix, + geo, model_save_dir, + indicator, signal, + geo_level, signal_suffix, + training_end_date, train_models = TRUE, make_predictions = TRUE) { success = 0 @@ -102,8 +104,11 @@ model_training_and_testing <- function(train_data, test_data, taus, covariates, for (tau in taus) { tryCatch( expr = { - model_path <- paste(model_save_dir, - str_interp("/${training_end_date}_${model_path_prefix}_${geo}_lag${test_lag}_tau${tau}"), 
".model", sep="") + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda, + training_end_date, geo, + value_type, test_lag, tau) + model_path <- file.path(model_save_dir, model_file_name) obj <- get_model(model_path, train_data, covariates, tau, lambda, lp_solver, train_models=TRUE) @@ -208,14 +213,39 @@ get_model <- function(model_path, train_data, covariates, tau, #' #' @importFrom stringr str_interp #' -generate_model_filename_prefix <- function(indicator, signal, - geo_level, signal_suffix, lambda) { - prefix_components <- c(indicator, signal, signal_suffix) +generate_filename <- function(indicator, signal, + geo_level, signal_suffix, lambda, + training_end_date="", geo="", + value_type = "", test_lag="", tau="", dw="", + beta_prior_mode = FALSE, model_mode = TRUE) { + if (lambda != "") { + lambda <- str_interp("lambda${lambda}") + } + if (test_lag != "") { + test_lag <- str_interp("lag${test_lag}") + } + if (tau != "") { + tau <- str_interp("tau${tau}") + } + if (beta_prior_mode) { + beta_prior <- "beta_prior" + } else { + beta_prior <- "" + } + if (model_mode) { + file_type <- ".model" + } else { + file_type <- ".csv" + } + components <- c(as.character(training_end_date), beta_prior, + indicator, signal, signal_suffix, + geo_level, lambda, + geo, test_lag, dw, tau) + filename = paste0( # Drop any empty strings. - paste(prefix_components[prefix_components != ""], collapse="_"), - str_interp("_${geo_level}_lambda${lambda}") + paste(components[components != ""], collapse="_"), + file_type ) - return(filename) } diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R index 7fa5b87e6..c9e88b747 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R @@ -63,12 +63,10 @@ test_that("testing the squared error objection function given the beta prior", { test_that("testing the prior estimation", { dw <- "Sat_ref" - priors <- est_priors(train_data, test_data, geo, value_type, dw, taus, - main_covariate, response, lp_solver, lambda, - training_end_date, model_save_dir, model_path_prefix, - start=c(0, log(10)), - base_pseudo_denom=1000, base_pseudo_num=10, - train_models = TRUE, make_predictions = TRUE) + priors <- est_priors(train_data, prior_test_data, geo, value_type, dw, taus, + covariates, response, lp_solver, lambda, + indicator, signal, geo_level, signal_suffix, + training_end_date, model_save_dir) beta <- priors[2] alpha <- priors[1] - beta expect_true((alpha > 0) & (alpha < 4)) @@ -76,10 +74,13 @@ test_that("testing the prior estimation", { for (idx in 1:length(taus)) { tau <- taus[idx] - model_path <- paste0( - model_save_dir, - str_interp("/${training_end_date}_beta_prior_${model_path_prefix}_${value_type}_${geo}_${dw}_tau${tau}"), - ".model") + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda, + geo=geo, dw=dw, tau=tau, + value_type=value_type, + training_end_date=training_end_date, + beta_prior_mode=TRUE) + model_path <- file.path(model_save_dir, model_file_name) expect_true(file.exists(model_path)) file.remove(model_path) } @@ -99,7 +100,8 @@ test_that("testing the fraction adjustment with pseudo counts", { test_that("testing the main beta prior adjustment function", { set.seed(1) updated_data <- 
frac_adj(train_data, test_data, prior_test_data, - training_end_date, model_save_dir, model_path_prefix, + indicator, signal, geo_level, signal_suffix, + traning_end_date, model_save_dir, geo, value_type, taus = taus, lp_solver = lp_solver) updated_train_data <- updated_data[[1]] updated_test_data <- updated_data[[2]] @@ -107,10 +109,13 @@ test_that("testing the main beta prior adjustment function", { for (dw in c(dayofweek_covariates, "Sun_ref")){ for (idx in 1:length(taus)) { tau <- taus[idx] - model_path <- paste0( - model_save_dir, - str_interp("/${training_end_date}_beta_prior_${model_path_prefix}_${value_type}_${geo}_${dw}_tau${tau}"), - ".model") + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda, + geo=geo, dw=dw, tau=tau, + value_type=value_type, + training_end_date=training_end_date, + beta_prior_mode=TRUE) + model_path <- file.path(model_save_dir, model_file_name) expect_true(file.exists(model_path)) file.remove(model_path) } diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R index c1a64679b..ee68afb6b 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R @@ -43,10 +43,10 @@ covariates <- c(main_covariate, dayofweek_covariates) test_that("testing the generation of model filename prefix", { - model_prefix <- generate_model_filename_prefix(indicator, signal, - geo_level, signal_suffix, lambda) - expected <- "chng_outpatient_state_lambda0.1" - expect_equal(model_prefix, expected) + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda) + expected <- "chng_outpatient_state_lambda0.1.model" + expect_equal(model_file_name, expected) }) test_that("testing the evaluation", { @@ -60,8 +60,10 @@ test_that("testing the evaluation", { test_that("testing generating or loading the model", { # Check the model that does not exist tau = 0.5 - model_path <- paste(model_path_prefix, - str_interp("_${geo}_lag${test_lag}_tau${tau}"), ".model", sep="") + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda, + geo=geo, test_lag=test_lag, tau=tau) + model_path <- file.path(model_save_dir, model_name) expect_true(!file.exists(model_path)) # Generate the model and check again @@ -75,9 +77,12 @@ test_that("testing generating or loading the model", { test_that("testing model training and testing", { result <- model_training_and_testing(train_data, test_data, taus, covariates, lp_solver, lambda, test_lag, - geo, value_type, model_save_dir, - training_end_date, model_path_prefix, - train_models = TRUE, make_predictions = TRUE) + geo, model_save_dir, + indicator, signal, + geo_level, signal_suffix, + training_end_date, + train_models = TRUE, + make_predictions = TRUE) test_result <- result[[1]] coef_df <- result[[2]] @@ -85,8 +90,11 @@ test_that("testing model training and testing", { cov <- paste0("predicted_tau", as.character(tau)) expect_true(cov %in% colnames(test_result)) - model_path <- paste(model_save_dir, - str_interp("/${training_end_date}_${model_path_prefix}_${geo}_lag${test_lag}_tau${tau}"), ".model", sep="") + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda, + geo=geo, test_lag=test_lag, tau=tau, + training_end_date=training_end_date) + model_path <- file.path(model_save_dir, model_file_name) 
expect_true(file.exists(model_path)) expect_silent(file.remove(model_path)) @@ -134,7 +142,7 @@ test_that("testing data filteration", { # When test lag is small test_lag <- 5 - result <- data_filteration(test_lag, train_data, test_data) + result <- data_filteration(test_lag, train_data, test_data, 2) train_df <- result[[1]] test_df <- result[[2]] expect_true(max(train_df$lag) == test_lag+2) @@ -143,7 +151,7 @@ test_that("testing data filteration", { # When test lag is large test_lag <- 48 - result <- data_filteration(test_lag, train_data, test_data) + result <- data_filteration(test_lag, train_data, test_data, 2) train_df <- result[[1]] test_df <- result[[2]] expect_true(max(test_df$lag) == test_lag+7) @@ -152,7 +160,7 @@ test_that("testing data filteration", { # Make sure that all lags are tested included_lags = c() for (test_lag in c(1:14, 21, 35, 51)){ - result <- data_filteration(test_lag, train_data, test_data) + result <- data_filteration(test_lag, train_data, test_data, 2) test_df <- result[[2]] included_lags <- c(included_lags, unique(test_df$lag)) } From 305f86b836fcd683314fee27ca56febf5c2f99ab Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Wed, 21 Sep 2022 02:52:23 -0400 Subject: [PATCH 095/145] update the application on csv files --- .../delphiBackfillCorrection/R/io.R | 13 +++---- .../delphiBackfillCorrection/R/main.R | 36 +++++++++---------- .../testthat/test-beta_prior_estimation.R | 1 - .../unit-tests/testthat/test-io.R | 18 +++++++--- .../unit-tests/testthat/test-model.R | 1 - 5 files changed, 38 insertions(+), 31 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index 47ea5a202..d67d22324 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -22,12 +22,13 @@ read_data <- function(input_dir) { #' @importFrom stringr str_interp str_split #' #' @export -export_test_result <- function(test_data, coef_data, training_end_date, - value_type, export_dir, - model_path_prefix) { - base_name <- paste(as.character(training_end_date), - model_path_prefix, str_interp("${value_type}.csv"), sep="_") - +export_test_result <- function(test_data, coef_data, indicator, signal, + geo_level, signal_suffix, lambda, + training_end_date, + value_type, export_dir) { + base_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda, + training_end_date, value_type, model_mode=FALSE) pred_output_dir <- str_interp("prediction_${base_name}") write_csv(test_data, file.path(export_dir, pred_output_dir)) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 1542fbf22..e04d95b23 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -16,6 +16,13 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", lag_col = "lag", signal_suffixes = c(""), indicator = "", signal = "") { + geo_levels <- params$geo_levels + if ("state" %in% geo_levels) { + # If state included, do it last since state processing modifies the + # `df` object. + geo_levels <- c(setdiff(geo_levels, c("state")), "state") + } + for (suffix in signal_suffixes) { # For each suffix listed in `signal_suffixes`, run training/testing # process again. 
Main use case is for quidel which has overall and @@ -28,12 +35,7 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", denom_col <- params$denom_col } - geo_levels <- params$geo_levels - if ("state" %in% geo_levels) { - # If state included, do it last since state processing modifies the - # `df` object. - geo_levels <- c(setdiff(geo_levels, c("state")), "state") - } + for (geo_level in geo_levels) { # Get full list of interested locations @@ -52,9 +54,6 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", geo_list <- filter_counties(geo_list) } - model_path_prefix <- generate_model_filename_prefix( - indicator, signal, geo_level, signal_suffix, lambda) - test_data_list <- list() coef_list <- list() for (value_type in params$value_types) { @@ -106,9 +105,9 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", geo_prior_test_data = combined_df %>% filter(.data$issue_date > min(params$test_dates) - 7) %>% filter(.data$issue_date <= max(params$test_dates)) - updated_data <- frac_adj(geo_train_data, geo_test_data, - geo_prior_test_data, training_end_date, - model_save_dir, model_path_prefix, + updated_data <- frac_adj(train_data, test_data, prior_test_data, + indicator, signal, geo_level, signal_suffix, + traning_end_date, params$cache_dir, geo, value_type) geo_train_data <- updated_data[[1]] geo_test_data <- updated_data[[2]] @@ -134,10 +133,8 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", # Model training and testing prediction_results <- model_training_and_testing( train_data, test_data, params$taus, params_list, params$lp_solver, - params$lambda, model_save_dir = params$model_save_dir, - training_end_date = training_end_date, test_lag = test_lag, - geo = geo, value_type = value_type, - model_path_prefix=model_path_prefix, + params$lambda, test_lag, geo, params$cache_dir, + indicator, signal, geo_level, signal_suffix,training_end_date, train_models = params$train_models, make_predictions = params$make_predictions ) @@ -160,8 +157,11 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", for (value_type in params$value_types) { test_combined <- bind_rows(test_data_list[[value_type]]) coef_combined <- bind_rows(coef_list[[value_type]]) - export_test_result(test_combined, coef_combined, training_end_date, - value_type, params$export_dir, model_path_prefix) + export_test_result(test_combined, coef_combined, + indicator, signal, + geo_level, signal_suffix, lambda, + training_end_date, + value_type, export_dir params$export_dir) } } }# End for geo type diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R index c9e88b747..a00fdbf10 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R @@ -12,7 +12,6 @@ response <- "log_value_target" lp_solver <- "gurobi" lambda <- 0.1 model_save_dir <- "./model" -model_path_prefix <- "test" geo <- "pa" value_type <- "fraction" training_end_date <- as.Date("2022-01-01") diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R index 59a4dc3c8..0476e0875 100644 --- 
a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R @@ -5,7 +5,13 @@ params <- list() params$input_dir <- "./input" params$taus <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) -model_path_prefix <- "test" +indicator <- "chng" +signal <- "outpatient" +geo_level <- "state" +signal_suffix <- "" +lambda <- 0.1 +lp_solver <- "gurobi" +lambda <- 0.1 geo <- "pa" value_type <- "fraction" @@ -19,10 +25,12 @@ test_that("testing exporting the output file", { geo_level <- "state" training_end_date <- as.Date("2022-01-01'") - export_test_result(test_data, coef_data, training_end_date, value_type, export_dir, - model_path_prefix) - prediction_file <- "./output/prediction_2022-01-01_test_fraction.csv" - coefs_file <- "./output/coefs_2022-01-01_test_fraction.csv" + export_test_result(test_data, coef_data, indicator, signal, + geo_level, signal_suffix, lambda, + training_end_date, + value_type, export_dir) + prediction_file <- "./output/prediction_2022-01-01_chng_outpatient_state_lambda0.1_fraction.csv" + coefs_file <- "./output/coefs_2022-01-01_chng_outpatient_state_lambda0.1_fraction.csv" expect_true(file.exists(prediction_file)) expect_true(file.exists(coefs_file)) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R index ee68afb6b..360c8d6c7 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R @@ -9,7 +9,6 @@ lambda <- 0.1 lp_solver <- "gurobi" lambda <- 0.1 model_save_dir <- "./model" -model_path_prefix <- "test" geo <- "pa" value_type <- "fraction" training_end_date <- as.Date("2022-01-01") From 4dd982e7b8b613b8c08430a4bf50bfff226837ad Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Wed, 21 Sep 2022 03:10:02 -0400 Subject: [PATCH 096/145] move the suffix loop after geo list loop --- .../delphiBackfillCorrection/R/main.R | 106 ++++++++++-------- 1 file changed, 57 insertions(+), 49 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index e04d95b23..48ac11996 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -23,49 +23,52 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", geo_levels <- c(setdiff(geo_levels, c("state")), "state") } - for (suffix in signal_suffixes) { - # For each suffix listed in `signal_suffixes`, run training/testing - # process again. Main use case is for quidel which has overall and - # age-based signals. - if (suffix != "") { - num_col <- paste(params$num_col, suffix, sep = "_") - denom_col <- paste(params$denom_col, suffix, sep = "_") - } else { - num_col <- params$num_col - denom_col <- params$denom_col + for (geo_level in geo_levels) { + # Get full list of interested locations + if (geo_level == "state") { + # Drop county field and make new "geo_value" field from "state_id". 
+ # Aggregate counties up to state level + df <- df %>% + dplyr::select(-.data$geo_value, geo_value = .data$state_id) %>% + dplyr::group_by(across(c("geo_value", refd_col, lag_col))) %>% + # Summarized columns keep original names + dplyr::summarize(across(everything(), sum)) } + geo_list <- unique(df$geo_value) + if (geo_level == "county") { + # Keep only 200 most populous (within the US) counties + geo_list <- filter_counties(geo_list) + } + + test_data_list <- list() + coef_list <- list() - - - for (geo_level in geo_levels) { - # Get full list of interested locations - if (geo_level == "state") { - # Drop county field and make new "geo_value" field from "state_id". - # Aggregate counties up to state level - df <- df %>% - dplyr::select(-.data$geo_value, geo_value = .data$state_id) %>% - dplyr::group_by(across(c("geo_value", refd_col, lag_col))) %>% - # Summarized columns keep original names - dplyr::summarize(across(everything(), sum)) - } - geo_list <- unique(df$geo_value) - if (geo_level == "county") { - # Keep only 200 most populous (within the US) counties - geo_list <- filter_counties(geo_list) + for (value_type in params$value_types) { + for (signal_suffix in signal_suffixes) { + key = paste(value_type, signal_suffix) + test_data_list[[key]] <- list() + coef_list[[key]] <- list() } + } + + # Build model for each location + for (geo in geo_list) { + subdf <- df %>% filter(.data$geo_value == .env$geo) %>% filter(.data$lag < params$ref_lag) + min_refd <- min(subdf[[refd_col]]) + max_refd <- max(subdf[[refd_col]]) + subdf <- fill_rows(subdf, refd_col, lag_col, min_refd, max_refd) - test_data_list <- list() - coef_list <- list() - for (value_type in params$value_types) { - test_data_list[[value_type]] <- list() - coef_list[[value_type]] <- list() - } - # Build model for each location - for (geo in geo_list) { - subdf <- df %>% filter(.data$geo_value == .env$geo) %>% filter(.data$lag < params$ref_lag) - min_refd <- min(subdf[[refd_col]]) - max_refd <- max(subdf[[refd_col]]) - subdf <- fill_rows(subdf, refd_col, lag_col, min_refd, max_refd) + for (signal_suffix in signal_suffixes) { + # For each suffix listed in `signal_suffixes`, run training/testing + # process again. Main use case is for quidel which has overall and + # age-based signals. 
+ if (signal_suffix != "") { + num_col <- paste(params$num_col, signal_suffix, sep = "_") + denom_col <- paste(params$denom_col, signal_suffix, sep = "_") + } else { + num_col <- params$num_col + denom_col <- params$denom_col + } for (value_type in params$value_types) { # Handle different signal types @@ -152,20 +155,25 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", } }# End for test lags }# End for value types - }# End for geo list + }# End for signal suffixes + if (params$make_predictions) { for (value_type in params$value_types) { - test_combined <- bind_rows(test_data_list[[value_type]]) - coef_combined <- bind_rows(coef_list[[value_type]]) - export_test_result(test_combined, coef_combined, - indicator, signal, - geo_level, signal_suffix, lambda, - training_end_date, - value_type, export_dir params$export_dir) + for (signal_suffix in signal_suffixes) { + key = paste(value_type, signal_suffix) + test_combined <- bind_rows(test_data_list[[key]]) + coef_combined <- bind_rows(coef_list[[key]]) + export_test_result(test_combined, coef_combined, + indicator, signal, + geo_level, signal_suffix, lambda, + training_end_date, + value_type, export_dir params$export_dir) + } } } - }# End for geo type - }# End for signal suffixes + + }# End for geo list + }# End for geo type } #' Perform backfill correction on all desired signals and geo levels From 26573a690a731d9bc563353129ec2d963655c1b5 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Wed, 21 Sep 2022 03:10:56 -0400 Subject: [PATCH 097/145] fix an error --- Backfill_Correction/delphiBackfillCorrection/R/main.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 48ac11996..f17db6647 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -167,7 +167,7 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", indicator, signal, geo_level, signal_suffix, lambda, training_end_date, - value_type, export_dir params$export_dir) + value_type, export_dir, params$export_dir) } } } From d7ca2262636e8732f283b0c1931e34ef28b2da99 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Wed, 21 Sep 2022 03:11:40 -0400 Subject: [PATCH 098/145] udpate man files --- .../man/data_filteration.Rd | 2 +- .../man/est_priors.Rd | 5 +++- .../man/export_test_result.Rd | 8 ++++-- .../man/model_training_and_testing.Rd | 28 ++++++++++--------- 4 files changed, 26 insertions(+), 17 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd b/Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd index bdea55b36..109cede1f 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd @@ -4,7 +4,7 @@ \alias{data_filteration} \title{Filtration for training and testing data with different lags} \usage{ -data_filteration(test_lag, geo_train_data, geo_test_data) +data_filteration(test_lag, geo_train_data, geo_test_data, lag_pad) } \arguments{ \item{test_lag}{integer number of days ago to predict for} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd b/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd index 94b92df70..763ba579d 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd +++ 
b/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd @@ -17,9 +17,12 @@ est_priors( response, lp_solver, lambda, + indicator, + signal, + geo_level, + signal_suffix, training_end_date, model_save_dir, - model_path_prefix, start = c(0, log(10)), base_pseudo_denom = 1000, base_pseudo_num = 10, diff --git a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd index 5cbc5e44c..a632b7ae2 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd @@ -7,10 +7,14 @@ export_test_result( test_data, coef_data, + indicator, + signal, + geo_level, + signal_suffix, + lambda, training_end_date, value_type, - export_dir, - model_path_prefix + export_dir ) } \arguments{ diff --git a/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd b/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd index 92fe3e45a..8678cc0b7 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd @@ -13,10 +13,12 @@ model_training_and_testing( lambda, test_lag, geo, - value_type, model_save_dir, + indicator, + signal, + geo_level, + signal_suffix, training_end_date, - model_path_prefix, train_models = TRUE, make_predictions = TRUE ) @@ -43,17 +45,6 @@ of the `gurobi` package).} \item{geo}{string specifying the name of the geo region (e.g. FIPS code for counties)} -\item{value_type}{string describing signal type. Either "count" or "fraction".} - -\item{train_models}{boolean indicating whether to train models (TRUE). If -FALSE previously trained models (stored locally) will be used instead. -Default is TRUE.} - -\item{make_predictions}{boolean indicating whether to generate and save -corrections (TRUE) or not. Default is TRUE.} - -\item{test_date}{Date object representing test date} - \item{indicator}{string specifying the name of the indicator as used in `parquet` input data filenames. One indicator can be associated with multiple signals.} @@ -67,6 +58,17 @@ ending to be appended to standard value column names from `params$num_col` and `params$denom_col`. Used for non-standard value column names and when processing multiple signals from a single input dataframe, as with `quidel`'s age buckets.} + +\item{train_models}{boolean indicating whether to train models (TRUE). If +FALSE previously trained models (stored locally) will be used instead. +Default is TRUE.} + +\item{make_predictions}{boolean indicating whether to generate and save +corrections (TRUE) or not. Default is TRUE.} + +\item{test_date}{Date object representing test date} + +\item{value_type}{string describing signal type. 
Either "count" or "fraction".} } \description{ Fetch model and use to generate predictions/perform corrections From 15df4da040ded9ebf472a1e8f8ee0e48a574f5b0 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Wed, 21 Sep 2022 11:19:58 -0400 Subject: [PATCH 099/145] add new man files --- .../delphiBackfillCorrection/man/frac_adj.Rd | 40 ++++++++++++++ .../man/frac_adj_with_pseudo.Rd | 24 +++++++++ .../man/generate_filename.Rd | 54 +++++++++++++++++++ 3 files changed, 118 insertions(+) create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/frac_adj.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/frac_adj_with_pseudo.Rd create mode 100644 Backfill_Correction/delphiBackfillCorrection/man/generate_filename.Rd diff --git a/Backfill_Correction/delphiBackfillCorrection/man/frac_adj.Rd b/Backfill_Correction/delphiBackfillCorrection/man/frac_adj.Rd new file mode 100644 index 000000000..0e8e6e341 --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/frac_adj.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/beta_prior_estimation.R +\name{frac_adj} +\alias{frac_adj} +\title{Update fraction using beta prior approach} +\usage{ +frac_adj( + train_data, + test_data, + prior_test_data, + indicator, + signal, + geo_level, + signal_suffix, + traning_end_date, + model_save_dir, + geo, + value_type, + taus = TAUS, + lp_solver = LP_SOLVER +) +} +\arguments{ +\item{train_data}{Data Frame containing training data} + +\item{test_data}{testing data} + +\item{prior_test_data}{testing data for the lag -1 model} + +\item{taus}{numeric vector of quantiles to be predicted. Values +must be between 0 and 1.} + +\item{lp_solver}{string specifying the lp solver to use in +Quantgen fitting. Either "glpk" or "gurobi". For faster +optimization, use Gurobi (requires separate installation +of the `gurobi` package).} +} +\description{ +Update fraction using beta prior approach +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/frac_adj_with_pseudo.Rd b/Backfill_Correction/delphiBackfillCorrection/man/frac_adj_with_pseudo.Rd new file mode 100644 index 000000000..2ae59d33a --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/frac_adj_with_pseudo.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/beta_prior_estimation.R +\name{frac_adj_with_pseudo} +\alias{frac_adj_with_pseudo} +\title{Update fraction based on the pseudo counts for numerators and denominators} +\usage{ +frac_adj_with_pseudo(data, dw, pseudo_num, pseudo_denom, num_col, denom_col) +} +\arguments{ +\item{data}{Data Frame} + +\item{dw}{character to indicate the day of a week. 
Can be NULL for all the days} + +\item{pseudo_num}{the estimated counts to be added to numerators} + +\item{pseudo_denom}{the estimated counts to be added to denominators} + +\item{num_col}{name of numerator column in the input dataframe} + +\item{denom_col}{name of denominator column in the input dataframe} +} +\description{ +Update fraction based on the pseudo counts for numerators and denominators +} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/generate_filename.Rd b/Backfill_Correction/delphiBackfillCorrection/man/generate_filename.Rd new file mode 100644 index 000000000..ae40321db --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/man/generate_filename.Rd @@ -0,0 +1,54 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{generate_filename} +\alias{generate_filename} +\title{Construct filename for model with given parameters} +\usage{ +generate_filename( + indicator, + signal, + geo_level, + signal_suffix, + lambda, + training_end_date = "", + geo = "", + value_type = "", + test_lag = "", + tau = "", + dw = "", + beta_prior_mode = FALSE, + model_mode = TRUE +) +} +\arguments{ +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal_suffix}{string specifying value column name +ending to be appended to standard value column names from +`params$num_col` and `params$denom_col`. Used for non-standard +value column names and when processing multiple signals from a +single input dataframe, as with `quidel`'s age buckets.} + +\item{lambda}{the level of lasso penalty} + +\item{geo}{string specifying the name of the geo region (e.g. FIPS +code for counties)} + +\item{value_type}{string describing signal type. Either "count" or "fraction".} + +\item{test_lag}{integer number of days ago to predict for} + +\item{tau}{decimal quantile to be predicted. 
Values must be between 0 and 1.} +} +\value{ +path to file containing model object +} +\description{ +Construct filename for model with given parameters +} From 39cf9c37fa901b2665e51392e8fc81bd90434268 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Wed, 21 Sep 2022 11:20:29 -0400 Subject: [PATCH 100/145] split geo values in one step instead of repeated filtering --- Backfill_Correction/delphiBackfillCorrection/NAMESPACE | 1 + Backfill_Correction/delphiBackfillCorrection/R/main.R | 10 +++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE index 4cfaf41e9..d730a8293 100644 --- a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE +++ b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE @@ -30,6 +30,7 @@ importFrom(dplyr,desc) importFrom(dplyr,everything) importFrom(dplyr,filter) importFrom(dplyr,group_by) +importFrom(dplyr,group_split) importFrom(dplyr,if_else) importFrom(dplyr,pull) importFrom(dplyr,select) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index f17db6647..7eddf2ac6 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -8,7 +8,7 @@ #' @template indicator-template #' @template signal-template #' -#' @importFrom dplyr %>% filter select group_by summarize across everything +#' @importFrom dplyr %>% filter select group_by summarize across everything group_split #' @importFrom tidyr drop_na #' @importFrom rlang .data .env #' @@ -16,6 +16,8 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", lag_col = "lag", signal_suffixes = c(""), indicator = "", signal = "") { + df <- filter(df, .data$lag < params$ref_lag) + geo_levels <- params$geo_levels if ("state" %in% geo_levels) { # If state included, do it last since state processing modifies the @@ -51,9 +53,11 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", } } + group_dfs <- group_split(df, geo_value) + # Build model for each location - for (geo in geo_list) { - subdf <- df %>% filter(.data$geo_value == .env$geo) %>% filter(.data$lag < params$ref_lag) + for (subdf in group_dfs) { + geo <- group_df$geo_value[1] min_refd <- min(subdf[[refd_col]]) max_refd <- max(subdf[[refd_col]]) subdf <- fill_rows(subdf, refd_col, lag_col, min_refd, max_refd) From 2058cedd0017fdf8b0633448a02e33bdc22fcc4a Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Wed, 21 Sep 2022 11:25:06 -0400 Subject: [PATCH 101/145] drop counties directly; remove geo_list --- .../delphiBackfillCorrection/R/main.R | 4 ++-- .../delphiBackfillCorrection/R/utils.R | 8 -------- .../man/filter_counties.Rd | 14 -------------- 3 files changed, 2 insertions(+), 24 deletions(-) delete mode 100644 Backfill_Correction/delphiBackfillCorrection/man/filter_counties.Rd diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 7eddf2ac6..d0b54e7be 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -36,10 +36,10 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", # Summarized columns keep original names dplyr::summarize(across(everything(), sum)) } - geo_list <- 
unique(df$geo_value) if (geo_level == "county") { # Keep only 200 most populous (within the US) counties - geo_list <- filter_counties(geo_list) + top_200_geos <- get_populous_counties() + df <- filter(df, geo_value %in% top_200_geos) } test_data_list <- list() diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index 64f94cdce..af8228e1c 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -146,14 +146,6 @@ training_days_check <- function(issue_date, training_days = TRAINING_DAYS) { } } -#' Subset list of counties to those included in the 200 most populous in the US -#' -#' @param geos character vector of county FIPS codes -filter_counties <- function(geos) { - top_200_geos <- get_populous_counties() - return(intersect(geos, top_200_geos)) -} - #' Subset list of counties to those included in the 200 most populous in the US #' #' @importFrom dplyr select %>% arrange desc pull diff --git a/Backfill_Correction/delphiBackfillCorrection/man/filter_counties.Rd b/Backfill_Correction/delphiBackfillCorrection/man/filter_counties.Rd deleted file mode 100644 index c4a731de5..000000000 --- a/Backfill_Correction/delphiBackfillCorrection/man/filter_counties.Rd +++ /dev/null @@ -1,14 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/utils.R -\name{filter_counties} -\alias{filter_counties} -\title{Subset list of counties to those included in the 200 most populous in the US} -\usage{ -filter_counties(geos) -} -\arguments{ -\item{geos}{character vector of county FIPS codes} -} -\description{ -Subset list of counties to those included in the 200 most populous in the US -} From 74cd94681e1e7783e83b3fe8e47929c3a47f3617 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Wed, 21 Sep 2022 11:38:58 -0400 Subject: [PATCH 102/145] remove filter_counties test --- .../unit-tests/testthat/test-utils.R | 7 ------- 1 file changed, 7 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R index dae94697a..933d0662c 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R @@ -39,13 +39,6 @@ test_that("testing get the top200 populous counties", { expect_true("06037" %in% counties) }) - -test_that("testing the filteration of top200 populous counties", { - geos = c("06037", "58001") - expect_true(filter_counties(geos) == "06037") -}) - - test_that("testing read parameters", { # No input file expect_error(read_params(path = "params.json", template_path = "params.json.template", From 7f9f7947b7284eb1ff33eee804d846973af9d6af Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Wed, 21 Sep 2022 14:40:55 -0400 Subject: [PATCH 103/145] fix errors and update docs --- .../R/beta_prior_estimation.R | 36 ++++++++++++++----- .../delphiBackfillCorrection/R/io.R | 9 +++-- .../delphiBackfillCorrection/R/main.R | 12 ++++--- .../delphiBackfillCorrection/R/model.R | 18 +++++++--- .../man/data_filteration.Rd | 2 ++ .../man/est_priors.Rd | 33 +++++++++++++++++ .../man/export_test_result.Rd | 23 ++++++++++-- .../man/model_training_and_testing.Rd | 14 +++++--- .../man/run_backfill.Rd | 2 ++ .../testthat/test-beta_prior_estimation.R | 24 +++++++------ 
.../unit-tests/testthat/test-io.R | 13 ++----- .../unit-tests/testthat/test-model.R | 11 +++--- .../unit-tests/testthat/test-utils.R | 2 -- .../params.json.production.template | 1 + Backfill_Correction/params.json.template | 1 + 15 files changed, 147 insertions(+), 54 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R index 3dda64038..983ed6122 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R @@ -31,15 +31,25 @@ objective <- function(theta, x, prob, ...) { #' #' @template train_data-template #' @param prior_test_data Data Frame for testing -#' @param dw column name to indicate which day of a week it is #' @template taus-template #' @template covariates-template -#' @param response the column name of the response variable #' @template lp_solver-template #' @template lambda-template +#' @template geo_level-template +#' @template geo-template +#' @template indicator-template +#' @template signal-template +#' @template signal_suffix-template +#' @template value_type-template +#' @template train_models-template +#' @template make_predictions-template +#' @param dw column name to indicate which day of a week it is +#' @param response the column name of the response variable #' @param start the initialization of the the points in nlm #' @param base_pseudo_denom the pseudo counts added to denominator if little data for training #' @param base_pseudo_num the pseudo counts added to numerator if little data for training +#' @param training_end_date the most recent training date +#' @param model_save_dir directory containing trained models #' #' @importFrom stats nlm predict #' @importFrom dplyr %>% filter @@ -113,14 +123,24 @@ frac_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, de #' @template train_data-template #' @param test_data testing data #' @param prior_test_data testing data for the lag -1 model +#' @param training_end_date the most recent training date +#' @param model_save_dir directory containing trained models +#' @template indicator-template +#' @template signal-template +#' @template geo-template +#' @template signal_suffix-template +#' @template lambda-template +#' @template value_type-template +#' @template geo_level-template #' @template taus-template #' @template lp_solver-template #' #' @export frac_adj <- function(train_data, test_data, prior_test_data, indicator, signal, geo_level, signal_suffix, - traning_end_date, model_save_dir, - geo, value_type, taus = TAUS, lp_solver = LP_SOLVER) { + lambda, value_type, geo, + training_end_date, model_save_dir, + taus = TAUS, lp_solver = LP_SOLVER) { train_data$value_target <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_target_num", "value_target_denom") train_data$value_7dav <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") prior_test_data$value_7dav <- frac_adj_with_pseudo(prior_test_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") @@ -145,10 +165,10 @@ frac_adj <- function(train_data, test_data, prior_test_data, test_data$pseudo_denum = NaN for (cov in c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref", "Sun_ref")) { - pseudo_counts <- est_priors(train_data, prior_test_data, geo, value_type, - cov, taus, pre_covariates, "log_value_target", - lp_solver, 0.1, indicator, signal, geo_level, - signal_suffix, 
training_end_date, model_save_dir) + pseudo_counts <- est_priors(train_data, prior_test_data, geo, value_type, cov, taus, + pre_covariates, "log_value_target", lp_solver, lambda, + indicator, signal, geo_level, signal_suffix, + training_end_date, model_save_dir) pseudo_denum = pseudo_counts[1] + pseudo_counts[2] pseudo_num = pseudo_counts[1] # update current data diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index d67d22324..2a47396a6 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -14,9 +14,14 @@ read_data <- function(input_dir) { #' #' @param test_data test data containing prediction results #' @param coef_data data frame containing the estimated coefficients -#' @param model_path_prefix string containing necessary information -#' @template export_dir-template +#' @template indicator-template +#' @template signal-template +#' @template geo_level-template +#' @template signal_suffix-template +#' @template lambda-template #' @template value_type-template +#' @template export_dir-template +#' @param training_end_date the most recent training date #' #' @importFrom readr write_csv #' @importFrom stringr str_interp str_split diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index f17db6647..1161f60c4 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -7,14 +7,16 @@ #' @template signal_suffixes-template #' @template indicator-template #' @template signal-template +#' @param training_end_date the most recent training date #' #' @importFrom dplyr %>% filter select group_by summarize across everything #' @importFrom tidyr drop_na #' @importFrom rlang .data .env #' #' @export -run_backfill <- function(df, params, training_end_date, refd_col = "time_value", - lag_col = "lag", signal_suffixes = c(""), +run_backfill <- function(df, params, training_end_date, + refd_col = "time_value", lag_col = "lag", + signal_suffixes = c(""), indicator = "", signal = "") { geo_levels <- params$geo_levels if ("state" %in% geo_levels) { @@ -110,7 +112,7 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", filter(.data$issue_date <= max(params$test_dates)) updated_data <- frac_adj(train_data, test_data, prior_test_data, indicator, signal, geo_level, signal_suffix, - traning_end_date, params$cache_dir, + training_end_date, params$cache_dir, geo, value_type) geo_train_data <- updated_data[[1]] geo_test_data <- updated_data[[2]] @@ -136,7 +138,7 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", # Model training and testing prediction_results <- model_training_and_testing( train_data, test_data, params$taus, params_list, params$lp_solver, - params$lambda, test_lag, geo, params$cache_dir, + params$lambda, test_lag, geo, value_type, params$cache_dir, indicator, signal, geo_level, signal_suffix,training_end_date, train_models = params$train_models, make_predictions = params$make_predictions @@ -167,7 +169,7 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", indicator, signal, geo_level, signal_suffix, lambda, training_end_date, - value_type, export_dir, params$export_dir) + value_type, export_dir=params$export_dir) } } } diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R 
b/Backfill_Correction/delphiBackfillCorrection/R/model.R index a55edcb65..48b02780d 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -1,6 +1,7 @@ #' Filtration for training and testing data with different lags #' #' @template test_lag-template +#' @param lag_pad lag padding for training #' @param geo_train_data training data for a certain location #' @param geo_test_data testing data for a certain location #' @@ -77,7 +78,7 @@ add_sqrtscale<- function(train_data, test_data, max_raw, value_col) { #' @template covariates-template #' @template lp_solver-template #' @template lambda-template -#' @param test_date Date object representing test date +#' @template geo_level-template #' @template geo-template #' @template indicator-template #' @template signal-template @@ -86,13 +87,15 @@ add_sqrtscale<- function(train_data, test_data, max_raw, value_col) { #' @template test_lag-template #' @template train_models-template #' @template make_predictions-template +#' @param model_save_dir directory containing trained models +#' @param training_end_date Most recent training date #' #' @importFrom stats predict coef #' #' @export model_training_and_testing <- function(train_data, test_data, taus, covariates, lp_solver, lambda, test_lag, - geo, model_save_dir, + geo, value_type, model_save_dir, indicator, signal, geo_level, signal_suffix, training_end_date, @@ -127,8 +130,7 @@ model_training_and_testing <- function(train_data, test_data, taus, covariates, if (success < 9) {return (NULL)} if (!make_predictions) {return (list())} - coef_combined_result = data.frame(tau=taus, issue_date=test_date, - geo=geo, test_lag=test_lag) + coef_combined_result = data.frame(tau=taus, geo=geo, test_lag=test_lag) coef_combined_result[coef_list] = as.matrix(do.call(rbind, coefs_result)) return (list(test_data, coef_combined_result)) @@ -204,10 +206,16 @@ get_model <- function(model_path, train_data, covariates, tau, #' @template signal-template #' @template geo-template #' @template signal_suffix-template +#' @template lambda-template #' @template value_type-template #' @template test_lag-template +#' @template geo_level-template +#' @template test_lag-template +#' @param dw string, indicate the day of a week #' @param tau decimal quantile to be predicted. Values must be between 0 and 1. 
-#' @template lambda-template +#' @param beta_prior_mode bool, indicate whether it is for a beta prior model +#' @param model_mode bool, indicate whether the file name is for a model +#' @param training_end_date the most recent training date #' #' @return path to file containing model object #' diff --git a/Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd b/Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd index 109cede1f..d8589ecac 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd @@ -12,6 +12,8 @@ data_filteration(test_lag, geo_train_data, geo_test_data, lag_pad) \item{geo_train_data}{training data for a certain location} \item{geo_test_data}{testing data for a certain location} + +\item{lag_pad}{lag padding for training} } \description{ Filtration for training and testing data with different lags diff --git a/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd b/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd index 763ba579d..881864341 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd @@ -35,6 +35,11 @@ est_priors( \item{prior_test_data}{Data Frame for testing} +\item{geo}{string specifying the name of the geo region (e.g. FIPS +code for counties)} + +\item{value_type}{string describing signal type. Either "count" or "fraction".} + \item{dw}{column name to indicate which day of a week it is} \item{taus}{numeric vector of quantiles to be predicted. Values @@ -51,11 +56,39 @@ of the `gurobi` package).} \item{lambda}{the level of lasso penalty} +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{geo_level}{string describing geo coverage of input data. Either "state" +or "county".} + +\item{signal_suffix}{string specifying value column name +ending to be appended to standard value column names from +`params$num_col` and `params$denom_col`. Used for non-standard +value column names and when processing multiple signals from a +single input dataframe, as with `quidel`'s age buckets.} + +\item{training_end_date}{the most recent training date} + +\item{model_save_dir}{directory containing trained models} + \item{start}{the initialization of the the points in nlm} \item{base_pseudo_denom}{the pseudo counts added to denominator if little data for training} \item{base_pseudo_num}{the pseudo counts added to numerator if little data for training} + +\item{train_models}{boolean indicating whether to train models (TRUE). If +FALSE previously trained models (stored locally) will be used instead. +Default is TRUE.} + +\item{make_predictions}{boolean indicating whether to generate and save +corrections (TRUE) or not. 
Default is TRUE.} } \description{ Main function for the beta prior approach diff --git a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd index a632b7ae2..77c2088d5 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd @@ -22,11 +22,30 @@ export_test_result( \item{coef_data}{data frame containing the estimated coefficients} +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{geo_level}{string describing geo coverage of input data. Either "state" +or "county".} + +\item{signal_suffix}{string specifying value column name +ending to be appended to standard value column names from +`params$num_col` and `params$denom_col`. Used for non-standard +value column names and when processing multiple signals from a +single input dataframe, as with `quidel`'s age buckets.} + +\item{lambda}{the level of lasso penalty} + +\item{training_end_date}{the most recent training date} + \item{value_type}{string describing signal type. Either "count" or "fraction".} \item{export_dir}{path to directory to save output to} - -\item{model_path_prefix}{string containing necessary information} } \description{ Export the result to customized directory diff --git a/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd b/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd index 8678cc0b7..225a555a9 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd @@ -13,6 +13,7 @@ model_training_and_testing( lambda, test_lag, geo, + value_type, model_save_dir, indicator, signal, @@ -45,6 +46,10 @@ of the `gurobi` package).} \item{geo}{string specifying the name of the geo region (e.g. FIPS code for counties)} +\item{value_type}{string describing signal type. Either "count" or "fraction".} + +\item{model_save_dir}{directory containing trained models} + \item{indicator}{string specifying the name of the indicator as used in `parquet` input data filenames. One indicator can be associated with multiple signals.} @@ -53,22 +58,23 @@ with multiple signals.} `parquet` input data filenames. One indicator can be associated with multiple signals.} +\item{geo_level}{string describing geo coverage of input data. Either "state" +or "county".} + \item{signal_suffix}{string specifying value column name ending to be appended to standard value column names from `params$num_col` and `params$denom_col`. Used for non-standard value column names and when processing multiple signals from a single input dataframe, as with `quidel`'s age buckets.} +\item{training_end_date}{Most recent training date} + \item{train_models}{boolean indicating whether to train models (TRUE). If FALSE previously trained models (stored locally) will be used instead. Default is TRUE.} \item{make_predictions}{boolean indicating whether to generate and save corrections (TRUE) or not. Default is TRUE.} - -\item{test_date}{Date object representing test date} - -\item{value_type}{string describing signal type. 
Either "count" or "fraction".} } \description{ Fetch model and use to generate predictions/perform corrections diff --git a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd index 12947e3f3..aab815222 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd @@ -24,6 +24,8 @@ the following elements: `ref_lag`, `testing_window`, `test_dates`, `training_days`, `num_col`, `denom_col`, `taus`, `lambda`, `export_dir`, `lp_solver`, `input_dir`, `cache_dir`, `geo_levels`, and `value_types`.} +\item{training_end_date}{the most recent training date} + \item{refd_col}{string specifying name of reference date field within the input dataframe.} diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R index a00fdbf10..2f4188d48 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R @@ -1,7 +1,17 @@ context("Testing helper functions for beta prior estimation") +# Constants +indicator <- "chng" +signal <- "outpatient" +geo_level <- "state" +signal_suffix <- "" +lambda <- 0.1 +geo <- "pa" +value_type <- "fraction" +model_save_dir <- "./model" +training_end_date <- as.Date("2022-01-01") + # Generate Test Data -prior <- c(1, 2) main_covariate <- c("log_value_7dav") null_covariates <- c("value_raw_num", "value_raw_denom", "value_7dav_num", "value_7dav_denom", @@ -9,13 +19,6 @@ null_covariates <- c("value_raw_num", "value_raw_denom", dayofweek_covariates <- c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref") response <- "log_value_target" -lp_solver <- "gurobi" -lambda <- 0.1 -model_save_dir <- "./model" -geo <- "pa" -value_type <- "fraction" -training_end_date <- as.Date("2022-01-01") -taus <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) set.seed(2022) train_beta_vs <- log(rbeta(1000, 2, 5)) @@ -100,8 +103,9 @@ test_that("testing the main beta prior adjustment function", { set.seed(1) updated_data <- frac_adj(train_data, test_data, prior_test_data, indicator, signal, geo_level, signal_suffix, - traning_end_date, model_save_dir, - geo, value_type, taus = taus, lp_solver = lp_solver) + lambda, value_type, geo, + training_end_date, model_save_dir, + taus = TAUS, lp_solver = LP_SOLVER) updated_train_data <- updated_data[[1]] updated_test_data <- updated_data[[2]] diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R index 0476e0875..6a953fa0f 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R @@ -1,29 +1,22 @@ context("Testing io helper functions") # Constants -params <- list() -params$input_dir <- "./input" -params$taus <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) - indicator <- "chng" signal <- "outpatient" geo_level <- "state" signal_suffix <- "" lambda <- 0.1 lp_solver <- "gurobi" -lambda <- 0.1 geo <- "pa" value_type <- "fraction" - +input_dir <- "./input" +export_dir <- "./output" +training_end_date <- as.Date("2022-01-01") test_that("testing exporting the output file", { 
test_data <- data.frame() coef_data <- data.frame() - export_dir <- "./output" - value_type <- "fraction" - geo_level <- "state" - training_end_date <- as.Date("2022-01-01'") export_test_result(test_data, coef_data, indicator, signal, geo_level, signal_suffix, lambda, diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R index 360c8d6c7..be8fe7a53 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R @@ -6,8 +6,7 @@ signal <- "outpatient" geo_level <- "state" signal_suffix <- "" lambda <- 0.1 -lp_solver <- "gurobi" -lambda <- 0.1 +test_lag <- 1 model_save_dir <- "./model" geo <- "pa" value_type <- "fraction" @@ -62,12 +61,12 @@ test_that("testing generating or loading the model", { model_file_name <- generate_filename(indicator, signal, geo_level, signal_suffix, lambda, geo=geo, test_lag=test_lag, tau=tau) - model_path <- file.path(model_save_dir, model_name) + model_path <- file.path(model_save_dir, model_file_name) expect_true(!file.exists(model_path)) # Generate the model and check again obj <- get_model(model_path, train_data, covariates, tau, - lambda, lp_solver, train_models=TRUE) + lambda, LP_SOLVER, train_models=TRUE) expect_true(file.exists(model_path)) expect_silent(file.remove(model_path)) @@ -75,8 +74,8 @@ test_that("testing generating or loading the model", { test_that("testing model training and testing", { result <- model_training_and_testing(train_data, test_data, taus, covariates, - lp_solver, lambda, test_lag, - geo, model_save_dir, + LP_SOLVER, lambda, test_lag, + geo, value_type, model_save_dir, indicator, signal, geo_level, signal_suffix, training_end_date, diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R index dae94697a..af3784c03 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R @@ -1,6 +1,4 @@ context("Testing utils helper functions") - -TRAINING_DAYS = 10 test_that("testing create directory if not exist", { # If not exists diff --git a/Backfill_Correction/params.json.production.template b/Backfill_Correction/params.json.production.template index 27c8b411c..f8a4eb306 100644 --- a/Backfill_Correction/params.json.production.template +++ b/Backfill_Correction/params.json.production.template @@ -4,6 +4,7 @@ "cache_dir": "./cache", "testing_window": 1, "training_days": 270, + "lag_pad":2, "export_dir": "./receiving", "geo_levels": ["state", "county"], "value_types": ["count", "fraction"], diff --git a/Backfill_Correction/params.json.template b/Backfill_Correction/params.json.template index b6317e084..293febe05 100644 --- a/Backfill_Correction/params.json.template +++ b/Backfill_Correction/params.json.template @@ -4,6 +4,7 @@ "cache_dir": "./cache", "testing_window": 1, "training_days": 30, + "lag_pad":2 "export_dir": "./receiving", "geo_levels": ["state", "county"], "value_types": ["count", "fraction"], From 8448a978e69dba38f531e4513c2fcfe4dabddd23 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Wed, 21 Sep 2022 15:10:05 -0400 Subject: [PATCH 104/145] small changes in unit tests to use pre defined constants --- Backfill_Correction/delphiBackfillCorrection/R/io.R | 2 -- 
.../unit-tests/testthat/test-beta_prior_estimation.R | 10 +++++----- .../unit-tests/testthat/test-model.R | 9 ++++----- 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index 2a47396a6..f0bf3b0b6 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -25,8 +25,6 @@ read_data <- function(input_dir) { #' #' @importFrom readr write_csv #' @importFrom stringr str_interp str_split -#' -#' @export export_test_result <- function(test_data, coef_data, indicator, signal, geo_level, signal_suffix, lambda, training_end_date, diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R index 2f4188d48..6fccfd2eb 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R @@ -65,7 +65,7 @@ test_that("testing the squared error objection function given the beta prior", { test_that("testing the prior estimation", { dw <- "Sat_ref" - priors <- est_priors(train_data, prior_test_data, geo, value_type, dw, taus, + priors <- est_priors(train_data, prior_test_data, geo, value_type, dw, TAUS, covariates, response, lp_solver, lambda, indicator, signal, geo_level, signal_suffix, training_end_date, model_save_dir) @@ -74,8 +74,8 @@ test_that("testing the prior estimation", { expect_true((alpha > 0) & (alpha < 4)) expect_true((beta > 4) & (beta < 8)) - for (idx in 1:length(taus)) { - tau <- taus[idx] + for (idx in 1:length(TAUS)) { + tau <- TAUS[idx] model_file_name <- generate_filename(indicator, signal, geo_level, signal_suffix, lambda, geo=geo, dw=dw, tau=tau, @@ -110,8 +110,8 @@ test_that("testing the main beta prior adjustment function", { updated_test_data <- updated_data[[2]] for (dw in c(dayofweek_covariates, "Sun_ref")){ - for (idx in 1:length(taus)) { - tau <- taus[idx] + for (idx in 1:length(TAUS)) { + tau <- TAUS[idx] model_file_name <- generate_filename(indicator, signal, geo_level, signal_suffix, lambda, geo=geo, dw=dw, tau=tau, diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R index be8fe7a53..3d9e19894 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R @@ -11,7 +11,6 @@ model_save_dir <- "./model" geo <- "pa" value_type <- "fraction" training_end_date <- as.Date("2022-01-01") -taus <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) # Generate Test Data main_covariate <- c("log_value_7dav") @@ -48,10 +47,10 @@ test_that("testing the generation of model filename prefix", { }) test_that("testing the evaluation", { - for (tau in taus){ + for (tau in TAUS){ test_data[[paste0("predicted_tau", as.character(tau))]] <- log(quantile(exp(train_beta_vs), tau)) } - result <- evaluate(test_data, taus) + result <- evaluate(test_data, TAUS) expect_true(mean(result$wis) < 0.3) }) @@ -73,7 +72,7 @@ test_that("testing generating or loading the model", { }) test_that("testing model training and testing", { - result <- model_training_and_testing(train_data, test_data, taus, covariates, + result <- 
model_training_and_testing(train_data, test_data, TAUS, covariates, LP_SOLVER, lambda, test_lag, geo, value_type, model_save_dir, indicator, signal, @@ -84,7 +83,7 @@ test_that("testing model training and testing", { test_result <- result[[1]] coef_df <- result[[2]] - for (tau in taus){ + for (tau in TAUS){ cov <- paste0("predicted_tau", as.character(tau)) expect_true(cov %in% colnames(test_result)) From 0b96c5e8542fe2871482b190a4af98b90dafaadc Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Wed, 21 Sep 2022 15:10:45 -0400 Subject: [PATCH 105/145] update man file --- Backfill_Correction/delphiBackfillCorrection/NAMESPACE | 1 - .../unit-tests/testthat/test-io.R | 9 +++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE index 4cfaf41e9..f4283362a 100644 --- a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE +++ b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE @@ -8,7 +8,6 @@ export(add_weekofmonth) export(create_dir_not_exist) export(data_filteration) export(evaluate) -export(export_test_result) export(fill_missing_updates) export(fill_rows) export(frac_adj) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R index 6a953fa0f..826a72304 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R @@ -15,9 +15,9 @@ training_end_date <- as.Date("2022-01-01") test_that("testing exporting the output file", { - test_data <- data.frame() - coef_data <- data.frame() - + test_data <- data.frame(test=TRUE) + coef_data <- data.frame(test=TRUE) + export_test_result(test_data, coef_data, indicator, signal, geo_level, signal_suffix, lambda, training_end_date, @@ -31,6 +31,3 @@ test_that("testing exporting the output file", { file.remove(prediction_file) file.remove(coefs_file) }) - - - From 4b33519a4d4052164b2f3cfe7f22c18b9edfab45 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Wed, 21 Sep 2022 16:00:59 -0400 Subject: [PATCH 106/145] fix the error in the ojective function --- Backfill_Correction/delphiBackfillCorrection/NAMESPACE | 2 +- .../delphiBackfillCorrection/R/beta_prior_estimation.R | 4 ++-- .../unit-tests/testthat/test-beta_prior_estimation.R | 5 ++--- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE index f4283362a..cdcd9d752 100644 --- a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE +++ b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE @@ -48,8 +48,8 @@ importFrom(rlang,.data) importFrom(rlang,.env) importFrom(stats,coef) importFrom(stats,nlm) +importFrom(stats,pbeta) importFrom(stats,predict) -importFrom(stats,qbeta) importFrom(stats,setNames) importFrom(stringr,str_interp) importFrom(stringr,str_split) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R index 983ed6122..bcee90cde 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R @@ -18,10 +18,10 @@ delta <- function(fit, actual) sum((fit-actual)^2) #' @param prob the expected probabilities #' @param ... 
additional arguments #' -#' @importFrom stats qbeta +#' @importFrom stats pbeta objective <- function(theta, x, prob, ...) { ab <- exp(theta) # Parameters are the *logs* of alpha and beta - fit <- qbeta(x, ab[1], ab[2]) + fit <- pbeta(x, ab[1], ab[2]) return (delta(fit, prob)) } diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R index 6fccfd2eb..c0c49cc75 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R @@ -54,11 +54,10 @@ test_that("testing the sum of squared error", { test_that("testing the squared error objection function given the beta prior", { theta <- c(log(1), log(2)) - x <- c(0.1, 0.25, 0.5, 0.75, 0.9) - prob <- qbeta(x, 1, 2) + x <- qbeta(TAUS, 1, 2) expected <-0 - computed <- objective(theta, x, prob) + computed <- objective(theta, x, TAUS) expect_equal(expected, computed) }) From c7ddd45264e05e322b1f3a9168c5d4e49d135c0e Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Thu, 22 Sep 2022 02:12:24 -0400 Subject: [PATCH 107/145] finish the unittests for io functions --- .../delphiBackfillCorrection/R/io.R | 2 +- .../unit-tests/testthat/test-io.R | 80 ++++++++++++++++++- 2 files changed, 77 insertions(+), 5 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/Backfill_Correction/delphiBackfillCorrection/R/io.R index f0bf3b0b6..bd506b6f7 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/io.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/io.R @@ -105,7 +105,7 @@ subset_valid_files <- function(files_list, file_type = c("daily", "rollup"), par ## TODO: start_date depends on if we're doing model training or just corrections. start_date <- TODAY - params$training_days - params$ref_lag - end_date <- TODAY + end_date <- TODAY - 1 # Only keep files with data that falls at least somewhat between the desired # start and end range dates. 
diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R index 826a72304..377c933e4 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R @@ -6,12 +6,15 @@ signal <- "outpatient" geo_level <- "state" signal_suffix <- "" lambda <- 0.1 -lp_solver <- "gurobi" geo <- "pa" value_type <- "fraction" -input_dir <- "./input" -export_dir <- "./output" -training_end_date <- as.Date("2022-01-01") + +params <- list() +params$training_end_date <- as.Date("2022-01-01") +params$training_days <- 7 +params$ref_lag <- 3 +params$input_dir <- "./input" +params$export_dir <- "./output" test_that("testing exporting the output file", { @@ -31,3 +34,72 @@ test_that("testing exporting the output file", { file.remove(prediction_file) file.remove(coefs_file) }) + + +test_that("testing creating file name pattern", { + daily_pattern <- create_name_pattern(indicator, signal, "daily") + rollup_pattern <- create_name_pattern(indicator, signal, "rollup") + + # Create test files + daily_file <- data.frame(test=TRUE) + daily_file_name <- file.path(params$input_dir, + str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")) + write_csv(daily_file, daily_file_name) + + rollup_file_name <- file.path(params$input_dir, + str_interp("chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")) + rollup_data <- data.frame(test=TRUE) + write_csv(rollup_file, rollup_file_name) + + + filtered_daily_file <- list.files( + params$input_dir, pattern = daily_pattern, full.names = TRUE) + expect_equal(filtered_daily_file, daily_file_name) + + filtered_rollup_file <- list.files( + params$input_dir, pattern = rollup_pattern, full.names = TRUE) + expect_equal(filtered_rollup_file, rollup_file_name) + + file.remove(daily_file_name) + file.remove(rollup_file_name) +}) + + +test_that("testing", { + date_format = "%Y%m%d" + daily_files_list <- c(str_interp("./input/chng_outpatient_as_of_${format(TODAY-15, date_format)}.parquet"), + str_interp("./input/chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet"), + str_interp("./input/chng_outpatient_as_of_${format(TODAY, date_format)}.parquet")) + daily_valid_files <- subset_valid_files(daily_files_list, "daily", params) + expect_equal(daily_valid_files, daily_files_list[2]) + + rollup_files_list <- c(str_interp( + "./input/chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY-11, date_format)}.parquet"), + str_interp( + "./input/chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet"), + str_interp( + "./input/chng_outpatient_from_${format(TODAY, date_format)}_to_${format(TODAY+3, date_format)}.parquet")) + rollup_valid_files <- subset_valid_files(rollup_files_list, "rollup", params) + expect_equal(rollup_valid_files, rollup_files_list[2]) +}) + +test_that("testing", { + daily_file <- data.frame(test=TRUE) + daily_file_name <- file.path(params$input_dir, + str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")) + write_csv(daily_file, daily_file_name) + + rollup_file_name <- file.path(params$input_dir, + str_interp("chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")) + rollup_data <- data.frame(test=TRUE) + write_csv(rollup_file, rollup_file_name) + + + files <- get_files_list(indicator, signal, 
params) + expect_true(all(files == c(daily_file_name, rollup_file_name))) + + file.remove(daily_file_name) + file.remove(rollup_file_name) +}) + + From 9d324c11d4f57a2c9a0e23273a1f09e826a7b1c7 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Thu, 22 Sep 2022 02:13:05 -0400 Subject: [PATCH 108/145] update man files --- .../delphiBackfillCorrection/man/frac_adj.Rd | 35 +++++++++++++++++-- .../man/generate_filename.Rd | 11 ++++++ 2 files changed, 43 insertions(+), 3 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/man/frac_adj.Rd b/Backfill_Correction/delphiBackfillCorrection/man/frac_adj.Rd index 0e8e6e341..71589d75e 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/frac_adj.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/frac_adj.Rd @@ -12,10 +12,11 @@ frac_adj( signal, geo_level, signal_suffix, - traning_end_date, - model_save_dir, - geo, + lambda, value_type, + geo, + training_end_date, + model_save_dir, taus = TAUS, lp_solver = LP_SOLVER ) @@ -27,6 +28,34 @@ frac_adj( \item{prior_test_data}{testing data for the lag -1 model} +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{geo_level}{string describing geo coverage of input data. Either "state" +or "county".} + +\item{signal_suffix}{string specifying value column name +ending to be appended to standard value column names from +`params$num_col` and `params$denom_col`. Used for non-standard +value column names and when processing multiple signals from a +single input dataframe, as with `quidel`'s age buckets.} + +\item{lambda}{the level of lasso penalty} + +\item{value_type}{string describing signal type. Either "count" or "fraction".} + +\item{geo}{string specifying the name of the geo region (e.g. FIPS +code for counties)} + +\item{training_end_date}{the most recent training date} + +\item{model_save_dir}{directory containing trained models} + \item{taus}{numeric vector of quantiles to be predicted. Values must be between 0 and 1.} diff --git a/Backfill_Correction/delphiBackfillCorrection/man/generate_filename.Rd b/Backfill_Correction/delphiBackfillCorrection/man/generate_filename.Rd index ae40321db..ba40a8aa2 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/generate_filename.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/generate_filename.Rd @@ -29,6 +29,9 @@ with multiple signals.} `parquet` input data filenames. One indicator can be associated with multiple signals.} +\item{geo_level}{string describing geo coverage of input data. Either "state" +or "county".} + \item{signal_suffix}{string specifying value column name ending to be appended to standard value column names from `params$num_col` and `params$denom_col`. Used for non-standard @@ -37,6 +40,8 @@ single input dataframe, as with `quidel`'s age buckets.} \item{lambda}{the level of lasso penalty} +\item{training_end_date}{the most recent training date} + \item{geo}{string specifying the name of the geo region (e.g. FIPS code for counties)} @@ -45,6 +50,12 @@ code for counties)} \item{test_lag}{integer number of days ago to predict for} \item{tau}{decimal quantile to be predicted. 
Values must be between 0 and 1.} + +\item{dw}{string, indicate the day of a week} + +\item{beta_prior_mode}{bool, indicate whether it is for a beta prior model} + +\item{model_mode}{bool, indicate whether the file name is for a model} } \value{ path to file containing model object From c0d5fbce80068b07c93f82e3386ab0b3c51cec6f Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Thu, 22 Sep 2022 02:17:32 -0400 Subject: [PATCH 109/145] fix an error in test-io --- .../delphiBackfillCorrection/unit-tests/testthat/test-io.R | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R index 377c933e4..217f11bc0 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R @@ -8,7 +8,8 @@ signal_suffix <- "" lambda <- 0.1 geo <- "pa" value_type <- "fraction" - +date_format = "%Y%m%d" + params <- list() params$training_end_date <- as.Date("2022-01-01") params$training_days <- 7 @@ -66,7 +67,6 @@ test_that("testing creating file name pattern", { test_that("testing", { - date_format = "%Y%m%d" daily_files_list <- c(str_interp("./input/chng_outpatient_as_of_${format(TODAY-15, date_format)}.parquet"), str_interp("./input/chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet"), str_interp("./input/chng_outpatient_as_of_${format(TODAY, date_format)}.parquet")) From b1c0c1e710ef0873671fb0d972491d820f05b5d9 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Thu, 22 Sep 2022 03:03:36 -0400 Subject: [PATCH 110/145] add training/prediction indicator arguments' --- .../R/beta_prior_estimation.R | 14 ++++++++++---- .../testthat/test-beta_prior_estimation.R | 14 +++++++------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R index bcee90cde..dadb48984 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R @@ -134,13 +134,17 @@ frac_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, de #' @template geo_level-template #' @template taus-template #' @template lp_solver-template +#' @template train_models-template +#' @template make_predictions-template #' #' @export frac_adj <- function(train_data, test_data, prior_test_data, indicator, signal, geo_level, signal_suffix, lambda, value_type, geo, training_end_date, model_save_dir, - taus = TAUS, lp_solver = LP_SOLVER) { + taus = TAUS, lp_solver = LP_SOLVER, + train_models = TRUE, + make_predictions = TRUE) { train_data$value_target <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_target_num", "value_target_denom") train_data$value_7dav <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") prior_test_data$value_7dav <- frac_adj_with_pseudo(prior_test_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") @@ -168,9 +172,11 @@ frac_adj <- function(train_data, test_data, prior_test_data, pseudo_counts <- est_priors(train_data, prior_test_data, geo, value_type, cov, taus, pre_covariates, "log_value_target", lp_solver, lambda, indicator, signal, geo_level, signal_suffix, - training_end_date, model_save_dir) - pseudo_denum = pseudo_counts[1] + pseudo_counts[2] - pseudo_num = 
pseudo_counts[1] + training_end_date, model_save_dir, + train_models = train_models, + make_predictions = make_predictions) + pseudo_denum = pseudo_counts[1] + pseudo_num = pseudo_counts[2] # update current data # For training train_data$value_raw[train_data[[cov]] == 1] <- frac_adj_with_pseudo( diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R index c0c49cc75..21eda983a 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R @@ -65,13 +65,13 @@ test_that("testing the squared error objection function given the beta prior", { test_that("testing the prior estimation", { dw <- "Sat_ref" priors <- est_priors(train_data, prior_test_data, geo, value_type, dw, TAUS, - covariates, response, lp_solver, lambda, + covariates, response, LP_SOLVER, lambda, indicator, signal, geo_level, signal_suffix, training_end_date, model_save_dir) - beta <- priors[2] - alpha <- priors[1] - beta - expect_true((alpha > 0) & (alpha < 4)) - expect_true((beta > 4) & (beta < 8)) + alpha <- priors[2] + beta <- priors[1] - alpha + expect_true((alpha > 1) & (alpha < 3)) + expect_true((beta > 4) & (beta < 6)) for (idx in 1:length(TAUS)) { tau <- TAUS[idx] @@ -124,7 +124,7 @@ test_that("testing the main beta prior adjustment function", { } expect_true(unique(updated_train_data$value_raw) == unique(updated_test_data$value_raw)) - expect_true(all(updated_train_data$value_raw < 6/(6+1))) - expect_true(all(updated_train_data$value_raw > 4/(4+4))) + expect_true(all(updated_train_data$value_raw < 3/(3+4))) + expect_true(all(updated_train_data$value_raw > 1/(1+6))) }) From 43de85f99434ccf4a8e4abc6afd69d052f2e9080 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Thu, 22 Sep 2022 03:04:07 -0400 Subject: [PATCH 111/145] update main --- .../delphiBackfillCorrection/R/main.R | 31 +++++++++++++------ .../delphiBackfillCorrection/man/frac_adj.Rd | 11 ++++++- 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 28080eb14..8a16fbda1 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -18,7 +18,7 @@ run_backfill <- function(df, params, training_end_date, refd_col = "time_value", lag_col = "lag", signal_suffixes = c(""), indicator = "", signal = "") { - df <- filter(df, .data$lag < params$ref_lag) + df <- filter(df, .data$lag < params$ref_lag + 30) # a rough filtration to save memory geo_levels <- params$geo_levels if ("state" %in% geo_levels) { @@ -116,8 +116,10 @@ run_backfill <- function(df, params, training_end_date, filter(.data$issue_date <= max(params$test_dates)) updated_data <- frac_adj(train_data, test_data, prior_test_data, indicator, signal, geo_level, signal_suffix, - training_end_date, params$cache_dir, - geo, value_type) + lambda, value_type, geo, + training_end_date, params$cache_dir, + train_models = params$train_models, + make_predictions = params$make_predictions) geo_train_data <- updated_data[[1]] geo_test_data <- updated_data[[2]] } @@ -195,6 +197,15 @@ main <- function(params) { message("both model training and prediction generation are turned off; exiting") return } + + if (params$train_models) { + # Remove all the 
stored models + files_list <- list.files(params$cache_dir, pattern="*.model", full.names = TRUE) + file.remove(file.path(mydir, files_list)) + + training_end_date <- as.Date(readLines( + file.path(params$cache_dir, "training_end_date.txt"))) + } ## Set default number of cores for mclapply to half of those available. if (params$parallel) { @@ -208,10 +219,6 @@ main <- function(params) { } } - #TO-DO - # Get the training end date according to the current date which is the date - # when the newest models were trained - # Loop over every indicator + signal combination. for (input_group in INDICATORS_AND_SIGNALS) { files_list <- get_files_list( @@ -220,7 +227,7 @@ main <- function(params) { if (length(files_list) == 0) { warning(str_interp( - "No files found for indicator {input_group$indicator} signal {input_group$signal}, skipping" + "No files found for indicator ${input_group$indicator} signal ${input_group$signal}, skipping" )) next } @@ -235,7 +242,7 @@ main <- function(params) { if (nrow(input_data) == 0) { warning(str_interp( - "No data available for indicator {input_group$indicator} signal {input_group$signal}, skipping" + "No data available for indicator ${input_group$indicator} signal ${input_group$signal}, skipping" )) next } @@ -256,5 +263,11 @@ main <- function(params) { run_backfill(input_data, params, training_end_date, indicator = input_group$indicator, signal = input_group$signal, signal_suffixes = input_group$name_suffix) + + if (params$train_models) { + # Save the training end date to a text file. + writeLines(as.character(TODAY), + file.path(params$cache_dir, "training_end_date.txt")) + } } } diff --git a/Backfill_Correction/delphiBackfillCorrection/man/frac_adj.Rd b/Backfill_Correction/delphiBackfillCorrection/man/frac_adj.Rd index 71589d75e..f2de00345 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/frac_adj.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/frac_adj.Rd @@ -18,7 +18,9 @@ frac_adj( training_end_date, model_save_dir, taus = TAUS, - lp_solver = LP_SOLVER + lp_solver = LP_SOLVER, + train_models = TRUE, + make_predictions = TRUE ) } \arguments{ @@ -63,6 +65,13 @@ must be between 0 and 1.} Quantgen fitting. Either "glpk" or "gurobi". For faster optimization, use Gurobi (requires separate installation of the `gurobi` package).} + +\item{train_models}{boolean indicating whether to train models (TRUE). If +FALSE previously trained models (stored locally) will be used instead. +Default is TRUE.} + +\item{make_predictions}{boolean indicating whether to generate and save +corrections (TRUE) or not. 
Default is TRUE.} } \description{ Update fraction using beta prior approach From ae218238a2ec1f2bd0d1defa1c403608cb05fbb2 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 22 Sep 2022 11:36:48 -0400 Subject: [PATCH 112/145] use write_parquet in test-io --- .../unit-tests/testthat/{model => cache}/.gitignore | 0 .../unit-tests/testthat/input/.gitignore | 2 ++ .../unit-tests/testthat/test-io.R | 10 ++++++---- 3 files changed, 8 insertions(+), 4 deletions(-) rename Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/{model => cache}/.gitignore (100%) create mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/input/.gitignore diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/model/.gitignore b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/cache/.gitignore similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/model/.gitignore rename to Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/cache/.gitignore diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/input/.gitignore b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/input/.gitignore new file mode 100644 index 000000000..f6c85953c --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/input/.gitignore @@ -0,0 +1,2 @@ +*.parquet + diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R index 217f11bc0..8717f8cbe 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R @@ -1,3 +1,5 @@ +library(arrow) + context("Testing io helper functions") # Constants @@ -45,12 +47,12 @@ test_that("testing creating file name pattern", { daily_file <- data.frame(test=TRUE) daily_file_name <- file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")) - write_csv(daily_file, daily_file_name) + write_parquet(daily_file, daily_file_name) rollup_file_name <- file.path(params$input_dir, str_interp("chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")) rollup_data <- data.frame(test=TRUE) - write_csv(rollup_file, rollup_file_name) + write_parquet(rollup_file, rollup_file_name) filtered_daily_file <- list.files( @@ -87,12 +89,12 @@ test_that("testing", { daily_file <- data.frame(test=TRUE) daily_file_name <- file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")) - write_csv(daily_file, daily_file_name) + write_parquet(daily_file, daily_file_name) rollup_file_name <- file.path(params$input_dir, str_interp("chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")) rollup_data <- data.frame(test=TRUE) - write_csv(rollup_file, rollup_file_name) + write_parquet(rollup_file, rollup_file_name) files <- get_files_list(indicator, signal, params) From 161c44b255d9ab52f65bc7a78b66832df5e33584 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 22 Sep 2022 13:42:49 -0400 Subject: [PATCH 113/145] new test params to fix connection errors --- .../testthat/params-run.json.template | 8 +++ ...son.template => params-test.json.template} | 0 .../unit-tests/testthat/test-io.R | 58 
++++++++++--------- .../unit-tests/testthat/test-utils.R | 10 ++-- 4 files changed, 45 insertions(+), 31 deletions(-) create mode 100644 Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/params-run.json.template rename Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/{params.json.template => params-test.json.template} (100%) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/params-run.json.template b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/params-run.json.template new file mode 100644 index 000000000..f2224855a --- /dev/null +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/params-run.json.template @@ -0,0 +1,8 @@ +{ + "training_end_date": "2022-01-01", + "training_days": 7, + "ref_lag": 3, + "input_dir": "./input", + "export_dir": "./output", + "cache_dir": "./cache" +} diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/params.json.template b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/params-test.json.template similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/params.json.template rename to Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/params-test.json.template diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R index 8717f8cbe..d56e6b107 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R @@ -11,25 +11,25 @@ lambda <- 0.1 geo <- "pa" value_type <- "fraction" date_format = "%Y%m%d" +training_end_date <- as.Date("2022-01-01") -params <- list() -params$training_end_date <- as.Date("2022-01-01") -params$training_days <- 7 -params$ref_lag <- 3 -params$input_dir <- "./input" -params$export_dir <- "./output" - +create_dir_not_exist("./input") +create_dir_not_exist("./output") +create_dir_not_exist("./cache") test_that("testing exporting the output file", { + params <- read_params("params-run.json", "params-run.json.template") + test_data <- data.frame(test=TRUE) coef_data <- data.frame(test=TRUE) - + export_test_result(test_data, coef_data, indicator, signal, geo_level, signal_suffix, lambda, training_end_date, - value_type, export_dir) - prediction_file <- "./output/prediction_2022-01-01_chng_outpatient_state_lambda0.1_fraction.csv" - coefs_file <- "./output/coefs_2022-01-01_chng_outpatient_state_lambda0.1_fraction.csv" + value_type, params$export_dir) + prediction_file <- file.path(params$export_dir, "prediction_2022-01-01_chng_outpatient_state_lambda0.1_fraction.csv") + coefs_file <- file.path(params$export_dir, "coefs_2022-01-01_chng_outpatient_state_lambda0.1_fraction.csv") + expect_true(file.exists(prediction_file)) expect_true(file.exists(coefs_file)) @@ -40,19 +40,21 @@ test_that("testing exporting the output file", { test_that("testing creating file name pattern", { + params <- read_params("params-run.json", "params-run.json.template") + daily_pattern <- create_name_pattern(indicator, signal, "daily") rollup_pattern <- create_name_pattern(indicator, signal, "rollup") # Create test files - daily_file <- data.frame(test=TRUE) + daily_data <- data.frame(test=TRUE) daily_file_name <- file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")) - write_parquet(daily_file, daily_file_name) + 
write_parquet(daily_data, daily_file_name) rollup_file_name <- file.path(params$input_dir, str_interp("chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")) rollup_data <- data.frame(test=TRUE) - write_parquet(rollup_file, rollup_file_name) + write_parquet(rollup_data, rollup_file_name) filtered_daily_file <- list.files( @@ -69,32 +71,36 @@ test_that("testing creating file name pattern", { test_that("testing", { - daily_files_list <- c(str_interp("./input/chng_outpatient_as_of_${format(TODAY-15, date_format)}.parquet"), - str_interp("./input/chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet"), - str_interp("./input/chng_outpatient_as_of_${format(TODAY, date_format)}.parquet")) + params <- read_params("params-run.json", "params-run.json.template") + + daily_files_list <- c(file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY-15, date_format)}.parquet")), + file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")), + file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY, date_format)}.parquet"))) daily_valid_files <- subset_valid_files(daily_files_list, "daily", params) expect_equal(daily_valid_files, daily_files_list[2]) - rollup_files_list <- c(str_interp( - "./input/chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY-11, date_format)}.parquet"), - str_interp( - "./input/chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet"), - str_interp( - "./input/chng_outpatient_from_${format(TODAY, date_format)}_to_${format(TODAY+3, date_format)}.parquet")) + rollup_files_list <- c(file.path(params$input_dir, str_interp( + "chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY-11, date_format)}.parquet")), + file.path(params$input_dir, str_interp( + "chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")), + file.path(params$input_dir, str_interp( + "chng_outpatient_from_${format(TODAY, date_format)}_to_${format(TODAY+3, date_format)}.parquet"))) rollup_valid_files <- subset_valid_files(rollup_files_list, "rollup", params) expect_equal(rollup_valid_files, rollup_files_list[2]) }) test_that("testing", { - daily_file <- data.frame(test=TRUE) + params <- read_params("params-run.json", "params-run.json.template") + + daily_data <- data.frame(test=TRUE) daily_file_name <- file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")) - write_parquet(daily_file, daily_file_name) + write_parquet(daily_data, daily_file_name) rollup_file_name <- file.path(params$input_dir, str_interp("chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")) rollup_data <- data.frame(test=TRUE) - write_parquet(rollup_file, rollup_file_name) + write_parquet(rollup_data, rollup_file_name) files <- get_files_list(indicator, signal, params) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R index ccaf3d774..a733f2a1d 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R @@ -39,12 +39,12 @@ test_that("testing get the top200 populous counties", { test_that("testing read parameters", { # No input file - expect_error(read_params(path = "params.json", 
template_path = "params.json.template", + expect_error(read_params(path = "params-test.json", template_path = "params-test.json.template", train_models = TRUE, make_predictions = TRUE), "input_dir must be set in `params` and exist") # Check parameters - params <- read_json("params.json", simplifyVector = TRUE) + params <- read_json("params-test.json", simplifyVector = TRUE) # Check initialization expect_true(!("export_dir" %in% names(params))) expect_true(!("cache_dir" %in% names(params))) @@ -75,8 +75,8 @@ test_that("testing read parameters", { # Create input file path = "test.tempt" create_dir_not_exist(path) - expect_silent(params <- read_params(path = "params.json", - template_path = "params.json.template", + expect_silent(params <- read_params(path = "params-test.json", + template_path = "params-test.json.template", train_models = TRUE, make_predictions = TRUE)) unlink(path, recursive = TRUE) @@ -130,7 +130,7 @@ test_that("testing read parameters", { end_date <- TODAY - 1 expect_true(all(params$test_dates == seq(start_date, end_date, by="days"))) - expect_silent(file.remove("params.json")) + expect_silent(file.remove("params-test.json")) }) From 051d2fdab945bd4b0ab617aa474fbc66fdfde994 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 22 Sep 2022 16:48:56 -0400 Subject: [PATCH 114/145] remove model, receiving dirs in R/ --- Backfill_Correction/delphiBackfillCorrection/R/model/.gitignore | 1 - .../delphiBackfillCorrection/R/receiving/.gitignore | 1 - 2 files changed, 2 deletions(-) delete mode 100644 Backfill_Correction/delphiBackfillCorrection/R/model/.gitignore delete mode 100644 Backfill_Correction/delphiBackfillCorrection/R/receiving/.gitignore diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model/.gitignore b/Backfill_Correction/delphiBackfillCorrection/R/model/.gitignore deleted file mode 100644 index 0149797d2..000000000 --- a/Backfill_Correction/delphiBackfillCorrection/R/model/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.model diff --git a/Backfill_Correction/delphiBackfillCorrection/R/receiving/.gitignore b/Backfill_Correction/delphiBackfillCorrection/R/receiving/.gitignore deleted file mode 100644 index afed0735d..000000000 --- a/Backfill_Correction/delphiBackfillCorrection/R/receiving/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.csv From 87f69f147c944cf3743ae5c0dfb4408e9304c91c Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 22 Sep 2022 16:57:49 -0400 Subject: [PATCH 115/145] remove test params during teardown --- .../delphiBackfillCorrection/unit-tests/testthat/test-io.R | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R index d56e6b107..4bee23160 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R @@ -36,6 +36,7 @@ test_that("testing exporting the output file", { # Remove file.remove(prediction_file) file.remove(coefs_file) + file.remove("params-run.json") }) @@ -67,6 +68,7 @@ test_that("testing creating file name pattern", { file.remove(daily_file_name) file.remove(rollup_file_name) + file.remove("params-run.json") }) @@ -87,6 +89,8 @@ test_that("testing", { "chng_outpatient_from_${format(TODAY, date_format)}_to_${format(TODAY+3, date_format)}.parquet"))) rollup_valid_files <- 
subset_valid_files(rollup_files_list, "rollup", params) expect_equal(rollup_valid_files, rollup_files_list[2]) + + file.remove("params-run.json") }) test_that("testing", { @@ -108,6 +112,7 @@ test_that("testing", { file.remove(daily_file_name) file.remove(rollup_file_name) + file.remove("params-run.json") }) From aa796e4ab395e3055a14a6d369d3b626fd3a38a6 Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Fri, 23 Sep 2022 07:45:03 -0400 Subject: [PATCH 116/145] Update Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R Co-authored-by: nmdefries <42820733+nmdefries@users.noreply.github.com> --- .../delphiBackfillCorrection/unit-tests/testthat/test-model.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R index 3d9e19894..4a3c5bae6 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R @@ -7,7 +7,7 @@ geo_level <- "state" signal_suffix <- "" lambda <- 0.1 test_lag <- 1 -model_save_dir <- "./model" +model_save_dir <- "./cache" geo <- "pa" value_type <- "fraction" training_end_date <- as.Date("2022-01-01") From 460fc67d0151ef81905345ecaf7601d89aedd554 Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Fri, 23 Sep 2022 07:47:24 -0400 Subject: [PATCH 117/145] Update Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R Co-authored-by: nmdefries <42820733+nmdefries@users.noreply.github.com> --- .../unit-tests/testthat/test-beta_prior_estimation.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R index 21eda983a..59ea2beda 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R @@ -8,7 +8,7 @@ signal_suffix <- "" lambda <- 0.1 geo <- "pa" value_type <- "fraction" -model_save_dir <- "./model" +model_save_dir <- "./cache" training_end_date <- as.Date("2022-01-01") # Generate Test Data From 0fb394f2e82a2d93a7b7828d5c6985082fb29b63 Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Fri, 23 Sep 2022 07:48:33 -0400 Subject: [PATCH 118/145] Update Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R Co-authored-by: nmdefries <42820733+nmdefries@users.noreply.github.com> --- .../unit-tests/testthat/test-model.R | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R index 4a3c5bae6..2a1221344 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R @@ -67,6 +67,12 @@ test_that("testing generating or loading the model", { obj <- get_model(model_path, train_data, covariates, tau, lambda, LP_SOLVER, train_models=TRUE) expect_true(file.exists(model_path)) + created <- 
file.info(model_path)$ctime + + # Check that the model was not generated again. + obj <- get_model(model_path, train_data, covariates, tau, + lambda, LP_SOLVER, train_models=FALSE) + expect_equal(file.info(model_path)$ctime, created) expect_silent(file.remove(model_path)) }) From 1988f6bba84ae9841c874a7028c0a20bb83f4347 Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Fri, 23 Sep 2022 07:53:19 -0400 Subject: [PATCH 119/145] update the names for test_that cases --- .../delphiBackfillCorrection/unit-tests/testthat/test-io.R | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R index 4bee23160..07636e140 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R @@ -72,7 +72,7 @@ test_that("testing creating file name pattern", { }) -test_that("testing", { +test_that("testing the filtration of the files for training and predicting", { params <- read_params("params-run.json", "params-run.json.template") daily_files_list <- c(file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY-15, date_format)}.parquet")), @@ -93,7 +93,7 @@ test_that("testing", { file.remove("params-run.json") }) -test_that("testing", { +test_that("testing fetching list of files for training and predicting", { params <- read_params("params-run.json", "params-run.json.template") daily_data <- data.frame(test=TRUE) From 8c17f073aa19e52069041c5c79b509f3d356f8db Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Fri, 23 Sep 2022 07:55:47 -0400 Subject: [PATCH 120/145] Add test cases across the year boundary --- .../unit-tests/testthat/test-preprocessing.R | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R index a5c25fdf0..ed29a35b7 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R @@ -82,6 +82,10 @@ test_that("testing adding columns for each day of a week", { test_that("testing the calculation of week of a month", { + expect_equal(get_weekofmonth(as.Date("2021-12-31")), 1) + expect_equal(get_weekofmonth(as.Date("2022-01-01")), 1) + expect_equal(get_weekofmonth(as.Date("2022-01-02")), 2) + expect_equal(get_weekofmonth(as.Date("2022-09-01")), 1) expect_equal(get_weekofmonth(as.Date("2022-09-04")), 2) expect_equal(get_weekofmonth(as.Date("2022-09-24")), 4) From 909243f740c2472ad6b5dec426b6ba67368f725a Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Fri, 23 Sep 2022 07:56:47 -0400 Subject: [PATCH 121/145] Update Backfill_Correction/delphiBackfillCorrection/R/main.R Co-authored-by: nmdefries <42820733+nmdefries@users.noreply.github.com> --- Backfill_Correction/delphiBackfillCorrection/R/main.R | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 8a16fbda1..37ae2f98e 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ 
b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -202,10 +202,10 @@ main <- function(params) { # Remove all the stored models files_list <- list.files(params$cache_dir, pattern="*.model", full.names = TRUE) file.remove(file.path(mydir, files_list)) - - training_end_date <- as.Date(readLines( - file.path(params$cache_dir, "training_end_date.txt"))) } + + training_end_date <- as.Date(readLines( + file.path(params$cache_dir, "training_end_date.txt"))) ## Set default number of cores for mclapply to half of those available. if (params$parallel) { From bcdd91af574584f2de049ad5cc820495ebaff8d8 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Fri, 23 Sep 2022 13:59:43 -0400 Subject: [PATCH 122/145] line-ending comma in template params --- Backfill_Correction/params.json.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Backfill_Correction/params.json.template b/Backfill_Correction/params.json.template index 293febe05..b0a8eec8e 100644 --- a/Backfill_Correction/params.json.template +++ b/Backfill_Correction/params.json.template @@ -4,7 +4,7 @@ "cache_dir": "./cache", "testing_window": 1, "training_days": 30, - "lag_pad":2 + "lag_pad": 2, "export_dir": "./receiving", "geo_levels": ["state", "county"], "value_types": ["count", "fraction"], From 2b0e5d70ca8116417e071ac52e1c88272a8b3f08 Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Fri, 23 Sep 2022 18:33:40 -0400 Subject: [PATCH 123/145] Update Backfill_Correction/delphiBackfillCorrection/DESCRIPTION Co-authored-by: Katie Mazaitis --- Backfill_Correction/delphiBackfillCorrection/DESCRIPTION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION b/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION index d776a88c5..d74c639a2 100644 --- a/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION +++ b/Backfill_Correction/delphiBackfillCorrection/DESCRIPTION @@ -5,7 +5,7 @@ Version: 1.0 Date: 2022-08-24 Author: Jingjing Tang Maintainer: Jingjing Tang -Description: Takes formatted output from COVIDcast API data pipelines and +Description: Takes auxiliary output from COVIDcast API data pipelines and adjusts unusual values using a lasso-penalized quantile regression. Output is used for research and model development. 
License: file LICENSE From 29f335527d38f09c94c831ff33529f03afa0bf23 Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Fri, 23 Sep 2022 18:34:34 -0400 Subject: [PATCH 124/145] Update Backfill_Correction/correct_local_signal.R Co-authored-by: Katie Mazaitis --- Backfill_Correction/correct_local_signal.R | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Backfill_Correction/correct_local_signal.R b/Backfill_Correction/correct_local_signal.R index 15a5caa3b..94208b7f9 100644 --- a/Backfill_Correction/correct_local_signal.R +++ b/Backfill_Correction/correct_local_signal.R @@ -15,8 +15,8 @@ suppressPackageStartupMessages({ parser <- arg_parser(description='Run backfill corrections on a single signal + geo type combination from local data') parser <- add_argument(parser, arg="--input_dir", type="character", help = "Path to the input file") parser <- add_argument(parser, arg="--export_dir", type="character", default = "../export_dir", help = "Pth to the export directory") -parser <- add_argument(parser, arg="--test_start_date", type="character", help = "Should be in the format as '2020-01-01'") -parser <- add_argument(parser, arg="--test_end_date", type="character", help = "Should be in the format as '2020-01-01'") +parser <- add_argument(parser, arg="--test_start_date", type="character", help = "Should be in the format as 'YYYY-MM-DD'") +parser <- add_argument(parser, arg="--test_end_date", type="character", help = "Should be in the format as 'YYYY-MM-DD'") parser <- add_argument(parser, arg="--testing_window", type="integer", default = 1, help = "The number of issue dates for testing per trained model") parser <- add_argument(parser, arg="--value_type", type="character", default = "fraction", help = "Can be 'count' or 'fraction'") parser <- add_argument(parser, arg="--num_col", type="character", default = "num", help = "The column name for the numerator") From 3c62f8063c3e2d148d935a3c6f4c24041608c1ee Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Fri, 23 Sep 2022 20:28:00 -0400 Subject: [PATCH 125/145] update the get_weekofmonths functions --- .../R/preprocessing.R | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R index c2143cc76..4424fde4b 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R @@ -122,14 +122,18 @@ add_dayofweek <- function(df, time_col, suffix, wd = WEEKDAYS_ABBR) { #' Get week of a month info according to a date #' #' All the dates on or before the ith Sunday but after the (i-1)th Sunday -#' is considered to be the ith week. Notice that the dates in the 5th week -#' this month are actually in the same week with the dates in the 1st week -#' next month and those dates are sparse. Thus, we assign the dates in the -#' 5th week to the 1st week. +#' is considered to be the ith week. Notice that +#' If there are 4 or 5 weeks in total, the ith weeks is labeled as i +#' and the dates in the 5th week this month are actually in the same +#' week with the dates in the 1st week next month and those dates are +#' sparse. Thus, we assign the dates in the 5th week to the 1st week. 
+#' If there are 6 weeks in total, the 1st, 2nd, 3rd, 4th, 5th, 6th weeks +#' are labeled as c(1, 1, 2, 3, 4, 1) which means we will merge the first, +#' second and the last weeks together. #' #' @param date Date object #' -#' @importFrom lubridate make_date year month day +#' @importFrom lubridate make_date days_in_month year month day #' #' @return a integer indicating which week it is in a month get_weekofmonth <- function(date) { @@ -137,7 +141,10 @@ get_weekofmonth <- function(date) { month <- month(date) day <- day(date) firstdayofmonth <- as.numeric(format(make_date(year, month, 1), format="%u")) - return (((day + firstdayofmonth - 1) %/% 7) %% 4 + 1) + n_days <- lubridate::days_in_month(date) + n_weeks <- (n_days + firstdayofmonth - 1) %/% 7 + 1 + extra_check <- as.integer(n_weeks > 5) + return (max((day + firstdayofmonth - 1) %/% 7 - extra_check, 0) %% 4 + 1) } #' Add one hot encoding for week of a month info in terms of issue date From 3f2691556fc9d69ed90bc428449730338146a55d Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Fri, 23 Sep 2022 20:28:42 -0400 Subject: [PATCH 126/145] add test cases for updated get_weekofmonths --- .../unit-tests/testthat/test-preprocessing.R | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R index ed29a35b7..7d69d10b0 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R @@ -84,13 +84,21 @@ test_that("testing adding columns for each day of a week", { test_that("testing the calculation of week of a month", { expect_equal(get_weekofmonth(as.Date("2021-12-31")), 1) expect_equal(get_weekofmonth(as.Date("2022-01-01")), 1) - expect_equal(get_weekofmonth(as.Date("2022-01-02")), 2) + expect_equal(get_weekofmonth(as.Date("2022-01-02")), 1) + expect_equal(get_weekofmonth(as.Date("2022-01-09")), 1) expect_equal(get_weekofmonth(as.Date("2022-09-01")), 1) expect_equal(get_weekofmonth(as.Date("2022-09-04")), 2) expect_equal(get_weekofmonth(as.Date("2022-09-24")), 4) expect_equal(get_weekofmonth(as.Date("2022-09-25")), 1) + expect_equal(get_weekofmonth(as.Date("2022-10-01")), 1) + expect_equal(get_weekofmonth(as.Date("2022-10-02")), 1) + expect_equal(get_weekofmonth(as.Date("2022-10-09")), 2) + expect_equal(get_weekofmonth(as.Date("2022-10-16")), 3) + expect_equal(get_weekofmonth(as.Date("2022-10-23")), 4) + expect_equal(get_weekofmonth(as.Date("2022-10-30")), 1) + }) test_that("testing adding columns for each week of a month", { From 814e75432dc01d9911225ed455eeed3969e09984 Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Fri, 23 Sep 2022 20:31:45 -0400 Subject: [PATCH 127/145] fix a typo --- .../unit-tests/testthat/test-preprocessing.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R index 7d69d10b0..8bde8c68e 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R @@ -85,7 +85,7 @@ test_that("testing the calculation of 
week of a month", { expect_equal(get_weekofmonth(as.Date("2021-12-31")), 1) expect_equal(get_weekofmonth(as.Date("2022-01-01")), 1) expect_equal(get_weekofmonth(as.Date("2022-01-02")), 1) - expect_equal(get_weekofmonth(as.Date("2022-01-09")), 1) + expect_equal(get_weekofmonth(as.Date("2022-01-09")), 2) expect_equal(get_weekofmonth(as.Date("2022-09-01")), 1) expect_equal(get_weekofmonth(as.Date("2022-09-04")), 2) From dfaa2230ae763ca422742a35691bd7c3105c0106 Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Fri, 23 Sep 2022 20:58:42 -0400 Subject: [PATCH 128/145] Add prediction example and model file name suffix --- Backfill_Correction/README.md | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/Backfill_Correction/README.md b/Backfill_Correction/README.md index 1f9d4af90..fffb951ae 100644 --- a/Backfill_Correction/README.md +++ b/Backfill_Correction/README.md @@ -83,7 +83,7 @@ anything that needs it to read or write files. Repeatedly building the package and running the full check suite is tedious if you are working on fixing a failing test. A faster workflow is this: -1. Set your R working directory to `delphiFacebook/tests/testthat`. +1. Set your R working directory to `delphiBackfillCorrection/tests/testthat`. 2. Run `testthat::test_dir('.')` This will test the live code without having to rebuild the package. @@ -114,9 +114,15 @@ Required columns without fixed column names: The pipeline produces two output types: 1. Predictions -2. Model objects. In production, models are trained on the last year of + +| geo_value | time_value |lag | value | predicted_tauX | ... | wis | +|--- | --- | --- | --- |--- |--- |--- | +| pa | 2022-01-01 | 1 | 0.1 | 0 | ... | 0.01 | + +3. Model objects. In production, models are trained on the last year of versions (as-of dates) and the last year of reference (report) dates. For one signal at the state level, a model takes about 30 minutes to train. Due to resource limitations in production, we only train models once a month and save the model objects between runs. By default, these are saved to the - `cache` directory. + `cache` directory name with suffix `.model`. + From 873f3ccd120de3209bce9d5e0cc4d5c30caa81e5 Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Fri, 23 Sep 2022 21:04:02 -0400 Subject: [PATCH 129/145] fix an error in the makefile --- Backfill_Correction/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Backfill_Correction/Makefile b/Backfill_Correction/Makefile index 76b4d1fd1..3dd2f006d 100644 --- a/Backfill_Correction/Makefile +++ b/Backfill_Correction/Makefile @@ -14,7 +14,7 @@ lib: run-R: rm -rf tmp time Rscript run.R 2>&1 | tee tmp - grep "run_facebook completed successfully" tmp + grep "run_backfill_corrections completed successfully" tmp grep "scheduled core" tmp ; \ [ "$$?" 
-eq 1 ] From 9f1db9e41aed031f0aa9a9c6461c5578e31495fe Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Fri, 23 Sep 2022 18:30:58 -0400 Subject: [PATCH 130/145] remove bash-init.sh use --- Backfill_Correction/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Backfill_Correction/Makefile b/Backfill_Correction/Makefile index 3dd2f006d..b8988cf98 100644 --- a/Backfill_Correction/Makefile +++ b/Backfill_Correction/Makefile @@ -1,4 +1,4 @@ -SHELL:=/bin/bash --rcfile bash-init.sh +SHELL:=/bin/bash default: @echo No default implemented yet From 06c3d4e10e5eed62eb39473b9895f30315704973 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Mon, 26 Sep 2022 10:53:44 -0400 Subject: [PATCH 131/145] make named log file --- Backfill_Correction/Makefile | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/Backfill_Correction/Makefile b/Backfill_Correction/Makefile index b8988cf98..943d3e4cb 100644 --- a/Backfill_Correction/Makefile +++ b/Backfill_Correction/Makefile @@ -1,5 +1,9 @@ SHELL:=/bin/bash +TODAY:=$(shell date -u +"%Y-%m-%d") +CURR_TIME:=$(shell date -u +"%Hh%Mm%Ss") +LOG_FILE:=$(TODAY)_$(CURR_TIME).log + default: @echo No default implemented yet @@ -12,10 +16,9 @@ lib: R -e 'roxygen2::roxygenise("delphiBackfillCorrection")' run-R: - rm -rf tmp - time Rscript run.R 2>&1 | tee tmp - grep "run_backfill_corrections completed successfully" tmp - grep "scheduled core" tmp ; \ + time Rscript run.R 2>&1 | tee $(LOG_FILE) + grep "backfill correction completed successfully" $(LOG_FILE) + grep "scheduled core" $(LOG_FILE) ; \ [ "$$?" -eq 1 ] coverage: From 9ad8073fe2214bc44a6c9ab7001125932c307189 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Mon, 26 Sep 2022 11:02:13 -0400 Subject: [PATCH 132/145] move covidcst import; rebuild package --- .../delphiBackfillCorrection/NAMESPACE | 1 + .../R/delphiBackfillCorrection.R | 3 +-- .../delphiBackfillCorrection/R/utils.R | 1 + .../delphiBackfillCorrection/man/get_weekofmonth.Rd | 12 ++++++++---- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE index f2660339c..133d2a5b7 100644 --- a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE +++ b/Backfill_Correction/delphiBackfillCorrection/NAMESPACE @@ -37,6 +37,7 @@ importFrom(dplyr,summarize) importFrom(evalcast,weighted_interval_score) importFrom(jsonlite,read_json) importFrom(lubridate,day) +importFrom(lubridate,days_in_month) importFrom(lubridate,make_date) importFrom(lubridate,month) importFrom(lubridate,year) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/delphiBackfillCorrection.R b/Backfill_Correction/delphiBackfillCorrection/R/delphiBackfillCorrection.R index ff49ab375..57d79fd47 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/delphiBackfillCorrection.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/delphiBackfillCorrection.R @@ -1,4 +1,3 @@ -# Suppress R CMD check note +# Load `tribble` for defining global variables #' @importFrom tibble tribble -#' @import covidcast NULL diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/Backfill_Correction/delphiBackfillCorrection/R/utils.R index af8228e1c..fdcaf42e4 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/utils.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/utils.R @@ -151,6 +151,7 @@ 
training_days_check <- function(issue_date, training_days = TRAINING_DAYS) { #' @importFrom dplyr select %>% arrange desc pull #' @importFrom rlang .data #' @importFrom utils head +#' @import covidcast get_populous_counties <- function() { return( covidcast::county_census %>% diff --git a/Backfill_Correction/delphiBackfillCorrection/man/get_weekofmonth.Rd b/Backfill_Correction/delphiBackfillCorrection/man/get_weekofmonth.Rd index c9307d25e..08d340d7f 100644 --- a/Backfill_Correction/delphiBackfillCorrection/man/get_weekofmonth.Rd +++ b/Backfill_Correction/delphiBackfillCorrection/man/get_weekofmonth.Rd @@ -14,8 +14,12 @@ a integer indicating which week it is in a month } \description{ All the dates on or before the ith Sunday but after the (i-1)th Sunday -is considered to be the ith week. Notice that the dates in the 5th week -this month are actually in the same week with the dates in the 1st week -next month and those dates are sparse. Thus, we assign the dates in the -5th week to the 1st week. +is considered to be the ith week. Notice that + If there are 4 or 5 weeks in total, the ith weeks is labeled as i + and the dates in the 5th week this month are actually in the same + week with the dates in the 1st week next month and those dates are + sparse. Thus, we assign the dates in the 5th week to the 1st week. + If there are 6 weeks in total, the 1st, 2nd, 3rd, 4th, 5th, 6th weeks + are labeled as c(1, 1, 2, 3, 4, 1) which means we will merge the first, + second and the last weeks together. } From 55dcad9ee06bf1fda724ac5072b5ffac2e39b2f8 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Mon, 26 Sep 2022 11:06:11 -0400 Subject: [PATCH 133/145] compare successes to # of taus --- Backfill_Correction/delphiBackfillCorrection/R/model.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/Backfill_Correction/delphiBackfillCorrection/R/model.R index 48b02780d..f1b0ed3b1 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/model.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/model.R @@ -127,7 +127,7 @@ model_training_and_testing <- function(train_data, test_data, taus, covariates, error=function(e) {print(paste("Training failed for", model_path, sep=" "))} ) } - if (success < 9) {return (NULL)} + if (success < length(taus)) {return (NULL)} if (!make_predictions) {return (list())} coef_combined_result = data.frame(tau=taus, geo=geo, test_lag=test_lag) From c485807b64d3760e0333d00b7cd3ee8739abc73b Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Mon, 26 Sep 2022 11:09:54 -0400 Subject: [PATCH 134/145] update testpath in readme --- Backfill_Correction/README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Backfill_Correction/README.md b/Backfill_Correction/README.md index fffb951ae..bacccd81b 100644 --- a/Backfill_Correction/README.md +++ b/Backfill_Correction/README.md @@ -67,13 +67,13 @@ There should be good coverage of all the core functions in the package. Because the package tests involve reading and writing files, we must be careful with working directories to ensure the tests are portable. -For reading and writing to files contained in the `tests/testthat/` directory, +For reading and writing to files contained in the `unit-tests/testthat/` directory, use the `testthat::test_path` function. 
It works much like `file.path` but -automatically provides paths relative to `tests/testthat/`, so e.g. -`test_path("input")` becomes `tests/testthat/input/` or whatever relative path +automatically provides paths relative to `unit-tests/testthat/`, so e.g. +`test_path("input")` becomes `unit-tests/testthat/input/` or whatever relative path is needed to get there. -`params.json` files contain paths, so `tests/testthat/helper-relativize.R` +`params.json` files contain paths, so `unit-tests/testthat/helper-relativize.R` contains `relativize_params`, which takes a `params` list and applies `test_path` to all of its path components. This object can then be passed to anything that needs it to read or write files. @@ -83,7 +83,7 @@ anything that needs it to read or write files. Repeatedly building the package and running the full check suite is tedious if you are working on fixing a failing test. A faster workflow is this: -1. Set your R working directory to `delphiBackfillCorrection/tests/testthat`. +1. Set your R working directory to `delphiBackfillCorrection/unit-tests/testthat`. 2. Run `testthat::test_dir('.')` This will test the live code without having to rebuild the package. From d37ae59ea52750bd40767979c17748443b9082c3 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Mon, 26 Sep 2022 11:20:18 -0400 Subject: [PATCH 135/145] smallest system-derived # cores is 1 --- Backfill_Correction/delphiBackfillCorrection/R/main.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/Backfill_Correction/delphiBackfillCorrection/R/main.R index 37ae2f98e..788eed7f5 100644 --- a/Backfill_Correction/delphiBackfillCorrection/R/main.R +++ b/Backfill_Correction/delphiBackfillCorrection/R/main.R @@ -215,7 +215,7 @@ main <- function(params) { warning("Could not detect the number of CPU cores; parallel mode disabled") params$parallel <- FALSE } else { - options(mc.cores = min(params$parallel_max_cores, floor(cores / 2))) + options(mc.cores = min(params$parallel_max_cores, max(floor(cores / 2), 1L))) } } From 2f724f9adc6019400d202968c5a398fb063303e6 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Mon, 26 Sep 2022 12:11:11 -0400 Subject: [PATCH 136/145] fixed an error in the unittest --- .../unit-tests/testthat/test-preprocessing.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R index 8bde8c68e..4bfc0ef31 100644 --- a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R +++ b/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R @@ -106,7 +106,7 @@ test_that("testing adding columns for each week of a month", { expect_equal(ncol(fake_df) + 3, ncol(df_new)) expect_true(all(rowSums(df_new[, -c(1:ncol(fake_df))]) == 1)) - expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-03"), "W2_issue"] == 1)) + expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-03"), "W1_issue"] == 1)) }) From 775e84ab6e5dc3193d6255aac8f7ac79e1b40697 Mon Sep 17 00:00:00 2001 From: Jingjing Tang Date: Mon, 26 Sep 2022 12:11:41 -0400 Subject: [PATCH 137/145] remove the tooling script --- .../delphiBackfillCorrection/R/tooling.R | 178 ------------------ 1 file changed, 178 deletions(-) delete mode 100644 Backfill_Correction/delphiBackfillCorrection/R/tooling.R 
diff --git a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R b/Backfill_Correction/delphiBackfillCorrection/R/tooling.R deleted file mode 100644 index d7cf36ff8..000000000 --- a/Backfill_Correction/delphiBackfillCorrection/R/tooling.R +++ /dev/null @@ -1,178 +0,0 @@ -#' Corrected estimates from a single local signal -#' -#' @template df-template -#' @template export_dir-template -#' @param test_date_list Date vector of dates to make predictions for -#' @param value_cols character vector of numerator and/or denominator field names -#' @template value_type-template -#' @template taus-template -#' @param test_lags integer vector of number of days ago to predict for -#' @template training_days-template -#' @template testing_window-template -#' @template ref_lag-template -#' @template lambda-template -#' @template lp_solver-template -#' -#' @importFrom dplyr %>% filter -#' @importFrom plyr rbind.fill -#' @importFrom tidyr drop_na -#' @importFrom rlang .data .env -#' -#' @export -run_backfill_local <- function(df, export_dir, test_date_list, value_cols, value_type, - taus = TAUS, test_lags = TEST_LAGS, - training_days = TRAINING_DAYS, testing_window = TESTING_WINDOW, - ref_lag = REF_LAG, lambda = LAMBDA, lp_solver = LP_SOLVER) { - # Get all the locations that are considered - geo_list <- unique(df[df$time_value %in% test_date_list, "geo_value"]) - # Build model for each location - res_list = list() - res_indx = 1 - coef_df_list = list() - - for (geo in geo_list) { - subdf <- df %>% filter(.data$geo_value == .env$geo) %>% filter(.data$lag < .env$ref_lag) - min_refd <- min(subdf$time_value) - max_refd <- max(subdf$time_value) - subdf <- fill_rows(subdf, "time_value", "lag", min_refd, max_refd) - if (value_type == "count") { # For counts data only - combined_df <- fill_missing_updates(subdf, value_cols[1], "time_value", "lag") - combined_df <- add_7davs_and_target(combined_df, "value_raw", "time_value", "lag", ref_lag) - } else if (value_type == "fraction") { - combined_num_df <- fill_missing_updates(subdf, value_cols[1], "time_value", "lag") - combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", "time_value", "lag", ref_lag) - - combined_denom_df <- fill_missing_updates(subdf, value_cols[2], "time_value", "lag") - combined_denom_df <- add_7davs_and_target(combined_denom_df, "value_raw", "time_value", "lag", ref_lag) - - combined_df <- merge(combined_num_df, combined_denom_df, - by=c("time_value", "issue_date", "lag", "target_date"), all.y=TRUE, - suffixes=c("_num", "_denom")) - } - combined_df <- add_params_for_dates(combined_df, "time_value", "lag") - - for (test_date in test_date_list) { - geo_train_data = combined_df %>% - filter(.data$issue_date < .env$test_date) %>% - filter(.data$target_date <= .env$test_date) %>% - filter(.data$target_date > .env$test_date - .env$training_days) %>% - drop_na() - geo_test_data = combined_df %>% - filter(.data$issue_date >= .env$test_date) %>% - filter(.data$issue_date < .env$test_date + .env$testing_window) %>% - drop_na() - if (nrow(geo_test_data) == 0) next - if (nrow(geo_train_data) <= 200) next - if (value_type == "fraction") { - geo_prior_test_data = combined_df %>% - filter(.data$issue_date > .env$test_date - 7) %>% - filter(.data$issue_date <= .env$test_date) - - updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) - geo_train_data <- updated_data[[1]] - geo_test_data <- updated_data[[2]] - } - - max_raw = sqrt(max(geo_train_data$value_raw)) - for (test_lag in test_lags) { - filtered_data <- 
data_filteration(test_lag, geo_train_data, geo_test_data) - train_data <- filtered_data[[1]] - test_data <- filtered_data[[2]] - - updated_data <- add_sqrtscale(train_data, test_data, max_raw, "value_raw") - train_data <- updated_data[[1]] - test_data <- updated_data[[2]] - sqrtscale <- updated_data[[3]] - - covariates <- list( - Y7DAV, paste0(WEEKDAYS_ABBR, "_ref"), paste0(WEEKDAYS_ABBR, "_issue"), - WEEK_ISSUES, SLOPE, SQRTSCALE - ) - params_list <- c(YITL, as.vector(unlist(covariates))) - - # Model training and testing - model_path_prefix <- generate_model_filename_prefix( - indicator, signal, geo, signal_suffix, value_type, test_lag, tau, lambda) - prediction_results <- model_training_and_testing( - train_data, test_data, taus, params_list, lp_solver, - lambda, test_date, geo, value_type = value_type, test_lag = test_lag - ) - test_data <- prediction_results[[1]] - coefs <- prediction_results[[2]] - test_data <- evaluate(test_data, taus) - test_data$test_date <- test_date - coefs$test_date <- test_date - coefs$test_lag <- test_lag - coefs$geo_value <- geo - - res_list[[res_indx]] = test_data - coef_df_list[[res_indx]] = coefs - res_indx = res_indx+1 - export_test_result(test_data, coefs, export_dir, - geo, test_lag) - }# End for test lags - }# End for test date list - result_df = do.call(rbind, res_list) - coefs_df = do.call(rbind.fill, coef_df_list) - export_test_result(result_df, coefs_df, export_dir, geo) - }# End for geo list -} - -#' Main function to correct a single local signal -#' -#' @template input_dir-template -#' @template export_dir-template -#' @param test_start_date Date or string in the format "YYYY-MM-DD" to start -#' making predictions on -#' @param test_end_date Date or string in the format "YYYY-MM-DD" to stop -#' making predictions on -#' @template num_col-template -#' @template denom_col-template -#' @template value_type-template -#' @template training_days-template -#' @template testing_window-template -#' @template lambda-template -#' @template ref_lag-template -#' @template lp_solver-template -#' -#' @importFrom readr read_csv -#' -#' @export -main_local <- function(input_dir, export_dir, - test_start_date, test_end_date, - num_col, denom_col,value_type = c("count", "fraction"), - training_days = TRAINING_DAYS, testing_window = TESTING_WINDOW, - lambda = LAMBDA, ref_lag = REF_LAG, lp_solver = LP_SOLVER) { - value_type <- match.arg(value_type) - - # Check input data - df = read_csv(input_dir) - - # Check data type and required columns - result <- validity_checks(df, value_type, num_col, denom_col) - df <- result[["df"]] - value_cols <- result[["value_cols"]] - - # Get test date list according to the test start date - if (is.null(test_start_date)) { - test_start_date = max(df$issue_date) - } else { - test_start_date = as.Date(test_start_date) - } - - if (is.null(test_end_date)) { - test_end_date = max(df$issue_date) - } else { - test_end_date = as.Date(test_end_date) - } - - test_date_list = seq(test_start_date, test_end_date, by="days") - - # Check available training days - training_days_check(df$issue_date, training_days) - - run_backfill_local(df, export_dir, - test_date_list, value_cols, value_type, - TAUS, TEST_LAGS, training_days, testing_window, - ref_lag, lambda, lp_solver) -} From 5ee9860d78719effb2479d7a0a8ff3aca2e7528d Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Mon, 26 Sep 2022 16:10:20 -0400 Subject: [PATCH 138/145] remove tooling script runner --- Backfill_Correction/correct_local_signal.R | 34 
---------------------- 1 file changed, 34 deletions(-) delete mode 100644 Backfill_Correction/correct_local_signal.R diff --git a/Backfill_Correction/correct_local_signal.R b/Backfill_Correction/correct_local_signal.R deleted file mode 100644 index 94208b7f9..000000000 --- a/Backfill_Correction/correct_local_signal.R +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env Rscript - -## Run backfill corrections on a single signal + geo type combination from local data. -## -## Usage: -## -## Rscript correct_local_signal.R [options] - -suppressPackageStartupMessages({ - library(delphiBackfillCorrection) - library(argparser) -}) - - -parser <- arg_parser(description='Run backfill corrections on a single signal + geo type combination from local data') -parser <- add_argument(parser, arg="--input_dir", type="character", help = "Path to the input file") -parser <- add_argument(parser, arg="--export_dir", type="character", default = "../export_dir", help = "Pth to the export directory") -parser <- add_argument(parser, arg="--test_start_date", type="character", help = "Should be in the format as 'YYYY-MM-DD'") -parser <- add_argument(parser, arg="--test_end_date", type="character", help = "Should be in the format as 'YYYY-MM-DD'") -parser <- add_argument(parser, arg="--testing_window", type="integer", default = 1, help = "The number of issue dates for testing per trained model") -parser <- add_argument(parser, arg="--value_type", type="character", default = "fraction", help = "Can be 'count' or 'fraction'") -parser <- add_argument(parser, arg="--num_col", type="character", default = "num", help = "The column name for the numerator") -parser <- add_argument(parser, arg="--denum_col", type="character", default = "den", help = "The column name for the denominator") -parser <- add_argument(parser, arg="--lambda", type="character", default = 0.1, help = "The parameter lambda for the lasso regression") -parser <- add_argument(parser, arg="--training_days", type="integer", default = 270, help = "The number of issue dates used for model training") -parser <- add_argument(parser, arg="--ref_lag", type="integer", default = 60, help = "The lag that is set to be the reference") -args = parse_args(parser) - -main_local(args.input_dir, args.export_dir, - args.test_start_date, args.test_end_date, - args.num_col, args.denom_col, - args.value_type, - args.training_days, args.testing_window, - args.lambda, args.ref_lag) From 46fdfd93bbb0f9090fc568139e93a3c1a5709831 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Wed, 28 Sep 2022 13:14:18 -0400 Subject: [PATCH 139/145] drop uppercase from dir name --- backfill_corrections/.Rhistory | 10 + .../Makefile | 0 .../README.md | 0 ...port needed from the engineering side.docx | Bin .../delphiBackfillCorrection/DESCRIPTION | 21 ++ .../delphiBackfillCorrection/LICENSE | 0 .../delphiBackfillCorrection/NAMESPACE | 0 .../R/beta_prior_estimation.R | 0 .../delphiBackfillCorrection/R/constants.R | 0 .../R/delphiBackfillCorrection.R | 0 .../delphiBackfillCorrection/R/io.R | 0 .../delphiBackfillCorrection/R/main.R | 273 ++++++++++++++++++ .../delphiBackfillCorrection/R/model.R | 259 +++++++++++++++++ .../R/preprocessing.R | 0 .../delphiBackfillCorrection/R/tooling.R | 178 ++++++++++++ .../delphiBackfillCorrection/R/utils.R | 0 .../man-roxygen/covariates-template.R | 0 .../man-roxygen/denom_col-template.R | 0 .../man-roxygen/df-template.R | 0 .../man-roxygen/export_dir-template.R | 0 .../man-roxygen/file_type-template.R | 0 
.../man-roxygen/geo-template.R | 0 .../man-roxygen/geo_level-template.R | 0 .../man-roxygen/indicator-template.R | 0 .../man-roxygen/input_dir-template.R | 0 .../man-roxygen/lag_col-template.R | 0 .../man-roxygen/lambda-template.R | 0 .../man-roxygen/lp_solver-template.R | 0 .../man-roxygen/make_predictions-template.R | 0 .../man-roxygen/num_col-template.R | 0 .../man-roxygen/params-template.R | 0 .../man-roxygen/ref_lag-template.R | 0 .../man-roxygen/refd_col-template.R | 0 .../man-roxygen/signal-template.R | 0 .../man-roxygen/signal_suffix-template.R | 0 .../man-roxygen/signal_suffixes-template.R | 0 .../man-roxygen/taus-template.R | 0 .../man-roxygen/test_lag-template.R | 0 .../man-roxygen/testing_window-template.R | 0 .../man-roxygen/time_col-template.R | 0 .../man-roxygen/train_data-template.R | 0 .../man-roxygen/train_models-template.R | 0 .../man-roxygen/training_days-template.R | 0 .../man-roxygen/value_col-template.R | 0 .../man-roxygen/value_type-template.R | 0 .../man/add_7davs_and_target.Rd | 0 .../man/add_dayofweek.Rd | 0 .../man/add_params_for_dates.Rd | 0 .../delphiBackfillCorrection/man/add_shift.Rd | 0 .../man/add_sqrtscale.Rd | 0 .../man/add_weekofmonth.Rd | 0 .../man/create_dir_not_exist.Rd | 0 .../man/create_name_pattern.Rd | 0 .../man/data_filteration.Rd | 0 .../delphiBackfillCorrection/man/delta.Rd | 0 .../man/est_priors.Rd | 0 .../delphiBackfillCorrection/man/evaluate.Rd | 0 .../man/export_test_result.Rd | 0 .../man/fill_missing_updates.Rd | 0 .../delphiBackfillCorrection/man/fill_rows.Rd | 0 .../delphiBackfillCorrection/man/frac_adj.Rd | 0 .../man/frac_adj_with_pseudo.Rd | 0 .../man/generate_filename.Rd | 0 .../delphiBackfillCorrection/man/get_7dav.Rd | 0 .../man/get_files_list.Rd | 0 .../delphiBackfillCorrection/man/get_model.Rd | 0 .../man/get_populous_counties.Rd | 0 .../man/get_weekofmonth.Rd | 0 .../delphiBackfillCorrection/man/main.Rd | 0 .../man/main_local.Rd | 0 .../man/model_training_and_testing.Rd | 0 .../delphiBackfillCorrection/man/objective.Rd | 0 .../delphiBackfillCorrection/man/read_data.Rd | 0 .../man/read_params.Rd | 0 .../man/run_backfill.Rd | 0 .../man/run_backfill_local.Rd | 0 .../man/subset_valid_files.Rd | 0 .../man/training_days_check.Rd | 0 .../man/validity_checks.Rd | 0 .../unit-tests/testthat.R | 0 .../unit-tests/testthat/helper-relativize.R | 0 .../testthat/params-run.json.template | 0 .../testthat/params-test.json.template | 0 .../testthat/test-beta_prior_estimation.R | 0 .../unit-tests/testthat/test-io.R | 0 .../unit-tests/testthat/test-model.R | 0 .../unit-tests/testthat/test-preprocessing.R | 132 +++++++++ .../unit-tests/testthat/test-utils.R | 0 .../00install.out | 11 + .../delphiBackfillCorrection-manual.pdf | Bin 0 -> 120174 bytes .../delphiBackfillCorrection/DESCRIPTION | 22 ++ .../delphiBackfillCorrection/INDEX | 65 +++++ .../delphiBackfillCorrection/LICENSE | 2 + .../delphiBackfillCorrection/Meta/Rd.rds | Bin 0 -> 1959 bytes .../Meta/features.rds | Bin 0 -> 121 bytes .../delphiBackfillCorrection/Meta/hsearch.rds | Bin 0 -> 1809 bytes .../delphiBackfillCorrection/Meta/links.rds | Bin 0 -> 663 bytes .../delphiBackfillCorrection/Meta/nsInfo.rds | Bin 0 -> 827 bytes .../delphiBackfillCorrection/Meta/package.rds | Bin 0 -> 1081 bytes .../delphiBackfillCorrection/NAMESPACE | 65 +++++ .../R/delphiBackfillCorrection | 27 ++ .../R/delphiBackfillCorrection.rdb | Bin 0 -> 57192 bytes .../R/delphiBackfillCorrection.rdx | Bin 0 -> 1098 bytes .../delphiBackfillCorrection/help/AnIndex | 34 +++ .../delphiBackfillCorrection/help/aliases.rds | Bin 
0 -> 427 bytes .../help/delphiBackfillCorrection.rdb | Bin 0 -> 57034 bytes .../help/delphiBackfillCorrection.rdx | Bin 0 -> 994 bytes .../delphiBackfillCorrection/help/paths.rds | Bin 0 -> 563 bytes .../html/00Index.html | 93 ++++++ .../delphiBackfillCorrection/html/R.css | 120 ++++++++ .../tests/startup.Rs | 4 + .../tests/testthat.R | 4 + .../tests/testthat.Rout.fail | 33 +++ .../tests/testthat/helper-relativize.R | 13 + .../tests/testthat/params-run.json.template | 8 + .../tests/testthat/params-test.json.template | 3 + .../testthat/test-beta_prior_estimation.R | 130 +++++++++ .../tests/testthat/test-io.R | 118 ++++++++ .../tests/testthat/test-model.R | 173 +++++++++++ .../tests/testthat/test-preprocessing.R | 132 +++++++++ .../tests/testthat/test-utils.R | 136 +++++++++ .../tests/testthat/testthat-problems.rds | Bin 0 -> 16934 bytes .../delphiBackfillCorrection/DESCRIPTION | 0 .../delphiBackfillCorrection/LICENSE | 2 + .../delphiBackfillCorrection/NAMESPACE | 65 +++++ .../R/beta_prior_estimation.R | 212 ++++++++++++++ .../delphiBackfillCorrection/R/constants.R | 33 +++ .../R/delphiBackfillCorrection.R | 3 + .../delphiBackfillCorrection/R/io.R | 133 +++++++++ .../delphiBackfillCorrection/R/main.R | 0 .../delphiBackfillCorrection/R/model.R | 0 .../R/preprocessing.R | 229 +++++++++++++++ .../delphiBackfillCorrection/R/utils.R | 165 +++++++++++ .../man-roxygen/covariates-template.R | 1 + .../man-roxygen/denom_col-template.R | 1 + .../man-roxygen/df-template.R | 2 + .../man-roxygen/export_dir-template.R | 1 + .../man-roxygen/file_type-template.R | 2 + .../man-roxygen/geo-template.R | 2 + .../man-roxygen/geo_level-template.R | 2 + .../man-roxygen/indicator-template.R | 3 + .../man-roxygen/input_dir-template.R | 1 + .../man-roxygen/lag_col-template.R | 2 + .../man-roxygen/lambda-template.R | 1 + .../man-roxygen/lp_solver-template.R | 4 + .../man-roxygen/make_predictions-template.R | 2 + .../man-roxygen/num_col-template.R | 1 + .../man-roxygen/params-template.R | 4 + .../man-roxygen/ref_lag-template.R | 1 + .../man-roxygen/refd_col-template.R | 2 + .../man-roxygen/signal-template.R | 3 + .../man-roxygen/signal_suffix-template.R | 5 + .../man-roxygen/signal_suffixes-template.R | 5 + .../man-roxygen/taus-template.R | 2 + .../man-roxygen/test_lag-template.R | 1 + .../man-roxygen/testing_window-template.R | 2 + .../man-roxygen/time_col-template.R | 2 + .../man-roxygen/train_data-template.R | 1 + .../man-roxygen/train_models-template.R | 3 + .../man-roxygen/training_days-template.R | 1 + .../man-roxygen/value_col-template.R | 2 + .../man-roxygen/value_type-template.R | 1 + .../man/add_7davs_and_target.Rd | 28 ++ .../man/add_dayofweek.Rd | 24 ++ .../man/add_params_for_dates.Rd | 21 ++ .../delphiBackfillCorrection/man/add_shift.Rd | 20 ++ .../man/add_sqrtscale.Rd | 21 ++ .../man/add_weekofmonth.Rd | 20 ++ .../man/create_dir_not_exist.Rd | 14 + .../man/create_name_pattern.Rd | 23 ++ .../man/data_filteration.Rd | 20 ++ .../delphiBackfillCorrection/man/delta.Rd | 16 + .../man/est_priors.Rd | 97 +++++++ .../delphiBackfillCorrection/man/evaluate.Rd | 23 ++ .../man/export_test_result.Rd | 52 ++++ .../man/fill_missing_updates.Rd | 30 ++ .../delphiBackfillCorrection/man/fill_rows.Rd | 30 ++ .../delphiBackfillCorrection/man/frac_adj.Rd | 78 +++++ .../man/frac_adj_with_pseudo.Rd | 24 ++ .../man/generate_filename.Rd | 65 +++++ .../delphiBackfillCorrection/man/get_7dav.Rd | 20 ++ .../man/get_files_list.Rd | 28 ++ .../delphiBackfillCorrection/man/get_model.Rd | 39 +++ .../man/get_populous_counties.Rd | 11 + 
.../man/get_weekofmonth.Rd | 25 ++ .../delphiBackfillCorrection/man/main.Rd | 17 ++ .../man/main_local.Rd | 55 ++++ .../man/model_training_and_testing.Rd | 81 ++++++ .../delphiBackfillCorrection/man/objective.Rd | 20 ++ .../delphiBackfillCorrection/man/read_data.Rd | 14 + .../man/read_params.Rd | 55 ++++ .../man/run_backfill.Rd | 51 ++++ .../man/run_backfill_local.Rd | 55 ++++ .../man/subset_valid_files.Rd | 23 ++ .../man/training_days_check.Rd | 16 + .../man/validity_checks.Rd | 32 ++ .../unit-tests/testthat.R | 4 + .../unit-tests/testthat/cache/.gitignore | 0 .../unit-tests/testthat/helper-relativize.R | 13 + .../unit-tests/testthat/input/.gitignore | 0 .../unit-tests/testthat/output/.gitignore | 0 .../testthat/params-run.json.template | 8 + .../testthat/params-test.json.template | 3 + .../testthat/test-beta_prior_estimation.R | 130 +++++++++ .../unit-tests/testthat/test-io.R | 118 ++++++++ .../unit-tests/testthat/test-model.R | 173 +++++++++++ .../unit-tests/testthat/test-preprocessing.R | 0 .../unit-tests/testthat/test-utils.R | 136 +++++++++ .../delphiBackfillCorrection_1.0.tar.gz | Bin 0 -> 28675 bytes .../params.json.production.template | 0 .../params.json.template | 0 .../run.R | 0 212 files changed, 4701 insertions(+) create mode 100644 backfill_corrections/.Rhistory rename {Backfill_Correction => backfill_corrections}/Makefile (100%) rename {Backfill_Correction => backfill_corrections}/README.md (100%) rename {Backfill_Correction => backfill_corrections}/Support needed from the engineering side.docx (100%) create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/DESCRIPTION rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/LICENSE (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/NAMESPACE (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/R/beta_prior_estimation.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/R/constants.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/R/delphiBackfillCorrection.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/R/io.R (100%) create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/main.R create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/model.R rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/R/preprocessing.R (100%) create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/tooling.R rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/R/utils.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/covariates-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/denom_col-template.R (100%) rename {Backfill_Correction => 
backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/df-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/export_dir-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/file_type-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/geo-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/geo_level-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/indicator-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/input_dir-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/lag_col-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/lambda-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/lp_solver-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/make_predictions-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/num_col-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/params-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/ref_lag-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/refd_col-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/signal-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/signal_suffix-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/signal_suffixes-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/taus-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/test_lag-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/testing_window-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/time_col-template.R (100%) rename {Backfill_Correction => 
backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/train_data-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/train_models-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/training_days-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/value_col-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man-roxygen/value_type-template.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/add_7davs_and_target.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/add_dayofweek.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/add_params_for_dates.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/add_shift.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/add_sqrtscale.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/add_weekofmonth.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/create_dir_not_exist.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/create_name_pattern.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/data_filteration.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/delta.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/est_priors.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/evaluate.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/export_test_result.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/fill_missing_updates.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/fill_rows.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/frac_adj.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/frac_adj_with_pseudo.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/generate_filename.Rd (100%) rename {Backfill_Correction => 
backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/get_7dav.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/get_files_list.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/get_model.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/get_populous_counties.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/get_weekofmonth.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/main.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/main_local.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/model_training_and_testing.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/objective.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/read_data.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/read_params.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/run_backfill.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/run_backfill_local.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/subset_valid_files.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/training_days_check.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/man/validity_checks.Rd (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/unit-tests/testthat.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/unit-tests/testthat/helper-relativize.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/unit-tests/testthat/params-run.json.template (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/unit-tests/testthat/params-test.json.template (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/unit-tests/testthat/test-io.R (100%) rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/unit-tests/testthat/test-model.R (100%) create mode 100644 
backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R rename {Backfill_Correction => backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src}/delphiBackfillCorrection/unit-tests/testthat/test-utils.R (100%) create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00install.out create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection-manual.pdf create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/DESCRIPTION create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/INDEX create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/LICENSE create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/Rd.rds create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/features.rds create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/hsearch.rds create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/links.rds create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/nsInfo.rds create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/package.rds create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/NAMESPACE create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/R/delphiBackfillCorrection create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/R/delphiBackfillCorrection.rdb create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/R/delphiBackfillCorrection.rdx create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/AnIndex create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/aliases.rds create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/delphiBackfillCorrection.rdb create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/delphiBackfillCorrection.rdx create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/paths.rds create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/html/00Index.html create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/html/R.css create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/startup.Rs create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat.R create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat.Rout.fail create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/helper-relativize.R create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/params-run.json.template create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/params-test.json.template create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-beta_prior_estimation.R create mode 100644 
backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-io.R create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-model.R create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-preprocessing.R create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-utils.R create mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/testthat-problems.rds rename {Backfill_Correction => backfill_corrections}/delphiBackfillCorrection/DESCRIPTION (100%) create mode 100644 backfill_corrections/delphiBackfillCorrection/LICENSE create mode 100644 backfill_corrections/delphiBackfillCorrection/NAMESPACE create mode 100644 backfill_corrections/delphiBackfillCorrection/R/beta_prior_estimation.R create mode 100644 backfill_corrections/delphiBackfillCorrection/R/constants.R create mode 100644 backfill_corrections/delphiBackfillCorrection/R/delphiBackfillCorrection.R create mode 100644 backfill_corrections/delphiBackfillCorrection/R/io.R rename {Backfill_Correction => backfill_corrections}/delphiBackfillCorrection/R/main.R (100%) rename {Backfill_Correction => backfill_corrections}/delphiBackfillCorrection/R/model.R (100%) create mode 100644 backfill_corrections/delphiBackfillCorrection/R/preprocessing.R create mode 100644 backfill_corrections/delphiBackfillCorrection/R/utils.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/covariates-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/denom_col-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/df-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/export_dir-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/file_type-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/geo-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/geo_level-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/indicator-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/input_dir-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/lag_col-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/lambda-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/lp_solver-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/make_predictions-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/num_col-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/params-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/ref_lag-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/refd_col-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/signal-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/signal_suffix-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/signal_suffixes-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/taus-template.R create mode 100644 
backfill_corrections/delphiBackfillCorrection/man-roxygen/test_lag-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/testing_window-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/time_col-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/train_data-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/train_models-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/training_days-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/value_col-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man-roxygen/value_type-template.R create mode 100644 backfill_corrections/delphiBackfillCorrection/man/add_7davs_and_target.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/add_dayofweek.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/add_params_for_dates.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/add_shift.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/add_sqrtscale.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/add_weekofmonth.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/create_dir_not_exist.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/create_name_pattern.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/data_filteration.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/delta.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/est_priors.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/evaluate.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/export_test_result.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/fill_missing_updates.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/fill_rows.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/frac_adj.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/frac_adj_with_pseudo.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/generate_filename.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/get_7dav.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/get_files_list.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/get_model.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/get_populous_counties.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/get_weekofmonth.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/main.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/main_local.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/model_training_and_testing.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/objective.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/read_data.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/read_params.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/run_backfill.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/run_backfill_local.Rd create mode 100644 
backfill_corrections/delphiBackfillCorrection/man/subset_valid_files.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/training_days_check.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/man/validity_checks.Rd create mode 100644 backfill_corrections/delphiBackfillCorrection/unit-tests/testthat.R rename {Backfill_Correction => backfill_corrections}/delphiBackfillCorrection/unit-tests/testthat/cache/.gitignore (100%) create mode 100644 backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/helper-relativize.R rename {Backfill_Correction => backfill_corrections}/delphiBackfillCorrection/unit-tests/testthat/input/.gitignore (100%) rename {Backfill_Correction => backfill_corrections}/delphiBackfillCorrection/unit-tests/testthat/output/.gitignore (100%) create mode 100644 backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/params-run.json.template create mode 100644 backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/params-test.json.template create mode 100644 backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R create mode 100644 backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-io.R create mode 100644 backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-model.R rename {Backfill_Correction => backfill_corrections}/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R (100%) create mode 100644 backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-utils.R create mode 100644 backfill_corrections/delphiBackfillCorrection_1.0.tar.gz rename {Backfill_Correction => backfill_corrections}/params.json.production.template (100%) rename {Backfill_Correction => backfill_corrections}/params.json.template (100%) rename {Backfill_Correction => backfill_corrections}/run.R (100%) diff --git a/backfill_corrections/.Rhistory b/backfill_corrections/.Rhistory new file mode 100644 index 000000000..8756eb27e --- /dev/null +++ b/backfill_corrections/.Rhistory @@ -0,0 +1,10 @@ +getwd() +test_dir("delphiBackfillCorrection/unit-tests/testthat", package="delphiBackfillCorrection") +testthat::test_dir("delphiBackfillCorrection/unit-tests/testthat", package="delphiBackfillCorrection") +testthat::test_dir("delphiBackfillCorrection/unit-tests/testthat", package="delphiBackfillCorrection") +df_new +fake_df +refd_col +wm +rowSums(df_new[, -c(1:ncol(fake_df))]) +df_new[, -c(1:ncol(fake_df))] diff --git a/Backfill_Correction/Makefile b/backfill_corrections/Makefile similarity index 100% rename from Backfill_Correction/Makefile rename to backfill_corrections/Makefile diff --git a/Backfill_Correction/README.md b/backfill_corrections/README.md similarity index 100% rename from Backfill_Correction/README.md rename to backfill_corrections/README.md diff --git a/Backfill_Correction/Support needed from the engineering side.docx b/backfill_corrections/Support needed from the engineering side.docx similarity index 100% rename from Backfill_Correction/Support needed from the engineering side.docx rename to backfill_corrections/Support needed from the engineering side.docx diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/DESCRIPTION b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/DESCRIPTION new file mode 100644 index 000000000..dfdd673b4 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/DESCRIPTION 
@@ -0,0 +1,21 @@ +Package: delphiBackfillCorrection +Type: Package +Title: Correct signal outliers +Version: 1.0 +Date: 2022-08-24 +Author: Jingjing Tang +Maintainer: Jingjing Tang +Description: Takes auxiliary output from COVIDcast API data pipelines and + adjusts unusual values using a lasso-penalized quantile regression. + Output is used for research and model development. +License: file LICENSE +Depends: R (>= 3.5.0), +Imports: dplyr, plyr, readr, tibble, stringr, covidcast, quantgen, + arrow, evalcast, jsonlite, lubridate, tidyr, zoo, utils, rlang, + parallel +Suggests: knitr (>= 1.15), rmarkdown (>= 1.4), testthat (>= 1.0.1), + covr (>= 2.2.2) +RoxygenNote: 7.2.0 +Encoding: UTF-8 +NeedsCompilation: no +Packaged: 2022-09-26 15:03:17 UTC; nat diff --git a/Backfill_Correction/delphiBackfillCorrection/LICENSE b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/LICENSE similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/LICENSE rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/LICENSE diff --git a/Backfill_Correction/delphiBackfillCorrection/NAMESPACE b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/NAMESPACE similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/NAMESPACE rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/NAMESPACE diff --git a/Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/beta_prior_estimation.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/R/beta_prior_estimation.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/beta_prior_estimation.R diff --git a/Backfill_Correction/delphiBackfillCorrection/R/constants.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/constants.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/R/constants.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/constants.R diff --git a/Backfill_Correction/delphiBackfillCorrection/R/delphiBackfillCorrection.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/delphiBackfillCorrection.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/R/delphiBackfillCorrection.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/delphiBackfillCorrection.R diff --git a/Backfill_Correction/delphiBackfillCorrection/R/io.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/io.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/R/io.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/io.R diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/main.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/main.R new file mode 100644 index 000000000..37ae2f98e --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/main.R @@ -0,0 +1,273 @@ +#' Get backfill-corrected estimates for a single signal + geo 
combination
+#'
+#' @template df-template
+#' @template params-template
+#' @template refd_col-template
+#' @template lag_col-template
+#' @template signal_suffixes-template
+#' @template indicator-template
+#' @template signal-template
+#' @param training_end_date the most recent training date
+#'
+#' @importFrom dplyr %>% filter select group_by summarize across everything group_split
+#' @importFrom tidyr drop_na
+#' @importFrom rlang .data .env
+#'
+#' @export
+run_backfill <- function(df, params, training_end_date,
+                         refd_col = "time_value", lag_col = "lag",
+                         signal_suffixes = c(""),
+                         indicator = "", signal = "") {
+  df <- filter(df, .data$lag < params$ref_lag + 30) # a rough filtration to save memory
+
+  geo_levels <- params$geo_levels
+  if ("state" %in% geo_levels) {
+    # If state is included, do it last since state processing modifies the
+    # `df` object.
+    geo_levels <- c(setdiff(geo_levels, c("state")), "state")
+  }
+
+  for (geo_level in geo_levels) {
+    # Get the full list of locations of interest
+    if (geo_level == "state") {
+      # Drop county field and make new "geo_value" field from "state_id".
+      # Aggregate counties up to state level
+      df <- df %>%
+        dplyr::select(-.data$geo_value, geo_value = .data$state_id) %>%
+        dplyr::group_by(across(c("geo_value", refd_col, lag_col))) %>%
+        # Summarized columns keep original names
+        dplyr::summarize(across(everything(), sum))
+    }
+    if (geo_level == "county") {
+      # Keep only the 200 most populous (within the US) counties
+      top_200_geos <- get_populous_counties()
+      df <- filter(df, geo_value %in% top_200_geos)
+    }
+
+    test_data_list <- list()
+    coef_list <- list()
+
+    for (value_type in params$value_types) {
+      for (signal_suffix in signal_suffixes) {
+        key <- paste(value_type, signal_suffix)
+        test_data_list[[key]] <- list()
+        coef_list[[key]] <- list()
+      }
+    }
+
+    group_dfs <- group_split(df, geo_value)
+
+    # Build model for each location
+    for (subdf in group_dfs) {
+      geo <- subdf$geo_value[1]
+      min_refd <- min(subdf[[refd_col]])
+      max_refd <- max(subdf[[refd_col]])
+      subdf <- fill_rows(subdf, refd_col, lag_col, min_refd, max_refd)
+
+      for (signal_suffix in signal_suffixes) {
+        # For each suffix listed in `signal_suffixes`, run the training/testing
+        # process again. The main use case is quidel, which has overall and
+        # age-based signals.
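+        # For example, with hypothetical settings params$num_col = "Total" and
+        # signal_suffix = "age_0_17", the numerator column used below would be
+        # "Total_age_0_17"; an empty suffix leaves the column names unchanged.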
+        if (signal_suffix != "") {
+          num_col <- paste(params$num_col, signal_suffix, sep = "_")
+          denom_col <- paste(params$denom_col, signal_suffix, sep = "_")
+        } else {
+          num_col <- params$num_col
+          denom_col <- params$denom_col
+        }
+
+        for (value_type in params$value_types) {
+          key <- paste(value_type, signal_suffix)
+          # Handle different signal types
+          if (value_type == "count") { # For counts data only
+            combined_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col)
+            combined_df <- add_7davs_and_target(combined_df, "value_raw", refd_col, lag_col)
+
+          } else if (value_type == "fraction") {
+            combined_num_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col)
+            combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", refd_col, lag_col)
+
+            combined_denom_df <- fill_missing_updates(subdf, denom_col, refd_col, lag_col)
+            combined_denom_df <- add_7davs_and_target(combined_denom_df, "value_raw", refd_col, lag_col)
+
+            combined_df <- merge(
+              combined_num_df, combined_denom_df,
+              by=c(refd_col, "issue_date", lag_col, "target_date"), all.y=TRUE,
+              suffixes=c("_num", "_denom")
+            )
+          }
+          combined_df <- add_params_for_dates(combined_df, refd_col, lag_col)
+          combined_df <- combined_df %>% filter(.data$lag < params$ref_lag)
+
+          geo_train_data <- combined_df %>%
+            filter(.data$issue_date < training_end_date) %>%
+            filter(.data$target_date <= training_end_date) %>%
+            filter(.data$target_date > training_end_date - params$training_days) %>%
+            drop_na()
+          geo_test_data <- combined_df %>%
+            filter(.data$issue_date %in% params$test_dates) %>%
+            drop_na()
+          if (nrow(geo_test_data) == 0) next
+          if (nrow(geo_train_data) <= 200) next
+
+          if (value_type == "fraction") {
+            # Use beta prior approach to adjust fractions
+            geo_prior_test_data = combined_df %>%
+              filter(.data$issue_date > min(params$test_dates) - 7) %>%
+              filter(.data$issue_date <= max(params$test_dates))
+            updated_data <- frac_adj(geo_train_data, geo_test_data, geo_prior_test_data,
+                                     indicator, signal, geo_level, signal_suffix,
+                                     params$lambda, value_type, geo,
+                                     training_end_date, params$cache_dir,
+                                     train_models = params$train_models,
+                                     make_predictions = params$make_predictions)
+            geo_train_data <- updated_data[[1]]
+            geo_test_data <- updated_data[[2]]
+          }
+          max_raw = sqrt(max(geo_train_data$value_raw))
+          for (test_lag in c(1:14, 21, 35, 51)) {
+            filtered_data <- data_filteration(test_lag, geo_train_data,
+                                              geo_test_data, params$lag_pad)
+            train_data <- filtered_data[[1]]
+            test_data <- filtered_data[[2]]
+
+            updated_data <- add_sqrtscale(train_data, test_data, max_raw, "value_raw")
+            train_data <- updated_data[[1]]
+            test_data <- updated_data[[2]]
+            sqrtscale <- updated_data[[3]]
+
+            covariates <- list(
+              Y7DAV, paste0(WEEKDAYS_ABBR, "_issue"),
+              paste0(WEEKDAYS_ABBR, "_ref"), WEEK_ISSUES, SLOPE, sqrtscale
+            )
+            params_list <- c(YITL, as.vector(unlist(covariates)))
+
+            # Model training and testing
+            prediction_results <- model_training_and_testing(
+              train_data, test_data, params$taus, params_list, params$lp_solver,
+              params$lambda, test_lag, geo, value_type, params$cache_dir,
+              indicator, signal, geo_level, signal_suffix, training_end_date,
+              train_models = params$train_models,
+              make_predictions = params$make_predictions
+            )
+
+            # Model objects are saved during training, so we only need to export
+            # output if making predictions/corrections
+            if (params$make_predictions) {
+              test_data <- prediction_results[[1]]
+              coefs <- prediction_results[[2]]
+              test_data <- evaluate(test_data, params$taus)
+
+              idx <- length(test_data_list[[key]]) + 1
+              test_data_list[[key]][[idx]] <- test_data
+              coef_list[[key]][[idx]] <- coefs
+            }
+          }# End for test lags
+        }# End for value types
+      }# End for signal suffixes
+
+      if (params$make_predictions) {
+        for (value_type in params$value_types) {
+          for (signal_suffix in signal_suffixes) {
+            key <- paste(value_type, signal_suffix)
+            test_combined <- bind_rows(test_data_list[[key]])
+            coef_combined <- bind_rows(coef_list[[key]])
+            export_test_result(test_combined, coef_combined,
+                               indicator, signal,
+                               geo_level, signal_suffix, params$lambda,
+                               training_end_date,
+                               value_type, export_dir=params$export_dir)
+          }
+        }
+      }
+
+    }# End for geo list
+  }# End for geo type
+}
+
+#' Perform backfill correction on all desired signals and geo levels
+#'
+#' @template params-template
+#'
+#' @importFrom dplyr bind_rows
+#' @importFrom parallel detectCores
+#' @importFrom stringr str_interp
+#'
+#' @export
+main <- function(params) {
+  if (!params$train_models && !params$make_predictions) {
+    message("both model training and prediction generation are turned off; exiting")
+    return(invisible(NULL))
+  }
+
+  if (params$train_models) {
+    # Remove all the stored models
+    files_list <- list.files(params$cache_dir, pattern="\\.model$", full.names = TRUE)
+    file.remove(files_list)
+  }
+
+  training_end_date <- as.Date(readLines(
+    file.path(params$cache_dir, "training_end_date.txt")))
+
+  ## Set default number of cores for mclapply to half of those available.
+  if (params$parallel) {
+    cores <- detectCores()
+
+    if (is.na(cores)) {
+      warning("Could not detect the number of CPU cores; parallel mode disabled")
+      params$parallel <- FALSE
+    } else {
+      options(mc.cores = min(params$parallel_max_cores, floor(cores / 2)))
+    }
+  }
+
+  # Loop over every indicator + signal combination.
+  for (input_group in INDICATORS_AND_SIGNALS) {
+    files_list <- get_files_list(
+      input_group$indicator, input_group$signal, params, input_group$sub_dir
+    )
+
+    if (length(files_list) == 0) {
+      warning(str_interp(
+        "No files found for indicator ${input_group$indicator} signal ${input_group$signal}, skipping"
+      ))
+      next
+    }
+
+    # Read in all listed files and combine
+    input_data <- lapply(files_list, read_data) %>% bind_rows
+
+    if (nrow(input_data) == 0) {
+      warning(str_interp(
+        "No data available for indicator ${input_group$indicator} signal ${input_group$signal}, skipping"
+      ))
+      next
+    }
+
+    # Check data type and required columns
+    for (value_type in params$value_types) {
+      result <- validity_checks(
+        input_data, value_type,
+        params$num_col, params$denom_col, input_group$name_suffix
+      )
+      input_data <- result[["df"]]
+    }
+
+    # Check available training days
+    training_days_check(input_data$issue_date, params$training_days)
+
+    # Perform backfill corrections and save result
+    run_backfill(input_data, params, training_end_date,
+                 indicator = input_group$indicator, signal = input_group$signal,
+                 signal_suffixes = input_group$name_suffix)
+
+    if (params$train_models) {
+      # Save the training end date to a text file.
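+      # The cache file holds a single date string (e.g. "2022-09-01") that the
+      # next run reads back with as.Date(readLines(...)) at the top of main().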
+ writeLines(as.character(TODAY), + file.path(params$cache_dir, "training_end_date.txt")) + } + } +} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/model.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/model.R new file mode 100644 index 000000000..48b02780d --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/model.R @@ -0,0 +1,259 @@ +#' Filtration for training and testing data with different lags +#' +#' @template test_lag-template +#' @param lag_pad lag padding for training +#' @param geo_train_data training data for a certain location +#' @param geo_test_data testing data for a certain location +#' +#' @importFrom rlang .data .env +#' +#' @export +data_filteration <- function(test_lag, geo_train_data, geo_test_data, lag_pad) { + if (test_lag <= 14){ + test_lag_pad=lag_pad + test_lag_pad1=0 + test_lag_pad2=0 + }else if (test_lag < 51){ + test_lag_pad=7 + test_lag_pad1=6 + test_lag_pad2=7 + }else { + test_lag_pad=9 + test_lag_pad1=8 + test_lag_pad2=9 + } + train_data = geo_train_data %>% + filter(.data$lag >= .env$test_lag - .env$test_lag_pad ) %>% + filter(.data$lag <= .env$test_lag + .env$test_lag_pad ) + test_data = geo_test_data %>% + filter(.data$lag >= .env$test_lag - .env$test_lag_pad1 ) %>% + filter(.data$lag <= .env$test_lag + .env$test_lag_pad2) + + return (list(train_data, test_data)) +} + +#' Add columns to indicate the scale of value at square root level +#' +#' @template train_data-template +#' @param test_data Data Frame for testing +#' @param max_raw the maximum value in the training data at square root level +#' @template value_col-template +#' +#' @export +add_sqrtscale<- function(train_data, test_data, max_raw, value_col) { + if (!(value_col %in% colnames(train_data))){ + stop("value raw does not exist in training data!") + } + + if (!(value_col %in% colnames(test_data))){ + stop("value raw does not exist in testing data!") + } + + sqrtscale = c() + sub_max_raw = sqrt(max(train_data[[value_col]])) / 2 + + for (split in seq(0, 3)){ + if (sub_max_raw < (max_raw * (split+1) * 0.1)) break + train_data[paste0("sqrty", as.character(split))] = 0 + test_data[paste0("sqrty", as.character(split))] = 0 + qv_pre = max_raw * split * 0.2 + qv_next = max_raw * (split+1) * 0.2 + + train_data[(train_data[[value_col]] <= (qv_next)^2) + & (train_data[[value_col]] > (qv_pre)^2), + paste0("sqrty", as.character(split))] = 1 + test_data[(test_data[[value_col]] <= (qv_next)^2) + & (test_data[[value_col]] > (qv_pre)^2), + paste0("sqrty", as.character(split))] = 1 + sqrtscale[split+1] = paste0("sqrty", as.character(split)) + } + return (list(train_data, test_data, sqrtscale)) +} + +#' Fetch model and use to generate predictions/perform corrections +#' +#' @template train_data-template +#' @param test_data Data frame for testing +#' @template taus-template +#' @template covariates-template +#' @template lp_solver-template +#' @template lambda-template +#' @template geo_level-template +#' @template geo-template +#' @template indicator-template +#' @template signal-template +#' @template signal_suffix-template +#' @template value_type-template +#' @template test_lag-template +#' @template train_models-template +#' @template make_predictions-template +#' @param model_save_dir directory containing trained models +#' @param training_end_date Most recent training date +#' +#' @importFrom stats predict coef +#' +#' @export 
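+#'
+#' @examples
+#' \dontrun{
+#' # Illustrative sketch only: the covariate names, directory, dates, and
+#' # indicator/signal labels below are hypothetical placeholders.
+#' res <- model_training_and_testing(
+#'   train_data, test_data, taus = TAUS,
+#'   covariates = c("value_7dav", "Mon_ref", "slope"),
+#'   lp_solver = "gurobi", lambda = 0.1, test_lag = 1,
+#'   geo = "pa", value_type = "count", model_save_dir = "./cache",
+#'   indicator = "chng", signal = "outpatient", geo_level = "state",
+#'   signal_suffix = "", training_end_date = "2022-09-01"
+#' )
+#' # Returns NULL if any quantile fails to train; otherwise res[[1]] is
+#' # `test_data` with added predicted_tau* columns and res[[2]] holds the
+#' # fitted coefficients for each tau.
+#' }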
+model_training_and_testing <- function(train_data, test_data, taus, covariates,
+                                        lp_solver, lambda, test_lag,
+                                        geo, value_type, model_save_dir,
+                                        indicator, signal,
+                                        geo_level, signal_suffix,
+                                        training_end_date,
+                                        train_models = TRUE,
+                                        make_predictions = TRUE) {
+  success = 0
+  coefs_result = list()
+  coef_list = c("intercept", paste(covariates, '_coef', sep=''))
+  for (tau in taus) {
+    tryCatch(
+      expr = {
+        model_file_name <- generate_filename(indicator, signal,
+                                             geo_level, signal_suffix, lambda,
+                                             training_end_date, geo,
+                                             value_type, test_lag, tau)
+        model_path <- file.path(model_save_dir, model_file_name)
+        obj <- get_model(model_path, train_data, covariates, tau,
+                         lambda, lp_solver, train_models=train_models)
+
+        if (make_predictions) {
+          y_hat_all = as.numeric(predict(obj, newx = as.matrix(test_data[covariates])))
+          test_data[[paste0("predicted_tau", as.character(tau))]] = y_hat_all
+
+          coefs_result[[success+1]] = coef(obj)
+        }
+
+        success = success + 1
+      },
+      error=function(e) {
+        warning(paste("Training failed for tau", tau, ":", conditionMessage(e)))
+      }
+    )
+  }
+  # Require a successful fit for every requested quantile before reporting.
+  if (success < length(taus)) {return (NULL)}
+  if (!make_predictions) {return (list())}
+
+  coef_combined_result = data.frame(tau=taus, geo=geo, test_lag=test_lag)
+  coef_combined_result[coef_list] = as.matrix(do.call(rbind, coefs_result))
+
+  return (list(test_data, coef_combined_result))
+}
+
+#' Evaluation of the test results based on the WIS score
+#' The WIS score calculation is based on the weighted_interval_score function
+#' from the `evalcast` package from Delphi
+#'
+#' @param test_data dataframe with a column containing the prediction results of
+#'     each requested quantile. Each row represents an update with a certain
+#'     (reference_date, issue_date, location) combination.
+#' @template taus-template
+#'
+#' @importFrom evalcast weighted_interval_score
+#'
+#' @export
+evaluate <- function(test_data, taus) {
+  n_row = nrow(test_data)
+  taus_list = as.list(data.frame(matrix(replicate(n_row, taus), ncol=n_row)))
+
+  # Calculate WIS
+  predicted_all = as.matrix(test_data[c("predicted_tau0.01", "predicted_tau0.025",
+                                        "predicted_tau0.1", "predicted_tau0.25",
+                                        "predicted_tau0.5", "predicted_tau0.75",
+                                        "predicted_tau0.9", "predicted_tau0.975",
+                                        "predicted_tau0.99")])
+  predicted_all_exp = exp(predicted_all)
+  predicted_trans = as.list(data.frame(t(predicted_all - test_data$log_value_target)))
+  test_data$wis = mapply(weighted_interval_score, taus_list, predicted_trans, 0)
+
+  return (test_data)
+}
+
+#' Train model using quantile regression with Lasso penalty, or load from disk
+#'
+#' @param model_path path to read model from or to save model to
+#' @template train_data-template
+#' @template covariates-template
+#' @param tau decimal quantile to be predicted. Values must be between 0 and 1.
+#' @template lp_solver-template
+#' @template lambda-template
+#' @template train_models-template
+#'
+#' @importFrom quantgen quantile_lasso
+#' @importFrom stringr str_interp
+get_model <- function(model_path, train_data, covariates, tau,
+                      lambda, lp_solver, train_models) {
+  if (train_models || !file.exists(model_path)) {
+    if (!train_models && !file.exists(model_path)) {
+      warning(str_interp(
+        "user requested use of cached model but file ${model_path} does not exist; training new model"
+      ))
+    }
+    # Quantile regression
+    obj <- quantile_lasso(as.matrix(train_data[covariates]),
+                          train_data$log_value_target, tau = tau,
+                          lambda = lambda, standardize = FALSE, lp_solver = lp_solver)
+
+    # Save model to cache.
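+    # Models are cached as ".model" files named by generate_filename(), e.g.
+    # (hypothetically) "2022-09-01_chng_outpatient_state_lambda0.1_pa_lag1_tau0.5.model".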
+    create_dir_not_exist(dirname(model_path))
+    save(obj, file=model_path)
+  } else {
+    # Load model from cache. load() restores the saved `obj` object into this
+    # environment; its return value is only the object's name, so it must not
+    # be reassigned to `obj`.
+    load(model_path)
+  }
+
+  return(obj)
+}
+
+#' Construct filename for model with given parameters
+#'
+#' @template indicator-template
+#' @template signal-template
+#' @template geo-template
+#' @template signal_suffix-template
+#' @template lambda-template
+#' @template value_type-template
+#' @template test_lag-template
+#' @template geo_level-template
+#' @param dw string, indicate the day of a week
+#' @param tau decimal quantile to be predicted. Values must be between 0 and 1.
+#' @param beta_prior_mode bool, indicate whether it is for a beta prior model
+#' @param model_mode bool, indicate whether the file name is for a model
+#' @param training_end_date the most recent training date
+#'
+#' @return path to file containing model object
+#'
+#' @importFrom stringr str_interp
+#'
+generate_filename <- function(indicator, signal,
+                              geo_level, signal_suffix, lambda,
+                              training_end_date="", geo="",
+                              value_type = "", test_lag="", tau="", dw="",
+                              beta_prior_mode = FALSE, model_mode = TRUE) {
+  if (lambda != "") {
+    lambda <- str_interp("lambda${lambda}")
+  }
+  if (test_lag != "") {
+    test_lag <- str_interp("lag${test_lag}")
+  }
+  if (tau != "") {
+    tau <- str_interp("tau${tau}")
+  }
+  if (beta_prior_mode) {
+    beta_prior <- "beta_prior"
+  } else {
+    beta_prior <- ""
+  }
+  if (model_mode) {
+    file_type <- ".model"
+  } else {
+    file_type <- ".csv"
+  }
+  components <- c(as.character(training_end_date), beta_prior,
+                  indicator, signal, signal_suffix,
+                  geo_level, lambda,
+                  geo, test_lag, dw, tau)
+
+  filename = paste0(
+    # Drop any empty strings.
+    paste(components[components != ""], collapse="_"),
+    file_type
+  )
+  return(filename)
+}
diff --git a/Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/preprocessing.R
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/R/preprocessing.R
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/preprocessing.R
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/tooling.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/tooling.R
new file mode 100644
index 000000000..d7cf36ff8
--- /dev/null
+++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/tooling.R
@@ -0,0 +1,178 @@
+#' Corrected estimates from a single local signal
+#'
+#' @template df-template
+#' @template export_dir-template
+#' @param test_date_list Date vector of dates to make predictions for
+#' @param value_cols character vector of numerator and/or denominator field names
+#' @template value_type-template
+#' @template taus-template
+#' @param test_lags integer vector of number of days ago to predict for
+#' @template training_days-template
+#' @template testing_window-template
+#' @template ref_lag-template
+#' @template lambda-template
+#' @template lp_solver-template
+#'
+#' @importFrom dplyr %>% filter
+#' @importFrom plyr rbind.fill
+#' @importFrom tidyr drop_na
+#' @importFrom rlang .data .env
+#'
+#' @export
+run_backfill_local <- function(df, export_dir, test_date_list, value_cols, value_type,
+                               taus = TAUS, test_lags = TEST_LAGS,
+                               training_days = TRAINING_DAYS, testing_window = TESTING_WINDOW,
+                               ref_lag = REF_LAG,
lambda = LAMBDA, lp_solver = LP_SOLVER) { + # Get all the locations that are considered + geo_list <- unique(df[df$time_value %in% test_date_list, "geo_value"]) + # Build model for each location + res_list = list() + res_indx = 1 + coef_df_list = list() + + for (geo in geo_list) { + subdf <- df %>% filter(.data$geo_value == .env$geo) %>% filter(.data$lag < .env$ref_lag) + min_refd <- min(subdf$time_value) + max_refd <- max(subdf$time_value) + subdf <- fill_rows(subdf, "time_value", "lag", min_refd, max_refd) + if (value_type == "count") { # For counts data only + combined_df <- fill_missing_updates(subdf, value_cols[1], "time_value", "lag") + combined_df <- add_7davs_and_target(combined_df, "value_raw", "time_value", "lag", ref_lag) + } else if (value_type == "fraction") { + combined_num_df <- fill_missing_updates(subdf, value_cols[1], "time_value", "lag") + combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", "time_value", "lag", ref_lag) + + combined_denom_df <- fill_missing_updates(subdf, value_cols[2], "time_value", "lag") + combined_denom_df <- add_7davs_and_target(combined_denom_df, "value_raw", "time_value", "lag", ref_lag) + + combined_df <- merge(combined_num_df, combined_denom_df, + by=c("time_value", "issue_date", "lag", "target_date"), all.y=TRUE, + suffixes=c("_num", "_denom")) + } + combined_df <- add_params_for_dates(combined_df, "time_value", "lag") + + for (test_date in test_date_list) { + geo_train_data = combined_df %>% + filter(.data$issue_date < .env$test_date) %>% + filter(.data$target_date <= .env$test_date) %>% + filter(.data$target_date > .env$test_date - .env$training_days) %>% + drop_na() + geo_test_data = combined_df %>% + filter(.data$issue_date >= .env$test_date) %>% + filter(.data$issue_date < .env$test_date + .env$testing_window) %>% + drop_na() + if (nrow(geo_test_data) == 0) next + if (nrow(geo_train_data) <= 200) next + if (value_type == "fraction") { + geo_prior_test_data = combined_df %>% + filter(.data$issue_date > .env$test_date - 7) %>% + filter(.data$issue_date <= .env$test_date) + + updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) + geo_train_data <- updated_data[[1]] + geo_test_data <- updated_data[[2]] + } + + max_raw = sqrt(max(geo_train_data$value_raw)) + for (test_lag in test_lags) { + filtered_data <- data_filteration(test_lag, geo_train_data, geo_test_data) + train_data <- filtered_data[[1]] + test_data <- filtered_data[[2]] + + updated_data <- add_sqrtscale(train_data, test_data, max_raw, "value_raw") + train_data <- updated_data[[1]] + test_data <- updated_data[[2]] + sqrtscale <- updated_data[[3]] + + covariates <- list( + Y7DAV, paste0(WEEKDAYS_ABBR, "_ref"), paste0(WEEKDAYS_ABBR, "_issue"), + WEEK_ISSUES, SLOPE, SQRTSCALE + ) + params_list <- c(YITL, as.vector(unlist(covariates))) + + # Model training and testing + model_path_prefix <- generate_model_filename_prefix( + indicator, signal, geo, signal_suffix, value_type, test_lag, tau, lambda) + prediction_results <- model_training_and_testing( + train_data, test_data, taus, params_list, lp_solver, + lambda, test_date, geo, value_type = value_type, test_lag = test_lag + ) + test_data <- prediction_results[[1]] + coefs <- prediction_results[[2]] + test_data <- evaluate(test_data, taus) + test_data$test_date <- test_date + coefs$test_date <- test_date + coefs$test_lag <- test_lag + coefs$geo_value <- geo + + res_list[[res_indx]] = test_data + coef_df_list[[res_indx]] = coefs + res_indx = res_indx+1 + export_test_result(test_data, coefs, 
export_dir, + geo, test_lag) + }# End for test lags + }# End for test date list + result_df = do.call(rbind, res_list) + coefs_df = do.call(rbind.fill, coef_df_list) + export_test_result(result_df, coefs_df, export_dir, geo) + }# End for geo list +} + +#' Main function to correct a single local signal +#' +#' @template input_dir-template +#' @template export_dir-template +#' @param test_start_date Date or string in the format "YYYY-MM-DD" to start +#' making predictions on +#' @param test_end_date Date or string in the format "YYYY-MM-DD" to stop +#' making predictions on +#' @template num_col-template +#' @template denom_col-template +#' @template value_type-template +#' @template training_days-template +#' @template testing_window-template +#' @template lambda-template +#' @template ref_lag-template +#' @template lp_solver-template +#' +#' @importFrom readr read_csv +#' +#' @export +main_local <- function(input_dir, export_dir, + test_start_date, test_end_date, + num_col, denom_col,value_type = c("count", "fraction"), + training_days = TRAINING_DAYS, testing_window = TESTING_WINDOW, + lambda = LAMBDA, ref_lag = REF_LAG, lp_solver = LP_SOLVER) { + value_type <- match.arg(value_type) + + # Check input data + df = read_csv(input_dir) + + # Check data type and required columns + result <- validity_checks(df, value_type, num_col, denom_col) + df <- result[["df"]] + value_cols <- result[["value_cols"]] + + # Get test date list according to the test start date + if (is.null(test_start_date)) { + test_start_date = max(df$issue_date) + } else { + test_start_date = as.Date(test_start_date) + } + + if (is.null(test_end_date)) { + test_end_date = max(df$issue_date) + } else { + test_end_date = as.Date(test_end_date) + } + + test_date_list = seq(test_start_date, test_end_date, by="days") + + # Check available training days + training_days_check(df$issue_date, training_days) + + run_backfill_local(df, export_dir, + test_date_list, value_cols, value_type, + TAUS, TEST_LAGS, training_days, testing_window, + ref_lag, lambda, lp_solver) +} diff --git a/Backfill_Correction/delphiBackfillCorrection/R/utils.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/utils.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/R/utils.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/utils.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/covariates-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/covariates-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/covariates-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/covariates-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/denom_col-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/denom_col-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/denom_col-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/denom_col-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/df-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/df-template.R 
similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/df-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/df-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/export_dir-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/export_dir-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/export_dir-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/export_dir-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/file_type-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/file_type-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/file_type-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/file_type-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/geo-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/geo-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/geo-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/geo-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/geo_level-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/geo_level-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/geo_level-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/geo_level-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/indicator-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/indicator-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/indicator-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/indicator-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/input_dir-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/input_dir-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/input_dir-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/input_dir-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/lag_col-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lag_col-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/lag_col-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lag_col-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/lambda-template.R 
b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lambda-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/lambda-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lambda-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/lp_solver-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lp_solver-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/lp_solver-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lp_solver-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/make_predictions-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/make_predictions-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/make_predictions-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/make_predictions-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/num_col-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/num_col-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/num_col-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/num_col-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/params-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/params-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/params-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/params-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/ref_lag-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/ref_lag-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/ref_lag-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/ref_lag-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/refd_col-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/refd_col-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/refd_col-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/refd_col-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal-template.R rename to 
backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal_suffix-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal_suffix-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal_suffix-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal_suffix-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal_suffixes-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal_suffixes-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/signal_suffixes-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal_suffixes-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/taus-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/taus-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/taus-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/taus-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/test_lag-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/test_lag-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/test_lag-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/test_lag-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/testing_window-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/testing_window-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/testing_window-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/testing_window-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/time_col-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/time_col-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/time_col-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/time_col-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/train_data-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/train_data-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/train_data-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/train_data-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/train_models-template.R 
b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/train_models-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/train_models-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/train_models-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/training_days-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/training_days-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/training_days-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/training_days-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/value_col-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/value_col-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/value_col-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/value_col-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man-roxygen/value_type-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/value_type-template.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man-roxygen/value_type-template.R rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/value_type-template.R diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_7davs_and_target.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_7davs_and_target.Rd similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man/add_7davs_and_target.Rd rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_7davs_and_target.Rd diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_dayofweek.Rd similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man/add_dayofweek.Rd rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_dayofweek.Rd diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_params_for_dates.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_params_for_dates.Rd similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man/add_params_for_dates.Rd rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_params_for_dates.Rd diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_shift.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_shift.Rd similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man/add_shift.Rd rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_shift.Rd diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd 
b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_sqrtscale.Rd similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man/add_sqrtscale.Rd rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_sqrtscale.Rd diff --git a/Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_weekofmonth.Rd similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man/add_weekofmonth.Rd rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_weekofmonth.Rd diff --git a/Backfill_Correction/delphiBackfillCorrection/man/create_dir_not_exist.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/create_dir_not_exist.Rd similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man/create_dir_not_exist.Rd rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/create_dir_not_exist.Rd diff --git a/Backfill_Correction/delphiBackfillCorrection/man/create_name_pattern.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/create_name_pattern.Rd similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man/create_name_pattern.Rd rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/create_name_pattern.Rd diff --git a/Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/data_filteration.Rd similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man/data_filteration.Rd rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/data_filteration.Rd diff --git a/Backfill_Correction/delphiBackfillCorrection/man/delta.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/delta.Rd similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man/delta.Rd rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/delta.Rd diff --git a/Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/est_priors.Rd similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man/est_priors.Rd rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/est_priors.Rd diff --git a/Backfill_Correction/delphiBackfillCorrection/man/evaluate.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/evaluate.Rd similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man/evaluate.Rd rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/evaluate.Rd diff --git a/Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/export_test_result.Rd similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/man/export_test_result.Rd rename to 
backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/export_test_result.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/fill_missing_updates.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/fill_missing_updates.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/fill_missing_updates.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/fill_missing_updates.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/fill_rows.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/fill_rows.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/fill_rows.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/fill_rows.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/frac_adj.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/frac_adj.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/frac_adj.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/frac_adj.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/frac_adj_with_pseudo.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/frac_adj_with_pseudo.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/frac_adj_with_pseudo.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/frac_adj_with_pseudo.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/generate_filename.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/generate_filename.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/generate_filename.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/generate_filename.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/get_7dav.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_7dav.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/get_7dav.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_7dav.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_files_list.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/get_files_list.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_files_list.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/get_model.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_model.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/get_model.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_model.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/get_populous_counties.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_populous_counties.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/get_populous_counties.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_populous_counties.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/get_weekofmonth.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_weekofmonth.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/get_weekofmonth.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_weekofmonth.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/main.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/main.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/main.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/main.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/main_local.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/main_local.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/main_local.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/model_training_and_testing.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/model_training_and_testing.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/model_training_and_testing.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/objective.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/objective.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/objective.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/objective.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/read_data.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/read_data.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/read_data.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/read_data.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/read_params.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/read_params.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/read_params.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/run_backfill.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/run_backfill.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/run_backfill.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/run_backfill_local.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/run_backfill_local.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/run_backfill_local.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/subset_valid_files.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/subset_valid_files.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/subset_valid_files.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/subset_valid_files.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/training_days_check.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/training_days_check.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/training_days_check.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/training_days_check.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/validity_checks.Rd
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/man/validity_checks.Rd
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/validity_checks.Rd
diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat.R
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat.R
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat.R
diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/helper-relativize.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/helper-relativize.R
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/helper-relativize.R
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/helper-relativize.R
diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/params-run.json.template b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/params-run.json.template
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/params-run.json.template
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/params-run.json.template
diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/params-test.json.template b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/params-test.json.template
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/params-test.json.template
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/params-test.json.template
diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R
diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-io.R
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-io.R
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-io.R
diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-model.R
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-model.R
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-model.R
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R
new file mode 100644
index 000000000..8bde8c68e
--- /dev/null
+++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R
@@ -0,0 +1,132 @@
+context("Testing preprocessing helper functions")
+
+refd_col <- "time_value"
+lag_col <- "lag"
+value_col <- "Counts_Products_Denom"
+min_refd <- as.Date("2022-01-01")
+max_refd <- as.Date("2022-01-07")
+ref_lag <- 7
+fake_df <- data.frame(time_value = c(as.Date("2022-01-03"), as.Date("2022-01-03"),
+                                     as.Date("2022-01-03"), as.Date("2022-01-03"),
+                                     as.Date("2022-01-04"), as.Date("2022-01-04"),
+                                     as.Date("2022-01-04"), as.Date("2022-01-05"),
+                                     as.Date("2022-01-05")),
+                      lag = c(0, 1, 3, 7, 0, 6, 7, 0, 7),
+                      Counts_Products_Denom=c(100, 200, 500, 1000, 0, 200, 220, 50, 300))
+wd <- c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat")
+wm <- c("W1_issue", "W2_issue", "W3_issue")
+
+
+test_that("testing rows filling for missing lags", {
+  # Make sure all reference dates have enough rows for updates
+  df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag)
+  n_refds <- as.numeric(max_refd - min_refd)+1
+
+  expect_equal(nrow(df_new), n_refds*(ref_lag+31))
+  expect_equal(df_new %>% drop_na(), fake_df)
+})
+
+
+test_that("testing NA filling for missing updates", {
+  # Make sure all the updates are valid integers
+
+  # Assuming the input data does not have enough rows for consecutive lags
+  expect_error(fill_missing_updates(fake_df, value_col, refd_col, lag_col),
+               "Risk exists in forward filling")
+
+  # Assuming the input data is already prepared
+  df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag)
+  n_refds <- as.numeric(max_refd - min_refd)+1
+  backfill_df <- fill_missing_updates(df_new, value_col, refd_col, lag_col)
+
+  expect_equal(nrow(backfill_df), n_refds*(ref_lag+31))
+
+  for (d in seq(min_refd, max_refd, by="day")) {
+    expect_true(all(diff(backfill_df[backfill_df[,refd_col]==d, "value_raw"])>=0 ))
+  }
+})
+
+
+test_that("testing the calculation of 7-day moving average", {
+  df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag)
+  df <- fill_missing_updates(df_new, value_col, refd_col, lag_col)
+  df$issue_date <- df[[refd_col]] + df[[lag_col]]
+  pivot_df <- df[order(df$issue_date, decreasing=FALSE), ] %>%
+    pivot_wider(id_cols=refd_col, names_from="issue_date",
+                values_from="value_raw")
+  pivot_df[is.na(pivot_df)] = 0
+  backfill_df <- get_7dav(pivot_df, refd_col)
+
+
+  output <- backfill_df[backfill_df[[refd_col]] == as.Date("2022-01-07"), "value_raw"]
+  expected <- colSums(pivot_df[, -1]) / 7
+  expect_true(all(output == expected))
+})
+
+test_that("testing the data shifting", {
+  shifted_df <- add_shift(fake_df, 1, refd_col)
+  shifted_df[, refd_col] <- as.Date(shifted_df[, refd_col]) - 1
+
+  expect_equal(fake_df, shifted_df)
+})
+
+
+test_that("testing adding columns for each day of a week", {
+  df_new <- add_dayofweek(fake_df, refd_col, "_ref", wd)
+
+  expect_equal(ncol(fake_df) + 7, ncol(df_new))
+  expect_true(all(rowSums(df_new[, -c(1:ncol(fake_df))]) == 1))
+  expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-03"), "Mon_ref"] == 1))
+  expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-05"), "Wed_ref"] == 1))
+})
+
+
+test_that("testing the calculation of week of a month", {
+  expect_equal(get_weekofmonth(as.Date("2021-12-31")), 1)
+  expect_equal(get_weekofmonth(as.Date("2022-01-01")), 1)
+  expect_equal(get_weekofmonth(as.Date("2022-01-02")), 1)
+  expect_equal(get_weekofmonth(as.Date("2022-01-09")), 2)
+
+  expect_equal(get_weekofmonth(as.Date("2022-09-01")), 1)
+  expect_equal(get_weekofmonth(as.Date("2022-09-04")), 2)
+  expect_equal(get_weekofmonth(as.Date("2022-09-24")), 4)
+  expect_equal(get_weekofmonth(as.Date("2022-09-25")), 1)
+
+  expect_equal(get_weekofmonth(as.Date("2022-10-01")), 1)
+  expect_equal(get_weekofmonth(as.Date("2022-10-02")), 1)
+  expect_equal(get_weekofmonth(as.Date("2022-10-09")), 2)
+  expect_equal(get_weekofmonth(as.Date("2022-10-16")), 3)
+  expect_equal(get_weekofmonth(as.Date("2022-10-23")), 4)
+  expect_equal(get_weekofmonth(as.Date("2022-10-30")), 1)
+
+})
+
+test_that("testing adding columns for each week of a month", {
+  df_new <- add_weekofmonth(fake_df, refd_col, wm)
+
+  expect_equal(ncol(fake_df) + 3, ncol(df_new))
+  expect_true(all(rowSums(df_new[, -c(1:ncol(fake_df))]) == 1))
+  expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-03"), "W2_issue"] == 1))
+})
+
+
+test_that("testing adding 7 day avg and target", {
+  df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag)
+  backfill_df <- fill_missing_updates(df_new, value_col, refd_col, lag_col)
+  df_new <- add_7davs_and_target(backfill_df, "value_raw", refd_col, lag_col, ref_lag)
+
+  # Existing columns:
+  # time_value: reference date
+  # value_raw: raw counts
+  # lag: number of days between issue date and reference date
+  # Added columns
+  # issue_date: report/issue date
+  # value_7dav: 7day avg of the raw counts
+  # value_prev_7dav: 7day avg of the counts from -14 days to -8 days
+  # value_target: updated counts on the target date
+  # target_date: the date ref_lag days after the reference date
+  # and 5 log columns
+  expect_equal(ncol(df_new), 3 + 10)
+  expect_equal(nrow(df_new), 7 * (ref_lag + 30 + 1))
+})
+
diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-utils.R
similarity index 100%
rename from Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-utils.R
rename to backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-utils.R
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00install.out b/backfill_corrections/delphiBackfillCorrection.Rcheck/00install.out
new file mode 100644
index 000000000..8e09ae745
--- /dev/null
+++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/00install.out
@@ -0,0 +1,11 @@
+* installing *source* package ‘delphiBackfillCorrection’ ...
+** using staged installation
+** R
+** byte-compile and prepare package for lazy loading
+** help
+*** installing help indices
+** building package indices
+** testing if installed package can be loaded from temporary location
+** testing if installed package can be loaded from final location
+** testing if installed package keeps a record of temporary installation path
+* DONE (delphiBackfillCorrection)
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection-manual.pdf b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection-manual.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..65f45b183ce8d809898987d8e7669d86b536e4ac
GIT binary patch
literal 120174
zw~5)OuEl94=mKz--oNF3ptYyvjo!zTyy#%n;RREvd2i~oLkmSPH32t~ZwzSy^DnZf zu0nzrT)}OQ#Tc;mC-k3Q#08ZO%c^&)Dw4N~5b0(awdlp1uQW^*0Ki#4&2Gz?-2c(` zx8BWM3iyetOB3%9fb%Cd5LhHRt#o(LqR-Z}T!Wq8P9GaM4O365_(W9&>l_$3;xFGl z@j=pkBlAO$Qd@eblYam0yN1`L_ZAqyD>mxc9}LvDX0V{A{DQFb@&s*86zS%v zCeCZ_$$3Por0wyC3_atg8fj5aQiy_&zdGB584K5!G>P0zfoGR8?9SB|sK}|K`qSd1 zI5tQ7JSD-6e@0T_v@EfVud=t-w|y-TJ{KX`=;iKafgW7YIW#(sWm~YGs&6_?NGh^Z zyYWR37&9rY5(&M^5V{O-Ixbp3PEQ$Dd@yVwWf~Ec;aPGkMvRN(?PN}~Tw?hWQk>`5aj3J8 zE%#hl2s)z&n95Td8ZYR}X6ix>d%d8J!kc-(e*So1gWBw?AdxyAGIIol#p{TD(VL&^ z-P4Ov=~Fm%5|=ZC1M3&A|5*HMgkc`cngyOv672q>>SHYM&lHaW$vAqJa<36)!@BOj zNJXRbEnFwK3NiaAVyD^sC8E`21FarEC=v(_T7Km0G)uBpp?yICq5`{?|8&cpkH z;VG?!ysG7B>tG;^%0Zy$`ERk(lM|hT)I-F^uPeE_zOu2QySj@zl{&j2J(y~*HVgJ& zqJ3z5YHWC7M`mekIOh$bM-LPLvQB+1ozsO?DzAY#-Fa&wIrG$i3 z06B@VN!@OijzLklgshRaCC_S575P+Xg0J{2C#^#SwCex2zHGzLQ zbic;H^zda}Rz`Sa#!z3SCIE0oGEFMe)81PEDpSAU#Q5yVUEH=c;HoPrznYR?;yYCv zo8Q_X?5wL_jkTZUZ&1Z1R~LI0`vwO)z^~{Ea>}1uE^X>)t)K8~t7lc%I@VuHa|_#p z4_Wcwh40|^N@x0p21l^AH21bIK52$Igw7{J5J&`q$S` zQ*FJU{)>@H!R6!yfON!nQd%45NAT3-Ze6`Z03NL`CnK57fpjb zQqw&xol>JcO$}%&+RxYXg|gtEU|9Y2_ zk#q4^qhGh{$(7V4bz|b#eQf&EbVDrid5wY6#T( zTAZ$pcJP^?0E?1uBkyEChYd!5mP%xjV?N}j`<<}uKWAd9#JiQPqOd~i8A_OTZeSnK z?oj}R?@R^7N9O6o_rNG_8#Q_+Vh+Y%Nz21sDE9OC^yckmVC7VF3+Au~5KBsno02sO zF-iNJ#5O#-{PO%L;&TI&d8J&#M)g*Ba%LP3vw+lc zEvw`#!9TvQyS||xUXpMTl|`N|p>Z+q)regM+Xi%G@Fg}M@C=B|tKb4EY53f!OKr9n!MFuO?nU$KS9>}*eIo12O9g^k&?BCa7io$V^`rJN(H2+N<{N>?EN_Uc6oNge0$F5-8 z6XL*|Rm?(CcRF>u2uy4gytHm~5QTa2Nm#FuVYyX}8Ehs^gteYpFn^h7U?aywVWwyu zFvp@`JYgh47-3%Wr4F(%%Dc>2fzV-l$k87olt|RFa)uUuv7MX*e3UeE|E~@_A#q>dUq5D8>fd&Sa73uX)tg#>0ea3KEHSP* zNcZk8u$w_L(AEhM#}aO z_2}@x?9wxj#z(rb#XC zKd^u3*avN=!+_}|8+Y|MG!&-bd}DKCJ49==ZrvzbrBM0kVcz>!W2G&W0D~C)9Fu?# z@r!k~uxg+5rjmqBIG%k(_|EH}*96srn%okrrGM7WRMA=vrBU}q=;<>`pR~fUuJ?vt zc=r=v2vM?FqqM{N#^uW!-DT+btf`eU5(-MDh~p|LIUKO}mao8tXwb$eNbYeb0p2JF zOzDyeTcAZv*M8Zy11xdclv@A6^WJm6F%Zs%$VmCq`V@w3N%tsuTspMZnWu&D*d2vG z45S|-Tv6feQXiqd_#u7fI&bEfT)PlNLb-3za0ORsp>Go_$cbd zNs3qlGi%Ab-e7pRfiqsJ4z9!joV9Y2&Pz^?ClvVJBHVjfUe&hRMHGt;`NGptAFDN4 z2tn>YtXTDNhB^nE6!a5)GOgkDn%9l2x#iGeK7&!*{&vaBIvwG{j^bOB?`?o__B$M%8D-sNuzp60{>^CC*Ilqo3?S^YT;C)1>I0{zX0}OX;yE*eQlRn;HV>dTwJyZ{r(Wi2HkXC`( zdFZ(8fCizX|@I2>apJ{@GE=mVzaZ3lOXdAUp z@jtO9S|Y(w4VkNw(z!zm9{yfAKh@;tKtl9N>-=0)O{kZ8i=F%ce994>qU%(rP~-Gm z@M|(Kh#HN&T9r&bd%NvZe-*$y0eJ?nZRBNE(0^Eg3_?kfCh`*zdvAEM{*oFz zd7nt0&u|>IO*=pmx2@to`TLZaoG=pO${_0>1T&Ez5->Z$NtqC z7pu5NQRqi%14|#IZ%j~V8@D_qg)KM%9>`<3p>F?sptN5M1R4ueQ{~?BCjD-^5qc>F z;GnnQ>hSH#u;7##2fYd;l4xEenEQllek4)4#prrf<@xChdcT{?QVD-0mVR#tro5&& zJC`&chN?wiA*%7J?Cb{K>9&`1kKJG+<_g=r+uS!RHRY?M7VGb z2g+#(_3ZmWbp>xifL`3);wT$D4w7T|6hGL&VEmPIGaoe#Z*Q zBVP+ir{}nn5lS8x5H`dE=D;4U)&W;^)HK>Gt?3+D8kdRrHl+x4Yo&vKoO%OQT%Ol#BoB;TIJ$ zsyOV3NH{Y-iC`PkwEJaUk4+; z8Z}p9yVJp})9Ip8d}(px3mT8Ktn)^WLrlaK*SnX@%P*+BI@P>z<(&iLHgQaamA>Un!+!cPDLJ=*vgo9 z+eRf$O4xN1dbpyjl6eUqbAOLN#U4GFWG^lZN6bTPvt`h7VJ{RPg}x)rL#b^exK_Eb zO0c4&u`T&fuf}s)R}=30#tJg38HyF0yO%o_#?5u09S;dRMiEI{|t(Y z5}h&ClLP3FK3Df$EqjZZ%-e>LwbjjAPrZWYZbA zWd{~J2NcPxK*8cR=rcSW5@iJ(__bF`+a5ZyYeq+lsdC~=G8V@aTghtsFe#tnG@gW# z>gKO1c##~fx07v@^9z%2AJ;CQPhHz+ghI*nj!D3G;iWLavL{?bl5~T-5o_$O#J`Hq zDv6@oU}d-_&eZX=(F-6$$-LHg5YxNNBPkc+pCshn1L(^l&IAhtK=QT$VA6|t+BTmF zwKU$uO5M!>#rh!H&E&5QXFvb;P`7GU6~4bEF7odCCcV7%-(%%ZAM)XE4_~}qK4$!oJxSzMC`MrIHYRV9#XI{9&UvzJ9<}Ga!7a~OK*%)oX|{td z3Om%3yBz5OiI@=>l-QMBc(Hi$x237uW5aF9bCSWLRjF(MPcty=xCahL6msgmZ^j=^ z7K0vLSg?V>E1N9QP-5R#k9gsA@Vyo1pmq#ItK0tchb8ku#p|+qzn9BB<=3$LUN>t} zP_PMOPLy5)^avsOB9!s$A?=&jK1f~40jm+d3+^cZDlSA@n=xk7oX{%ku>zcwvQ}jB==FA+XSOaw1o+LcCmF1- 
zT37*$3dTdSNA*M3m^0qnNQ?g@!(88^;!04CKV4dGmKVK5fhBg0x*?094${2)?D*iu zhma~TxPY zy&$-EqWK0a5W}0bHz=V%xPiql@T0_fhjH;N3 z?6SQ`0WXv#7Y{uU?_VYoa-gd*VY6h;W(69zO>-i-y?sH}HYf#qN|v@VZ#Nwp!%47D zxXaWk17Mbfm)CNW(ASI}oZybJB`38@S+es@<-8+`b=rH^GxK>)GSNx?Gg4cc+|#aU zk)G6DUdoPK!AuieV#~^sE|BZRu^3U)bfx$G?O|+y21rwg`(i>C-kR)O?q9j{(^arh zC+DUD?rX=SYQuI{Lm0O|W?^$Z3Q0fiKbTaTf;ks{L(__|uEbQBh%Pn;$p?q3h8vXE zOf{p0>HE){_rI%C;4~8K?WSiEkYbL!qWsB=Ib^(NtCJ9 zSn8#_9D`QqZgiR0<*SnlmrZ4LFHJYi@cRT76!B4ZDENie+Qc&h;7npucQdqSkX?J* z8!0G1Fz4O5pwi_o-7jk5BD5n1a43C)DUGU|@Gwv%gE>d$+@#0s7=XX6=UE zmln8%m)wY#ZV>3E`DKjAY!Ysl(An{ga{0lkij*QUJx1tvTZ3FphBepkUUf$hFmbnr zWtqL)FyPm*UOc(76K^GN2z0~p%F7ziA>e|ul|u*xNI4p6K1(oIq@1bs|9ieA&=C}z zzq`1fO31T`8jU-Gb0R_kURLY6eXY2dTj5?b^lNlOlx%RdqJ?d+R+|?4%Ad=~&a_<| zZ&qTPVGs(_`e)`hbmf7P{qxGr=uixZ?9x{=HsOZ<)kDC?{>mmYWjq4n;5qUTA;y-w z6l={!I!*~!x!R{vS0>AIGj|^DIpiXTeX$J;;*%*^$dnQ1#tvE!Eg$~9Wnz3C*D!L> zo4wt=sJvopML38s%CsOlI4F5_qOc9zEDh*vL3Cq$?nZSWc3Kz|38HajjMhBA%<>k4 z$UYR|3#cS=4hdJzr|b%R#4+BC-o()P?pk&=?4;mqaa{yY<=uQ0u%CicZU>5>@BU{m zH;mq-I9R@;4hq;yvwnrH162wrM2va zQeSKdaRzN)pEpW2j6asu`woEcQPCU5A4Q^r!}<^DK0y(5|j7@4LrIOQD-7f@$gj;c^TQPC^;y=&)ha7%EXD z?~HS|@Puhq-x8#W*zFNlxDsv34t3k%3LZh1hZj0qx*sOw@QB2(;yP+rp3z9#?OfTS z>vX%c?xy>>h(=bE7$DzW{Bm%(2jlTT7FNL%ns6V}5j%K&vZFsgmKw5`mX#L#PXxc3 zf#~{M$BkFyI~n+|K0IV-zNTt8vx!MFS>M`M5>JFMJ+TIg1tSxzJjIB1@B zOd6ow$d;$D%yKEAdAX8EzgO@^asQ>L2W4%;F@=lM9XZ-ppW@bt#MSBtww!wM$&g^G z99ILCnvgbNuPr~bqP!8l5IgZQS(19Aa4`X0c`T87EOW3=jC*?oe!<9XyAun;Vf9g0 zLXik5iI}E@-W6p|DokqEdyy!7#J)vrBw<^|H&qq3d-$#FfpguRSQ3@YEh(U#p$pa{ z`-H>|_R$ZQs?d|GiL$}~J1W$K9Ec}OpTGrj$m?#L zSgzP;YGz3qC!2=clJPASezqD1p(JmJ!@XKJi?3_M`C(-DL)Yhz15fr`v?Jwjl09Y5 zYg^B-HpQ!M9}(6*Ea`c*?y;*h_~Zmi70nG3h4Q6E4HCmGJRxq#jz4*>G$ZOVn$1b< zu@zXcC8}rJj=D?e{ck$tHz$(17b$4li4mBv+m%F# ziSxesIPb#NmMXeE26Epd(NS#({?#e-Kec^&r(ums-F^Z-Nf;!&ja zO?J)Wm6xx3(<|U`i4~iwFLZ zsdy0}QjT&ndtX-y3>ed}ITN*m(qm`wzS=z>!*MkbN=x+t$o9eM3v(|G678_ku!*Cg zs6hv5jD-vaG>K%j_p*c%eIGhbYCZntE+$mrmHV384zNp1QvZxENfdS3&M1wzl-t9ayx zCE{{4=$!m-o3|mJQ5DC>K}(HF_f@ZuxB$11qOs4FwGE`!QQ55!e8lL2WfEd%USMi^ z?JFN|8%2w1ixO9V(K(`amY?q^T7bz4Mn~1=?i}V$Ah-i2R-TC$4o~3AE`y;(Q+eAY zYY6yxT&c3IU(a})gV93q$h&>>NqFiaNKsur)^Yl0QCk5pOBkQ^C{Fo=>az-nEN=bW zXk@88`1KqOC=3E$yuUU1%4r!{RHQbk!Y6G!p2DBuy#PWU^OiG%fQR4$5jSAXxWfL^ zzdP5v0^dXeKRQvoV!+;}ENL=21}KvHAbA9@@z$z}`zN&=jfT36?=8L$ z)p7j^I06Jc#Z$9RzK^6T&|VQbSTHUjPjzg+$zbwl?wEfkp)ouOuowD!9svlG>dDii zY9rPTUSP7fgZil)3CA6Tf@g85Hn3C8a@O-Bt=ip1Z>mLR%ekapK;I=PBH;GobAH^h ziA8Vv`Ouq)N+EEq4ej&$x?@h!%ky#9Mw*Uv_Yyt-xdIn=blf^_{O7P4-LZpz1flI# z#A+8P{w$P1V$OfFGxN|I72I7+L~DLENDwPP5JxTcv5mj>5XxI-dM#dFXNrA9*Y|Ai z8|Vo|7s^dL!g*~f=LiGjlaJA{MMtsP49B}bQA$?bv%HJDP53;nJ%YJz(zPUTLQUlR z+SvaU5zegrN~7->WXZmlKU4(JC+NQ>MXW*QSS;r>uo_P8k8<5>4Gv>PQ>6OvH^A3Q zI;zzu-tZM)Z41Os;XAna-t?k!2R`rV4?2K!1~ccI0*$4aZc=S{g>6 z;A%Vsl$=Y*k9(;aBJl}YuB+E6D@y-RzaZz{2u+mvBz{{gP z<%y}?BjaYX4Os+_70VxO^F}l3CEceMpO#`;l>(cHT(U|kzAuDIeJD?Ahb?-(TVQF|LOWY_H8ch z^K2ULL!^nHn9ViTf!5gA=n3*iEbe>$YoPE~eK{jSQggIRC|A_BUnF`+!{Ns~$cl^e zWr(D{+wB(XHK$Atu9$LR$(_;}(;J-RF!CW57)f0B&lPvky+KPfINow=M;X|zyW0(3 ztDxE*M2!eK;2339k)epG5I^Ef90Q8GHY8X-%z7odA~0f6omJf65q*^^dXv4u%N-R( zgsWim0g-)PXvelq8J@w2-lcqS=jY@nh>Mqz|EA%}Zb8~opu|LC9Y*N$3xA?r+M|^`i!9|ByT6GfA(0sr4X!&=xd-g}s8txJ}h^xV9J%<*FoTySjhD1k*A#k=0|&hO$Nw1{@3#FU~!u<=_!?x%Mt` zc+L6vE;Q0SRT^M3MRgru2z*z}#li@s0Jm<)wW5M6u9l_$O6+BSbxtTzS>30%H`lar zqMP&##u{{JPkOFUU8HYXQS0v_eTaD(2Tt*oU9m#P1>mvvdkib8HVqnmNQ`iR!#@69 zmgaI2dY;)q(DV+~orjlHWD_Zm^-sGuqHTDT;I%Ki)a??Btf+X6irS}SULsF}ywkym zlfSvx?&W+%tOn?P5%4CS$iLV@A6v&gjp+680oAB73*jGAsb7zn)@!j~ 
z_=WHaQ0ta&2`qj4E|k9+>Sy{wb+>oYc@Rn3*IcVe8+&OEH=+J9H`M^H{9Bi^dYRxqXl)(C5Shtmw=(BDK~^ugi>tc_BETNI zc|h8i-k!!MF3chKG#jg6xZ&`l?l-8TM%E@y8~*V|?OF?-wF+@tbd(8au3L?1^|xYU z?6h)EJX@`aeXXi>efr4ArMs1XK%(E2stIE$7T4n*AF^;edrfo;Iugv4z z=Kb7*pyTtVKO#}pg~`$*_C`5&E-L%7pu1g-@U))b2=Xh?jj6UuWZU{flKD?=`Z|eT zYAOFHy&Dcq+wy@(`>HpYs!!cPLTCciHW{tKgsiC=v zFC`B6T98=}*$%|05kh}M?~X*Rf2*zw3gdwYo;(g0+G8zKSjIW0m@2j$j^4na5 z1_eNY6x_SPn4UYK$D>D8fR3G=!Kg{`;eOP|v-7ix(D)tKeazR}T9aWVVJSv&MFKFx0TiESOexK;-PpywulKI+O0sic zu!jw=zF}C{AVFBZnTec7SmN3Q)bkr1xby2{o`~9Fw-SK%&I;^g4t3Q2nBKZ(asQpv)d=Km_9^kXams6Q z$V>dna%2FU*LE1Yh~Hee$GXjyAqrWVV{ABL(&50!iq3>p*u5FU!VR`=>DeBI9xR2aYi7+}>OB_Q0npjzCD|B{5BFyeMq% zZ_ljp^6TR|z&JbEOF~}*V%IKKPFzMq4=j_CjUy}K7I@tL2VO_F&G`H0N3Zfcn3f8{?>xfsQ*wa1 zp1@{MGa|8P_2d2cO=;g`-1US&gN;_%2!2Q-6_x_jZ%LhWn4A=oDy{Wt))gE{-rxXy zr){=fh&Qe)GjsPgO_{(l`mO95L&#`(Eswhh?|C~7##S3NAJDngZ->&PXIg`ofLhm` ztfd_#A17T##C|ZUM^LCEZ#QDUhcf1ZVYVOuj)SWhJ&avApjzr5Ri0gu{F?;K;XK=> zk(&cuyaN)lmLZA9C*kR(<=9rxWVM>~HJ zoD)FXNmfvE4P|rSB5XKZ{0O~bu`R5( zi2e(EAm=a(jQLBk3Cg-qAvtkYDreLdcdVf3Efk321cO0CtH&tQ%3)qu%k0sSZJ*3gjO`qg-Ln@K`_$i!%% zRlJ{kaH9k7x~M;g5Mh$@SAI{@4Csos2|OH?{<1A7=PT623)Njnd2o6DGxl#Oz;6-G zV;^lFGW1#B-Yu*oYC;qse@Sb=+3loewwl)_N8B$P704@d_|o!5ia$~!h^ug%w?TD7 zsFF~SlMouG`+?+~CLC8*py@@cL|;rs)oV3+qYS|WH8J(l#f)VR%510F1gxwz`fqvL z@>i_NY-k1niF<#<_@m`HEa1bIr`lpl6W1P!8iW&m3kcI|*gJvBg(7}J)hQOmlQR~} zbcx?rB)sr}q@IV4YGLZ!Q!EIumMkg1(Z~k*EOLs`5X9Yd`Mq_!Qy zgQB|lXuQ8A*V~u(v=CZYiXr1k+#BcAV3q2`-=b&+BSroD{dUT z%gY{Sxo^k5xh(8A-?%gUN#^lBaLoGFMp0y2D%!$;*^9I^4oKP5{F*!V{MtpuoIY zmnWKjQhSnQ&22-5?){jA45?HE1mh^Y&7mzYGhk3OAYO*96G^ihIO9?S2^PVtA5a)* z$GK5^`hdEW6n|&%3ot3e*dvLlaL=xQKw#MyvMKJ`5R!izO310mG%pr`FBOw)I5m-O zEvI+RKWmM?cY~dK?nQf1dODi<*t===JpM*7-(pNFmwA^HN)EvRDHasdCib9j}ES0AmQ`4PW6y63mbGaFhm8ei^!pDJi+8k9dSLm&}W|j?XT$ z0zs&u^uyQ46xRLpumJ4W34w37Yt(C|m%Ct?)Pr(*b^lC}$33tXaxQ@w@KEB9z*eSq zQmXof47>&9XIC@1mf31l1JB|`0Z1TW~@nkQ4)<5 z0Fh5bHFvzy8j_VU)tM~}moWCyZ|l6umQU-^*U7_C@1WLf^uc)aM5EE`HKF)zW4O9B zV~l{fZR@XQlTo7XC6d4P%Yad8-b*1~M(X2u)u+=7to&TAoO07@H(8ywV{Sderj z*QWK!l{c~yXE>Ij&EAi3fEzW4Bg-u4d8yTa-ZCJ+UAi5Egp`(%QGDUWnroGksB$2v zDY|m*k8hM ze1vOK_~`1c=-YG%?R#Ce4PUU20>=CbL{`oyL{Z|>{eJTEs3$4{>YXS%bM<_WZ6n$p zhFja-J?h6^r7Kc@$$ODWDrw>!eJwu}Bn}1#z3?&g8nbqEEyS#FQkz9;g{zKE$68D1FqU2d_cRyA_V%)aFM&0k%WU zy0!zWw@i&UhJM;QdB96o-9eRa$=mge z2EH`^Fr-L{uSr3e`b9aD8+8ycWP{QRAD^NaX90lEGidQir$Ea#llfdtAyOZl;U|)LJ!>XY3VOO&rxNb^QE-pIj8lk!>^hj8=Oi1t zYJ#|Auc~z#KZyZGV^8$UvMJ>QdsQQ+Ib!sFnQz6?^bz9YGiXmbZd_CzZ|&+_B-60! 
zMG|}$&|&hw+9PDTIl2w;E$f#*DPrZhFDdjyujChBDAGp51jNp58@wH3Z*4JP2n^0ufgV|AN z&t=7ziBLRnehikd=iHW8{%L<~Q@B&I{#AsQW#Mw!&K1Mbq(zfd+)_Mx1#2D8xb5bf zl@D^%89l`Y*S;d6B3-dEjw(8ODv>}!Nof#ZV}1R`y%2K^(7W@-b7*_m5~-z!<#;V0 z>P$m)*^W&B*vn|d6^_gNm5;{o&9YgwduXZ}jS+X>)ktN5MpC~Oxa|SXtlDkA zR`zXXGJ*s*PHM#f2Jvs0=ZHqg2eM2S3h1;sqfj{a^@lD)nkXhc$iOMN)r@Q1hpi1Fw zKnGA$B-vuivx5jMBo_5_|1^UM_(;~%o&{Zg64&l5J*Ukz_!o?50kmk)P z6)h94;R#)BG0*Q)uV_}ea?kBKW0))dF6a?IuXRQ-*9*1=JWBSlMB zqt*95<*|2qi#K`I!Uce#wR2XWKX9;XlI*0=->k)X26D&B|sJ+ejNHk1#P_yl8_ z!4$K=F(<{Su&oH`hYY46pvJ?^RkKYdObj3z4D5&W5)ki;T8=WD6c9zmNGvQKOn@rLa@+)D$DASK*3nEp zQ{|-&gwx4eG{p*~wrnZCBbw2*>Wdbaic5KJ7|;+WTSRCHLmHRvnSjHl9WMo*QZpv~)=0Loui;ar{-Sr+4Q zk#i$oq~%gkbS1thOsve!fgYPGP{!j}fygIk_-5*|j$|-**026!6{SH#0U7pOmnGPO z1;I=n47vKphgma@Xt6>T7P&b0*J8Q+i@~Qd2K|!NFko(m$e$ zj0I9|SaJ*Dso8SpRmuBbsoUV8_W(?+T?Mg6n(0C*sSCWYy`ZbwlhmAEuPy`Pln#i* z1>S*RqOV%sj%*0eX}y7=jeML@*M!vde*k(6N4EL?dQC_$vX`Qrtlh7a+l>0-B}<2< zcTvd|e#qTS_)5+uJ9-=Mp~4C(XlNT&dH*=#TWAd8T|bkr&%kFpE@oS+A&RB72|f|2 z9Ueg;DO4K}zWShzw@g(fl?R?T~Vl{;H9aUBntLSb-<0DUDAMa}=0{}DRQr+ui=UgAWyRRgk zwqeX3^K+vnBNX~poq~BmSSM~>>r_TLxH*QkLX|3}z7u()0-fttv!w~?p96i8hw{2O zA}ciMoB&6f4h8+Gd3~P`9WQK3GwZ)bDUAnscZ4hdYGwXGKLT-{jq)oUGIqyOGV^g7 zlF}8G2!-)Qi^Hd(L#EnI`g&-LM6WR`fl5Z_6yv@+CQQ~_BV!K3pT2~*hL^9teSpJ% zAOm+yYg=LLI5#uH^GZ{u>m#~k`8L_!=7+1*icz#g(U4jp0H`c9zd&5aRcF->yx|Mp zJj~1p*8~CzviiGQqh=XgTJ{8JrP#JCFyFZP!l{IolubSscKyO%1ydJ+4srswImmzp zbUSv|CX~#U(#h9t1%G{pE-ikdfFuSFDveq$MZ#-ZzQ8k5=MdC~Q^K{XUV|e8>d4BS z3#xi|=W}#1|6wGC^xMn{2O10!Ft=^rj&cX8JDI-V4bT&V>@fUiM0Z1k^J92JONhi} zr<49jP2>XlqLuaoLl&ETP~@0p3ZMO0znZiilm4k|{NaZ@#sZbQcu9tm zF2)G-S_f<#Je-E)k|)|h?G`03GYlf0y*~!zb!>ub93wnT9!kp#y>L=bWW^eCC1~br zj9H}iWjkFfD6%5#o8jt8$d~w~|4ec=fSl4~=|g8lH}17SzfHhY!645J*o=*`hsIue zP8*A3E=x=0CxH-nYCGNQGF2}-dXf10d<@M0lcs`*)ho4h= zOkV`(32hJwCpbprKCm=z%(|1V>=({3Bq4W zvSLltB0Um7g%CTv-9`VHY1VdZhA$lD5~1V@=XM3(z3Z*<$`Wk>aDB2}GW)}Pakg3; zc58QK8cOG;4XSo1h81QW=Ga(HEKww^V%M=}QOWIKf(I|GKs1x49Ol0ynVWa`!Ycat z-8!r*9p^5SRN;)|rfLW4_@5PQ7VmA?Gi)PbG6OgaPt}I{s+x8%lOcw!3AyJ$Ht?+T(#3HM6}W-8JG*J|g*^n7Sw_bOihAui$mnM=C)4%}ZsFh*l_UJ$@x z95vwaQWvrq8}HBWFQSYjF(Leqtb1-}`IN?N9eITKn!oJ>32-6plCx$3?cFHB#6>E9Tb($g)e&A7^`b>^xN02kyLF~`Qh2pa zswm?wfjK44*K59j*^|bbMmG^P2VxH*&P_#A5W#j}~}AkkhTYUS%P12kfP4 zTW_35OmQhys~Kq|u4E|>rq2Q5bBIoO!^bilxXqny(7jwydWTz=Wdx?y)|>Q!d^k8P z!n>8w4m*8(wD}Ql|vOMWh_an+|j>S!kr4-r{@D8 z44CavO-u58U=pQ7acg+UYj2%U9x|mE9GU6h1r#UWg{kd_w)RGDi#E zP}I2e?feP;Nps|}XVJK5@j*Jo$=TgNV59JoH zBDWf^-i{OXZ=T|w*?xTxQkxU(AxcbqS7_o-GNsH-bcq717!97SdMIes+-p%oDxXKCZ9rwWiB6;Ykm=eUV1r^PmYRJ0`BEpHO(RULP)&`&V z`ML)vJE9I7IKGDFbMFAQa?ZP{Q>mN|igMU-_D^%ol5XYn=TOI{Q5bopj&*#@1Jf5~ zQV0OgC^hyO{IT}9Yf!0D>|Z5G?xSQ!kc#JRI)k`vr>dawY&LOos1g!E@zq&d?KQ?Z zx(|q?=^Q;VG1pz;XA;gEnQ$Woz$N7M`N2!1UU6NWxNBn7$SV$2R_44c?h-!>8z-2a zdP^VPD|#*vqJhBGhFBys&JGR+JXr-*{`E`QH7ik3Ll%7sTKiqz8Y|q~ioX=rF_Qp> zakz7JLvY_S{X#EvDjv5T;BRNcS5*w5m8u#?!-&K2==5($UJn1and9w&7meIu0SPTh z1CD>Vr|zqmNlEE^#=4=wdGRDW)6W8671&6;{vWG-g|a2FB#ZRUIsQDHhLK#mzh~LCK^i;8{ZX?X8@&6T13p%A zhfD#w9GG%d-WFt>qng!H#oayD$mt2(8K=4VH`istNPP_BnkY0v;bCY>{KT1hMiPky z+IbB91t&K^j&W`PjP+Z5d2$i3s=9cv)^r>&V-7kQ7&dlP;-lc=$sziRwBega6=PE+ zX~@q^0XwRsH{vmXUW0p*0M%xw(cIOhX(#!zgTuF&;*AO65&>lBWs46{Z&6Y2n|0v8Kfhl&2eP}msqMK3FCBRtGi$QD za8Ou+Tm`wM9D=^cNTDrj21sMJ=RPt>S;zlCm^kn*<(PyQH8NfaI`mKVO3J1yII|9~ zmrO%=MCw@^^p)sIeOm9^SRl^lGum5ujATXHE}1(>E#^2sds7!AR?B7HHJf<{q6xOSB4TB68WV=<+;V#{q^FvC+y# zM+%gJNDT$$Y`uOxil(_lQHv05T-c-N`)#q*@b@Y1rsttzJ^)=issk!HAVT)>vaj2R`}bih{a`ZbWdGvR`; zqH4f=L2anw1wz{kOmk@TyFEC?r45VOe)qcr%|;eAg5()fG`Ah=f~unI)o*oqL?k5$ 
z^OHqBJh~kW=_8Bb#MWF9-5ps}RBYIU$QwkCNf~3UUJ|IeU1Qv>%`4<=h3w6fvwe@b; zF-@SuZMStR>%*)$aF+h<7QR|jnxd$`DP+|~SY1*PehqmzJ#K=*t%7qFRclxG0A2)x zCn}t}9(&Pn3a zm3L^}?lpaoLn`KCbv(3QtG6SUafBf;^ov4D>qbBo^gzsBNtktW5~9h>u99YxEcxFu zRMdD{(k*bMz(N>af+ty+t`7vc#Sx5tmHC&!>g)VW>ibNOtv}XxC&wW!mLmf+=lSLh zkWH{26}iF|f$^IymKz#@woRq}fHxAyP{u`I@)0^M$oPJ1voUeY*7vA^EF%(6=)2sn z99Nsd`d<64QMD_xO_+oyV}(1 zRK;soGjqW2jf0;QhCq|M&^`l4yj=18Bh-5-C+{<-F7L|5$&EPtpr4qUwY6Q!VEe1m zu|_J|`0%?&pyLJhy&jUcIRHGeHTIKbjI$J$nh*D&LyfY(Ws~eznS}Fin;GL>;4|{h zm7rv3Ed4nT7p%AlM16w`EQ>;A8(NSY21d$m9@1oLj6R7947)nn8Fn0#iKA8UGrsyH zoUGx&LE|ocgqzL?hlPDjkz!{UA*Ra`CRK4V#te-|UL}-F0{fAnnaq~;FrH33UFmw? znYr^S>G`W1GcFIzK({ElyGnc;SJ6@-DK(exyp8CJMY4?M(Ndv_e70M$bsS2KOy>lp zD0i=OSUau2)G|hc_igD(+e}`8^J(Q#NvpX-Oo!olxb(IjG0KDxst!@v5VPf0YK#l< zQf|}f7dtk2L_xEC%orCHqttE{0UvuRxG^PyaJx`X_&KdfYMa&NtONFPkERI~oJ_N# zvn=z^R2771bg2_4s-GX%ig(6(zU%f0(oArpn)3sde-skAge?9g*&oY(j`Lfl3sN(gWp8?;^aLbyrChQ3IP|~22k>HB_l!7&@$8^nw8`js!CnNZJ_-GsaAb^57*oPSn3i6uCSSDI2u!Q*+iMYP0cP4)o-M9!edp+qISws!i z@#{uspej!s@@|%BCtQM_r-C^v`fIAz2| z0rN=T#R=vOBHqdLx#6=g|C-!1xaqLD(k8ETpHuv6%@hNSKPfzq@LV%_NoNZJWLB5k zmL1a`lG^~dZ zgw(r)16BtnDg*?LuEx)8h7FV(+kheTQ-*cs2mw;vnj|)a4J26wF}(pPg4E>U0wwz9 z0iwgZ4BG7}X{+mWifRLz1Y(FMg1ybg9bEu9e8v1vbT{X!_b0mh4O9o)JH9r#{ZtKE zg8)?51wIJ>${EDjQ8FM1{h#RWR-Mnl_NXepuC5+kPG5XY;J?2W$@Aai3rp*hPY$-2 zx43YQb-o?Yu62&@I`e|7xKp_2)(|Zoz^%c2df&r8OKyRNn39p1D>|2)ERq(tf>y0< zfmhXEw4fA$tfS+T*igW2t(|k7PTyD40Zac^PUGSl>iKzabvsl2Jqdhy4ic={P*CU6 zIn?Cn7h?zBB5Cx=OY8ich5+Q=60jCzAO9WH7LxOAg|xMkSr{PG4|i&acn1O`oJLhO zj!@%34D9;tK-2i~67{h|bQciQ`%QoU^&RzogY(Il@cRA2|IORngbP+=YyQFk^u5j; z_>bJg9MN;*Kh518aZqNLuN~jX%U-0~@9dSHon_!p+O2Qc%$&+K9i73;>g_*9#}<40 zCI{b`K<-HpKEZ(%h8nOofhE|7)ehgQ&6hz$7Er4MkEJ())!%wEgXl3#%+mCxyBdJUFA` z2Ks)uO;^<2>_gohn4BJfG(6rl3Gj(?D8F-YeETE}{uUklGO!?L)v&0e>G`S2c7Tde4~}iw-M38p#;n_G)Zr@u?M<)v3~+^Fr^ZR|I9e@o$|?MPbI z9kn%Rq#$_Y>+nPa(KSHPZb(qJd-v|L+MyX=BxXc$j~pc}=XKB?58n2hJTKMYsUO6u zB~#|3e%IqYP6?L9N>zLlmr--wwWwj|gym@fC9q-L);HtK+Iw9|a{sAP zQFG7$y0lKw5cBX=jsH#3&wE=1`vTyiK|wVWGOV|5lKJwTAZbOOa7|47$+jO$zQ_TY zaA~5=SiD6LW#CU0=YEVv?1t(T9K!%<^dI&icjF~bs*ml$Jzp|FEP03hiWCD9eY%Xx zkx#1a=DgkPS}MiRC$ydVpzD1>tx};W>{z9dqC(3BT7@jI7MD8rOg^pF_aP!6$7@Q+ zg!H%YV&R@SxwLw==Pz>)vKjYeA}xyD=UCW)_mGZAb^mcF?Vnv!dVuYv5Pi!B(_pz=5Z#k8*#tGO)@3*%cR@OV$)Wu+N3hB8xZyUsTeHHD~r3v1W-AR8zhM7MGSf)d1c!b z(^YW)xU2~Og>DnG!xn!CzSEimj4Jm~fhzFS$4YxfGM0vMnK;kOQVQ2?QjVcz0Z*Z{ z)O1T06$l)Yg4YmXP4n?1hf)}KEkM}$g)^PMEGDXp5-Z<^R(tY-#eV5m>qtLcS@HdJ0J98U4znelJVa?bm@z{!>^NxtNYCorcdr8hewptMr?ko;rno3 zx>xkc=g;5|1W6(HbM~!3_`@~w%Hl@&w(ecGF~OW>81a#VGGTbgMi2hAb%4+%P0Pt@ zL5*Xyv!;kzu53mWChLHv!LLT;fOK2|jIG*NMU)}jnG%*$^4Pmux@h`MAe+7&gOnvm6uw1O2|UgJNn(N%~dM=DJS z(0q3de&U084*4?V!9DXJnt>N>pW5RfD$|irOnP1^lqVw34gTwl`T^}>9*N)th~{VE zMx(7s{0kA%MchAi+&b@l7{xA{ZDotsYt;a9rZZg8MHS4H)F2DxbJ!_78SoLRmK9uinzJ2b}3$QvT+`&tSC+9uV8fgTf$y!oZ70#k| z{4~DOh(wQAG<;nvsuiCic9>{ewLA%&k^&NhU-a2Mtnx7&w&QB6yhCzd&oO8BG4sx0 z5RWL=Y2|!B`qHE2h1+Oiabd2j9^~xyBbF%nMmn0=zfoDy*%%m5p4Riv%t=>0IR>f? 
z%qf+~2~C}OU%H#zF;af>5XE!1xQ(Zj9A0vQPYn06_V6x<)frHv(_6|Mj4cUX))YgR zR@)*p!$;VAbcCuZ<|Z)3{k;E!JXhke;gbb7!TzVNv_FJ1Wn?QL58JPI#c9x={)YsY z${FI4RF|MdLW}7D`c}8?PY4vX9X+;=x26cH_?^eyDRaI=@Og=$l#n<>D%IuNGx6kI zbZ&q*vfwFrx0x)V*v?3AlT>2lrWU?jA*UL?MQ+&?w*!CTyeI4&F}NXQPiQ9CCQ!UU)^vOb_v zvOw^>w}OnH4cq*)l}b|;GbHAs!lHebFM8V&Bj$)hECXS}s3c60?s~-f`zOBEwW`(T zmT7fVdH--6z16_f8LmvO7WQ3fc8W!1lKy?ZFydtJbRI^sZhe}VVYZMCT$nf!TgeHt zAi}*34=uS8cBk~F-z3%svtgIB)MP*aUQP>Vg>+~Z@O{Xq-a3WL zWuNo=`Izy%Cr9#NNnEk`t5n811#K*>#y@vENvqVGdGxM#t4F^Bw`7_d{j;+s>pe>C zs0Yd|89tXC_Bm(CL6igqjLsSK_^d;41RP#b8>J8w{S|@7_};vI$L+eGttPP+sWJ$Y zn!BME&uDGONf=aIsPM-Jb6yMt5*_%9ma?E6DnCO1F)jsSB=SQ%!%Gh3A_z`*VP-KX zc;k?D?jlaGB(6JkX}*cGv@V~uPkw^Ao_z3PZ~6W2rJM$f!fokbBUrxiv$fm+(>Z-i zArpc2;=5ixuSBc(V;-bPfm#vEk`#6uPzm|u*yV-1;0-#t0=fYr>ECdMZa$`p={!h| zKb>XGs_+>;MozNy7TdG4{hX~T3S5mIVP=AYVf;Nxy|WwDC8x;cj2T7ea&@0uC*xL>@)Q(svXSux?;Jj?WG#-wg&aVTH5k{p8t{Ys&F*o*W zQoS%c`rfOBuh4jY1{r@tBNIOI;28|hN7^|ID!lpBG^DM>+c?TIYep|k+>+XUWBYX6 z81>B4UCsn;2i$fA7iyP6P2ajfWsZ=)#Lrx{oL%w;>C>$(dyrA&*>7xqSZ#!(yA5BN&SVSDY_%Z0hHRyKN=qwSOR{Zs+vxJaRw{bO30ns z>`rQvnbd^-I==42Omc7p^xtgsCv@O|bh}FP)P6q?s$}BGlczUR{wmw;VrL8M3hMggulK*_Dsc^AS8Jr{Omsg~WmM3@h?z#U1A4(&=oL?2ekgk84tP$FJ z$JarWYLycBeHwKqA_eDeNpg!`FY?09B6O-Po!~L;Y-5cI09|KDgN@b2lkHO|UA*#o zW2reZx#2&j6w9{(ZP_f)Vgnj?cv=7TFrVf(gcmLbL44<`L#XSDNe;ts6yOYSN@$_R z1O{V-Ufq>+TGH>#LZ>^C5dD7SZXmdN;s41**;`YwxY6oQ{Q7K`UH5R z;id*#CyI?xO0w+}^Pqozyn2V*PjZsVX_5D3X(iv2@0w%1r$yb5WrR;`co|L+iX+a!efOvv@cX!8rm_zOJT1%k}^Z=vL~zoy!he8WYEgLks-QQTEfk{ zg8il)$)gsR+Z6Ml%8ihAO|zwKwvfV=I@w?T%wkSc7s^`m=5qE4uSUZB+7HJwy7Qr% zDhk_~Iyz#H0U#>a+^%Hz8sSVgq2?yV<;H*x76=>whkkI#(rCX*k7={|sVJKRscHrjI(I2aJ^<-L zAo1iE3JL#2m?%x(WE>&J)Fhi$n5f23tnsQ{pGus!*yc>$2>#c?D%%R@*lI%41x-{Tl=jLgR}5rf z&3vk$iI4^~0T^GleoN!hJM(%Bkwu$eIHf?CN8a`E$NcvT1D+N~_AN7J!9g{nQ#gA$ z`Js~cj{frzbN1+);XQ)>{jV+UM!t_sEd|e&kx)L)8CjtWZji|+K1}Mv>7!cOo|3by1f(*;YEQF&f2~Ex zWeQtN07pI9ZHeo>enGkaQ{&+D^U9CHtFe{q@0VC3$51N~@S!0?0KPrJSFL*f`%WZz^$V^w=HlL0imkUqECj(HTE>46iJuDU=)7 zURNhao7Q|GS;&GVR&;cs9N=^hUJ3CHiby4TcK^YlC!5hKAlz^|`wtuuOJc>K>_t zkB>8omGg0Dg6+CE+UK37N~7?a%({4aI{NpOq>@bi*xvI-M~?NaoFe#EMZw%=m4U^a zUYm2Rx=)erd%MWuw8$(1Osv#yHixeiuk@RPJ}t4>D(#ih^(CT$(J-aJO{614FJwn@ltz6>lkj~f-#6xw4>tvKs*g0RRUw#q83mTNRU^Dj(_Wn z4O6u`^8Va1w1RNissx}VYwieJDp19{&%p+GFbaBa^eaa{#Xry180T(FC;ge{aTbl% zVG_uRR>dzXS~82j8Db)16(!_;S9*QE5VJ1WwnvcZGI$lc9gstqjNxDX1;O1E?ToAd z1D}uCGF@KJ{(xqhApTmuh|&nP~(>S!z;7m63PM+^< zY+$Z24mte#d^I=dx6tU3SvwV%ZJRIn&@ZQ=uZMUvig|ylG5OI zN7C~7zJE&GDBedriQzf zc66+Givr*jb0$-&bfmTh`i9jco_K~Jf#p$Lc?nbdT3qH=+ zk+2UVLqJY+Vaq$Jk1f_fi@d^w(7QWkf-xi>U1ccSgXv0S8xN&jdz3qO)dg#?TPs7b z3=bHl>J=^4g#?gRDcx@+B=9p=-&`^B9Hr%O zd#_V8+&#FOLavB()&5ai9I^dJ1uzIgPXpw53{h*T&_N{rbb;%r8 zIDa@PRkhv`lKYO|WKI|%Or;Nfvg$_U+Y#DJTW6@Bq??7=z~ zc$~wiO(moy4Mum-nwi!;<8(6n=GQ=JY(iMAS73?)rHzn>x#XFSPeQ9 zs?&9Ji%3=dCP#4dX=EcvmP$BPhJ_^437xbhWr6hrSkb$AnQ^<;Rn|R5=2Xw=8up;iUYRDKLR)G%az;00k&`n@iT`-94_5%-H+;;{n5Pv zD`J|G_4_x6Zr0z{mov;%*|&SX^i+~dVE*{&>z4==3%AV53V^4lIO8Onnbvxh2w)?* z1Wr=sLUCt1Hm~6sa)~rZjG-?*jTyyXAVida5r5-wCdlj=IhxL+hl%I@Eg12Af{PTj z5`^HFuOuuf6)`LDQJ>--7#S>qDu(DqYSABq4ID{%fl=uToJ7GGORcgSy{M#nues`j zGq+I(w7F4+n$)B-3WUfelkbn|*{bgbc=ocH$Ql&omn#SzRp{Qpi+4XaH-;jgwkVe< z=L=!UcF%>S0R?k!_PeD3HrXmv*vcC}vm5lfsGK-o7z2#r9LaNDJR|J#Qu#ZTH*}vM zGFNsr+Va}iG3~D-ce{T_m|<|?q#U}Uv)on6N=8a8LpYDpm2tG1Sl1R@3ryZ zc6;sCtNlUk$N-m!e9~7wCPMI%v<1%>S)C;@?M2y`lsfk24XCtkBV*wKVh+7{yis!> zEbdT0$bQ9|JKC1m#Xh}y{k@=bY%7|YsPc53L+1GP>wqlJ$=0Zz(M9D(!a=M39Wis+ z5(T9I_yWvjXAbqvL+9hH8Ou!fYt`C&-NZ$wIB{HP%cY)un7D7KAyJA;-Iu)-L%5xH zgq1P#%a9j$yGywzBumDQ1a2%=D@u3~1X>$eak_<=mV~0D>ZE6-WxHSRn7vXpG~)&V 
zZed|DN_?v6e0iWXv{g}25$^uX7RUWiX9v*0);f6D;3=jL4wVJB#1_l33f7v7=UqUumW7`AZ9g&!B;hW(i2OU!+xq?Po8l^k$GfvF zHsDkVOG?jiK+$J$bqlhaft(B*QEZkS zsAjsT<#vgg0=%pQMuii6eEhWLg=GML@MQa`6vGc;iPMqRa5&D_|l>PGakBj&(zwqWV~K=HLmfMGd=w zFwB+UG>@`8)P=*?Y{1A}2itvv+YF+kgdXWxa=;A`-g7|>XBtac6edGOm6J8%O%rO_ zO8=t6mhKQ@7WZLZABUTQn@7A}J!I?@jqVtgx7sFA-1swXgH%o10U_G?R@D2k=lxY_ z8M$5!ws!m}<08m%w=jB>M~VDZm4Zc@x(uigfJtRW9lykd@M}D5aLAGL`cc$$-Zcfm z__vE!P_{)*;vbJnnMV9rKWR z1-+A_5rklK@kw*`MA)lVBWhW(JqhB$FA|#et(K)YQGNsiy5`=djK4y{5mT!#x}C1E|){pJ+vN5DBO z!%I5M_oAoPk>6fzNJTtBIFbA{x{p`P4&mcW)*9mXfqgYbh4aEN?DUPRi`PSi^J$`r zje_zh8o|4~J|Gh2n}HMLP)QT-H!8d}@nfLWM~mUD8g6?L$TLPhU8lsl9fMR52-c{Cl()tNQm?mJIZ~DyV4@&)HqHj6ko$zx1&0U{_8mR3I7ZE#5EHW3zksO7MtAp zYhxxut`~jZnCo#nerQ28N27f+;YYQ4@oI(Y^l#G%X$fKFODXrVy(w*w+r%n%DAVX# zV5UmTdEHK15A*d9=zXrd%T$Lvj6BIE@x5gd|DCqyBzFqby}WwGET`biGFsji6wgam z-9>dXE!Ipf-2+nTIOh7`)IJX*cHR&3JqSwiWZxbuo1t$iW=EW>8ymD*p86B`xbJhQ(dGVyQmTPF&7P1ph<<}|t0~e90{84hK-z}`upR82p7o>&)@&Xn z!(}D2DvKsb^P<0Ftatyeb4cpo@u-Bk>bE4|V;yQs!v(kYd;XV`ilire&vY$C;K>=} zm&`cJQ0~BjJ>X(`Z)X)n)OeF*!Hffr)Zez~(M-Crs|8r>oiGidy@I-DKwK#BZk~pN zK=?Y3IZ0_7mIc&ryOOwA#RRwx>K_g|a_mIOQY#1EChaOI4Za@E&E4vluT8|=`fchs z%G*}<1mUOeWAKW)J8oRyDw-zRGu0He;q)64x`8^KSI@{b!1B8;qA&lN1**EnjWp-qc{+imQRH@zo^9kTAFq~EQ6f) zGHJ3!`{(Iu4x~Js4LdpB)Q@T8J6puNrr<))}DH?4#)K<(rofBv;$3>f)J z_ei~9flb(tio1G&3REw?qSUGj9 zjJ7!G^$6Geewyi@iI7i8_-5)t?Hb{G3n3Brfb@AM1fn$#VSChqDVu3mMYS2hrXfmp z9Rj#r638f_aSmHy2vQf0YKEb!7rkSx)iNqv+yw=7BL&mF>}UUaO6D)?uqJ$G{8dx! z29;q@2=rR^7fXe}x2O*IRpF$TVTz{w*h~$W+jCdM$a)4yWoiem{$*@>R+FTx=rNe2 z)!BX-Pch>7oAg~}5wj_6pIfpBqI_O9XjK6$@~hy|0E6%lVyv}quCL4<_CU>%!jyd8>{w|`Xq#2e29(Fas||& znII+*T4qBPv>@bO2#HN~vy9ldeDJlg>;%i<50|Z>MG8r}_S^s|jh|Uc zhF~K#+_)!F1Ms&!rK)8&Kc~5cwGa>G1cco0S@$3n=BsBGPUK-O-sWowQX(v7@}h$8tZH3s@nvZj*1J3ddhJ=>gQb(ET%7n?!x?e*{azjpIyV zT+*m*By1)`0mmrl=T6Ld^?1du(bwRD+l+2GYUC_=@#sf1*qB8*tsCnGR^|WKx)h|5 zGC&>vsNR-mZ)&`Jd+XA^nvdV3F6o}FK}qOdU~i347jZ3h-X%R*74Ai$6!@Xi(yZ(ePBRbkeQ`B zfdoR*svOuHjcxGNasH4|Ro2msI6R4&(PJ8hASVg1O5LPg=SZxnero>2Q^J8{d)NOW z@qnMtf9KlPNfs}m9|({bVIm^DCR313gFhBH)B}dbD)aV3+()v7X2}&4pru6ec=_bW zR$}6%0LKg1@xQ;V*}BQt+q)cY^zj5$ntmT&7yjntLHYJd`dj?y{grOx6Fp* z{ZK6-f3_9f6FM3(TbI`PgT$y2Qy<27q-IK>nx>mRfL#vuTiTz4uHOVC+Q%~@t0L6# z&fp5c_j!m?!9B5=hJdq>#G7^%!$iv;0cn;?pa*qi?2etOhn*EW>-JvCVYY`pkCw2( z^7j~3AGESx%3oqTWuJ7vP=~eby2c-&_fHm8JB1a^TW&aOF7SmGNaPLH9E2-K0>X!o z;Gc18%?$o2#c|cks1?$(EZ&u@NQ5)X1UflzH&<2wu!6{q8=&lSoGR(2K`>{Qzovvs zdP-C`uK`Z5(X@5R@D0E-8ykmtyq6hqnv+|k;OmURd@I`SeGU?h{(He!e5ugW9;gKO zZNYuxmBtevHqTpMbHph{)%Lq2(o)n>+y0|!P5E8dNA@bMr#KyZKShH z6GNS&~kvBK2+-~pJGI704$RH zX~*;yRsy9a?>hnr10Rl-xZc|Jg5%If2zul@55dDND+eqnFkkR`$<6&qsxP+@1?U~! 
z7MQi{wJkMA#0Uh)qdmWxQ=cxt69y<|PIB?gys{lcG3;`O8+8vB*~GaQX?Yi zqS#Wd^(j{n))hLF6LzPmqTQWP;!hsy!iR_qzkNShq#GfO>T`NJOh=On+?#IBfa9rz>spWM3N zOPE7&OOPobV<8nFTH3&{hzQ7d5Tf(l(<56mqcfNSb&a_{g~i3+=|_y1fl(`hNi$NI zw6H!vM<<9L1=S(N__NzjzxM<1{=$HP=+XxPxzOqm38^Yc>u3w32+@}pAS8mVgXRim zD>}2*H?x3`YGs04=>@U@xj>f!~Js^=RCDX0`2 z7$EWYBA?n?9PQt~;Tj%+)H!mrczkw(YCs02c9mCWuNi>*u212AF^tZw?JW#kqrQ`= zI6FGIF}t-myMp_gtD*dUmCUV48?FgBa<2!30wWyf=b0mv+6Od0az0BxM787StaSkC z2Ic*nzyb_1S*43-7uO}=5|GTzqw}d3o*k3gHPk~>0LcYPADcU68^pkn&z8R#ur=KS z0Q*S4j>E^d>-vg!f8Dqcw5fA;176Q!?XR!%KW20LoU}7CLmYWr>6F^Y<^|Hs_%`Bn zarvM;MsWRl5B~d|g`G7+5<7)=Y1aj0F66>R0S>t`lLB=9ojD%+?L+d-DD}}pa{C?K z`5mzT)gkeHg#7*8eG*WX=GfS*#?er z;jh2F6eTF_XX%73eJ*mLv38kzaF!^AE_tAhL9v$*>T3D(5<{z4$&_TB+X;e^kO zP43kGZNBk!RrSb?USq8vFVq`gI-laf^gc6Sph`l5kOxEvfgw)-1WJqfaANY|8_!pD z>HnS36o6X)aY5&Bd2V|IJ!@ueYHkkf>;L`mQ2YC#uD=J4?=!z;wb#Xs(dAQ0>>9Fg}b-(s7*2k?_hyFIZucRO%=g1((k zA3@v%ordJSZN$-y=CKV3H_iF%g|R2LcYH9Iswid^!z2$u)#b{mA%Lr70c86va__H3=w{ z&ARVRtXY|C|F#sOS#C?e)t3E9z?>$bajQ6%HJHZXjLw*-s9rG+@ao~za+ZFZsW5yj z#4KWb#fFnv2?WG%vG6xHHZ7q?Wo|wZ#@N5?s~%UoOouHw2kVs_-Yvj%`Kfga@`cUD zCUlh3ZiI+yft-@RMRHDVO{kgqQ~Ov93ZP!S8Y^$b#%+E0k(&zB<4M#+_r|IV;B(Hg z{KB5wvFwD^uW&(*eT-nI(jViJkhKCzv*$&ict^--nStAr>2FxQVeG6Ee}(7zEd8R8 znuj?l{K#t0mM@^d@@b0%*}a|fazju0qj6TLN7yAx1J}88{il3B(y;-#?v)$9JxrKH z!Oay@DE>4}TY=2`2C`hJcxl|k{AzeG9P$Ff@Z%a^1B)CAq|096*IefzKa$sNFGl$e zWhGjKMUQTPq>Y-M-^y~dbSaQJCcHQM(e!kh@@OrT^N8#CYx?Fo3A*A?E;`Br{`yi2 zTjaA3%0J`%vEP(WnDqC(Ov3``r%$2xx9Tj&{mZCYTiU>ZymHhgv|Hzg>VC})So+Pc z_7OBs&YEz#hW;1Wr4WOh^rCv)xcy~2$qK?lm!U95g%rqOQ}LlQ4mfGQSg2FN`6b3*Er~QB2f5ajip@sl zK@#$OehqrQJ0xu`K*~--gy0HF_JAYGLdZ+jFhAEP#$&eApevw$v4Fv+{MKDX}hXB4!|uz z{PXZz-Bxr-8=jM|A^j^gyX)9zK!fmt?x79YD&BEz0$$YDMPSrPn!3&`-=+8@POHC@OihCN zs7FiWDIK=mlO8p0e3z2lVr%f$Q}_~e_xAOSJ?XymcXuG%=wJK(Bfc*Jfa&o|04#Y5 z)Yvoeh&?;YWvw)({ZT$}4Xxpu{TtT%oD|w*dY%tRe%T$AaX|+z=)B%bNzJ6Jb}wzc zDoXz{VtKx98j}7-{lFiGc9fA@sPDsc{L}KB=xqkreQ7Z)3r4r4Bg(XUqae5j{am@278DDtmiobf zVCSm-|3lb22YC`ai@rOyZJXb*ZJRr`ZO`l&JGO1xwr$(i4&MCkx#z`+_in_E=&0!E z&dRLHf4bw7m6?}Ppsc0pZ4yt1EUGd^A8e3eMwJQ zaIRLyr|=F(;&l{{qnQv$UGGxbbrHY9K~~TNTsJu}*Uz*%ma48VaU_M@kec&@Mh$0M z2R3p}!g14e(IwDU;O)N^&qrXU1Rl7XRb=jSeHu_lDFXJjSSQf~)aDv@*2f*7FtJHr zm&<*$?hJ87yD~n`|42&*)0RM^4TU@|*n~g^@>u=b-;(3`K(6}kq@4bwQ<01qke_f)FV*AXC|J(77<4I}=nNNvRWtW=O zRU9NBiKAchaGa!rgG?LLN=Bf6zcyk{spyS9$}L@hvJj2{6`-L9w`gVv_1)?$Z@i!F z#9&@NWGEMotb8;lS)1tSoeSuuRC^{PP=Vg%IF| z$en}56vD<>fT~YUsU=VEf^fzmXXS>HF!WmbIuv|i;+vpdO9_v%5qY_xrDT=u25$H_ z%>RO>6Dqyhs3iE>7;OPmA08`u5}&OgX2018a}b2G(Myr6_&5k9AsJyWLNc9u2$L7c z;4F6gCoP5!qf2}0y66d=e`hd+FCRZw;3CI(@7NYx8Q1c))IszmWSmwND&sv!Mqu*W z15sTsyI;bNwVXQ)y+I?);xz;YDhgQ>=`L(Ws16rrSj^>JLGW(HkS>w`RX|$4^=)z{ zj=<}-ZD7!<or!e%G7A-m!b!Xz!QD~beI?5{0(m=pl zT0Z4^a=NP+wCZN3wi%SjMvfX-_a85LPMfAfajkoUlZ_2AHARP#Qorx9IA%T0Y1pdg zJN$}?tVOSOx5L%#gJ_za7Po>n*^%(ECgL$~@`Vyi1mu280me+lYAX)M!)+vP45cY{MJ(Sbhtk1Q14)MT zk>q^rkqS${1A~hD;mbaOh00?D47a;$(7^UAb8ba=awVuQu~lev%%i{#bZBRLDqq`3 zfy^nuwZ4(q`2FTiJj92CsUhk35+ zJ;YvsJSzw;pW(5yRFD1ym6km=KNP_AQsDnd(VO-!k8}8gbKk106O`dmvo3D_t~y;Z z+?I48VK6e@w&2&Ke3}m%qi%&x{8&&5o~|{~d08&o#=5n%EU%3 z*&b4jqTb?LIqVc0j<4kreVDucR5--o+SL&A+7zmmnfT$abAS1v9D%yrLT`p(czXF{ zArBqni+p!t0@N^>Y`v?OzT3o*|W4LS&$(Xz`lSlJla`PczA!$xOt zBjg3!XSNnZiI^sXjorB$l$V#iTr-w-dP^kBrZF_ zkU8Cp9?cX8N3x_s1$sXrq(Qb(6T7?*=CC}Z<(*>6hbZh+rk55y{dVv(7+b0N0tJZLb-g%;%&d=w4C(^k) z0}k48UKnIW``Cwn{T$lRrLMj^_CZ7P28&Vvsmg?~Mw2GjqL-M$Kd?qGwdHlqNb*ns zu-V_BXW9OA%1(HmRTv^X#2>ECG45_EQCpA7p($}OmS@L>a@$bUIwisz%dg!r-9=uR z7W$v@)ktsoPUh-AAHBn6BcNdL}hiBpuK1P53Xd`hsy8AELh~!ypAQB{6jFvOT;UOAtk!Pt{ezqS>bn8XD 
zasr5ZB#LBa#L|BBg?0s{3OD1t;-4L5TUY^Lyl>L48aX1Rt$(j@R5)l6nb6B`5c4|g z+N-;Q+E{y5E|2P&&Wh-k7K2F}G(dkhsvk-vvTI3QrTr)3HD;Y_pGMfWcna~E2N~%o z-mtEwpddqB4=OmGT47azDgv|Q9u5VJ+JC>6V6U9BaM{f5J(;-jJg};6-)Vfc9FEv; zb$6rvmXr{psMtGr@cOAg!-*vGtti*t5^@6c*9S; zV9YKIr|(SBPn58sENh3snA;waQLnjL+>)~@^I zF~br9ptaST!C2Huoo`oB`TY@6VA=w1t~MDA#4$lqSKucB*D2nIhlQlw$Wcln&W%~`*yr_p z#JKWmK}kSNV|yEM+culf2_}ferP5L9>mbL;l!}2-&HdwXl0~)XFQ}Ew)T7JaXq_>XE`apVWen zMHaRs3qxf|FC%9qYc9xjW9(mMtRCKnD(#`U&lTP^k4inVmIgb6LPT$i?=>Z%%>bdb zG|SPGMqx963Q4OB?KhI3ugtHw44AahUNqZSk<~X`?*I+{u}G27V(q1IHkVJ3scJur*S*4n?&-7rln4LKSw0P45^E(%Qylj^)Mj!n)n`qxPFx4;VGFKAa_U$V zgj=fvZ`zf0OEdy%iD1|886FLdG~%pAq&tQv8Qlut&*08@5c)|_A$?T5Q}9=jfH==a zz4adoc2(*;4xsYR<(R2QeXXoLhC)v+45Mxcq+yl!^YjT#0Qw%_;@88`lsvhVI|~A- zTc8Y5RpA+V=Xg6B1M|>eI!p(N=ybAWngZy_0}p5%sG=@oo}uy9OgT5*yR!?nn1P)D zhNs5ec8{Lu%Uq!Q>XX_s^D+gqul@J}9wk2j19`(ow4=@lPh=iv^spaF_>RwE921fj zU89I{TyxpHiFUcbK-3;TkuOks-QU(l2Bil6Ig4cRB~!5@wa6Jl@rX;lCJ5C99N7sa zntbs#g%5Vzp89L~veSxhNeDrmTZqh6@fP!;y$~ zQ@%@W+=JJHFJE?r$>pXA4%WRz#Hw#t*dUFzy#!C%KH@((^B~-WA7yS27MFmtK~Yx` zd7a5_4*@jo5rJvY7$^+f1u(?^{yzSb7W)Ox?cc4 z2hOaIbe_c0j|>+B-K%0RPv1*jfESn{aKW@t(QG*$qqt}V&lo=h*JTba@Qv>B?LYk? z$?eyvzxeq_tlUn_Pgi4;1RaCnM8Wdfsa1a!-if6I{#hrE(%&>sMPwrs%|Lgbu%1WN zwzp+bGDiC|B){IJ58|J}CZ~E`OY+BI9;MuiSN6^8T*|cx07SG+BuG!p1!8wx`H}dr z;qYt7yJai6z;g187;d-kXb8bwKuB$CftFm|&tv1yF#X^;UqA#&GYvZpxI%c`;&heYBTQZ_dlyQfXi4GmBDr zrN0RH12MX7nn}tx2glNWFC3saM<8C?yhRnp@JHQ}q>E<7&piTUkR%A;?ooQRe`kPK z@u^>Bmh|za;J&x@Lzl7IgKl?h)O}MnSQXT?veh=|`dw~M^)1_Ce$lyPOMG2s>5Mft zNP~^IYi!ix!7ZlIpf?6_il!@gm|c;>&ND%LZEB3DE6XU}6|Hvgr^#{hWytH{2c*Z1a9*cM*hEb|Ay$Ac zo%KVqVM-rP9GcOA^{cATL)-N2&oy3NK=eJ$AF*>}@<5Qf4y4XEtug-3s+oIZsOn6V zRFYcToT^|s*B%ZNHc*2-%?2^f!Cb(Eqygv}VIy1Tz=L7H>qBr+E0lg$=ZKEQzJuXM5dlbopEDb=CPWTeFp*X(r6 zm*beRz29zY>!fc(Ewx?cOU38(2JY^TRlQ0ki@H7PpApb~Cg?S1qt{gkjA4OOmbP(< zV6an4f_MKzBT)I+=N-4C0?1z;Ak)CLBRq*SkSh#ZA;Ac&TJguANRB8_rsl&W3kGhe z25s^np@lMby$h}?b~vh>=HoJIM=l&}>CS+87h3e7?Bk>@^M_-z)CQE7?Y$IN8GOQj z43sT1I_Gy_ODqKmu#2pQ4&-ZbX?Xo4PmGyU=d^pmVBIel;Qx+^_EpQ{bG^{R9QEhO z^eh`igh{w!*Y?flJDJ@C_h+KcB_P>Xy@X>GVCX)4+$irmQXjzkU5XGkOi2<0GOh7* zAjqwY&_NQ#b^?Vq z_xtHajI`V``+?muEO}!Qlm}gL=242*MRkz8gUOf)_i{C%+0rov8B^r#t%$Vo5iB2_ z)d<3$QG8`_%6nF;u4G26z7@RfMjaM3N6&@g_{+( zei5%K_BLCWAulNf78i3N%Ty>H{6Jee{bBkiPr=-i5G6E_4tM3=%>PL^aBc@bGX5VV-ql5oUq$4=|*jOn_Xafp;FQQAS^|QyoVU%6MtP{M7%*| zoQBd8iR;YOQQnkN7y;~rVC4*>OJBYQmR)m9e_-NHPM%l@n~Bm}cKG z9&WCLHRH;B+k`T8JzMiByo@8HV1QMe5JeUo(06R?p_##6Of~j{bq-Cg%zHV+-*T3~?MD@NY<1K3j&N!ZC-mIF+k6!}_q`l;Ps1Y`JN7BlFid8Q z`sXqcA{vrsi)1rf?C{OISgygIKdt~h!xcbFUsaH#Tqh?f?D59y^Pb0vkDS3QI_tBw*R{gshy0wxS%qxJwQ~Y!rIv9`g-@Q7UWA22YZ-aA; z?-he8`IvD0ih6&H9nnFk<2``^HU>!@J<3rQyNfTSvn+@#F6!M9RHZNg$anzr*0FhxXaw3Rt$GpViJIAL*z=?+c-zUGy*rtVhA?Am z;7b)q*x5dMv8LcZC0W`S&g*)buwCY)Z{Lkwb*(qX(U@Y|W96pBmf1B>N6bmH%8?hC zVi?L>s^+g`F9HFV47(f=?oSm0O#-cNvW}w)dS{5fBG%xxao9Ojt>~*7uW2bVg!!_O zp*8b^ue=GO%8%%tki=!)kk?vbr@A@(F%Yq?R+Tinje2< zpLmb4MDl;F0icBg0lyR-7k;bKK`_ z_)0CF#!scPUp)IOV4GFeAggD&Nt5f`C;2^F{abAqZ>H;m{vM7oRJ5s0n_-ics@dJ zs$@7v>PzQMi}ql6#5X-u5!l;#-GW9Y)B!~`K5GLz zT;{%a!KX#zy(a$@*egnNY&Mzc!^7iI#P!jW4aH44+$?wqTwS*@|0GzGgV^&7tL-0??vTJ~Z?Yjr zmJ5kYI3oBCMvit@K4guQ&ZJ3GoeSaRymJ-?H`>woI;DV&$A#u{(rg!P|F5}dnlU{k z?|mK)LW-pcMUrL^{Tz?_bC|m6&)d# zBGh0J!-n^ul}!YOqef7}zoi^VUX*SHFoW9nSrOu9shn5F7k8uw7Y!VY;HZg=Hy{f? 
z8}HQDjv`CzDY4Hsxt4e`GnlK3vZ{$*40v zW(z@jO62o7!h!YU(g@RPi!h^tI`%ki>k*%1@C;@r)|Fo* z*c|c(>K!aM^0rGGxj`~K=h!DE)9sDM(Nl#g24pf{lPw7J&jsEGR;?^ov;L`5qV=G% zUaQN6bc5m_qyJ#JV;gahtrFtTNW?(mcRf=$n8Nfx)?Xcw94>$cU+B*x2M6c|lXiA` zbZX}O^TG|PFPgsV#7gRkJ~>nq4z0RWj%GL^ca8CMzE_2n$xT8h{%pR-~!W*YP^dZ|;BJ zBA~S=Hf)U6${M?+{SHPT>Ob2$oj;~nkn-Dt1LPfSey!3}z!fGmG>xh+a!Ew_fi+g3 zXvhG$rB6s*(?t|^FMjnUOh*{YwfJnkS{`gy0ZOQidrorc(Q%uF+? zaW}W_I67#q%4QzbLgSIs=?dSaFNoehts#=3bgg+}pb<{DYgGCF`rB&v_kg`^%;u5D z>?&Ggg?^ZuUsu;CkI57WZNQMLg0higw$-$Z8QF(TODo>;gSzaP((P8kNjdnJmUoOX zS?Egp$y3sLazI{DGq%VCsFO}=eJQyOX6oGl7#n8!^o1?Am+b}a{Baz&{O|)}(v5D6@uc-%aiKSTH%99?mdULUz2+$Rzhj5<83gyhmfq?@MB*S^cF^^DT6YVmdOehn z%s5Xj=E_Q}-{A>70o={K$~;-++Z!!aH}6Rpd`IBpvF_1`E41+SeJVEzZ5Fl=s8C|G z*{T?nWyVGW`TX><-sRBP9DhXE+_a#$!cTKlia+6&;G zarK9pF%GR`qw{zNArj#~Cx$^!uk%}j3pL&8c#d%Jf{>5fZAWh4NQ+oo!dZcFJXh%$ ztRuMTxFGMhiJq82_wMOv=U|=7aG-}RTceM<b4`{;C2`6BiOlAWV5{o9_-i#{$95VCo}&e?>t!$`UrFt&*4M?^r5nCRFFg|uq|1nC&f_lh zC0JkF;07+9id4Ih@9Vk+eDY#og9L;JYC@+B@dI#JXXvG-KhuvY_aVTip0cHWnQl!S ziQsjq<2b|<0WYu1cbsuj198sNnVl5X^oarZR{t0y#hKNkmP3->Y_uDxmk|DXk?}K9 zKkLF|eiO5B>wE`m{Yvr8Y}VtiWof*Zt0Wqa@w!E;&~cV#+?n(s5`Yw4;eeWZjf9{o zG_7O9%_1RWCHL@wN*5Ov+%S= z(TR7Nier}?;3cN-^;Rs*2$550_}j#G>+Va(v6Px~hV)#>eAuLeqSFup_dB z`p#~GDp<`bDEuM(?3|6jX4A|*xX_%}VoDSw7dhqAQ^x)9*89~!mGk8kC_+I+X=uFH z>M{F1Wo4NcOPleU3_R-!+33+ckGyJNzsTNFM#d;fLL6~Na(kpSi+ZATwLr1K@I5Os z*X5&rHef1q=Vfvx)00w)kF;dqDPT`yCGcq+b2O8$$ne@Tc^w$S*vp-vmb!k36OihZ zK67jLPki{Fjsbdmk4>xJ3UiL4$*oeXH!Oz301bJFnG{{5e9FHpQ>l6CA`DTn(V^>q2xUCf2A@} zu3&`vf?4-@l8vrv@cks2mqK#k2p8DL3yvdkc=D)dc}nnOVTDzU$K6&6t6=MS>3zR- zs>v+YA$tx>U`-O2;b@y_bOCczA)L7zIM?~U4kT(;cS^#se-Qd*ZuwicM9B4jL!JtN z`-}`*0Eb5l$p@VV2EZk>bLw=WHfGIBa0msh)#_k{9SQFmd|-8tpD!}PjI&tS>q`{; z-Wcuflu142vg{S1V)H{Q{JL(V)iI;>k$3?9F7DrXvPWzG+y31Q;XsClY^prtEwm`B zFnU~*0_&O#;ClAHOUR;zN&b#{f=5F5n>`wQk!ILn5mNsU6Z9skmi<}}!HA{c=XK0^ z(l=-;4ka)COFV~bV*^syb$sZj?gz!v!q~mi##mUYO4>o#A4~b3iFV7RMU0c`QuuTV zUZ6lWjr;s)o$1UytR_D(KBgF&1SaV}c5Z{=7C3XXUk1?4C^@^TF@hBiS5M6I0DJa> z$N2sZhjb^?+hi6TRwyl79e5u^XjWG$gqUsGBy(P%YXeLYOYD>Vz;A>w&*lA3(uwl zY&%E5Gzq;{;4~(@jowF^4{k0_6f4Qw3L=HoiEynEjf--w4vmHF)vK7WcNzQz543DeA@&yg>o82^0HpPn zbtMS<1q?iM47KyBx1EH)s2O;jcN{o$t5Rs(=Yndk2U=!jO;cq9zY4W})+h5%h3D`i zj&+GdNs9sxR6}r68oa5KL0H(vYBhhcCdGkj_34G3jWav(cmhq903`9a3rrWFh@;?3 zUuWKC3@CF@h<@^U*98&Y+^?b# z=jZ*~>AqIh*+5x`F}h7n&MCF{u(qSZNE))R?K4$0fd0a-Xva0r+EIEdJSL}I zI2rLbe9hH`vTz*dxGL+0uJH zYNgV3LqxO`M6PW9YipPt8ZXEyCUqLG_`dhJmq)-zj-12W?Lo7O8A4UaKK(d{oie*7 zYu92a;dG}AWvFqoX;oSE?R(!>m7%8BY8#)NmJfkW0E+7yuQ-UWh zwsQXk!r&b-&(`=A><=ixYBUVAAqxXG)HaJSxE0RPt8O*28Lk#L+A@`ywk-B?%H!AH z?H3%`_Ksp{@8Sa+EYZ9YM7;~3A6q;d&R%m2)?utX(LC<(Kw)1Dx zFoyT(H8lx7Jg&HGys6Qz#09ocjnc>U1L7hAr{mV!@8Z{jxbqj3l{NOISCT;9rPe(J z?JA3qDI)DtydPQ4&SqIrc#a<|3W(yxiE#o~5I!S6+p3MDTR%L}ujJm>w_)sbLE9_p ztP2G(pcL&n9tR#(&Ab*R&w5JPSlkKSKbP)GtCD=sjQHu_I+j1IYm(nJ4HT-ppV@Q7iap(!Aot6ypd5{PsKg|SaXc2x;|s6@#QY!5~l4)v2mLPhqwNL&Un z(`*Y82{lVmTe}2C88(ieFXD>KU!%(x+NwE!of2|jOK$_VF}34tv)PifKyYoai#mFS z9=y}HgasW>E~ix@o%J9KiCox{+XJ_Yn&Rs%CQOazTlkJ1czSC4E~d&=WQQ1ckWTI178y?1!$7(==#d^9jRO694cHNooxFqEZ;EQu=cxoC`6wK1lN_tI|lDmPh=TAi{VkZXx!K-TjxT{z4%kw}<-eYh0PHmG)OtkM^Cu09Q>V3gh5nFew zAWKjN@cz~@mH8*)%fE8{+8AzvGaO2UxC+p!`P_7fcAt0*K3Z)hK>hq>p5>edhP(0-YG}K4#M4q&4^t1gH){k{4Afa3X5R#N2r?fDk_|3J^+c$yCRp=iuiO_SCydJ3 zakuBBp&w#Mwx3&(6x9w8XfSSzkm!3{tVvj4cGzl4C$HPIRPf=nk-QV$fa>FTG|iB^ z+|t80CVFWBV_UgOUY8!!w7~~T9&5}r%_w_t)&N?paCpxRKZ#&ad$ z0Cwej?q1Irh?sF&WkO=Us#*7B4<~7HHOf+iX57pgh-6m5c_gi~O~zjuc@c8gXSEos z6|I3DGhAuaOCe78xcC}!o;3!>UX6-A?9m%J{!WC!h{W`^?NO!rS1`%s%-t*%wf8X? 
zajSe?9gVHlu}$C@ z_yevxFh*S!D;~>7BdM0B!@WT_uYNB#7x}3^T2y;4Q=^rr-(l8IZRhxM6w7-#Z`jlW zNla?=pH7l7&*9B`#BqEVng;oMZ)R$o;92jFJYG1_hQ(vob;0_2`1NNpKf(T$ne8Mw z)!A4KFDxd~<~jWkb$G%Q23?@+{7cLuLG$bnh{eg7c@wY@x`VzZFYEtqCD6eam~rgB zjk;Gdx9_$+no2baRzwK_QY$nQP`wbX#83gv3utA?R=n4u7)+0T2J~8jqoE?RRp5tS z&=#=!N2U;Pm*zlHDLV~(;K5OBX7!9}xpH^i^4X9n=%N65fwkEh?KZYSM2>!MSMe!qQv5;}$7V4}b6H7w0xr1GKCT%;w z8H3RPbp$Ghd3Jrxjq)g&oT3&dtIp47QS-tcCPD80~z_lvv3|kgLUWMb$ zB|gdgU<2urmEri!LgUT*sA|{i5^Tb*+b)ta1=uA_TIx4K&({CkY&z7*8Fl+sfEfS+mGJ<|7dYkqIOcW=5@BswyKNcSi zev>->!S^=8A5XQwHt3)eb^6g~kZ0*S1~PGDFbhB3Y$U*%&s%tQsWZsV9A1d?vuRk- z3Pu=1op!+4M+cP%RomVumyNQE2LivGO^4g;B|nd!DP@13RoiIS@)@6gWxJBl+C>E( zbMGv-a5-20o}Sn?lz*LL?m$ESHiWnXJIl}W<`uDWKsy=y8z_!6aMT7Yt!f)srm9!S zR8L5-xh-xDJl(wh6yrG2+X`nbjk=wEnO%9`^VI!OGiMS^t#$}(zXNUuY zRZ!1eg-h1p1BZgaJ{p?0%xyXR-AyEPGM$H+Iqol-+m=aV)}pq2k>rYP4UuIO$=$6d z6R(E(xHsjEUZ}wrN*F=1k*{DuyyFt=U85N~vcjFzVU{8$kp_b8dzes&lDUEz>2!WA)MP>+j43ZBUEPs>h{ z=jxaj{kbtXwKum zoQ%x>opeaZ0$^hMUzvr3|9NZeY~o1BAZBggY$9S}WM^yw&Bq7r+mV!%6Cv%zPNNEcmVl zI&MEUFmU62GK@Di-x{GdUoS-oC9kVFyhlYd+!P#8ow^JT-fXxWSXzK~lGx|3zG=UD zXiP>4f&~IHwj_kD4JD0ct(sIAGVqJdK8870r`I7fke*Ct2p6{i54N%Q0Tf?@3OcyX2-e($v7!=`r&gI0 zoC)_&Rszhhb5R+PL9HG$RR5w{x)WkCGWf3qAXBx78IN3d@Vj3MR@L4nnMJScEaY%4y|IFG^1Cv4~ z7{*b`YyZFF=C?qpM!UJtl>owDg=)i0g~-|HrN1~K7PSRHy+}D$yrz49ElSpoNr8QE zt80P)<@O4R)4^*TfnGw0tH=?xsHjY9;R@I*6x$cZ;ugjd>tW|oLystrTnPdB2^9Bp z`uBI1U?Nxc@eN@{(Pl#6hak=8_Zr)W4XDBj*xDD)YJ>mOI|{&_5k+ZBa?YC#1};;X z%*&yQ55vbmOghCTN3|4WhX&~@;>ZMw%mvdhig(xqGLz`DhsY#~!vG_KP}Cn1GN2UH zH-zlR(r2k9-i1{F(*n%|iaR|(7MDo!XHEqF_n#5STB;XooauKkGvTZKpXz)W24BJb z@9zixe;zQfGY>6u$ky$|OJ2{~QirCFY@g?{N*<52<2GJA3df z^_wM}zN|aI$#IMyEI_=aCFfSAK;VrK44x5w!2aP-v;4R-ss*|l0(`l{E;U|69?H%}<=&70XdMexSR_Fa<O!L((w`Q@K&$8C<@vRm!_`&5Qmjo z9jBh+ z-wwAUVdzx(+I8|)Gttf1%^}E}#Mgp#N2I_?1Nvnm`od2oHakD=2|Na0@2{r^Ge4iT zZ5=$q;F$+p16#N0Z@Ej$oubQ@kbnW90cJ2{JHp*w!N^~s$5#yt{>M1rJOJseH*~;^ zpZmv@y1W?ZG$=Uo=vABmJqqv?qa+#*Xb2w(V`0AV7ygUnyW8&V1*z#9?X=bSlR>L4wnhg(=+njUtbmhq>V-q_z=zeEz0A=kLK*T z5d7KL88~PQ(#7w~rYOFo8RO}l*PR|5$}!Q^C2rIo|3+tmHm&$L^APsrA$8SHZRPjq z>nz#XdzHL=wgl4YGIX9`APSRWeSB4gMsREXNGn$+79^npdDik^lVH2k8n8CZq1m||hzn2=$bEPAOonLxsmibd;5bqD=xq&7$ll8_EP zOjJC5DrgxLk{y*eAI^+BrzSv*_xCJs`1eT*cT=9K`;yWsmUAm#F`O~?4>dcst^h$m zUxA&a2?%!UOJC^J5l}i^FM)mr>Xq6~y~owkM%J!TbiqHxObi-6W_sD&P_PV~obk2$ zGfe!T?GRKx0el{8d<9>y-X~4h04d+*(C9B_1-{2%d)xr5NFh03XDopXkdN+e3)fki z4v^<0y86!f$aaik}3}KJ^4c3Xl*gI&Qm#eqyh`(1YusksumPe_QtZL5o8z+1ccw9c-M| zeuxf$1pvs%p>HWz?iDhGLa z9pug0NQl z?yaGDF0##dM;hcj*ub(~W*V{&G)s6cfM?oIG^g$=m9kv|%$6Q1kKNU%X4-oj&OOwe zcqS6fRChI>c_uisO~xCjZ)v@KDo7evD&yl2TNH7`%DegS?UyESn+UG3XE|@5C?jk=tI&zgBg=w^?rJsqy!X4e?$wyg$3Qm)-U{ zFKlsIE;I1To%ZNubQ|5Se2vdNq_$(mxXdt!qxBHM?>jc0vE`15mY}VCjRJzU>^44a zHEVEIaqoQ$w`eF|LEkQ_Z+0_I>BmG)>Je3o7e$|@q`0iMsm~TY zqUvY5KX5$Y(*l?KEbhEv>ZW02ui+L$SS~XF*<1B_uhX&7l!3#~w@GjjvTzda*;G#0 zapeoaOYF^#shqg3xvF+(O1S@5_VUL)6AE_fJ^N)S(yg+Q;j9Gtbc4k&pVgd|Tna;H zZ~MAF`^_1qL0@(;bF=zddDFyHX#vb!5wK{YJ`?&>#XdXv^!qzUsL3B}Ni})v3YaV1 zEa8plDZ!o8DbiamvFW9Gqr-&&Fgz4%QsUM*beXQ3YlMqy#9sQlEBVMh22^h4Wg51& z+uV(?+av=e;H1{W!H)J|OX~3@mWGSQqEV3QSCGY9|J@vTEpqCdujvF6??<}vcodfAzR@%&7lI1s$JfpYY3t_=kaMEr$<$y2_s5C0U2Vk;K9cG zxPmpzT{D;P>K&K`3db8uWu*SojhS*V;8{=!LA3!F_A>n&rf={`=rj^zuMkOS7MW|i zamDN;c9I?6?#O}-1hJ4>hQ_pZ7hbLGA4BrP7;iaE8LAI^NTPM8-T&UnMe6IxE(m(P zH?!Apj`s-VTs#rCssD1Km~!YssdAAh3>Pyan}PeIO)_$cZRV^?%l=wu%_@S1n+|gx z-j9+o!(A>w1@F1_D^8cm9g<0MIEzeB!NL&t)R4kF;b7(fJ~N==#XU0s`7JR65buZ8 z!tH)E>vg}8h@}QWF*o-t9t?k!;D}3m6p2Qm8X9GkeG>_^Sqt6IPoV;FETt8jf{)Z{ zd6N!nF^VB=KuzGz8QySkk7P`4&l^HDit?7N0ZC2)-qZ?!Gx1L 
z!K9O$uwLJD2KNcGpyW65|6Wfc>Ssj5@`hb{B$(^kH)8wwG z8V$FCDOvqz`cyArlWXPya9Psbu2y5w${DIqTiZkdZwxAzV1Nri~z7G}%Ouqqb5 z*bcA_r)|U949cfGvCJ0a4oveW2AU&q!_tq8r?@MZ3_O9yR^}!W*4W$4G;D6aBGmb zNt3! z?Y$yCs~Gs2Q%!aUcj1}#@BtXEDgT4d%xIFKy{AlB@d%toPxh5_Jl(T3XH}P=>x+1D z^lwA@XD6&J5|S6{`l@{s8O5(zhA({g2Ha`oI@$79FomJvYC{_JI9t)HlCB}V9zPCz zj46Y;%+6sX*pcw>RoXzbVDXAD#ES#-PD2+|5?aeKO z42-NyEv&7D?HnCVjGQg(Y`F;k_dTJLg_*5^HKCo0v$chZqZ1W_vWubRkI;__;U84t z{G&|9;Kv4`n}xGEq4^I8`O#rY#UO3s;b!M(>_quv^nVHdXchTE9fXu3Tuh8iOpKgN zYybcw3nLR9BMSv1BgK!Ltex@yO+>}fz~0`(_=lSqSUZ_OGbpHtYS4?hSX&z!*xLSQ z$13I)PJ}<#f9;%>Q0-^YKc)$p=>aSN)*qv+bWHSY|Lu_<`JbiBSh$)HGBE-;8AMDB zEevd_2(9c5oXwp~3&(lw@0m4Bowzl$Dk9&KUu(+qeG4ZXW1T49Y((S{1f3Wh+zp#1z2pdjcz+aQpRw0s~B zl)^zg7lmjWS7SpLGb2}1M^jf9CpRZUH#1{bM^_gEGYc0BLvsUXI|af@fcE&L=A{Eu zt)Y>*1+n9Ds?$yeg@%i?ecx5Wq}9~mvDls19q5qd;tc;s3X~HbJB{?IUBilhYO8-{sA*+D?GOo8miOd!aW?f|yHwc<6oa?^Z z;RX-iqF9&e{tp5cJ!PV~vo^5GI7U0nR?1<`%_zTcPFRlhZN}@2$t|)Agx_l3vOJW$ zfcq`$EtZFc7h-vfb#GZD@;?at?6}3}2D{&atqXev=dt|FsLe>{WIv!FrLnlhxxwVA z;TGeT!wD?8Qg2Nb7O=fFy2Y{3?t#v&QWM$B)6=)OZV7Jr^-wq?JL8v^?H83>YPS>? z3YoQjWAU{QKe=vKLSDw-79+0eJ#xPNT9bcf)=9i&S;&>bwwBvBa~5lHhRX6ULg$#* zl*(<;IC`C-|LNn5yDjDie7A6CY)WVRu;QN#FT1w)1F;A;?FVKNtmOw{H?X~H{GA|c z!({!y@W!SoQ5nsr*lw`!|4`al&k@ivwLQj^xyxY1jRj|#jh9wz6{!_@rCfSo+tOWiyLMf$Q0%`F zn5FnP&6F>*>-ZFgvgc|SnEjY1h00pxRt1 zDm-o1ze|R<>}Ci$+Oh|$u6VQ{r+J~MNOpj}VfM=MnBy8}76?7ITJZ0YS_I3K$bhtu z3~M4@y}x95N@vA}1&5lrxV3avEDYGMdGkt?%@&g_tCOP>q)+^i^npeGOqsRZW+vIx`6M(vYiJnvFCre&vzjsGc8atb++bHpU9MFwRTKV z2Lmtg^)>r>hfgo*k)4qF#)fB?u5Q^&`B{BZ?dpwf4;UnvqBwtDC{8$X=~J}q<$`3( zlwOmw237XiMQ58ly|e${U!!?6;#5!58G}>P4~QN(b8`E^IZMyR&P~|&B>MqdLQhJV zdmPjD14a>y(Fs>C`Mr1(Tfl#ZJ!jG$MhVuL_s*Yp*m}Cm{(7hE0=uN6Phvj^y@{-{ zToFAZJo0qgn_W@4_b>hSTEum91*e8mBQo+Zo2hw@ZwX zZ`UzgQK4vDAwEgm&b0s=Iq9@&)U3zy>Zq&vvZ_Bx|b@m3Ud9Ph+Z+9hhZshHzb+68r z{ksy{d{eysb(fHa&+SFi4Sy}3u)*n2SKJhjh>NQkIki>RyvX%j&!(~lB+LckPuQ@? zTtg`A%Q*)Szp7U$(Y4v7(`SQ#EGblz>ox^N*6qh6xl~e#Ld@e&n3o{b~E>%@me>W}w DPEo{6 literal 0 HcmV?d00001 diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/DESCRIPTION b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/DESCRIPTION new file mode 100644 index 000000000..dfd52975c --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/DESCRIPTION @@ -0,0 +1,22 @@ +Package: delphiBackfillCorrection +Type: Package +Title: Correct signal outliers +Version: 1.0 +Date: 2022-08-24 +Author: Jingjing Tang +Maintainer: Jingjing Tang +Description: Takes auxiliary output from COVIDcast API data pipelines and + adjusts unusual values using a lasso-penalized quantile regression. + Output is used for research and model development. 
+License: file LICENSE
+Depends: R (>= 3.5.0),
+Imports: dplyr, plyr, readr, tibble, stringr, covidcast, quantgen,
+  arrow, evalcast, jsonlite, lubridate, tidyr, zoo, utils, rlang,
+  parallel
+Suggests: knitr (>= 1.15), rmarkdown (>= 1.4), testthat (>= 1.0.1),
+  covr (>= 2.2.2)
+RoxygenNote: 7.2.0
+Encoding: UTF-8
+NeedsCompilation: no
+Packaged: 2022-09-26 15:03:17 UTC; nat
+Built: R 4.2.0; ; 2022-09-26 15:03:23 UTC; unix
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/INDEX b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/INDEX
new file mode 100644
index 000000000..ac4b9369d
--- /dev/null
+++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/INDEX
@@ -0,0 +1,65 @@
+add_7davs_and_target    Add 7dav and target to the data. Target is the
+                        updates made ref_lag days after the first
+                        release
+add_dayofweek           Add one hot encoding for day of a week info in
+                        terms of reference and issue date
+add_params_for_dates    Add params related to date
+add_shift               Used for data shifting in terms of reference
+                        date
+add_sqrtscale           Add columns to indicate the scale of value at
+                        square root level
+add_weekofmonth         Add one hot encoding for week of a month info
+                        in terms of issue date
+create_dir_not_exist    Create directory if not already existing
+create_name_pattern     Create pattern to match input files of a given
+                        type and signal
+data_filteration        Filtration for training and testing data with
+                        different lags
+delta                   Sum of squared error
+est_priors              Main function for the beta prior approach.
+                        Estimate the priors for the beta distribution
+                        based on data for a certain day of a week
+evaluate                Evaluation of the test results based on WIS
+                        score. The WIS score calculation is based on the
+                        weighted_interval_score function from the
+                        'evalcast' package from Delphi
+export_test_result      Export the result to customized directory
+fill_missing_updates    Get pivot table, filling NANs. If there is no
+                        update on issue date D but previous reports
+                        exist for issue date D_p < D, all the dates
+                        between [D_p, D] are filled with the
+                        reported value on date D_p. If there is no
+                        update for any previous issue date, fill in
+                        with 0.
+fill_rows               Re-index, fill na, make sure all reference date
+                        have enough rows for updates
+frac_adj                Update fraction using beta prior approach
+frac_adj_with_pseudo    Update fraction based on the pseudo counts for
+                        numerators and denominators
+generate_filename       Construct filename for model with given
+                        parameters
+get_7dav                Calculate 7 day moving average for each issue
+                        date. The 7dav for date D reported on issue date
+                        D_i is the average from D-7 to D-1
+get_files_list          List valid input files.
+get_model               Train model using quantile regression with
+                        Lasso penalty, or load from disk
+get_populous_counties   Subset list of counties to those included in
+                        the 200 most populous in the US
+get_weekofmonth         Get week of a month info according to a date
+main                    Perform backfill correction on all desired
+                        signals and geo levels
+main_local              Main function to correct a single local signal
+model_training_and_testing
+                        Fetch model and use to generate
+                        predictions/perform corrections
+objective               Generate objective function
+read_data               Read a parquet file into a dataframe
+read_params             Return params file as an R list
+run_backfill            Get backfill-corrected estimates for a single
+                        signal + geo combination
+run_backfill_local      Corrected estimates from a single local signal
+subset_valid_files      Return file names only if they contain data to
+                        be used in training
+training_days_check     Check available training days
+validity_checks         Check input data for validity
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/LICENSE b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/LICENSE
new file mode 100644
index 000000000..2d1447e00
--- /dev/null
+++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/LICENSE
@@ -0,0 +1,2 @@
+Currently approved for internal DELPHI use only.
+
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/Rd.rds b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/Rd.rds
new file mode 100644
index 0000000000000000000000000000000000000000..8a2190e98487fe1343f025fc3d1ff66d3bb5f7dc
GIT binary patch
literal 1959
[... base85-encoded binary patch data omitted ...]

literal 0
HcmV?d00001
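The get_7dav entry in the INDEX above defines the 7dav for reference date D as the average of the seven preceding days, D-7 through D-1, excluding D itself. A minimal sketch of that trailing window, assuming zoo::rollmeanr and dplyr::lag (both packages appear in the Imports) and an invented daily series; this is illustrative only, not code taken from the package:

    library(zoo)
    library(dplyr)

    # Hypothetical daily values for one location (not real data)
    values <- c(10, 12, 11, 15, 14, 13, 16, 18, 17, 19)

    # rollmeanr() averages each day together with the 6 days before it;
    # lagging the result by one day shifts the window to D-7 .. D-1,
    # so day D's own value is excluded, as described for get_7dav.
    seven_dav <- dplyr::lag(zoo::rollmeanr(values, k = 7, fill = NA))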
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/features.rds b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/features.rds
new file mode 100644
index 0000000000000000000000000000000000000000..a2535957db3eea32a3f76a7490aa3fbe85d332ba
GIT binary patch
literal 121
[... base85-encoded binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/hsearch.rds b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/hsearch.rds
new file mode 100644
index 0000000000000000000000000000000000000000..418ba68ead949a4f0d89cc674f9784a359c3b519
GIT binary patch
literal 1809
[... base85-encoded binary patch data omitted ...]
literal 0
HcmV?d00001

diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/package.rds b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/package.rds
new file mode 100644
index 0000000000000000000000000000000000000000..7d400264daef72822a92841e8ca2ea92aec0074b
GIT binary patch
literal 1081
[... base85-encoded binary patch data and the garbled start of the NAMESPACE diff omitted ...]
+importFrom(dplyr,across)
+importFrom(dplyr,arrange)
+importFrom(dplyr,bind_rows)
+importFrom(dplyr,desc)
+importFrom(dplyr,everything)
+importFrom(dplyr,filter)
+importFrom(dplyr,group_by)
+importFrom(dplyr,group_split)
+importFrom(dplyr,if_else)
+importFrom(dplyr,pull)
+importFrom(dplyr,select)
+importFrom(dplyr,summarize)
+importFrom(evalcast,weighted_interval_score)
+importFrom(jsonlite,read_json)
+importFrom(lubridate,day)
+importFrom(lubridate,days_in_month)
+importFrom(lubridate,make_date)
+importFrom(lubridate,month)
+importFrom(lubridate,year)
+importFrom(parallel,detectCores)
+importFrom(plyr,rbind.fill)
+importFrom(quantgen,quantile_lasso)
+importFrom(readr,read_csv)
+importFrom(readr,write_csv)
+importFrom(rlang,.data)
+importFrom(rlang,.env)
+importFrom(stats,coef)
+importFrom(stats,nlm)
+importFrom(stats,pbeta)
+importFrom(stats,predict)
+importFrom(stats,setNames)
+importFrom(stringr,str_interp)
+importFrom(stringr,str_split)
+importFrom(tibble,tribble)
+importFrom(tidyr,crossing)
+importFrom(tidyr,drop_na)
+importFrom(tidyr,fill)
+importFrom(tidyr,pivot_longer)
+importFrom(tidyr,pivot_wider)
+importFrom(utils,head)
+importFrom(zoo,rollmeanr)
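The NAMESPACE above imports quantile_lasso from quantgen, the routine behind the lasso-penalized quantile regression described in the DESCRIPTION. A minimal sketch of such a call on simulated data, assuming the quantgen interface quantile_lasso(x, y, tau, lambda) with a predict(newx = ...) method and an available LP solver; the data and penalty value are invented for illustration, not taken from the package:

    library(quantgen)

    set.seed(42)
    x <- matrix(rnorm(200 * 5), nrow = 200)   # toy covariate matrix
    y <- x[, 1] + rnorm(200)                  # toy response

    # Fit the 0.9 quantile with an (arbitrary) lasso penalty, then predict
    # the corresponding quantile for each row of newx.
    fit  <- quantile_lasso(x, y, tau = 0.9, lambda = 0.1)
    pred <- predict(fit, newx = x)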
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# A copy of the GNU General Public License is available at
+# https://www.r-project.org/Licenses/
+
+local({
+    info <- loadingNamespaceInfo()
+    pkg <- info$pkgname
+    ns <- .getNamespace(as.name(pkg))
+    if (is.null(ns))
+        stop("cannot find namespace environment for ", pkg, domain = NA);
+    dbbase <- file.path(info$libname, pkg, "R", pkg)
+    lazyLoad(dbbase, ns, filter = function(n) n != ".__NAMESPACE__.")
+})
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/R/delphiBackfillCorrection.rdb b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/R/delphiBackfillCorrection.rdb
new file mode 100644
index 0000000000000000000000000000000000000000..b496edd6577e0f046af3828f0bd591becd046676
GIT binary patch
literal 57192
[binary data omitted]
za(*)>3H^ zRHIq%_*l?7*lBM$^VQPDik6u(hWrgkRmFLtE)5%BAc4wzGE6zM z`tMTxb1jBBUWRKGs2gRYZdgTHoPuKn6+ont4H0@VMChdsgw`qr@f!MnD!d$$)=CGo zQ}hB&pXE%|8?bMO^(s-HoxTUoGPxeu?>YkIWiBjcPT@OMue)^@(AundZSyx}{qw>U96o^6197X**qC1vZI) zQF`izb5^$`>%77`R-qhRTfT_7mKpRjL0hwdI1S>yxi+r+yRT5J&)x&s#bf}V+1+}y zW!HSejNrk~?WwKSwExhu13Z7nix(>e+m_@`8|SsUL0^Kami^Ky8RsC|28o^yNa&Ys z#!VBS_rnNY9>vT-BF_79x369LIJtV8V}=Q|J??}d?x5H0r+GisVJT9#r~;3hMBEAZ zVNZTm(p7;+4DtKie)GI-gLm{&n0H&ext}Eb1pIX5w*f68xC1wkUj$pOfjn6|gf`ut z4p$2(eTzCUe}ia8&~;>X)_ zBVbm`uI|&kJ~`su^6PGYmiIT@{v7YGp_Q&&ErQGPA2eG>?S^w&+wcxf$$y}Xiz4UX z;ld@+G!}wpL#{8}`O>A2Ki0qFBU#ubgYsD5{_<$qB?5~)lK8;0b`b?M0r3veC!o{0 z1Ar!B5Eufu4#NU6z*q<}0wx7a2VfT9wsXLI2<8On0|( inSd9-fq*Z7F9q=Lc%0V?>`8dnCBEw9f%hNv@NsOydnw2O literal 0 HcmV?d00001 diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/R/delphiBackfillCorrection.rdx b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/R/delphiBackfillCorrection.rdx new file mode 100644 index 0000000000000000000000000000000000000000..4d227b061bc9b519ebb4481b41a09b384d1f5497 GIT binary patch literal 1098 zcmV-Q1hxAgiwFP!0000017(&?Y#T)s$JdG7#CDUEHZ4#E5J*UrLLg01AQDI$+X*I) zZM^Gzh{Lei9ow6(cg=n{?S+C60wGjU3B&=UqMqSg4jhm`NQe^>Li7d_;=mbBNG<~Brd^TkT^=eKYassRpQ&Q&k4@Y z!oDc+<`I4$wkh#%>G%aa%C^M2urEveMLPe&Pq0~~40NFAfazlF_Ag#7nd@)q1&FQuRhl zBWk@&wCYA(t!asOs-|v`rdm$0z;Fj|GTI0gpSRkdwcC~!3tT%MEmdIIMZS$;W^@^rf>qk<}GnK8* zr{(lUS=~!uMsKOrI(rEkpE0vjscbSP>e?FLM3IT4s%h=YIMUuqS>5KD>{WH1)x2R@ zWW_Ru0Wn;QgofXxArH=cmT|@F98!8oeqmtvMn51O&xb!m<&zCCU(mBVsR+Rvei)dB z!wWjcL>AHO^gTE1@xZ1}5sO&1Ph2k~^s*iBIHwXHYcnh!hSYZx*9jzGkdEy@GeX_X*x8V2r&~rNiPpPKO`ZW>C+%`_}cS`?U2451h(5H(IBf=HF}=tQ?Qy1a%z5vjXZwcDPprVk#PLw7L^&IWC!r4q+kD(1 zI3SF965jp+HA8#IbI+qttZBwaUyOxO!|<=%k6f~k51pMJKYv+EC2=x{_5&o3dfC?K z7grO8E~IEVMHfdoDTVkRjh|H9Nq(Y3!?)Qr$+unbX@~mMHHBh&{Q;Ub QpcV`IKLrl57*q)W0EfdRJpcdz literal 0 HcmV?d00001 diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/AnIndex b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/AnIndex new file mode 100644 index 000000000..a91f2f9f3 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/AnIndex @@ -0,0 +1,34 @@ +add_7davs_and_target add_7davs_and_target +add_dayofweek add_dayofweek +add_params_for_dates add_params_for_dates +add_shift add_shift +add_sqrtscale add_sqrtscale +add_weekofmonth add_weekofmonth +create_dir_not_exist create_dir_not_exist +create_name_pattern create_name_pattern +data_filteration data_filteration +delta delta +est_priors est_priors +evaluate evaluate +export_test_result export_test_result +fill_missing_updates fill_missing_updates +fill_rows fill_rows +frac_adj frac_adj +frac_adj_with_pseudo frac_adj_with_pseudo +generate_filename generate_filename +get_7dav get_7dav +get_files_list get_files_list +get_model get_model +get_populous_counties get_populous_counties +get_weekofmonth get_weekofmonth +main main +main_local main_local +model_training_and_testing model_training_and_testing +objective objective +read_data read_data +read_params read_params +run_backfill run_backfill +run_backfill_local run_backfill_local +subset_valid_files subset_valid_files +training_days_check training_days_check +validity_checks validity_checks diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/aliases.rds b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/aliases.rds new file mode 100644 
index 0000000000000000000000000000000000000000..fd26621643164829a81fef1417b4e77bc24a4a5f
GIT binary patch
literal 427
[binary data omitted]

diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/delphiBackfillCorrection.rdb b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/delphiBackfillCorrection.rdb
new file mode 100644
index 0000000000000000000000000000000000000000..511908712e9a218cd905de9c3c8b731369315483
GIT binary patch
literal 57034
zY9Qd8F>G&n*uai>v7~6Z^5lz$bAcRv*$FMg+(=wC2meo(sJl>IZxxpsmnzQji$f>& z?C%x6FX)4K%FJ)}{-=CS{u_mO=5U zH~C}2l?y>9nWmYCZmh&A*+>N^IaMtnVSWsXe!+9GjdOv12d126wmfgy2czp}^KbQf z3)Rw^zF~|Bu{hd#v~(#8U@gr-O(J)DUlyNBvoA1_uYL;494M2!vz5M^0GB$jZpJ}Q zD}ow9!nZyCLoxQ=oUon1!y|GQ@9)cuACV61-~3fw>QB;3m6q9PDbOtq9n!BttKc#- zAGr_Iz!}^UoXxaL`vsV4RZ0c`fY$J>pVbxY<~dPZ+FfWthkR>8dISEP=+4rDl7M(( zMP8tmop$aTC>jsSTGGl8t%F{~T&uYoJvWeBp&{jk1|~^fy5_Hp9_=}G;k?cJBGd_R z+PrNylAV0AacurF`i-|1AtSivOB=BceIBcY_O#_7i(I@Stwbtl3Q|H=yn$tWQ(2Ln zVRVUwrcTY(#l-Q#Jo>aSrqtJL*VXRD1jy?>P-4zE8Fjj<&;@g0HOf}^uEQfQoC6Q9 zcL4UYGu9$&&*{PT)_YZ7{ETt?Y(gA)7iK+$qnLKU%$!f_alPdoweeE!YP6nGI{nkU68~u6<0G{YFuTu^GN|0`|K6ElXv&AnO*emjJue&D<7iJRf z_Huc=odiXLB8|>qUeL?`F(JKkWy-ylg~Dl-YWjCvV2HH|f|dts zryQD!QB!~{IJe+i=^y(Rr=g(7MCr)L z0;FHSF=C-Bwqz7-dwVhXvQX-_hPQ>GRdJ;lXr!Nf8~B3K_RA6dgpF~4L)}T_b3*9A zaI<)<)eugT{zN*P!eAd@=wC}$RDDF@Orcem5TqjbeymT_b+|jba5Gh%9>hr~2lMOj z6Z9UI#HuqZB{UF;5NJs$=+ndnC+HT%jQNq4n4o#N2ImYj0p(JF32jksk&g~S7VL^d zVW`r5WQ3>_8G@gXq(pKwV-jOTwnbXgDJkHnM~>Kgxb}gMr8hugSPaw-){=*yYd^3Y ztq(vz6n`*<+VG5Q3fL8`%sf@d^hbDt z-H$mM{a5ma(c1ZVsFarGuq4-2nyYO{@TEg#;&twZOAAConeLS#p#6lKFWnW{7{E+@WuU7_cz3I)pa3_G>VEs*>Ph1TeJxlpCc2 z;U_HK?On-^k8!a?T_lf=x|1!;NG`e8qC7QXR9y@h=Y95qJ<7NxA^3?^U3jNwOc%xj2UaY%0K%iZcUDv(!=q9m zb5V?7F0)Pv+pfXyU`x%)ZXlFmHjxsgZru(uB+r@NY_%Z@q$<@^{bC`_Uz^Nz^(wC3 zDvMwV9K(`=+6GYtG3;eSVR>FF)P+yuYq;MTR z2)y)aOj27=cx+S=aJDYsRlJkzkx10QK_19F3ATedVpgl-+&E@h?Lqq*cGx`#T=rz| zaKq7*Yuc(FijgzeK0EJqOj>mGT$yJ)7lfOfV9ldO=!gl`&s0)~jMj6~tg)l+mL(fkVS&6V^@17dMs*l&fbb zR^GsYHEH5Wk-5_S9H`F?m#y@nUGH}_vU;^ZOCUr60^f?>z>}CEKIT^F&M>i!J7b;K z69Ll`r@6`=@5ozF$cMB(Ii*oGV1TIKqwd#o4LlB;KmDzXWMbmh_`uXi6XcMMzvkHs zC%Gh4-W_#TfroaINLEVCp8aWQqA<*Oj=Lhpb{bB_MA2>Pm;oJ&_I4j9s4+{>mNIk= zv}1Gbh?NuPLu(+7bS+CXsW!0ZfWF|wVG23s(;U#R9<-6FOl0BAV{kjx)&gyEa1nKkIkwB= zjqY3mVO92Zazm)Hl}`*iol;*SK_Qjwt-06s9GK4UI0AuU!v|)wL3~La+q=^^sWvq z*$6G|VshyIfP-%TodnWEZ2ZW-rIo}!@3bX`AextM3CG}Sid?SQo#=x#^&}$f^PYg~ zr4Uj5Vi5fS$-+Y;6CyExi;q!T6FWRXEbUy(i+)Jjbh)VLy70hJl6|_Dqi$I;c zi^VN-uE;|-mH0g(yC=F2WzD| zd_mBCditLMe(V5x|II^-@-jI{R#$cGOkKDqAqZ*bWH;OQ1m{}2II(e%;x9dGKI3_w zND5_M`7)YFKPsIS>m^r)Znnz6`itna-)BoZXIjfZ7GW$1|vY$NpYiYMKt!Cg88&82ANGVj~<&up{2wAq_%gS zdw7W1{g%6$_7Tc>`2X&0|egC5ZEAky!jgNITPmvE|Aq6maL z!+_@0rTl=j*4n7RxeAzMt>GuAoJf%x002hxe^E`=ql7w0VTWT5j{=FfqX*0rV^5JJ zmAUNnFV|Feo6uV0*iHVi(Y@#Ug);TVN(6qYj@`fOvb=bCk5+h=odpoW1VC-WDhZ95Rq&Ri_=S2!%PpE1 z?u*{@mj=|eI-g$w4Br4*5=ek(tm$0=I&A=a)*cwS0CDqnya@~bIcp~ z#9@ire~`TzSWnOREU5=mil8OAY6s*<2uncL9}CeQ_*r!5kXm?xd?`ViHR7Bj?wxQF zL2z8!xaH|NjgsbKO^}xw2bgba6&K4Jl`C9^MpBx%cgt{uFt;;WDY3m|duqx^S(U&I zC@?uf4QEQkD`PDb0~l4Ifmq>OK9}>Aqh@w9`U(&DP-MMKO$F4`i5tWx)ziZsmpOdA z8dY9?d=?pNS_WR~Q>Yk%*2)!iH`@Po0#&iRx}R*>z-w{1QhB)C+jZ!tByI+75zR1W zq6GPn`0Y~9D00dy&<1!{&YOv^d?k4(A|Sjk{(d5oVml@J17Y9{n$QMzjjAt4(YyL@ z?eyZA#C&W7Rvb0o-}^X%8F>g;=}jr08pqrMFIYQh*Hz2m>0swrej`Rruqjktp5 z5rnpwm8d&XqB5wq*p#GfgtF-b?Wac740mDMS!vq&?VQ?KVSjW=k7TodC4wl@+M!+} zJs((ssv>Lq@1b5-h}`JVomxTU(FV7meqYBA4;&f0D@u_NTZdL$vDt1JaJWYIcXl+@ zk?U+FU5d01F&pY)D|s_5ETqb^Iov!W!@^BCUQ^g>Ah%@Fa{DXJY3P9(?NibqLOu&u zrBiQcx_*X`eEBw-8Z4v)v9JRTzabHuUTCsoN{Q^sOL2tLi&!r0mVI+j0@KzB&J$NN0+Vytw8BG)?jE7ZH5KJ$ z_m{NXOtTxwAUQH_)v=q&6BZR3tW7I7(@QL!iZ^YS{rJBeb`8Hd2><^k?edVcpMkD+ zfjfM`umAT@F^wP)>^oHCVkHc>v!f#$h6oBlYtZ|tpA8^l{$5}9!@{glLV~KAYgp_k zo{G%abT>x!0`sVRR~@M*4M2LaQ$Nu>6=D>GG#4vM4w7~$pJtwEkvY>}9*IeJsr5Rk zi9TySa~^2g)I!I&%=Y-(*b*Tj56!_I1!<~D3i4(#)u(w&UGF~}++2)anyV4ghCMJ(w#YBN9ams>mgGZ*2_jqh%hyQzV4bkG zHzy%EJwpDbiZB19ieZ0KMct)>B%RcWhQFyIRSXi)Oz0x6eNI@QF3!uA;;jE)Q~T4S znC{!ulKmy2!uL=Jp%|CTC6m>rsTlNeAjD=pBu!(pM~FY3=(di$Lk)+VLiSVgygv5UeTlw%tT#`HS82>Ak4) 
zZnE5NQ$B8zhk3qOTr%9CZe8f`R{rmIG2)}3cbo1RtHWm;Zzxt;c%rwmH!v&>=4y>} zg%<5AsR`dg)(58YhF8q^r8hD`Z)P6Jih&;SE zc|l*GUgM8oHP50t!IlYFycF85ZP%+?ltN!f-Fdc=GvusMM{3NSz<%znU9%_QWk+|e z3A3RV!legVeK}saBkQzV38b-uom~M!h$o{2I;k&5s1&-vfkP73Fg5V`DwMc*nH8TO zGGF_XuNKgee)4pQ6&%YZ{x*~O@pd?5m8vHRYD;>P3yPWAI(xeS4Z#T27TGO@d`7S< zG6p3GFMMsXpg&WuEveH*JtY8%n0)pZ7sqza!{8oaWlNN}7mIRo>-f%9i!_0!s0*aM zVYf~lCXHfzuUty~{zXB>kma$ezvx^w%diPy#$ssB5J9~>rp4#*;*rXBH0D8sd*TM- YtAq@#t4{O4Iq$XV_Se2tS1;NB0r)k{A;pUNlMqi3TrfOi)g0JfMk&i!pG;gW*QR zSN$<>-ZDu~_3yn`ul`l_@@A$RjIjY$T?XGZcxAQMU)%HO6l2R)XO4W_2+s<5AgXeJ zv1*Du1Y^}h9(f<8T|i#>Ch&X6YYzi|kG#PL{u=qd85m9>Kd=}4zac-|qWQK=z;F)P z1Fa)JBR|~)eWS>`PlDzLkq^LAJ&pY8HQ=9+58q_0aU5A*0{>p*`Ii|RzKoo{0s2lL zAG;soFCm}U#Ms~#)`?-+7 z@DcJ?2SM*XlpwT}IXJsMyYyiG3|07Y{VY6fW@tqIxbveAVBbT z7=caLP?h#tAR{XZXh>suqC=%isl&1>q7-r#yh<)6N?UGSq?RU)%C*-(?ud6nW5Rfb zrwQ3`=F@7Sm$t2i&qXlTn_Mto2<weElfV^ZcT(_Oj$Mr5 z5uQoiSvv~0Ss2EhAyG*hsnl>^rLhfrN=KdZ&O@(uL|C3!!@cuJ!BrKO^;vpuH8|Yn zvkHL&g7;E{YRyUChTT$o9WKfhP4DQJXNJ6B3e$*AW8M<}ygR+}{QGT0#43}v42XyO z3WNL@%BtuIgCd0;&q}|n($OqBv`2HRtNQ=$Y@6=mT4%|R`~&<9^2Al(zP9qz4dC6# z&piwL67v3wlnM_Q`vLex3VNX)BNxJ zP2*2?fp;N)I7sK8evHol{0Q}*-%R<-VXA-mE;|34kLiAXboyRE{smfmrLWCi`3|_R zE&ee?vwIla(hXq9F%c)=g!9q8p>&c;u5b% zu}N_&#cdR~Q=FtYMR5nwNSF1fMvrRrmIPCc9@XekjULtLQH>te=uwSyohH@Tq#B!4 zV{^&ett2s}ZG^THnj|zuXh+^KnENAE$sbY_17*AG3LT0T_N0cEZK-AKqs3R91b$JB Q3|!Lx0kjb`A-5aRQ0=V}T356){e`xF9&&hqih5lOHqVJi9G~xG9d0_)VaitIVpQ(MAk7 zL}-0rA?cWPZz|Bm-n5nVV|*ScO0+t&K2^XKq=U=2&xLV384>t6t8jP!wd_@CnYM(P^R_TYM}jdJ{+n!iUD~upJMq}c zqN~!P%h3k{O05-ioGjH7CFbW2CPw2rn8wP|(EQxsHx&)9HlPGl7Zr8kUIZIMJ&aLL z%4gk|!n#ho6h(E4r#glEdb*O!)VXfWqK_4bQaeSke`M>@^Dwyv&zEG<_XIv?&dApun`06z4T9r*7V^XF~er zMcP~IdPrQ*r-k-mdHSvf(r%HRQSVEmCI`#?sd{SEL)G6Cva~Hu%gEuB>w2Sh>9L-5 z?C&t4aLaip-CIu*np@1!cITQ~ZGM+1ZtQ`J)`UpBac4d##0&oX`~?tK?&x+D007+) B8leCH literal 0 HcmV?d00001 diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/html/00Index.html b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/html/00Index.html new file mode 100644 index 000000000..13e5f70ce --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/html/00Index.html @@ -0,0 +1,93 @@ + + +R: Correct signal outliers + + + + diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/html/R.css b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/html/R.css new file mode 100644 index 000000000..2ef6cd609 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/html/R.css @@ -0,0 +1,120 @@ +@media screen { + .container { + padding-right: 10px; + padding-left: 10px; + margin-right: auto; + margin-left: auto; + max-width: 900px; + } +} + +.rimage img { /* from knitr - for examples and demos */ + width: 96%; + margin-left: 2%; +} + +.katex { font-size: 1.1em; } + +code { + color: inherit; + background: inherit; +} + +body { + line-height: 1.4; + background: white; + color: black; +} + +a:link { + background: white; + color: blue; +} + +a:visited { + background: white; + color: rgb(50%, 0%, 50%); +} + +h1 { + background: white; + color: rgb(55%, 55%, 55%); + font-family: monospace; + font-size: 1.4em; /* x-large; */ + text-align: center; +} + +h2 { + background: white; + color: rgb(40%, 40%, 40%); + font-family: monospace; + font-size: 1.2em; /* large; */ + text-align: center; +} + +h3 { + background: white; + color: rgb(40%, 40%, 40%); + font-family: monospace; + font-size: 1.2em; /* large; */ 
+} + +h4 { + background: white; + color: rgb(40%, 40%, 40%); + font-family: monospace; + font-style: italic; + font-size: 1.2em; /* large; */ +} + +h5 { + background: white; + color: rgb(40%, 40%, 40%); + font-family: monospace; +} + +h6 { + background: white; + color: rgb(40%, 40%, 40%); + font-family: monospace; + font-style: italic; +} + +img.toplogo { + width: 4em; + vertical-align: middle; +} + +img.arrow { + width: 30px; + height: 30px; + border: 0; +} + +span.acronym { + font-size: small; +} + +span.env { + font-family: monospace; +} + +span.file { + font-family: monospace; +} + +span.option{ + font-family: monospace; +} + +span.pkg { + font-weight: bold; +} + +span.samp{ + font-family: monospace; +} + +div.vignettes a:hover { + background: rgb(85%, 85%, 85%); +} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/startup.Rs b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/startup.Rs new file mode 100644 index 000000000..8ad6d2508 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/startup.Rs @@ -0,0 +1,4 @@ +## A custom startup file for tests +## Run as if a system Rprofile, so no packages, no assignments +options(useFancyQuotes = FALSE) + diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat.R new file mode 100644 index 000000000..83f3bb312 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat.R @@ -0,0 +1,4 @@ +library(testthat) +library(delphiBackfillCorrection) + +test_check("delphiBackfillCorrection", stop_on_warning = FALSE) diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat.Rout.fail b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat.Rout.fail new file mode 100644 index 000000000..7824a543f --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat.Rout.fail @@ -0,0 +1,33 @@ + +R version 4.2.0 (2022-04-22) -- "Vigorous Calisthenics" +Copyright (C) 2022 The R Foundation for Statistical Computing +Platform: x86_64-pc-linux-gnu (64-bit) + +R is free software and comes with ABSOLUTELY NO WARRANTY. +You are welcome to redistribute it under certain conditions. +Type 'license()' or 'licence()' for distribution details. + +R is a collaborative project with many contributors. +Type 'contributors()' for more information and +'citation()' on how to cite R or R packages in publications. + +Type 'demo()' for some demos, 'help()' for on-line help, or +'help.start()' for an HTML browser interface to help. +Type 'q()' to quit R. + +> library(testthat) +> library(delphiBackfillCorrection) +> +> test_check("delphiBackfillCorrection", stop_on_warning = FALSE) +[ FAIL 1 | WARN 0 | SKIP 0 | PASS 250 ] + +== Failed tests ================================================================ +-- Failure (test-preprocessing.R:109:3): testing adding columns for each week of a month -- +all(...) 
is not TRUE + +`actual`: FALSE +`expected`: TRUE + +[ FAIL 1 | WARN 0 | SKIP 0 | PASS 250 ] +Error: Test failures +Execution halted diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/helper-relativize.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/helper-relativize.R new file mode 100644 index 000000000..3d62d6a7f --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/helper-relativize.R @@ -0,0 +1,13 @@ +## Helper functions to relativize paths to the testing directory, so tests can +## be run via R CMD CHECK and do not depend on the current working directory +## being tests/testthat/. + +library(testthat) + +relativize_params <- function(params) { + params$export_dir <- test_path(params$export_dir) + params$cache_dir <- test_path(params$cache_dir) + params$input_dir <- test_path(params$input_dir) + + return(params) +} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/params-run.json.template b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/params-run.json.template new file mode 100644 index 000000000..f2224855a --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/params-run.json.template @@ -0,0 +1,8 @@ +{ + "training_end_date": "2022-01-01", + "training_days": 7, + "ref_lag": 3, + "input_dir": "./input", + "export_dir": "./output", + "cache_dir": "./cache" +} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/params-test.json.template b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/params-test.json.template new file mode 100644 index 000000000..fb8309e94 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/params-test.json.template @@ -0,0 +1,3 @@ +{ + "input_dir": "./test.tempt" +} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-beta_prior_estimation.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-beta_prior_estimation.R new file mode 100644 index 000000000..59ea2beda --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-beta_prior_estimation.R @@ -0,0 +1,130 @@ +context("Testing helper functions for beta prior estimation") + +# Constants +indicator <- "chng" +signal <- "outpatient" +geo_level <- "state" +signal_suffix <- "" +lambda <- 0.1 +geo <- "pa" +value_type <- "fraction" +model_save_dir <- "./cache" +training_end_date <- as.Date("2022-01-01") + +# Generate Test Data +main_covariate <- c("log_value_7dav") +null_covariates <- c("value_raw_num", "value_raw_denom", + "value_7dav_num", "value_7dav_denom", + "value_prev_7dav_num", "value_prev_7dav_denom") +dayofweek_covariates <- c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", + "Fri_ref", "Sat_ref") +response <- "log_value_target" + +set.seed(2022) +train_beta_vs <- log(rbeta(1000, 2, 5)) +test_beta_vs <- log(rbeta(50, 2, 5)) +train_data <- data.frame(log_value_7dav = train_beta_vs, + log_value_target = train_beta_vs) +train_data$value_target_num <- exp(train_beta_vs) * 100 +train_data$value_target_denom <- 100 +test_data <- data.frame(log_value_7dav = test_beta_vs, + log_value_target = test_beta_vs) +for (cov in null_covariates){ + train_data[[cov]] <- 0 + test_data[[cov]] <- 0 +} +for (cov in c(dayofweek_covariates, "Sun_ref")){ + train_data[[cov]] <- 1 + test_data[[cov]] <- 1 +} +prior_test_data <- test_data +covariates <- c(main_covariate, dayofweek_covariates) + + + 
+test_that("testing the sum of squared error", { + fit <- c(0, 1, 0) + actual <- c(1, 1, 1) + + expected <- 1^2 + 1^2 + computed <- delta(fit, actual) + expect_equal(expected, computed) +}) + + +test_that("testing the squared error objection function given the beta prior", { + theta <- c(log(1), log(2)) + x <- qbeta(TAUS, 1, 2) + + expected <-0 + computed <- objective(theta, x, TAUS) + expect_equal(expected, computed) +}) + + +test_that("testing the prior estimation", { + dw <- "Sat_ref" + priors <- est_priors(train_data, prior_test_data, geo, value_type, dw, TAUS, + covariates, response, LP_SOLVER, lambda, + indicator, signal, geo_level, signal_suffix, + training_end_date, model_save_dir) + alpha <- priors[2] + beta <- priors[1] - alpha + expect_true((alpha > 1) & (alpha < 3)) + expect_true((beta > 4) & (beta < 6)) + + for (idx in 1:length(TAUS)) { + tau <- TAUS[idx] + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda, + geo=geo, dw=dw, tau=tau, + value_type=value_type, + training_end_date=training_end_date, + beta_prior_mode=TRUE) + model_path <- file.path(model_save_dir, model_file_name) + expect_true(file.exists(model_path)) + file.remove(model_path) + } +}) + + +test_that("testing the fraction adjustment with pseudo counts", { + value_raw <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_raw_num", "value_raw_denom") + expect_true(all(value_raw == 1/100)) + + dw <- "Sat_ref" + value_raw <- frac_adj_with_pseudo(train_data, dw, 1, 100, "value_raw_num", "value_raw_denom") + expect_true(all(value_raw == 1/100)) +}) + + +test_that("testing the main beta prior adjustment function", { + set.seed(1) + updated_data <- frac_adj(train_data, test_data, prior_test_data, + indicator, signal, geo_level, signal_suffix, + lambda, value_type, geo, + training_end_date, model_save_dir, + taus = TAUS, lp_solver = LP_SOLVER) + updated_train_data <- updated_data[[1]] + updated_test_data <- updated_data[[2]] + + for (dw in c(dayofweek_covariates, "Sun_ref")){ + for (idx in 1:length(TAUS)) { + tau <- TAUS[idx] + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda, + geo=geo, dw=dw, tau=tau, + value_type=value_type, + training_end_date=training_end_date, + beta_prior_mode=TRUE) + model_path <- file.path(model_save_dir, model_file_name) + expect_true(file.exists(model_path)) + file.remove(model_path) + } + } + + expect_true(unique(updated_train_data$value_raw) == unique(updated_test_data$value_raw)) + expect_true(all(updated_train_data$value_raw < 3/(3+4))) + expect_true(all(updated_train_data$value_raw > 1/(1+6))) +}) + diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-io.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-io.R new file mode 100644 index 000000000..07636e140 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-io.R @@ -0,0 +1,118 @@ +library(arrow) + +context("Testing io helper functions") + +# Constants +indicator <- "chng" +signal <- "outpatient" +geo_level <- "state" +signal_suffix <- "" +lambda <- 0.1 +geo <- "pa" +value_type <- "fraction" +date_format = "%Y%m%d" +training_end_date <- as.Date("2022-01-01") + +create_dir_not_exist("./input") +create_dir_not_exist("./output") +create_dir_not_exist("./cache") + +test_that("testing exporting the output file", { + params <- read_params("params-run.json", "params-run.json.template") + + test_data <- data.frame(test=TRUE) + coef_data <- data.frame(test=TRUE) + + 
export_test_result(test_data, coef_data, indicator, signal, + geo_level, signal_suffix, lambda, + training_end_date, + value_type, params$export_dir) + prediction_file <- file.path(params$export_dir, "prediction_2022-01-01_chng_outpatient_state_lambda0.1_fraction.csv") + coefs_file <- file.path(params$export_dir, "coefs_2022-01-01_chng_outpatient_state_lambda0.1_fraction.csv") + + expect_true(file.exists(prediction_file)) + expect_true(file.exists(coefs_file)) + + # Remove + file.remove(prediction_file) + file.remove(coefs_file) + file.remove("params-run.json") +}) + + +test_that("testing creating file name pattern", { + params <- read_params("params-run.json", "params-run.json.template") + + daily_pattern <- create_name_pattern(indicator, signal, "daily") + rollup_pattern <- create_name_pattern(indicator, signal, "rollup") + + # Create test files + daily_data <- data.frame(test=TRUE) + daily_file_name <- file.path(params$input_dir, + str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")) + write_parquet(daily_data, daily_file_name) + + rollup_file_name <- file.path(params$input_dir, + str_interp("chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")) + rollup_data <- data.frame(test=TRUE) + write_parquet(rollup_data, rollup_file_name) + + + filtered_daily_file <- list.files( + params$input_dir, pattern = daily_pattern, full.names = TRUE) + expect_equal(filtered_daily_file, daily_file_name) + + filtered_rollup_file <- list.files( + params$input_dir, pattern = rollup_pattern, full.names = TRUE) + expect_equal(filtered_rollup_file, rollup_file_name) + + file.remove(daily_file_name) + file.remove(rollup_file_name) + file.remove("params-run.json") +}) + + +test_that("testing the filtration of the files for training and predicting", { + params <- read_params("params-run.json", "params-run.json.template") + + daily_files_list <- c(file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY-15, date_format)}.parquet")), + file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")), + file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY, date_format)}.parquet"))) + daily_valid_files <- subset_valid_files(daily_files_list, "daily", params) + expect_equal(daily_valid_files, daily_files_list[2]) + + rollup_files_list <- c(file.path(params$input_dir, str_interp( + "chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY-11, date_format)}.parquet")), + file.path(params$input_dir, str_interp( + "chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")), + file.path(params$input_dir, str_interp( + "chng_outpatient_from_${format(TODAY, date_format)}_to_${format(TODAY+3, date_format)}.parquet"))) + rollup_valid_files <- subset_valid_files(rollup_files_list, "rollup", params) + expect_equal(rollup_valid_files, rollup_files_list[2]) + + file.remove("params-run.json") +}) + +test_that("testing fetching list of files for training and predicting", { + params <- read_params("params-run.json", "params-run.json.template") + + daily_data <- data.frame(test=TRUE) + daily_file_name <- file.path(params$input_dir, + str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")) + write_parquet(daily_data, daily_file_name) + + rollup_file_name <- file.path(params$input_dir, + str_interp("chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")) + rollup_data <- 
data.frame(test=TRUE) + write_parquet(rollup_data, rollup_file_name) + + + files <- get_files_list(indicator, signal, params) + expect_true(all(files == c(daily_file_name, rollup_file_name))) + + file.remove(daily_file_name) + file.remove(rollup_file_name) + file.remove("params-run.json") +}) + + diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-model.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-model.R new file mode 100644 index 000000000..2a1221344 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-model.R @@ -0,0 +1,173 @@ +context("Testing the helper functions for modeling") + +# Constants +indicator <- "chng" +signal <- "outpatient" +geo_level <- "state" +signal_suffix <- "" +lambda <- 0.1 +test_lag <- 1 +model_save_dir <- "./cache" +geo <- "pa" +value_type <- "fraction" +training_end_date <- as.Date("2022-01-01") + +# Generate Test Data +main_covariate <- c("log_value_7dav") +null_covariates <- c("value_raw_num", "value_raw_denom", + "value_7dav_num", "value_7dav_denom", + "value_prev_7dav_num", "value_prev_7dav_denom") +dayofweek_covariates <- c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", + "Fri_ref", "Sat_ref") +response <- "log_value_target" +train_beta_vs <- log(rbeta(1000, 2, 5)) +test_beta_vs <- log(rbeta(61, 2, 5)) +train_data <- data.frame(log_value_7dav = train_beta_vs, + log_value_target = train_beta_vs) +train_data$value_target_num <- exp(train_beta_vs) * 100 +train_data$value_target_denom <- 100 +test_data <- data.frame(log_value_7dav = test_beta_vs, + log_value_target = test_beta_vs) +for (cov in null_covariates){ + train_data[[cov]] <- 0 + test_data[[cov]] <- 0 +} +for (cov in c(dayofweek_covariates, "Sun_ref")){ + train_data[[cov]] <- 1 + test_data[[cov]] <- 1 +} +covariates <- c(main_covariate, dayofweek_covariates) + + +test_that("testing the generation of model filename prefix", { + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda) + expected <- "chng_outpatient_state_lambda0.1.model" + expect_equal(model_file_name, expected) +}) + +test_that("testing the evaluation", { + for (tau in TAUS){ + test_data[[paste0("predicted_tau", as.character(tau))]] <- log(quantile(exp(train_beta_vs), tau)) + } + result <- evaluate(test_data, TAUS) + expect_true(mean(result$wis) < 0.3) +}) + +test_that("testing generating or loading the model", { + # Check the model that does not exist + tau = 0.5 + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda, + geo=geo, test_lag=test_lag, tau=tau) + model_path <- file.path(model_save_dir, model_file_name) + expect_true(!file.exists(model_path)) + + # Generate the model and check again + obj <- get_model(model_path, train_data, covariates, tau, + lambda, LP_SOLVER, train_models=TRUE) + expect_true(file.exists(model_path)) + created <- file.info(model_path)$ctime + + # Check that the model was not generated again. 
+ obj <- get_model(model_path, train_data, covariates, tau, + lambda, LP_SOLVER, train_models=FALSE) + expect_equal(file.info(model_path)$ctime, created) + + expect_silent(file.remove(model_path)) +}) + +test_that("testing model training and testing", { + result <- model_training_and_testing(train_data, test_data, TAUS, covariates, + LP_SOLVER, lambda, test_lag, + geo, value_type, model_save_dir, + indicator, signal, + geo_level, signal_suffix, + training_end_date, + train_models = TRUE, + make_predictions = TRUE) + test_result <- result[[1]] + coef_df <- result[[2]] + + for (tau in TAUS){ + cov <- paste0("predicted_tau", as.character(tau)) + expect_true(cov %in% colnames(test_result)) + + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda, + geo=geo, test_lag=test_lag, tau=tau, + training_end_date=training_end_date) + model_path <- file.path(model_save_dir, model_file_name) + expect_true(file.exists(model_path)) + + expect_silent(file.remove(model_path)) + } + + for (cov in covariates){ + cov <- paste(cov, "coef", sep="_") + expect_true(cov %in% colnames(coef_df)) + } +}) + +test_that("testing adding square root scale", { + expect_error(result <- add_sqrtscale(train_data, test_data, 1, "value_raw"), + "value raw does not exist in training data!") + + train_data$value_raw <- rbeta(nrow(train_data), 2, 5) + expect_error(result <- add_sqrtscale(train_data, test_data, 1, "value_raw"), + "value raw does not exist in testing data!") + + test_data$value_raw <- rbeta(nrow(test_data), 2, 5) + expect_silent(result <- add_sqrtscale(train_data, test_data, 1, "value_raw")) + + new_train_data <- result[[1]] + new_test_data <- result[[2]] + sqrtscales <- result[[3]] + expect_true(length(sqrtscales) == 4) + for (cov in sqrtscales){ + expect_true(cov %in% colnames(new_train_data)) + expect_true(cov %in% colnames(new_test_data)) + } + expect_true(all(rowSums(new_train_data[sqrtscales]) %in% c(0, 1))) + expect_true(all(rowSums(new_test_data[sqrtscales]) %in% c(0, 1))) + + for (i in 0:2){ + m_l <- max(new_train_data[new_train_data[[paste0("sqrty", as.character(i))]] == 1, "value_raw"]) + m_r <- min(new_train_data[new_train_data[[paste0("sqrty", as.character(i+1))]] == 1, "value_raw"]) + expect_true(m_l <= m_r) + } + +}) + +test_that("testing data filteration", { + train_data$lag <- rep(0:60, nrow(train_data))[1:nrow(train_data)] + test_data$lag <- rep(0:60, nrow(test_data))[1:nrow(test_data)] + + # When test lag is small + test_lag <- 5 + result <- data_filteration(test_lag, train_data, test_data, 2) + train_df <- result[[1]] + test_df <- result[[2]] + expect_true(max(train_df$lag) == test_lag+2) + expect_true(min(train_df$lag) == test_lag-2) + expect_true(all(test_df$lag == test_lag)) + + # When test lag is large + test_lag <- 48 + result <- data_filteration(test_lag, train_data, test_data, 2) + train_df <- result[[1]] + test_df <- result[[2]] + expect_true(max(test_df$lag) == test_lag+7) + expect_true(min(test_df$lag) == test_lag-6) + + # Make sure that all lags are tested + included_lags = c() + for (test_lag in c(1:14, 21, 35, 51)){ + result <- data_filteration(test_lag, train_data, test_data, 2) + test_df <- result[[2]] + included_lags <- c(included_lags, unique(test_df$lag)) + } + expect_true(all(1:60 %in% included_lags)) +}) + + diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-preprocessing.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-preprocessing.R new file mode 100644 index 
000000000..8bde8c68e --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-preprocessing.R @@ -0,0 +1,132 @@ +context("Testing preprocessing helper functions") + +refd_col <- "time_value" +lag_col <- "lag" +value_col <- "Counts_Products_Denom" +min_refd <- as.Date("2022-01-01") +max_refd <- as.Date("2022-01-07") +ref_lag <- 7 +fake_df <- data.frame(time_value = c(as.Date("2022-01-03"), as.Date("2022-01-03"), + as.Date("2022-01-03"), as.Date("2022-01-03"), + as.Date("2022-01-04"), as.Date("2022-01-04"), + as.Date("2022-01-04"), as.Date("2022-01-05"), + as.Date("2022-01-05")), + lag = c(0, 1, 3, 7, 0, 6, 7, 0, 7), + Counts_Products_Denom=c(100, 200, 500, 1000, 0, 200, 220, 50, 300)) +wd <- c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat") +wm <- c("W1_issue", "W2_issue", "W3_issue") + + +test_that("testing rows filling for missing lags", { + # Make sure all reference date have enough rows for updates + df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) + n_refds <- as.numeric(max_refd - min_refd)+1 + + expect_equal(nrow(df_new), n_refds*(ref_lag+31)) + expect_equal(df_new %>% drop_na(), fake_df) +}) + + +test_that("testing NA filling for missing udpates", { + # Make sure all the updates are valid integers + + # Assuming the input data does not have enough rows for consecutive lags + expect_error(fill_missing_updates(fake_df, value_col, refd_col, lag_col), + "Risk exists in forward filling") + + # Assuming the input data is already prepared + df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) + n_refds <- as.numeric(max_refd - min_refd)+1 + backfill_df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) + + expect_equal(nrow(backfill_df), n_refds*(ref_lag+31)) + + for (d in seq(min_refd, max_refd, by="day")) { + expect_true(all(diff(backfill_df[backfill_df[,refd_col]==d, "value_raw"])>=0 )) + } +}) + + +test_that("testing the calculation of 7-day moving average", { + df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) + df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) + df$issue_date <- df[[refd_col]] + df[[lag_col]] + pivot_df <- df[order(df$issue_date, decreasing=FALSE), ] %>% + pivot_wider(id_cols=refd_col, names_from="issue_date", + values_from="value_raw") + pivot_df[is.na(pivot_df)] = 0 + backfill_df <- get_7dav(pivot_df, refd_col) + + + output <- backfill_df[backfill_df[[refd_col]] == as.Date("2022-01-07"), "value_raw"] + expected <- colSums(pivot_df[, -1]) / 7 + expect_true(all(output == expected)) +}) + +test_that("testing the data shifting", { + shifted_df <- add_shift(fake_df, 1, refd_col) + shifted_df[, refd_col] <- as.Date(shifted_df[, refd_col]) - 1 + + expect_equal(fake_df, shifted_df) +}) + + +test_that("testing adding columns for each day of a week", { + df_new <- add_dayofweek(fake_df, refd_col, "_ref", wd) + + expect_equal(ncol(fake_df) + 7, ncol(df_new)) + expect_true(all(rowSums(df_new[, -c(1:ncol(fake_df))]) == 1)) + expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-03"), "Mon_ref"] == 1)) + expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-05"), "Wed_ref"] == 1)) +}) + + +test_that("testing the calculation of week of a month", { + expect_equal(get_weekofmonth(as.Date("2021-12-31")), 1) + expect_equal(get_weekofmonth(as.Date("2022-01-01")), 1) + expect_equal(get_weekofmonth(as.Date("2022-01-02")), 1) + expect_equal(get_weekofmonth(as.Date("2022-01-09")), 2) + + 
expect_equal(get_weekofmonth(as.Date("2022-09-01")), 1) + expect_equal(get_weekofmonth(as.Date("2022-09-04")), 2) + expect_equal(get_weekofmonth(as.Date("2022-09-24")), 4) + expect_equal(get_weekofmonth(as.Date("2022-09-25")), 1) + + expect_equal(get_weekofmonth(as.Date("2022-10-01")), 1) + expect_equal(get_weekofmonth(as.Date("2022-10-02")), 1) + expect_equal(get_weekofmonth(as.Date("2022-10-09")), 2) + expect_equal(get_weekofmonth(as.Date("2022-10-16")), 3) + expect_equal(get_weekofmonth(as.Date("2022-10-23")), 4) + expect_equal(get_weekofmonth(as.Date("2022-10-30")), 1) + +}) + +test_that("testing adding columns for each week of a month", { + df_new <- add_weekofmonth(fake_df, refd_col, wm) + + expect_equal(ncol(fake_df) + 3, ncol(df_new)) + expect_true(all(rowSums(df_new[, -c(1:ncol(fake_df))]) == 1)) + expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-03"), "W2_issue"] == 1)) +}) + + +test_that("testing adding 7 day avg and target", { + df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) + backfill_df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) + df_new <- add_7davs_and_target(backfill_df, "value_raw", refd_col, lag_col, ref_lag) + + # Existing columns: + # time_value: reference date + # value_raw: raw counts + # lag: number of days between issue date and reference date + # Added columns + # issue_date: report/issue date + # value_7dav: 7day avg of the raw counts + # value_prev_7dav: 7day avg of the counts from -14 days to -8 days + # value_target: updated counts on the target date + # target_date: the date ref_lag days after the reference date + # and 5 log columns + expect_equal(ncol(df_new), 3 + 10) + expect_equal(nrow(df_new), 7 * (ref_lag + 30 + 1)) +}) + diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-utils.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-utils.R new file mode 100644 index 000000000..a733f2a1d --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-utils.R @@ -0,0 +1,136 @@ +context("Testing utils helper functions") + +test_that("testing create directory if not exist", { + # If not exists + path = "test.test" + create_dir_not_exist(path) + expect_true(file.exists(path)) + + # If already exists + create_dir_not_exist(path) + expect_true(file.exists(path)) + + # Remove + unlink(path, recursive = TRUE) + expect_true(!file.exists(path)) +}) + + +test_that("testing number of available issue dates for training", { + start_date <- as.Date("2022-01-01") + end_date <- as.Date("2022-01-09") + training_days = 10 + issue_date <- seq(start_date, end_date, by = "days") + expect_warning(training_days_check(issue_date, training_days = training_days), + "Only 9 days are available at most for training.") + + end_date <- as.Date("2022-01-10") + training_days = 10 + issue_date <- seq(start_date, end_date, by = "days") + expect_silent(training_days_check(issue_date, training_days = training_days)) +}) + +test_that("testing get the top200 populous counties", { + counties <- get_populous_counties() + + expect_true(length(counties) == 200) + expect_true("06037" %in% counties) +}) + +test_that("testing read parameters", { + # No input file + expect_error(read_params(path = "params-test.json", template_path = "params-test.json.template", + train_models = TRUE, make_predictions = TRUE), + "input_dir must be set in `params` and exist") + + # Check parameters + params <- read_json("params-test.json", simplifyVector = TRUE) + # Check 
initialization + expect_true(!("export_dir" %in% names(params))) + expect_true(!("cache_dir" %in% names(params))) + + expect_true(!("parallel" %in% names(params))) + expect_true(!("parallel_max_cores" %in% names(params))) + + + expect_true(!("taus" %in% names(params))) + expect_true(!("lambda" %in% names(params))) + expect_true(!("lp_solver" %in% names(params))) + expect_true(!("lag_pad" %in% names(params))) + + expect_true(!("taus" %in% names(params))) + expect_true(!("lambda" %in% names(params))) + expect_true(!("lp_solver" %in% names(params))) + + expect_true(!("num_col" %in% names(params))) + expect_true(!("denom_col" %in% names(params))) + expect_true(!("geo_levels" %in% names(params))) + expect_true(!("value_types" %in% names(params))) + + expect_true(!("training_days" %in% names(params))) + expect_true(!("ref_lag" %in% names(params))) + expect_true(!("testing_window" %in% names(params))) + expect_true(!("test_dates" %in% names(params))) + + # Create input file + path = "test.tempt" + create_dir_not_exist(path) + expect_silent(params <- read_params(path = "params-test.json", + template_path = "params-test.json.template", + train_models = TRUE, make_predictions = TRUE)) + unlink(path, recursive = TRUE) + + + expect_true("export_dir" %in% names(params)) + expect_true("cache_dir" %in% names(params)) + + expect_true("parallel" %in% names(params)) + expect_true("parallel_max_cores" %in% names(params)) + + + expect_true("taus" %in% names(params)) + expect_true("lambda" %in% names(params)) + expect_true("lp_solver" %in% names(params)) + + expect_true("taus" %in% names(params)) + expect_true("lambda" %in% names(params)) + expect_true("lp_solver" %in% names(params)) + expect_true("lag_pad" %in% names(params)) + + expect_true("num_col" %in% names(params)) + expect_true("denom_col" %in% names(params)) + expect_true("geo_levels" %in% names(params)) + expect_true("value_types" %in% names(params)) + + expect_true("training_days" %in% names(params)) + expect_true("ref_lag" %in% names(params)) + expect_true("testing_window" %in% names(params)) + expect_true("test_dates" %in% names(params)) + + expect_true(params$export_dir == "./receiving") + expect_true(params$cache_dir == "./cache") + + expect_true(params$parallel == FALSE) + expect_true(params$parallel_max_cores == .Machine$integer.max) + + expect_true(all(params$taus == TAUS)) + expect_true(params$lambda == LAMBDA) + expect_true(params$lp_solver == LP_SOLVER) + expect_true(params$lag_pad == LAG_PAD) + + expect_true(params$num_col == "num") + expect_true(params$denom_col == "denom") + expect_true(all(params$geo_levels == c("state", "county"))) + expect_true(all(params$value_types == c("count", "fraction"))) + + expect_true(params$training_days == TRAINING_DAYS) + expect_true(params$ref_lag == REF_LAG) + expect_true(params$testing_window == TESTING_WINDOW) + start_date <- TODAY - params$testing_window + end_date <- TODAY - 1 + expect_true(all(params$test_dates == seq(start_date, end_date, by="days"))) + + expect_silent(file.remove("params-test.json")) +}) + + diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/testthat-problems.rds b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/testthat-problems.rds new file mode 100644 index 0000000000000000000000000000000000000000..4ae5544a52e5d7530bcee31464fee74828076082 GIT binary patch literal 16934 zcmaL8c|26@8$bS3Peqb7QkW1bBq@?*iiCvhyD3X5q>?Sm`BchQBt_Oq$X2FFmcq1(78&2CHcK|i=n^asjtIr!CyQ}B5P5tik 
zLzYylw)EK6=6=MUKShJ%12_H(BkGPhO-QGmVfCN>Q1%M8y*hAc9`i9)Z}jg5Le!cs zD^=Z6Z$k6TXH&-tox#a#iI>H!3Nz08ckj2&h@)I8e1n*iGp-53{Cm1t?NmEy@^?qE z+Ah(khAm2?s(%+L34Bjk&0S!vDtSGX?H7K2cs6kBnAeGWrnO3i!B>aP5-mHvHoex+HoiL`a%(t{9-)m6<2INvGX$N7i$ z``Z2BV@3TXF3RQLcll={8GGf#+jY&1T$481TW5S8zbEx@EzLi;L_1sQ)qWXC>LN8G z&$Wa0_h;!5g^bZ$H6v%Mdv7MNkFQ@EvPFEc-lupYWaar9xlAW?zV+=A>%U2gW4Re&4#=Y|-!G!s%JD&O}Q8BeEJdP<5TQ`uL$++`eBDi>b(r$5~>>Ez%Q7 zhvF-=IbZBqjlUmC{4`TmS{#08(=6I@U*^)EYoR%Qw+kjF;=>Z;2@hxH_YS6Zzx<>> zF|cw$Gnchu`r>+R_d)OtqvAh$dssymPc&ey8y8Y1HCbxI7bTL$#(r9lzYRD2U&s@s ze$D#E4@XCgk+FXwmwE%m)1;jbl4%F14vh|0x8?VVRiF5G-$LC^g&xzhkvB8_uerVv ztp4~Ng^yMJoitwf^`;G?Lan9y&pXGIwZ>*d_Y)iaTbgy{KaE*i9$)qP`u^xaWa7DS z(K2O`+nkk+%$^@BRTH#_zZagH{x!NHw_1~*@xiymcWMN8jq1l@9@J74{o9vC+&Wm$ zsn;X%w|5Y9MOxHl+3#)WRP`g1^n|^NC{`ExG*v9SDg0HyICd7DhV2E^c?Z0M| z8E2ghqoSK8_i=iM29GA+A{nY|nw$-EA>SKc7zhkkoJl;QQ}QI>*N+H3VwN)zs`O9s zRVR$8EAL;#gw)k^`j<_^6V8de?-wqhoQ;SGk@o~lM2$IQyr0GLe%hp@{!TEG?ce&;lD9fjM(vb(;d>h z=$2?@p45dTn+yHh8x>kNEtMxWN{?DzlAw7_42rdUy4`Uqavq~sJDnE~Mn?WICuhZ1 z!cLvI@z(ZMhT`ICWXRc?;lHk(`;B38wf%eVosix%S@Pk`w|fsZK^?cg8TKZyV#a(D zYF6F2^ogRG&zJnlS@~0&V*b@Xx`V5n%wMZ@xT@ID@rG)*C;3y;^s}V}Z0-k5_1{rS zQ+*>Dg7D}~AB|<7Gjg+bG`0o#=2qN!=o%RtX%`y?qnduUvvH&iF%0ix-!NG?C2vZ8~S(*6ONb(p`bg7~47zWs{ z&wI4obx3^cK(!f31bNQORsVl=1LL9MPu+&xl)lFvYtki76c;S99ADx!gE!B0j-KSO zFQALcaZ?E%W{MH~rH31y8LnJkd|~DPYwCIAZY2YWc5S@%jS=LtykcCj!B>>YT6Aiu z{=WpC)Q9fv2^(y3pYXXGzr&y19l3kf^ycQS;5JHyWyN2$naTEMgre+sb&T^*5ufpu z=GD@Q%ICTl-m852PLnmBLs8|k3I-BQZ7>B-n+}a0|IkBH^Ph7UWj+az&b4mMF9OMM z;GR?+F>8MJ`uCX0-{%SLXhOJY-h7Bo)r%3&T_S&Ps;v5$E<>%taG$3|t$&bN>F4z| z^4Puc#iRa1k9X%$niCG<9|n9>{p5S-PU_zF_-7plFtxTvb>*VEC%l&UKiy=?Qy&vV!>SX*_Wt_Sq1nZ3Y%9y2UcxfVwl~ku-mWlhF@0kwk$A5G zkB=LShH`j*evv~bX?o!T%sFNP<%E{+M;MdMx`N`UZ@rPL)G=Wou+qOt`a^)vpIOcf zo{1EGFS%&QTmG^jx)50+AL4cN`@wF%x5s|or)5(ryQMd?LqD9ptx>+DAQfy{e8pd_ zml7Z!_a{?Li^bSJJI}W|^^S~^cj*mv@$hkkS1)+Y9#xg2H*U#(^DIw3WD5E7vHa7x zO~yy;#0SQTQay7!`q!FEa}1!$R^um6HMUtYF!r-~@G$p$;WK3}ys|%`H|h3%") +importFrom(dplyr,across) +importFrom(dplyr,arrange) +importFrom(dplyr,bind_rows) +importFrom(dplyr,desc) +importFrom(dplyr,everything) +importFrom(dplyr,filter) +importFrom(dplyr,group_by) +importFrom(dplyr,group_split) +importFrom(dplyr,if_else) +importFrom(dplyr,pull) +importFrom(dplyr,select) +importFrom(dplyr,summarize) +importFrom(evalcast,weighted_interval_score) +importFrom(jsonlite,read_json) +importFrom(lubridate,day) +importFrom(lubridate,days_in_month) +importFrom(lubridate,make_date) +importFrom(lubridate,month) +importFrom(lubridate,year) +importFrom(parallel,detectCores) +importFrom(plyr,rbind.fill) +importFrom(quantgen,quantile_lasso) +importFrom(readr,read_csv) +importFrom(readr,write_csv) +importFrom(rlang,.data) +importFrom(rlang,.env) +importFrom(stats,coef) +importFrom(stats,nlm) +importFrom(stats,pbeta) +importFrom(stats,predict) +importFrom(stats,setNames) +importFrom(stringr,str_interp) +importFrom(stringr,str_split) +importFrom(tibble,tribble) +importFrom(tidyr,crossing) +importFrom(tidyr,drop_na) +importFrom(tidyr,fill) +importFrom(tidyr,pivot_longer) +importFrom(tidyr,pivot_wider) +importFrom(utils,head) +importFrom(zoo,rollmeanr) diff --git a/backfill_corrections/delphiBackfillCorrection/R/beta_prior_estimation.R b/backfill_corrections/delphiBackfillCorrection/R/beta_prior_estimation.R new file mode 100644 index 000000000..dadb48984 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/R/beta_prior_estimation.R @@ -0,0 +1,212 @@ +## Functions for Beta Prior Approach. +## +## This is used only for the fraction prediction e.g. 
fraction of Covid claims, +## percentage of positive tests. We assume that the fraction follows a beta distribution +## that is day-of-week dependent. A quantile regression model is used first with lasso +## penalty for supporting quantile estimation and then a non-linear minimization is used +## for prior estimation. + +#' Sum of squared error +#' +#' @param fit estimated values +#' @param actual actual values +delta <- function(fit, actual) sum((fit-actual)^2) + +#' Generate objection function +#' @param theta parameters for the distribution in log scale +#' @param x vector of quantiles +#' @param prob the expected probabilities +#' @param ... additional arguments +#' +#' @importFrom stats pbeta +objective <- function(theta, x, prob, ...) { + ab <- exp(theta) # Parameters are the *logs* of alpha and beta + fit <- pbeta(x, ab[1], ab[2]) + return (delta(fit, prob)) +} + +#' Main function for the beta prior approach +#' Estimate the priors for the beta distribution based on data for +#' a certain day of a week +#' +#' @template train_data-template +#' @param prior_test_data Data Frame for testing +#' @template taus-template +#' @template covariates-template +#' @template lp_solver-template +#' @template lambda-template +#' @template geo_level-template +#' @template geo-template +#' @template indicator-template +#' @template signal-template +#' @template signal_suffix-template +#' @template value_type-template +#' @template train_models-template +#' @template make_predictions-template +#' @param dw column name to indicate which day of a week it is +#' @param response the column name of the response variable +#' @param start the initialization of the the points in nlm +#' @param base_pseudo_denom the pseudo counts added to denominator if little data for training +#' @param base_pseudo_num the pseudo counts added to numerator if little data for training +#' @param training_end_date the most recent training date +#' @param model_save_dir directory containing trained models +#' +#' @importFrom stats nlm predict +#' @importFrom dplyr %>% filter +#' @importFrom quantgen quantile_lasso +#' +est_priors <- function(train_data, prior_test_data, geo, value_type, dw, taus, + covariates, response, lp_solver, lambda, + indicator, signal, geo_level, signal_suffix, + training_end_date, model_save_dir, start=c(0, log(10)), + base_pseudo_denom=1000, base_pseudo_num=10, + train_models = TRUE, make_predictions = TRUE) { + sub_train_data <- train_data %>% filter(train_data[[dw]] == 1) + sub_test_data <- prior_test_data %>% filter(prior_test_data[[dw]] == 1) + if (nrow(sub_test_data) == 0) { + pseudo_denom <- base_pseudo_denom + pseudo_num <- base_pseudo_num + } else { + # Using quantile regression to get estimated quantiles at log scale + quantiles <- list() + for (idx in 1:length(taus)) { + tau <- taus[idx] + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda, + geo=geo, dw=dw, tau=tau, + value_type=value_type, + training_end_date=training_end_date, + beta_prior_mode=TRUE) + model_path <- file.path(model_save_dir, model_file_name) + + obj = get_model(model_path, sub_train_data, covariates, tau = tau, + lambda = lambda, lp_solver = lp_solver, train_models) + + y_hat_all <- as.numeric(predict(obj, newx = as.matrix(sub_test_data[covariates]))) + quantiles[idx] <- exp(mean(y_hat_all, na.rm=TRUE)) # back to the actual scale + } + quantiles <- as.vector(unlist(quantiles)) + # Using nlm to estimate priors + sol <- nlm(objective, start, x=quantiles, prob=taus, lower=0, upper=1, + 
typsize=c(1,1), fscale=1e-12, gradtol=1e-12) + parms <- exp(sol$estimate) + # Computing pseudo counts based on beta priors + pseudo_denom <- parms[1] + parms[2] + pseudo_num <- parms[1] + } + return (c(pseudo_denom, pseudo_num)) +} + +#' Update fraction based on the pseudo counts for numerators and denominators +#' +#' @param data Data Frame +#' @param dw character to indicate the day of a week. Can be NULL for all the days +#' @param pseudo_num the estimated counts to be added to numerators +#' @param pseudo_denom the estimated counts to be added to denominators +#' @template num_col-template +#' @template denom_col-template +#' +#' @export +frac_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, denom_col) { + if (is.null(dw)) { + num_adj <- data[[num_col]] + pseudo_num + denom_adj <- data[[denom_col]] + pseudo_denom + } else { + num_adj <- data[[num_col]][data[[dw]] == 1] + pseudo_num + denom_adj <- data[data[[dw]] == 1, denom_col] + pseudo_denom + } + return (num_adj / denom_adj) +} + +#' Update fraction using beta prior approach +#' +#' @template train_data-template +#' @param test_data testing data +#' @param prior_test_data testing data for the lag -1 model +#' @param training_end_date the most recent training date +#' @param model_save_dir directory containing trained models +#' @template indicator-template +#' @template signal-template +#' @template geo-template +#' @template signal_suffix-template +#' @template lambda-template +#' @template value_type-template +#' @template geo_level-template +#' @template taus-template +#' @template lp_solver-template +#' @template train_models-template +#' @template make_predictions-template +#' +#' @export +frac_adj <- function(train_data, test_data, prior_test_data, + indicator, signal, geo_level, signal_suffix, + lambda, value_type, geo, + training_end_date, model_save_dir, + taus = TAUS, lp_solver = LP_SOLVER, + train_models = TRUE, + make_predictions = TRUE) { + train_data$value_target <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_target_num", "value_target_denom") + train_data$value_7dav <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") + prior_test_data$value_7dav <- frac_adj_with_pseudo(prior_test_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") + + train_data$log_value_target <- log(train_data$value_target) + train_data$log_value_7dav <- log(train_data$value_7dav) + prior_test_data$log_value_7dav <- log(prior_test_data$value_7dav) + + pre_covariates = c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref", + "log_value_7dav") + #For training + train_data$value_raw = NaN + train_data$value_7dav = NaN + train_data$value_prev_7dav = NaN + + #For testing + test_data$value_raw = NaN + test_data$value_7dav = NaN + test_data$value_prev_7dav = NaN + + test_data$pseudo_num = NaN + test_data$pseudo_denum = NaN + + for (cov in c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref", "Sun_ref")) { + pseudo_counts <- est_priors(train_data, prior_test_data, geo, value_type, cov, taus, + pre_covariates, "log_value_target", lp_solver, lambda, + indicator, signal, geo_level, signal_suffix, + training_end_date, model_save_dir, + train_models = train_models, + make_predictions = make_predictions) + pseudo_denum = pseudo_counts[1] + pseudo_num = pseudo_counts[2] + # update current data + # For training + train_data$value_raw[train_data[[cov]] == 1] <- frac_adj_with_pseudo( + train_data, cov, pseudo_num, pseudo_denum, "value_raw_num", 
"value_raw_denom") + train_data$value_7dav[train_data[[cov]] == 1] <- frac_adj_with_pseudo( + train_data, cov, pseudo_num, pseudo_denum, "value_7dav_num", "value_7dav_denom") + train_data$value_prev_7dav[train_data[[cov]] == 1] <- frac_adj_with_pseudo( + train_data, cov, pseudo_num, pseudo_denum, "value_prev_7dav_num", "value_prev_7dav_denom") + + #For testing + test_data$value_raw[test_data[[cov]] == 1] <- frac_adj_with_pseudo( + test_data, cov, pseudo_num, pseudo_denum, "value_raw_num", "value_raw_denom") + test_data$value_7dav[test_data[[cov]] == 1] <- frac_adj_with_pseudo( + test_data, cov, pseudo_num, pseudo_denum, "value_7dav_num", "value_7dav_denom") + test_data$value_prev_7dav[test_data[[cov]] == 1] <- frac_adj_with_pseudo( + test_data, cov, pseudo_num, pseudo_denum, "value_prev_7dav_num", "value_prev_7dav_denom") + + test_data$pseudo_num[test_data[[cov]] == 1] = pseudo_num + test_data$pseudo_denum[test_data[[cov]] == 1] = pseudo_denum + } + + train_data$log_value_raw = log(train_data$value_raw) + train_data$log_value_7dav = log(train_data$value_7dav) + train_data$log_value_prev_7dav = log(train_data$value_prev_7dav) + train_data$log_7dav_slope = train_data$log_value_7dav - train_data$log_value_prev_7dav + + test_data$log_value_raw = log(test_data$value_raw) + test_data$log_value_7dav = log(test_data$value_7dav) + test_data$log_value_prev_7dav = log(test_data$value_prev_7dav) + test_data$log_7dav_slope = test_data$log_value_7dav - test_data$log_value_prev_7dav + + return (list(train_data, test_data)) +} diff --git a/backfill_corrections/delphiBackfillCorrection/R/constants.R b/backfill_corrections/delphiBackfillCorrection/R/constants.R new file mode 100644 index 000000000..e5a9cfd35 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/R/constants.R @@ -0,0 +1,33 @@ +# Constants for the backfill correction model +TAUS <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) +REF_LAG <- 60 +TEST_LAGS <- c(1:14, 21, 35, 51) +TRAINING_DAYS <- 270 +TESTING_WINDOW <- 14 +LAG_WINDOW <- 5 +LAMBDA <- 0.1 +LAG_PAD <- 2 +LP_SOLVER <-"gurobi" # LP solver to use in quantile_lasso(); "gurobi" or "glpk" + +YITL <-"log_value_raw" +SLOPE <-"log_7dav_slope" +Y7DAV <-"log_value_7dav" + +SQRTSCALE_COVID <-c('sqrty0_covid', 'sqrty1_covid', 'sqrty2_covid') +SQRTSCALE_TOTAL <-c('sqrty0_total', 'sqrty1_total', 'sqrty2_total') +SQRTSCALE <-c('sqrty0', 'sqrty1', "sqrty2") +LOG_LAG <-"inv_log_lag" + +# Dates +WEEKDAYS_ABBR <- c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat") # wd +WEEK_ISSUES <- c("W1_issue", "W2_issue", "W3_issue") # wm +TODAY <- Sys.Date() + +INDICATORS_AND_SIGNALS <- tibble::tribble( + ~indicator, ~signal, ~name_suffix, ~sub_dir, + "changehc", "covid", "", "chng", + "changehc", "flu", "", "chng", + "claims_hosp", "", "", "claims_hosp", + # "dv",,, + "quidel", "covidtest", c("total", "age_0_4", "age_5_17", "age_18_49", "age_50_64", "age_65plus", "age_0_17"), "quidel_covidtest" +) diff --git a/backfill_corrections/delphiBackfillCorrection/R/delphiBackfillCorrection.R b/backfill_corrections/delphiBackfillCorrection/R/delphiBackfillCorrection.R new file mode 100644 index 000000000..57d79fd47 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/R/delphiBackfillCorrection.R @@ -0,0 +1,3 @@ +# Load `tribble` for defining global variables +#' @importFrom tibble tribble +NULL diff --git a/backfill_corrections/delphiBackfillCorrection/R/io.R b/backfill_corrections/delphiBackfillCorrection/R/io.R new file mode 100644 index 000000000..bd506b6f7 --- /dev/null +++ 
b/backfill_corrections/delphiBackfillCorrection/R/io.R @@ -0,0 +1,133 @@ +#' Read a parquet file into a dataframe +#' +#' @template input_dir-template +#' +#' @importFrom arrow read_parquet +#' +#' @export +read_data <- function(input_dir) { + df <- read_parquet(input_dir, as_data_frame = TRUE) + return (df) +} + +#' Export the result to customized directory +#' +#' @param test_data test data containing prediction results +#' @param coef_data data frame containing the estimated coefficients +#' @template indicator-template +#' @template signal-template +#' @template geo_level-template +#' @template signal_suffix-template +#' @template lambda-template +#' @template value_type-template +#' @template export_dir-template +#' @param training_end_date the most recent training date +#' +#' @importFrom readr write_csv +#' @importFrom stringr str_interp str_split +export_test_result <- function(test_data, coef_data, indicator, signal, + geo_level, signal_suffix, lambda, + training_end_date, + value_type, export_dir) { + base_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda, + training_end_date, value_type, model_mode=FALSE) + pred_output_dir <- str_interp("prediction_${base_name}") + write_csv(test_data, file.path(export_dir, pred_output_dir)) + + coef_output_dir <- str_interp("coefs_${base_name}") + write_csv(test_data, file.path(export_dir, coef_output_dir)) +} + +#' List valid input files. +#' +#' @template indicator-template +#' @template signal-template +#' @template params-template +#' @param sub_dir string specifying the indicator-specific directory within +#' the general input directory `params$input_dir` +get_files_list <- function(indicator, signal, params, sub_dir) { + # Make sure we're reading in both 4-week rollup and daily files. + if (!missing(sub_dir)) { + input_dir <- file.path(params$input_dir, sub_dir) + } else { + input_dir <- params$input_dir + } + + # Convert input_group into file names. + daily_pattern <- create_name_pattern(indicator, signal, "daily") + rollup_pattern <- create_name_pattern(indicator, signal, "rollup") + + # Filter files lists to only include those containing dates we need for training + daily_input_files <- list.files( + input_dir, pattern = daily_pattern, full.names = TRUE + ) %>% + subset_valid_files("daily", params) + rollup_input_files <- list.files( + input_dir, pattern = rollup_pattern, full.names = TRUE + ) %>% + subset_valid_files("rollup", params) + + return(c(daily_input_files, rollup_input_files)) +} + +#' Return file names only if they contain data to be used in training +#' +#' Parse filenames to find included dates. Use different patterns if file +#' includes daily or rollup (multiple days) data. 
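# Illustration of the two filename forms handled here (the filenames are
# hypothetical; only the patterns come from create_name_pattern() below):
#   "input/changehc_covid_as_of_20220510.parquet"            -> daily file for 2022-05-10
#   "input/changehc_covid_from_20220401_to_20220430.parquet" -> rollup covering 2022-04-01..2022-04-30
# The dates are recovered with sub(), e.g.
#   as.Date(sub("^.*/.*_as_of_([0-9]{8}).parquet$", "\\1",
#               "input/changehc_covid_as_of_20220510.parquet"), format = "%Y%m%d")
# which yields 2022-05-10.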
+#' +#' @param files_list character vector of input files of a given `file_type` +#' @template file_type-template +#' @template params-template +subset_valid_files <- function(files_list, file_type = c("daily", "rollup"), params) { + file_type <- match.arg(file_type) + date_format = "%Y%m%d" + switch(file_type, + daily = { + start_dates <- as.Date( + sub("^.*/.*_as_of_([0-9]{8}).parquet$", "\\1", files_list), + format = date_format + ) + end_dates <- start_dates + }, + rollup = { + rollup_pattern <- "^.*/.*_from_([0-9]{8})_to_([0-9]{8}).parquet$" + start_dates <- as.Date( + sub(rollup_pattern, "\\1", files_list), + format = date_format + ) + end_dates <- as.Date( + sub(rollup_pattern, "\\2", files_list), + format = date_format + ) + } + ) + + ## TODO: start_date depends on if we're doing model training or just corrections. + start_date <- TODAY - params$training_days - params$ref_lag + end_date <- TODAY - 1 + + # Only keep files with data that falls at least somewhat between the desired + # start and end range dates. + files_list <- files_list[ + !(( start_dates < start_date & end_dates < start_date ) | + ( start_dates > end_date & end_dates > end_date ))] + + return(files_list) +} + +#' Create pattern to match input files of a given type and signal +#' +#' @template indicator-template +#' @template signal-template +#' @template file_type-template +#' +#' @importFrom stringr str_interp +create_name_pattern <- function(indicator, signal, + file_type = c("daily", "rollup")) { + file_type <- match.arg(file_type) + switch(file_type, + daily = str_interp("${indicator}_${signal}_as_of_[0-9]{8}.parquet$"), + rollup = str_interp("${indicator}_${signal}_from_[0-9]{8}_to_[0-9]{8}.parquet$") + ) +} diff --git a/Backfill_Correction/delphiBackfillCorrection/R/main.R b/backfill_corrections/delphiBackfillCorrection/R/main.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/R/main.R rename to backfill_corrections/delphiBackfillCorrection/R/main.R diff --git a/Backfill_Correction/delphiBackfillCorrection/R/model.R b/backfill_corrections/delphiBackfillCorrection/R/model.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/R/model.R rename to backfill_corrections/delphiBackfillCorrection/R/model.R diff --git a/backfill_corrections/delphiBackfillCorrection/R/preprocessing.R b/backfill_corrections/delphiBackfillCorrection/R/preprocessing.R new file mode 100644 index 000000000..4424fde4b --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/R/preprocessing.R @@ -0,0 +1,229 @@ +## Data Preprocessing +## +## The raw input data should have 4/5 basic columns: +## time_value: reference date +## issue_date: issue date/date of reporting +## geo_value: location +## lag: the number of days between issue date and the reference date +## counts: the number of counts used for estimation + + +#' Re-index, fill na, make sure all reference date have enough rows for updates +#' @template df-template +#' @template refd_col-template +#' @template lag_col-template +#' @param min_refd the earliest reference date considered in the data +#' @param max_refd the latest reference date considered in the data +#' @template ref_lag-template +#' +#' @return df_new Data Frame with filled rows for missing lags +#' +#' @importFrom tidyr crossing +#' @importFrom stats setNames +#' +#' @export +fill_rows <- function(df, refd_col, lag_col, min_refd, max_refd, ref_lag = REF_LAG) { + # Full list of lags + # +30 to have values for calculating 7-day averages + lags <- 
min(df[[lag_col]]): (ref_lag + 30) + refds <- seq(min_refd, max_refd, by="day") # Full list reference date + row_inds_df <- as.data.frame(crossing(refds, lags)) %>% + setNames(c(refd_col, lag_col)) + df_new = merge(x=df, y=row_inds_df, + by=c(refd_col, lag_col), all.y=TRUE) + return (df_new) +} + +#' Get pivot table, filling NANs. If there is no update on issue date D but +#' previous reports exist for issue date D_p < D, all the dates between +#' [D_p, D] are filled with with the reported value on date D_p. If there is +#' no update for any previous issue date, fill in with 0. +#' @template df-template +#' @template value_col-template +#' @template refd_col-template +#' @template lag_col-template +#' +#' @importFrom tidyr fill pivot_wider pivot_longer +#' @importFrom dplyr %>% everything select +#' +#' @export +fill_missing_updates <- function(df, value_col, refd_col, lag_col) { + pivot_df <- df[order(df[[lag_col]], decreasing=FALSE), ] %>% + pivot_wider(id_cols=lag_col, names_from=refd_col, values_from=value_col) + + if (any(diff(pivot_df[[lag_col]]) != 1)) { + stop("Risk exists in forward filling") + } + pivot_df <- pivot_df %>% fill(everything(), .direction="down") + + # Fill NAs with 0s + pivot_df[is.na(pivot_df)] <- 0 + + backfill_df <- pivot_df %>% + pivot_longer(-lag_col, values_to="value_raw", names_to=refd_col) + backfill_df[[refd_col]] = as.Date(backfill_df[[refd_col]]) + + return (as.data.frame(backfill_df)) +} + +#' Calculate 7 day moving average for each issue date +#' The 7dav for date D reported on issue date D_i is the average from D-7 to D-1 +#' @param pivot_df Data Frame where the columns are issue dates and the rows are +#' reference dates +#' @template refd_col-template +#' +#' @importFrom zoo rollmeanr +#' +#' @export +get_7dav <- function(pivot_df, refd_col) { + for (col in colnames(pivot_df)) { + if (col == refd_col) next + pivot_df[, col] <- rollmeanr(pivot_df[, col], 7, align="right", fill=NA) + } + backfill_df <- pivot_df %>% + pivot_longer(-refd_col, values_to="value_raw", names_to="issue_date") + backfill_df[[refd_col]] = as.Date(backfill_df[[refd_col]]) + backfill_df[["issue_date"]] = as.Date(backfill_df[["issue_date"]]) + return (as.data.frame(backfill_df)) +} + +#' Used for data shifting in terms of reference date +#' +#' @template df-template +#' @param n_day number of days to be shifted +#' @template refd_col-template +#' +#' @export +add_shift <- function(df, n_day, refd_col) { + df[, refd_col] <- as.Date(df[, refd_col]) + n_day + return (df) +} + +#' Add one hot encoding for day of a week info in terms of reference +#' and issue date +#' +#' @template df-template +#' @param wd vector of days of a week +#' @template time_col-template +#' @param suffix suffix added to indicate which kind of date is used +#' +#' @export +add_dayofweek <- function(df, time_col, suffix, wd = WEEKDAYS_ABBR) { + dayofweek <- as.numeric(format(df[[time_col]], format="%u")) + for (i in 1:6) { + df[, paste0(wd[i], suffix)] <- as.numeric(dayofweek == i) + } + if (suffix == "_ref") { + df[, paste0("Sun", suffix)] <- as.numeric(dayofweek == 7) + } + return (df) +} + +#' Get week of a month info according to a date +#' +#' All the dates on or before the ith Sunday but after the (i-1)th Sunday +#' is considered to be the ith week. Notice that +#' If there are 4 or 5 weeks in total, the ith weeks is labeled as i +#' and the dates in the 5th week this month are actually in the same +#' week with the dates in the 1st week next month and those dates are +#' sparse. 
Thus, we assign the dates in the 5th week to the 1st week. +#' If there are 6 weeks in total, the 1st, 2nd, 3rd, 4th, 5th, 6th weeks +#' are labeled as c(1, 1, 2, 3, 4, 1) which means we will merge the first, +#' second and the last weeks together. +#' +#' @param date Date object +#' +#' @importFrom lubridate make_date days_in_month year month day +#' +#' @return a integer indicating which week it is in a month +get_weekofmonth <- function(date) { + year <- year(date) + month <- month(date) + day <- day(date) + firstdayofmonth <- as.numeric(format(make_date(year, month, 1), format="%u")) + n_days <- lubridate::days_in_month(date) + n_weeks <- (n_days + firstdayofmonth - 1) %/% 7 + 1 + extra_check <- as.integer(n_weeks > 5) + return (max((day + firstdayofmonth - 1) %/% 7 - extra_check, 0) %% 4 + 1) +} + +#' Add one hot encoding for week of a month info in terms of issue date +#' +#' @template df-template +#' @param wm vector of weeks of a month +#' @template time_col-template +#' +#' @export +add_weekofmonth <- function(df, time_col, wm = WEEK_ISSUES) { + weekofmonth <- get_weekofmonth(df[[time_col]]) + for (i in 1:3) { + df[, paste0(wm[i])] <- as.numeric(weekofmonth == i) + } + return (df) +} + +#' Add 7dav and target to the data +#' Target is the updates made ref_lag days after the first release +#' @template df-template +#' @template value_col-template +#' @template refd_col-template +#' @template lag_col-template +#' @template ref_lag-template +#' +#' @importFrom dplyr %>% +#' @importFrom tidyr pivot_wider drop_na +#' +#' @export +add_7davs_and_target <- function(df, value_col, refd_col, lag_col, ref_lag = REF_LAG) { + df$issue_date <- df[[refd_col]] + df[[lag_col]] + pivot_df <- df[order(df$issue_date, decreasing=FALSE), ] %>% + pivot_wider(id_cols=refd_col, names_from="issue_date", + values_from=value_col) + + # Add 7dav avg + avg_df <- get_7dav(pivot_df, refd_col) + avg_df <- add_shift(avg_df, 1, refd_col) # 7dav until yesterday + names(avg_df)[names(avg_df) == value_col] <- 'value_7dav' + avg_df_prev7 <- add_shift(avg_df, 7, refd_col) + names(avg_df_prev7)[names(avg_df_prev7) == 'value_7dav'] <- 'value_prev_7dav' + + backfill_df <- Reduce(function(x, y) merge(x, y, all=TRUE), + list(df, avg_df, avg_df_prev7)) + + # Add target + target_df <- df[df$lag==ref_lag, c(refd_col, value_col, "issue_date")] + names(target_df)[names(target_df) == value_col] <- 'value_target' + names(target_df)[names(target_df) == 'issue_date'] <- 'target_date' + + backfill_df <- merge(backfill_df, target_df, by=refd_col, all.x=TRUE) + + # Add log values + backfill_df$log_value_raw = log(backfill_df$value_raw + 1) + backfill_df$log_value_7dav = log(backfill_df$value_7dav + 1) + backfill_df$log_value_target = log(backfill_df$value_target + 1) + backfill_df$log_value_prev_7dav = log(backfill_df$value_prev_7dav + 1) + backfill_df$log_7dav_slope = backfill_df$log_value_7dav - backfill_df$log_value_prev_7dav + + # Remove invalid rows + backfill_df <- backfill_df %>% drop_na(c(lag_col)) + + return (as.data.frame(backfill_df)) +} + +#' Add params related to date +#' +#' Target is the updates made ref_lag days after the first release +#' +#' @template df-template +#' @template refd_col-template +#' @template lag_col-template +add_params_for_dates <- function(df, refd_col, lag_col) { + # Add columns for day-of-week effect + df <- add_dayofweek(df, refd_col, "_ref", WEEKDAYS_ABBR) + df <- add_dayofweek(df, "issue_date", "_issue", WEEKDAYS_ABBR) + + # Add columns for week-of-month effect + df <- add_weekofmonth(df, 
"issue_date", WEEK_ISSUES) + + return (as.data.frame(df)) +} diff --git a/backfill_corrections/delphiBackfillCorrection/R/utils.R b/backfill_corrections/delphiBackfillCorrection/R/utils.R new file mode 100644 index 000000000..fdcaf42e4 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/R/utils.R @@ -0,0 +1,165 @@ +#' Return params file as an R list +#' +#' Reads a parameters file. If the file does not exist, the function will create a copy of +#' '"params.json.template" and read from that. +#' +#' A params list should contain the following fields. If not included, +#' they will be filled with default values when possible. +#' +#' params$ref_lag: reference lag, after x days, the update is considered to be +#' the response. 60 is a reasonable choice for CHNG outpatient data +#' params$input_dir: link to the input data file +#' params$testing_window: the testing window used for saving the runtime. Could +#' set it to be 1 if time allows +#' params$test_dates: list of two elements, the first one is the start date and +#' the second one is the end date +#' params$training_days: set it to be 270 or larger if you have enough data +#' params$num_col: the column name for the counts of the numerator, e.g. the +#' number of COVID claims +#' params$denom_col: the column name for the counts of the denominator, e.g. the +#' number of total claims +#' params$geo_level: character vector of "state" and "county", by default +#' params$taus: vector of considered quantiles +#' params$lambda: the level of lasso penalty +#' params$export_dir: directory to save corrected data to +#' params$lp_solver: LP solver to use in quantile_lasso(); "gurobi" or "glpk" +#' +#' @param path path to the parameters file; if not present, will try to copy the file +#' "params.json.template" +#' @param template_path path to the template parameters file +#' @template train_models-template +#' @template make_predictions-template +#' +#' @return a named list of parameters values +#' +#' @importFrom dplyr if_else +#' @importFrom jsonlite read_json +read_params <- function(path = "params.json", template_path = "params.json.template", + train_models = TRUE, make_predictions = TRUE) { + if (!file.exists(path)) {file.copy(template_path, path)} + params <- read_json(path, simplifyVector = TRUE) + + # Required parameters + if (!("input_dir" %in% names(params)) || !dir.exists(params$input_dir)) { + stop("input_dir must be set in `params` and exist") + } + params$train_models <- train_models + params$make_predictions <- make_predictions + + ## Set default parameter values if not specified + # Paths + if (!("export_dir" %in% names(params))) {params$export_dir <- "./receiving"} + if (!("cache_dir" %in% names(params))) {params$cache_dir <- "./cache"} + + # Parallel parameters + if (!("parallel" %in% names(params))) {params$parallel <- FALSE} + if (!("parallel_max_cores" %in% names(params))) {params$parallel_max_cores <- .Machine$integer.max} + + # Model parameters + if (!("taus" %in% names(params))) {params$taus <- TAUS} + if (!("lambda" %in% names(params))) {params$lambda <- LAMBDA} + if (!("lp_solver" %in% names(params))) {params$lp_solver <- LP_SOLVER} + if (!("lag_pad" %in% names(params))) {params$lag_pad <- LAG_PAD} + + # Data parameters + if (!("num_col" %in% names(params))) {params$num_col <- "num"} + if (!("denom_col" %in% names(params))) {params$denom_col <- "denom"} + if (!("geo_levels" %in% names(params))) {params$geo_levels <- c("state", "county")} + if (!("value_types" %in% names(params))) {params$value_types <- 
c("count", "fraction")} + + # Date parameters + if (!("training_days" %in% names(params))) {params$training_days <- TRAINING_DAYS} + if (!("ref_lag" %in% names(params))) {params$ref_lag <- REF_LAG} + if (!("testing_window" %in% names(params))) {params$testing_window <- TESTING_WINDOW} + if (!("test_dates" %in% names(params)) || length(params$test_dates) == 0) { + start_date <- TODAY - params$testing_window + end_date <- TODAY - 1 + params$test_dates <- seq(start_date, end_date, by="days") + } + + return(params) +} + +#' Create directory if not already existing +#' +#' @param path string specifying a directory to create +#' +#' @export +create_dir_not_exist <- function(path) +{ + if (!dir.exists(path)) { dir.create(path) } +} + +#' Check input data for validity +#' +#' @template df-template +#' @template value_type-template +#' @template num_col-template +#' @template denom_col-template +#' @template signal_suffixes-template +#' +#' @return list of input dataframe augmented with lag column, if it +#' didn't already exist, and character vector of one or two value +#' column names, depending on requested `value_type` +validity_checks <- function(df, value_type, num_col, denom_col, signal_suffixes) { + if (!missing(signal_suffixes)) { + num_col <- paste(num_col, signal_suffixes, sep = "_") + denom_col <- paste(num_col, signal_suffixes, sep = "_") + } + + # Check data type and required columns + if (value_type == "count") { + if (all(num_col %in% colnames(df))) {value_cols=c(num_col)} + else if (all(denom_col %in% colnames(df))) {value_cols=c(denom_col)} + else {stop("No valid column name detected for the count values!")} + } else if (value_type == "fraction") { + value_cols = c(num_col, denom_col) + if ( any(!(value_cols %in% colnames(df))) ) { + stop("No valid column name detected for the fraction values!") + } + } + + # time_value must exist in the dataset + if ( !"time_value" %in% colnames(df) ) { + stop("No 'time_value' column detected for the reference date!") + } + + # issue_date or lag should exist in the dataset + if ( !"lag" %in% colnames(df) ) { + if ( "issue_date" %in% colnames(df) ) { + df$lag = as.integer(df$issue_date - df$time_value) + } + else {stop("No issue_date or lag exists!")} + } + + return(list(df = df, value_cols = value_cols)) +} + +#' Check available training days +#' +#' @param issue_date contents of input data's `issue_date` column +#' @template training_days-template +training_days_check <- function(issue_date, training_days = TRAINING_DAYS) { + valid_training_days = as.integer(max(issue_date) - min(issue_date)) + 1 + if (training_days > valid_training_days) { + warning(sprintf("Only %d days are available at most for training.", valid_training_days)) + } +} + +#' Subset list of counties to those included in the 200 most populous in the US +#' +#' @importFrom dplyr select %>% arrange desc pull +#' @importFrom rlang .data +#' @importFrom utils head +#' @import covidcast +get_populous_counties <- function() { + return( + covidcast::county_census %>% + dplyr::select(pop = .data$POPESTIMATE2019, fips = .data$FIPS) %>% + # Drop megacounties (states) + filter(!endsWith(.data$fips, "000")) %>% + arrange(desc(pop)) %>% + pull(.data$fips) %>% + head(n=200) + ) +} diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/covariates-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/covariates-template.R new file mode 100644 index 000000000..b343ffea6 --- /dev/null +++ 
b/backfill_corrections/delphiBackfillCorrection/man-roxygen/covariates-template.R @@ -0,0 +1 @@ +#' @param covariates character vector of column names serving as the covariates for the model diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/denom_col-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/denom_col-template.R new file mode 100644 index 000000000..8b16d87bb --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/denom_col-template.R @@ -0,0 +1 @@ +#' @param denom_col name of denominator column in the input dataframe diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/df-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/df-template.R new file mode 100644 index 000000000..4aa746f51 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/df-template.R @@ -0,0 +1,2 @@ +#' @param df Data Frame of aggregated counts within a single location +#' reported for each reference date and issue date. diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/export_dir-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/export_dir-template.R new file mode 100644 index 000000000..4d933cada --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/export_dir-template.R @@ -0,0 +1 @@ +#' @param export_dir path to directory to save output to diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/file_type-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/file_type-template.R new file mode 100644 index 000000000..36c241abd --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/file_type-template.R @@ -0,0 +1,2 @@ +#' @param file_type string specifying time period coverage of input files. +#' Either "daily" or "rollup" diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/geo-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/geo-template.R new file mode 100644 index 000000000..ae9dfeef0 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/geo-template.R @@ -0,0 +1,2 @@ +#' @param geo string specifying the name of the geo region (e.g. FIPS +#' code for counties) diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/geo_level-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/geo_level-template.R new file mode 100644 index 000000000..778da39a4 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/geo_level-template.R @@ -0,0 +1,2 @@ +#' @param geo_level string describing geo coverage of input data. Either "state" +#' or "county". diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/indicator-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/indicator-template.R new file mode 100644 index 000000000..964cada2d --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/indicator-template.R @@ -0,0 +1,3 @@ +#' @param indicator string specifying the name of the indicator as used in +#' `parquet` input data filenames. One indicator can be associated +#' with multiple signals. 
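The files under man-roxygen/ are shared parameter docs that roxygen2 splices into a function's help page via the @template tag. A minimal sketch of how a function in this package would pull them in (the function below is hypothetical and only illustrates the mechanism):

#' Adjusted fraction of claims
#'
#' @template num_col-template
#' @template denom_col-template
#' @param data Data Frame holding the numerator and denominator columns
adjusted_frac <- function(data, num_col, denom_col) {
  (data[[num_col]] + 1) / (data[[denom_col]] + 100)
}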
diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/input_dir-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/input_dir-template.R new file mode 100644 index 000000000..a17583499 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/input_dir-template.R @@ -0,0 +1 @@ +#' @param input_dir path to the directory containing input data diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/lag_col-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/lag_col-template.R new file mode 100644 index 000000000..b3e79f0fa --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/lag_col-template.R @@ -0,0 +1,2 @@ +#' @param lag_col string specifying name of lag field within +#' the input dataframe. diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/lambda-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/lambda-template.R new file mode 100644 index 000000000..aacbb3865 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/lambda-template.R @@ -0,0 +1 @@ +#' @param lambda the level of lasso penalty diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/lp_solver-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/lp_solver-template.R new file mode 100644 index 000000000..d42a4435b --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/lp_solver-template.R @@ -0,0 +1,4 @@ +#' @param lp_solver string specifying the lp solver to use in +#' Quantgen fitting. Either "glpk" or "gurobi". For faster +#' optimization, use Gurobi (requires separate installation +#' of the `gurobi` package). diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/make_predictions-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/make_predictions-template.R new file mode 100644 index 000000000..ff57c25f6 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/make_predictions-template.R @@ -0,0 +1,2 @@ +#' @param make_predictions boolean indicating whether to generate and save +#' corrections (TRUE) or not. Default is TRUE. diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/num_col-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/num_col-template.R new file mode 100644 index 000000000..76b0aa148 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/num_col-template.R @@ -0,0 +1 @@ +#' @param num_col name of numerator column in the input dataframe diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/params-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/params-template.R new file mode 100644 index 000000000..3af9823f3 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/params-template.R @@ -0,0 +1,4 @@ +#' @param params named list containing modeling and data settings. Must include +#' the following elements: `ref_lag`, `testing_window`, `test_dates`, +#' `training_days`, `num_col`, `denom_col`, `taus`, `lambda`, `export_dir`, +#' `lp_solver`, `input_dir`, `cache_dir`, `geo_levels`, and `value_types`. 
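To make the required fields concrete, here is a sketch of a params list carrying every element named above, using the defaults that read_params() fills in when a field is absent (values are illustrative, not a production configuration):

params <- list(
  input_dir = "./input",            # required; directory must exist
  export_dir = "./receiving",
  cache_dir = "./cache",
  num_col = "num",
  denom_col = "denom",
  geo_levels = c("state", "county"),
  value_types = c("count", "fraction"),
  taus = c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99),
  lambda = 0.1,
  lp_solver = "gurobi",             # or "glpk" if gurobi is not installed
  lag_pad = 2,
  training_days = 270,
  ref_lag = 60,
  testing_window = 14,
  test_dates = seq(Sys.Date() - 14, Sys.Date() - 1, by = "days")
)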
diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/ref_lag-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/ref_lag-template.R new file mode 100644 index 000000000..b10e188c4 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/ref_lag-template.R @@ -0,0 +1 @@ +#' @param ref_lag max lag to use for training diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/refd_col-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/refd_col-template.R new file mode 100644 index 000000000..09644a4aa --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/refd_col-template.R @@ -0,0 +1,2 @@ +#' @param refd_col string specifying name of reference date field within +#' the input dataframe. diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/signal-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/signal-template.R new file mode 100644 index 000000000..d87790af7 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/signal-template.R @@ -0,0 +1,3 @@ +#' @param signal string specifying the name of the signal as used in +#' `parquet` input data filenames. One indicator can be associated +#' with multiple signals. diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/signal_suffix-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/signal_suffix-template.R new file mode 100644 index 000000000..eb3819558 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/signal_suffix-template.R @@ -0,0 +1,5 @@ +#' @param signal_suffix string specifying value column name +#' ending to be appended to standard value column names from +#' `params$num_col` and `params$denom_col`. Used for non-standard +#' value column names and when processing multiple signals from a +#' single input dataframe, as with `quidel`'s age buckets. diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/signal_suffixes-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/signal_suffixes-template.R new file mode 100644 index 000000000..e58e6cc4e --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/signal_suffixes-template.R @@ -0,0 +1,5 @@ +#' @param signal_suffixes character vector specifying value column name +#' endings to be appended to standard value column names from +#' `params$num_col` and `params$denom_col`. Used for non-standard +#' value column names and when processing multiple signals from a +#' single input dataframe, as with `quidel`'s age buckets. diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/taus-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/taus-template.R new file mode 100644 index 000000000..b383e35f8 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/taus-template.R @@ -0,0 +1,2 @@ +#' @param taus numeric vector of quantiles to be predicted. Values +#' must be between 0 and 1. 
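Returning to the `signal_suffix` parameter documented a few templates above: the suffix is appended to the standard value column names. A hypothetical sketch (the real package may join the pieces differently, and the suffix value shown is made up):

# Hypothetical example of deriving a suffixed value column name.
num_col <- "num"
signal_suffix <- "age_0_17"
num_col_full <- if (nchar(signal_suffix) > 0) paste(num_col, signal_suffix, sep = "_") else num_col
# num_col_full is "num_age_0_17" here; an empty suffix leaves the name unchanged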
diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/test_lag-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/test_lag-template.R new file mode 100644 index 000000000..bd26b3386 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/test_lag-template.R @@ -0,0 +1 @@ +#' @param test_lag integer number of days ago to predict for diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/testing_window-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/testing_window-template.R new file mode 100644 index 000000000..60b6c847b --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/testing_window-template.R @@ -0,0 +1,2 @@ +#' @param testing_window the testing window used for saving the runtime. Could +#' set it to be 1 if time allows diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/time_col-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/time_col-template.R new file mode 100644 index 000000000..3be84de74 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/time_col-template.R @@ -0,0 +1,2 @@ +#' @param time_col string specifying name of column used for the +#' date, can be either reference date or issue date diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/train_data-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/train_data-template.R new file mode 100644 index 000000000..2c8fd3de6 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/train_data-template.R @@ -0,0 +1 @@ +#' @param train_data Data Frame containing training data diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/train_models-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/train_models-template.R new file mode 100644 index 000000000..3048087af --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/train_models-template.R @@ -0,0 +1,3 @@ +#' @param train_models boolean indicating whether to train models (TRUE). If +#' FALSE previously trained models (stored locally) will be used instead. +#' Default is TRUE. diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/training_days-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/training_days-template.R new file mode 100644 index 000000000..32f6c3a9d --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/training_days-template.R @@ -0,0 +1 @@ +#' @param training_days integer number of days to use for training diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/value_col-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/value_col-template.R new file mode 100644 index 000000000..0cc922d14 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/value_col-template.R @@ -0,0 +1,2 @@ +#' @param value_col string specifying name of value (counts) field within +#' the input dataframe. diff --git a/backfill_corrections/delphiBackfillCorrection/man-roxygen/value_type-template.R b/backfill_corrections/delphiBackfillCorrection/man-roxygen/value_type-template.R new file mode 100644 index 000000000..c49b7e84b --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man-roxygen/value_type-template.R @@ -0,0 +1 @@ +#' @param value_type string describing signal type. Either "count" or "fraction". 
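These `man-roxygen` templates are pulled into individual function documentation blocks via roxygen2's `@template` tag (which looks up `man-roxygen/<name>.R`); the generated `man/*.Rd` files added below are the result. A hypothetical sketch of a roxygen block reusing them (the function shown is illustrative and not part of the package):

#' Add a lag column to raw data (illustrative only)
#'
#' @template df-template
#' @template refd_col-template
#' @template lag_col-template
add_lag_example <- function(df, refd_col, lag_col) {
  # placeholder body: lag = days between issue date and reference date
  df[[lag_col]] <- as.integer(as.Date(df$issue_date) - as.Date(df[[refd_col]]))
  df
}

Because the Rd files below carry the "do not edit by hand" header, wording fixes belong in these templates or in the roxygen comments of the R sources, followed by regenerating the docs with roxygen2.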
diff --git a/backfill_corrections/delphiBackfillCorrection/man/add_7davs_and_target.Rd b/backfill_corrections/delphiBackfillCorrection/man/add_7davs_and_target.Rd new file mode 100644 index 000000000..25a0dee23 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/add_7davs_and_target.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{add_7davs_and_target} +\alias{add_7davs_and_target} +\title{Add 7dav and target to the data +Target is the updates made ref_lag days after the first release} +\usage{ +add_7davs_and_target(df, value_col, refd_col, lag_col, ref_lag = REF_LAG) +} +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{value_col}{string specifying name of value (counts) field within +the input dataframe.} + +\item{refd_col}{string specifying name of reference date field within +the input dataframe.} + +\item{lag_col}{string specifying name of lag field within +the input dataframe.} + +\item{ref_lag}{max lag to use for training} +} +\description{ +Add 7dav and target to the data +Target is the updates made ref_lag days after the first release +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/add_dayofweek.Rd b/backfill_corrections/delphiBackfillCorrection/man/add_dayofweek.Rd new file mode 100644 index 000000000..02cc129a0 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/add_dayofweek.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{add_dayofweek} +\alias{add_dayofweek} +\title{Add one hot encoding for day of a week info in terms of reference +and issue date} +\usage{ +add_dayofweek(df, time_col, suffix, wd = WEEKDAYS_ABBR) +} +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{time_col}{string specifying name of column used for the +date, can be either reference date or issue date} + +\item{suffix}{suffix added to indicate which kind of date is used} + +\item{wd}{vector of days of a week} +} +\description{ +Add one hot encoding for day of a week info in terms of reference +and issue date +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/add_params_for_dates.Rd b/backfill_corrections/delphiBackfillCorrection/man/add_params_for_dates.Rd new file mode 100644 index 000000000..d9303d7d6 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/add_params_for_dates.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{add_params_for_dates} +\alias{add_params_for_dates} +\title{Add params related to date} +\usage{ +add_params_for_dates(df, refd_col, lag_col) +} +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{refd_col}{string specifying name of reference date field within +the input dataframe.} + +\item{lag_col}{string specifying name of lag field within +the input dataframe.} +} +\description{ +Target is the updates made ref_lag days after the first release +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/add_shift.Rd b/backfill_corrections/delphiBackfillCorrection/man/add_shift.Rd new file mode 100644 index 000000000..d4adc5823 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/add_shift.Rd @@ 
-0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{add_shift} +\alias{add_shift} +\title{Used for data shifting in terms of reference date} +\usage{ +add_shift(df, n_day, refd_col) +} +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{n_day}{number of days to be shifted} + +\item{refd_col}{string specifying name of reference date field within +the input dataframe.} +} +\description{ +Used for data shifting in terms of reference date +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/add_sqrtscale.Rd b/backfill_corrections/delphiBackfillCorrection/man/add_sqrtscale.Rd new file mode 100644 index 000000000..47af18a24 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/add_sqrtscale.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{add_sqrtscale} +\alias{add_sqrtscale} +\title{Add columns to indicate the scale of value at square root level} +\usage{ +add_sqrtscale(train_data, test_data, max_raw, value_col) +} +\arguments{ +\item{train_data}{Data Frame containing training data} + +\item{test_data}{Data Frame for testing} + +\item{max_raw}{the maximum value in the training data at square root level} + +\item{value_col}{string specifying name of value (counts) field within +the input dataframe.} +} +\description{ +Add columns to indicate the scale of value at square root level +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/add_weekofmonth.Rd b/backfill_corrections/delphiBackfillCorrection/man/add_weekofmonth.Rd new file mode 100644 index 000000000..260efb519 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/add_weekofmonth.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{add_weekofmonth} +\alias{add_weekofmonth} +\title{Add one hot encoding for week of a month info in terms of issue date} +\usage{ +add_weekofmonth(df, time_col, wm = WEEK_ISSUES) +} +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{time_col}{string specifying name of column used for the +date, can be either reference date or issue date} + +\item{wm}{vector of weeks of a month} +} +\description{ +Add one hot encoding for week of a month info in terms of issue date +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/create_dir_not_exist.Rd b/backfill_corrections/delphiBackfillCorrection/man/create_dir_not_exist.Rd new file mode 100644 index 000000000..1a9b887a5 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/create_dir_not_exist.Rd @@ -0,0 +1,14 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils.R +\name{create_dir_not_exist} +\alias{create_dir_not_exist} +\title{Create directory if not already existing} +\usage{ +create_dir_not_exist(path) +} +\arguments{ +\item{path}{string specifying a directory to create} +} +\description{ +Create directory if not already existing +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/create_name_pattern.Rd b/backfill_corrections/delphiBackfillCorrection/man/create_name_pattern.Rd new file mode 100644 index 000000000..603e25627 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/create_name_pattern.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: 
do not edit by hand +% Please edit documentation in R/io.R +\name{create_name_pattern} +\alias{create_name_pattern} +\title{Create pattern to match input files of a given type and signal} +\usage{ +create_name_pattern(indicator, signal, file_type = c("daily", "rollup")) +} +\arguments{ +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{file_type}{string specifying time period coverage of input files. +Either "daily" or "rollup"} +} +\description{ +Create pattern to match input files of a given type and signal +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/data_filteration.Rd b/backfill_corrections/delphiBackfillCorrection/man/data_filteration.Rd new file mode 100644 index 000000000..d8589ecac --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/data_filteration.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{data_filteration} +\alias{data_filteration} +\title{Filtration for training and testing data with different lags} +\usage{ +data_filteration(test_lag, geo_train_data, geo_test_data, lag_pad) +} +\arguments{ +\item{test_lag}{integer number of days ago to predict for} + +\item{geo_train_data}{training data for a certain location} + +\item{geo_test_data}{testing data for a certain location} + +\item{lag_pad}{lag padding for training} +} +\description{ +Filtration for training and testing data with different lags +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/delta.Rd b/backfill_corrections/delphiBackfillCorrection/man/delta.Rd new file mode 100644 index 000000000..7d1af25ca --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/delta.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/beta_prior_estimation.R +\name{delta} +\alias{delta} +\title{Sum of squared error} +\usage{ +delta(fit, actual) +} +\arguments{ +\item{fit}{estimated values} + +\item{actual}{actual values} +} +\description{ +Sum of squared error +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/est_priors.Rd b/backfill_corrections/delphiBackfillCorrection/man/est_priors.Rd new file mode 100644 index 000000000..881864341 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/est_priors.Rd @@ -0,0 +1,97 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/beta_prior_estimation.R +\name{est_priors} +\alias{est_priors} +\title{Main function for the beta prior approach +Estimate the priors for the beta distribution based on data for +a certain day of a week} +\usage{ +est_priors( + train_data, + prior_test_data, + geo, + value_type, + dw, + taus, + covariates, + response, + lp_solver, + lambda, + indicator, + signal, + geo_level, + signal_suffix, + training_end_date, + model_save_dir, + start = c(0, log(10)), + base_pseudo_denom = 1000, + base_pseudo_num = 10, + train_models = TRUE, + make_predictions = TRUE +) +} +\arguments{ +\item{train_data}{Data Frame containing training data} + +\item{prior_test_data}{Data Frame for testing} + +\item{geo}{string specifying the name of the geo region (e.g. FIPS +code for counties)} + +\item{value_type}{string describing signal type. 
Either "count" or "fraction".} + +\item{dw}{column name to indicate which day of a week it is} + +\item{taus}{numeric vector of quantiles to be predicted. Values +must be between 0 and 1.} + +\item{covariates}{character vector of column names serving as the covariates for the model} + +\item{response}{the column name of the response variable} + +\item{lp_solver}{string specifying the lp solver to use in +Quantgen fitting. Either "glpk" or "gurobi". For faster +optimization, use Gurobi (requires separate installation +of the `gurobi` package).} + +\item{lambda}{the level of lasso penalty} + +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{geo_level}{string describing geo coverage of input data. Either "state" +or "county".} + +\item{signal_suffix}{string specifying value column name +ending to be appended to standard value column names from +`params$num_col` and `params$denom_col`. Used for non-standard +value column names and when processing multiple signals from a +single input dataframe, as with `quidel`'s age buckets.} + +\item{training_end_date}{the most recent training date} + +\item{model_save_dir}{directory containing trained models} + +\item{start}{the initialization of the the points in nlm} + +\item{base_pseudo_denom}{the pseudo counts added to denominator if little data for training} + +\item{base_pseudo_num}{the pseudo counts added to numerator if little data for training} + +\item{train_models}{boolean indicating whether to train models (TRUE). If +FALSE previously trained models (stored locally) will be used instead. +Default is TRUE.} + +\item{make_predictions}{boolean indicating whether to generate and save +corrections (TRUE) or not. Default is TRUE.} +} +\description{ +Main function for the beta prior approach +Estimate the priors for the beta distribution based on data for +a certain day of a week +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/evaluate.Rd b/backfill_corrections/delphiBackfillCorrection/man/evaluate.Rd new file mode 100644 index 000000000..fc4d3c347 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/evaluate.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{evaluate} +\alias{evaluate} +\title{Evaluation of the test results based on WIS score +The WIS score calculation is based on the weighted_interval_score function +from the `evalcast` package from Delphi} +\usage{ +evaluate(test_data, taus) +} +\arguments{ +\item{test_data}{dataframe with a column containing the prediction results of +each requested quantile. Each row represents an update with certain +(reference_date, issue_date, location) combination.} + +\item{taus}{numeric vector of quantiles to be predicted. 
Values +must be between 0 and 1.} +} +\description{ +Evaluation of the test results based on WIS score +The WIS score calculation is based on the weighted_interval_score function +from the `evalcast` package from Delphi +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/export_test_result.Rd b/backfill_corrections/delphiBackfillCorrection/man/export_test_result.Rd new file mode 100644 index 000000000..77c2088d5 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/export_test_result.Rd @@ -0,0 +1,52 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/io.R +\name{export_test_result} +\alias{export_test_result} +\title{Export the result to a customized directory} +\usage{ +export_test_result( + test_data, + coef_data, + indicator, + signal, + geo_level, + signal_suffix, + lambda, + training_end_date, + value_type, + export_dir +) +} +\arguments{ +\item{test_data}{test data containing prediction results} + +\item{coef_data}{data frame containing the estimated coefficients} + +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{geo_level}{string describing geo coverage of input data. Either "state" +or "county".} + +\item{signal_suffix}{string specifying value column name +ending to be appended to standard value column names from +`params$num_col` and `params$denom_col`. Used for non-standard +value column names and when processing multiple signals from a +single input dataframe, as with `quidel`'s age buckets.} + +\item{lambda}{the level of lasso penalty} + +\item{training_end_date}{the most recent training date} + +\item{value_type}{string describing signal type. Either "count" or "fraction".} + +\item{export_dir}{path to directory to save output to} +} +\description{ +Export the result to a customized directory +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/fill_missing_updates.Rd b/backfill_corrections/delphiBackfillCorrection/man/fill_missing_updates.Rd new file mode 100644 index 000000000..6318730ee --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/fill_missing_updates.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{fill_missing_updates} +\alias{fill_missing_updates} +\title{Get pivot table, filling NANs. If there is no update on issue date D but +previous reports exist for issue date D_p < D, all the dates between +[D_p, D] are filled with the reported value on date D_p. If there is +no update for any previous issue date, fill in with 0.} +\usage{ +fill_missing_updates(df, value_col, refd_col, lag_col) +} +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{value_col}{string specifying name of value (counts) field within +the input dataframe.} + +\item{refd_col}{string specifying name of reference date field within +the input dataframe.} + +\item{lag_col}{string specifying name of lag field within +the input dataframe.} +} +\description{ +Get pivot table, filling NANs. If there is no update on issue date D but +previous reports exist for issue date D_p < D, all the dates between +[D_p, D] are filled with the reported value on date D_p.
If there is +no update for any previous issue date, fill in with 0. +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/fill_rows.Rd b/backfill_corrections/delphiBackfillCorrection/man/fill_rows.Rd new file mode 100644 index 000000000..e446e6e1d --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/fill_rows.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{fill_rows} +\alias{fill_rows} +\title{Re-index, fill NAs, make sure all reference dates have enough rows for updates} +\usage{ +fill_rows(df, refd_col, lag_col, min_refd, max_refd, ref_lag = REF_LAG) +} +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{refd_col}{string specifying name of reference date field within +the input dataframe.} + +\item{lag_col}{string specifying name of lag field within +the input dataframe.} + +\item{min_refd}{the earliest reference date considered in the data} + +\item{max_refd}{the latest reference date considered in the data} + +\item{ref_lag}{max lag to use for training} +} +\value{ +df_new Data Frame with filled rows for missing lags +} +\description{ +Re-index, fill NAs, make sure all reference dates have enough rows for updates +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/frac_adj.Rd b/backfill_corrections/delphiBackfillCorrection/man/frac_adj.Rd new file mode 100644 index 000000000..f2de00345 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/frac_adj.Rd @@ -0,0 +1,78 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/beta_prior_estimation.R +\name{frac_adj} +\alias{frac_adj} +\title{Update fraction using beta prior approach} +\usage{ +frac_adj( + train_data, + test_data, + prior_test_data, + indicator, + signal, + geo_level, + signal_suffix, + lambda, + value_type, + geo, + training_end_date, + model_save_dir, + taus = TAUS, + lp_solver = LP_SOLVER, + train_models = TRUE, + make_predictions = TRUE +) +} +\arguments{ +\item{train_data}{Data Frame containing training data} + +\item{test_data}{testing data} + +\item{prior_test_data}{testing data for the lag -1 model} + +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{geo_level}{string describing geo coverage of input data. Either "state" +or "county".} + +\item{signal_suffix}{string specifying value column name +ending to be appended to standard value column names from +`params$num_col` and `params$denom_col`. Used for non-standard +value column names and when processing multiple signals from a +single input dataframe, as with `quidel`'s age buckets.} + +\item{lambda}{the level of lasso penalty} + +\item{value_type}{string describing signal type. Either "count" or "fraction".} + +\item{geo}{string specifying the name of the geo region (e.g. FIPS +code for counties)} + +\item{training_end_date}{the most recent training date} + +\item{model_save_dir}{directory containing trained models} + +\item{taus}{numeric vector of quantiles to be predicted. Values +must be between 0 and 1.} + +\item{lp_solver}{string specifying the lp solver to use in +Quantgen fitting. Either "glpk" or "gurobi".
For faster +optimization, use Gurobi (requires separate installation +of the `gurobi` package).} + +\item{train_models}{boolean indicating whether to train models (TRUE). If +FALSE previously trained models (stored locally) will be used instead. +Default is TRUE.} + +\item{make_predictions}{boolean indicating whether to generate and save +corrections (TRUE) or not. Default is TRUE.} +} +\description{ +Update fraction using beta prior approach +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/frac_adj_with_pseudo.Rd b/backfill_corrections/delphiBackfillCorrection/man/frac_adj_with_pseudo.Rd new file mode 100644 index 000000000..2ae59d33a --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/frac_adj_with_pseudo.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/beta_prior_estimation.R +\name{frac_adj_with_pseudo} +\alias{frac_adj_with_pseudo} +\title{Update fraction based on the pseudo counts for numerators and denominators} +\usage{ +frac_adj_with_pseudo(data, dw, pseudo_num, pseudo_denom, num_col, denom_col) +} +\arguments{ +\item{data}{Data Frame} + +\item{dw}{character to indicate the day of a week. Can be NULL for all the days} + +\item{pseudo_num}{the estimated counts to be added to numerators} + +\item{pseudo_denom}{the estimated counts to be added to denominators} + +\item{num_col}{name of numerator column in the input dataframe} + +\item{denom_col}{name of denominator column in the input dataframe} +} +\description{ +Update fraction based on the pseudo counts for numerators and denominators +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/generate_filename.Rd b/backfill_corrections/delphiBackfillCorrection/man/generate_filename.Rd new file mode 100644 index 000000000..ba40a8aa2 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/generate_filename.Rd @@ -0,0 +1,65 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{generate_filename} +\alias{generate_filename} +\title{Construct filename for model with given parameters} +\usage{ +generate_filename( + indicator, + signal, + geo_level, + signal_suffix, + lambda, + training_end_date = "", + geo = "", + value_type = "", + test_lag = "", + tau = "", + dw = "", + beta_prior_mode = FALSE, + model_mode = TRUE +) +} +\arguments{ +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{geo_level}{string describing geo coverage of input data. Either "state" +or "county".} + +\item{signal_suffix}{string specifying value column name +ending to be appended to standard value column names from +`params$num_col` and `params$denom_col`. Used for non-standard +value column names and when processing multiple signals from a +single input dataframe, as with `quidel`'s age buckets.} + +\item{lambda}{the level of lasso penalty} + +\item{training_end_date}{the most recent training date} + +\item{geo}{string specifying the name of the geo region (e.g. FIPS +code for counties)} + +\item{value_type}{string describing signal type. Either "count" or "fraction".} + +\item{test_lag}{integer number of days ago to predict for} + +\item{tau}{decimal quantile to be predicted. 
Values must be between 0 and 1.} + +\item{dw}{string, indicate the day of a week} + +\item{beta_prior_mode}{bool, indicate whether it is for a beta prior model} + +\item{model_mode}{bool, indicate whether the file name is for a model} +} +\value{ +path to file containing model object +} +\description{ +Construct filename for model with given parameters +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/get_7dav.Rd b/backfill_corrections/delphiBackfillCorrection/man/get_7dav.Rd new file mode 100644 index 000000000..b328bfb2b --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/get_7dav.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{get_7dav} +\alias{get_7dav} +\title{Calculate 7 day moving average for each issue date +The 7dav for date D reported on issue date D_i is the average from D-7 to D-1} +\usage{ +get_7dav(pivot_df, refd_col) +} +\arguments{ +\item{pivot_df}{Data Frame where the columns are issue dates and the rows are +reference dates} + +\item{refd_col}{string specifying name of reference date field within +the input dataframe.} +} +\description{ +Calculate 7 day moving average for each issue date +The 7dav for date D reported on issue date D_i is the average from D-7 to D-1 +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/get_files_list.Rd b/backfill_corrections/delphiBackfillCorrection/man/get_files_list.Rd new file mode 100644 index 000000000..6b193bba5 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/get_files_list.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/io.R +\name{get_files_list} +\alias{get_files_list} +\title{List valid input files.} +\usage{ +get_files_list(indicator, signal, params, sub_dir) +} +\arguments{ +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{params}{named list containing modeling and data settings. Must include +the following elements: `ref_lag`, `testing_window`, `test_dates`, +`training_days`, `num_col`, `denom_col`, `taus`, `lambda`, `export_dir`, +`lp_solver`, `input_dir`, `cache_dir`, `geo_levels`, and `value_types`.} + +\item{sub_dir}{string specifying the indicator-specific directory within +the general input directory `params$input_dir`} +} +\description{ +List valid input files. +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/get_model.Rd b/backfill_corrections/delphiBackfillCorrection/man/get_model.Rd new file mode 100644 index 000000000..5eeb43213 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/get_model.Rd @@ -0,0 +1,39 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{get_model} +\alias{get_model} +\title{Train model using quantile regression with Lasso penalty, or load from disk} +\usage{ +get_model( + model_path, + train_data, + covariates, + tau, + lambda, + lp_solver, + train_models +) +} +\arguments{ +\item{model_path}{path to read model from or to save model to} + +\item{train_data}{Data Frame containing training data} + +\item{covariates}{character vector of column names serving as the covariates for the model} + +\item{tau}{decimal quantile to be predicted. 
Values must be between 0 and 1.} + +\item{lambda}{the level of lasso penalty} + +\item{lp_solver}{string specifying the lp solver to use in +Quantgen fitting. Either "glpk" or "gurobi". For faster +optimization, use Gurobi (requires separate installation +of the `gurobi` package).} + +\item{train_models}{boolean indicating whether to train models (TRUE). If +FALSE previously trained models (stored locally) will be used instead. +Default is TRUE.} +} +\description{ +Train model using quantile regression with Lasso penalty, or load from disk +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/get_populous_counties.Rd b/backfill_corrections/delphiBackfillCorrection/man/get_populous_counties.Rd new file mode 100644 index 000000000..9f53bfe65 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/get_populous_counties.Rd @@ -0,0 +1,11 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils.R +\name{get_populous_counties} +\alias{get_populous_counties} +\title{Subset list of counties to those included in the 200 most populous in the US} +\usage{ +get_populous_counties() +} +\description{ +Subset list of counties to those included in the 200 most populous in the US +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/get_weekofmonth.Rd b/backfill_corrections/delphiBackfillCorrection/man/get_weekofmonth.Rd new file mode 100644 index 000000000..08d340d7f --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/get_weekofmonth.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/preprocessing.R +\name{get_weekofmonth} +\alias{get_weekofmonth} +\title{Get week of a month info according to a date} +\usage{ +get_weekofmonth(date) +} +\arguments{ +\item{date}{Date object} +} +\value{ +a integer indicating which week it is in a month +} +\description{ +All the dates on or before the ith Sunday but after the (i-1)th Sunday +is considered to be the ith week. Notice that + If there are 4 or 5 weeks in total, the ith weeks is labeled as i + and the dates in the 5th week this month are actually in the same + week with the dates in the 1st week next month and those dates are + sparse. Thus, we assign the dates in the 5th week to the 1st week. + If there are 6 weeks in total, the 1st, 2nd, 3rd, 4th, 5th, 6th weeks + are labeled as c(1, 1, 2, 3, 4, 1) which means we will merge the first, + second and the last weeks together. +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/main.Rd b/backfill_corrections/delphiBackfillCorrection/man/main.Rd new file mode 100644 index 000000000..ae211b289 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/main.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/main.R +\name{main} +\alias{main} +\title{Perform backfill correction on all desired signals and geo levels} +\usage{ +main(params) +} +\arguments{ +\item{params}{named list containing modeling and data settings. 
Must include +the following elements: `ref_lag`, `testing_window`, `test_dates`, +`training_days`, `num_col`, `denom_col`, `taus`, `lambda`, `export_dir`, +`lp_solver`, `input_dir`, `cache_dir`, `geo_levels`, and `value_types`.} +} +\description{ +Perform backfill correction on all desired signals and geo levels +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/main_local.Rd b/backfill_corrections/delphiBackfillCorrection/man/main_local.Rd new file mode 100644 index 000000000..ae6ef023f --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/main_local.Rd @@ -0,0 +1,55 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tooling.R +\name{main_local} +\alias{main_local} +\title{Main function to correct a single local signal} +\usage{ +main_local( + input_dir, + export_dir, + test_start_date, + test_end_date, + num_col, + denom_col, + value_type = c("count", "fraction"), + training_days = TRAINING_DAYS, + testing_window = TESTING_WINDOW, + lambda = LAMBDA, + ref_lag = REF_LAG, + lp_solver = LP_SOLVER +) +} +\arguments{ +\item{input_dir}{path to the directory containing input data} + +\item{export_dir}{path to directory to save output to} + +\item{test_start_date}{Date or string in the format "YYYY-MM-DD" to start +making predictions on} + +\item{test_end_date}{Date or string in the format "YYYY-MM-DD" to stop +making predictions on} + +\item{num_col}{name of numerator column in the input dataframe} + +\item{denom_col}{name of denominator column in the input dataframe} + +\item{value_type}{string describing signal type. Either "count" or "fraction".} + +\item{training_days}{integer number of days to use for training} + +\item{testing_window}{the testing window used for saving the runtime. Could +set it to be 1 if time allows} + +\item{lambda}{the level of lasso penalty} + +\item{ref_lag}{max lag to use for training} + +\item{lp_solver}{string specifying the lp solver to use in +Quantgen fitting. Either "glpk" or "gurobi". For faster +optimization, use Gurobi (requires separate installation +of the `gurobi` package).} +} +\description{ +Main function to correct a single local signal +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/model_training_and_testing.Rd b/backfill_corrections/delphiBackfillCorrection/man/model_training_and_testing.Rd new file mode 100644 index 000000000..225a555a9 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/model_training_and_testing.Rd @@ -0,0 +1,81 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model.R +\name{model_training_and_testing} +\alias{model_training_and_testing} +\title{Fetch model and use to generate predictions/perform corrections} +\usage{ +model_training_and_testing( + train_data, + test_data, + taus, + covariates, + lp_solver, + lambda, + test_lag, + geo, + value_type, + model_save_dir, + indicator, + signal, + geo_level, + signal_suffix, + training_end_date, + train_models = TRUE, + make_predictions = TRUE +) +} +\arguments{ +\item{train_data}{Data Frame containing training data} + +\item{test_data}{Data frame for testing} + +\item{taus}{numeric vector of quantiles to be predicted. Values +must be between 0 and 1.} + +\item{covariates}{character vector of column names serving as the covariates for the model} + +\item{lp_solver}{string specifying the lp solver to use in +Quantgen fitting. Either "glpk" or "gurobi". 
For faster +optimization, use Gurobi (requires separate installation +of the `gurobi` package).} + +\item{lambda}{the level of lasso penalty} + +\item{test_lag}{integer number of days ago to predict for} + +\item{geo}{string specifying the name of the geo region (e.g. FIPS +code for counties)} + +\item{value_type}{string describing signal type. Either "count" or "fraction".} + +\item{model_save_dir}{directory containing trained models} + +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{geo_level}{string describing geo coverage of input data. Either "state" +or "county".} + +\item{signal_suffix}{string specifying value column name +ending to be appended to standard value column names from +`params$num_col` and `params$denom_col`. Used for non-standard +value column names and when processing multiple signals from a +single input dataframe, as with `quidel`'s age buckets.} + +\item{training_end_date}{Most recent training date} + +\item{train_models}{boolean indicating whether to train models (TRUE). If +FALSE previously trained models (stored locally) will be used instead. +Default is TRUE.} + +\item{make_predictions}{boolean indicating whether to generate and save +corrections (TRUE) or not. Default is TRUE.} +} +\description{ +Fetch model and use to generate predictions/perform corrections +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/objective.Rd b/backfill_corrections/delphiBackfillCorrection/man/objective.Rd new file mode 100644 index 000000000..375b69c2e --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/objective.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/beta_prior_estimation.R +\name{objective} +\alias{objective} +\title{Generate objective function} +\usage{ +objective(theta, x, prob, ...)
+} +\arguments{ +\item{theta}{parameters for the distribution in log scale} + +\item{x}{vector of quantiles} + +\item{prob}{the expected probabilities} + +\item{...}{additional arguments} +} +\description{ +Generate objective function +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/read_data.Rd b/backfill_corrections/delphiBackfillCorrection/man/read_data.Rd new file mode 100644 index 000000000..1b5f24726 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/read_data.Rd @@ -0,0 +1,14 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/io.R +\name{read_data} +\alias{read_data} +\title{Read a parquet file into a dataframe} +\usage{ +read_data(input_dir) +} +\arguments{ +\item{input_dir}{path to the directory containing input data} +} +\description{ +Read a parquet file into a dataframe +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/read_params.Rd b/backfill_corrections/delphiBackfillCorrection/man/read_params.Rd new file mode 100644 index 000000000..426db62f9 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/read_params.Rd @@ -0,0 +1,55 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils.R +\name{read_params} +\alias{read_params} +\title{Return params file as an R list} +\usage{ +read_params( + path = "params.json", + template_path = "params.json.template", + train_models = TRUE, + make_predictions = TRUE +) +} +\arguments{ +\item{path}{path to the parameters file; if not present, will try to copy the file +"params.json.template"} + +\item{template_path}{path to the template parameters file} + +\item{train_models}{boolean indicating whether to train models (TRUE). If +FALSE previously trained models (stored locally) will be used instead. +Default is TRUE.} + +\item{make_predictions}{boolean indicating whether to generate and save +corrections (TRUE) or not. Default is TRUE.} +} +\value{ +a named list of parameter values +} +\description{ +Reads a parameters file. If the file does not exist, the function will create a copy of +"params.json.template" and read from that. +} +\details{ +A params list should contain the following fields. If not included, +they will be filled with default values when possible. + +params$ref_lag: reference lag, after x days, the update is considered to be + the response. 60 is a reasonable choice for CHNG outpatient data +params$input_dir: link to the input data file +params$testing_window: the testing window used for saving the runtime. Could + set it to be 1 if time allows +params$test_dates: list of two elements, the first one is the start date and + the second one is the end date +params$training_days: set it to be 270 or larger if you have enough data +params$num_col: the column name for the counts of the numerator, e.g. the + number of COVID claims +params$denom_col: the column name for the counts of the denominator, e.g.
the + number of total claims +params$geo_level: character vector of "state" and "county", by default +params$taus: vector of considered quantiles +params$lambda: the level of lasso penalty +params$export_dir: directory to save corrected data to +params$lp_solver: LP solver to use in quantile_lasso(); "gurobi" or "glpk" +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/run_backfill.Rd b/backfill_corrections/delphiBackfillCorrection/man/run_backfill.Rd new file mode 100644 index 000000000..aab815222 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/run_backfill.Rd @@ -0,0 +1,51 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/main.R +\name{run_backfill} +\alias{run_backfill} +\title{Get backfill-corrected estimates for a single signal + geo combination} +\usage{ +run_backfill( + df, + params, + training_end_date, + refd_col = "time_value", + lag_col = "lag", + signal_suffixes = c(""), + indicator = "", + signal = "" +) +} +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{params}{named list containing modeling and data settings. Must include +the following elements: `ref_lag`, `testing_window`, `test_dates`, +`training_days`, `num_col`, `denom_col`, `taus`, `lambda`, `export_dir`, +`lp_solver`, `input_dir`, `cache_dir`, `geo_levels`, and `value_types`.} + +\item{training_end_date}{the most recent training date} + +\item{refd_col}{string specifying name of reference date field within +the input dataframe.} + +\item{lag_col}{string specifying name of lag field within +the input dataframe.} + +\item{signal_suffixes}{character vector specifying value column name +endings to be appended to standard value column names from +`params$num_col` and `params$denom_col`. Used for non-standard +value column names and when processing multiple signals from a +single input dataframe, as with `quidel`'s age buckets.} + +\item{indicator}{string specifying the name of the indicator as used in +`parquet` input data filenames. One indicator can be associated +with multiple signals.} + +\item{signal}{string specifying the name of the signal as used in +`parquet` input data filenames. 
One indicator can be associated +with multiple signals.} +} +\description{ +Get backfill-corrected estimates for a single signal + geo combination +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/run_backfill_local.Rd b/backfill_corrections/delphiBackfillCorrection/man/run_backfill_local.Rd new file mode 100644 index 000000000..6ee6bce71 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/run_backfill_local.Rd @@ -0,0 +1,55 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tooling.R +\name{run_backfill_local} +\alias{run_backfill_local} +\title{Corrected estimates from a single local signal} +\usage{ +run_backfill_local( + df, + export_dir, + test_date_list, + value_cols, + value_type, + taus = TAUS, + test_lags = TEST_LAGS, + training_days = TRAINING_DAYS, + testing_window = TESTING_WINDOW, + ref_lag = REF_LAG, + lambda = LAMBDA, + lp_solver = LP_SOLVER +) +} +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{export_dir}{path to directory to save output to} + +\item{test_date_list}{Date vector of dates to make predictions for} + +\item{value_cols}{character vector of numerator and/or denominator field names} + +\item{value_type}{string describing signal type. Either "count" or "fraction".} + +\item{taus}{numeric vector of quantiles to be predicted. Values +must be between 0 and 1.} + +\item{test_lags}{integer vector of number of days ago to predict for} + +\item{training_days}{integer number of days to use for training} + +\item{testing_window}{the testing window used for saving the runtime. Could +set it to be 1 if time allows} + +\item{ref_lag}{max lag to use for training} + +\item{lambda}{the level of lasso penalty} + +\item{lp_solver}{string specifying the lp solver to use in +Quantgen fitting. Either "glpk" or "gurobi". For faster +optimization, use Gurobi (requires separate installation +of the `gurobi` package).} +} +\description{ +Corrected estimates from a single local signal +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/subset_valid_files.Rd b/backfill_corrections/delphiBackfillCorrection/man/subset_valid_files.Rd new file mode 100644 index 000000000..0fde2714c --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/subset_valid_files.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/io.R +\name{subset_valid_files} +\alias{subset_valid_files} +\title{Return file names only if they contain data to be used in training} +\usage{ +subset_valid_files(files_list, file_type = c("daily", "rollup"), params) +} +\arguments{ +\item{files_list}{character vector of input files of a given `file_type`} + +\item{file_type}{string specifying time period coverage of input files. +Either "daily" or "rollup"} + +\item{params}{named list containing modeling and data settings. Must include +the following elements: `ref_lag`, `testing_window`, `test_dates`, +`training_days`, `num_col`, `denom_col`, `taus`, `lambda`, `export_dir`, +`lp_solver`, `input_dir`, `cache_dir`, `geo_levels`, and `value_types`.} +} +\description{ +Parse filenames to find included dates. Use different patterns if file +includes daily or rollup (multiple days) data. 
+} diff --git a/backfill_corrections/delphiBackfillCorrection/man/training_days_check.Rd b/backfill_corrections/delphiBackfillCorrection/man/training_days_check.Rd new file mode 100644 index 000000000..1692da955 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/training_days_check.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils.R +\name{training_days_check} +\alias{training_days_check} +\title{Check available training days} +\usage{ +training_days_check(issue_date, training_days = TRAINING_DAYS) +} +\arguments{ +\item{issue_date}{contents of input data's `issue_date` column} + +\item{training_days}{integer number of days to use for training} +} +\description{ +Check available training days +} diff --git a/backfill_corrections/delphiBackfillCorrection/man/validity_checks.Rd b/backfill_corrections/delphiBackfillCorrection/man/validity_checks.Rd new file mode 100644 index 000000000..1e55d8d8e --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/man/validity_checks.Rd @@ -0,0 +1,32 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils.R +\name{validity_checks} +\alias{validity_checks} +\title{Check input data for validity} +\usage{ +validity_checks(df, value_type, num_col, denom_col, signal_suffixes) +} +\arguments{ +\item{df}{Data Frame of aggregated counts within a single location +reported for each reference date and issue date.} + +\item{value_type}{string describing signal type. Either "count" or "fraction".} + +\item{num_col}{name of numerator column in the input dataframe} + +\item{denom_col}{name of denominator column in the input dataframe} + +\item{signal_suffixes}{character vector specifying value column name +endings to be appended to standard value column names from +`params$num_col` and `params$denom_col`. 
Used for non-standard +value column names and when processing multiple signals from a +single input dataframe, as with `quidel`'s age buckets.} +} +\value{ +list of input dataframe augmented with lag column, if it + didn't already exist, and character vector of one or two value + column names, depending on requested `value_type` +} +\description{ +Check input data for validity +} diff --git a/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat.R b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat.R new file mode 100644 index 000000000..83f3bb312 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat.R @@ -0,0 +1,4 @@ +library(testthat) +library(delphiBackfillCorrection) + +test_check("delphiBackfillCorrection", stop_on_warning = FALSE) diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/cache/.gitignore b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/cache/.gitignore similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/cache/.gitignore rename to backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/cache/.gitignore diff --git a/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/helper-relativize.R b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/helper-relativize.R new file mode 100644 index 000000000..3d62d6a7f --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/helper-relativize.R @@ -0,0 +1,13 @@ +## Helper functions to relativize paths to the testing directory, so tests can +## be run via R CMD CHECK and do not depend on the current working directory +## being tests/testthat/. + +library(testthat) + +relativize_params <- function(params) { + params$export_dir <- test_path(params$export_dir) + params$cache_dir <- test_path(params$cache_dir) + params$input_dir <- test_path(params$input_dir) + + return(params) +} diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/input/.gitignore b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/input/.gitignore similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/input/.gitignore rename to backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/input/.gitignore diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/output/.gitignore b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/output/.gitignore similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/output/.gitignore rename to backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/output/.gitignore diff --git a/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/params-run.json.template b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/params-run.json.template new file mode 100644 index 000000000..f2224855a --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/params-run.json.template @@ -0,0 +1,8 @@ +{ + "training_end_date": "2022-01-01", + "training_days": 7, + "ref_lag": 3, + "input_dir": "./input", + "export_dir": "./output", + "cache_dir": "./cache" +} diff --git a/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/params-test.json.template b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/params-test.json.template new file mode 100644 index 000000000..fb8309e94 --- /dev/null 
+++ b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/params-test.json.template @@ -0,0 +1,3 @@ +{ + "input_dir": "./test.tempt" +} diff --git a/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R new file mode 100644 index 000000000..59ea2beda --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R @@ -0,0 +1,130 @@ +context("Testing helper functions for beta prior estimation") + +# Constants +indicator <- "chng" +signal <- "outpatient" +geo_level <- "state" +signal_suffix <- "" +lambda <- 0.1 +geo <- "pa" +value_type <- "fraction" +model_save_dir <- "./cache" +training_end_date <- as.Date("2022-01-01") + +# Generate Test Data +main_covariate <- c("log_value_7dav") +null_covariates <- c("value_raw_num", "value_raw_denom", + "value_7dav_num", "value_7dav_denom", + "value_prev_7dav_num", "value_prev_7dav_denom") +dayofweek_covariates <- c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", + "Fri_ref", "Sat_ref") +response <- "log_value_target" + +set.seed(2022) +train_beta_vs <- log(rbeta(1000, 2, 5)) +test_beta_vs <- log(rbeta(50, 2, 5)) +train_data <- data.frame(log_value_7dav = train_beta_vs, + log_value_target = train_beta_vs) +train_data$value_target_num <- exp(train_beta_vs) * 100 +train_data$value_target_denom <- 100 +test_data <- data.frame(log_value_7dav = test_beta_vs, + log_value_target = test_beta_vs) +for (cov in null_covariates){ + train_data[[cov]] <- 0 + test_data[[cov]] <- 0 +} +for (cov in c(dayofweek_covariates, "Sun_ref")){ + train_data[[cov]] <- 1 + test_data[[cov]] <- 1 +} +prior_test_data <- test_data +covariates <- c(main_covariate, dayofweek_covariates) + + + +test_that("testing the sum of squared error", { + fit <- c(0, 1, 0) + actual <- c(1, 1, 1) + + expected <- 1^2 + 1^2 + computed <- delta(fit, actual) + expect_equal(expected, computed) +}) + + +test_that("testing the squared error objection function given the beta prior", { + theta <- c(log(1), log(2)) + x <- qbeta(TAUS, 1, 2) + + expected <-0 + computed <- objective(theta, x, TAUS) + expect_equal(expected, computed) +}) + + +test_that("testing the prior estimation", { + dw <- "Sat_ref" + priors <- est_priors(train_data, prior_test_data, geo, value_type, dw, TAUS, + covariates, response, LP_SOLVER, lambda, + indicator, signal, geo_level, signal_suffix, + training_end_date, model_save_dir) + alpha <- priors[2] + beta <- priors[1] - alpha + expect_true((alpha > 1) & (alpha < 3)) + expect_true((beta > 4) & (beta < 6)) + + for (idx in 1:length(TAUS)) { + tau <- TAUS[idx] + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda, + geo=geo, dw=dw, tau=tau, + value_type=value_type, + training_end_date=training_end_date, + beta_prior_mode=TRUE) + model_path <- file.path(model_save_dir, model_file_name) + expect_true(file.exists(model_path)) + file.remove(model_path) + } +}) + + +test_that("testing the fraction adjustment with pseudo counts", { + value_raw <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_raw_num", "value_raw_denom") + expect_true(all(value_raw == 1/100)) + + dw <- "Sat_ref" + value_raw <- frac_adj_with_pseudo(train_data, dw, 1, 100, "value_raw_num", "value_raw_denom") + expect_true(all(value_raw == 1/100)) +}) + + +test_that("testing the main beta prior adjustment function", { + set.seed(1) + updated_data <- frac_adj(train_data, test_data, 
prior_test_data, + indicator, signal, geo_level, signal_suffix, + lambda, value_type, geo, + training_end_date, model_save_dir, + taus = TAUS, lp_solver = LP_SOLVER) + updated_train_data <- updated_data[[1]] + updated_test_data <- updated_data[[2]] + + for (dw in c(dayofweek_covariates, "Sun_ref")){ + for (idx in 1:length(TAUS)) { + tau <- TAUS[idx] + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda, + geo=geo, dw=dw, tau=tau, + value_type=value_type, + training_end_date=training_end_date, + beta_prior_mode=TRUE) + model_path <- file.path(model_save_dir, model_file_name) + expect_true(file.exists(model_path)) + file.remove(model_path) + } + } + + expect_true(unique(updated_train_data$value_raw) == unique(updated_test_data$value_raw)) + expect_true(all(updated_train_data$value_raw < 3/(3+4))) + expect_true(all(updated_train_data$value_raw > 1/(1+6))) +}) + diff --git a/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-io.R b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-io.R new file mode 100644 index 000000000..07636e140 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-io.R @@ -0,0 +1,118 @@ +library(arrow) + +context("Testing io helper functions") + +# Constants +indicator <- "chng" +signal <- "outpatient" +geo_level <- "state" +signal_suffix <- "" +lambda <- 0.1 +geo <- "pa" +value_type <- "fraction" +date_format = "%Y%m%d" +training_end_date <- as.Date("2022-01-01") + +create_dir_not_exist("./input") +create_dir_not_exist("./output") +create_dir_not_exist("./cache") + +test_that("testing exporting the output file", { + params <- read_params("params-run.json", "params-run.json.template") + + test_data <- data.frame(test=TRUE) + coef_data <- data.frame(test=TRUE) + + export_test_result(test_data, coef_data, indicator, signal, + geo_level, signal_suffix, lambda, + training_end_date, + value_type, params$export_dir) + prediction_file <- file.path(params$export_dir, "prediction_2022-01-01_chng_outpatient_state_lambda0.1_fraction.csv") + coefs_file <- file.path(params$export_dir, "coefs_2022-01-01_chng_outpatient_state_lambda0.1_fraction.csv") + + expect_true(file.exists(prediction_file)) + expect_true(file.exists(coefs_file)) + + # Remove + file.remove(prediction_file) + file.remove(coefs_file) + file.remove("params-run.json") +}) + + +test_that("testing creating file name pattern", { + params <- read_params("params-run.json", "params-run.json.template") + + daily_pattern <- create_name_pattern(indicator, signal, "daily") + rollup_pattern <- create_name_pattern(indicator, signal, "rollup") + + # Create test files + daily_data <- data.frame(test=TRUE) + daily_file_name <- file.path(params$input_dir, + str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")) + write_parquet(daily_data, daily_file_name) + + rollup_file_name <- file.path(params$input_dir, + str_interp("chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")) + rollup_data <- data.frame(test=TRUE) + write_parquet(rollup_data, rollup_file_name) + + + filtered_daily_file <- list.files( + params$input_dir, pattern = daily_pattern, full.names = TRUE) + expect_equal(filtered_daily_file, daily_file_name) + + filtered_rollup_file <- list.files( + params$input_dir, pattern = rollup_pattern, full.names = TRUE) + expect_equal(filtered_rollup_file, rollup_file_name) + + file.remove(daily_file_name) + file.remove(rollup_file_name) + 
file.remove("params-run.json") +}) + + +test_that("testing the filtration of the files for training and predicting", { + params <- read_params("params-run.json", "params-run.json.template") + + daily_files_list <- c(file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY-15, date_format)}.parquet")), + file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")), + file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY, date_format)}.parquet"))) + daily_valid_files <- subset_valid_files(daily_files_list, "daily", params) + expect_equal(daily_valid_files, daily_files_list[2]) + + rollup_files_list <- c(file.path(params$input_dir, str_interp( + "chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY-11, date_format)}.parquet")), + file.path(params$input_dir, str_interp( + "chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")), + file.path(params$input_dir, str_interp( + "chng_outpatient_from_${format(TODAY, date_format)}_to_${format(TODAY+3, date_format)}.parquet"))) + rollup_valid_files <- subset_valid_files(rollup_files_list, "rollup", params) + expect_equal(rollup_valid_files, rollup_files_list[2]) + + file.remove("params-run.json") +}) + +test_that("testing fetching list of files for training and predicting", { + params <- read_params("params-run.json", "params-run.json.template") + + daily_data <- data.frame(test=TRUE) + daily_file_name <- file.path(params$input_dir, + str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")) + write_parquet(daily_data, daily_file_name) + + rollup_file_name <- file.path(params$input_dir, + str_interp("chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")) + rollup_data <- data.frame(test=TRUE) + write_parquet(rollup_data, rollup_file_name) + + + files <- get_files_list(indicator, signal, params) + expect_true(all(files == c(daily_file_name, rollup_file_name))) + + file.remove(daily_file_name) + file.remove(rollup_file_name) + file.remove("params-run.json") +}) + + diff --git a/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-model.R b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-model.R new file mode 100644 index 000000000..2a1221344 --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-model.R @@ -0,0 +1,173 @@ +context("Testing the helper functions for modeling") + +# Constants +indicator <- "chng" +signal <- "outpatient" +geo_level <- "state" +signal_suffix <- "" +lambda <- 0.1 +test_lag <- 1 +model_save_dir <- "./cache" +geo <- "pa" +value_type <- "fraction" +training_end_date <- as.Date("2022-01-01") + +# Generate Test Data +main_covariate <- c("log_value_7dav") +null_covariates <- c("value_raw_num", "value_raw_denom", + "value_7dav_num", "value_7dav_denom", + "value_prev_7dav_num", "value_prev_7dav_denom") +dayofweek_covariates <- c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", + "Fri_ref", "Sat_ref") +response <- "log_value_target" +train_beta_vs <- log(rbeta(1000, 2, 5)) +test_beta_vs <- log(rbeta(61, 2, 5)) +train_data <- data.frame(log_value_7dav = train_beta_vs, + log_value_target = train_beta_vs) +train_data$value_target_num <- exp(train_beta_vs) * 100 +train_data$value_target_denom <- 100 +test_data <- data.frame(log_value_7dav = test_beta_vs, + log_value_target = test_beta_vs) +for (cov in null_covariates){ + train_data[[cov]] <- 0 + test_data[[cov]] 
<- 0 +} +for (cov in c(dayofweek_covariates, "Sun_ref")){ + train_data[[cov]] <- 1 + test_data[[cov]] <- 1 +} +covariates <- c(main_covariate, dayofweek_covariates) + + +test_that("testing the generation of model filename prefix", { + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda) + expected <- "chng_outpatient_state_lambda0.1.model" + expect_equal(model_file_name, expected) +}) + +test_that("testing the evaluation", { + for (tau in TAUS){ + test_data[[paste0("predicted_tau", as.character(tau))]] <- log(quantile(exp(train_beta_vs), tau)) + } + result <- evaluate(test_data, TAUS) + expect_true(mean(result$wis) < 0.3) +}) + +test_that("testing generating or loading the model", { + # Check the model that does not exist + tau = 0.5 + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda, + geo=geo, test_lag=test_lag, tau=tau) + model_path <- file.path(model_save_dir, model_file_name) + expect_true(!file.exists(model_path)) + + # Generate the model and check again + obj <- get_model(model_path, train_data, covariates, tau, + lambda, LP_SOLVER, train_models=TRUE) + expect_true(file.exists(model_path)) + created <- file.info(model_path)$ctime + + # Check that the model was not generated again. + obj <- get_model(model_path, train_data, covariates, tau, + lambda, LP_SOLVER, train_models=FALSE) + expect_equal(file.info(model_path)$ctime, created) + + expect_silent(file.remove(model_path)) +}) + +test_that("testing model training and testing", { + result <- model_training_and_testing(train_data, test_data, TAUS, covariates, + LP_SOLVER, lambda, test_lag, + geo, value_type, model_save_dir, + indicator, signal, + geo_level, signal_suffix, + training_end_date, + train_models = TRUE, + make_predictions = TRUE) + test_result <- result[[1]] + coef_df <- result[[2]] + + for (tau in TAUS){ + cov <- paste0("predicted_tau", as.character(tau)) + expect_true(cov %in% colnames(test_result)) + + model_file_name <- generate_filename(indicator, signal, + geo_level, signal_suffix, lambda, + geo=geo, test_lag=test_lag, tau=tau, + training_end_date=training_end_date) + model_path <- file.path(model_save_dir, model_file_name) + expect_true(file.exists(model_path)) + + expect_silent(file.remove(model_path)) + } + + for (cov in covariates){ + cov <- paste(cov, "coef", sep="_") + expect_true(cov %in% colnames(coef_df)) + } +}) + +test_that("testing adding square root scale", { + expect_error(result <- add_sqrtscale(train_data, test_data, 1, "value_raw"), + "value raw does not exist in training data!") + + train_data$value_raw <- rbeta(nrow(train_data), 2, 5) + expect_error(result <- add_sqrtscale(train_data, test_data, 1, "value_raw"), + "value raw does not exist in testing data!") + + test_data$value_raw <- rbeta(nrow(test_data), 2, 5) + expect_silent(result <- add_sqrtscale(train_data, test_data, 1, "value_raw")) + + new_train_data <- result[[1]] + new_test_data <- result[[2]] + sqrtscales <- result[[3]] + expect_true(length(sqrtscales) == 4) + for (cov in sqrtscales){ + expect_true(cov %in% colnames(new_train_data)) + expect_true(cov %in% colnames(new_test_data)) + } + expect_true(all(rowSums(new_train_data[sqrtscales]) %in% c(0, 1))) + expect_true(all(rowSums(new_test_data[sqrtscales]) %in% c(0, 1))) + + for (i in 0:2){ + m_l <- max(new_train_data[new_train_data[[paste0("sqrty", as.character(i))]] == 1, "value_raw"]) + m_r <- min(new_train_data[new_train_data[[paste0("sqrty", as.character(i+1))]] == 1, "value_raw"]) + expect_true(m_l <= 
m_r) + } + +}) + +test_that("testing data filteration", { + train_data$lag <- rep(0:60, nrow(train_data))[1:nrow(train_data)] + test_data$lag <- rep(0:60, nrow(test_data))[1:nrow(test_data)] + + # When test lag is small + test_lag <- 5 + result <- data_filteration(test_lag, train_data, test_data, 2) + train_df <- result[[1]] + test_df <- result[[2]] + expect_true(max(train_df$lag) == test_lag+2) + expect_true(min(train_df$lag) == test_lag-2) + expect_true(all(test_df$lag == test_lag)) + + # When test lag is large + test_lag <- 48 + result <- data_filteration(test_lag, train_data, test_data, 2) + train_df <- result[[1]] + test_df <- result[[2]] + expect_true(max(test_df$lag) == test_lag+7) + expect_true(min(test_df$lag) == test_lag-6) + + # Make sure that all lags are tested + included_lags = c() + for (test_lag in c(1:14, 21, 35, 51)){ + result <- data_filteration(test_lag, train_data, test_data, 2) + test_df <- result[[2]] + included_lags <- c(included_lags, unique(test_df$lag)) + } + expect_true(all(1:60 %in% included_lags)) +}) + + diff --git a/Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R similarity index 100% rename from Backfill_Correction/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R rename to backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R diff --git a/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-utils.R b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-utils.R new file mode 100644 index 000000000..a733f2a1d --- /dev/null +++ b/backfill_corrections/delphiBackfillCorrection/unit-tests/testthat/test-utils.R @@ -0,0 +1,136 @@ +context("Testing utils helper functions") + +test_that("testing create directory if not exist", { + # If not exists + path = "test.test" + create_dir_not_exist(path) + expect_true(file.exists(path)) + + # If already exists + create_dir_not_exist(path) + expect_true(file.exists(path)) + + # Remove + unlink(path, recursive = TRUE) + expect_true(!file.exists(path)) +}) + + +test_that("testing number of available issue dates for training", { + start_date <- as.Date("2022-01-01") + end_date <- as.Date("2022-01-09") + training_days = 10 + issue_date <- seq(start_date, end_date, by = "days") + expect_warning(training_days_check(issue_date, training_days = training_days), + "Only 9 days are available at most for training.") + + end_date <- as.Date("2022-01-10") + training_days = 10 + issue_date <- seq(start_date, end_date, by = "days") + expect_silent(training_days_check(issue_date, training_days = training_days)) +}) + +test_that("testing get the top200 populous counties", { + counties <- get_populous_counties() + + expect_true(length(counties) == 200) + expect_true("06037" %in% counties) +}) + +test_that("testing read parameters", { + # No input file + expect_error(read_params(path = "params-test.json", template_path = "params-test.json.template", + train_models = TRUE, make_predictions = TRUE), + "input_dir must be set in `params` and exist") + + # Check parameters + params <- read_json("params-test.json", simplifyVector = TRUE) + # Check initialization + expect_true(!("export_dir" %in% names(params))) + expect_true(!("cache_dir" %in% names(params))) + + expect_true(!("parallel" %in% names(params))) + expect_true(!("parallel_max_cores" %in% names(params))) + + + expect_true(!("taus" %in% names(params))) + expect_true(!("lambda" %in% 
names(params))) + expect_true(!("lp_solver" %in% names(params))) + expect_true(!("lag_pad" %in% names(params))) + + expect_true(!("taus" %in% names(params))) + expect_true(!("lambda" %in% names(params))) + expect_true(!("lp_solver" %in% names(params))) + + expect_true(!("num_col" %in% names(params))) + expect_true(!("denom_col" %in% names(params))) + expect_true(!("geo_levels" %in% names(params))) + expect_true(!("value_types" %in% names(params))) + + expect_true(!("training_days" %in% names(params))) + expect_true(!("ref_lag" %in% names(params))) + expect_true(!("testing_window" %in% names(params))) + expect_true(!("test_dates" %in% names(params))) + + # Create input file + path = "test.tempt" + create_dir_not_exist(path) + expect_silent(params <- read_params(path = "params-test.json", + template_path = "params-test.json.template", + train_models = TRUE, make_predictions = TRUE)) + unlink(path, recursive = TRUE) + + + expect_true("export_dir" %in% names(params)) + expect_true("cache_dir" %in% names(params)) + + expect_true("parallel" %in% names(params)) + expect_true("parallel_max_cores" %in% names(params)) + + + expect_true("taus" %in% names(params)) + expect_true("lambda" %in% names(params)) + expect_true("lp_solver" %in% names(params)) + + expect_true("taus" %in% names(params)) + expect_true("lambda" %in% names(params)) + expect_true("lp_solver" %in% names(params)) + expect_true("lag_pad" %in% names(params)) + + expect_true("num_col" %in% names(params)) + expect_true("denom_col" %in% names(params)) + expect_true("geo_levels" %in% names(params)) + expect_true("value_types" %in% names(params)) + + expect_true("training_days" %in% names(params)) + expect_true("ref_lag" %in% names(params)) + expect_true("testing_window" %in% names(params)) + expect_true("test_dates" %in% names(params)) + + expect_true(params$export_dir == "./receiving") + expect_true(params$cache_dir == "./cache") + + expect_true(params$parallel == FALSE) + expect_true(params$parallel_max_cores == .Machine$integer.max) + + expect_true(all(params$taus == TAUS)) + expect_true(params$lambda == LAMBDA) + expect_true(params$lp_solver == LP_SOLVER) + expect_true(params$lag_pad == LAG_PAD) + + expect_true(params$num_col == "num") + expect_true(params$denom_col == "denom") + expect_true(all(params$geo_levels == c("state", "county"))) + expect_true(all(params$value_types == c("count", "fraction"))) + + expect_true(params$training_days == TRAINING_DAYS) + expect_true(params$ref_lag == REF_LAG) + expect_true(params$testing_window == TESTING_WINDOW) + start_date <- TODAY - params$testing_window + end_date <- TODAY - 1 + expect_true(all(params$test_dates == seq(start_date, end_date, by="days"))) + + expect_silent(file.remove("params-test.json")) +}) + + diff --git a/backfill_corrections/delphiBackfillCorrection_1.0.tar.gz b/backfill_corrections/delphiBackfillCorrection_1.0.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..9efc895026aa560e288d976095626b7d3a9bba33 GIT binary patch literal 28675 zcmV)2K+L}%iwFP!000001MR(ed)wBrINZPaDNgCQAsdP&sjX4VJU`WTQoq;RwVdSX zI;k!|sBoEUp zi-xl}ojiE-_~_xQCof+=dGY)Y*Zg$gv$wN@f4lp;9s4hSx;vfTAG$l+@c-@ooxR;Z zbh^9yoxMMZ&L8gJGtXy123?s1vxViBz5hZ#FJa|_)965i6&s=U`eF*NWx3kxc$UCV z`CjDl=_E)*I-ezRl;yQw;2)@TAiC{N?NKm;qP%R*@5`~;>qd1 z;Xm;@n4H$01@UAC|3&^A@wb1^;7{-W1(Ps~&fCNByd8z}A8L=He3-@48MX8}co*d& zn16_qILI!rFVp!ByeoEO}1j8&oB zD9xZq9tGL(4BHap6y_M_`F)h6({VJJwQEn~VKm8Ml12#d>63?#pC3Jj;X!L*ejr|n z#t(h5-QI0?nyuQC@ifh5@Hm_%7gSVZ5p7^a|Y0B 
zMp3jV^#`F!6vrSp*!2b>J2HvRJ)3Zs0MdI~7In#>ZZ)^xZPPn@Yf3Y3Q=H6fcbXm+ zSnofsu)W>RuS7|m@n~f975A_JNAt1Yz#G%ObG$`xn3jRbr&|{$YD6HKDMmM zVg->>*(zsJI#&*fh8jqXVirvsor67?QlAzy-*gZBXU7gMR)8?uMMKb!jumtMD@GJ1 z11^LT=OQ1&Ca@CBA>>_$0Yv}-T>ko|rJ`E8PPwx&P~J%luzE+uOU{ zoi4_I>uzu2zkHheKWBMMT|ZQ2bJ8NCK8EiJ@Y3#7KgXDGHJyc%9z7(V*ul#z4d+An z@`$dRYCr-}qaRjZ!U0KLP-34hjz;Pb*sqOHbY~i58+hoB5@{^L*~XM#<&RerGVcWB zPC3Y(n+Lgj4UoGF>?b+9tqORt-4gpP0pIu3m;K6`r5mU}Y>+c@5C7T4fA}s?J?i!7 zIkZHM#^<5A$&|O4@&;4iUgDP@vshkD!tOdB%R0Yy2b95sYV@q{+w%K@t0G_mb7V&; z%^KaO$BbflZt42~bI{`%?`04j; zNQ-KiQ~sh5uO$?MwN(0H90$M5CDO@6wGEAnTe^PpyKb$hza_4hy9C=y0{e4AHt^kc zw^_7F4X7%IP;D>${IGOh=iwA5rebPQ9WJMixG1tz6f$2^uK2D) zSkNEOHCW|#EZOM@W3G0D{Ez#hGMqo?Fpd`Ayt1(GAC@%vOs81OF5!YFGv zU@&71-Vb4iZZ93xd>cM<|{9%H?j+R6K-Z=~`e)6;(4fGo3Rp`odwmX^%w}q{vc6$=zX|MF3PK?UU|)OKf$I zo8kfRRK;yAI|MCh3xI_t0Gw6yEFM`mYxEPHg+^K@iBHU??7CB7%D*u$j%~KEyhKy5 zRY>8EQMF^LRUb1_>CMTJa+Br@(Ty^^qS;6R({NDQsH6e3AA+`G+`)C$U|T1`L) zV3FvNJXjYg5xEF1B1-%Ypyn|t8oH(RWj*<4>8>IkSKEah-x%Cof~zdcG~6{PAP6m1}-~rAdOC-94(gy?)K!_LZg=YHoL| z>0NvaSRjbyc;}KtyYUjDcs^c*7;b;2#88WdOR1Ke!hJeKg7?z;gw0)Vg=f1tzS>_= z=cJV=fqxi=lnj)5rC}OY5j%we8y*-?UcYdn4#d7%ap~S$(oxSmyI4o2 zB&EcYTiqRurK~Bl$?enpo5i`1v%P$ZczU(bNt?9{HNU=-~VT4 zlmFop>HleY%k=)DGWbelayDTiT5ZdsRc}wMhdFuR&dxAGlT0)LCADR%7dc7AXRC}_ zMVW*c=u{JkN5}@fAW%6fKf16V$S4G{=$2_ihdl*Qr^_ z&aiD~OT3q$Lkm5ezOU;GZ!vZG* zHtU|H(_W_|rs;H^r1P9l_E@XF)la2sQsoBfMMGO8mKb2ELc}CPGyXQOclJ8l`*meB zB@0(|8IhCSpCe!#jB@^YDpk^mG)3DxhRSN2MBeVlG}qwtJIK>IJa~YM69g(ru?VBv6d%r}QndaM-X>{^ZKk!MqRG?Z#GyQ0DU zO%da((&gqV>{@Z3EF*?~@x_#|1pY!oSXFol6)Yb~B^B`tE+8R=q6_KA=h6uea5ecY z>F^?I=a*ef>ip76=$v1C8IdcS?4=aWud#@{6`(JtZDmy!5VqnFig|f~>8!UO0P;rh zdu7g0Qp4or zS<#k^c>$%K9zOf~qr<9VCSbo8vA+~1?S2OO)8OUdqv|=4Tc2hSBSchy)VrPvR_A$; z#m%*{}pUr3U*?hjf&;JjQTtIIC1_J<79j!+I literal 0 HcmV?d00001 diff --git a/Backfill_Correction/params.json.production.template b/backfill_corrections/params.json.production.template similarity index 100% rename from Backfill_Correction/params.json.production.template rename to backfill_corrections/params.json.production.template diff --git a/Backfill_Correction/params.json.template b/backfill_corrections/params.json.template similarity index 100% rename from Backfill_Correction/params.json.template rename to backfill_corrections/params.json.template diff --git a/Backfill_Correction/run.R b/backfill_corrections/run.R similarity index 100% rename from Backfill_Correction/run.R rename to backfill_corrections/run.R From fb38e07ac9b292a070bc958a1cd183da6cda33b1 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 29 Sep 2022 10:31:22 -0400 Subject: [PATCH 140/145] remove local test files --- backfill_corrections/.Rhistory | 10 - .../delphiBackfillCorrection/DESCRIPTION | 21 -- .../delphiBackfillCorrection/LICENSE | 2 - .../delphiBackfillCorrection/NAMESPACE | 65 ----- .../R/beta_prior_estimation.R | 212 -------------- .../delphiBackfillCorrection/R/constants.R | 33 --- .../R/delphiBackfillCorrection.R | 3 - .../delphiBackfillCorrection/R/io.R | 133 --------- .../delphiBackfillCorrection/R/main.R | 273 ------------------ .../delphiBackfillCorrection/R/model.R | 259 ----------------- .../R/preprocessing.R | 229 --------------- .../delphiBackfillCorrection/R/tooling.R | 178 ------------ .../delphiBackfillCorrection/R/utils.R | 165 ----------- .../man-roxygen/covariates-template.R | 1 - .../man-roxygen/denom_col-template.R | 1 - .../man-roxygen/df-template.R | 2 - .../man-roxygen/export_dir-template.R | 1 - .../man-roxygen/file_type-template.R | 2 - 
.../man-roxygen/geo-template.R | 2 - .../man-roxygen/geo_level-template.R | 2 - .../man-roxygen/indicator-template.R | 3 - .../man-roxygen/input_dir-template.R | 1 - .../man-roxygen/lag_col-template.R | 2 - .../man-roxygen/lambda-template.R | 1 - .../man-roxygen/lp_solver-template.R | 4 - .../man-roxygen/make_predictions-template.R | 2 - .../man-roxygen/num_col-template.R | 1 - .../man-roxygen/params-template.R | 4 - .../man-roxygen/ref_lag-template.R | 1 - .../man-roxygen/refd_col-template.R | 2 - .../man-roxygen/signal-template.R | 3 - .../man-roxygen/signal_suffix-template.R | 5 - .../man-roxygen/signal_suffixes-template.R | 5 - .../man-roxygen/taus-template.R | 2 - .../man-roxygen/test_lag-template.R | 1 - .../man-roxygen/testing_window-template.R | 2 - .../man-roxygen/time_col-template.R | 2 - .../man-roxygen/train_data-template.R | 1 - .../man-roxygen/train_models-template.R | 3 - .../man-roxygen/training_days-template.R | 1 - .../man-roxygen/value_col-template.R | 2 - .../man-roxygen/value_type-template.R | 1 - .../man/add_7davs_and_target.Rd | 28 -- .../man/add_dayofweek.Rd | 24 -- .../man/add_params_for_dates.Rd | 21 -- .../delphiBackfillCorrection/man/add_shift.Rd | 20 -- .../man/add_sqrtscale.Rd | 21 -- .../man/add_weekofmonth.Rd | 20 -- .../man/create_dir_not_exist.Rd | 14 - .../man/create_name_pattern.Rd | 23 -- .../man/data_filteration.Rd | 20 -- .../delphiBackfillCorrection/man/delta.Rd | 16 - .../man/est_priors.Rd | 97 ------- .../delphiBackfillCorrection/man/evaluate.Rd | 23 -- .../man/export_test_result.Rd | 52 ---- .../man/fill_missing_updates.Rd | 30 -- .../delphiBackfillCorrection/man/fill_rows.Rd | 30 -- .../delphiBackfillCorrection/man/frac_adj.Rd | 78 ----- .../man/frac_adj_with_pseudo.Rd | 24 -- .../man/generate_filename.Rd | 65 ----- .../delphiBackfillCorrection/man/get_7dav.Rd | 20 -- .../man/get_files_list.Rd | 28 -- .../delphiBackfillCorrection/man/get_model.Rd | 39 --- .../man/get_populous_counties.Rd | 11 - .../man/get_weekofmonth.Rd | 25 -- .../delphiBackfillCorrection/man/main.Rd | 17 -- .../man/main_local.Rd | 55 ---- .../man/model_training_and_testing.Rd | 81 ------ .../delphiBackfillCorrection/man/objective.Rd | 20 -- .../delphiBackfillCorrection/man/read_data.Rd | 14 - .../man/read_params.Rd | 55 ---- .../man/run_backfill.Rd | 51 ---- .../man/run_backfill_local.Rd | 55 ---- .../man/subset_valid_files.Rd | 23 -- .../man/training_days_check.Rd | 16 - .../man/validity_checks.Rd | 32 -- .../unit-tests/testthat.R | 4 - .../unit-tests/testthat/helper-relativize.R | 13 - .../testthat/params-run.json.template | 8 - .../testthat/params-test.json.template | 3 - .../testthat/test-beta_prior_estimation.R | 130 --------- .../unit-tests/testthat/test-io.R | 118 -------- .../unit-tests/testthat/test-model.R | 173 ----------- .../unit-tests/testthat/test-preprocessing.R | 132 --------- .../unit-tests/testthat/test-utils.R | 136 --------- .../00install.out | 11 - .../delphiBackfillCorrection-manual.pdf | Bin 120174 -> 0 bytes .../delphiBackfillCorrection/DESCRIPTION | 22 -- .../delphiBackfillCorrection/INDEX | 65 ----- .../delphiBackfillCorrection/LICENSE | 2 - .../delphiBackfillCorrection/Meta/Rd.rds | Bin 1959 -> 0 bytes .../Meta/features.rds | Bin 121 -> 0 bytes .../delphiBackfillCorrection/Meta/hsearch.rds | Bin 1809 -> 0 bytes .../delphiBackfillCorrection/Meta/links.rds | Bin 663 -> 0 bytes .../delphiBackfillCorrection/Meta/nsInfo.rds | Bin 827 -> 0 bytes .../delphiBackfillCorrection/Meta/package.rds | Bin 1081 -> 0 bytes .../delphiBackfillCorrection/NAMESPACE | 65 
----- .../R/delphiBackfillCorrection | 27 -- .../R/delphiBackfillCorrection.rdb | Bin 57192 -> 0 bytes .../R/delphiBackfillCorrection.rdx | Bin 1098 -> 0 bytes .../delphiBackfillCorrection/help/AnIndex | 34 --- .../delphiBackfillCorrection/help/aliases.rds | Bin 427 -> 0 bytes .../help/delphiBackfillCorrection.rdb | Bin 57034 -> 0 bytes .../help/delphiBackfillCorrection.rdx | Bin 994 -> 0 bytes .../delphiBackfillCorrection/help/paths.rds | Bin 563 -> 0 bytes .../html/00Index.html | 93 ------ .../delphiBackfillCorrection/html/R.css | 120 -------- .../tests/startup.Rs | 4 - .../tests/testthat.R | 4 - .../tests/testthat.Rout.fail | 33 --- .../tests/testthat/helper-relativize.R | 13 - .../tests/testthat/params-run.json.template | 8 - .../tests/testthat/params-test.json.template | 3 - .../testthat/test-beta_prior_estimation.R | 130 --------- .../tests/testthat/test-io.R | 118 -------- .../tests/testthat/test-model.R | 173 ----------- .../tests/testthat/test-preprocessing.R | 132 --------- .../tests/testthat/test-utils.R | 136 --------- .../tests/testthat/testthat-problems.rds | Bin 16934 -> 0 bytes .../delphiBackfillCorrection_1.0.tar.gz | Bin 28675 -> 0 bytes 120 files changed, 4701 deletions(-) delete mode 100644 backfill_corrections/.Rhistory delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/DESCRIPTION delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/LICENSE delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/NAMESPACE delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/beta_prior_estimation.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/constants.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/delphiBackfillCorrection.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/io.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/main.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/model.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/preprocessing.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/tooling.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/utils.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/covariates-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/denom_col-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/df-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/export_dir-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/file_type-template.R delete mode 100644 
backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/geo-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/geo_level-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/indicator-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/input_dir-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lag_col-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lambda-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lp_solver-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/make_predictions-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/num_col-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/params-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/ref_lag-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/refd_col-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal_suffix-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal_suffixes-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/taus-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/test_lag-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/testing_window-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/time_col-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/train_data-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/train_models-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/training_days-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/value_col-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/value_type-template.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_7davs_and_target.Rd delete mode 100644 
backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_dayofweek.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_params_for_dates.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_shift.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_sqrtscale.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_weekofmonth.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/create_dir_not_exist.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/create_name_pattern.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/data_filteration.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/delta.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/est_priors.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/evaluate.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/export_test_result.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/fill_missing_updates.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/fill_rows.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/frac_adj.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/frac_adj_with_pseudo.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/generate_filename.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_7dav.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_files_list.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_model.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_populous_counties.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_weekofmonth.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/main.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/main_local.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/model_training_and_testing.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/objective.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/read_data.Rd delete mode 100644 
backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/read_params.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/run_backfill.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/run_backfill_local.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/subset_valid_files.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/training_days_check.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/validity_checks.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/helper-relativize.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/params-run.json.template delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/params-test.json.template delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-io.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-model.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-utils.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/00install.out delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection-manual.pdf delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/DESCRIPTION delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/INDEX delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/LICENSE delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/Rd.rds delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/features.rds delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/hsearch.rds delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/links.rds delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/nsInfo.rds delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/package.rds delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/NAMESPACE delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/R/delphiBackfillCorrection delete mode 100644 
backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/R/delphiBackfillCorrection.rdb delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/R/delphiBackfillCorrection.rdx delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/AnIndex delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/aliases.rds delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/delphiBackfillCorrection.rdb delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/delphiBackfillCorrection.rdx delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/paths.rds delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/html/00Index.html delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/html/R.css delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/startup.Rs delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat.Rout.fail delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/helper-relativize.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/params-run.json.template delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/params-test.json.template delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-beta_prior_estimation.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-io.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-model.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-preprocessing.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-utils.R delete mode 100644 backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/testthat-problems.rds delete mode 100644 backfill_corrections/delphiBackfillCorrection_1.0.tar.gz diff --git a/backfill_corrections/.Rhistory b/backfill_corrections/.Rhistory deleted file mode 100644 index 8756eb27e..000000000 --- a/backfill_corrections/.Rhistory +++ /dev/null @@ -1,10 +0,0 @@ -getwd() -test_dir("delphiBackfillCorrection/unit-tests/testthat", package="delphiBackfillCorrection") -testthat::test_dir("delphiBackfillCorrection/unit-tests/testthat", package="delphiBackfillCorrection") -testthat::test_dir("delphiBackfillCorrection/unit-tests/testthat", package="delphiBackfillCorrection") -df_new -fake_df -refd_col -wm -rowSums(df_new[, -c(1:ncol(fake_df))]) -df_new[, -c(1:ncol(fake_df))] diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/DESCRIPTION b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/DESCRIPTION deleted file mode 100644 index dfdd673b4..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/DESCRIPTION +++ /dev/null @@ -1,21 +0,0 @@ -Package: delphiBackfillCorrection -Type: Package -Title: Correct signal outliers -Version: 1.0 -Date: 2022-08-24 -Author: Jingjing Tang -Maintainer: Jingjing Tang 
-Description: Takes auxiliary output from COVIDcast API data pipelines and - adjusts unusual values using a lasso-penalized quantile regression. - Output is used for research and model development. -License: file LICENSE -Depends: R (>= 3.5.0), -Imports: dplyr, plyr, readr, tibble, stringr, covidcast, quantgen, - arrow, evalcast, jsonlite, lubridate, tidyr, zoo, utils, rlang, - parallel -Suggests: knitr (>= 1.15), rmarkdown (>= 1.4), testthat (>= 1.0.1), - covr (>= 2.2.2) -RoxygenNote: 7.2.0 -Encoding: UTF-8 -NeedsCompilation: no -Packaged: 2022-09-26 15:03:17 UTC; nat diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/LICENSE b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/LICENSE deleted file mode 100644 index 2d1447e00..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/LICENSE +++ /dev/null @@ -1,2 +0,0 @@ -Currently approved for internal DELPHI use only. - diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/NAMESPACE b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/NAMESPACE deleted file mode 100644 index 133d2a5b7..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/NAMESPACE +++ /dev/null @@ -1,65 +0,0 @@ -# Generated by roxygen2: do not edit by hand - -export(add_7davs_and_target) -export(add_dayofweek) -export(add_shift) -export(add_sqrtscale) -export(add_weekofmonth) -export(create_dir_not_exist) -export(data_filteration) -export(evaluate) -export(fill_missing_updates) -export(fill_rows) -export(frac_adj) -export(frac_adj_with_pseudo) -export(get_7dav) -export(main) -export(main_local) -export(model_training_and_testing) -export(read_data) -export(run_backfill) -export(run_backfill_local) -import(covidcast) -importFrom(arrow,read_parquet) -importFrom(dplyr,"%>%") -importFrom(dplyr,across) -importFrom(dplyr,arrange) -importFrom(dplyr,bind_rows) -importFrom(dplyr,desc) -importFrom(dplyr,everything) -importFrom(dplyr,filter) -importFrom(dplyr,group_by) -importFrom(dplyr,group_split) -importFrom(dplyr,if_else) -importFrom(dplyr,pull) -importFrom(dplyr,select) -importFrom(dplyr,summarize) -importFrom(evalcast,weighted_interval_score) -importFrom(jsonlite,read_json) -importFrom(lubridate,day) -importFrom(lubridate,days_in_month) -importFrom(lubridate,make_date) -importFrom(lubridate,month) -importFrom(lubridate,year) -importFrom(parallel,detectCores) -importFrom(plyr,rbind.fill) -importFrom(quantgen,quantile_lasso) -importFrom(readr,read_csv) -importFrom(readr,write_csv) -importFrom(rlang,.data) -importFrom(rlang,.env) -importFrom(stats,coef) -importFrom(stats,nlm) -importFrom(stats,pbeta) -importFrom(stats,predict) -importFrom(stats,setNames) -importFrom(stringr,str_interp) -importFrom(stringr,str_split) -importFrom(tibble,tribble) -importFrom(tidyr,crossing) -importFrom(tidyr,drop_na) -importFrom(tidyr,fill) -importFrom(tidyr,pivot_longer) -importFrom(tidyr,pivot_wider) -importFrom(utils,head) -importFrom(zoo,rollmeanr) diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/beta_prior_estimation.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/beta_prior_estimation.R deleted file mode 100644 index dadb48984..000000000 --- 
a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/beta_prior_estimation.R +++ /dev/null @@ -1,212 +0,0 @@ -## Functions for Beta Prior Approach. -## -## This is used only for the fraction prediction e.g. fraction of Covid claims, -## percentage of positive tests. We assume that the fraction follows a beta distribution -## that is day-of-week dependent. A quantile regression model is used first with lasso -## penalty for supporting quantile estimation and then a non-linear minimization is used -## for prior estimation. - -#' Sum of squared error -#' -#' @param fit estimated values -#' @param actual actual values -delta <- function(fit, actual) sum((fit-actual)^2) - -#' Generate objection function -#' @param theta parameters for the distribution in log scale -#' @param x vector of quantiles -#' @param prob the expected probabilities -#' @param ... additional arguments -#' -#' @importFrom stats pbeta -objective <- function(theta, x, prob, ...) { - ab <- exp(theta) # Parameters are the *logs* of alpha and beta - fit <- pbeta(x, ab[1], ab[2]) - return (delta(fit, prob)) -} - -#' Main function for the beta prior approach -#' Estimate the priors for the beta distribution based on data for -#' a certain day of a week -#' -#' @template train_data-template -#' @param prior_test_data Data Frame for testing -#' @template taus-template -#' @template covariates-template -#' @template lp_solver-template -#' @template lambda-template -#' @template geo_level-template -#' @template geo-template -#' @template indicator-template -#' @template signal-template -#' @template signal_suffix-template -#' @template value_type-template -#' @template train_models-template -#' @template make_predictions-template -#' @param dw column name to indicate which day of a week it is -#' @param response the column name of the response variable -#' @param start the initialization of the the points in nlm -#' @param base_pseudo_denom the pseudo counts added to denominator if little data for training -#' @param base_pseudo_num the pseudo counts added to numerator if little data for training -#' @param training_end_date the most recent training date -#' @param model_save_dir directory containing trained models -#' -#' @importFrom stats nlm predict -#' @importFrom dplyr %>% filter -#' @importFrom quantgen quantile_lasso -#' -est_priors <- function(train_data, prior_test_data, geo, value_type, dw, taus, - covariates, response, lp_solver, lambda, - indicator, signal, geo_level, signal_suffix, - training_end_date, model_save_dir, start=c(0, log(10)), - base_pseudo_denom=1000, base_pseudo_num=10, - train_models = TRUE, make_predictions = TRUE) { - sub_train_data <- train_data %>% filter(train_data[[dw]] == 1) - sub_test_data <- prior_test_data %>% filter(prior_test_data[[dw]] == 1) - if (nrow(sub_test_data) == 0) { - pseudo_denom <- base_pseudo_denom - pseudo_num <- base_pseudo_num - } else { - # Using quantile regression to get estimated quantiles at log scale - quantiles <- list() - for (idx in 1:length(taus)) { - tau <- taus[idx] - model_file_name <- generate_filename(indicator, signal, - geo_level, signal_suffix, lambda, - geo=geo, dw=dw, tau=tau, - value_type=value_type, - training_end_date=training_end_date, - beta_prior_mode=TRUE) - model_path <- file.path(model_save_dir, model_file_name) - - obj = get_model(model_path, sub_train_data, covariates, tau = tau, - lambda = lambda, lp_solver = lp_solver, train_models) - - y_hat_all <- as.numeric(predict(obj, newx = 
as.matrix(sub_test_data[covariates]))) - quantiles[idx] <- exp(mean(y_hat_all, na.rm=TRUE)) # back to the actual scale - } - quantiles <- as.vector(unlist(quantiles)) - # Using nlm to estimate priors - sol <- nlm(objective, start, x=quantiles, prob=taus, lower=0, upper=1, - typsize=c(1,1), fscale=1e-12, gradtol=1e-12) - parms <- exp(sol$estimate) - # Computing pseudo counts based on beta priors - pseudo_denom <- parms[1] + parms[2] - pseudo_num <- parms[1] - } - return (c(pseudo_denom, pseudo_num)) -} - -#' Update fraction based on the pseudo counts for numerators and denominators -#' -#' @param data Data Frame -#' @param dw character to indicate the day of a week. Can be NULL for all the days -#' @param pseudo_num the estimated counts to be added to numerators -#' @param pseudo_denom the estimated counts to be added to denominators -#' @template num_col-template -#' @template denom_col-template -#' -#' @export -frac_adj_with_pseudo <- function(data, dw, pseudo_num, pseudo_denom, num_col, denom_col) { - if (is.null(dw)) { - num_adj <- data[[num_col]] + pseudo_num - denom_adj <- data[[denom_col]] + pseudo_denom - } else { - num_adj <- data[[num_col]][data[[dw]] == 1] + pseudo_num - denom_adj <- data[data[[dw]] == 1, denom_col] + pseudo_denom - } - return (num_adj / denom_adj) -} - -#' Update fraction using beta prior approach -#' -#' @template train_data-template -#' @param test_data testing data -#' @param prior_test_data testing data for the lag -1 model -#' @param training_end_date the most recent training date -#' @param model_save_dir directory containing trained models -#' @template indicator-template -#' @template signal-template -#' @template geo-template -#' @template signal_suffix-template -#' @template lambda-template -#' @template value_type-template -#' @template geo_level-template -#' @template taus-template -#' @template lp_solver-template -#' @template train_models-template -#' @template make_predictions-template -#' -#' @export -frac_adj <- function(train_data, test_data, prior_test_data, - indicator, signal, geo_level, signal_suffix, - lambda, value_type, geo, - training_end_date, model_save_dir, - taus = TAUS, lp_solver = LP_SOLVER, - train_models = TRUE, - make_predictions = TRUE) { - train_data$value_target <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_target_num", "value_target_denom") - train_data$value_7dav <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") - prior_test_data$value_7dav <- frac_adj_with_pseudo(prior_test_data, NULL, 1, 100, "value_7dav_num", "value_7dav_denom") - - train_data$log_value_target <- log(train_data$value_target) - train_data$log_value_7dav <- log(train_data$value_7dav) - prior_test_data$log_value_7dav <- log(prior_test_data$value_7dav) - - pre_covariates = c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref", - "log_value_7dav") - #For training - train_data$value_raw = NaN - train_data$value_7dav = NaN - train_data$value_prev_7dav = NaN - - #For testing - test_data$value_raw = NaN - test_data$value_7dav = NaN - test_data$value_prev_7dav = NaN - - test_data$pseudo_num = NaN - test_data$pseudo_denum = NaN - - for (cov in c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", "Fri_ref", "Sat_ref", "Sun_ref")) { - pseudo_counts <- est_priors(train_data, prior_test_data, geo, value_type, cov, taus, - pre_covariates, "log_value_target", lp_solver, lambda, - indicator, signal, geo_level, signal_suffix, - training_end_date, model_save_dir, - train_models = train_models, - make_predictions = 
make_predictions) - pseudo_denum = pseudo_counts[1] - pseudo_num = pseudo_counts[2] - # update current data - # For training - train_data$value_raw[train_data[[cov]] == 1] <- frac_adj_with_pseudo( - train_data, cov, pseudo_num, pseudo_denum, "value_raw_num", "value_raw_denom") - train_data$value_7dav[train_data[[cov]] == 1] <- frac_adj_with_pseudo( - train_data, cov, pseudo_num, pseudo_denum, "value_7dav_num", "value_7dav_denom") - train_data$value_prev_7dav[train_data[[cov]] == 1] <- frac_adj_with_pseudo( - train_data, cov, pseudo_num, pseudo_denum, "value_prev_7dav_num", "value_prev_7dav_denom") - - #For testing - test_data$value_raw[test_data[[cov]] == 1] <- frac_adj_with_pseudo( - test_data, cov, pseudo_num, pseudo_denum, "value_raw_num", "value_raw_denom") - test_data$value_7dav[test_data[[cov]] == 1] <- frac_adj_with_pseudo( - test_data, cov, pseudo_num, pseudo_denum, "value_7dav_num", "value_7dav_denom") - test_data$value_prev_7dav[test_data[[cov]] == 1] <- frac_adj_with_pseudo( - test_data, cov, pseudo_num, pseudo_denum, "value_prev_7dav_num", "value_prev_7dav_denom") - - test_data$pseudo_num[test_data[[cov]] == 1] = pseudo_num - test_data$pseudo_denum[test_data[[cov]] == 1] = pseudo_denum - } - - train_data$log_value_raw = log(train_data$value_raw) - train_data$log_value_7dav = log(train_data$value_7dav) - train_data$log_value_prev_7dav = log(train_data$value_prev_7dav) - train_data$log_7dav_slope = train_data$log_value_7dav - train_data$log_value_prev_7dav - - test_data$log_value_raw = log(test_data$value_raw) - test_data$log_value_7dav = log(test_data$value_7dav) - test_data$log_value_prev_7dav = log(test_data$value_prev_7dav) - test_data$log_7dav_slope = test_data$log_value_7dav - test_data$log_value_prev_7dav - - return (list(train_data, test_data)) -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/constants.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/constants.R deleted file mode 100644 index e5a9cfd35..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/constants.R +++ /dev/null @@ -1,33 +0,0 @@ -# Constants for the backfill correction model -TAUS <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99) -REF_LAG <- 60 -TEST_LAGS <- c(1:14, 21, 35, 51) -TRAINING_DAYS <- 270 -TESTING_WINDOW <- 14 -LAG_WINDOW <- 5 -LAMBDA <- 0.1 -LAG_PAD <- 2 -LP_SOLVER <-"gurobi" # LP solver to use in quantile_lasso(); "gurobi" or "glpk" - -YITL <-"log_value_raw" -SLOPE <-"log_7dav_slope" -Y7DAV <-"log_value_7dav" - -SQRTSCALE_COVID <-c('sqrty0_covid', 'sqrty1_covid', 'sqrty2_covid') -SQRTSCALE_TOTAL <-c('sqrty0_total', 'sqrty1_total', 'sqrty2_total') -SQRTSCALE <-c('sqrty0', 'sqrty1', "sqrty2") -LOG_LAG <-"inv_log_lag" - -# Dates -WEEKDAYS_ABBR <- c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat") # wd -WEEK_ISSUES <- c("W1_issue", "W2_issue", "W3_issue") # wm -TODAY <- Sys.Date() - -INDICATORS_AND_SIGNALS <- tibble::tribble( - ~indicator, ~signal, ~name_suffix, ~sub_dir, - "changehc", "covid", "", "chng", - "changehc", "flu", "", "chng", - "claims_hosp", "", "", "claims_hosp", - # "dv",,, - "quidel", "covidtest", c("total", "age_0_4", "age_5_17", "age_18_49", "age_50_64", "age_65plus", "age_0_17"), "quidel_covidtest" -) diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/delphiBackfillCorrection.R 
b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/delphiBackfillCorrection.R deleted file mode 100644 index 57d79fd47..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/delphiBackfillCorrection.R +++ /dev/null @@ -1,3 +0,0 @@ -# Load `tribble` for defining global variables -#' @importFrom tibble tribble -NULL diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/io.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/io.R deleted file mode 100644 index bd506b6f7..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/io.R +++ /dev/null @@ -1,133 +0,0 @@ -#' Read a parquet file into a dataframe -#' -#' @template input_dir-template -#' -#' @importFrom arrow read_parquet -#' -#' @export -read_data <- function(input_dir) { - df <- read_parquet(input_dir, as_data_frame = TRUE) - return (df) -} - -#' Export the result to customized directory -#' -#' @param test_data test data containing prediction results -#' @param coef_data data frame containing the estimated coefficients -#' @template indicator-template -#' @template signal-template -#' @template geo_level-template -#' @template signal_suffix-template -#' @template lambda-template -#' @template value_type-template -#' @template export_dir-template -#' @param training_end_date the most recent training date -#' -#' @importFrom readr write_csv -#' @importFrom stringr str_interp str_split -export_test_result <- function(test_data, coef_data, indicator, signal, - geo_level, signal_suffix, lambda, - training_end_date, - value_type, export_dir) { - base_name <- generate_filename(indicator, signal, - geo_level, signal_suffix, lambda, - training_end_date, value_type, model_mode=FALSE) - pred_output_dir <- str_interp("prediction_${base_name}") - write_csv(test_data, file.path(export_dir, pred_output_dir)) - - coef_output_dir <- str_interp("coefs_${base_name}") - write_csv(test_data, file.path(export_dir, coef_output_dir)) -} - -#' List valid input files. -#' -#' @template indicator-template -#' @template signal-template -#' @template params-template -#' @param sub_dir string specifying the indicator-specific directory within -#' the general input directory `params$input_dir` -get_files_list <- function(indicator, signal, params, sub_dir) { - # Make sure we're reading in both 4-week rollup and daily files. - if (!missing(sub_dir)) { - input_dir <- file.path(params$input_dir, sub_dir) - } else { - input_dir <- params$input_dir - } - - # Convert input_group into file names. - daily_pattern <- create_name_pattern(indicator, signal, "daily") - rollup_pattern <- create_name_pattern(indicator, signal, "rollup") - - # Filter files lists to only include those containing dates we need for training - daily_input_files <- list.files( - input_dir, pattern = daily_pattern, full.names = TRUE - ) %>% - subset_valid_files("daily", params) - rollup_input_files <- list.files( - input_dir, pattern = rollup_pattern, full.names = TRUE - ) %>% - subset_valid_files("rollup", params) - - return(c(daily_input_files, rollup_input_files)) -} - -#' Return file names only if they contain data to be used in training -#' -#' Parse filenames to find included dates. Use different patterns if file -#' includes daily or rollup (multiple days) data. 
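A minimal sketch (hypothetical file names) of the date extraction this describes for daily versus rollup files, consistent with the patterns used in create_name_pattern() and subset_valid_files():

    # Hypothetical file names, for illustration only
    files  <- c("chng/changehc_covid_as_of_20220901.parquet",
                "chng/changehc_covid_from_20220801_to_20220828.parquet")
    daily  <- grepl("_as_of_[0-9]{8}\\.parquet$", files)
    rollup <- grepl("_from_[0-9]{8}_to_[0-9]{8}\\.parquet$", files)

    # Daily files carry a single as-of date; rollup files carry a start and an end date
    as.Date(sub("^.*_as_of_([0-9]{8})\\.parquet$", "\\1", files[daily]), format = "%Y%m%d")
    as.Date(sub("^.*_from_([0-9]{8})_to_[0-9]{8}\\.parquet$", "\\1", files[rollup]), format = "%Y%m%d")
    as.Date(sub("^.*_from_[0-9]{8}_to_([0-9]{8})\\.parquet$", "\\1", files[rollup]), format = "%Y%m%d")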
-#' -#' @param files_list character vector of input files of a given `file_type` -#' @template file_type-template -#' @template params-template -subset_valid_files <- function(files_list, file_type = c("daily", "rollup"), params) { - file_type <- match.arg(file_type) - date_format = "%Y%m%d" - switch(file_type, - daily = { - start_dates <- as.Date( - sub("^.*/.*_as_of_([0-9]{8}).parquet$", "\\1", files_list), - format = date_format - ) - end_dates <- start_dates - }, - rollup = { - rollup_pattern <- "^.*/.*_from_([0-9]{8})_to_([0-9]{8}).parquet$" - start_dates <- as.Date( - sub(rollup_pattern, "\\1", files_list), - format = date_format - ) - end_dates <- as.Date( - sub(rollup_pattern, "\\2", files_list), - format = date_format - ) - } - ) - - ## TODO: start_date depends on if we're doing model training or just corrections. - start_date <- TODAY - params$training_days - params$ref_lag - end_date <- TODAY - 1 - - # Only keep files with data that falls at least somewhat between the desired - # start and end range dates. - files_list <- files_list[ - !(( start_dates < start_date & end_dates < start_date ) | - ( start_dates > end_date & end_dates > end_date ))] - - return(files_list) -} - -#' Create pattern to match input files of a given type and signal -#' -#' @template indicator-template -#' @template signal-template -#' @template file_type-template -#' -#' @importFrom stringr str_interp -create_name_pattern <- function(indicator, signal, - file_type = c("daily", "rollup")) { - file_type <- match.arg(file_type) - switch(file_type, - daily = str_interp("${indicator}_${signal}_as_of_[0-9]{8}.parquet$"), - rollup = str_interp("${indicator}_${signal}_from_[0-9]{8}_to_[0-9]{8}.parquet$") - ) -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/main.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/main.R deleted file mode 100644 index 37ae2f98e..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/main.R +++ /dev/null @@ -1,273 +0,0 @@ -#' Get backfill-corrected estimates for a single signal + geo combination -#' -#' @template df-template -#' @template params-template -#' @template refd_col-template -#' @template lag_col-template -#' @template signal_suffixes-template -#' @template indicator-template -#' @template signal-template -#' @param training_end_date the most recent training date -#' -#' @importFrom dplyr %>% filter select group_by summarize across everything group_split -#' @importFrom tidyr drop_na -#' @importFrom rlang .data .env -#' -#' @export -run_backfill <- function(df, params, training_end_date, - refd_col = "time_value", lag_col = "lag", - signal_suffixes = c(""), - indicator = "", signal = "") { - df <- filter(df, .data$lag < params$ref_lag + 30) # a rough filtration to save memory - - geo_levels <- params$geo_levels - if ("state" %in% geo_levels) { - # If state included, do it last since state processing modifies the - # `df` object. - geo_levels <- c(setdiff(geo_levels, c("state")), "state") - } - - for (geo_level in geo_levels) { - # Get full list of interested locations - if (geo_level == "state") { - # Drop county field and make new "geo_value" field from "state_id". 
- # Aggregate counties up to state level - df <- df %>% - dplyr::select(-.data$geo_value, geo_value = .data$state_id) %>% - dplyr::group_by(across(c("geo_value", refd_col, lag_col))) %>% - # Summarized columns keep original names - dplyr::summarize(across(everything(), sum)) - } - if (geo_level == "county") { - # Keep only 200 most populous (within the US) counties - top_200_geos <- get_populous_counties() - df <- filter(df, geo_value %in% top_200_geos) - } - - test_data_list <- list() - coef_list <- list() - - for (value_type in params$value_types) { - for (signal_suffix in signal_suffixes) { - key = paste(value_type, signal_suffix) - test_data_list[[key]] <- list() - coef_list[[key]] <- list() - } - } - - group_dfs <- group_split(df, geo_value) - - # Build model for each location - for (subdf in group_dfs) { - geo <- group_df$geo_value[1] - min_refd <- min(subdf[[refd_col]]) - max_refd <- max(subdf[[refd_col]]) - subdf <- fill_rows(subdf, refd_col, lag_col, min_refd, max_refd) - - for (signal_suffix in signal_suffixes) { - # For each suffix listed in `signal_suffixes`, run training/testing - # process again. Main use case is for quidel which has overall and - # age-based signals. - if (signal_suffix != "") { - num_col <- paste(params$num_col, signal_suffix, sep = "_") - denom_col <- paste(params$denom_col, signal_suffix, sep = "_") - } else { - num_col <- params$num_col - denom_col <- params$denom_col - } - - for (value_type in params$value_types) { - # Handle different signal types - if (value_type == "count") { # For counts data only - combined_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) - combined_df <- add_7davs_and_target(combined_df, "value_raw", refd_col, lag_col) - - } else if (value_type == "fraction") { - combined_num_df <- fill_missing_updates(subdf, num_col, refd_col, lag_col) - combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", refd_col, lag_col) - - combined_denom_df <- fill_missing_updates(subdf, denom_col, refd_col, lag_col) - combined_denom_df <- add_7davs_and_target(combined_denom_df, "value_raw", refd_col, lag_col) - - combined_df <- merge( - combined_num_df, combined_denom_df, - by=c(refd_col, "issue_date", lag_col, "target_date"), all.y=TRUE, - suffixes=c("_num", "_denom") - ) - } - combined_df <- add_params_for_dates(combined_df, refd_col, lag_col) - combined_df <- combined_df %>% filter(.data$lag < params$ref_lag) - - geo_train_data <- combined_df %>% - filter(.data$issue_date < training_end_date) %>% - filter(.data$target_date <= training_end_date) %>% - filter(.data$target_date > training_end_date - params$training_days) %>% - drop_na() - geo_test_data <- combined_df %>% - filter(.data$issue_date %in% params$test_dates) %>% - drop_na() - if (nrow(geo_test_data) == 0) next - if (nrow(geo_train_data) <= 200) next - - if (value_type == "fraction") { - # Use beta prior approach to adjust fractions - geo_prior_test_data = combined_df %>% - filter(.data$issue_date > min(params$test_dates) - 7) %>% - filter(.data$issue_date <= max(params$test_dates)) - updated_data <- frac_adj(train_data, test_data, prior_test_data, - indicator, signal, geo_level, signal_suffix, - lambda, value_type, geo, - training_end_date, params$cache_dir, - train_models = params$train_models, - make_predictions = params$make_predictions) - geo_train_data <- updated_data[[1]] - geo_test_data <- updated_data[[2]] - } - max_raw = sqrt(max(geo_train_data$value_raw)) - for (test_lag in c(1:14, 21, 35, 51)) { - filtered_data <- data_filteration(test_lag, 
geo_train_data, - geo_test_data, params$lag_pad) - train_data <- filtered_data[[1]] - test_data <- filtered_data[[2]] - - updated_data <- add_sqrtscale(train_data, test_data, max_raw, "value_raw") - train_data <- updated_data[[1]] - test_data <- updated_data[[2]] - sqrtscale <- updated_data[[3]] - - covariates <- list( - Y7DAV, paste0(WEEKDAYS_ABBR, "_issue"), - paste0(WEEKDAYS_ABBR, "_ref"), WEEK_ISSUES, SLOPE, sqrtscale - ) - params_list <- c(YITL, as.vector(unlist(covariates))) - - # Model training and testing - prediction_results <- model_training_and_testing( - train_data, test_data, params$taus, params_list, params$lp_solver, - params$lambda, test_lag, geo, value_type, params$cache_dir, - indicator, signal, geo_level, signal_suffix,training_end_date, - train_models = params$train_models, - make_predictions = params$make_predictions - ) - - # Model objects are saved during training, so only need to export - # output if making predictions/corrections - if (params$make_predictions) { - test_data <- prediction_results[[1]] - coefs <- prediction_results[[2]] - test_data <- evaluate(test_data, params$taus) - - idx <- length(test_data_list[[value_type]]) + 1 - test_data_list[[value_type]][[idx]] <- test_data - coef_list[[value_type]][[idx]] <- coefs - } - }# End for test lags - }# End for value types - }# End for signal suffixes - - if (params$make_predictions) { - for (value_type in params$value_types) { - for (signal_suffix in signal_suffixes) { - key = paste(value_type, signal_suffix) - test_combined <- bind_rows(test_data_list[[key]]) - coef_combined <- bind_rows(coef_list[[key]]) - export_test_result(test_combined, coef_combined, - indicator, signal, - geo_level, signal_suffix, lambda, - training_end_date, - value_type, export_dir=params$export_dir) - } - } - } - - }# End for geo list - }# End for geo type -} - -#' Perform backfill correction on all desired signals and geo levels -#' -#' @template params-template -#' -#' @importFrom dplyr bind_rows -#' @importFrom parallel detectCores -#' -#' @export -main <- function(params) { - if (!params$train_models && !params$make_predictions) { - message("both model training and prediction generation are turned off; exiting") - return - } - - if (params$train_models) { - # Remove all the stored models - files_list <- list.files(params$cache_dir, pattern="*.model", full.names = TRUE) - file.remove(file.path(mydir, files_list)) - } - - training_end_date <- as.Date(readLines( - file.path(params$cache_dir, "training_end_date.txt"))) - - ## Set default number of cores for mclapply to half of those available. - if (params$parallel) { - cores <- detectCores() - - if (is.na(cores)) { - warning("Could not detect the number of CPU cores; parallel mode disabled") - params$parallel <- FALSE - } else { - options(mc.cores = min(params$parallel_max_cores, floor(cores / 2))) - } - } - - # Loop over every indicator + signal combination. 
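As an aside on the beta-prior adjustment invoked for fraction signals above: a self-contained sketch (all numbers synthetic) of how nlm() with the pbeta-based objective() from beta_prior_estimation.R roughly recovers alpha and beta from a set of target quantiles, and how the resulting pseudo counts shrink a raw fraction as frac_adj_with_pseudo() does:

    taus      <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99)
    quantiles <- qbeta(taus, 3, 40)        # stand-in for the quantile-regression estimates

    objective <- function(theta, x, prob) {
      ab <- exp(theta)                     # parameters are the logs of alpha and beta
      sum((pbeta(x, ab[1], ab[2]) - prob)^2)
    }
    sol <- nlm(objective, c(0, log(10)), x = quantiles, prob = taus)
    ab  <- exp(sol$estimate)               # roughly recovers c(3, 40)

    pseudo_num   <- ab[1]                  # alpha
    pseudo_denom <- ab[1] + ab[2]          # alpha + beta
    (2 + pseudo_num) / (10 + pseudo_denom) # a raw fraction of 2/10 shrunk toward the prior mean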
- for (input_group in INDICATORS_AND_SIGNALS) { - files_list <- get_files_list( - input_group$indicator, input_group$signal, params, input_group$sub_dir - ) - - if (length(files_list) == 0) { - warning(str_interp( - "No files found for indicator ${input_group$indicator} signal ${input_group$signal}, skipping" - )) - next - } - - # Read in all listed files and combine - input_data <- lapply( - files_list, - function(file) { - input_data[[file]] <- read_data(file) - } - ) %>% bind_rows - - if (nrow(input_data) == 0) { - warning(str_interp( - "No data available for indicator ${input_group$indicator} signal ${input_group$signal}, skipping" - )) - next - } - - # Check data type and required columns - for (value_type in params$value_types) { - result <- validity_checks( - input_data, value_type, - params$num_col, params$denom_col, input_group$name_suffix - ) - input_data <- result[["df"]] - } - - # Check available training days - training_days_check(input_data$issue_date, params$training_days) - - # Perform backfill corrections and save result - run_backfill(input_data, params, training_end_date, - indicator = input_group$indicator, signal = input_group$signal, - signal_suffixes = input_group$name_suffix) - - if (params$train_models) { - # Save the training end date to a text file. - writeLines(as.character(TODAY), - file.path(params$cache_dir, "training_end_date.txt")) - } - } -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/model.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/model.R deleted file mode 100644 index 48b02780d..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/model.R +++ /dev/null @@ -1,259 +0,0 @@ -#' Filtration for training and testing data with different lags -#' -#' @template test_lag-template -#' @param lag_pad lag padding for training -#' @param geo_train_data training data for a certain location -#' @param geo_test_data testing data for a certain location -#' -#' @importFrom rlang .data .env -#' -#' @export -data_filteration <- function(test_lag, geo_train_data, geo_test_data, lag_pad) { - if (test_lag <= 14){ - test_lag_pad=lag_pad - test_lag_pad1=0 - test_lag_pad2=0 - }else if (test_lag < 51){ - test_lag_pad=7 - test_lag_pad1=6 - test_lag_pad2=7 - }else { - test_lag_pad=9 - test_lag_pad1=8 - test_lag_pad2=9 - } - train_data = geo_train_data %>% - filter(.data$lag >= .env$test_lag - .env$test_lag_pad ) %>% - filter(.data$lag <= .env$test_lag + .env$test_lag_pad ) - test_data = geo_test_data %>% - filter(.data$lag >= .env$test_lag - .env$test_lag_pad1 ) %>% - filter(.data$lag <= .env$test_lag + .env$test_lag_pad2) - - return (list(train_data, test_data)) -} - -#' Add columns to indicate the scale of value at square root level -#' -#' @template train_data-template -#' @param test_data Data Frame for testing -#' @param max_raw the maximum value in the training data at square root level -#' @template value_col-template -#' -#' @export -add_sqrtscale<- function(train_data, test_data, max_raw, value_col) { - if (!(value_col %in% colnames(train_data))){ - stop("value raw does not exist in training data!") - } - - if (!(value_col %in% colnames(test_data))){ - stop("value raw does not exist in testing data!") - } - - sqrtscale = c() - sub_max_raw = sqrt(max(train_data[[value_col]])) / 2 - - for (split in seq(0, 3)){ - if (sub_max_raw < (max_raw * (split+1) * 0.1)) break - train_data[paste0("sqrty", as.character(split))] 
= 0 - test_data[paste0("sqrty", as.character(split))] = 0 - qv_pre = max_raw * split * 0.2 - qv_next = max_raw * (split+1) * 0.2 - - train_data[(train_data[[value_col]] <= (qv_next)^2) - & (train_data[[value_col]] > (qv_pre)^2), - paste0("sqrty", as.character(split))] = 1 - test_data[(test_data[[value_col]] <= (qv_next)^2) - & (test_data[[value_col]] > (qv_pre)^2), - paste0("sqrty", as.character(split))] = 1 - sqrtscale[split+1] = paste0("sqrty", as.character(split)) - } - return (list(train_data, test_data, sqrtscale)) -} - -#' Fetch model and use to generate predictions/perform corrections -#' -#' @template train_data-template -#' @param test_data Data frame for testing -#' @template taus-template -#' @template covariates-template -#' @template lp_solver-template -#' @template lambda-template -#' @template geo_level-template -#' @template geo-template -#' @template indicator-template -#' @template signal-template -#' @template signal_suffix-template -#' @template value_type-template -#' @template test_lag-template -#' @template train_models-template -#' @template make_predictions-template -#' @param model_save_dir directory containing trained models -#' @param training_end_date Most recent training date -#' -#' @importFrom stats predict coef -#' -#' @export -model_training_and_testing <- function(train_data, test_data, taus, covariates, - lp_solver, lambda, test_lag, - geo, value_type, model_save_dir, - indicator, signal, - geo_level, signal_suffix, - training_end_date, - train_models = TRUE, - make_predictions = TRUE) { - success = 0 - coefs_result = list() - coef_list = c("intercept", paste(covariates, '_coef', sep='')) - for (tau in taus) { - tryCatch( - expr = { - model_file_name <- generate_filename(indicator, signal, - geo_level, signal_suffix, lambda, - training_end_date, geo, - value_type, test_lag, tau) - model_path <- file.path(model_save_dir, model_file_name) - obj <- get_model(model_path, train_data, covariates, tau, - lambda, lp_solver, train_models=TRUE) - - if (make_predictions) { - y_hat_all = as.numeric(predict(obj, newx = as.matrix(test_data[covariates]))) - test_data[[paste0("predicted_tau", as.character(tau))]] = y_hat_all - - coefs_result[[success+1]] = coef(obj) - } - - success = success + 1 - }, - error=function(e) {print(paste("Training failed for", model_path, sep=" "))} - ) - } - if (success < 9) {return (NULL)} - if (!make_predictions) {return (list())} - - coef_combined_result = data.frame(tau=taus, geo=geo, test_lag=test_lag) - coef_combined_result[coef_list] = as.matrix(do.call(rbind, coefs_result)) - - return (list(test_data, coef_combined_result)) -} - -#' Evaluation of the test results based on WIS score -#' The WIS score calculation is based on the weighted_interval_score function -#' from the `evalcast` package from Delphi -#' -#' @param test_data dataframe with a column containing the prediction results of -#' each requested quantile. Each row represents an update with certain -#' (reference_date, issue_date, location) combination. 
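As context for the WIS evaluation described here: the pipeline uses evalcast::weighted_interval_score, but a rough, self-contained stand-in (not the exact evalcast formula) is the average pinball loss across the predicted quantiles, e.g. with made-up numbers:

    taus   <- c(0.01, 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975, 0.99)
    pred_q <- log(c(8, 9, 11, 13, 15, 18, 21, 24, 26))   # fabricated log-scale quantile predictions
    actual <- log(16)                                    # fabricated observed target

    pinball <- function(tau, q, y) ifelse(y >= q, tau * (y - q), (1 - tau) * (q - y))
    mean(mapply(pinball, taus, pred_q, MoreArgs = list(y = actual)))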
-#' @template taus-template -#' -#' @importFrom evalcast weighted_interval_score -#' -#' @export -evaluate <- function(test_data, taus) { - n_row = nrow(test_data) - taus_list = as.list(data.frame(matrix(replicate(n_row, taus), ncol=n_row))) - - # Calculate WIS - predicted_all = as.matrix(test_data[c("predicted_tau0.01", "predicted_tau0.025", - "predicted_tau0.1", "predicted_tau0.25", - "predicted_tau0.5", "predicted_tau0.75", - "predicted_tau0.9", "predicted_tau0.975", - "predicted_tau0.99")]) - predicted_all_exp = exp(predicted_all) - predicted_trans = as.list(data.frame(t(predicted_all - test_data$log_value_target))) - test_data$wis = mapply(weighted_interval_score, taus_list, predicted_trans, 0) - - return (test_data) -} - -#' Train model using quantile regression with Lasso penalty, or load from disk -#' -#' @param model_path path to read model from or to save model to -#' @template train_data-template -#' @template covariates-template -#' @param tau decimal quantile to be predicted. Values must be between 0 and 1. -#' @template lp_solver-template -#' @template lambda-template -#' @template train_models-template -#' -#' @importFrom quantgen quantile_lasso -#' @importFrom stringr str_interp -get_model <- function(model_path, train_data, covariates, tau, - lambda, lp_solver, train_models) { - if (train_models || !file.exists(model_path)) { - if (!train_models && !file.exists(model_path)) { - warning(str_interp("user requested use of cached model but file {model_path}"), - " does not exist; training new model") - } - # Quantile regression - obj <- quantile_lasso(as.matrix(train_data[covariates]), - train_data$log_value_target, tau = tau, - lambda = lambda, standardize = FALSE, lp_solver = lp_solver) - - # Save model to cache. - create_dir_not_exist(dirname(model_path)) - save(obj, file=model_path) - } else { - # Load model from cache. - obj <- load(model_path) - } - - return(obj) -} - -#' Construct filename for model with given parameters -#' -#' @template indicator-template -#' @template signal-template -#' @template geo-template -#' @template signal_suffix-template -#' @template lambda-template -#' @template value_type-template -#' @template test_lag-template -#' @template geo_level-template -#' @template test_lag-template -#' @param dw string, indicate the day of a week -#' @param tau decimal quantile to be predicted. Values must be between 0 and 1. -#' @param beta_prior_mode bool, indicate whether it is for a beta prior model -#' @param model_mode bool, indicate whether the file name is for a model -#' @param training_end_date the most recent training date -#' -#' @return path to file containing model object -#' -#' @importFrom stringr str_interp -#' -generate_filename <- function(indicator, signal, - geo_level, signal_suffix, lambda, - training_end_date="", geo="", - value_type = "", test_lag="", tau="", dw="", - beta_prior_mode = FALSE, model_mode = TRUE) { - if (lambda != "") { - lambda <- str_interp("lambda${lambda}") - } - if (test_lag != "") { - test_lag <- str_interp("lag${test_lag}") - } - if (tau != "") { - tau <- str_interp("tau${tau}") - } - if (beta_prior_mode) { - beta_prior <- "beta_prior" - } else { - beta_prior <- "" - } - if (model_mode) { - file_type <- ".model" - } else { - file_type <- ".csv" - } - components <- c(as.character(training_end_date), beta_prior, - indicator, signal, signal_suffix, - geo_level, lambda, - geo, test_lag, dw, tau) - - filename = paste0( - # Drop any empty strings. 
- paste(components[components != ""], collapse="_"), - file_type - ) - return(filename) -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/preprocessing.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/preprocessing.R deleted file mode 100644 index 4424fde4b..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/preprocessing.R +++ /dev/null @@ -1,229 +0,0 @@ -## Data Preprocessing -## -## The raw input data should have 4/5 basic columns: -## time_value: reference date -## issue_date: issue date/date of reporting -## geo_value: location -## lag: the number of days between issue date and the reference date -## counts: the number of counts used for estimation - - -#' Re-index, fill na, make sure all reference date have enough rows for updates -#' @template df-template -#' @template refd_col-template -#' @template lag_col-template -#' @param min_refd the earliest reference date considered in the data -#' @param max_refd the latest reference date considered in the data -#' @template ref_lag-template -#' -#' @return df_new Data Frame with filled rows for missing lags -#' -#' @importFrom tidyr crossing -#' @importFrom stats setNames -#' -#' @export -fill_rows <- function(df, refd_col, lag_col, min_refd, max_refd, ref_lag = REF_LAG) { - # Full list of lags - # +30 to have values for calculating 7-day averages - lags <- min(df[[lag_col]]): (ref_lag + 30) - refds <- seq(min_refd, max_refd, by="day") # Full list reference date - row_inds_df <- as.data.frame(crossing(refds, lags)) %>% - setNames(c(refd_col, lag_col)) - df_new = merge(x=df, y=row_inds_df, - by=c(refd_col, lag_col), all.y=TRUE) - return (df_new) -} - -#' Get pivot table, filling NANs. If there is no update on issue date D but -#' previous reports exist for issue date D_p < D, all the dates between -#' [D_p, D] are filled with with the reported value on date D_p. If there is -#' no update for any previous issue date, fill in with 0. 
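A toy illustration (fabricated counts) of the forward-filling described here: within each reference-date column, a missing lag inherits the last reported value, and reference dates with no earlier report start from 0:

    library(dplyr)
    library(tidyr)

    toy <- tibble::tribble(
      ~time_value,            ~lag, ~counts,
      as.Date("2022-05-01"),     1,      10,
      as.Date("2022-05-01"),     3,      12,  # lag 2 missing -> carried forward as 10
      as.Date("2022-05-02"),     2,       5   # lag 1 missing -> 0; lag 3 missing -> carried forward as 5
    )

    toy %>%
      arrange(lag) %>%
      pivot_wider(id_cols = lag, names_from = time_value, values_from = counts) %>%
      fill(everything(), .direction = "down") %>%
      mutate(across(everything(), ~ replace(.x, is.na(.x), 0))) %>%
      pivot_longer(-lag, names_to = "time_value", values_to = "value_raw")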
-#' @template df-template -#' @template value_col-template -#' @template refd_col-template -#' @template lag_col-template -#' -#' @importFrom tidyr fill pivot_wider pivot_longer -#' @importFrom dplyr %>% everything select -#' -#' @export -fill_missing_updates <- function(df, value_col, refd_col, lag_col) { - pivot_df <- df[order(df[[lag_col]], decreasing=FALSE), ] %>% - pivot_wider(id_cols=lag_col, names_from=refd_col, values_from=value_col) - - if (any(diff(pivot_df[[lag_col]]) != 1)) { - stop("Risk exists in forward filling") - } - pivot_df <- pivot_df %>% fill(everything(), .direction="down") - - # Fill NAs with 0s - pivot_df[is.na(pivot_df)] <- 0 - - backfill_df <- pivot_df %>% - pivot_longer(-lag_col, values_to="value_raw", names_to=refd_col) - backfill_df[[refd_col]] = as.Date(backfill_df[[refd_col]]) - - return (as.data.frame(backfill_df)) -} - -#' Calculate 7 day moving average for each issue date -#' The 7dav for date D reported on issue date D_i is the average from D-7 to D-1 -#' @param pivot_df Data Frame where the columns are issue dates and the rows are -#' reference dates -#' @template refd_col-template -#' -#' @importFrom zoo rollmeanr -#' -#' @export -get_7dav <- function(pivot_df, refd_col) { - for (col in colnames(pivot_df)) { - if (col == refd_col) next - pivot_df[, col] <- rollmeanr(pivot_df[, col], 7, align="right", fill=NA) - } - backfill_df <- pivot_df %>% - pivot_longer(-refd_col, values_to="value_raw", names_to="issue_date") - backfill_df[[refd_col]] = as.Date(backfill_df[[refd_col]]) - backfill_df[["issue_date"]] = as.Date(backfill_df[["issue_date"]]) - return (as.data.frame(backfill_df)) -} - -#' Used for data shifting in terms of reference date -#' -#' @template df-template -#' @param n_day number of days to be shifted -#' @template refd_col-template -#' -#' @export -add_shift <- function(df, n_day, refd_col) { - df[, refd_col] <- as.Date(df[, refd_col]) + n_day - return (df) -} - -#' Add one hot encoding for day of a week info in terms of reference -#' and issue date -#' -#' @template df-template -#' @param wd vector of days of a week -#' @template time_col-template -#' @param suffix suffix added to indicate which kind of date is used -#' -#' @export -add_dayofweek <- function(df, time_col, suffix, wd = WEEKDAYS_ABBR) { - dayofweek <- as.numeric(format(df[[time_col]], format="%u")) - for (i in 1:6) { - df[, paste0(wd[i], suffix)] <- as.numeric(dayofweek == i) - } - if (suffix == "_ref") { - df[, paste0("Sun", suffix)] <- as.numeric(dayofweek == 7) - } - return (df) -} - -#' Get week of a month info according to a date -#' -#' All the dates on or before the ith Sunday but after the (i-1)th Sunday -#' is considered to be the ith week. Notice that -#' If there are 4 or 5 weeks in total, the ith weeks is labeled as i -#' and the dates in the 5th week this month are actually in the same -#' week with the dates in the 1st week next month and those dates are -#' sparse. Thus, we assign the dates in the 5th week to the 1st week. -#' If there are 6 weeks in total, the 1st, 2nd, 3rd, 4th, 5th, 6th weeks -#' are labeled as c(1, 1, 2, 3, 4, 1) which means we will merge the first, -#' second and the last weeks together. 
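Stepping back to get_7dav() and add_shift() above, a quick check with synthetic values of the trailing 7-day mean (the value attached to day D, once shifted forward by one day, is the average of days D-7 through D-1):

    library(zoo)

    x <- c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)   # synthetic daily values for one issue date
    rollmeanr(x, 7, fill = NA)
    # NA NA NA NA NA NA  4  5  6  7   -- trailing mean ending at each day;
    # add_shift(., 1, refd_col) then moves each value forward one reference date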
-#' -#' @param date Date object -#' -#' @importFrom lubridate make_date days_in_month year month day -#' -#' @return a integer indicating which week it is in a month -get_weekofmonth <- function(date) { - year <- year(date) - month <- month(date) - day <- day(date) - firstdayofmonth <- as.numeric(format(make_date(year, month, 1), format="%u")) - n_days <- lubridate::days_in_month(date) - n_weeks <- (n_days + firstdayofmonth - 1) %/% 7 + 1 - extra_check <- as.integer(n_weeks > 5) - return (max((day + firstdayofmonth - 1) %/% 7 - extra_check, 0) %% 4 + 1) -} - -#' Add one hot encoding for week of a month info in terms of issue date -#' -#' @template df-template -#' @param wm vector of weeks of a month -#' @template time_col-template -#' -#' @export -add_weekofmonth <- function(df, time_col, wm = WEEK_ISSUES) { - weekofmonth <- get_weekofmonth(df[[time_col]]) - for (i in 1:3) { - df[, paste0(wm[i])] <- as.numeric(weekofmonth == i) - } - return (df) -} - -#' Add 7dav and target to the data -#' Target is the updates made ref_lag days after the first release -#' @template df-template -#' @template value_col-template -#' @template refd_col-template -#' @template lag_col-template -#' @template ref_lag-template -#' -#' @importFrom dplyr %>% -#' @importFrom tidyr pivot_wider drop_na -#' -#' @export -add_7davs_and_target <- function(df, value_col, refd_col, lag_col, ref_lag = REF_LAG) { - df$issue_date <- df[[refd_col]] + df[[lag_col]] - pivot_df <- df[order(df$issue_date, decreasing=FALSE), ] %>% - pivot_wider(id_cols=refd_col, names_from="issue_date", - values_from=value_col) - - # Add 7dav avg - avg_df <- get_7dav(pivot_df, refd_col) - avg_df <- add_shift(avg_df, 1, refd_col) # 7dav until yesterday - names(avg_df)[names(avg_df) == value_col] <- 'value_7dav' - avg_df_prev7 <- add_shift(avg_df, 7, refd_col) - names(avg_df_prev7)[names(avg_df_prev7) == 'value_7dav'] <- 'value_prev_7dav' - - backfill_df <- Reduce(function(x, y) merge(x, y, all=TRUE), - list(df, avg_df, avg_df_prev7)) - - # Add target - target_df <- df[df$lag==ref_lag, c(refd_col, value_col, "issue_date")] - names(target_df)[names(target_df) == value_col] <- 'value_target' - names(target_df)[names(target_df) == 'issue_date'] <- 'target_date' - - backfill_df <- merge(backfill_df, target_df, by=refd_col, all.x=TRUE) - - # Add log values - backfill_df$log_value_raw = log(backfill_df$value_raw + 1) - backfill_df$log_value_7dav = log(backfill_df$value_7dav + 1) - backfill_df$log_value_target = log(backfill_df$value_target + 1) - backfill_df$log_value_prev_7dav = log(backfill_df$value_prev_7dav + 1) - backfill_df$log_7dav_slope = backfill_df$log_value_7dav - backfill_df$log_value_prev_7dav - - # Remove invalid rows - backfill_df <- backfill_df %>% drop_na(c(lag_col)) - - return (as.data.frame(backfill_df)) -} - -#' Add params related to date -#' -#' Target is the updates made ref_lag days after the first release -#' -#' @template df-template -#' @template refd_col-template -#' @template lag_col-template -add_params_for_dates <- function(df, refd_col, lag_col) { - # Add columns for day-of-week effect - df <- add_dayofweek(df, refd_col, "_ref", WEEKDAYS_ABBR) - df <- add_dayofweek(df, "issue_date", "_issue", WEEKDAYS_ABBR) - - # Add columns for week-of-month effect - df <- add_weekofmonth(df, "issue_date", WEEK_ISSUES) - - return (as.data.frame(df)) -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/tooling.R 
b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/tooling.R deleted file mode 100644 index d7cf36ff8..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/tooling.R +++ /dev/null @@ -1,178 +0,0 @@ -#' Corrected estimates from a single local signal -#' -#' @template df-template -#' @template export_dir-template -#' @param test_date_list Date vector of dates to make predictions for -#' @param value_cols character vector of numerator and/or denominator field names -#' @template value_type-template -#' @template taus-template -#' @param test_lags integer vector of number of days ago to predict for -#' @template training_days-template -#' @template testing_window-template -#' @template ref_lag-template -#' @template lambda-template -#' @template lp_solver-template -#' -#' @importFrom dplyr %>% filter -#' @importFrom plyr rbind.fill -#' @importFrom tidyr drop_na -#' @importFrom rlang .data .env -#' -#' @export -run_backfill_local <- function(df, export_dir, test_date_list, value_cols, value_type, - taus = TAUS, test_lags = TEST_LAGS, - training_days = TRAINING_DAYS, testing_window = TESTING_WINDOW, - ref_lag = REF_LAG, lambda = LAMBDA, lp_solver = LP_SOLVER) { - # Get all the locations that are considered - geo_list <- unique(df[df$time_value %in% test_date_list, "geo_value"]) - # Build model for each location - res_list = list() - res_indx = 1 - coef_df_list = list() - - for (geo in geo_list) { - subdf <- df %>% filter(.data$geo_value == .env$geo) %>% filter(.data$lag < .env$ref_lag) - min_refd <- min(subdf$time_value) - max_refd <- max(subdf$time_value) - subdf <- fill_rows(subdf, "time_value", "lag", min_refd, max_refd) - if (value_type == "count") { # For counts data only - combined_df <- fill_missing_updates(subdf, value_cols[1], "time_value", "lag") - combined_df <- add_7davs_and_target(combined_df, "value_raw", "time_value", "lag", ref_lag) - } else if (value_type == "fraction") { - combined_num_df <- fill_missing_updates(subdf, value_cols[1], "time_value", "lag") - combined_num_df <- add_7davs_and_target(combined_num_df, "value_raw", "time_value", "lag", ref_lag) - - combined_denom_df <- fill_missing_updates(subdf, value_cols[2], "time_value", "lag") - combined_denom_df <- add_7davs_and_target(combined_denom_df, "value_raw", "time_value", "lag", ref_lag) - - combined_df <- merge(combined_num_df, combined_denom_df, - by=c("time_value", "issue_date", "lag", "target_date"), all.y=TRUE, - suffixes=c("_num", "_denom")) - } - combined_df <- add_params_for_dates(combined_df, "time_value", "lag") - - for (test_date in test_date_list) { - geo_train_data = combined_df %>% - filter(.data$issue_date < .env$test_date) %>% - filter(.data$target_date <= .env$test_date) %>% - filter(.data$target_date > .env$test_date - .env$training_days) %>% - drop_na() - geo_test_data = combined_df %>% - filter(.data$issue_date >= .env$test_date) %>% - filter(.data$issue_date < .env$test_date + .env$testing_window) %>% - drop_na() - if (nrow(geo_test_data) == 0) next - if (nrow(geo_train_data) <= 200) next - if (value_type == "fraction") { - geo_prior_test_data = combined_df %>% - filter(.data$issue_date > .env$test_date - 7) %>% - filter(.data$issue_date <= .env$test_date) - - updated_data <- ratio_adj(geo_train_data, geo_test_data, geo_prior_test_data) - geo_train_data <- updated_data[[1]] - geo_test_data <- updated_data[[2]] - } - - max_raw = sqrt(max(geo_train_data$value_raw)) - for (test_lag in test_lags) { - 
filtered_data <- data_filteration(test_lag, geo_train_data, geo_test_data) - train_data <- filtered_data[[1]] - test_data <- filtered_data[[2]] - - updated_data <- add_sqrtscale(train_data, test_data, max_raw, "value_raw") - train_data <- updated_data[[1]] - test_data <- updated_data[[2]] - sqrtscale <- updated_data[[3]] - - covariates <- list( - Y7DAV, paste0(WEEKDAYS_ABBR, "_ref"), paste0(WEEKDAYS_ABBR, "_issue"), - WEEK_ISSUES, SLOPE, SQRTSCALE - ) - params_list <- c(YITL, as.vector(unlist(covariates))) - - # Model training and testing - model_path_prefix <- generate_model_filename_prefix( - indicator, signal, geo, signal_suffix, value_type, test_lag, tau, lambda) - prediction_results <- model_training_and_testing( - train_data, test_data, taus, params_list, lp_solver, - lambda, test_date, geo, value_type = value_type, test_lag = test_lag - ) - test_data <- prediction_results[[1]] - coefs <- prediction_results[[2]] - test_data <- evaluate(test_data, taus) - test_data$test_date <- test_date - coefs$test_date <- test_date - coefs$test_lag <- test_lag - coefs$geo_value <- geo - - res_list[[res_indx]] = test_data - coef_df_list[[res_indx]] = coefs - res_indx = res_indx+1 - export_test_result(test_data, coefs, export_dir, - geo, test_lag) - }# End for test lags - }# End for test date list - result_df = do.call(rbind, res_list) - coefs_df = do.call(rbind.fill, coef_df_list) - export_test_result(result_df, coefs_df, export_dir, geo) - }# End for geo list -} - -#' Main function to correct a single local signal -#' -#' @template input_dir-template -#' @template export_dir-template -#' @param test_start_date Date or string in the format "YYYY-MM-DD" to start -#' making predictions on -#' @param test_end_date Date or string in the format "YYYY-MM-DD" to stop -#' making predictions on -#' @template num_col-template -#' @template denom_col-template -#' @template value_type-template -#' @template training_days-template -#' @template testing_window-template -#' @template lambda-template -#' @template ref_lag-template -#' @template lp_solver-template -#' -#' @importFrom readr read_csv -#' -#' @export -main_local <- function(input_dir, export_dir, - test_start_date, test_end_date, - num_col, denom_col,value_type = c("count", "fraction"), - training_days = TRAINING_DAYS, testing_window = TESTING_WINDOW, - lambda = LAMBDA, ref_lag = REF_LAG, lp_solver = LP_SOLVER) { - value_type <- match.arg(value_type) - - # Check input data - df = read_csv(input_dir) - - # Check data type and required columns - result <- validity_checks(df, value_type, num_col, denom_col) - df <- result[["df"]] - value_cols <- result[["value_cols"]] - - # Get test date list according to the test start date - if (is.null(test_start_date)) { - test_start_date = max(df$issue_date) - } else { - test_start_date = as.Date(test_start_date) - } - - if (is.null(test_end_date)) { - test_end_date = max(df$issue_date) - } else { - test_end_date = as.Date(test_end_date) - } - - test_date_list = seq(test_start_date, test_end_date, by="days") - - # Check available training days - training_days_check(df$issue_date, training_days) - - run_backfill_local(df, export_dir, - test_date_list, value_cols, value_type, - TAUS, TEST_LAGS, training_days, testing_window, - ref_lag, lambda, lp_solver) -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/utils.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/utils.R deleted file mode 100644 index fdcaf42e4..000000000 
--- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/R/utils.R +++ /dev/null @@ -1,165 +0,0 @@ -#' Return params file as an R list -#' -#' Reads a parameters file. If the file does not exist, the function will create a copy of -#' '"params.json.template" and read from that. -#' -#' A params list should contain the following fields. If not included, -#' they will be filled with default values when possible. -#' -#' params$ref_lag: reference lag, after x days, the update is considered to be -#' the response. 60 is a reasonable choice for CHNG outpatient data -#' params$input_dir: link to the input data file -#' params$testing_window: the testing window used for saving the runtime. Could -#' set it to be 1 if time allows -#' params$test_dates: list of two elements, the first one is the start date and -#' the second one is the end date -#' params$training_days: set it to be 270 or larger if you have enough data -#' params$num_col: the column name for the counts of the numerator, e.g. the -#' number of COVID claims -#' params$denom_col: the column name for the counts of the denominator, e.g. the -#' number of total claims -#' params$geo_level: character vector of "state" and "county", by default -#' params$taus: vector of considered quantiles -#' params$lambda: the level of lasso penalty -#' params$export_dir: directory to save corrected data to -#' params$lp_solver: LP solver to use in quantile_lasso(); "gurobi" or "glpk" -#' -#' @param path path to the parameters file; if not present, will try to copy the file -#' "params.json.template" -#' @param template_path path to the template parameters file -#' @template train_models-template -#' @template make_predictions-template -#' -#' @return a named list of parameters values -#' -#' @importFrom dplyr if_else -#' @importFrom jsonlite read_json -read_params <- function(path = "params.json", template_path = "params.json.template", - train_models = TRUE, make_predictions = TRUE) { - if (!file.exists(path)) {file.copy(template_path, path)} - params <- read_json(path, simplifyVector = TRUE) - - # Required parameters - if (!("input_dir" %in% names(params)) || !dir.exists(params$input_dir)) { - stop("input_dir must be set in `params` and exist") - } - params$train_models <- train_models - params$make_predictions <- make_predictions - - ## Set default parameter values if not specified - # Paths - if (!("export_dir" %in% names(params))) {params$export_dir <- "./receiving"} - if (!("cache_dir" %in% names(params))) {params$cache_dir <- "./cache"} - - # Parallel parameters - if (!("parallel" %in% names(params))) {params$parallel <- FALSE} - if (!("parallel_max_cores" %in% names(params))) {params$parallel_max_cores <- .Machine$integer.max} - - # Model parameters - if (!("taus" %in% names(params))) {params$taus <- TAUS} - if (!("lambda" %in% names(params))) {params$lambda <- LAMBDA} - if (!("lp_solver" %in% names(params))) {params$lp_solver <- LP_SOLVER} - if (!("lag_pad" %in% names(params))) {params$lag_pad <- LAG_PAD} - - # Data parameters - if (!("num_col" %in% names(params))) {params$num_col <- "num"} - if (!("denom_col" %in% names(params))) {params$denom_col <- "denom"} - if (!("geo_levels" %in% names(params))) {params$geo_levels <- c("state", "county")} - if (!("value_types" %in% names(params))) {params$value_types <- c("count", "fraction")} - - # Date parameters - if (!("training_days" %in% names(params))) {params$training_days <- TRAINING_DAYS} - if (!("ref_lag" %in% names(params))) {params$ref_lag <- REF_LAG} - 
if (!("testing_window" %in% names(params))) {params$testing_window <- TESTING_WINDOW} - if (!("test_dates" %in% names(params)) || length(params$test_dates) == 0) { - start_date <- TODAY - params$testing_window - end_date <- TODAY - 1 - params$test_dates <- seq(start_date, end_date, by="days") - } - - return(params) -} - -#' Create directory if not already existing -#' -#' @param path string specifying a directory to create -#' -#' @export -create_dir_not_exist <- function(path) -{ - if (!dir.exists(path)) { dir.create(path) } -} - -#' Check input data for validity -#' -#' @template df-template -#' @template value_type-template -#' @template num_col-template -#' @template denom_col-template -#' @template signal_suffixes-template -#' -#' @return list of input dataframe augmented with lag column, if it -#' didn't already exist, and character vector of one or two value -#' column names, depending on requested `value_type` -validity_checks <- function(df, value_type, num_col, denom_col, signal_suffixes) { - if (!missing(signal_suffixes)) { - num_col <- paste(num_col, signal_suffixes, sep = "_") - denom_col <- paste(num_col, signal_suffixes, sep = "_") - } - - # Check data type and required columns - if (value_type == "count") { - if (all(num_col %in% colnames(df))) {value_cols=c(num_col)} - else if (all(denom_col %in% colnames(df))) {value_cols=c(denom_col)} - else {stop("No valid column name detected for the count values!")} - } else if (value_type == "fraction") { - value_cols = c(num_col, denom_col) - if ( any(!(value_cols %in% colnames(df))) ) { - stop("No valid column name detected for the fraction values!") - } - } - - # time_value must exist in the dataset - if ( !"time_value" %in% colnames(df) ) { - stop("No 'time_value' column detected for the reference date!") - } - - # issue_date or lag should exist in the dataset - if ( !"lag" %in% colnames(df) ) { - if ( "issue_date" %in% colnames(df) ) { - df$lag = as.integer(df$issue_date - df$time_value) - } - else {stop("No issue_date or lag exists!")} - } - - return(list(df = df, value_cols = value_cols)) -} - -#' Check available training days -#' -#' @param issue_date contents of input data's `issue_date` column -#' @template training_days-template -training_days_check <- function(issue_date, training_days = TRAINING_DAYS) { - valid_training_days = as.integer(max(issue_date) - min(issue_date)) + 1 - if (training_days > valid_training_days) { - warning(sprintf("Only %d days are available at most for training.", valid_training_days)) - } -} - -#' Subset list of counties to those included in the 200 most populous in the US -#' -#' @importFrom dplyr select %>% arrange desc pull -#' @importFrom rlang .data -#' @importFrom utils head -#' @import covidcast -get_populous_counties <- function() { - return( - covidcast::county_census %>% - dplyr::select(pop = .data$POPESTIMATE2019, fips = .data$FIPS) %>% - # Drop megacounties (states) - filter(!endsWith(.data$fips, "000")) %>% - arrange(desc(pop)) %>% - pull(.data$fips) %>% - head(n=200) - ) -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/covariates-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/covariates-template.R deleted file mode 100644 index b343ffea6..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/covariates-template.R +++ /dev/null @@ -1 +0,0 @@ -#' @param covariates character vector of 
column names serving as the covariates for the model diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/denom_col-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/denom_col-template.R deleted file mode 100644 index 8b16d87bb..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/denom_col-template.R +++ /dev/null @@ -1 +0,0 @@ -#' @param denom_col name of denominator column in the input dataframe diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/df-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/df-template.R deleted file mode 100644 index 4aa746f51..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/df-template.R +++ /dev/null @@ -1,2 +0,0 @@ -#' @param df Data Frame of aggregated counts within a single location -#' reported for each reference date and issue date. diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/export_dir-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/export_dir-template.R deleted file mode 100644 index 4d933cada..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/export_dir-template.R +++ /dev/null @@ -1 +0,0 @@ -#' @param export_dir path to directory to save output to diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/file_type-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/file_type-template.R deleted file mode 100644 index 36c241abd..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/file_type-template.R +++ /dev/null @@ -1,2 +0,0 @@ -#' @param file_type string specifying time period coverage of input files. -#' Either "daily" or "rollup" diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/geo-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/geo-template.R deleted file mode 100644 index ae9dfeef0..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/geo-template.R +++ /dev/null @@ -1,2 +0,0 @@ -#' @param geo string specifying the name of the geo region (e.g. FIPS -#' code for counties) diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/geo_level-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/geo_level-template.R deleted file mode 100644 index 778da39a4..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/geo_level-template.R +++ /dev/null @@ -1,2 +0,0 @@ -#' @param geo_level string describing geo coverage of input data. Either "state" -#' or "county". 
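For illustration, a minimal R sketch of the lag derivation that validity_checks() above describes when the input carries an issue_date column but no lag column; the data frame and its values here are hypothetical, with column names following the package defaults:

# Hypothetical input: one location, one reference date, two report dates.
df <- data.frame(
  time_value = as.Date(c("2021-06-01", "2021-06-01")),
  issue_date = as.Date(c("2021-06-03", "2021-06-10")),
  num = c(10, 12),
  denom = c(100, 110)
)
# lag = number of days between the issue (report) date and the reference date
df$lag <- as.integer(df$issue_date - df$time_value)
df$lag  # 2 9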
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/indicator-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/indicator-template.R deleted file mode 100644 index 964cada2d..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/indicator-template.R +++ /dev/null @@ -1,3 +0,0 @@ -#' @param indicator string specifying the name of the indicator as used in -#' `parquet` input data filenames. One indicator can be associated -#' with multiple signals. diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/input_dir-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/input_dir-template.R deleted file mode 100644 index a17583499..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/input_dir-template.R +++ /dev/null @@ -1 +0,0 @@ -#' @param input_dir path to the directory containing input data diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lag_col-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lag_col-template.R deleted file mode 100644 index b3e79f0fa..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lag_col-template.R +++ /dev/null @@ -1,2 +0,0 @@ -#' @param lag_col string specifying name of lag field within -#' the input dataframe. diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lambda-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lambda-template.R deleted file mode 100644 index aacbb3865..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lambda-template.R +++ /dev/null @@ -1 +0,0 @@ -#' @param lambda the level of lasso penalty diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lp_solver-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lp_solver-template.R deleted file mode 100644 index d42a4435b..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/lp_solver-template.R +++ /dev/null @@ -1,4 +0,0 @@ -#' @param lp_solver string specifying the lp solver to use in -#' Quantgen fitting. Either "glpk" or "gurobi". For faster -#' optimization, use Gurobi (requires separate installation -#' of the `gurobi` package). diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/make_predictions-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/make_predictions-template.R deleted file mode 100644 index ff57c25f6..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/make_predictions-template.R +++ /dev/null @@ -1,2 +0,0 @@ -#' @param make_predictions boolean indicating whether to generate and save -#' corrections (TRUE) or not. Default is TRUE. 
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/num_col-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/num_col-template.R deleted file mode 100644 index 76b0aa148..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/num_col-template.R +++ /dev/null @@ -1 +0,0 @@ -#' @param num_col name of numerator column in the input dataframe diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/params-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/params-template.R deleted file mode 100644 index 3af9823f3..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/params-template.R +++ /dev/null @@ -1,4 +0,0 @@ -#' @param params named list containing modeling and data settings. Must include -#' the following elements: `ref_lag`, `testing_window`, `test_dates`, -#' `training_days`, `num_col`, `denom_col`, `taus`, `lambda`, `export_dir`, -#' `lp_solver`, `input_dir`, `cache_dir`, `geo_levels`, and `value_types`. diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/ref_lag-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/ref_lag-template.R deleted file mode 100644 index b10e188c4..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/ref_lag-template.R +++ /dev/null @@ -1 +0,0 @@ -#' @param ref_lag max lag to use for training diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/refd_col-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/refd_col-template.R deleted file mode 100644 index 09644a4aa..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/refd_col-template.R +++ /dev/null @@ -1,2 +0,0 @@ -#' @param refd_col string specifying name of reference date field within -#' the input dataframe. diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal-template.R deleted file mode 100644 index d87790af7..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal-template.R +++ /dev/null @@ -1,3 +0,0 @@ -#' @param signal string specifying the name of the signal as used in -#' `parquet` input data filenames. One indicator can be associated -#' with multiple signals. 
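For reference, a hypothetical params list covering the elements that the params argument documented above must include; values marked illustrative are assumptions, while the defaults and suggestions follow the read_params() documentation (e.g. ref_lag = 60 for CHNG outpatient data, training_days = 270 or larger):

params <- list(
  input_dir      = "./input",                     # hypothetical; must exist
  export_dir     = "./receiving",                 # default
  cache_dir      = "./cache",                     # default
  ref_lag        = 60,
  testing_window = 1,
  test_dates     = c("2022-05-01", "2022-05-07"), # start date, end date
  training_days  = 270,
  num_col        = "num",                         # default
  denom_col      = "denom",                       # default
  geo_levels     = c("state", "county"),          # default
  value_types    = c("count", "fraction"),        # default
  taus           = c(0.1, 0.25, 0.5, 0.75, 0.9),  # illustrative quantiles
  lambda         = 0.1,                           # illustrative penalty level
  lp_solver      = "glpk"                         # or "gurobi" if installed
)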
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal_suffix-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal_suffix-template.R deleted file mode 100644 index eb3819558..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal_suffix-template.R +++ /dev/null @@ -1,5 +0,0 @@ -#' @param signal_suffix string specifying value column name -#' ending to be appended to standard value column names from -#' `params$num_col` and `params$denom_col`. Used for non-standard -#' value column names and when processing multiple signals from a -#' single input dataframe, as with `quidel`'s age buckets. diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal_suffixes-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal_suffixes-template.R deleted file mode 100644 index e58e6cc4e..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/signal_suffixes-template.R +++ /dev/null @@ -1,5 +0,0 @@ -#' @param signal_suffixes character vector specifying value column name -#' endings to be appended to standard value column names from -#' `params$num_col` and `params$denom_col`. Used for non-standard -#' value column names and when processing multiple signals from a -#' single input dataframe, as with `quidel`'s age buckets. diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/taus-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/taus-template.R deleted file mode 100644 index b383e35f8..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/taus-template.R +++ /dev/null @@ -1,2 +0,0 @@ -#' @param taus numeric vector of quantiles to be predicted. Values -#' must be between 0 and 1. diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/test_lag-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/test_lag-template.R deleted file mode 100644 index bd26b3386..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/test_lag-template.R +++ /dev/null @@ -1 +0,0 @@ -#' @param test_lag integer number of days ago to predict for diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/testing_window-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/testing_window-template.R deleted file mode 100644 index 60b6c847b..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/testing_window-template.R +++ /dev/null @@ -1,2 +0,0 @@ -#' @param testing_window the testing window used for saving the runtime. 
Could -#' set it to be 1 if time allows diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/time_col-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/time_col-template.R deleted file mode 100644 index 3be84de74..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/time_col-template.R +++ /dev/null @@ -1,2 +0,0 @@ -#' @param time_col string specifying name of column used for the -#' date, can be either reference date or issue date diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/train_data-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/train_data-template.R deleted file mode 100644 index 2c8fd3de6..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/train_data-template.R +++ /dev/null @@ -1 +0,0 @@ -#' @param train_data Data Frame containing training data diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/train_models-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/train_models-template.R deleted file mode 100644 index 3048087af..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/train_models-template.R +++ /dev/null @@ -1,3 +0,0 @@ -#' @param train_models boolean indicating whether to train models (TRUE). If -#' FALSE previously trained models (stored locally) will be used instead. -#' Default is TRUE. diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/training_days-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/training_days-template.R deleted file mode 100644 index 32f6c3a9d..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/training_days-template.R +++ /dev/null @@ -1 +0,0 @@ -#' @param training_days integer number of days to use for training diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/value_col-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/value_col-template.R deleted file mode 100644 index 0cc922d14..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/value_col-template.R +++ /dev/null @@ -1,2 +0,0 @@ -#' @param value_col string specifying name of value (counts) field within -#' the input dataframe. diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/value_type-template.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/value_type-template.R deleted file mode 100644 index c49b7e84b..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man-roxygen/value_type-template.R +++ /dev/null @@ -1 +0,0 @@ -#' @param value_type string describing signal type. Either "count" or "fraction". 
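As a small, hypothetical illustration of the signal_suffix mechanism described earlier (e.g. quidel-style age buckets), the suffix is appended to the standard numerator and denominator column names:

num_col       <- "num"
denom_col     <- "denom"
signal_suffix <- "age_0_17"                  # hypothetical age-bucket suffix
paste(num_col, signal_suffix, sep = "_")     # "num_age_0_17"
paste(denom_col, signal_suffix, sep = "_")   # "denom_age_0_17"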
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_7davs_and_target.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_7davs_and_target.Rd deleted file mode 100644 index 25a0dee23..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_7davs_and_target.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/preprocessing.R -\name{add_7davs_and_target} -\alias{add_7davs_and_target} -\title{Add 7dav and target to the data -Target is the updates made ref_lag days after the first release} -\usage{ -add_7davs_and_target(df, value_col, refd_col, lag_col, ref_lag = REF_LAG) -} -\arguments{ -\item{df}{Data Frame of aggregated counts within a single location -reported for each reference date and issue date.} - -\item{value_col}{string specifying name of value (counts) field within -the input dataframe.} - -\item{refd_col}{string specifying name of reference date field within -the input dataframe.} - -\item{lag_col}{string specifying name of lag field within -the input dataframe.} - -\item{ref_lag}{max lag to use for training} -} -\description{ -Add 7dav and target to the data -Target is the updates made ref_lag days after the first release -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_dayofweek.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_dayofweek.Rd deleted file mode 100644 index 02cc129a0..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_dayofweek.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/preprocessing.R -\name{add_dayofweek} -\alias{add_dayofweek} -\title{Add one hot encoding for day of a week info in terms of reference -and issue date} -\usage{ -add_dayofweek(df, time_col, suffix, wd = WEEKDAYS_ABBR) -} -\arguments{ -\item{df}{Data Frame of aggregated counts within a single location -reported for each reference date and issue date.} - -\item{time_col}{string specifying name of column used for the -date, can be either reference date or issue date} - -\item{suffix}{suffix added to indicate which kind of date is used} - -\item{wd}{vector of days of a week} -} -\description{ -Add one hot encoding for day of a week info in terms of reference -and issue date -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_params_for_dates.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_params_for_dates.Rd deleted file mode 100644 index d9303d7d6..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_params_for_dates.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/preprocessing.R -\name{add_params_for_dates} -\alias{add_params_for_dates} -\title{Add params related to date} -\usage{ -add_params_for_dates(df, refd_col, lag_col) -} -\arguments{ -\item{df}{Data Frame of aggregated counts within a single location -reported for each reference date and issue date.} - -\item{refd_col}{string specifying name of reference date field within -the input dataframe.} - -\item{lag_col}{string specifying name of lag 
field within -the input dataframe.} -} -\description{ -Target is the updates made ref_lag days after the first release -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_shift.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_shift.Rd deleted file mode 100644 index d4adc5823..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_shift.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/preprocessing.R -\name{add_shift} -\alias{add_shift} -\title{Used for data shifting in terms of reference date} -\usage{ -add_shift(df, n_day, refd_col) -} -\arguments{ -\item{df}{Data Frame of aggregated counts within a single location -reported for each reference date and issue date.} - -\item{n_day}{number of days to be shifted} - -\item{refd_col}{string specifying name of reference date field within -the input dataframe.} -} -\description{ -Used for data shifting in terms of reference date -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_sqrtscale.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_sqrtscale.Rd deleted file mode 100644 index 47af18a24..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_sqrtscale.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model.R -\name{add_sqrtscale} -\alias{add_sqrtscale} -\title{Add columns to indicate the scale of value at square root level} -\usage{ -add_sqrtscale(train_data, test_data, max_raw, value_col) -} -\arguments{ -\item{train_data}{Data Frame containing training data} - -\item{test_data}{Data Frame for testing} - -\item{max_raw}{the maximum value in the training data at square root level} - -\item{value_col}{string specifying name of value (counts) field within -the input dataframe.} -} -\description{ -Add columns to indicate the scale of value at square root level -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_weekofmonth.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_weekofmonth.Rd deleted file mode 100644 index 260efb519..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/add_weekofmonth.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/preprocessing.R -\name{add_weekofmonth} -\alias{add_weekofmonth} -\title{Add one hot encoding for week of a month info in terms of issue date} -\usage{ -add_weekofmonth(df, time_col, wm = WEEK_ISSUES) -} -\arguments{ -\item{df}{Data Frame of aggregated counts within a single location -reported for each reference date and issue date.} - -\item{time_col}{string specifying name of column used for the -date, can be either reference date or issue date} - -\item{wm}{vector of weeks of a month} -} -\description{ -Add one hot encoding for week of a month info in terms of issue date -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/create_dir_not_exist.Rd 
b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/create_dir_not_exist.Rd deleted file mode 100644 index 1a9b887a5..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/create_dir_not_exist.Rd +++ /dev/null @@ -1,14 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/utils.R -\name{create_dir_not_exist} -\alias{create_dir_not_exist} -\title{Create directory if not already existing} -\usage{ -create_dir_not_exist(path) -} -\arguments{ -\item{path}{string specifying a directory to create} -} -\description{ -Create directory if not already existing -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/create_name_pattern.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/create_name_pattern.Rd deleted file mode 100644 index 603e25627..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/create_name_pattern.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/io.R -\name{create_name_pattern} -\alias{create_name_pattern} -\title{Create pattern to match input files of a given type and signal} -\usage{ -create_name_pattern(indicator, signal, file_type = c("daily", "rollup")) -} -\arguments{ -\item{indicator}{string specifying the name of the indicator as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} - -\item{signal}{string specifying the name of the signal as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} - -\item{file_type}{string specifying time period coverage of input files. 
-Either "daily" or "rollup"} -} -\description{ -Create pattern to match input files of a given type and signal -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/data_filteration.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/data_filteration.Rd deleted file mode 100644 index d8589ecac..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/data_filteration.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model.R -\name{data_filteration} -\alias{data_filteration} -\title{Filtration for training and testing data with different lags} -\usage{ -data_filteration(test_lag, geo_train_data, geo_test_data, lag_pad) -} -\arguments{ -\item{test_lag}{integer number of days ago to predict for} - -\item{geo_train_data}{training data for a certain location} - -\item{geo_test_data}{testing data for a certain location} - -\item{lag_pad}{lag padding for training} -} -\description{ -Filtration for training and testing data with different lags -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/delta.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/delta.Rd deleted file mode 100644 index 7d1af25ca..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/delta.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/beta_prior_estimation.R -\name{delta} -\alias{delta} -\title{Sum of squared error} -\usage{ -delta(fit, actual) -} -\arguments{ -\item{fit}{estimated values} - -\item{actual}{actual values} -} -\description{ -Sum of squared error -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/est_priors.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/est_priors.Rd deleted file mode 100644 index 881864341..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/est_priors.Rd +++ /dev/null @@ -1,97 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/beta_prior_estimation.R -\name{est_priors} -\alias{est_priors} -\title{Main function for the beta prior approach -Estimate the priors for the beta distribution based on data for -a certain day of a week} -\usage{ -est_priors( - train_data, - prior_test_data, - geo, - value_type, - dw, - taus, - covariates, - response, - lp_solver, - lambda, - indicator, - signal, - geo_level, - signal_suffix, - training_end_date, - model_save_dir, - start = c(0, log(10)), - base_pseudo_denom = 1000, - base_pseudo_num = 10, - train_models = TRUE, - make_predictions = TRUE -) -} -\arguments{ -\item{train_data}{Data Frame containing training data} - -\item{prior_test_data}{Data Frame for testing} - -\item{geo}{string specifying the name of the geo region (e.g. FIPS -code for counties)} - -\item{value_type}{string describing signal type. Either "count" or "fraction".} - -\item{dw}{column name to indicate which day of a week it is} - -\item{taus}{numeric vector of quantiles to be predicted. 
Values -must be between 0 and 1.} - -\item{covariates}{character vector of column names serving as the covariates for the model} - -\item{response}{the column name of the response variable} - -\item{lp_solver}{string specifying the lp solver to use in -Quantgen fitting. Either "glpk" or "gurobi". For faster -optimization, use Gurobi (requires separate installation -of the `gurobi` package).} - -\item{lambda}{the level of lasso penalty} - -\item{indicator}{string specifying the name of the indicator as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} - -\item{signal}{string specifying the name of the signal as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} - -\item{geo_level}{string describing geo coverage of input data. Either "state" -or "county".} - -\item{signal_suffix}{string specifying value column name -ending to be appended to standard value column names from -`params$num_col` and `params$denom_col`. Used for non-standard -value column names and when processing multiple signals from a -single input dataframe, as with `quidel`'s age buckets.} - -\item{training_end_date}{the most recent training date} - -\item{model_save_dir}{directory containing trained models} - -\item{start}{the initialization of the the points in nlm} - -\item{base_pseudo_denom}{the pseudo counts added to denominator if little data for training} - -\item{base_pseudo_num}{the pseudo counts added to numerator if little data for training} - -\item{train_models}{boolean indicating whether to train models (TRUE). If -FALSE previously trained models (stored locally) will be used instead. -Default is TRUE.} - -\item{make_predictions}{boolean indicating whether to generate and save -corrections (TRUE) or not. Default is TRUE.} -} -\description{ -Main function for the beta prior approach -Estimate the priors for the beta distribution based on data for -a certain day of a week -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/evaluate.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/evaluate.Rd deleted file mode 100644 index fc4d3c347..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/evaluate.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model.R -\name{evaluate} -\alias{evaluate} -\title{Evaluation of the test results based on WIS score -The WIS score calculation is based on the weighted_interval_score function -from the `evalcast` package from Delphi} -\usage{ -evaluate(test_data, taus) -} -\arguments{ -\item{test_data}{dataframe with a column containing the prediction results of -each requested quantile. Each row represents an update with certain -(reference_date, issue_date, location) combination.} - -\item{taus}{numeric vector of quantiles to be predicted. 
Values -must be between 0 and 1.} -} -\description{ -Evaluation of the test results based on WIS score -The WIS score calculation is based on the weighted_interval_score function -from the `evalcast` package from Delphi -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/export_test_result.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/export_test_result.Rd deleted file mode 100644 index 77c2088d5..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/export_test_result.Rd +++ /dev/null @@ -1,52 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/io.R -\name{export_test_result} -\alias{export_test_result} -\title{Export the result to customized directory} -\usage{ -export_test_result( - test_data, - coef_data, - indicator, - signal, - geo_level, - signal_suffix, - lambda, - training_end_date, - value_type, - export_dir -) -} -\arguments{ -\item{test_data}{test data containing prediction results} - -\item{coef_data}{data frame containing the estimated coefficients} - -\item{indicator}{string specifying the name of the indicator as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} - -\item{signal}{string specifying the name of the signal as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} - -\item{geo_level}{string describing geo coverage of input data. Either "state" -or "county".} - -\item{signal_suffix}{string specifying value column name -ending to be appended to standard value column names from -`params$num_col` and `params$denom_col`. Used for non-standard -value column names and when processing multiple signals from a -single input dataframe, as with `quidel`'s age buckets.} - -\item{lambda}{the level of lasso penalty} - -\item{training_end_date}{the most recent training date} - -\item{value_type}{string describing signal type. Either "count" or "fraction".} - -\item{export_dir}{path to directory to save output to} -} -\description{ -Export the result to customized directory -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/fill_missing_updates.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/fill_missing_updates.Rd deleted file mode 100644 index 6318730ee..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/fill_missing_updates.Rd +++ /dev/null @@ -1,30 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/preprocessing.R -\name{fill_missing_updates} -\alias{fill_missing_updates} -\title{Get pivot table, filling NANs. If there is no update on issue date D but -previous reports exist for issue date D_p < D, all the dates between -[D_p, D] are filled with with the reported value on date D_p. 
If there is -no update for any previous issue date, fill in with 0.} -\usage{ -fill_missing_updates(df, value_col, refd_col, lag_col) -} -\arguments{ -\item{df}{Data Frame of aggregated counts within a single location -reported for each reference date and issue date.} - -\item{value_col}{string specifying name of value (counts) field within -the input dataframe.} - -\item{refd_col}{string specifying name of reference date field within -the input dataframe.} - -\item{lag_col}{string specifying name of lag field within -the input dataframe.} -} -\description{ -Get pivot table, filling NANs. If there is no update on issue date D but -previous reports exist for issue date D_p < D, all the dates between -[D_p, D] are filled with with the reported value on date D_p. If there is -no update for any previous issue date, fill in with 0. -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/fill_rows.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/fill_rows.Rd deleted file mode 100644 index e446e6e1d..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/fill_rows.Rd +++ /dev/null @@ -1,30 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/preprocessing.R -\name{fill_rows} -\alias{fill_rows} -\title{Re-index, fill na, make sure all reference date have enough rows for updates} -\usage{ -fill_rows(df, refd_col, lag_col, min_refd, max_refd, ref_lag = REF_LAG) -} -\arguments{ -\item{df}{Data Frame of aggregated counts within a single location -reported for each reference date and issue date.} - -\item{refd_col}{string specifying name of reference date field within -the input dataframe.} - -\item{lag_col}{string specifying name of lag field within -the input dataframe.} - -\item{min_refd}{the earliest reference date considered in the data} - -\item{max_refd}{the latest reference date considered in the data} - -\item{ref_lag}{max lag to use for training} -} -\value{ -df_new Data Frame with filled rows for missing lags -} -\description{ -Re-index, fill na, make sure all reference date have enough rows for updates -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/frac_adj.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/frac_adj.Rd deleted file mode 100644 index f2de00345..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/frac_adj.Rd +++ /dev/null @@ -1,78 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/beta_prior_estimation.R -\name{frac_adj} -\alias{frac_adj} -\title{Update fraction using beta prior approach} -\usage{ -frac_adj( - train_data, - test_data, - prior_test_data, - indicator, - signal, - geo_level, - signal_suffix, - lambda, - value_type, - geo, - training_end_date, - model_save_dir, - taus = TAUS, - lp_solver = LP_SOLVER, - train_models = TRUE, - make_predictions = TRUE -) -} -\arguments{ -\item{train_data}{Data Frame containing training data} - -\item{test_data}{testing data} - -\item{prior_test_data}{testing data for the lag -1 model} - -\item{indicator}{string specifying the name of the indicator as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} - -\item{signal}{string specifying the name of the signal as used in -`parquet` input data filenames. 
One indicator can be associated -with multiple signals.} - -\item{geo_level}{string describing geo coverage of input data. Either "state" -or "county".} - -\item{signal_suffix}{string specifying value column name -ending to be appended to standard value column names from -`params$num_col` and `params$denom_col`. Used for non-standard -value column names and when processing multiple signals from a -single input dataframe, as with `quidel`'s age buckets.} - -\item{lambda}{the level of lasso penalty} - -\item{value_type}{string describing signal type. Either "count" or "fraction".} - -\item{geo}{string specifying the name of the geo region (e.g. FIPS -code for counties)} - -\item{training_end_date}{the most recent training date} - -\item{model_save_dir}{directory containing trained models} - -\item{taus}{numeric vector of quantiles to be predicted. Values -must be between 0 and 1.} - -\item{lp_solver}{string specifying the lp solver to use in -Quantgen fitting. Either "glpk" or "gurobi". For faster -optimization, use Gurobi (requires separate installation -of the `gurobi` package).} - -\item{train_models}{boolean indicating whether to train models (TRUE). If -FALSE previously trained models (stored locally) will be used instead. -Default is TRUE.} - -\item{make_predictions}{boolean indicating whether to generate and save -corrections (TRUE) or not. Default is TRUE.} -} -\description{ -Update fraction using beta prior approach -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/frac_adj_with_pseudo.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/frac_adj_with_pseudo.Rd deleted file mode 100644 index 2ae59d33a..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/frac_adj_with_pseudo.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/beta_prior_estimation.R -\name{frac_adj_with_pseudo} -\alias{frac_adj_with_pseudo} -\title{Update fraction based on the pseudo counts for numerators and denominators} -\usage{ -frac_adj_with_pseudo(data, dw, pseudo_num, pseudo_denom, num_col, denom_col) -} -\arguments{ -\item{data}{Data Frame} - -\item{dw}{character to indicate the day of a week. 
Can be NULL for all the days} - -\item{pseudo_num}{the estimated counts to be added to numerators} - -\item{pseudo_denom}{the estimated counts to be added to denominators} - -\item{num_col}{name of numerator column in the input dataframe} - -\item{denom_col}{name of denominator column in the input dataframe} -} -\description{ -Update fraction based on the pseudo counts for numerators and denominators -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/generate_filename.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/generate_filename.Rd deleted file mode 100644 index ba40a8aa2..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/generate_filename.Rd +++ /dev/null @@ -1,65 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model.R -\name{generate_filename} -\alias{generate_filename} -\title{Construct filename for model with given parameters} -\usage{ -generate_filename( - indicator, - signal, - geo_level, - signal_suffix, - lambda, - training_end_date = "", - geo = "", - value_type = "", - test_lag = "", - tau = "", - dw = "", - beta_prior_mode = FALSE, - model_mode = TRUE -) -} -\arguments{ -\item{indicator}{string specifying the name of the indicator as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} - -\item{signal}{string specifying the name of the signal as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} - -\item{geo_level}{string describing geo coverage of input data. Either "state" -or "county".} - -\item{signal_suffix}{string specifying value column name -ending to be appended to standard value column names from -`params$num_col` and `params$denom_col`. Used for non-standard -value column names and when processing multiple signals from a -single input dataframe, as with `quidel`'s age buckets.} - -\item{lambda}{the level of lasso penalty} - -\item{training_end_date}{the most recent training date} - -\item{geo}{string specifying the name of the geo region (e.g. FIPS -code for counties)} - -\item{value_type}{string describing signal type. Either "count" or "fraction".} - -\item{test_lag}{integer number of days ago to predict for} - -\item{tau}{decimal quantile to be predicted. 
Values must be between 0 and 1.} - -\item{dw}{string, indicate the day of a week} - -\item{beta_prior_mode}{bool, indicate whether it is for a beta prior model} - -\item{model_mode}{bool, indicate whether the file name is for a model} -} -\value{ -path to file containing model object -} -\description{ -Construct filename for model with given parameters -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_7dav.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_7dav.Rd deleted file mode 100644 index b328bfb2b..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_7dav.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/preprocessing.R -\name{get_7dav} -\alias{get_7dav} -\title{Calculate 7 day moving average for each issue date -The 7dav for date D reported on issue date D_i is the average from D-7 to D-1} -\usage{ -get_7dav(pivot_df, refd_col) -} -\arguments{ -\item{pivot_df}{Data Frame where the columns are issue dates and the rows are -reference dates} - -\item{refd_col}{string specifying name of reference date field within -the input dataframe.} -} -\description{ -Calculate 7 day moving average for each issue date -The 7dav for date D reported on issue date D_i is the average from D-7 to D-1 -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_files_list.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_files_list.Rd deleted file mode 100644 index 6b193bba5..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_files_list.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/io.R -\name{get_files_list} -\alias{get_files_list} -\title{List valid input files.} -\usage{ -get_files_list(indicator, signal, params, sub_dir) -} -\arguments{ -\item{indicator}{string specifying the name of the indicator as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} - -\item{signal}{string specifying the name of the signal as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} - -\item{params}{named list containing modeling and data settings. Must include -the following elements: `ref_lag`, `testing_window`, `test_dates`, -`training_days`, `num_col`, `denom_col`, `taus`, `lambda`, `export_dir`, -`lp_solver`, `input_dir`, `cache_dir`, `geo_levels`, and `value_types`.} - -\item{sub_dir}{string specifying the indicator-specific directory within -the general input directory `params$input_dir`} -} -\description{ -List valid input files. 
-} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_model.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_model.Rd deleted file mode 100644 index 5eeb43213..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_model.Rd +++ /dev/null @@ -1,39 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model.R -\name{get_model} -\alias{get_model} -\title{Train model using quantile regression with Lasso penalty, or load from disk} -\usage{ -get_model( - model_path, - train_data, - covariates, - tau, - lambda, - lp_solver, - train_models -) -} -\arguments{ -\item{model_path}{path to read model from or to save model to} - -\item{train_data}{Data Frame containing training data} - -\item{covariates}{character vector of column names serving as the covariates for the model} - -\item{tau}{decimal quantile to be predicted. Values must be between 0 and 1.} - -\item{lambda}{the level of lasso penalty} - -\item{lp_solver}{string specifying the lp solver to use in -Quantgen fitting. Either "glpk" or "gurobi". For faster -optimization, use Gurobi (requires separate installation -of the `gurobi` package).} - -\item{train_models}{boolean indicating whether to train models (TRUE). If -FALSE previously trained models (stored locally) will be used instead. -Default is TRUE.} -} -\description{ -Train model using quantile regression with Lasso penalty, or load from disk -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_populous_counties.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_populous_counties.Rd deleted file mode 100644 index 9f53bfe65..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_populous_counties.Rd +++ /dev/null @@ -1,11 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/utils.R -\name{get_populous_counties} -\alias{get_populous_counties} -\title{Subset list of counties to those included in the 200 most populous in the US} -\usage{ -get_populous_counties() -} -\description{ -Subset list of counties to those included in the 200 most populous in the US -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_weekofmonth.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_weekofmonth.Rd deleted file mode 100644 index 08d340d7f..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/get_weekofmonth.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/preprocessing.R -\name{get_weekofmonth} -\alias{get_weekofmonth} -\title{Get week of a month info according to a date} -\usage{ -get_weekofmonth(date) -} -\arguments{ -\item{date}{Date object} -} -\value{ -a integer indicating which week it is in a month -} -\description{ -All the dates on or before the ith Sunday but after the (i-1)th Sunday -is considered to be the ith week. Notice that - If there are 4 or 5 weeks in total, the ith weeks is labeled as i - and the dates in the 5th week this month are actually in the same - week with the dates in the 1st week next month and those dates are - sparse. 
Thus, we assign the dates in the 5th week to the 1st week. - If there are 6 weeks in total, the 1st, 2nd, 3rd, 4th, 5th, 6th weeks - are labeled as c(1, 1, 2, 3, 4, 1) which means we will merge the first, - second and the last weeks together. -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/main.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/main.Rd deleted file mode 100644 index ae211b289..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/main.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/main.R -\name{main} -\alias{main} -\title{Perform backfill correction on all desired signals and geo levels} -\usage{ -main(params) -} -\arguments{ -\item{params}{named list containing modeling and data settings. Must include -the following elements: `ref_lag`, `testing_window`, `test_dates`, -`training_days`, `num_col`, `denom_col`, `taus`, `lambda`, `export_dir`, -`lp_solver`, `input_dir`, `cache_dir`, `geo_levels`, and `value_types`.} -} -\description{ -Perform backfill correction on all desired signals and geo levels -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/main_local.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/main_local.Rd deleted file mode 100644 index ae6ef023f..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/main_local.Rd +++ /dev/null @@ -1,55 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tooling.R -\name{main_local} -\alias{main_local} -\title{Main function to correct a single local signal} -\usage{ -main_local( - input_dir, - export_dir, - test_start_date, - test_end_date, - num_col, - denom_col, - value_type = c("count", "fraction"), - training_days = TRAINING_DAYS, - testing_window = TESTING_WINDOW, - lambda = LAMBDA, - ref_lag = REF_LAG, - lp_solver = LP_SOLVER -) -} -\arguments{ -\item{input_dir}{path to the directory containing input data} - -\item{export_dir}{path to directory to save output to} - -\item{test_start_date}{Date or string in the format "YYYY-MM-DD" to start -making predictions on} - -\item{test_end_date}{Date or string in the format "YYYY-MM-DD" to stop -making predictions on} - -\item{num_col}{name of numerator column in the input dataframe} - -\item{denom_col}{name of denominator column in the input dataframe} - -\item{value_type}{string describing signal type. Either "count" or "fraction".} - -\item{training_days}{integer number of days to use for training} - -\item{testing_window}{the testing window used for saving the runtime. Could -set it to be 1 if time allows} - -\item{lambda}{the level of lasso penalty} - -\item{ref_lag}{max lag to use for training} - -\item{lp_solver}{string specifying the lp solver to use in -Quantgen fitting. Either "glpk" or "gurobi". 
For faster -optimization, use Gurobi (requires separate installation -of the `gurobi` package).} -} -\description{ -Main function to correct a single local signal -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/model_training_and_testing.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/model_training_and_testing.Rd deleted file mode 100644 index 225a555a9..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/model_training_and_testing.Rd +++ /dev/null @@ -1,81 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model.R -\name{model_training_and_testing} -\alias{model_training_and_testing} -\title{Fetch model and use to generate predictions/perform corrections} -\usage{ -model_training_and_testing( - train_data, - test_data, - taus, - covariates, - lp_solver, - lambda, - test_lag, - geo, - value_type, - model_save_dir, - indicator, - signal, - geo_level, - signal_suffix, - training_end_date, - train_models = TRUE, - make_predictions = TRUE -) -} -\arguments{ -\item{train_data}{Data Frame containing training data} - -\item{test_data}{Data frame for testing} - -\item{taus}{numeric vector of quantiles to be predicted. Values -must be between 0 and 1.} - -\item{covariates}{character vector of column names serving as the covariates for the model} - -\item{lp_solver}{string specifying the lp solver to use in -Quantgen fitting. Either "glpk" or "gurobi". For faster -optimization, use Gurobi (requires separate installation -of the `gurobi` package).} - -\item{lambda}{the level of lasso penalty} - -\item{test_lag}{integer number of days ago to predict for} - -\item{geo}{string specifying the name of the geo region (e.g. FIPS -code for counties)} - -\item{value_type}{string describing signal type. Either "count" or "fraction".} - -\item{model_save_dir}{directory containing trained models} - -\item{indicator}{string specifying the name of the indicator as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} - -\item{signal}{string specifying the name of the signal as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} - -\item{geo_level}{string describing geo coverage of input data. Either "state" -or "county".} - -\item{signal_suffix}{string specifying value column name -ending to be appended to standard value column names from -`params$num_col` and `params$denom_col`. Used for non-standard -value column names and when processing multiple signals from a -single input dataframe, as with `quidel`'s age buckets.} - -\item{training_end_date}{Most recent training date} - -\item{train_models}{boolean indicating whether to train models (TRUE). If -FALSE previously trained models (stored locally) will be used instead. -Default is TRUE.} - -\item{make_predictions}{boolean indicating whether to generate and save -corrections (TRUE) or not. 
Default is TRUE.} -} -\description{ -Fetch model and use to generate predictions/perform corrections -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/objective.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/objective.Rd deleted file mode 100644 index 375b69c2e..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/objective.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/beta_prior_estimation.R -\name{objective} -\alias{objective} -\title{Generate objection function} -\usage{ -objective(theta, x, prob, ...) -} -\arguments{ -\item{theta}{parameters for the distribution in log scale} - -\item{x}{vector of quantiles} - -\item{prob}{the expected probabilities} - -\item{...}{additional arguments} -} -\description{ -Generate objection function -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/read_data.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/read_data.Rd deleted file mode 100644 index 1b5f24726..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/read_data.Rd +++ /dev/null @@ -1,14 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/io.R -\name{read_data} -\alias{read_data} -\title{Read a parquet file into a dataframe} -\usage{ -read_data(input_dir) -} -\arguments{ -\item{input_dir}{path to the directory containing input data} -} -\description{ -Read a parquet file into a dataframe -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/read_params.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/read_params.Rd deleted file mode 100644 index 426db62f9..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/read_params.Rd +++ /dev/null @@ -1,55 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/utils.R -\name{read_params} -\alias{read_params} -\title{Return params file as an R list} -\usage{ -read_params( - path = "params.json", - template_path = "params.json.template", - train_models = TRUE, - make_predictions = TRUE -) -} -\arguments{ -\item{path}{path to the parameters file; if not present, will try to copy the file -"params.json.template"} - -\item{template_path}{path to the template parameters file} - -\item{train_models}{boolean indicating whether to train models (TRUE). If -FALSE previously trained models (stored locally) will be used instead. -Default is TRUE.} - -\item{make_predictions}{boolean indicating whether to generate and save -corrections (TRUE) or not. Default is TRUE.} -} -\value{ -a named list of parameters values -} -\description{ -Reads a parameters file. If the file does not exist, the function will create a copy of -'"params.json.template" and read from that. -} -\details{ -A params list should contain the following fields. If not included, -they will be filled with default values when possible. - -params$ref_lag: reference lag, after x days, the update is considered to be - the response. 
60 is a reasonable choice for CHNG outpatient data -params$input_dir: link to the input data file -params$testing_window: the testing window used for saving the runtime. Could - set it to be 1 if time allows -params$test_dates: list of two elements, the first one is the start date and - the second one is the end date -params$training_days: set it to be 270 or larger if you have enough data -params$num_col: the column name for the counts of the numerator, e.g. the - number of COVID claims -params$denom_col: the column name for the counts of the denominator, e.g. the - number of total claims -params$geo_level: character vector of "state" and "county", by default -params$taus: vector of considered quantiles -params$lambda: the level of lasso penalty -params$export_dir: directory to save corrected data to -params$lp_solver: LP solver to use in quantile_lasso(); "gurobi" or "glpk" -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/run_backfill.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/run_backfill.Rd deleted file mode 100644 index aab815222..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/run_backfill.Rd +++ /dev/null @@ -1,51 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/main.R -\name{run_backfill} -\alias{run_backfill} -\title{Get backfill-corrected estimates for a single signal + geo combination} -\usage{ -run_backfill( - df, - params, - training_end_date, - refd_col = "time_value", - lag_col = "lag", - signal_suffixes = c(""), - indicator = "", - signal = "" -) -} -\arguments{ -\item{df}{Data Frame of aggregated counts within a single location -reported for each reference date and issue date.} - -\item{params}{named list containing modeling and data settings. Must include -the following elements: `ref_lag`, `testing_window`, `test_dates`, -`training_days`, `num_col`, `denom_col`, `taus`, `lambda`, `export_dir`, -`lp_solver`, `input_dir`, `cache_dir`, `geo_levels`, and `value_types`.} - -\item{training_end_date}{the most recent training date} - -\item{refd_col}{string specifying name of reference date field within -the input dataframe.} - -\item{lag_col}{string specifying name of lag field within -the input dataframe.} - -\item{signal_suffixes}{character vector specifying value column name -endings to be appended to standard value column names from -`params$num_col` and `params$denom_col`. Used for non-standard -value column names and when processing multiple signals from a -single input dataframe, as with `quidel`'s age buckets.} - -\item{indicator}{string specifying the name of the indicator as used in -`parquet` input data filenames. One indicator can be associated -with multiple signals.} - -\item{signal}{string specifying the name of the signal as used in -`parquet` input data filenames. 
One indicator can be associated -with multiple signals.} -} -\description{ -Get backfill-corrected estimates for a single signal + geo combination -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/run_backfill_local.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/run_backfill_local.Rd deleted file mode 100644 index 6ee6bce71..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/run_backfill_local.Rd +++ /dev/null @@ -1,55 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tooling.R -\name{run_backfill_local} -\alias{run_backfill_local} -\title{Corrected estimates from a single local signal} -\usage{ -run_backfill_local( - df, - export_dir, - test_date_list, - value_cols, - value_type, - taus = TAUS, - test_lags = TEST_LAGS, - training_days = TRAINING_DAYS, - testing_window = TESTING_WINDOW, - ref_lag = REF_LAG, - lambda = LAMBDA, - lp_solver = LP_SOLVER -) -} -\arguments{ -\item{df}{Data Frame of aggregated counts within a single location -reported for each reference date and issue date.} - -\item{export_dir}{path to directory to save output to} - -\item{test_date_list}{Date vector of dates to make predictions for} - -\item{value_cols}{character vector of numerator and/or denominator field names} - -\item{value_type}{string describing signal type. Either "count" or "fraction".} - -\item{taus}{numeric vector of quantiles to be predicted. Values -must be between 0 and 1.} - -\item{test_lags}{integer vector of number of days ago to predict for} - -\item{training_days}{integer number of days to use for training} - -\item{testing_window}{the testing window used for saving the runtime. Could -set it to be 1 if time allows} - -\item{ref_lag}{max lag to use for training} - -\item{lambda}{the level of lasso penalty} - -\item{lp_solver}{string specifying the lp solver to use in -Quantgen fitting. Either "glpk" or "gurobi". For faster -optimization, use Gurobi (requires separate installation -of the `gurobi` package).} -} -\description{ -Corrected estimates from a single local signal -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/subset_valid_files.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/subset_valid_files.Rd deleted file mode 100644 index 0fde2714c..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/subset_valid_files.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/io.R -\name{subset_valid_files} -\alias{subset_valid_files} -\title{Return file names only if they contain data to be used in training} -\usage{ -subset_valid_files(files_list, file_type = c("daily", "rollup"), params) -} -\arguments{ -\item{files_list}{character vector of input files of a given `file_type`} - -\item{file_type}{string specifying time period coverage of input files. -Either "daily" or "rollup"} - -\item{params}{named list containing modeling and data settings. Must include -the following elements: `ref_lag`, `testing_window`, `test_dates`, -`training_days`, `num_col`, `denom_col`, `taus`, `lambda`, `export_dir`, -`lp_solver`, `input_dir`, `cache_dir`, `geo_levels`, and `value_types`.} -} -\description{ -Parse filenames to find included dates. 
Use different patterns if file -includes daily or rollup (multiple days) data. -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/training_days_check.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/training_days_check.Rd deleted file mode 100644 index 1692da955..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/training_days_check.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/utils.R -\name{training_days_check} -\alias{training_days_check} -\title{Check available training days} -\usage{ -training_days_check(issue_date, training_days = TRAINING_DAYS) -} -\arguments{ -\item{issue_date}{contents of input data's `issue_date` column} - -\item{training_days}{integer number of days to use for training} -} -\description{ -Check available training days -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/validity_checks.Rd b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/validity_checks.Rd deleted file mode 100644 index 1e55d8d8e..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/man/validity_checks.Rd +++ /dev/null @@ -1,32 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/utils.R -\name{validity_checks} -\alias{validity_checks} -\title{Check input data for validity} -\usage{ -validity_checks(df, value_type, num_col, denom_col, signal_suffixes) -} -\arguments{ -\item{df}{Data Frame of aggregated counts within a single location -reported for each reference date and issue date.} - -\item{value_type}{string describing signal type. Either "count" or "fraction".} - -\item{num_col}{name of numerator column in the input dataframe} - -\item{denom_col}{name of denominator column in the input dataframe} - -\item{signal_suffixes}{character vector specifying value column name -endings to be appended to standard value column names from -`params$num_col` and `params$denom_col`. 
Used for non-standard -value column names and when processing multiple signals from a -single input dataframe, as with `quidel`'s age buckets.} -} -\value{ -list of input dataframe augmented with lag column, if it - didn't already exist, and character vector of one or two value - column names, depending on requested `value_type` -} -\description{ -Check input data for validity -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat.R deleted file mode 100644 index 83f3bb312..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat.R +++ /dev/null @@ -1,4 +0,0 @@ -library(testthat) -library(delphiBackfillCorrection) - -test_check("delphiBackfillCorrection", stop_on_warning = FALSE) diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/helper-relativize.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/helper-relativize.R deleted file mode 100644 index 3d62d6a7f..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/helper-relativize.R +++ /dev/null @@ -1,13 +0,0 @@ -## Helper functions to relativize paths to the testing directory, so tests can -## be run via R CMD CHECK and do not depend on the current working directory -## being tests/testthat/. - -library(testthat) - -relativize_params <- function(params) { - params$export_dir <- test_path(params$export_dir) - params$cache_dir <- test_path(params$cache_dir) - params$input_dir <- test_path(params$input_dir) - - return(params) -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/params-run.json.template b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/params-run.json.template deleted file mode 100644 index f2224855a..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/params-run.json.template +++ /dev/null @@ -1,8 +0,0 @@ -{ - "training_end_date": "2022-01-01", - "training_days": 7, - "ref_lag": 3, - "input_dir": "./input", - "export_dir": "./output", - "cache_dir": "./cache" -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/params-test.json.template b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/params-test.json.template deleted file mode 100644 index fb8309e94..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/params-test.json.template +++ /dev/null @@ -1,3 +0,0 @@ -{ - "input_dir": "./test.tempt" -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R deleted file mode 100644 index 59ea2beda..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-beta_prior_estimation.R +++ 
/dev/null @@ -1,130 +0,0 @@ -context("Testing helper functions for beta prior estimation") - -# Constants -indicator <- "chng" -signal <- "outpatient" -geo_level <- "state" -signal_suffix <- "" -lambda <- 0.1 -geo <- "pa" -value_type <- "fraction" -model_save_dir <- "./cache" -training_end_date <- as.Date("2022-01-01") - -# Generate Test Data -main_covariate <- c("log_value_7dav") -null_covariates <- c("value_raw_num", "value_raw_denom", - "value_7dav_num", "value_7dav_denom", - "value_prev_7dav_num", "value_prev_7dav_denom") -dayofweek_covariates <- c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", - "Fri_ref", "Sat_ref") -response <- "log_value_target" - -set.seed(2022) -train_beta_vs <- log(rbeta(1000, 2, 5)) -test_beta_vs <- log(rbeta(50, 2, 5)) -train_data <- data.frame(log_value_7dav = train_beta_vs, - log_value_target = train_beta_vs) -train_data$value_target_num <- exp(train_beta_vs) * 100 -train_data$value_target_denom <- 100 -test_data <- data.frame(log_value_7dav = test_beta_vs, - log_value_target = test_beta_vs) -for (cov in null_covariates){ - train_data[[cov]] <- 0 - test_data[[cov]] <- 0 -} -for (cov in c(dayofweek_covariates, "Sun_ref")){ - train_data[[cov]] <- 1 - test_data[[cov]] <- 1 -} -prior_test_data <- test_data -covariates <- c(main_covariate, dayofweek_covariates) - - - -test_that("testing the sum of squared error", { - fit <- c(0, 1, 0) - actual <- c(1, 1, 1) - - expected <- 1^2 + 1^2 - computed <- delta(fit, actual) - expect_equal(expected, computed) -}) - - -test_that("testing the squared error objection function given the beta prior", { - theta <- c(log(1), log(2)) - x <- qbeta(TAUS, 1, 2) - - expected <-0 - computed <- objective(theta, x, TAUS) - expect_equal(expected, computed) -}) - - -test_that("testing the prior estimation", { - dw <- "Sat_ref" - priors <- est_priors(train_data, prior_test_data, geo, value_type, dw, TAUS, - covariates, response, LP_SOLVER, lambda, - indicator, signal, geo_level, signal_suffix, - training_end_date, model_save_dir) - alpha <- priors[2] - beta <- priors[1] - alpha - expect_true((alpha > 1) & (alpha < 3)) - expect_true((beta > 4) & (beta < 6)) - - for (idx in 1:length(TAUS)) { - tau <- TAUS[idx] - model_file_name <- generate_filename(indicator, signal, - geo_level, signal_suffix, lambda, - geo=geo, dw=dw, tau=tau, - value_type=value_type, - training_end_date=training_end_date, - beta_prior_mode=TRUE) - model_path <- file.path(model_save_dir, model_file_name) - expect_true(file.exists(model_path)) - file.remove(model_path) - } -}) - - -test_that("testing the fraction adjustment with pseudo counts", { - value_raw <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_raw_num", "value_raw_denom") - expect_true(all(value_raw == 1/100)) - - dw <- "Sat_ref" - value_raw <- frac_adj_with_pseudo(train_data, dw, 1, 100, "value_raw_num", "value_raw_denom") - expect_true(all(value_raw == 1/100)) -}) - - -test_that("testing the main beta prior adjustment function", { - set.seed(1) - updated_data <- frac_adj(train_data, test_data, prior_test_data, - indicator, signal, geo_level, signal_suffix, - lambda, value_type, geo, - training_end_date, model_save_dir, - taus = TAUS, lp_solver = LP_SOLVER) - updated_train_data <- updated_data[[1]] - updated_test_data <- updated_data[[2]] - - for (dw in c(dayofweek_covariates, "Sun_ref")){ - for (idx in 1:length(TAUS)) { - tau <- TAUS[idx] - model_file_name <- generate_filename(indicator, signal, - geo_level, signal_suffix, lambda, - geo=geo, dw=dw, tau=tau, - value_type=value_type, - 
training_end_date=training_end_date, - beta_prior_mode=TRUE) - model_path <- file.path(model_save_dir, model_file_name) - expect_true(file.exists(model_path)) - file.remove(model_path) - } - } - - expect_true(unique(updated_train_data$value_raw) == unique(updated_test_data$value_raw)) - expect_true(all(updated_train_data$value_raw < 3/(3+4))) - expect_true(all(updated_train_data$value_raw > 1/(1+6))) -}) - diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-io.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-io.R deleted file mode 100644 index 07636e140..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-io.R +++ /dev/null @@ -1,118 +0,0 @@ -library(arrow) - -context("Testing io helper functions") - -# Constants -indicator <- "chng" -signal <- "outpatient" -geo_level <- "state" -signal_suffix <- "" -lambda <- 0.1 -geo <- "pa" -value_type <- "fraction" -date_format = "%Y%m%d" -training_end_date <- as.Date("2022-01-01") - -create_dir_not_exist("./input") -create_dir_not_exist("./output") -create_dir_not_exist("./cache") - -test_that("testing exporting the output file", { - params <- read_params("params-run.json", "params-run.json.template") - - test_data <- data.frame(test=TRUE) - coef_data <- data.frame(test=TRUE) - - export_test_result(test_data, coef_data, indicator, signal, - geo_level, signal_suffix, lambda, - training_end_date, - value_type, params$export_dir) - prediction_file <- file.path(params$export_dir, "prediction_2022-01-01_chng_outpatient_state_lambda0.1_fraction.csv") - coefs_file <- file.path(params$export_dir, "coefs_2022-01-01_chng_outpatient_state_lambda0.1_fraction.csv") - - expect_true(file.exists(prediction_file)) - expect_true(file.exists(coefs_file)) - - # Remove - file.remove(prediction_file) - file.remove(coefs_file) - file.remove("params-run.json") -}) - - -test_that("testing creating file name pattern", { - params <- read_params("params-run.json", "params-run.json.template") - - daily_pattern <- create_name_pattern(indicator, signal, "daily") - rollup_pattern <- create_name_pattern(indicator, signal, "rollup") - - # Create test files - daily_data <- data.frame(test=TRUE) - daily_file_name <- file.path(params$input_dir, - str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")) - write_parquet(daily_data, daily_file_name) - - rollup_file_name <- file.path(params$input_dir, - str_interp("chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")) - rollup_data <- data.frame(test=TRUE) - write_parquet(rollup_data, rollup_file_name) - - - filtered_daily_file <- list.files( - params$input_dir, pattern = daily_pattern, full.names = TRUE) - expect_equal(filtered_daily_file, daily_file_name) - - filtered_rollup_file <- list.files( - params$input_dir, pattern = rollup_pattern, full.names = TRUE) - expect_equal(filtered_rollup_file, rollup_file_name) - - file.remove(daily_file_name) - file.remove(rollup_file_name) - file.remove("params-run.json") -}) - - -test_that("testing the filtration of the files for training and predicting", { - params <- read_params("params-run.json", "params-run.json.template") - - daily_files_list <- c(file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY-15, date_format)}.parquet")), - file.path(params$input_dir, 
str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")), - file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY, date_format)}.parquet"))) - daily_valid_files <- subset_valid_files(daily_files_list, "daily", params) - expect_equal(daily_valid_files, daily_files_list[2]) - - rollup_files_list <- c(file.path(params$input_dir, str_interp( - "chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY-11, date_format)}.parquet")), - file.path(params$input_dir, str_interp( - "chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")), - file.path(params$input_dir, str_interp( - "chng_outpatient_from_${format(TODAY, date_format)}_to_${format(TODAY+3, date_format)}.parquet"))) - rollup_valid_files <- subset_valid_files(rollup_files_list, "rollup", params) - expect_equal(rollup_valid_files, rollup_files_list[2]) - - file.remove("params-run.json") -}) - -test_that("testing fetching list of files for training and predicting", { - params <- read_params("params-run.json", "params-run.json.template") - - daily_data <- data.frame(test=TRUE) - daily_file_name <- file.path(params$input_dir, - str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")) - write_parquet(daily_data, daily_file_name) - - rollup_file_name <- file.path(params$input_dir, - str_interp("chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")) - rollup_data <- data.frame(test=TRUE) - write_parquet(rollup_data, rollup_file_name) - - - files <- get_files_list(indicator, signal, params) - expect_true(all(files == c(daily_file_name, rollup_file_name))) - - file.remove(daily_file_name) - file.remove(rollup_file_name) - file.remove("params-run.json") -}) - - diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-model.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-model.R deleted file mode 100644 index 2a1221344..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-model.R +++ /dev/null @@ -1,173 +0,0 @@ -context("Testing the helper functions for modeling") - -# Constants -indicator <- "chng" -signal <- "outpatient" -geo_level <- "state" -signal_suffix <- "" -lambda <- 0.1 -test_lag <- 1 -model_save_dir <- "./cache" -geo <- "pa" -value_type <- "fraction" -training_end_date <- as.Date("2022-01-01") - -# Generate Test Data -main_covariate <- c("log_value_7dav") -null_covariates <- c("value_raw_num", "value_raw_denom", - "value_7dav_num", "value_7dav_denom", - "value_prev_7dav_num", "value_prev_7dav_denom") -dayofweek_covariates <- c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", - "Fri_ref", "Sat_ref") -response <- "log_value_target" -train_beta_vs <- log(rbeta(1000, 2, 5)) -test_beta_vs <- log(rbeta(61, 2, 5)) -train_data <- data.frame(log_value_7dav = train_beta_vs, - log_value_target = train_beta_vs) -train_data$value_target_num <- exp(train_beta_vs) * 100 -train_data$value_target_denom <- 100 -test_data <- data.frame(log_value_7dav = test_beta_vs, - log_value_target = test_beta_vs) -for (cov in null_covariates){ - train_data[[cov]] <- 0 - test_data[[cov]] <- 0 -} -for (cov in c(dayofweek_covariates, "Sun_ref")){ - train_data[[cov]] <- 1 - test_data[[cov]] <- 1 -} -covariates <- c(main_covariate, dayofweek_covariates) - - -test_that("testing the generation of model 
filename prefix", { - model_file_name <- generate_filename(indicator, signal, - geo_level, signal_suffix, lambda) - expected <- "chng_outpatient_state_lambda0.1.model" - expect_equal(model_file_name, expected) -}) - -test_that("testing the evaluation", { - for (tau in TAUS){ - test_data[[paste0("predicted_tau", as.character(tau))]] <- log(quantile(exp(train_beta_vs), tau)) - } - result <- evaluate(test_data, TAUS) - expect_true(mean(result$wis) < 0.3) -}) - -test_that("testing generating or loading the model", { - # Check the model that does not exist - tau = 0.5 - model_file_name <- generate_filename(indicator, signal, - geo_level, signal_suffix, lambda, - geo=geo, test_lag=test_lag, tau=tau) - model_path <- file.path(model_save_dir, model_file_name) - expect_true(!file.exists(model_path)) - - # Generate the model and check again - obj <- get_model(model_path, train_data, covariates, tau, - lambda, LP_SOLVER, train_models=TRUE) - expect_true(file.exists(model_path)) - created <- file.info(model_path)$ctime - - # Check that the model was not generated again. - obj <- get_model(model_path, train_data, covariates, tau, - lambda, LP_SOLVER, train_models=FALSE) - expect_equal(file.info(model_path)$ctime, created) - - expect_silent(file.remove(model_path)) -}) - -test_that("testing model training and testing", { - result <- model_training_and_testing(train_data, test_data, TAUS, covariates, - LP_SOLVER, lambda, test_lag, - geo, value_type, model_save_dir, - indicator, signal, - geo_level, signal_suffix, - training_end_date, - train_models = TRUE, - make_predictions = TRUE) - test_result <- result[[1]] - coef_df <- result[[2]] - - for (tau in TAUS){ - cov <- paste0("predicted_tau", as.character(tau)) - expect_true(cov %in% colnames(test_result)) - - model_file_name <- generate_filename(indicator, signal, - geo_level, signal_suffix, lambda, - geo=geo, test_lag=test_lag, tau=tau, - training_end_date=training_end_date) - model_path <- file.path(model_save_dir, model_file_name) - expect_true(file.exists(model_path)) - - expect_silent(file.remove(model_path)) - } - - for (cov in covariates){ - cov <- paste(cov, "coef", sep="_") - expect_true(cov %in% colnames(coef_df)) - } -}) - -test_that("testing adding square root scale", { - expect_error(result <- add_sqrtscale(train_data, test_data, 1, "value_raw"), - "value raw does not exist in training data!") - - train_data$value_raw <- rbeta(nrow(train_data), 2, 5) - expect_error(result <- add_sqrtscale(train_data, test_data, 1, "value_raw"), - "value raw does not exist in testing data!") - - test_data$value_raw <- rbeta(nrow(test_data), 2, 5) - expect_silent(result <- add_sqrtscale(train_data, test_data, 1, "value_raw")) - - new_train_data <- result[[1]] - new_test_data <- result[[2]] - sqrtscales <- result[[3]] - expect_true(length(sqrtscales) == 4) - for (cov in sqrtscales){ - expect_true(cov %in% colnames(new_train_data)) - expect_true(cov %in% colnames(new_test_data)) - } - expect_true(all(rowSums(new_train_data[sqrtscales]) %in% c(0, 1))) - expect_true(all(rowSums(new_test_data[sqrtscales]) %in% c(0, 1))) - - for (i in 0:2){ - m_l <- max(new_train_data[new_train_data[[paste0("sqrty", as.character(i))]] == 1, "value_raw"]) - m_r <- min(new_train_data[new_train_data[[paste0("sqrty", as.character(i+1))]] == 1, "value_raw"]) - expect_true(m_l <= m_r) - } - -}) - -test_that("testing data filteration", { - train_data$lag <- rep(0:60, nrow(train_data))[1:nrow(train_data)] - test_data$lag <- rep(0:60, nrow(test_data))[1:nrow(test_data)] - - # When test lag is 
small - test_lag <- 5 - result <- data_filteration(test_lag, train_data, test_data, 2) - train_df <- result[[1]] - test_df <- result[[2]] - expect_true(max(train_df$lag) == test_lag+2) - expect_true(min(train_df$lag) == test_lag-2) - expect_true(all(test_df$lag == test_lag)) - - # When test lag is large - test_lag <- 48 - result <- data_filteration(test_lag, train_data, test_data, 2) - train_df <- result[[1]] - test_df <- result[[2]] - expect_true(max(test_df$lag) == test_lag+7) - expect_true(min(test_df$lag) == test_lag-6) - - # Make sure that all lags are tested - included_lags = c() - for (test_lag in c(1:14, 21, 35, 51)){ - result <- data_filteration(test_lag, train_data, test_data, 2) - test_df <- result[[2]] - included_lags <- c(included_lags, unique(test_df$lag)) - } - expect_true(all(1:60 %in% included_lags)) -}) - - diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R deleted file mode 100644 index 8bde8c68e..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-preprocessing.R +++ /dev/null @@ -1,132 +0,0 @@ -context("Testing preprocessing helper functions") - -refd_col <- "time_value" -lag_col <- "lag" -value_col <- "Counts_Products_Denom" -min_refd <- as.Date("2022-01-01") -max_refd <- as.Date("2022-01-07") -ref_lag <- 7 -fake_df <- data.frame(time_value = c(as.Date("2022-01-03"), as.Date("2022-01-03"), - as.Date("2022-01-03"), as.Date("2022-01-03"), - as.Date("2022-01-04"), as.Date("2022-01-04"), - as.Date("2022-01-04"), as.Date("2022-01-05"), - as.Date("2022-01-05")), - lag = c(0, 1, 3, 7, 0, 6, 7, 0, 7), - Counts_Products_Denom=c(100, 200, 500, 1000, 0, 200, 220, 50, 300)) -wd <- c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat") -wm <- c("W1_issue", "W2_issue", "W3_issue") - - -test_that("testing rows filling for missing lags", { - # Make sure all reference date have enough rows for updates - df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) - n_refds <- as.numeric(max_refd - min_refd)+1 - - expect_equal(nrow(df_new), n_refds*(ref_lag+31)) - expect_equal(df_new %>% drop_na(), fake_df) -}) - - -test_that("testing NA filling for missing udpates", { - # Make sure all the updates are valid integers - - # Assuming the input data does not have enough rows for consecutive lags - expect_error(fill_missing_updates(fake_df, value_col, refd_col, lag_col), - "Risk exists in forward filling") - - # Assuming the input data is already prepared - df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) - n_refds <- as.numeric(max_refd - min_refd)+1 - backfill_df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) - - expect_equal(nrow(backfill_df), n_refds*(ref_lag+31)) - - for (d in seq(min_refd, max_refd, by="day")) { - expect_true(all(diff(backfill_df[backfill_df[,refd_col]==d, "value_raw"])>=0 )) - } -}) - - -test_that("testing the calculation of 7-day moving average", { - df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) - df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) - df$issue_date <- df[[refd_col]] + df[[lag_col]] - pivot_df <- df[order(df$issue_date, decreasing=FALSE), ] %>% - pivot_wider(id_cols=refd_col, names_from="issue_date", - values_from="value_raw") - pivot_df[is.na(pivot_df)] = 0 - 
backfill_df <- get_7dav(pivot_df, refd_col) - - - output <- backfill_df[backfill_df[[refd_col]] == as.Date("2022-01-07"), "value_raw"] - expected <- colSums(pivot_df[, -1]) / 7 - expect_true(all(output == expected)) -}) - -test_that("testing the data shifting", { - shifted_df <- add_shift(fake_df, 1, refd_col) - shifted_df[, refd_col] <- as.Date(shifted_df[, refd_col]) - 1 - - expect_equal(fake_df, shifted_df) -}) - - -test_that("testing adding columns for each day of a week", { - df_new <- add_dayofweek(fake_df, refd_col, "_ref", wd) - - expect_equal(ncol(fake_df) + 7, ncol(df_new)) - expect_true(all(rowSums(df_new[, -c(1:ncol(fake_df))]) == 1)) - expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-03"), "Mon_ref"] == 1)) - expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-05"), "Wed_ref"] == 1)) -}) - - -test_that("testing the calculation of week of a month", { - expect_equal(get_weekofmonth(as.Date("2021-12-31")), 1) - expect_equal(get_weekofmonth(as.Date("2022-01-01")), 1) - expect_equal(get_weekofmonth(as.Date("2022-01-02")), 1) - expect_equal(get_weekofmonth(as.Date("2022-01-09")), 2) - - expect_equal(get_weekofmonth(as.Date("2022-09-01")), 1) - expect_equal(get_weekofmonth(as.Date("2022-09-04")), 2) - expect_equal(get_weekofmonth(as.Date("2022-09-24")), 4) - expect_equal(get_weekofmonth(as.Date("2022-09-25")), 1) - - expect_equal(get_weekofmonth(as.Date("2022-10-01")), 1) - expect_equal(get_weekofmonth(as.Date("2022-10-02")), 1) - expect_equal(get_weekofmonth(as.Date("2022-10-09")), 2) - expect_equal(get_weekofmonth(as.Date("2022-10-16")), 3) - expect_equal(get_weekofmonth(as.Date("2022-10-23")), 4) - expect_equal(get_weekofmonth(as.Date("2022-10-30")), 1) - -}) - -test_that("testing adding columns for each week of a month", { - df_new <- add_weekofmonth(fake_df, refd_col, wm) - - expect_equal(ncol(fake_df) + 3, ncol(df_new)) - expect_true(all(rowSums(df_new[, -c(1:ncol(fake_df))]) == 1)) - expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-03"), "W2_issue"] == 1)) -}) - - -test_that("testing adding 7 day avg and target", { - df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) - backfill_df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) - df_new <- add_7davs_and_target(backfill_df, "value_raw", refd_col, lag_col, ref_lag) - - # Existing columns: - # time_value: reference date - # value_raw: raw counts - # lag: number of days between issue date and reference date - # Added columns - # issue_date: report/issue date - # value_7dav: 7day avg of the raw counts - # value_prev_7dav: 7day avg of the counts from -14 days to -8 days - # value_target: updated counts on the target date - # target_date: the date ref_lag days after the reference date - # and 5 log columns - expect_equal(ncol(df_new), 3 + 10) - expect_equal(nrow(df_new), 7 * (ref_lag + 30 + 1)) -}) - diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-utils.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-utils.R deleted file mode 100644 index a733f2a1d..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00_pkg_src/delphiBackfillCorrection/unit-tests/testthat/test-utils.R +++ /dev/null @@ -1,136 +0,0 @@ -context("Testing utils helper functions") - -test_that("testing create directory if not exist", { - # If not exists - path = "test.test" - create_dir_not_exist(path) - expect_true(file.exists(path)) 
- - # If already exists - create_dir_not_exist(path) - expect_true(file.exists(path)) - - # Remove - unlink(path, recursive = TRUE) - expect_true(!file.exists(path)) -}) - - -test_that("testing number of available issue dates for training", { - start_date <- as.Date("2022-01-01") - end_date <- as.Date("2022-01-09") - training_days = 10 - issue_date <- seq(start_date, end_date, by = "days") - expect_warning(training_days_check(issue_date, training_days = training_days), - "Only 9 days are available at most for training.") - - end_date <- as.Date("2022-01-10") - training_days = 10 - issue_date <- seq(start_date, end_date, by = "days") - expect_silent(training_days_check(issue_date, training_days = training_days)) -}) - -test_that("testing get the top200 populous counties", { - counties <- get_populous_counties() - - expect_true(length(counties) == 200) - expect_true("06037" %in% counties) -}) - -test_that("testing read parameters", { - # No input file - expect_error(read_params(path = "params-test.json", template_path = "params-test.json.template", - train_models = TRUE, make_predictions = TRUE), - "input_dir must be set in `params` and exist") - - # Check parameters - params <- read_json("params-test.json", simplifyVector = TRUE) - # Check initialization - expect_true(!("export_dir" %in% names(params))) - expect_true(!("cache_dir" %in% names(params))) - - expect_true(!("parallel" %in% names(params))) - expect_true(!("parallel_max_cores" %in% names(params))) - - - expect_true(!("taus" %in% names(params))) - expect_true(!("lambda" %in% names(params))) - expect_true(!("lp_solver" %in% names(params))) - expect_true(!("lag_pad" %in% names(params))) - - expect_true(!("taus" %in% names(params))) - expect_true(!("lambda" %in% names(params))) - expect_true(!("lp_solver" %in% names(params))) - - expect_true(!("num_col" %in% names(params))) - expect_true(!("denom_col" %in% names(params))) - expect_true(!("geo_levels" %in% names(params))) - expect_true(!("value_types" %in% names(params))) - - expect_true(!("training_days" %in% names(params))) - expect_true(!("ref_lag" %in% names(params))) - expect_true(!("testing_window" %in% names(params))) - expect_true(!("test_dates" %in% names(params))) - - # Create input file - path = "test.tempt" - create_dir_not_exist(path) - expect_silent(params <- read_params(path = "params-test.json", - template_path = "params-test.json.template", - train_models = TRUE, make_predictions = TRUE)) - unlink(path, recursive = TRUE) - - - expect_true("export_dir" %in% names(params)) - expect_true("cache_dir" %in% names(params)) - - expect_true("parallel" %in% names(params)) - expect_true("parallel_max_cores" %in% names(params)) - - - expect_true("taus" %in% names(params)) - expect_true("lambda" %in% names(params)) - expect_true("lp_solver" %in% names(params)) - - expect_true("taus" %in% names(params)) - expect_true("lambda" %in% names(params)) - expect_true("lp_solver" %in% names(params)) - expect_true("lag_pad" %in% names(params)) - - expect_true("num_col" %in% names(params)) - expect_true("denom_col" %in% names(params)) - expect_true("geo_levels" %in% names(params)) - expect_true("value_types" %in% names(params)) - - expect_true("training_days" %in% names(params)) - expect_true("ref_lag" %in% names(params)) - expect_true("testing_window" %in% names(params)) - expect_true("test_dates" %in% names(params)) - - expect_true(params$export_dir == "./receiving") - expect_true(params$cache_dir == "./cache") - - expect_true(params$parallel == FALSE) - expect_true(params$parallel_max_cores 
== .Machine$integer.max) - - expect_true(all(params$taus == TAUS)) - expect_true(params$lambda == LAMBDA) - expect_true(params$lp_solver == LP_SOLVER) - expect_true(params$lag_pad == LAG_PAD) - - expect_true(params$num_col == "num") - expect_true(params$denom_col == "denom") - expect_true(all(params$geo_levels == c("state", "county"))) - expect_true(all(params$value_types == c("count", "fraction"))) - - expect_true(params$training_days == TRAINING_DAYS) - expect_true(params$ref_lag == REF_LAG) - expect_true(params$testing_window == TESTING_WINDOW) - start_date <- TODAY - params$testing_window - end_date <- TODAY - 1 - expect_true(all(params$test_dates == seq(start_date, end_date, by="days"))) - - expect_silent(file.remove("params-test.json")) -}) - - diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/00install.out b/backfill_corrections/delphiBackfillCorrection.Rcheck/00install.out deleted file mode 100644 index 8e09ae745..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/00install.out +++ /dev/null @@ -1,11 +0,0 @@ -* installing *source* package ‘delphiBackfillCorrection’ ... -** using staged installation -** R -** byte-compile and prepare package for lazy loading -** help -*** installing help indices -** building package indices -** testing if installed package can be loaded from temporary location -** testing if installed package can be loaded from final location -** testing if installed package keeps a record of temporary installation path -* DONE (delphiBackfillCorrection) diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection-manual.pdf b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection-manual.pdf deleted file mode 100644 index 65f45b183ce8d809898987d8e7669d86b536e4ac..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 120174
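For reference only (this sketch is not part of any commit above): a hypothetical `params` list carrying the fields that man/main.Rd and man/read_params.Rd describe as required. Every value below is an illustrative placeholder taken from the documentation and unit tests; the package's actual defaults may differ.

# Hypothetical example -- field names follow main.Rd / read_params.Rd; values are placeholders.
params <- list(
  ref_lag        = 60,                        # read_params.Rd: 60 is reasonable for CHNG outpatient data
  input_dir      = "./input",
  export_dir     = "./receiving",
  cache_dir      = "./cache",
  testing_window = 1,                         # 1 if runtime allows
  test_dates     = c(as.Date("2022-01-01"),   # first element: start date
                     as.Date("2022-01-31")),  # second element: end date
  training_days  = 270,                       # 270 or larger if enough data is available
  num_col        = "num",
  denom_col      = "denom",
  geo_levels     = c("state", "county"),
  value_types    = c("count", "fraction"),
  taus           = c(0.25, 0.5, 0.75),        # placeholder quantiles; use the package's TAUS constant in practice
  lambda         = 0.1,                       # level of lasso penalty
  lp_solver      = "glpk"                     # or "gurobi" for faster optimization
)
# main(params)   # usage as documented in man/main.Rd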
zTbfP>{`y^Dv2trKloGS(FPmcrAm(wY-@if`Ho{c|k?g%A`FEgGUR>A~tF`C63fiHt z*0^}rz{kHMjvvTr6$g$7kTPnoL?!wNLqmU5yO1*deL>pb4zPAbKz?8~)RR|oW&{tO z6*l4Gvg>|1a_=157^#KCUpZEvOWq%=#+YdMp|7L7ktBg2e$IIjJ6GOOALjXvpOQ%{ zW=4y6B=}YK6+ss1F&FQ6Me_@8W#;69M%qN^>8f(=RLBx@}CH$n*Qk!eH1)4oW`Vb&{&OD zwA3~af_|qmdrR$=Q;#2Zy0r1my|5ZqlsvH2H3G`3ss_uEBWS|wQyVCGOL5po zr7hX)rS!_Na?|Pl9XK_1a*&2$^W_<#M^E6%E3MzR?w+bNuE|JNuQ6CWo{cV;GkNUB z|BI;kbH`u-zpzC~JNboa_M_v7DRAWhc|~DFIB)hQuSeMSPUy**uy#@`Q_A@~?Dvkd z09{TuASy9*Tb{j_xx>RWrN>IZw(^!>B^WK6RrM|pcCkf+w(PsDtW4pm+09l zU}K}FiPK$pXNhFmImh*={?qs7EbJn63oKz_jdgMD9U*hIrYDJ(J4qCox#kcU;Con7 z-Fl6kQ$9)uI`c_A=J|9}rF3h}#LfW7ovN!WW=qM3bhB;emfZZsdQQuWORw;mJKu7M zd2B7uh<4>t;U5UM_cZ4Y|D~m(Nj%|Wf@oZSy>x3zh4WF>a6fdVk=5%XLUnW6kbK>FNN}_pg41D z8R;aWQy6`E8-5rL#|8h;1-5@=%CR&4SF`6oVECKWCT%z95xP#OoUH>g5Gl@GXl6mK z>ThYtAw~*pNE%KPm2v%ciibf||B##Z4S-+ZPe(s9bV?`5B$BS6pJo2sOh!#oWsn6I zG*CJ;Xp^zaD2PTC;e%3Iq*G@e9#78nAj;|eFUHO>Mzo+?({0-}PTRI^+qUh~wr$(C zZQHipJ!kS|W^QtmnfbGmz4xl5epDrE)q3lBf2sYtbGo`F#1#v%bQ?<-jN?lo4)0xd zn5(Scf=s*Y()KNhqj+Gohv)M;2L}3S z=tW&zii-AU9l!q!b&ClC;^(LVA~k40!rcfDObB~dh#aE$2p--eV)TD^k2AbfT$KINaiK+(nhJu(e{sH?S_<&JAM>oqYZ z%Pg?g6M=|?Sk4e7BOL2xt6{Y$Ha7;S17J@*k*>MM(G}};-N^iS1xAORDQ?<`=(B1n z$V<5a&Ei_t{>2LF9Jo+6NJwk>&}2$UQFD7y>h5TN(`-Lo)V(qdTp~kXeU2_&HBzG~X&idy?H>CwVN;Z|I^aY5#Q;RG*`BaCtYdcLe|d?wIL;mk z?!$|0mGmcpAv1);tYw^$ulk>Bm&s1iZ@Qnu4EDQNH=5W$oQqaC3BdGRZ8*CM{j+r% zz3olFn?!&b)evT|^xg-~E;rJyO@jVQl<~k<3Z5;?yFY6*o2EE;3V)?TS2usz2JBP% zeS%0(Y=c~MW2tL-Ht)>nAF9>2N88>IJa?wS9fKT*|_=jh_VFA z=L!V>a>5fUjsDhmy@506F`WGOO~}OYUpL|ZO<=iua=nOK}r*zVcd zG9&MDiiWWd01)6G{{#R?eRWO}jWdGD$iA?8t131B$J<9;xmW4inwDiGPqKl6){ctx zDm9WwV+T$)cna}~e(EDtK`V31xUBgWg5+;JWlx0oJWykv;4Dr}$n|wKZJ*T4KEHJ`>47MmlU|Tiv@90hQo1ioy~fvk+N2 zcT~oQrFT*I8s(1Cz4U`%v~7L75EHL}CYXfhi=z6y>i7{M2}!K9B=;T69qJA7MSWxW zFc0`(Q!~HQE^i7s{i<_%Xc;xf<*R`F_8+E(W^ zD^P}&UB!YBQsw}4cNPnEwc#*9FIAocH-0EY2vS*Mni(U!S%K0=fOsy5CX0 zUy;w4tRCfX^^fUKJwKnu&yUM%bCCiefy%IL9!s6^^*S##MAO-LsEr&9R(z@{dt8T_ zcuh?a7{Q}pjg*4ePGG)MdT}|w-zDrOd5T-d>%<{!Kr<+m~H~jmyl>-FjkZz1?fSuAlFh z@9j=2xOIcG+f}~bhnLMwz5&!zsyn^rOKj=K7r5JjV0fn4)a~!4tDR64wdS*L;1R}B z9u0$Nic=>9vv3m{m|8Tg6_=s4K831U;u+KWfy`cNMx_IyvC)uF-qgdFG z`nyM%5CQkZj5+%lDg(+U;iZq3ss zav2Zm;@lv0EoVJQU<+L9yeeYR60C2j&8jidT=Qn%7S&C1^JX7$g*8&m83Ae*G(Cwc zad7u3b+IfG#&#`{ES~DoFNvWIo|Pi1@M~nWKi|O6U%g(1SvhOfEh?^7?c6O|*O%9> zvRrQTEXhs?gV^`XbrEShV@^5}Sz1#bq^D6RBVO+V-fGiw)_D`;>wQ0&nVDL39Z=jc zNYHYHp)UkrIU?}YQPnXvcv{$6_?wI^#C+KA>tnNGzPt~Q@wu^I-bbgzeE9G0|M^5k z+^dkn8nGi<{y61fF6L`;q$c{G$8+Cp{24gM^-Jx}uQT7%@5|)0pX;Zc-K}p=54LdD z&ed2BUh znch4#Yd=BpV1NtGBdP0~QaLGESU3mf&@XpL7j8%)&fa2T){s0>Y$$qwKe70c?$L-E zKO&ui{MpDjeCQHrs)rVaS?8Oe0}11p@{{ogFRZow5s< zp5P@-l38Od!$&G{sO5}$*(kCBDEg|9m{#Kgv7rqj#KOw7jjR?%eGP3Omz1*Q1PDxG zv-3imx+Rgv*f^#D8c@pED`>2y{!T5NLHh`(khvWutdNy@IN`boR`YaVB#!3Lw2(z} zY@lsDRy936gRK`KOadG7raULmEd{BPPS|dp*?BG7LyOL z^F267J!VJF4A((@LJ%*%!$*+x%f?E)ECdICAA6*uj)Nr^5IBK7SV0y8ghNMXjLK!O zZ+Iv}PefV;BFIXA;rE_$D22ej?XVO)W~-REQb;u#g4Npx0iA6msBaibXZiVfg5$be zR{fx{xl2RmM&aF7bxo^gO~*qr0NtB^jD3#-2e1)s=TUO-S5YJ3fXVlA5OJZ%>?h<@ zd~gZ=_3O?@RmVFr`Tv*n1d`?22YKnQPf_`ar!d}9AV4ZC8L_tpdUZ7Sqc z8~=XVqSQ5Qa`hxnc7Y)FoJdwx7i`?f#_3ub zE1g+_0+)q(2OcS#na$F}7I6E@efD00Y|zueJ&l=Tb#G~g#e0Z%XBEE6Yk)m5a% z(B$qHpBfZG;J+sx%kEwzM~%tS9v)c7dgJrxkFNy8&8EJxZvVNayMkbL{$WRCaQ=Z%asUJ}hUEc} zrUoE$r8Q-(iG?;5C7bc%*x+ykG^UA~h7DK&FmVzR^CG}$ugmTZEeuX?XF}iGO$CrN zs)*$mW$lPHH`Vk{$nWOh2RpL_Z2FD`kW_*eBRA6$!Y1f$-shtOnX-nVsXC9owk|XO zZT%}P{VV;JTZM;Ttl{VWb(py`Gk^6<^#HxCmv8+``OC-n)rbmE9KFa#3!)vBEqxY1 zMyvevyH93h0Mq0GlmVb^E#n^pU?PF(e?IC1sQ&x-HHb+7*96Mx8?^wUe+2k;SrYD( 
z1pF15n;riRP3*t0wZ5Jc8L(f=zSt}MBwO;-!fysv5sMhPkpZ!}z6M~EoxPWa0Z4{6 zG6O)ImmW-mGpJXt%*|_Q43q{S^BZmgn~h7e1-KFQ6cr^6Fw)EW+q3YE5geXf-8_o# z)*ibLgpstoJe6%ef7KSf5{gNp7#}cR|fE#E;%#vZWIDA!RICu8lOD>b_l=tOHk;?{*}A* zsCKn$a`*c8H(|cF=n;OiclpsT>dTJtBh~rm;r;gFo$9adY0&9!NoKG2&!+I$uiy2; z@8i!`@AcHLgWS*UWk z76911AF-<_K+qcg5Qsj)Z%_{a2FY(8JlHwkS&+a9ydgOK*pC3dx1--2c)T}ZGWt7c{<2E- zXoULH<{g*u3+u~lbI*wNEXl*vlo!v>S<@IkGpW!tuZa~Wl|03U_vB5{}q8~5OkI$DvGtb=oz>Qs@(FaEG zVDElvYz?mV6|ZiDs9)yJC>?Z2Z*<7H5t?6z!8bIZPJrL-UeGE0*ktS)pX_*%x`Q@GF$Z5S7^qLy7>na)bBR)$XGezRa?+oKUQi zRfyn2wupJ54ZesV2G>(4L5$q?zLUldse*6vU{Ho^6WF~GF#s+uPGWK4qzs;ZkXz<7r0z5+* z72blrkU8nL@(phDBwovq3?eWHO9NPohn$-f@0Zv98VceqzuuKHHI~M+qoLmN?L#T} zo5<{(Wyi0$1TqAjx_d49GKX?OID!dvS};bi-ZbN$&A#$HkTS(MKGR1+V0tVgjoO)p z#uNvY^3NkmHL93Ydu4(1qENSNdQ(fKO};|;yXq#bw?aXsO(2$vdv|O?L-7O$DrEd9 zHcso?M2)sJLPqMS0r*bUo-ap~zE}J$=r zxG?}E5xD>)9~1>m79iW!u;W}onSM-?V*GtbyeFNY#BMYTjtB9TSEsONOEjD0cFLk@(_r+&--i2j?1fC2w1C{V*l4pX zXk2S5q5cfxwv(6?UV^g7s1ExE!&C36arFjUBmcq`BbN{-2j8T5F(~qhj0N*aN07A( zTV93JdQMRSZ53weB6J`0x>>ey7fZ7(mcOv9b}_zH_oLh{Bgna@SgOdpA?2vla{!ig z!I2=*OB6?hqWt_|vR>$uj;p7#4+$}StGr;W!6w*NAcX)cHd5oq0>&ePOhV~ z8Hxy3G7U|q*9%XWgV#0sDz~H6XnL{f;_TjwwC#8fp+d}2x^ZdwM&vRuJD5c9-mOR^ zX{AhrgRb#kn|%hZ5yF+4EBN0jigx16fl;MDHDMUAGIG`mp4Hg~5`eyyF!-)KO4?c* z768W`Mp4GjX{_b^7lJg=SpSS<qCApm#YTm$ zIdSsp1B1ELSoggb7|l&a0Us#NC3-g7MMRQr*ZjK%z@FbZA8sraTIp5@-LE+cg)i^w zio>6o<&413B82b<0-&4RHGV#zEP(|rZ2D_bzaej#q79*0mp3(g3Q}FvC(m?ih1+BH z?MCxPt8ESlJKY{sna|zL8of37IYw_I*-8;bcY=x6l!m~O5{R?J#S5KgWayb zFP|=(z9bhD4zR}?!sZR!5V#>=ifTB`FxJGr7&tNCNc%?g`#D#5!5zcI>|qu`h0i_@ zMcLYLLRX@i4%qdPBn>YZuc3*~T(Vtx;5=6|m!+g3(%@70yf*hwLnqXDt>))UBwP~z za@UC6J5w-Ax9_4DoETs}i9&0bC!OdR+od052naCmGdv{1L*zF+pcCq?=2Z5d;`XBO ztEtwm4$H13dc!tIxOs7#oZ!YQYfL6iv2;0I*5Qx}(LzzQzxP8RpS*|lMf^F}Gynx$ zwsU_?(SsXc+=1g_Yl2<o<}J)_xv!iSi@LTNzD z^AP)(wn8wKmNmOmy0e>xp(YdL;N3EPpwyF4GI$X$u+pCB zEMGF%DMHP!lRjcsC^ZUi&z{G=PEi5xx2XVXp((!b z8;8g8KcpY>g8-bGd)6|g!u*iYJanMh}n!*q22K^;v|3m9jhL;A6>X7eY&lsKsB z`hZX#zxSc>Sgo1>YE|Nyq&iizYV+#xex5r|1UnqKiejC|Q^r<%OkH|>69*?S#3X0V z=Vr$3s7CB@Dj3IDB~5upk~oN#sCe5Er)Rr z1Na9O=DELlM|Yf3bCNAhiLT=$!*M$fU4|M@V2!6mU9xjOKE(k4Gj?Wbw>UzJALh)j?G z)iT}`PT8>xf%GutMt>IX9~MPdQ;?sWi=A1F@TZ5n?6D08x5%wpJ3A9YR8{| zd<>u|;hCu?GN{l(m5vf0rUoAw5=+J#`?;JYX@~nP5GD(6^(b4yw&75ph0+3V$uQT2d02tH~~#U;~9oNNnz#QVgzVU~5;G{p{Ar;lR>Xq_n8B?FY zvT=X1L3}V%)Sen07&|oS*DR)1Yxw@=DmlhM#PrJi1~n%5hZ2?^YA2<^rske|yU2|W zg(DViYR3RrZcMEC6x7q2Dt5rOipra)C0LOu>gT)qzP(auY4M$?T zBQ6@~uwh7)leIXHTfE7$Mhgu)89aMk3(-<~yR|Qw!-ayvECZ!h;W&qrEGl7`cjJE0 zSZ{$B+YB*f0~y5i4rJfID;mHkPVCggQ}qc?1&5ir5POAOENNCq0Cs+UGWeSj z`bvv`^dGR#0!dL%E(0cZbi^3OIY-Kb&3%#X7vV)Zys1+{DH0&R)_rYHKbR(lf+Q*@PZcNM? 
z)&=cTpRw80ONbAXIx*vIhwp{2Ph#d?+h)`d^%x53OAJEWV2aJK@c-aOd)139RH}by zsmmi7U@z6bA+d$#h>75KN{n+Yl`C)dRXv+z|F(oVVJ>Zb4)P~GWKc8`cKqP(1lM86 zSBF5VQ38qt3eN~yFOe~lu%%k=^#?{Sp0m^c@)ZnU7y_TyZA}0Z?%}#SPH|yaEt$ob z?g?&20u46&YlQPy#o$FSi+Kx7w|X!uSpw)h2T~h<*}XkqMK&Hv*>p|!qG23O^IX3l zQo>6Y7eqt;q=*kaeb<|?3`m)mRZOKrG*r0jtT}0RNwK2htci#%*~ak~LQ zZWw-XrW>%`Ydbwj&LqIjeEefbm~>UDMJbB2Ld4o{jM*|%H^OI^x?VmjA;XzY!4*?L zRTin#!aGYe6|Z*&vfoGy+I5~gp&(?=DV-nfLf44ct3lUoLuZhpP|w5k#G*0Yq7YK0 z*6peh6DXS8_M$k_6d0C#U40E@v-8P4+jn1pS2>ymA`$H3h^?yr<;e6N;AOBP-{?bi zfH6h-m65^Bj~q7>A`+It^v$&>vr5~x3KC@;tW@IA26m9Kk2Q!zTC3`A?}h7fC@!3E z<$_vt3rVC_6s?~WwBLC_5!Oo5l=V%5m}3-Y?B-BW~UbWTda@ggW* z=SuvV@apo)<>9$Sy6^(y=9XU`;NY8hz#4mdZIYfCa3$q-{xYXXh5c7Goq{ZcZ|l39 zjXvLJ{YZAyefen<@eiAoCjW5|Ljgr-_O58jL;G#&2$UpkJmB0#Pi@zQoScymdIqwR zB!QNi#!+?unbS6hUe&WBj6W;POf(ap(FXV0k%(HOilvojhts%H5;q&J*EScsgg53I zm~ZmU)>CPEFe2=#Y5OSND>t0CWa`x5I$fxQ@|j}Jiwlz=)e zCqDyw-rt{zt+Lli3K(?9oz`%fyY%?g@r+=r&p_h|+bUW3~1G5DmYXSh%8pNo!pg`r%P%T}QAzDG50z z0Md`w1&^)K>gfk`bW&24T+6}|u=v?VE>CtP2w>1=NkZJi&}{2dQgdbuBlA2QnW;dSNpDXA4bmPFe+)A2NA(-Kh4y|d4v_XE(ViF?A}Zd=uCWu=eI z6NIR6YO72_laOFMMRp^(z6m><#$uM(bK_Q1acBOXTk?p0|GPCSy0S5&3hq;3mz2aJ zeK$=2w!dJE@Ugr*akR^<>V~g^b~X@3 zG1x=(s&x9Jd_pEI#yx}UEnKesxQDb7TJ|&VaAMm4~nAe#gE*78iH_JUAfXx1?S zI>sw2HpdzcyOafj7*c*dmsi@MBC`wsq#CoHElxE?pRd{33g~(Tc%L(#;eAYW3~>k0 zre*i%9kNfQ)Y`_IpyZx-7btz^k}F}$R*us3Z{K*0fG!Mc3`JY~^Z4eDgivrsj@DP{ zxf>zuLGgm#>``OpxyD-!Mb&IS7MP&paI4Yz_FpS`c>VbdRv%|oj#K!n0)z0ol~l4; z6({U|qZ2f+iBNg4$|_oWX+(s>hAak;&eUO#BU}`%H5D+N{Lh1z zY~EEzl}%P@1SzYID7CWyT`ny!bv@Dm;^1Q=EG`{^c@3lQ+^XdH5tH>>CDme@6i8x2 zFD*FRTU$)=djUC=y^|PHLi-qn?a6kmLA|W&KH~H76klpO90X&J_2$fDDUP@cy*oZ0 zx5L+s)5V^xR3?#{VosB~fKfbEWl_S`6~8X{+z9jVVJgvSly1V(dBhz1J9L4>o5g($ zJFk}9(PTG3p_Q<->k3A5uw`a;3}hrDq?)xxBD$gBNw#b3Q%IVV51<6g_ zXzuu-96up5ee-2G{ToXBGsEQR;qa%vmR#~B%~6HAW@#4Lp}GKsMUdd;{gy>wrGxvR zF`)V_8L=lnwo%{S_-{kZjJT#-`kibTGK)ciMc@JhPJ#~1&w)k?S$n%8qO9yjNAU2YWpP)ke9MU{8z)Z6!!N_>)H<=SqR=2YAr*eYq z^*l^ORaUr1gL=IwObL|1o}Ds^WK~=igP)D7=P5y#>Ns$adv!F9ij{yp`Rd+U`|7t%SV;_%yR!q@73!qyeozKXeYMR`m>^zy-;-vk zZ5JME6%4qP;;4SN=9`XW{p=RZ(FdL7-6Cu^&yN%vyg}PpO|lNai2B~j606bsmpo_q zJbO~nVWq^A5HdHyF(WC8uj^Bf~Wl12_9IRXyOP={mBL7&BN z4$X`Z*A>{eOhGK>)~*=YJfn+KCA2OiF=MAbE{d(Q8CVl%N4}AD?J-MiwNm@MFcdRA zuiea!()_f^sFggq=P1g4czsojKOpFQ;81F*N{@LvJmx~$w9G;qYV6!bY^kIcQLbz1 zuNA~yp-7$~(e5cQD--P0>9zGl*~)C;FPJdTyMklE0fLmA*IqXpP_O9K_sR-|;7-s6 z@+~6~fJwJFwS)_%k5fu1`)3C^CCWJ!n*ce(tfI;774D3#f@X#xKaJbd*c2I!be6m- zKGurluv^;IixjRf3;jn%u77#9(PKrkwB|lJ#1swsVagutnRlNRYome!AJ5}pjb2Q0 zepXqRp_TD!tg;|;ir#e%52vW6imAlnH_&IO<+z{#enl|cw~>X?*5mW0&$4h9dY1qm z@88i}EmEhFS<}LzijW)ldFB`bg-RYlh2=|l9Q9jcdD}@9c;oTxkA!dX!h(nZw-n7Z zM%W0BV$Jh6)P8y5n4BnF>mN4K(P$ksE%%OBdLNd-PQhny#uP?zV*P*JWAtoiJV104 z8vsc(jsWuh?W|0E9dMR)#iCevCNgimSl zBzS4M-H`!|S^i1s+k2)j4k-$n;C+b9Ci_z`JgxP&lb`;}KJKnnFc*(Dz!0L3UxuFT*13Cq z4bs9Tp>Y5!?~QBmp%$+UFckR)irX=z!~vr|&?MNL(s%^y8yYJL)hBba;KXMGJFEbyKud-K#0@`qp-^$`@Q6P>nuF`)-s zXV2HiF=M7FsD+^{d}x>Sr|6GB)Zl9AGgNa2!@|I8RWUZM;9BuJGYxdk{8EL7GI z1AVe(imWi_Z5?k;KWKQs!bLE$=>vm?-W z(<|E(TM%l9Hr&KsVqn1`1Q14S-Bb-elDeT-g>GF5wgu)SQ%vYVvN%rA2;6s6te!Czog$4 zYNl`dVj)T(&`=I@sCLNu!s$qtaQN`ITuVpbBdHZ@umwOF&XpgFHUC*8n;vh!tA~TP zO@)d@k%z4TE;1?Gs2CY$hk=GM(e$@t zE(C2{c#AXdK`{R!t*V^;go4kYQa;ewB_iGA)1XgH3WgM9CA2lEmHS?1nEYl9#{)Uj zz_aI>cs|?)JE_o1Ac=?xGecVnb?a{924G;<{V$*(sro57wn}NSIxn(uldQ?(CR@A8 z`s6y&U9OC=z{{>4l3lZ@P8wcg9M#~8M}qy6!J3m9tELsn%~$Fm`?^dCb{e*|&(Ie+ zb-O7%)k3tKSyJPawA-GCiHy@OP*djFaNsK&6D!^m#I@+5wI6t}>tc#x@Kc7kDx4@; z_-a{KVsM)%(Oy43MuU5*Q;1+sJ6d>ppcSpU6`!b5DCXlcZ*?j8Br^K>-X;FnI^C>H 
zX+NfUljJu-{DE3d>K@sHF%7z%;dNbLoF*R>%EfNCwKzlJ7I$jLD5egCytV5rACc&h6$e}q!a+rc@m6|cU37^pn$qb9iO(3nP8C7INC86 z5G%~L$wejP27LfpyBgo%pD(5Bov0es$UVm_Kw{GbkV#o9w0w-}AC0a@LCgz;U{RH; z)?rYB3HLWn)A@98irBDS-$S6`#g^9Mr&;UpR%->-yPWWsf8F_n@1@M(t1ow;fw-Bq z&mlmln0yPw>f_~z=`ljW6MEy%=AnXq_n;-V=apA2=AfGb!@q(=ZrK~NWmR19$~@n@ z>bhEctJ3$Xa7pF{yhE%sUXa=Tsa)!&ZJ4ft?p<2C8|g-+HEqLIs=h@vE0}5XsPGr@539rLpa9oSd?&f z4XOdWesft9FmUdjbH`3>PlqdJ?aPQAi~VvUHq;Fv*50y%$BhJ-no+AnNsr$gXiyd#G2m^~r(pPIo;?aw`WmuzJ9kcAH z=nSJ8J>QjkUy(lmF{BMk#Hd_d3Ewzr+cJ6gOMM43W$|P5xE_VygRN-f5kGN??Q=ly z%g+El>>R|4tC%o2_O38tyDS|zP;ATl%}R?R^li`bcpqHBqKQ}Lh)rJpIq3jHB5;-o z`2RpJvD}K96{6B98JZkVGCejG{;OYWi?U72)56?fT*eHYwEf(8dJmDtVSz?`ac%(5TifM8@adNXq`2~D zZXr${z^ zQ_R*srnD-G=4+@;^N{f_EgsV~<)}s=mL+;7Gq-)}iyC9A%B&i}1aNjrfM*ds#o?Hxx=&%|3TChmnd_fD8V4H3kmi zwQ+#SyA)Ql^|2wx*=aT>?ZfCN>$Vzyt?OOEhMqa2nG&|km*~+k-VkXYpSZgolrpJm z3Y64j|3WT98(Ga88M2^}-``ZN+hDsWYR@jKFo{`mk-z+@78QwH$XZ7|;NM8wGvO3J zA$(MsX%4)~BJVIVSfGdKnpjYl@hq!QNsgk5QF=y0elLi=u_>4pRP2uYY!AUc&ncy~ zh^?tyG^gOsgC8zq0E7ALY^W~Ra|7dTy^qbaK%{bAGgeF{Zap2&{5e7AnHHpb$mM8t zK3%2=qNZIw%SYh@U6rI19mvj{f|jY~6Pl z+$qKYi>Fl0b|V#B4kZ>v5ekied@j0n@FZknSSWiRrK8A-Et3wODp|1I zV4uZqVnlA3Kc`OCwnVKydhDdzKf%n`cq`S=!vg>1Ysv~5UA8a$XsGluD@QMUU%Q< zy7)R%7*#zLiuJp8*Q1aMV4x6NDmN(8xfzJThJ0gw?GghL$HCgDhnQ}UjR}A`mcSiz z6jFO1Io)754`|&c-;X$)z~iA49vxt|e^$C;8$K>JdzXeft+Q!^P4mwIi+#7YxQs0q z^lRSitH!%ooxjA%=6E~0X#ap$rL5IcFh;s+)OABmk_k@?S+p1$Xlz6dYpnn8NzvtT zlAyW`t!($C&L*oBff9DuOG<+7WjK!Ef)d6_fUwHnv`0~;{H+^v_fyr&YmjQPt~iUS zAu|kDDsdwup!36a(@mLBG!P(iZ19r*M$`Y`P<#lj})~?-XgH{&KtNNXPoDUN%v>-)qE~It~)}^F;<2W^zSq}oS~^OIdfQMwcpR0k4O=2{odif! zaSb1kGOXrp7+h4O@N9`sL*hYDcTe7<`B{dyk7LXHi=IUwrGQ!rILJ%sB~_hXphT0e zEsVvW*;x%Cp3gn}h19<;tl*~8jOX9Q)%U!=SYJz$4e#`z8#cz|Ku7%r)AZ?WIh6`QM?#mb%$KY4NG_A4Q9EnNPebc=L}qV}v2TR;k!# zDGi1 z5t1+<>)igS;C_@T7!IF*OfQ2}>C1&35k1FiJcjYtb}4ylt4Qx(#Jp&#ow*$`ya))K zT1d&Sh4KBDeXItsk)5_NOJdtnCtS{auWnhjlTPChIJN1JM^tk!)3ki&%@nJ5F3Xy>VXcK z+JS6~N?Yl5F3`xT?`dli;*)Gnwx?7xn?Rg?;i2jDWhIW(hyTMQM)fLy&iIw6@DPdn zXZ43tTYsWFq?wPK*q%2B7I|3xJBB3cW2pO0UOr1v(_<2zbUEEl1z_cro29OBECC{= zxnQ6upyONL@kZuLMR-PCIAAGFezgj-4|O|ssY)sHA*Ic+#gnb!&dCMBZJMlo0Q4c> zto#hK+p6_(I;nT}=3}4*GfPW+emKxGg<#87%Fyst44d5{UwClQB~Us@SKJx&GN8mk zq0X@g*Hj$QFsPY#9Xn{LE$tin-iv>NnoJTb65%`TCCv|xzbFRI!P~FS2>y9Hae$i- z?j7Y)GHD#%I~CHQ;L0=uRPh{;|1MN4wIhKj$I>gmc->oTFFxTM&KYiv0jjcS&MdsE zcPTQefA+7o5QoZ;Z*^hBszqBGx>+q87;tA~j9Y%8!Me-WCGirCv(I{^Jav0yhSYA0 z!tS=yB~v@FM%No8BsO4gIpWvLuu6?2{`?Sa;6!`s#On_xoFTZenb zi<09lV41i}5g%&#b)MUi5Z@g;D{LIS2=sT{bIxCq*o^$l2@&R}>J)d1ZjLr#jO>_7 z*yTLa+cLWA+&cX-wht!Ry9sZX8h`C3gXN$Dn$XZz5l};!N`>%@4s2!>mP$)pG)5P= z6{RFS61UyJstD6Qe80<_`Si^$S**SvhrQWSj?~<#WA-I}8V9<+rXMRGUj@QwIM;x% z`m3AaG(&uRUtnB>%*PUfb9>M9FKKgcC`w`mgmRsoEoRd@z97#)fBAa=hXnk>Fj#Pp zDXDTh`o1Pja}*xy-;#;u%y+5ep5v9I_ra;B=ByV8uI?<>5aHdTuC8DyHU@qAM%MT? z#?Ob(XOj1o<}FH3<%|i(End^f{&Kaf#rT&UJ3H)j>(@g;+ix^`V3GFtnRExKQWD{@ zcs>5Dp=;t3xtPu)34e77n5VlaSol?Q7>ZvXr z=u+%8)`6hV_u)Ut%`x$!OpD5qv=OqZmWy2!Sg1u?K$8E|*rDp_M=0iBmCSF3*)tmM z&C~LI&-p_Hb2u7lWFj}c??M3~>gN-D;ucVl@i|vSuVkJg-M=JkG+-(>F-|t0+nm<{ z8zvson~WBD+O;zby@ar-#IXmyYjOG)8MN2RuPei%x=~klX=hbdfBXdtRdiRoDdTj~ zHH0=4)>wE9ThJIqcJr(g@QcY83xzDmv z%12}mxl>lvYC{1oBF{ugo5;)-NnTp1%%4rLAi-HDNT}>_0Xqf-bD(>q`O4`>&8;CL z+c0m(a5qsShukA)<{vd%`tYz8R^OZSz`oddhAN@PvphMR+q2|{Z7nQZMA7|Gl^|r3RV}}n~^B;nS>{X32syo3e!^9#?bPh6c^LWf^P5V zR63j5+Ztp7Qrt=dM4iHa47gK3R6zSOsZu}(R!PI+J*PU$JTL7IB6SZ{@Y5HZz2gY@ z5I2Sjo6u_Eb;o7g zIM&o9uR51)5xm|cZ8WQF9+_+Ts)+T7{^2hBHpR@^HH7K6Flg7hR-)5|$_DfhnL6Q? zf`*B8_mHHRs5JUmF4>$ju5?;1rGz(`!?y`D5M?Qjn?(i7<}oF&xVq(9YBkx%T}^~! 
zWuT5vfY)M;sMp|qzF2ACdlW?h0n>XQa%TM$q2+9Fm7h!chN(n<7Fh?r;}XRzn70K%Msh)YG@psht) zNM#OQi3T+vis(uqe=$EV+y~|q+PdlwZ4^B-j!z~Al2CwZNyU_V6L(w?!Qm)8$T$tg zDL~i$zy;j(>HNo}r}hArF3VI;Hx2Ne+uKKX^XOckIkR`AJnH>$&S>C>VxCS)hAvsh|3`L@e9_JSs()edDp{@SkgG9l&h0OU;)1AS7m z&d+#tZsD-PB$ZKDUtWzX(5?ln?WUVBuIgR*g9^5mn23_Qy;?)|GZEB!+vy@lf)nV^ z2QI-i>8wGWJgtB}T>humKA{pg&DkXCk9-hB2%Z|=Ms1!8hM)s@te5mM!e&}dO_<3{ zVh#5!Z`DV6T!4|D$o6p4fV|?oiw!kO4Db%PHh(|+>(P%AM7@_ThH~75dCrA#DirXr zy%@|qm!P4(!HlkK1;~j%r^j`ruvRZc`Gysn9e>58dtU+w%29t?s@iTf^$Up4WBOP^ zpb)6zoAhfeTNCZ1NS9_exJO<3t|bh+TsDHCAh=JH&N?+ClJH&{8@0OD1W*34<2Yhu zOQNU(sXE+B>3XCUOAE*x*w_0}2nVa;yPM)+rO#6AnFg8_N|dQbp`h^1kN_*DM~eWQ zH-%)jJzJ9}a`abuw07XhS0S%N5sf?hzzr6bL1+V!-Q%0SEB*gq?3`jmi=s4Lwr$(C zZQE71Y}>wNo40J+wr$&XRo8UVold5cnaBNb9!~a2_P_R8=lhU*O$fdcXynvyZ8C~sUyw82VUDp1#{I6N#S`DRTzvInFEGdET*YG>qsG|Fp!4iBYQTA z(y{Qp{cBQ_hqw;>#G%(FOhP3&jKf52;Z^-OZv=;Ggn-SkVfp4_ z_oEJ;_M;K%3`4K3N+xgc3t-PpuQ0JrvjH$h^*L-;%@CW;X``K_F38$0=zgTZZBqa& z&HZ#9J77n&f zFX$0)Zv+Lz(lTt+3Ha+RQ*)UI|A!leaYi@$pgg5t< zfN#GNv^XDh^_relJ<3zymA=wDx$9K|dREGFm9(&-Fm#(r==mA;W9S%a4f1H>%PCn! zDwepz#|Q%^Qw*?dx2w+1@^SOC5h2)ZRr5KDZdd3|jB=`@G^TrysKWL0mvg7pq*+qc zY54S6q$jxFNYU6bIS0}ooESCsmf~&sr&dpv0>$8<5z0q>Q5?9)q-nL}MG^TC=bksx%ofB=5|GPf*GIj?Q_y;A`dh*8& z^C0#&Fw2O3lk04sUD_hzT5SU1GmryQ<6Q#LhgxJc-lcNprD3GWHax7Uw9E>Youl4< znvzkx>KQ*oAGt?v53ahPdFLc#*dQ%A3b~2Oy~tFUn@)QT!wMOya|qph9XmHTnctI% zXs1WXl7lc>Jsn8Rv5^jcJf2%mw8qFijxqtoec|c*ei&GZb7}lP3hP&L6O>A-jRKO- zx(>#5nDm<%Qt8QH5l|7en+IZ&ZNf%b2EQAcDxzOz4w%?xdSz1V42D1{!$O89UCL@#1SFt@BD z*?}srNrX_|O~p^B)RIedFSEr_GF!%Tyjy`Zl0GtyC{?tMc`->O$OSf3onoeH+#R1+ ze%U5&Y0OaTurr4%T=fL{fZtsINw89hJz2d1S)xVcpZhQE%Qhm8uDFS~zJ2QBy%&mB z)@_c~5WJuu0$Pbz(JH(LeMPVqe0Hg-fqxxpJ}{sZK;75mR-%0#)8Sdd z*c1{}Yutw(PJrzUJO1EU9+{M?Q{tiFdy=STctS!%aA$NEb-uv4Q^)S2O^Q!3&~SrZ zY+}M9{=kh910$+u3!J8`p$~wZAc)NGtK@cMQ9mgTc6U`eq>76;n?wBv}rSGq0^OLKRla-&3YktH3)= z>22lvKELHFen615P9$@#rw0VuBD^%peV*4xMAQa$ezZ(OfSnl+8!kt zQXFRt#DNd=MH}qUb8uY6?&V@POI!OeTh6_hH5xW|Fk|~t>sHUv&xKmaqPFY@JZNs){x+kH870Az_Sa?L0&`X0mXF>pFjCz~>|uV=B# z$|BFQXjF>55Ap9axF;Be_l;TsOp-VCF`qd)%f7N?GgSMb9u)kSY{7;hxadj>f6=>3 zA)4=0>lCDz1q(;ht7%{SlVojLDf(l2PjiGtz7$voC0Dk5?|q(&Ii@^Xj0}%Civ+QC z6K96#VR>qs(DpwM&CXB@oYfEQN#iid$IlZ{e!g`fZZM%kLJn_y}Id9y^ z5>VqW2%FK3b_}&UoXg3X?a2<|RgdANcMW~Qrc3$?T4~RXkyW8G?kU0n^s@8YlOmR8 z3^r-?YKWwH<0`tJVkkHb@N$PDQm$!EhSnf!%A=w#T1Kw*RxNy9m&6+(XgSJp#QLiy z41E89o`H+ld-PFaZ{^KAT!h=Ivs1~-*ZCBWq{6*(p=04 zo3cfL3lC-E6`fl87*oWSYME(0TQlfQeIN)Uu<=Un$Ivb}LafobP_2b=B41;-9{VY0 z`=XNRlNeZ)ohm@pj|@qW@}H=Ap}BI4uw6Nt7lE2ud#n+U8TO;RdKvC`MK*K&iZ&`B z57Ip(a^UvabEks({YGH%N#5s>Sv=K?MnHQ&pgW_trD=c=#1f_HT6hdx(pd%Q0-_x8b5B5S=Hl>`k^+7vP5*CW>d8L(o|{)5&}lvgmHedOggq zQWY69l#HqDuM^$#xQY#?t*_VscPxZf_ID*}hx&3xdGGV|=L&0ne6t+4q}=3L?c#7& z*VAse3kFE7%{HdaPN$ce)2SOh14j3MaWdTxzI~Kgf__I&>T0;_RQXY;SjU}lbY4Ma zA-#hH82y8&vDs${p|SE0$kTdkUr@Y=1`KMBjt));5l+#068;^)Cd*3edwd%?Q3APj z$TxS|aZLcSGS-{a+e~qH!`+EIJ>;N#JrX{d@<*~%_#E-CiQ}_193jPop2j^{_jAmF zCg`_#fZ|kBj<8z-I>kE>XpgXesk!(wfm8Td=TQ33xi)QD#G~Wnr%{Cg_fQ_aFILj( zK2HR7Q3|O|S1c8G5ZrL3A^mB0pQ;cZiElgPqR>$8t=@@FtmnqPLKA@ftPZNQ zCze@FbkR(WiBFgaD4v~lUx8!Z%Gp!333cuRwXuY;nV2Rt2@-HN(LFGYP`EICjt@x6 zZJL9fkBoJfNm?$KO9>}s+qqz&Ks%}gw*gh-Lrdx2g;TAHz!G))CyUG1da)2}?>;R( z1+HphTGN5| zbKbHbUUaeGJ}C`@3B*j-&77w6lUz&;vl}|y7PavjQNEh?Y{@~)d*cU=U#q;a)71zl zQTAWA-IQ1b9WSo+V$PObyKU#xhv)*VDNr_Peo0h5A}`!0pVxbIC|Xp2bKU->B4XkE zd|N*1bR;sRUHDbhjH`4Vd)gN5V%|az$K2cE6XDw#{YqG zFme3X>VGmhn10zE|EX#E{~-6dD0}dzSmBe4YL1h#<7<=GH^MMuOfoQycEc2OB4r#+ zP8w!0o95@!Nj0M>>9z<(R4Ph^LQNvEwC&sN@ij^yFg&cEUhiGko7&#!7vSI`svre5G=nf4v$-!)mJ$KlF{;N41$~!6 
zd|cIXPYMC_Tko9?cBA0K-h-Ax114e)R0DOZ-wOsK0E5VMryDb)#fp9feB1N0&bWQb z)3UIKMTSpgpY0eoI{;ki0JsA#bU9gk;`dv`BdC8S0y_Z%_i5(-;N!XxMnf}>GGuMrevkudA1DZ~Ea>NBfN1VCO+8{>;5j*7UMWA>6&4x|`BL@P3%g1kM5p+Eb4zbkYlAG)wN8C7zSd#5Mh4 zn6cGt)UZi=rO~o2YfYv87r+bHS|6=%SDwbl$rE3loyPKF)d+cC7WJ7rvFEfd* z)4fD*F0Z*rd;YvMF!n~uRTU-2M|1^_s)>wkyVLkPXJPkjmSUsv+1RtY&Uvy@_iUly zqM{?|h!y^FYQQG(Vw~-T!PtF*;mbM7*VE71La#fZ zcvQOYX|6ZumwNoroD=E=VHJg2Pkwej|DLg2MvP4wKkvHUKriI!i)@6yqwV>%H{o1aPeIErk=V5D#g^BBHg!9PykD|R zxd4`J_^%ncD=T$BavvA9C@rZNF+Kak#ARTO{97ssCgaMf+s@*d{;90dUK1ywP1JV#lv;LRwDj<=~vmw0dPde$EtwPca93@5%Mq}Q%DKCxt0{a>N@*~ zw!;K4JeFx;Gu4&MELoRMcJs%1dpR*yj$Oghw1)K^Ml5aqkKnz)6R2rgxXS@BWrOJD=i_#Z|O|VMgoG2zG&XTj(YbOzj5gDZT{1V2`lz(f?R+gjZ zy?ju)h95(X9aHPJ!vimlANYJbo&K!~%;%UZnJDs?{Tyz666}AaKza2QO+LI6_k^N` zo!WLduyLv%1G-CJ+(<8mVa7KD+wQS{IF_8(XN}wQ!e3FTz1m39;koB5xsX5;|M4!$ z;zwz!lI5#;SV{*sNeSgT1E07V{n0Eli1E^NW_mso4uUpmpK5=xzy#iiUi4zKK=0#F zoj*Q~&S8aKmB!ndSX!>qdkoOM-75$DyFuDmK|KY%9$kh){P#&s^dUtD=h|2Ft%yh= zxy$OU`m$hV+82kk-a6-{Rc6YGdNAE1X35SHE)-T(Z44AsLM*1-;Aqui>ujp|WAuST zu?P1iLLRE2i|wbr+$!-rXi2(Di)2|g>^dikU~lJf*GV}QKS)@&1uTB)0rq8OFvL9A z^*Qbx#J3ek=#BBO)TJR;KW=(uZx(##l>yi4GheODpDbymDZ`%jAlm&Dv``kZLOeH$ zHSODQTcs1fmX!$#)t-X{>GcJl16Hc+yL-cflFH)}h&WJkrE!)byqg+aK05*>E`v%G zmO{1NyUCW97m8)3i_`KP5J%gpljxCi_tL-TN0vy%Gb?s*1UgO`J(E+3_m3*ei1Q&4 zu&X863iXW(3cpqtRd*^~|RWm&=xkdB{u%>S04*a;X2?2Ig-czFIRCH*ES zHm3ikr~W1=MpoAUW|{rZ2*t?6%)$7d8S4LyHLLPaRmr+apu?hR^#bnO+REO&mIa1o zxxPk%sQz6BbyyJq27<0j3Io`A+8SG0N}4f@qV5~egF1pj1332{15nmt z`Ue5`PS4E5%D~1s+dn#nYG}0qoi;AIoMK|8e;2(?fEt`X$xmn(LJ!X42|PUS!8Nx5zx`u z?Er*B_s(M)-@LN{tAhg!Zci>xebWEF40KozASV}hr^jm7+IB)2S8Ho~8mBOhE})&N zYLW4Gz`_#L;k&vG`e-KRuEE{0#kIllv%0k3#`gR#$WIW#3*Dc3b>XQ-tJuJwb8u)5 z?eIzMgEqZ@`Opr-mJY#d!jI4Zh$DzraQe8}06n#Z#jfoqW4h-ZTfM~CN*znZ#4;0{79_a# zgI~*7N8q*?k*(dE7&JEY27eL)I%YWest)SDr737Fz*_gzgi1N!1t?=f-A^RIwO?BA^YIVdPjq=tUxO5N+35up!#DfLlb!fd3#t9mqoeo0 zGkz5XXWCE6tvVJYB)GgkJTo^uzJGdRiEz%PE?uV_r!219zGu~#~C zY($oUNTX_=s3& zm?z!thif>d(|%kjsNm~2r8vr1wI12DdbBD1E&O$7jDpr5+n9GK=>!_cs{#l#l19vU};)gZ`7XCuykD%fQ}Z^a|+D4=tf9YNEqOyN?h;ydH+N(d*Q zInFw)^qp&HD$>;TL;mXfg(B3;Yzik%qRvn;`aX1=z0Y z3WoD)=CaCbN;sR96x|_V40TsTz6l(1M9arVsUIv@o9UTFA9iYiT?m03RQYX=rb#10 z4%ez!P5{fo^l)1g>GXKDoel$0>Z=rV z=H8Y&xE(c7yORDSPBL0M%(fs71up>ak<;Vy%*$#f3u)r)4|HoQa zOCg;7aK$tSUbl<~8EvvGoQg9t?{6DnCFx5~Z~~hIX0!{YMAaDdWvN!1qa&IIc+$`( z416gx|8U{TFBr?X?HS*u57aQ0Ye7hSrNXwlqC%}6ayG9Ep5z#>7d+JKB>#F{DqMMS z%d|j>w3PjR9HS+_W>MN~P6|jRJy8?YD&wOTZF6U@@Nw(EW6ntzppElYAtXf=Q(}rt zV+tZ2&~Or80|J=FMu)$Iee;Oq#;~P_=6A0k#`jiWDw-^v*0LhLLK#i$S_(&$fO(j@ zpCvfrj6`>^x9y#AyN<+mTQ7IAKQqnrPtj={x!A5B@-rX0?X-(LJ4GwKmYsyJIqwp)uHbS7%8rlp9AA5&B}5IQ=gbifXn=IzIadY{#48V~JNj&}W+!g(-2_tQz~o6i+J zEAe|5YQ+uXn)koICSuGM)4C*YhoAeQnVe**`*U9dt$&yuVDz~))##jFIU!cSO#;CW zDa!x;OEgq+ttZy{{YeAt0p-(|GY=N3yFb_6b__`@cZ3KDMOFXMHB=-kHgTi+lppSK z`8_T|I(cYs9;1vtbExrFo|Ji4)O6X7|RgEjVW zMSgA8z`#tH;5SBr9|cSVRY)kDyZWHXi;YaEe%A3K;frFTUEaaAm!G?W5Ie{Whqav1 zp6U7n1T?Vk(Lv4PXlb^nSC{E$VM8<(G0~?Y|A>K}l40AdDNAJ%;V1dkGvxy+y2Vy! 
zF-q;Rw7&hkLbHhm!Di|=Ket)TEqqavMbm|?!e+XG5@(Pxk&x;-D#qYqRocIA+7Lm# zZf?j^#p^0JpR=V$5~maFET!RG-yeS@-_e|;GZVd6M)Z8&_c^C^@i!=s29D_R(BTIh zzWYPD7(zO?hz8_Wz12$V26{Ds#NPqbR1ScGa@#IX@cUe%TzV1=I|8O$zQB43kP9ZuoNS&-0ih; zc^{I72_+Ao%a#9)3{=hEa3-({_rD$!^bgr68<-ToC16o;qgK~+>DcPVO^GCa?OpZ) zSqR^;&Nzs&yWu);&F!gWoy$f)L)T3{y0b+5lQH($Q3z`pMWSAJwBIKMoQ~hQ7(H%{ zEjbqA-D)jC?VxW_q8j$y4D#(a=aIjIX^y zKC84^eEGmBS_5z{>RxK-s*|&k65snKm-|Ywuk-`Zk@C}$yi+x_HZdZ~$L*q<2;(qw zARTeg&>^lRh}bPOCc02de2V+pMQ_9-Y%X7a;S~$!(10HFZ|`5nAZ#z6?6<1s{9#{h z6zpt4l=GT~*pIof<$M6i+LTHOh*wx;Rcn4An5)jr zF4(ff?E=;832+A$+QBJx3k;MUk*RojhD9rTJ%8W(I#)z)BKF~-O7?6A*v;3W?|n_| ztK3Ky_xB0M_3|b5@rQWP5V6hGSv*inPWHF)kXA@Y$tj=Yh7D(J=BvlPc}0boJlEh! zK(39v+bBOAJgP#&%mGOrk0@AgIgo97KMANED3#ZE9->3~4J*QLo2YW2*(k*%epmJN zr0kAIMWu%TujY6j&O4mz+k!3A9ue~jJ+yTz&7{VgFE7~xaCK`ivkS6WuDyn*cnI=* zDUkvv{Rjq`cAW0{%RYSJT{joc;pYpd=QGqTJ~GrEdKPc-O&ca>Yk@MERf^_X6F6kK zTTBee%V4du$sMLgBdqB)snePRlat5MFzByrJFLt>98a(C?r?PCtDgff-^lf1t`cQcBsTS(g z{A9qjad6!~sGVQAP2%3jf=Lb6u)FqIpUW>TrKI8*@7|ti;=)-WWp2(+(K=ghyKp0?MRX&bhlG#&zadgl+a4Os%YCf*6^1L zfn4?G2Ho|7hB%R*RUy%&+%XAKt>WkI`^6>noEI)3OaKb*87c-kyjAI*qVL79{tsE%(*Jq`gdgMS`P_Ai@b6Q za#bQ;9sI%u!cG;kcN$%!<<9LQY3s9^1@edt5L2i&zs5NZRLWLVblge^ctnQ)#rng6 z^H)G9`qZ|TzmXsz$5XMZr5SV}mHdI!bL|O4@bE?{5z8617A4MO1Pc`JHq~Um?pu!J zqOU>%VV~k0o(h7>nSi$~+iy7wPk+|V05U&5nSrh3TZkX2;0IbKp}~K-WFH=zl`=Qt z(Roh3BKdm7L0`t9C#l?Fl15r69qDXPz~#Uj#h!RmGc5>_wQccIj_#vcr;ds~?W?gV9ai0a-^W2;I{U$#LSVYV3h} zg-u1{{JfHh5+pj&9y?v$Phx84*AmB@B~9i*&y-^yvFCh+j~LM$51vxx*bAF9<8(&@ zzLyy#SIESpFQQ}H0+R)QXR<{1>8MTiDAk;A=~UOjJjJEEI`%_O#E)?!=P35j_ga+( z9pM;^yj@;QfNPFEP=p9TqR8z{^eY&gUzNBil0B& zWz`hHo-2Bhj(|>$!l{PNRlRt5yBagZK?cXwRwXv`^tdjk@4Rp(l7O?(Xka%%TBvjI zW&BO^CaFl-f{uH-BQDca$&tiPiyRkt?}HZz`~A>$d1&@c9!pr7NjbG8Yqu@s^4eI9 zLk2Iz;!m!X-;xcs#87l)Y2hL|bK}jDPt^x}bo&si>B=FjK^QIPbl?gS(B7q660m5A z3C{;;BYzzJ_?ne6)P~>;LIfkRzEac?_b+GoOp@~P&<=<9pzgd!qM9szU3L&>pk}LQpfQyM*S-B0Z~nl{TqMbC?6(`9T2=<9ff1IKoxTuO^(r(oWV zit@9O8WFwzx(OvI5tP~1kmU{OpszP?2HbXWZmMa3q^G((&*hO?t@V;}RSaMz@O)(U zat_%P_NCB_ZyXwW^$m_j%mO-@${>VEOm4$j#DgTIO)g*0^z(P4z-!?49(#gG*7A^g z4H84W-r8zGc6dRxC@?>_sQm{Jjo@#c!=!j;e{pGrq14!iwl%{;_p%LHj>g1)8_Aw8 z9&0>@Fu^*h^i>#cc#90(;W^RumMk7Xr%39&X)V{3%F%<$H#fgwNMM71#0_qzE`8Mr z$)n(*6j(Q9Z8I{_;Cl|QrVcd?)M5?jOkhRN|0cL#&7UFYk_aJQNwq|QmXUic%j#C( zyimgRH%Xov4MX0zyK?1U@Xf3>$x6o#`{D@Qti{CQ@>GW6S$zgc6v5Biye=s0jG`8q zu`H&glc^J=5ZU)3$djFMhuJe8YVs<27ux%A93IjH{hC^W-K7~|= z;$*z~&tK^yjp~U9t;rfjdL9gtcUpRPm44(c+@^S)zYBk~9lhE8c%fBgMstyR^o{TsWyjy&>XN#cU<^Ut z>)BhA1lg7b(2Z{#Qk{}4QvC2BJZ%le`y5Vf3nRg5;`2F6ovRJs|J?@&z{SB;O;RY% zcacdy7&$*uIyX#&Ja8aspd%S8^$$j@ty<#y($b!MSEVV&z>^`c-l>w_RK^_zGxO1u z%&FuXuD;0iU8@KG#k^o&cyuclZnd0)#`YwK&NwMrE6{WbA5{I+V6s~hzF?#>!rleW+Wsg6u73*f~C<&-LHcQ;sqxz zt2_5HmdDurs_BAcloD6h5e>bwVpZ^-AhmOHp;KHoneQO=2{A}NTFW9+9=nfb+WmQQ z9=h7p;t4gSK<=1HTF#yn1((P#CNe+4BK+5z#Nms9U^6S|mqMBs^b7B$B>IrNLJMnM z=~bf9FkP^Q|E*Uq6cDSGnYT#b*z%m86Rf4njZGn>p7@;1PczazGIUqUzUE;a`cch& z%Oazk%?Vt_Y)!`e7e%RR2nd{M{ip^@*{QhtRZR)Bm^H=deC6<#OASb&PgL>!fQFej zgn8p!J@fFzK*P|3(RrxTN|?wKVM74D=i=ZIgQ{E-6Z_GlK`NL@aLLK4KxHdyE26Ap zND3~ZvyW{auI={{q{Gr2a@wRp-w)z4(a29jSV`~Uyq8~#4qIfBB9CM5-Wr3PtS11I z`YT`WKIk(^Z>V^lLE^e*jbV>)<*hw-W0_ZXV8QKZP+e=wxuASSuMsj+KUW`4&-KVq zy?27*rJO!I%{m&&^(aDJ!tJkY{O1Ct$jAT6Q5uFjKOg*wg#_F_l2=U$HiLJF-jWv^ z2-;`a zq~}-J07Tgco-8pg1`qXAM4vWyz6YtFIsm1}gC1Ks%HCzdP z`)>L1dP%gHgr&SNWdH#_7(!OuM=|8FeI)CWH-zVtuO0%qu&dq1geV+-a}}7`@rDxu zz399U;Q{Wei`UpTQiNJ>G*Zl)O!dX!6WW?l@hx?$95O??O#_2mc#7UHMjiLlAvc)_ zjE@x`3>*2cd1cnp%*dHvSQb%iS73u$)fMDESlnAj&0M#Ronsc->4bFE`LXDiu?x*Qa za6w}{;#rFZ{X-u_EX$pFnx(iCk&}9Ts91qWCnT=&57}W$Q(Uj#3zB86j;qt)@q 
zyUiC@OXh?^WCz}`#J`k+@$xAhXG7XTUm>_k33JthdKqkRQwPGQ)v!JI67!?ywP~#m zn8@Y)4e8~3>@!gc9*t{T&%r+&eKw`jrd-iB2hT>6`kGf=itr<~n33v&XKN51l$7n6 zd}wFwoKpAp0pC0b02bTFyx0IrSa8GO+~CyP8KrtZS~6y-<&H^m10*j?#qeH)RvmYC z>-D8oYMk}TgVSq^k1pMmaM6x9KvulWuu|v+#&8nBGy*)TgLE# z7gv8+g}M_~UW8W++DRSj_N)ynr&N5t4=;7K|8eGm(SL&dFL~piX90ipAK=<& z?6(RK6f!-Yz`dMRYeY$k7sK8EIiF~`7KRr6`XVb6QsgX-dbVqdR57>aO_VY7{yrrZ z0dm`a*>G|(Oh_-tT;aMez=5V`$u!u*)R|O$LWb~?ympkQsVyp(5LrM-^bP&@ihwJ} zd%D*qtF)UyL*`5o)7u{7Dq~;qM__kUX@-b*t9(|sNXJp)$1?l}474SI$i7$Uj%4nO znyay*NTlrssmTOF!yc?y(`1T{h`X4J1N!5@-+8l3g45>-;g>B@Wv(K2SAU5=PuFBd^Ts3E6IZ+$MiwOrv{3*y{4?#5+k@m@fxVB}0E`;@VIu8_+Em z!o;Qo+xC9JM97gQVF;uQON;1JNi)~u!65G`Wc-i15e1yDEPiIZAuPA^lOLQv2(%hv zfkup2=?L8gR;c-HUwGKIFdOZ2LQdfK?7RclXhwO{a+Nm}WkIhQdB|(s$_kwUukQcpX+EL`K+0T^;{*wds*tfm8iCSN>(7>u zj^;%G$kw2vBL%M%(h6}d%AMRX50}uG^L8vf4gsVx?rYP4Z!q#T+sxnEA1MO~0UL%S zcre79$dbO0_>ZQ>5>PDyZ9~otz}yETc~nNFz_W$KYpvX&rPT{a?7i@wgQb^?y*G5HHeUijWvlNGCAt zbXD7p=a(1&zTZNnbu<^|>GOJ9ow%(iaylJXyOf&@hhaT7&ds)$T2%|DLsly1W^vxqd(xr85NxcYF^#;jKvA_> zO`&^7+ZQt0W7X853@kH*N|+XiNx^5ul$j34Aeox=Hua<@<1f*lmJnvq6T)m$1q3%KPz`)SRU7>htW3m<0w?$THqwgTA z)-Ec2C`s4VS(-1H(;T9o{<1h0b&}SZdq*w;DH|=7^T5j^Sj|D(T3|QX<9Gn^XUH(`&ue9vm~q)v)xKYU;zF7ay>f;bpsvI8L-u6G zizZyfJHFs56=)S*1_JY#{6=|x{Y#W~=oE&By)_k71e?Q@=02a9&eAMdt?2y=$EvF< zd>LxI3{g6%iZ1(;un2tKO}m%aNJ?VW4YVs+YNw z{59XBO<7zG`zKEu{uegaE?}5nBnhMSDovl1MZF~~Yd!SBJ5C^86-*htwAZTpPZUzW z%rw8x>*i)=H{#w!@&WGqs}&&d?APp3wAcF(_RgBQzh3=~@v@k%v1r`&S{kxCmraPV ze18{$RF3UZtj@%={`zOr89f)Kc|DOsdisP}?tkDHn4q z!8BSvx*y}IA$O=bPg)?TXAI*&y%3N=hxo>XO5;ArbvnR+4Cw8CoP~VdXy1aKUl~DP z&Z~15`0mJMFEL$rrREk%8yuMeiZdc003re%Cy{Fp(g9h>hIV$^-9>B-wl??d2%Pg|rSjIrptvP@&IpdsQuCD;8c*R)+l1U$fe)l4SL;fX*jS{ZWQ0fPAi@c%|*4GkJ zkw_os*nzm>>JxOalY+n84bw^9;)&RsD%oUp=4ccC(m50#oR89^n2iMw$FWVz6;^g| z*{xRd(AUUo(NP55`j9awXTdx6?|84*Xk@2OQ3P$s8)(KNNun@y!m}c<r%6(dplO`hTs)B!P0yzy>WxN?C}zqW=Nl0IvMr13uR2_+vz_b!r^XouG5Ff{Cp z-fm83WZ-GxP5O2d7mJ2_U<}nFKmYyU;P@=so^W+@6GcLJWIphQi``!$0-Swtu) zVVj^JIZ{Q@wpR)H-MFacK^=X;=G%H#D~8z~^A?B&^l^EXF zzU6iMt=;VPy^SE&qU0qL*`s;C(#t4#H--kDyyji?*%uhS8~d#}gCr2Y3h>}F3{0C- zb?ypiDmH;|AM`G+@$HxbnN*_0&>~e6l}z8o%{TZO7sAbxV!t zua{)r8Ck5Yyc~Q?8(SA*c%d(>7v~R0d(a_^uY8ON#I2q197Zyi#&W_qW*Hj{QnScc zY?2IsE(ipLbCdOUk0P^S{9ts+mSv?`Iw>Zm@Z?+YK33s7D-Ng~=HGsXv2W?@MF~DX zl{L7uEnfRJRt2>1rnYUc)iazgd<~alzr7kAk%qjX zN{ZgL7gKTsx;)vR@9vpMyN(E&%d@3L;$)1#jvigVBU_G?t}NU6^cH{41W;#)S~Qjt zud80g6cONQ_EA1Jfoct->Rsbo$xm&$=O}DOkW7+)uNUjL_UoP;a;eLf7T@lymOQ#Z zt&863Or!W(cLKmH8oZC)k`1?iQ5Pvs;d(;R26>OcLys>usG;%yXz0)pQI{_sh>@Gu zW&jg}qj}`zUQ6W5RSG_si{D`Zk6 zVa$AEo&nVMY|DQ`otkLwIP;DZANsp}XGpY|J^d%92l zSQ_r_O$&5JQ2Wx?qJ-WZX{Hi{4v)w-c9S{s#~h4?p;mZ5BmW{?em~OJ7Gs(rY!tr6MBp z${o+Qqn~C8*9|H%kS6Gz0q+Q^oZUiv22sJwLiZkbp~gI4&J@dp+xX-|EBBPsu|}nA zo1x@ZZ)R8TJr+2qqv1yW{9k7jTzX}v&VMj=&QO8?+X5Whp4qXvW81cE+qP}nwr$(C zZR>CJqxa5vNyi;^(Dzo=?qhmaAgeb|q%2rYWYl&=i&J0NMi)VhrOxF~dE!*;ai*>7%OC#*NgMr<-SOqUbyvzX{ihZ5eF4#l(ciGC zN=(S_p>L3St@CX8FmN4YME+arceaf$Gv;?(U`3jeqWWIVjpCRsBp`Zb=)hxsyyBF# z$rMpjqSR7T479+?hnTp^Py!C7cl|0(JCVynovovNBO6;n>Z`CGBvBX z&#D-zfM>tMzd)j2$s4v?G0<+tf z{S!-KJa9pXu$xzPVYwE*kJlW+JwyJW2nAfRb@_IDD2(D~R~&nkV=CLkk~=~vJa~^s zK)yC=%b&0(4>*0{b9PRxD@(;PwUy}EJu~3a7QMT7lT5=yawzrD-?;?8Txbbao1A~2`#w!7i*Ayr?15T@+*<{ncbVW4DnY>Th~2ix_%y$x{+*{15;gJYczf1vI@DA@1X% zf%a9(zS~&``{J)>D-se~YHeTw^7tOO>CX4fTzPQc(rPPPN0?#0`j(E=`> znEQdNt@TFvr>m7WjVK1m1#xkp(AF!&AaGVhrFq*b_C!vobWQ^$YIy4*oObL}%_q1Z zBxj^@Jk_TS9Ch8M+Bl9z2vD|no5??w|7 z;Z|e=hx$OSHpd=LROu3+p>**ni*}%x{{#b-c=QuCy*Mc>L};D(X*qv32}_)%Uo6u1W2-6*(*=JcO6VqF;Nrl@!Gx4n#H1y3j!1Ur~_ zo$~tP^$WWbx^{GU4}YoS;p4(n8pk);S={ 
zw~5)OuEl94=mKz--oNF3ptYyvjo!zTyy#%n;RREvd2i~oLkmSPH32t~ZwzSy^DnZf zu0nzrT)}OQ#Tc;mC-k3Q#08ZO%c^&)Dw4N~5b0(awdlp1uQW^*0Ki#4&2Gz?-2c(` zx8BWM3iyetOB3%9fb%Cd5LhHRt#o(LqR-Z}T!Wq8P9GaM4O365_(W9&>l_$3;xFGl z@j=pkBlAO$Qd@eblYam0yN1`L_ZAqyD>mxc9}LvDX0V{A{DQFb@&s*86zS%v zCeCZ_$$3Por0wyC3_atg8fj5aQiy_&zdGB584K5!G>P0zfoGR8?9SB|sK}|K`qSd1 zI5tQ7JSD-6e@0T_v@EfVud=t-w|y-TJ{KX`=;iKafgW7YIW#(sWm~YGs&6_?NGh^Z zyYWR37&9rY5(&M^5V{O-Ixbp3PEQ$Dd@yVwWf~Ec;aPGkMvRN(?PN}~Tw?hWQk>`5aj3J8 zE%#hl2s)z&n95Td8ZYR}X6ix>d%d8J!kc-(e*So1gWBw?AdxyAGIIol#p{TD(VL&^ z-P4Ov=~Fm%5|=ZC1M3&A|5*HMgkc`cngyOv672q>>SHYM&lHaW$vAqJa<36)!@BOj zNJXRbEnFwK3NiaAVyD^sC8E`21FarEC=v(_T7Km0G)uBpp?yICq5`{?|8&cpkH z;VG?!ysG7B>tG;^%0Zy$`ERk(lM|hT)I-F^uPeE_zOu2QySj@zl{&j2J(y~*HVgJ& zqJ3z5YHWC7M`mekIOh$bM-LPLvQB+1ozsO?DzAY#-Fa&wIrG$i3 z06B@VN!@OijzLklgshRaCC_S575P+Xg0J{2C#^#SwCex2zHGzLQ zbic;H^zda}Rz`Sa#!z3SCIE0oGEFMe)81PEDpSAU#Q5yVUEH=c;HoPrznYR?;yYCv zo8Q_X?5wL_jkTZUZ&1Z1R~LI0`vwO)z^~{Ea>}1uE^X>)t)K8~t7lc%I@VuHa|_#p z4_Wcwh40|^N@x0p21l^AH21bIK52$Igw7{J5J&`q$S` zQ*FJU{)>@H!R6!yfON!nQd%45NAT3-Ze6`Z03NL`CnK57fpjb zQqw&xol>JcO$}%&+RxYXg|gtEU|9Y2_ zk#q4^qhGh{$(7V4bz|b#eQf&EbVDrid5wY6#T( zTAZ$pcJP^?0E?1uBkyEChYd!5mP%xjV?N}j`<<}uKWAd9#JiQPqOd~i8A_OTZeSnK z?oj}R?@R^7N9O6o_rNG_8#Q_+Vh+Y%Nz21sDE9OC^yckmVC7VF3+Au~5KBsno02sO zF-iNJ#5O#-{PO%L;&TI&d8J&#M)g*Ba%LP3vw+lc zEvw`#!9TvQyS||xUXpMTl|`N|p>Z+q)regM+Xi%G@Fg}M@C=B|tKb4EY53f!OKr9n!MFuO?nU$KS9>}*eIo12O9g^k&?BCa7io$V^`rJN(H2+N<{N>?EN_Uc6oNge0$F5-8 z6XL*|Rm?(CcRF>u2uy4gytHm~5QTa2Nm#FuVYyX}8Ehs^gteYpFn^h7U?aywVWwyu zFvp@`JYgh47-3%Wr4F(%%Dc>2fzV-l$k87olt|RFa)uUuv7MX*e3UeE|E~@_A#q>dUq5D8>fd&Sa73uX)tg#>0ea3KEHSP* zNcZk8u$w_L(AEhM#}aO z_2}@x?9wxj#z(rb#XC zKd^u3*avN=!+_}|8+Y|MG!&-bd}DKCJ49==ZrvzbrBM0kVcz>!W2G&W0D~C)9Fu?# z@r!k~uxg+5rjmqBIG%k(_|EH}*96srn%okrrGM7WRMA=vrBU}q=;<>`pR~fUuJ?vt zc=r=v2vM?FqqM{N#^uW!-DT+btf`eU5(-MDh~p|LIUKO}mao8tXwb$eNbYeb0p2JF zOzDyeTcAZv*M8Zy11xdclv@A6^WJm6F%Zs%$VmCq`V@w3N%tsuTspMZnWu&D*d2vG z45S|-Tv6feQXiqd_#u7fI&bEfT)PlNLb-3za0ORsp>Go_$cbd zNs3qlGi%Ab-e7pRfiqsJ4z9!joV9Y2&Pz^?ClvVJBHVjfUe&hRMHGt;`NGptAFDN4 z2tn>YtXTDNhB^nE6!a5)GOgkDn%9l2x#iGeK7&!*{&vaBIvwG{j^bOB?`?o__B$M%8D-sNuzp60{>^CC*Ilqo3?S^YT;C)1>I0{zX0}OX;yE*eQlRn;HV>dTwJyZ{r(Wi2HkXC`( zdFZ(8fCizX|@I2>apJ{@GE=mVzaZ3lOXdAUp z@jtO9S|Y(w4VkNw(z!zm9{yfAKh@;tKtl9N>-=0)O{kZ8i=F%ce994>qU%(rP~-Gm z@M|(Kh#HN&T9r&bd%NvZe-*$y0eJ?nZRBNE(0^Eg3_?kfCh`*zdvAEM{*oFz zd7nt0&u|>IO*=pmx2@to`TLZaoG=pO${_0>1T&Ez5->Z$NtqC z7pu5NQRqi%14|#IZ%j~V8@D_qg)KM%9>`<3p>F?sptN5M1R4ueQ{~?BCjD-^5qc>F z;GnnQ>hSH#u;7##2fYd;l4xEenEQllek4)4#prrf<@xChdcT{?QVD-0mVR#tro5&& zJC`&chN?wiA*%7J?Cb{K>9&`1kKJG+<_g=r+uS!RHRY?M7VGb z2g+#(_3ZmWbp>xifL`3);wT$D4w7T|6hGL&VEmPIGaoe#Z*Q zBVP+ir{}nn5lS8x5H`dE=D;4U)&W;^)HK>Gt?3+D8kdRrHl+x4Yo&vKoO%OQT%Ol#BoB;TIJ$ zsyOV3NH{Y-iC`PkwEJaUk4+; z8Z}p9yVJp})9Ip8d}(px3mT8Ktn)^WLrlaK*SnX@%P*+BI@P>z<(&iLHgQaamA>Un!+!cPDLJ=*vgo9 z+eRf$O4xN1dbpyjl6eUqbAOLN#U4GFWG^lZN6bTPvt`h7VJ{RPg}x)rL#b^exK_Eb zO0c4&u`T&fuf}s)R}=30#tJg38HyF0yO%o_#?5u09S;dRMiEI{|t(Y z5}h&ClLP3FK3Df$EqjZZ%-e>LwbjjAPrZWYZbA zWd{~J2NcPxK*8cR=rcSW5@iJ(__bF`+a5ZyYeq+lsdC~=G8V@aTghtsFe#tnG@gW# z>gKO1c##~fx07v@^9z%2AJ;CQPhHz+ghI*nj!D3G;iWLavL{?bl5~T-5o_$O#J`Hq zDv6@oU}d-_&eZX=(F-6$$-LHg5YxNNBPkc+pCshn1L(^l&IAhtK=QT$VA6|t+BTmF zwKU$uO5M!>#rh!H&E&5QXFvb;P`7GU6~4bEF7odCCcV7%-(%%ZAM)XE4_~}qK4$!oJxSzMC`MrIHYRV9#XI{9&UvzJ9<}Ga!7a~OK*%)oX|{td z3Om%3yBz5OiI@=>l-QMBc(Hi$x237uW5aF9bCSWLRjF(MPcty=xCahL6msgmZ^j=^ z7K0vLSg?V>E1N9QP-5R#k9gsA@Vyo1pmq#ItK0tchb8ku#p|+qzn9BB<=3$LUN>t} zP_PMOPLy5)^avsOB9!s$A?=&jK1f~40jm+d3+^cZDlSA@n=xk7oX{%ku>zcwvQ}jB==FA+XSOaw1o+LcCmF1- 
zT37*$3dTdSNA*M3m^0qnNQ?g@!(88^;!04CKV4dGmKVK5fhBg0x*?094${2)?D*iu zhma~TxPY zy&$-EqWK0a5W}0bHz=V%xPiql@T0_fhjH;N3 z?6SQ`0WXv#7Y{uU?_VYoa-gd*VY6h;W(69zO>-i-y?sH}HYf#qN|v@VZ#Nwp!%47D zxXaWk17Mbfm)CNW(ASI}oZybJB`38@S+es@<-8+`b=rH^GxK>)GSNx?Gg4cc+|#aU zk)G6DUdoPK!AuieV#~^sE|BZRu^3U)bfx$G?O|+y21rwg`(i>C-kR)O?q9j{(^arh zC+DUD?rX=SYQuI{Lm0O|W?^$Z3Q0fiKbTaTf;ks{L(__|uEbQBh%Pn;$p?q3h8vXE zOf{p0>HE){_rI%C;4~8K?WSiEkYbL!qWsB=Ib^(NtCJ9 zSn8#_9D`QqZgiR0<*SnlmrZ4LFHJYi@cRT76!B4ZDENie+Qc&h;7npucQdqSkX?J* z8!0G1Fz4O5pwi_o-7jk5BD5n1a43C)DUGU|@Gwv%gE>d$+@#0s7=XX6=UE zmln8%m)wY#ZV>3E`DKjAY!Ysl(An{ga{0lkij*QUJx1tvTZ3FphBepkUUf$hFmbnr zWtqL)FyPm*UOc(76K^GN2z0~p%F7ziA>e|ul|u*xNI4p6K1(oIq@1bs|9ieA&=C}z zzq`1fO31T`8jU-Gb0R_kURLY6eXY2dTj5?b^lNlOlx%RdqJ?d+R+|?4%Ad=~&a_<| zZ&qTPVGs(_`e)`hbmf7P{qxGr=uixZ?9x{=HsOZ<)kDC?{>mmYWjq4n;5qUTA;y-w z6l={!I!*~!x!R{vS0>AIGj|^DIpiXTeX$J;;*%*^$dnQ1#tvE!Eg$~9Wnz3C*D!L> zo4wt=sJvopML38s%CsOlI4F5_qOc9zEDh*vL3Cq$?nZSWc3Kz|38HajjMhBA%<>k4 z$UYR|3#cS=4hdJzr|b%R#4+BC-o()P?pk&=?4;mqaa{yY<=uQ0u%CicZU>5>@BU{m zH;mq-I9R@;4hq;yvwnrH162wrM2va zQeSKdaRzN)pEpW2j6asu`woEcQPCU5A4Q^r!}<^DK0y(5|j7@4LrIOQD-7f@$gj;c^TQPC^;y=&)ha7%EXD z?~HS|@Puhq-x8#W*zFNlxDsv34t3k%3LZh1hZj0qx*sOw@QB2(;yP+rp3z9#?OfTS z>vX%c?xy>>h(=bE7$DzW{Bm%(2jlTT7FNL%ns6V}5j%K&vZFsgmKw5`mX#L#PXxc3 zf#~{M$BkFyI~n+|K0IV-zNTt8vx!MFS>M`M5>JFMJ+TIg1tSxzJjIB1@B zOd6ow$d;$D%yKEAdAX8EzgO@^asQ>L2W4%;F@=lM9XZ-ppW@bt#MSBtww!wM$&g^G z99ILCnvgbNuPr~bqP!8l5IgZQS(19Aa4`X0c`T87EOW3=jC*?oe!<9XyAun;Vf9g0 zLXik5iI}E@-W6p|DokqEdyy!7#J)vrBw<^|H&qq3d-$#FfpguRSQ3@YEh(U#p$pa{ z`-H>|_R$ZQs?d|GiL$}~J1W$K9Ec}OpTGrj$m?#L zSgzP;YGz3qC!2=clJPASezqD1p(JmJ!@XKJi?3_M`C(-DL)Yhz15fr`v?Jwjl09Y5 zYg^B-HpQ!M9}(6*Ea`c*?y;*h_~Zmi70nG3h4Q6E4HCmGJRxq#jz4*>G$ZOVn$1b< zu@zXcC8}rJj=D?e{ck$tHz$(17b$4li4mBv+m%F# ziSxesIPb#NmMXeE26Epd(NS#({?#e-Kec^&r(ums-F^Z-Nf;!&ja zO?J)Wm6xx3(<|U`i4~iwFLZ zsdy0}QjT&ndtX-y3>ed}ITN*m(qm`wzS=z>!*MkbN=x+t$o9eM3v(|G678_ku!*Cg zs6hv5jD-vaG>K%j_p*c%eIGhbYCZntE+$mrmHV384zNp1QvZxENfdS3&M1wzl-t9ayx zCE{{4=$!m-o3|mJQ5DC>K}(HF_f@ZuxB$11qOs4FwGE`!QQ55!e8lL2WfEd%USMi^ z?JFN|8%2w1ixO9V(K(`amY?q^T7bz4Mn~1=?i}V$Ah-i2R-TC$4o~3AE`y;(Q+eAY zYY6yxT&c3IU(a})gV93q$h&>>NqFiaNKsur)^Yl0QCk5pOBkQ^C{Fo=>az-nEN=bW zXk@88`1KqOC=3E$yuUU1%4r!{RHQbk!Y6G!p2DBuy#PWU^OiG%fQR4$5jSAXxWfL^ zzdP5v0^dXeKRQvoV!+;}ENL=21}KvHAbA9@@z$z}`zN&=jfT36?=8L$ z)p7j^I06Jc#Z$9RzK^6T&|VQbSTHUjPjzg+$zbwl?wEfkp)ouOuowD!9svlG>dDii zY9rPTUSP7fgZil)3CA6Tf@g85Hn3C8a@O-Bt=ip1Z>mLR%ekapK;I=PBH;GobAH^h ziA8Vv`Ouq)N+EEq4ej&$x?@h!%ky#9Mw*Uv_Yyt-xdIn=blf^_{O7P4-LZpz1flI# z#A+8P{w$P1V$OfFGxN|I72I7+L~DLENDwPP5JxTcv5mj>5XxI-dM#dFXNrA9*Y|Ai z8|Vo|7s^dL!g*~f=LiGjlaJA{MMtsP49B}bQA$?bv%HJDP53;nJ%YJz(zPUTLQUlR z+SvaU5zegrN~7->WXZmlKU4(JC+NQ>MXW*QSS;r>uo_P8k8<5>4Gv>PQ>6OvH^A3Q zI;zzu-tZM)Z41Os;XAna-t?k!2R`rV4?2K!1~ccI0*$4aZc=S{g>6 z;A%Vsl$=Y*k9(;aBJl}YuB+E6D@y-RzaZz{2u+mvBz{{gP z<%y}?BjaYX4Os+_70VxO^F}l3CEceMpO#`;l>(cHT(U|kzAuDIeJD?Ahb?-(TVQF|LOWY_H8ch z^K2ULL!^nHn9ViTf!5gA=n3*iEbe>$YoPE~eK{jSQggIRC|A_BUnF`+!{Ns~$cl^e zWr(D{+wB(XHK$Atu9$LR$(_;}(;J-RF!CW57)f0B&lPvky+KPfINow=M;X|zyW0(3 ztDxE*M2!eK;2339k)epG5I^Ef90Q8GHY8X-%z7odA~0f6omJf65q*^^dXv4u%N-R( zgsWim0g-)PXvelq8J@w2-lcqS=jY@nh>Mqz|EA%}Zb8~opu|LC9Y*N$3xA?r+M|^`i!9|ByT6GfA(0sr4X!&=xd-g}s8txJ}h^xV9J%<*FoTySjhD1k*A#k=0|&hO$Nw1{@3#FU~!u<=_!?x%Mt` zc+L6vE;Q0SRT^M3MRgru2z*z}#li@s0Jm<)wW5M6u9l_$O6+BSbxtTzS>30%H`lar zqMP&##u{{JPkOFUU8HYXQS0v_eTaD(2Tt*oU9m#P1>mvvdkib8HVqnmNQ`iR!#@69 zmgaI2dY;)q(DV+~orjlHWD_Zm^-sGuqHTDT;I%Ki)a??Btf+X6irS}SULsF}ywkym zlfSvx?&W+%tOn?P5%4CS$iLV@A6v&gjp+680oAB73*jGAsb7zn)@!j~ 
z_=WHaQ0ta&2`qj4E|k9+>Sy{wb+>oYc@Rn3*IcVe8+&OEH=+J9H`M^H{9Bi^dYRxqXl)(C5Shtmw=(BDK~^ugi>tc_BETNI zc|h8i-k!!MF3chKG#jg6xZ&`l?l-8TM%E@y8~*V|?OF?-wF+@tbd(8au3L?1^|xYU z?6h)EJX@`aeXXi>efr4ArMs1XK%(E2stIE$7T4n*AF^;edrfo;Iugv4z z=Kb7*pyTtVKO#}pg~`$*_C`5&E-L%7pu1g-@U))b2=Xh?jj6UuWZU{flKD?=`Z|eT zYAOFHy&Dcq+wy@(`>HpYs!!cPLTCciHW{tKgsiC=v zFC`B6T98=}*$%|05kh}M?~X*Rf2*zw3gdwYo;(g0+G8zKSjIW0m@2j$j^4na5 z1_eNY6x_SPn4UYK$D>D8fR3G=!Kg{`;eOP|v-7ix(D)tKeazR}T9aWVVJSv&MFKFx0TiESOexK;-PpywulKI+O0sic zu!jw=zF}C{AVFBZnTec7SmN3Q)bkr1xby2{o`~9Fw-SK%&I;^g4t3Q2nBKZ(asQpv)d=Km_9^kXams6Q z$V>dna%2FU*LE1Yh~Hee$GXjyAqrWVV{ABL(&50!iq3>p*u5FU!VR`=>DeBI9xR2aYi7+}>OB_Q0npjzCD|B{5BFyeMq% zZ_ljp^6TR|z&JbEOF~}*V%IKKPFzMq4=j_CjUy}K7I@tL2VO_F&G`H0N3Zfcn3f8{?>xfsQ*wa1 zp1@{MGa|8P_2d2cO=;g`-1US&gN;_%2!2Q-6_x_jZ%LhWn4A=oDy{Wt))gE{-rxXy zr){=fh&Qe)GjsPgO_{(l`mO95L&#`(Eswhh?|C~7##S3NAJDngZ->&PXIg`ofLhm` ztfd_#A17T##C|ZUM^LCEZ#QDUhcf1ZVYVOuj)SWhJ&avApjzr5Ri0gu{F?;K;XK=> zk(&cuyaN)lmLZA9C*kR(<=9rxWVM>~HJ zoD)FXNmfvE4P|rSB5XKZ{0O~bu`R5( zi2e(EAm=a(jQLBk3Cg-qAvtkYDreLdcdVf3Efk321cO0CtH&tQ%3)qu%k0sSZJ*3gjO`qg-Ln@K`_$i!%% zRlJ{kaH9k7x~M;g5Mh$@SAI{@4Csos2|OH?{<1A7=PT623)Njnd2o6DGxl#Oz;6-G zV;^lFGW1#B-Yu*oYC;qse@Sb=+3loewwl)_N8B$P704@d_|o!5ia$~!h^ug%w?TD7 zsFF~SlMouG`+?+~CLC8*py@@cL|;rs)oV3+qYS|WH8J(l#f)VR%510F1gxwz`fqvL z@>i_NY-k1niF<#<_@m`HEa1bIr`lpl6W1P!8iW&m3kcI|*gJvBg(7}J)hQOmlQR~} zbcx?rB)sr}q@IV4YGLZ!Q!EIumMkg1(Z~k*EOLs`5X9Yd`Mq_!Qy zgQB|lXuQ8A*V~u(v=CZYiXr1k+#BcAV3q2`-=b&+BSroD{dUT z%gY{Sxo^k5xh(8A-?%gUN#^lBaLoGFMp0y2D%!$;*^9I^4oKP5{F*!V{MtpuoIY zmnWKjQhSnQ&22-5?){jA45?HE1mh^Y&7mzYGhk3OAYO*96G^ihIO9?S2^PVtA5a)* z$GK5^`hdEW6n|&%3ot3e*dvLlaL=xQKw#MyvMKJ`5R!izO310mG%pr`FBOw)I5m-O zEvI+RKWmM?cY~dK?nQf1dODi<*t===JpM*7-(pNFmwA^HN)EvRDHasdCib9j}ES0AmQ`4PW6y63mbGaFhm8ei^!pDJi+8k9dSLm&}W|j?XT$ z0zs&u^uyQ46xRLpumJ4W34w37Yt(C|m%Ct?)Pr(*b^lC}$33tXaxQ@w@KEB9z*eSq zQmXof47>&9XIC@1mf31l1JB|`0Z1TW~@nkQ4)<5 z0Fh5bHFvzy8j_VU)tM~}moWCyZ|l6umQU-^*U7_C@1WLf^uc)aM5EE`HKF)zW4O9B zV~l{fZR@XQlTo7XC6d4P%Yad8-b*1~M(X2u)u+=7to&TAoO07@H(8ywV{Sderj z*QWK!l{c~yXE>Ij&EAi3fEzW4Bg-u4d8yTa-ZCJ+UAi5Egp`(%QGDUWnroGksB$2v zDY|m*k8hM ze1vOK_~`1c=-YG%?R#Ce4PUU20>=CbL{`oyL{Z|>{eJTEs3$4{>YXS%bM<_WZ6n$p zhFja-J?h6^r7Kc@$$ODWDrw>!eJwu}Bn}1#z3?&g8nbqEEyS#FQkz9;g{zKE$68D1FqU2d_cRyA_V%)aFM&0k%WU zy0!zWw@i&UhJM;QdB96o-9eRa$=mge z2EH`^Fr-L{uSr3e`b9aD8+8ycWP{QRAD^NaX90lEGidQir$Ea#llfdtAyOZl;U|)LJ!>XY3VOO&rxNb^QE-pIj8lk!>^hj8=Oi1t zYJ#|Auc~z#KZyZGV^8$UvMJ>QdsQQ+Ib!sFnQz6?^bz9YGiXmbZd_CzZ|&+_B-60! 
zMG|}$&|&hw+9PDTIl2w;E$f#*DPrZhFDdjyujChBDAGp51jNp58@wH3Z*4JP2n^0ufgV|AN z&t=7ziBLRnehikd=iHW8{%L<~Q@B&I{#AsQW#Mw!&K1Mbq(zfd+)_Mx1#2D8xb5bf zl@D^%89l`Y*S;d6B3-dEjw(8ODv>}!Nof#ZV}1R`y%2K^(7W@-b7*_m5~-z!<#;V0 z>P$m)*^W&B*vn|d6^_gNm5;{o&9YgwduXZ}jS+X>)ktN5MpC~Oxa|SXtlDkA zR`zXXGJ*s*PHM#f2Jvs0=ZHqg2eM2S3h1;sqfj{a^@lD)nkXhc$iOMN)r@Q1hpi1Fw zKnGA$B-vuivx5jMBo_5_|1^UM_(;~%o&{Zg64&l5J*Ukz_!o?50kmk)P z6)h94;R#)BG0*Q)uV_}ea?kBKW0))dF6a?IuXRQ-*9*1=JWBSlMB zqt*95<*|2qi#K`I!Uce#wR2XWKX9;XlI*0=->k)X26D&B|sJ+ejNHk1#P_yl8_ z!4$K=F(<{Su&oH`hYY46pvJ?^RkKYdObj3z4D5&W5)ki;T8=WD6c9zmNGvQKOn@rLa@+)D$DASK*3nEp zQ{|-&gwx4eG{p*~wrnZCBbw2*>Wdbaic5KJ7|;+WTSRCHLmHRvnSjHl9WMo*QZpv~)=0Loui;ar{-Sr+4Q zk#i$oq~%gkbS1thOsve!fgYPGP{!j}fygIk_-5*|j$|-**026!6{SH#0U7pOmnGPO z1;I=n47vKphgma@Xt6>T7P&b0*J8Q+i@~Qd2K|!NFko(m$e$ zj0I9|SaJ*Dso8SpRmuBbsoUV8_W(?+T?Mg6n(0C*sSCWYy`ZbwlhmAEuPy`Pln#i* z1>S*RqOV%sj%*0eX}y7=jeML@*M!vde*k(6N4EL?dQC_$vX`Qrtlh7a+l>0-B}<2< zcTvd|e#qTS_)5+uJ9-=Mp~4C(XlNT&dH*=#TWAd8T|bkr&%kFpE@oS+A&RB72|f|2 z9Ueg;DO4K}zWShzw@g(fl?R?T~Vl{;H9aUBntLSb-<0DUDAMa}=0{}DRQr+ui=UgAWyRRgk zwqeX3^K+vnBNX~poq~BmSSM~>>r_TLxH*QkLX|3}z7u()0-fttv!w~?p96i8hw{2O zA}ciMoB&6f4h8+Gd3~P`9WQK3GwZ)bDUAnscZ4hdYGwXGKLT-{jq)oUGIqyOGV^g7 zlF}8G2!-)Qi^Hd(L#EnI`g&-LM6WR`fl5Z_6yv@+CQQ~_BV!K3pT2~*hL^9teSpJ% zAOm+yYg=LLI5#uH^GZ{u>m#~k`8L_!=7+1*icz#g(U4jp0H`c9zd&5aRcF->yx|Mp zJj~1p*8~CzviiGQqh=XgTJ{8JrP#JCFyFZP!l{IolubSscKyO%1ydJ+4srswImmzp zbUSv|CX~#U(#h9t1%G{pE-ikdfFuSFDveq$MZ#-ZzQ8k5=MdC~Q^K{XUV|e8>d4BS z3#xi|=W}#1|6wGC^xMn{2O10!Ft=^rj&cX8JDI-V4bT&V>@fUiM0Z1k^J92JONhi} zr<49jP2>XlqLuaoLl&ETP~@0p3ZMO0znZiilm4k|{NaZ@#sZbQcu9tm zF2)G-S_f<#Je-E)k|)|h?G`03GYlf0y*~!zb!>ub93wnT9!kp#y>L=bWW^eCC1~br zj9H}iWjkFfD6%5#o8jt8$d~w~|4ec=fSl4~=|g8lH}17SzfHhY!645J*o=*`hsIue zP8*A3E=x=0CxH-nYCGNQGF2}-dXf10d<@M0lcs`*)ho4h= zOkV`(32hJwCpbprKCm=z%(|1V>=({3Bq4W zvSLltB0Um7g%CTv-9`VHY1VdZhA$lD5~1V@=XM3(z3Z*<$`Wk>aDB2}GW)}Pakg3; zc58QK8cOG;4XSo1h81QW=Ga(HEKww^V%M=}QOWIKf(I|GKs1x49Ol0ynVWa`!Ycat z-8!r*9p^5SRN;)|rfLW4_@5PQ7VmA?Gi)PbG6OgaPt}I{s+x8%lOcw!3AyJ$Ht?+T(#3HM6}W-8JG*J|g*^n7Sw_bOihAui$mnM=C)4%}ZsFh*l_UJ$@x z95vwaQWvrq8}HBWFQSYjF(Leqtb1-}`IN?N9eITKn!oJ>32-6plCx$3?cFHB#6>E9Tb($g)e&A7^`b>^xN02kyLF~`Qh2pa zswm?wfjK44*K59j*^|bbMmG^P2VxH*&P_#A5W#j}~}AkkhTYUS%P12kfP4 zTW_35OmQhys~Kq|u4E|>rq2Q5bBIoO!^bilxXqny(7jwydWTz=Wdx?y)|>Q!d^k8P z!n>8w4m*8(wD}Ql|vOMWh_an+|j>S!kr4-r{@D8 z44CavO-u58U=pQ7acg+UYj2%U9x|mE9GU6h1r#UWg{kd_w)RGDi#E zP}I2e?feP;Nps|}XVJK5@j*Jo$=TgNV59JoH zBDWf^-i{OXZ=T|w*?xTxQkxU(AxcbqS7_o-GNsH-bcq717!97SdMIes+-p%oDxXKCZ9rwWiB6;Ykm=eUV1r^PmYRJ0`BEpHO(RULP)&`&V z`ML)vJE9I7IKGDFbMFAQa?ZP{Q>mN|igMU-_D^%ol5XYn=TOI{Q5bopj&*#@1Jf5~ zQV0OgC^hyO{IT}9Yf!0D>|Z5G?xSQ!kc#JRI)k`vr>dawY&LOos1g!E@zq&d?KQ?Z zx(|q?=^Q;VG1pz;XA;gEnQ$Woz$N7M`N2!1UU6NWxNBn7$SV$2R_44c?h-!>8z-2a zdP^VPD|#*vqJhBGhFBys&JGR+JXr-*{`E`QH7ik3Ll%7sTKiqz8Y|q~ioX=rF_Qp> zakz7JLvY_S{X#EvDjv5T;BRNcS5*w5m8u#?!-&K2==5($UJn1and9w&7meIu0SPTh z1CD>Vr|zqmNlEE^#=4=wdGRDW)6W8671&6;{vWG-g|a2FB#ZRUIsQDHhLK#mzh~LCK^i;8{ZX?X8@&6T13p%A zhfD#w9GG%d-WFt>qng!H#oayD$mt2(8K=4VH`istNPP_BnkY0v;bCY>{KT1hMiPky z+IbB91t&K^j&W`PjP+Z5d2$i3s=9cv)^r>&V-7kQ7&dlP;-lc=$sziRwBega6=PE+ zX~@q^0XwRsH{vmXUW0p*0M%xw(cIOhX(#!zgTuF&;*AO65&>lBWs46{Z&6Y2n|0v8Kfhl&2eP}msqMK3FCBRtGi$QD za8Ou+Tm`wM9D=^cNTDrj21sMJ=RPt>S;zlCm^kn*<(PyQH8NfaI`mKVO3J1yII|9~ zmrO%=MCw@^^p)sIeOm9^SRl^lGum5ujATXHE}1(>E#^2sds7!AR?B7HHJf<{q6xOSB4TB68WV=<+;V#{q^FvC+y# zM+%gJNDT$$Y`uOxil(_lQHv05T-c-N`)#q*@b@Y1rsttzJ^)=issk!HAVT)>vaj2R`}bih{a`ZbWdGvR`; zqH4f=L2anw1wz{kOmk@TyFEC?r45VOe)qcr%|;eAg5()fG`Ah=f~unI)o*oqL?k5$ 
z^OHqBJh~kW=_8Bb#MWF9-5ps}RBYIU$QwkCNf~3UUJ|IeU1Qv>%`4<=h3w6fvwe@b; zF-@SuZMStR>%*)$aF+h<7QR|jnxd$`DP+|~SY1*PehqmzJ#K=*t%7qFRclxG0A2)x zCn}t}9(&Pn3a zm3L^}?lpaoLn`KCbv(3QtG6SUafBf;^ov4D>qbBo^gzsBNtktW5~9h>u99YxEcxFu zRMdD{(k*bMz(N>af+ty+t`7vc#Sx5tmHC&!>g)VW>ibNOtv}XxC&wW!mLmf+=lSLh zkWH{26}iF|f$^IymKz#@woRq}fHxAyP{u`I@)0^M$oPJ1voUeY*7vA^EF%(6=)2sn z99Nsd`d<64QMD_xO_+oyV}(1 zRK;soGjqW2jf0;QhCq|M&^`l4yj=18Bh-5-C+{<-F7L|5$&EPtpr4qUwY6Q!VEe1m zu|_J|`0%?&pyLJhy&jUcIRHGeHTIKbjI$J$nh*D&LyfY(Ws~eznS}Fin;GL>;4|{h zm7rv3Ed4nT7p%AlM16w`EQ>;A8(NSY21d$m9@1oLj6R7947)nn8Fn0#iKA8UGrsyH zoUGx&LE|ocgqzL?hlPDjkz!{UA*Ra`CRK4V#te-|UL}-F0{fAnnaq~;FrH33UFmw? znYr^S>G`W1GcFIzK({ElyGnc;SJ6@-DK(exyp8CJMY4?M(Ndv_e70M$bsS2KOy>lp zD0i=OSUau2)G|hc_igD(+e}`8^J(Q#NvpX-Oo!olxb(IjG0KDxst!@v5VPf0YK#l< zQf|}f7dtk2L_xEC%orCHqttE{0UvuRxG^PyaJx`X_&KdfYMa&NtONFPkERI~oJ_N# zvn=z^R2771bg2_4s-GX%ig(6(zU%f0(oArpn)3sde-skAge?9g*&oY(j`Lfl3sN(gWp8?;^aLbyrChQ3IP|~22k>HB_l!7&@$8^nw8`js!CnNZJ_-GsaAb^57*oPSn3i6uCSSDI2u!Q*+iMYP0cP4)o-M9!edp+qISws!i z@#{uspej!s@@|%BCtQM_r-C^v`fIAz2| z0rN=T#R=vOBHqdLx#6=g|C-!1xaqLD(k8ETpHuv6%@hNSKPfzq@LV%_NoNZJWLB5k zmL1a`lG^~dZ zgw(r)16BtnDg*?LuEx)8h7FV(+kheTQ-*cs2mw;vnj|)a4J26wF}(pPg4E>U0wwz9 z0iwgZ4BG7}X{+mWifRLz1Y(FMg1ybg9bEu9e8v1vbT{X!_b0mh4O9o)JH9r#{ZtKE zg8)?51wIJ>${EDjQ8FM1{h#RWR-Mnl_NXepuC5+kPG5XY;J?2W$@Aai3rp*hPY$-2 zx43YQb-o?Yu62&@I`e|7xKp_2)(|Zoz^%c2df&r8OKyRNn39p1D>|2)ERq(tf>y0< zfmhXEw4fA$tfS+T*igW2t(|k7PTyD40Zac^PUGSl>iKzabvsl2Jqdhy4ic={P*CU6 zIn?Cn7h?zBB5Cx=OY8ich5+Q=60jCzAO9WH7LxOAg|xMkSr{PG4|i&acn1O`oJLhO zj!@%34D9;tK-2i~67{h|bQciQ`%QoU^&RzogY(Il@cRA2|IORngbP+=YyQFk^u5j; z_>bJg9MN;*Kh518aZqNLuN~jX%U-0~@9dSHon_!p+O2Qc%$&+K9i73;>g_*9#}<40 zCI{b`K<-HpKEZ(%h8nOofhE|7)ehgQ&6hz$7Er4MkEJ())!%wEgXl3#%+mCxyBdJUFA` z2Ks)uO;^<2>_gohn4BJfG(6rl3Gj(?D8F-YeETE}{uUklGO!?L)v&0e>G`S2c7Tde4~}iw-M38p#;n_G)Zr@u?M<)v3~+^Fr^ZR|I9e@o$|?MPbI z9kn%Rq#$_Y>+nPa(KSHPZb(qJd-v|L+MyX=BxXc$j~pc}=XKB?58n2hJTKMYsUO6u zB~#|3e%IqYP6?L9N>zLlmr--wwWwj|gym@fC9q-L);HtK+Iw9|a{sAP zQFG7$y0lKw5cBX=jsH#3&wE=1`vTyiK|wVWGOV|5lKJwTAZbOOa7|47$+jO$zQ_TY zaA~5=SiD6LW#CU0=YEVv?1t(T9K!%<^dI&icjF~bs*ml$Jzp|FEP03hiWCD9eY%Xx zkx#1a=DgkPS}MiRC$ydVpzD1>tx};W>{z9dqC(3BT7@jI7MD8rOg^pF_aP!6$7@Q+ zg!H%YV&R@SxwLw==Pz>)vKjYeA}xyD=UCW)_mGZAb^mcF?Vnv!dVuYv5Pi!B(_pz=5Z#k8*#tGO)@3*%cR@OV$)Wu+N3hB8xZyUsTeHHD~r3v1W-AR8zhM7MGSf)d1c!b z(^YW)xU2~Og>DnG!xn!CzSEimj4Jm~fhzFS$4YxfGM0vMnK;kOQVQ2?QjVcz0Z*Z{ z)O1T06$l)Yg4YmXP4n?1hf)}KEkM}$g)^PMEGDXp5-Z<^R(tY-#eV5m>qtLcS@HdJ0J98U4znelJVa?bm@z{!>^NxtNYCorcdr8hewptMr?ko;rno3 zx>xkc=g;5|1W6(HbM~!3_`@~w%Hl@&w(ecGF~OW>81a#VGGTbgMi2hAb%4+%P0Pt@ zL5*Xyv!;kzu53mWChLHv!LLT;fOK2|jIG*NMU)}jnG%*$^4Pmux@h`MAe+7&gOnvm6uw1O2|UgJNn(N%~dM=DJS z(0q3de&U084*4?V!9DXJnt>N>pW5RfD$|irOnP1^lqVw34gTwl`T^}>9*N)th~{VE zMx(7s{0kA%MchAi+&b@l7{xA{ZDotsYt;a9rZZg8MHS4H)F2DxbJ!_78SoLRmK9uinzJ2b}3$QvT+`&tSC+9uV8fgTf$y!oZ70#k| z{4~DOh(wQAG<;nvsuiCic9>{ewLA%&k^&NhU-a2Mtnx7&w&QB6yhCzd&oO8BG4sx0 z5RWL=Y2|!B`qHE2h1+Oiabd2j9^~xyBbF%nMmn0=zfoDy*%%m5p4Riv%t=>0IR>f? 
z%qf+~2~C}OU%H#zF;af>5XE!1xQ(Zj9A0vQPYn06_V6x<)frHv(_6|Mj4cUX))YgR zR@)*p!$;VAbcCuZ<|Z)3{k;E!JXhke;gbb7!TzVNv_FJ1Wn?QL58JPI#c9x={)YsY z${FI4RF|MdLW}7D`c}8?PY4vX9X+;=x26cH_?^eyDRaI=@Og=$l#n<>D%IuNGx6kI zbZ&q*vfwFrx0x)V*v?3AlT>2lrWU?jA*UL?MQ+&?w*!CTyeI4&F}NXQPiQ9CCQ!UU)^vOb_v zvOw^>w}OnH4cq*)l}b|;GbHAs!lHebFM8V&Bj$)hECXS}s3c60?s~-f`zOBEwW`(T zmT7fVdH--6z16_f8LmvO7WQ3fc8W!1lKy?ZFydtJbRI^sZhe}VVYZMCT$nf!TgeHt zAi}*34=uS8cBk~F-z3%svtgIB)MP*aUQP>Vg>+~Z@O{Xq-a3WL zWuNo=`Izy%Cr9#NNnEk`t5n811#K*>#y@vENvqVGdGxM#t4F^Bw`7_d{j;+s>pe>C zs0Yd|89tXC_Bm(CL6igqjLsSK_^d;41RP#b8>J8w{S|@7_};vI$L+eGttPP+sWJ$Y zn!BME&uDGONf=aIsPM-Jb6yMt5*_%9ma?E6DnCO1F)jsSB=SQ%!%Gh3A_z`*VP-KX zc;k?D?jlaGB(6JkX}*cGv@V~uPkw^Ao_z3PZ~6W2rJM$f!fokbBUrxiv$fm+(>Z-i zArpc2;=5ixuSBc(V;-bPfm#vEk`#6uPzm|u*yV-1;0-#t0=fYr>ECdMZa$`p={!h| zKb>XGs_+>;MozNy7TdG4{hX~T3S5mIVP=AYVf;Nxy|WwDC8x;cj2T7ea&@0uC*xL>@)Q(svXSux?;Jj?WG#-wg&aVTH5k{p8t{Ys&F*o*W zQoS%c`rfOBuh4jY1{r@tBNIOI;28|hN7^|ID!lpBG^DM>+c?TIYep|k+>+XUWBYX6 z81>B4UCsn;2i$fA7iyP6P2ajfWsZ=)#Lrx{oL%w;>C>$(dyrA&*>7xqSZ#!(yA5BN&SVSDY_%Z0hHRyKN=qwSOR{Zs+vxJaRw{bO30ns z>`rQvnbd^-I==42Omc7p^xtgsCv@O|bh}FP)P6q?s$}BGlczUR{wmw;VrL8M3hMggulK*_Dsc^AS8Jr{Omsg~WmM3@h?z#U1A4(&=oL?2ekgk84tP$FJ z$JarWYLycBeHwKqA_eDeNpg!`FY?09B6O-Po!~L;Y-5cI09|KDgN@b2lkHO|UA*#o zW2reZx#2&j6w9{(ZP_f)Vgnj?cv=7TFrVf(gcmLbL44<`L#XSDNe;ts6yOYSN@$_R z1O{V-Ufq>+TGH>#LZ>^C5dD7SZXmdN;s41**;`YwxY6oQ{Q7K`UH5R z;id*#CyI?xO0w+}^Pqozyn2V*PjZsVX_5D3X(iv2@0w%1r$yb5WrR;`co|L+iX+a!efOvv@cX!8rm_zOJT1%k}^Z=vL~zoy!he8WYEgLks-QQTEfk{ zg8il)$)gsR+Z6Ml%8ihAO|zwKwvfV=I@w?T%wkSc7s^`m=5qE4uSUZB+7HJwy7Qr% zDhk_~Iyz#H0U#>a+^%Hz8sSVgq2?yV<;H*x76=>whkkI#(rCX*k7={|sVJKRscHrjI(I2aJ^<-L zAo1iE3JL#2m?%x(WE>&J)Fhi$n5f23tnsQ{pGus!*yc>$2>#c?D%%R@*lI%41x-{Tl=jLgR}5rf z&3vk$iI4^~0T^GleoN!hJM(%Bkwu$eIHf?CN8a`E$NcvT1D+N~_AN7J!9g{nQ#gA$ z`Js~cj{frzbN1+);XQ)>{jV+UM!t_sEd|e&kx)L)8CjtWZji|+K1}Mv>7!cOo|3by1f(*;YEQF&f2~Ex zWeQtN07pI9ZHeo>enGkaQ{&+D^U9CHtFe{q@0VC3$51N~@S!0?0KPrJSFL*f`%WZz^$V^w=HlL0imkUqECj(HTE>46iJuDU=)7 zURNhao7Q|GS;&GVR&;cs9N=^hUJ3CHiby4TcK^YlC!5hKAlz^|`wtuuOJc>K>_t zkB>8omGg0Dg6+CE+UK37N~7?a%({4aI{NpOq>@bi*xvI-M~?NaoFe#EMZw%=m4U^a zUYm2Rx=)erd%MWuw8$(1Osv#yHixeiuk@RPJ}t4>D(#ih^(CT$(J-aJO{614FJwn@ltz6>lkj~f-#6xw4>tvKs*g0RRUw#q83mTNRU^Dj(_Wn z4O6u`^8Va1w1RNissx}VYwieJDp19{&%p+GFbaBa^eaa{#Xry180T(FC;ge{aTbl% zVG_uRR>dzXS~82j8Db)16(!_;S9*QE5VJ1WwnvcZGI$lc9gstqjNxDX1;O1E?ToAd z1D}uCGF@KJ{(xqhApTmuh|&nP~(>S!z;7m63PM+^< zY+$Z24mte#d^I=dx6tU3SvwV%ZJRIn&@ZQ=uZMUvig|ylG5OI zN7C~7zJE&GDBedriQzf zc66+Givr*jb0$-&bfmTh`i9jco_K~Jf#p$Lc?nbdT3qH=+ zk+2UVLqJY+Vaq$Jk1f_fi@d^w(7QWkf-xi>U1ccSgXv0S8xN&jdz3qO)dg#?TPs7b z3=bHl>J=^4g#?gRDcx@+B=9p=-&`^B9Hr%O zd#_V8+&#FOLavB()&5ai9I^dJ1uzIgPXpw53{h*T&_N{rbb;%r8 zIDa@PRkhv`lKYO|WKI|%Or;Nfvg$_U+Y#DJTW6@Bq??7=z~ zc$~wiO(moy4Mum-nwi!;<8(6n=GQ=JY(iMAS73?)rHzn>x#XFSPeQ9 zs?&9Ji%3=dCP#4dX=EcvmP$BPhJ_^437xbhWr6hrSkb$AnQ^<;Rn|R5=2Xw=8up;iUYRDKLR)G%az;00k&`n@iT`-94_5%-H+;;{n5Pv zD`J|G_4_x6Zr0z{mov;%*|&SX^i+~dVE*{&>z4==3%AV53V^4lIO8Onnbvxh2w)?* z1Wr=sLUCt1Hm~6sa)~rZjG-?*jTyyXAVida5r5-wCdlj=IhxL+hl%I@Eg12Af{PTj z5`^HFuOuuf6)`LDQJ>--7#S>qDu(DqYSABq4ID{%fl=uToJ7GGORcgSy{M#nues`j zGq+I(w7F4+n$)B-3WUfelkbn|*{bgbc=ocH$Ql&omn#SzRp{Qpi+4XaH-;jgwkVe< z=L=!UcF%>S0R?k!_PeD3HrXmv*vcC}vm5lfsGK-o7z2#r9LaNDJR|J#Qu#ZTH*}vM zGFNsr+Va}iG3~D-ce{T_m|<|?q#U}Uv)on6N=8a8LpYDpm2tG1Sl1R@3ryZ zc6;sCtNlUk$N-m!e9~7wCPMI%v<1%>S)C;@?M2y`lsfk24XCtkBV*wKVh+7{yis!> zEbdT0$bQ9|JKC1m#Xh}y{k@=bY%7|YsPc53L+1GP>wqlJ$=0Zz(M9D(!a=M39Wis+ z5(T9I_yWvjXAbqvL+9hH8Ou!fYt`C&-NZ$wIB{HP%cY)un7D7KAyJA;-Iu)-L%5xH zgq1P#%a9j$yGywzBumDQ1a2%=D@u3~1X>$eak_<=mV~0D>ZE6-WxHSRn7vXpG~)&V 
z!K9O$uwLJD2KNcGpyW65|6Wfc>Ssj5@`hb{B$(^kH)8wwG z8V$FCDOvqz`cyArlWXPya9Psbu2y5w${DIqTiZkdZwxAzV1Nri~z7G}%Ouqqb5 z*bcA_r)|U949cfGvCJ0a4oveW2AU&q!_tq8r?@MZ3_O9yR^}!W*4W$4G;D6aBGmb zNt3! z?Y$yCs~Gs2Q%!aUcj1}#@BtXEDgT4d%xIFKy{AlB@d%toPxh5_Jl(T3XH}P=>x+1D z^lwA@XD6&J5|S6{`l@{s8O5(zhA({g2Ha`oI@$79FomJvYC{_JI9t)HlCB}V9zPCz zj46Y;%+6sX*pcw>RoXzbVDXAD#ES#-PD2+|5?aeKO z42-NyEv&7D?HnCVjGQg(Y`F;k_dTJLg_*5^HKCo0v$chZqZ1W_vWubRkI;__;U84t z{G&|9;Kv4`n}xGEq4^I8`O#rY#UO3s;b!M(>_quv^nVHdXchTE9fXu3Tuh8iOpKgN zYybcw3nLR9BMSv1BgK!Ltex@yO+>}fz~0`(_=lSqSUZ_OGbpHtYS4?hSX&z!*xLSQ z$13I)PJ}<#f9;%>Q0-^YKc)$p=>aSN)*qv+bWHSY|Lu_<`JbiBSh$)HGBE-;8AMDB zEevd_2(9c5oXwp~3&(lw@0m4Bowzl$Dk9&KUu(+qeG4ZXW1T49Y((S{1f3Wh+zp#1z2pdjcz+aQpRw0s~B zl)^zg7lmjWS7SpLGb2}1M^jf9CpRZUH#1{bM^_gEGYc0BLvsUXI|af@fcE&L=A{Eu zt)Y>*1+n9Ds?$yeg@%i?ecx5Wq}9~mvDls19q5qd;tc;s3X~HbJB{?IUBilhYO8-{sA*+D?GOo8miOd!aW?f|yHwc<6oa?^Z z;RX-iqF9&e{tp5cJ!PV~vo^5GI7U0nR?1<`%_zTcPFRlhZN}@2$t|)Agx_l3vOJW$ zfcq`$EtZFc7h-vfb#GZD@;?at?6}3}2D{&atqXev=dt|FsLe>{WIv!FrLnlhxxwVA z;TGeT!wD?8Qg2Nb7O=fFy2Y{3?t#v&QWM$B)6=)OZV7Jr^-wq?JL8v^?H83>YPS>? z3YoQjWAU{QKe=vKLSDw-79+0eJ#xPNT9bcf)=9i&S;&>bwwBvBa~5lHhRX6ULg$#* zl*(<;IC`C-|LNn5yDjDie7A6CY)WVRu;QN#FT1w)1F;A;?FVKNtmOw{H?X~H{GA|c z!({!y@W!SoQ5nsr*lw`!|4`al&k@ivwLQj^xyxY1jRj|#jh9wz6{!_@rCfSo+tOWiyLMf$Q0%`F zn5FnP&6F>*>-ZFgvgc|SnEjY1h00pxRt1 zDm-o1ze|R<>}Ci$+Oh|$u6VQ{r+J~MNOpj}VfM=MnBy8}76?7ITJZ0YS_I3K$bhtu z3~M4@y}x95N@vA}1&5lrxV3avEDYGMdGkt?%@&g_tCOP>q)+^i^npeGOqsRZW+vIx`6M(vYiJnvFCre&vzjsGc8atb++bHpU9MFwRTKV z2Lmtg^)>r>hfgo*k)4qF#)fB?u5Q^&`B{BZ?dpwf4;UnvqBwtDC{8$X=~J}q<$`3( zlwOmw237XiMQ58ly|e${U!!?6;#5!58G}>P4~QN(b8`E^IZMyR&P~|&B>MqdLQhJV zdmPjD14a>y(Fs>C`Mr1(Tfl#ZJ!jG$MhVuL_s*Yp*m}Cm{(7hE0=uN6Phvj^y@{-{ zToFAZJo0qgn_W@4_b>hSTEum91*e8mBQo+Zo2hw@ZwX zZ`UzgQK4vDAwEgm&b0s=Iq9@&)U3zy>Zq&vvZ_Bx|b@m3Ud9Ph+Z+9hhZshHzb+68r z{ksy{d{eysb(fHa&+SFi4Sy}3u)*n2SKJhjh>NQkIki>RyvX%j&!(~lB+LckPuQ@? zTtg`A%Q*)Szp7U$(Y4v7(`SQ#EGblz>ox^N*6qh6xl~e#Ld@e&n3o{b~E>%@me>W}w DPEo{6 diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/DESCRIPTION b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/DESCRIPTION deleted file mode 100644 index dfd52975c..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/DESCRIPTION +++ /dev/null @@ -1,22 +0,0 @@ -Package: delphiBackfillCorrection -Type: Package -Title: Correct signal outliers -Version: 1.0 -Date: 2022-08-24 -Author: Jingjing Tang -Maintainer: Jingjing Tang -Description: Takes auxiliary output from COVIDcast API data pipelines and - adjusts unusual values using a lasso-penalized quantile regression. - Output is used for research and model development. 
-License: file LICENSE
-Depends: R (>= 3.5.0),
-Imports: dplyr, plyr, readr, tibble, stringr, covidcast, quantgen,
-    arrow, evalcast, jsonlite, lubridate, tidyr, zoo, utils, rlang,
-    parallel
-Suggests: knitr (>= 1.15), rmarkdown (>= 1.4), testthat (>= 1.0.1),
-    covr (>= 2.2.2)
-RoxygenNote: 7.2.0
-Encoding: UTF-8
-NeedsCompilation: no
-Packaged: 2022-09-26 15:03:17 UTC; nat
-Built: R 4.2.0; ; 2022-09-26 15:03:23 UTC; unix
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/INDEX b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/INDEX
deleted file mode 100644
index ac4b9369d..000000000
--- a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/INDEX
+++ /dev/null
@@ -1,65 +0,0 @@
-add_7davs_and_target   Add 7dav and target to the data Target is the
-                       updates made ref_lag days after the first
-                       release
-add_dayofweek          Add one hot encoding for day of a week info in
-                       terms of reference and issue date
-add_params_for_dates   Add params related to date
-add_shift              Used for data shifting in terms of reference
-                       date
-add_sqrtscale          Add columns to indicate the scale of value at
-                       square root level
-add_weekofmonth        Add one hot encoding for week of a month info
-                       in terms of issue date
-create_dir_not_exist   Create directory if not already existing
-create_name_pattern    Create pattern to match input files of a given
-                       type and signal
-data_filteration       Filtration for training and testing data with
-                       different lags
-delta                  Sum of squared error
-est_priors             Main function for the beta prior approach
-                       Estimate the priors for the beta distribution
-                       based on data for a certain day of a week
-evaluate               Evaluation of the test results based on WIS
-                       score The WIS score calculation is based on the
-                       weighted_interval_score function from the
-                       'evalcast' package from Delphi
-export_test_result     Export the result to customized directory
-fill_missing_updates   Get pivot table, filling NANs. If there is no
-                       update on issue date D but previous reports
-                       exist for issue date D_p < D, all the dates
-                       between [D_p, D] are filled with with the
-                       reported value on date D_p. If there is no
-                       update for any previous issue date, fill in
-                       with 0.
-fill_rows              Re-index, fill na, make sure all reference date
-                       have enough rows for updates
-frac_adj               Update fraction using beta prior approach
-frac_adj_with_pseudo   Update fraction based on the pseudo counts for
-                       numerators and denominators
-generate_filename      Construct filename for model with given
-                       parameters
-get_7dav               Calculate 7 day moving average for each issue
-                       date The 7dav for date D reported on issue date
-                       D_i is the average from D-7 to D-1
-get_files_list         List valid input files.
-get_model              Train model using quantile regression with
-                       Lasso penalty, or load from disk
-get_populous_counties  Subset list of counties to those included in
-                       the 200 most populous in the US
-get_weekofmonth        Get week of a month info according to a date
-main                   Perform backfill correction on all desired
-                       signals and geo levels
-main_local             Main function to correct a single local signal
-model_training_and_testing
-                       Fetch model and use to generate
-                       predictions/perform corrections
-objective              Generate objection function
-read_data              Read a parquet file into a dataframe
-read_params            Return params file as an R list
-run_backfill           Get backfill-corrected estimates for a single
-                       signal + geo combination
-run_backfill_local     Corrected estimates from a single local signal
-subset_valid_files     Return file names only if they contain data to
-                       be used in training
-training_days_check    Check available training days
-validity_checks        Check input data for validity
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/LICENSE b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/LICENSE
deleted file mode 100644
index 2d1447e00..000000000
--- a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/LICENSE
+++ /dev/null
@@ -1,2 +0,0 @@
-Currently approved for internal DELPHI use only.
-
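The INDEX entries above for get_7dav and frac_adj_with_pseudo describe two behaviours only in prose: a trailing 7-day average over D-7 to D-1 (excluding day D itself), and a fraction updated with beta-prior pseudo counts added to numerator and denominator. The short R sketch below illustrates those descriptions; it is not the package's implementation, the object and argument names are made up for illustration, and only zoo::rollmeanr is taken from the package's actual imports (see the NAMESPACE diff further down).

# Illustrative sketch only -- not delphiBackfillCorrection code.
# 7dav in the sense of get_7dav: the value for day D is the mean of the
# previous seven days D-7 .. D-1, so lag the series by one day before
# taking a right-aligned rolling mean.
toy_values <- c(5, 7, 6, 8, 9, 10, 12, 11, 13, 14)
lagged     <- c(NA, head(toy_values, -1))           # align D-1 with D
sevendav   <- zoo::rollmeanr(lagged, k = 7, fill = NA)

# frac_adj_with_pseudo in the sense of the INDEX entry: add the
# beta-prior pseudo counts to numerator and denominator before taking
# the ratio. Argument names here are assumptions, not the package's API.
frac_with_pseudo <- function(num, denom, pseudo_num, pseudo_denom) {
  (num + pseudo_num) / (denom + pseudo_denom)
}
frac_with_pseudo(num = 3, denom = 40, pseudo_num = 10, pseudo_denom = 1000)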
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/Rd.rds b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/Rd.rds
deleted file mode 100644
index 8a2190e98487fe1343f025fc3d1ff66d3bb5f7dc..0000000000000000000000000000000000000000
GIT binary patch
[binary patch data omitted]
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/features.rds b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/features.rds
deleted file mode 100644
index a2535957db3eea32a3f76a7490aa3fbe85d332ba..0000000000000000000000000000000000000000
GIT binary patch
[binary patch data omitted]
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/hsearch.rds b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/hsearch.rds
deleted file mode 100644
index 418ba68ead949a4f0d89cc674f9784a359c3b519..0000000000000000000000000000000000000000
GIT binary patch
[binary patch data omitted]
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/package.rds b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/Meta/package.rds
deleted file mode 100644
index 7d400264daef72822a92841e8ca2ea92aec0074b..0000000000000000000000000000000000000000
GIT binary patch
[binary patch data omitted; the header and opening lines of the NAMESPACE deletion diff that follows are garbled in the source]
-importFrom(dplyr,across)
-importFrom(dplyr,arrange)
-importFrom(dplyr,bind_rows)
-importFrom(dplyr,desc)
-importFrom(dplyr,everything)
-importFrom(dplyr,filter)
-importFrom(dplyr,group_by)
-importFrom(dplyr,group_split)
-importFrom(dplyr,if_else)
-importFrom(dplyr,pull)
-importFrom(dplyr,select)
-importFrom(dplyr,summarize)
-importFrom(evalcast,weighted_interval_score)
-importFrom(jsonlite,read_json)
-importFrom(lubridate,day)
-importFrom(lubridate,days_in_month)
-importFrom(lubridate,make_date)
-importFrom(lubridate,month)
-importFrom(lubridate,year)
-importFrom(parallel,detectCores)
-importFrom(plyr,rbind.fill)
-importFrom(quantgen,quantile_lasso)
-importFrom(readr,read_csv)
-importFrom(readr,write_csv)
-importFrom(rlang,.data)
-importFrom(rlang,.env)
-importFrom(stats,coef)
-importFrom(stats,nlm)
-importFrom(stats,pbeta)
-importFrom(stats,predict)
-importFrom(stats,setNames)
-importFrom(stringr,str_interp)
-importFrom(stringr,str_split)
-importFrom(tibble,tribble)
-importFrom(tidyr,crossing)
-importFrom(tidyr,drop_na)
-importFrom(tidyr,fill)
-importFrom(tidyr,pivot_longer)
-importFrom(tidyr,pivot_wider)
-importFrom(utils,head)
-importFrom(zoo,rollmeanr)
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/R/delphiBackfillCorrection b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/R/delphiBackfillCorrection
deleted file mode 100644
index 668615632..000000000
--- a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/R/delphiBackfillCorrection
+++ /dev/null
@@ -1,27 +0,0 @@
-# File share/R/nspackloader.R
-# Part of the R package, https://www.R-project.org
-#
-# Copyright (C) 1995-2012 The R Core Team
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# A copy of the GNU General Public License is available at
-# https://www.r-project.org/Licenses/
-
-local({
-    info <- loadingNamespaceInfo()
-    pkg <- info$pkgname
-    ns <- .getNamespace(as.name(pkg))
-    if (is.null(ns))
-        stop("cannot find namespace environment for ", pkg, domain = NA);
-    dbbase <- file.path(info$libname, pkg, "R", pkg)
-    lazyLoad(dbbase, ns, filter = function(n) n != ".__NAMESPACE__.")
-})
diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/R/delphiBackfillCorrection.rdb b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/R/delphiBackfillCorrection.rdb
deleted file mode 100644
index b496edd6577e0f046af3828f0bd591becd046676..0000000000000000000000000000000000000000
GIT binary patch
[binary patch data omitted]
zA)E%Bk`T}!2nYcN5V)=Z3A#Rf1|ggRLkPn*aW}$wz&!|jj^zBk2weUG!hHbF-w!w= z;Ud5TxP)*K;X%N|2#)|BmGHPtJdJP#a20{a%=u>#xcoH)9y8~kLwF%gyn*eWj>n%D!o?tIfTzE@C6O9%&#DPG0XTpgg3_+e}V88 z;B5rne>ne_2weUh1m1r*|5pfKOM$;dI1KnY;O`N>j=<%<0r)1uHxMoZ-Ua+4!n+6) z0N(=q3&OV$t^>Xe_&0=d0O!Ara0Boi!1oY%JVFe&AIBD^d>4Ow9~#@LbAeO zq;51@S0fZrtJ7*-cIkibBnQ#1VHp>;>zZlF9K||Cn&K@*iekP$gOFms7=&!YM}Z;? zWDy}7i(xDe*?xz#JoooAA;*6iYF8&h^2to#5gtBv3_Aa zM~KkbZ%|C8_zTKWhWfLLU`emgwKP*x%#2?c&gps8u+Byh5MjbQiBMO?{IQp4ph(sY z$E-Cv7WF8Zc%IL#ms=NGb%ocE!fVw2rAhNp6ye6EAW%f*N`<}p*fmk)V>^cB)L2%w zL=jO$88htab80V{M-}EF`|?MhN|k@8?(J9iYKC3wG-^Aq4k}N-e{=lXZk6t1KVO>8 zrq5jcyF;&57uVafD-KqSUWsvC5erznxY@4XC>_1hDhD^WZ@)F13K&MlWvg`!$1$z8 zPr)iEcEjpi2(_*`9Yy6wRVVHrR$5LY;kD1m`lMGsE%D9DY2T^?vbZ0+<|+TIX5^^T zW3tNJDXXJ9WaXFTw11YvL(FhnkQlN=v13*w!x^vPoXS03y_6>BR9>X*AfjST zJC@OGH``}4hbwzbRc5{3<|R_4lbB^TYAxd|VDa+-i$BG;6>0vQe?<#2A8SGF(?5xL zsbVYm$EM5eJx>%1^lHq+1=Ykg!EmI(j>NR6GW>NkQ&A3ONky?SCKI0iN%*$Z5;4*#;%P z0J7%mu4URb{m`^uwCm>UVNe4n1an3oJ2kuTuqLKRZP z`yIIsH|uayM5VTcDn-HLe)YO8E%8>uoMVSfab+e0(f<>eMAJM#zU6h_+em(fp zaJts;F~G)i?)?J4pMjip#~1my>&HFAvHNMPfqXjeAx{m*p&s&N65Tpj!)3@7$R)^C z$P#1`auIR?vH)2g)~jDfX7>PkUckIX(udfS3`$pN3h0bLGkK zCu5HJfLIc+91yF3m`@2P18V{{0Iu5W{t$QsJO-WsJdO_p@Hmfe^DO=g7;S<9000N0c$}qFO>fgc5M6)8vC}jO z6pD~486?U95``NVKm`QKrASaDI7Lh1lohTMc}+>hr9T;|y&$B9nRrY#i)|vY(#vFa zp5J`zYCrglF^kzI{5tTLIq#2NbssZkTj2a;*)o~UJg~RMVKnX(fSZ)pCXFUp z+=EfEZOYvuy%v=!VN|SI(B}7~yp<8u`c$~(w zA52|E+)GMbl-?8*jp>#R<>`8QYq9RTMV%``j>^YG^^!#HH=f{S(CA(nUCt6XUbPKe zZH3Z*6Gvyky>WooE8s~O@KY`#{wacgX>c-?X)u{4L4FN;6`lRd!gckKe?c1HH*(+3 z{N(|I-OBeJvUnf2+203yk?|33{!KQ5>=e*<$=0&{R=$r@#`g%{0(>LzE!ri$yHw*I z<=$WE{;A2!{Pu!o`oD9{Yq6=89VMrw_2^|>Yso)f@d#@#v@?phlv;u9a6U^yDdFXp zgLoXy>5y_!|D0{QFj*%b!lwFt(l-fq1>6eM38n&%;Iq}i=4cWcg#1#q6m%+}@HX%J zH%7J1tpETZDR`W)Oa>P@iV2Ds#T-QsS~hQyB1y5pxKOMz$tcEI zQkc`t+?yV*Zw(*$R9wWBgZ1n^kC&Lp-M8FN)mc z*y}><80OrOa=a_$m?!R7Y0f@upHTb(-9Cf;A;n9Irxc${-#w#v z4h>%j`W?V0vwH>GSBQIveEyX3eFHA9G5)uZFXQu8h|S_G;PWWz&A*!ghj+w1556Yu zzGdeIA+6&j@*@IWfDBO>M$NKh8Bw{+N`_H|VdB)ZZPPjy%$emS%r1nMj|*-Rmr@u_ z#e6UCjS!P(#uPK-wSw&!0h2jh@xaz>qXs`|QI(^Bv>1uFmqecwCoc8{yMD<|#czPi z8cHgNWREv^QdTmdP%^r|@1>m_)rKce=>wxuY#IZs8l4=l)^N&Vp=N! 
za(*)>3H^ zRHIq%_*l?7*lBM$^VQPDik6u(hWrgkRmFLtE)5%BAc4wzGE6zM z`tMTxb1jBBUWRKGs2gRYZdgTHoPuKn6+ont4H0@VMChdsgw`qr@f!MnD!d$$)=CGo zQ}hB&pXE%|8?bMO^(s-HoxTUoGPxeu?>YkIWiBjcPT@OMue)^@(AundZSyx}{qw>U96o^6197X**qC1vZI) zQF`izb5^$`>%77`R-qhRTfT_7mKpRjL0hwdI1S>yxi+r+yRT5J&)x&s#bf}V+1+}y zW!HSejNrk~?WwKSwExhu13Z7nix(>e+m_@`8|SsUL0^Kami^Ky8RsC|28o^yNa&Ys z#!VBS_rnNY9>vT-BF_79x369LIJtV8V}=Q|J??}d?x5H0r+GisVJT9#r~;3hMBEAZ zVNZTm(p7;+4DtKie)GI-gLm{&n0H&ext}Eb1pIX5w*f68xC1wkUj$pOfjn6|gf`ut z4p$2(eTzCUe}ia8&~;>X)_ zBVbm`uI|&kJ~`su^6PGYmiIT@{v7YGp_Q&&ErQGPA2eG>?S^w&+wcxf$$y}Xiz4UX z;ld@+G!}wpL#{8}`O>A2Ki0qFBU#ubgYsD5{_<$qB?5~)lK8;0b`b?M0r3veC!o{0 z1Ar!B5Eufu4#NU6z*q<}0wx7a2VfT9wsXLI2<8On0|( inSd9-fq*Z7F9q=Lc%0V?>`8dnCBEw9f%hNv@NsOydnw2O diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/R/delphiBackfillCorrection.rdx b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/R/delphiBackfillCorrection.rdx deleted file mode 100644 index 4d227b061bc9b519ebb4481b41a09b384d1f5497..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1098 zcmV-Q1hxAgiwFP!0000017(&?Y#T)s$JdG7#CDUEHZ4#E5J*UrLLg01AQDI$+X*I) zZM^Gzh{Lei9ow6(cg=n{?S+C60wGjU3B&=UqMqSg4jhm`NQe^>Li7d_;=mbBNG<~Brd^TkT^=eKYassRpQ&Q&k4@Y z!oDc+<`I4$wkh#%>G%aa%C^M2urEveMLPe&Pq0~~40NFAfazlF_Ag#7nd@)q1&FQuRhl zBWk@&wCYA(t!asOs-|v`rdm$0z;Fj|GTI0gpSRkdwcC~!3tT%MEmdIIMZS$;W^@^rf>qk<}GnK8* zr{(lUS=~!uMsKOrI(rEkpE0vjscbSP>e?FLM3IT4s%h=YIMUuqS>5KD>{WH1)x2R@ zWW_Ru0Wn;QgofXxArH=cmT|@F98!8oeqmtvMn51O&xb!m<&zCCU(mBVsR+Rvei)dB z!wWjcL>AHO^gTE1@xZ1}5sO&1Ph2k~^s*iBIHwXHYcnh!hSYZx*9jzGkdEy@GeX_X*x8V2r&~rNiPpPKO`ZW>C+%`_}cS`?U2451h(5H(IBf=HF}=tQ?Qy1a%z5vjXZwcDPprVk#PLw7L^&IWC!r4q+kD(1 zI3SF965jp+HA8#IbI+qttZBwaUyOxO!|<=%k6f~k51pMJKYv+EC2=x{_5&o3dfC?K z7grO8E~IEVMHfdoDTVkRjh|H9Nq(Y3!?)Qr$+unbX@~mMHHBh&{Q;Ub QpcV`IKLrl57*q)W0EfdRJpcdz diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/AnIndex b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/AnIndex deleted file mode 100644 index a91f2f9f3..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/AnIndex +++ /dev/null @@ -1,34 +0,0 @@ -add_7davs_and_target add_7davs_and_target -add_dayofweek add_dayofweek -add_params_for_dates add_params_for_dates -add_shift add_shift -add_sqrtscale add_sqrtscale -add_weekofmonth add_weekofmonth -create_dir_not_exist create_dir_not_exist -create_name_pattern create_name_pattern -data_filteration data_filteration -delta delta -est_priors est_priors -evaluate evaluate -export_test_result export_test_result -fill_missing_updates fill_missing_updates -fill_rows fill_rows -frac_adj frac_adj -frac_adj_with_pseudo frac_adj_with_pseudo -generate_filename generate_filename -get_7dav get_7dav -get_files_list get_files_list -get_model get_model -get_populous_counties get_populous_counties -get_weekofmonth get_weekofmonth -main main -main_local main_local -model_training_and_testing model_training_and_testing -objective objective -read_data read_data -read_params read_params -run_backfill run_backfill -run_backfill_local run_backfill_local -subset_valid_files subset_valid_files -training_days_check training_days_check -validity_checks validity_checks diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/aliases.rds b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/aliases.rds deleted file mode 100644 index 
fd26621643164829a81fef1417b4e77bc24a4a5f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 427 zcmV;c0aX4UiwFP!000001MO4GauYES6p3Ah&BGxi7mnOH!x!)cP{nOZUd`G{w9=v( z*~!mS%&etNTzmq#>}tBDR?kDf*2b7iv$&A&QkG`<^T+ou-;BAqlHjvhn3eo*taI?y z*#m)%4%p@$a^>DB=j>zZkBIlr4~#b3fuK)Wct)z+N;%s5{!D>>XQs{utl&mLRZRVm zBJZ1Umr=C9`3x~J;De{8^RaT#4k(v1V~&-&5(5@`9|W`PQ^S`I1KS#`kr>A8Q*KsY zuf@r06@-hp}5hC{1OIz2B> V;pHj(Z%@Je`3vH%a!?!w006}s-cA4j diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/delphiBackfillCorrection.rdb b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/help/delphiBackfillCorrection.rdb deleted file mode 100644 index 511908712e9a218cd905de9c3c8b731369315483..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 57034 zcmbT71CwCgx}?jtZL7<+ZQFKr8C|w*TV1wo+g-NJss7H18|Tj4iI|u_u)Ut0nQtxt z0ALii%t1vd*?xKi=lNhCG>9;#hD#vGCSQI-P)mVRy7V=&+V~X}ZWKt?On)f{K-nHZ zb^v{Jtvtd?4U#lArtA*3=?Fx^LMe|p?_pMwo5_cl=g?U5ZNFEk$MbnY9>(|8h2)?0 zr-lBCsY5rtX@==iNo&sq3ZBJjbf7A}%t_*Dx>8d;JL*()n7P$3Jb=mDWBKB;&MJ_> ziqvBwwPVJm3qhliCjJLMIb~Gc(^JDgRwbq6QP}0+I4Wg;!pR|2sO%Z{TzJ}}4{pxJ z_tq1%7Mi40eIXf%bI$d2rhcHo@NBH|ThG2<|4Q$6{>jTzu)=hz8RD-&lzLE$OORQC z2d|(Rf;g5WC;YTw<@K~&GC#B49J!G=H2sTqeiGy5&X~*Agh#zCsB9rHO6xQT&Kx8( zNqZ{~A}yQwE?`E>JUIgP?gw5?$q4`eezjZXqK1~;Ix9-DU9FQ<{FF>{A@K8()ZEHy zKOJCjp-cimuP>k|=RKh>e`un#a56~zR5D*(cU!Y*;ZN)N^hlW~R4cI5D~iIxE1c}R zxOmxaDO{QIEeUE zPh}!ZqSca6{jf!m2h`!c6pw-A6{TeNq}RpOl8SrrUR+rn=dXwT&t%g@ATP+r@dXVT z=j3s(@`>)seYP$FSlgIrI3;ps9DL}wU8b()2M!A*UVJ@LTSmloj>yCUQ&+T{X!mC^OWyV$- z9Vs&J^xk5F8VOVfdVCm;D3%lzDl7LZx9pTX z4G9`rC8JJGwAU)B>YT95vd>l-V;y;)8l4waYL@VQi`) zvvhmR$bJ^MzMh5D!z62RXn_e*lTg#u3!kAnJk8QRhAg-U{?aP}m!L@^+50t!dB=2` zm9^g8(e9SDK8YsnJ0w-}1A4hFuSa(&d=><39JF|7NT^ZvieMQgkoD@lu`aop%E75l zC#a@c_UNu-MN?z(aUPts|2VV41`tXokS9h-4iZ;DmK~;$znf{*46skUrst0GRtVgx zafA!CLX;~@pHO7&fElXQr4N=Pf92%twICBrw&QZfjl)n7CZRtdv#U~|%j(>Jlh*5O zhy-^6;yiSMa3us3l+_i`QgIs-q6%k!iBOZ6#@u;aIppBRF)pEDtC|Int!Iluoumsr zzT#AjC?C%0ssYnif50|ZElj;bm5bmMc_y-BK99!($_?;QM4)wUX`Ka4SP1h|6g7|>p%F8kHb@f;V-Yf-jOBWdVi*9*5eB{zNcwebYC_#q6WI>u z${DCt&z3dp416;@F+^wpLGJIw!Y=SCRiGkKXyV5qR*v4@IQha z6@6?;2disK`{a~1ad-H3kx~e;ps@JVq@p!)Np_%*?SQ%f-zM{+jyR-Shj?n9#^N*5 z7rOjrprRJWSPr-B`4{K4!3t0zW|s$*Bk)*GE)bt>Ewk^;O7P*#$$RSd7*-wnv_x@e z_O;Lu4)rk|XWe76u${~8a)L9J#Ja0a5l+RtSx?h`;@oHQI@@w?R^x8EH5oEHnVb0r z$S$322jHdsF>qdBN;?Bga=R6v!LunEV!lsL{{-3~$#Zf>pOq*qHZ{F}L#wu$v@9`0yso}|YvSAukvPzU0&23|PH>^SvlRoM z7zIyr#%tEk9S9|FcDlH_Fl&(nlrqf{7e5<1HZfvh0KaMtddhgTh5)NX+bH%a6G*{h z0`!2tp%RssH1@Zf0*P5&HKsWoXmMpJ<0Z9>0b@Xq?PiRPvO(vh9lqE&N003A+Zkdz+#`l#ioosu9 z){|(1r@0@qV|_|x57bH+G>Rw2PLU2Y2)%lIJmi^S=+BP#@Q;m77UCi!DOJ*+V}(Ot z$5$iy|Dtyy-#JF`EPcB3!=_^KtcUB(;b0jjr!u%rf2ozS+1V57Qe?4%+#h>>%3Ge` z;x6F{V%~%7C>lO~6g}c?K@3{V48lu|u2;c$(CV3P@Cie|Hs7-0Q?u%;6}wy|9}r9(c^dyVZ_ceUTTS%q}wh~VE$ zulH%X;jqM3I5CJJq8sopqZdcptZ_-WQu-kKBf@o#Yyf|*yYSoLj%BUtUa3nzeYD7% zvh}ArSca<*IxBEd?ALiT>1RFB}Fm4c!27`aBbUaoj z!*zj)if;&4;F~!-WGFJo5H6FY*vVgN`fEoFlz_cJ9t-08m&JpAH!0QVvefDViv4XS8V~p2G9X8G$M$jhbA=GYSClrffVp#tYrK!9Y|kz{tvt zwPJ2y<$U}>aN&Es z8!=+Q4NpUJEe6?Jaf7}!I8tgsSes3P;Og7|?h{^l>sD++-ZYMy8JkiiwpuG!svua` zfjGM~eM}x)8Y!>Ro2nf+mdP!Clee|u(r=oh3J>kRF65V@E0J_#K$1+u4aUjQuYwf` zF=U&aGOR2lc>~ON^K%iLCV@$?;MNP2jARXZY03TJ8GD8GrvF5(7w<5-k=au{(gE^j zBH~A@?VG+J0hDJ0-s&B?1Vx=b&bXM@o=yzqxD-(vjre1tr&^gfItb>zPI?0K&r%~R z&kXcIPv66gAb-`U#UGXE$6df3s~SBuQkb^27=&KD*8v2jxgq}sW{MxSor^M+bC8F(=3T`Db%3)OdFJkON$B3xFj+UxB=Wg5F|@BpF>t4x`qZk4DikhZCLxk&$!3)pA*m7%n4QXEob;bejI6|UqTGinB96qcHk!Mq+RiQ 
zY9Qd8F>G&n*uai>v7~6Z^5lz$bAcRv*$FMg+(=wC2meo(sJl>IZxxpsmnzQji$f>& z?C%x6FX)4K%FJ)}{-=CS{u_mO=5U zH~C}2l?y>9nWmYCZmh&A*+>N^IaMtnVSWsXe!+9GjdOv12d126wmfgy2czp}^KbQf z3)Rw^zF~|Bu{hd#v~(#8U@gr-O(J)DUlyNBvoA1_uYL;494M2!vz5M^0GB$jZpJ}Q zD}ow9!nZyCLoxQ=oUon1!y|GQ@9)cuACV61-~3fw>QB;3m6q9PDbOtq9n!BttKc#- zAGr_Iz!}^UoXxaL`vsV4RZ0c`fY$J>pVbxY<~dPZ+FfWthkR>8dISEP=+4rDl7M(( zMP8tmop$aTC>jsSTGGl8t%F{~T&uYoJvWeBp&{jk1|~^fy5_Hp9_=}G;k?cJBGd_R z+PrNylAV0AacurF`i-|1AtSivOB=BceIBcY_O#_7i(I@Stwbtl3Q|H=yn$tWQ(2Ln zVRVUwrcTY(#l-Q#Jo>aSrqtJL*VXRD1jy?>P-4zE8Fjj<&;@g0HOf}^uEQfQoC6Q9 zcL4UYGu9$&&*{PT)_YZ7{ETt?Y(gA)7iK+$qnLKU%$!f_alPdoweeE!YP6nGI{nkU68~u6<0G{YFuTu^GN|0`|K6ElXv&AnO*emjJue&D<7iJRf z_Huc=odiXLB8|>qUeL?`F(JKkWy-ylg~Dl-YWjCvV2HH|f|dts zryQD!QB!~{IJe+i=^y(Rr=g(7MCr)L z0;FHSF=C-Bwqz7-dwVhXvQX-_hPQ>GRdJ;lXr!Nf8~B3K_RA6dgpF~4L)}T_b3*9A zaI<)<)eugT{zN*P!eAd@=wC}$RDDF@Orcem5TqjbeymT_b+|jba5Gh%9>hr~2lMOj z6Z9UI#HuqZB{UF;5NJs$=+ndnC+HT%jQNq4n4o#N2ImYj0p(JF32jksk&g~S7VL^d zVW`r5WQ3>_8G@gXq(pKwV-jOTwnbXgDJkHnM~>Kgxb}gMr8hugSPaw-){=*yYd^3Y ztq(vz6n`*<+VG5Q3fL8`%sf@d^hbDt z-H$mM{a5ma(c1ZVsFarGuq4-2nyYO{@TEg#;&twZOAAConeLS#p#6lKFWnW{7{E+@WuU7_cz3I)pa3_G>VEs*>Ph1TeJxlpCc2 z;U_HK?On-^k8!a?T_lf=x|1!;NG`e8qC7QXR9y@h=Y95qJ<7NxA^3?^U3jNwOc%xj2UaY%0K%iZcUDv(!=q9m zb5V?7F0)Pv+pfXyU`x%)ZXlFmHjxsgZru(uB+r@NY_%Z@q$<@^{bC`_Uz^Nz^(wC3 zDvMwV9K(`=+6GYtG3;eSVR>FF)P+yuYq;MTR z2)y)aOj27=cx+S=aJDYsRlJkzkx10QK_19F3ATedVpgl-+&E@h?Lqq*cGx`#T=rz| zaKq7*Yuc(FijgzeK0EJqOj>mGT$yJ)7lfOfV9ldO=!gl`&s0)~jMj6~tg)l+mL(fkVS&6V^@17dMs*l&fbb zR^GsYHEH5Wk-5_S9H`F?m#y@nUGH}_vU;^ZOCUr60^f?>z>}CEKIT^F&M>i!J7b;K z69Ll`r@6`=@5ozF$cMB(Ii*oGV1TIKqwd#o4LlB;KmDzXWMbmh_`uXi6XcMMzvkHs zC%Gh4-W_#TfroaINLEVCp8aWQqA<*Oj=Lhpb{bB_MA2>Pm;oJ&_I4j9s4+{>mNIk= zv}1Gbh?NuPLu(+7bS+CXsW!0ZfWF|wVG23s(;U#R9<-6FOl0BAV{kjx)&gyEa1nKkIkwB= zjqY3mVO92Zazm)Hl}`*iol;*SK_Qjwt-06s9GK4UI0AuU!v|)wL3~La+q=^^sWvq z*$6G|VshyIfP-%TodnWEZ2ZW-rIo}!@3bX`AextM3CG}Sid?SQo#=x#^&}$f^PYg~ zr4Uj5Vi5fS$-+Y;6CyExi;q!T6FWRXEbUy(i+)Jjbh)VLy70hJl6|_Dqi$I;c zi^VN-uE;|-mH0g(yC=F2WzD| zd_mBCditLMe(V5x|II^-@-jI{R#$cGOkKDqAqZ*bWH;OQ1m{}2II(e%;x9dGKI3_w zND5_M`7)YFKPsIS>m^r)Znnz6`itna-)BoZXIjfZ7GW$1|vY$NpYiYMKt!Cg88&82ANGVj~<&up{2wAq_%gS zdw7W1{g%6$_7Tc>`2X&0|egC5ZEAky!jgNITPmvE|Aq6maL z!+_@0rTl=j*4n7RxeAzMt>GuAoJf%x002hxe^E`=ql7w0VTWT5j{=FfqX*0rV^5JJ zmAUNnFV|Feo6uV0*iHVi(Y@#Ug);TVN(6qYj@`fOvb=bCk5+h=odpoW1VC-WDhZ95Rq&Ri_=S2!%PpE1 z?u*{@mj=|eI-g$w4Br4*5=ek(tm$0=I&A=a)*cwS0CDqnya@~bIcp~ z#9@ire~`TzSWnOREU5=mil8OAY6s*<2uncL9}CeQ_*r!5kXm?xd?`ViHR7Bj?wxQF zL2z8!xaH|NjgsbKO^}xw2bgba6&K4Jl`C9^MpBx%cgt{uFt;;WDY3m|duqx^S(U&I zC@?uf4QEQkD`PDb0~l4Ifmq>OK9}>Aqh@w9`U(&DP-MMKO$F4`i5tWx)ziZsmpOdA z8dY9?d=?pNS_WR~Q>Yk%*2)!iH`@Po0#&iRx}R*>z-w{1QhB)C+jZ!tByI+75zR1W zq6GPn`0Y~9D00dy&<1!{&YOv^d?k4(A|Sjk{(d5oVml@J17Y9{n$QMzjjAt4(YyL@ z?eyZA#C&W7Rvb0o-}^X%8F>g;=}jr08pqrMFIYQh*Hz2m>0swrej`Rruqjktp5 z5rnpwm8d&XqB5wq*p#GfgtF-b?Wac740mDMS!vq&?VQ?KVSjW=k7TodC4wl@+M!+} zJs((ssv>Lq@1b5-h}`JVomxTU(FV7meqYBA4;&f0D@u_NTZdL$vDt1JaJWYIcXl+@ zk?U+FU5d01F&pY)D|s_5ETqb^Iov!W!@^BCUQ^g>Ah%@Fa{DXJY3P9(?NibqLOu&u zrBiQcx_*X`eEBw-8Z4v)v9JRTzabHuUTCsoN{Q^sOL2tLi&!r0mVI+j0@KzB&J$NN0+Vytw8BG)?jE7ZH5KJ$ z_m{NXOtTxwAUQH_)v=q&6BZR3tW7I7(@QL!iZ^YS{rJBeb`8Hd2><^k?edVcpMkD+ zfjfM`umAT@F^wP)>^oHCVkHc>v!f#$h6oBlYtZ|tpA8^l{$5}9!@{glLV~KAYgp_k zo{G%abT>x!0`sVRR~@M*4M2LaQ$Nu>6=D>GG#4vM4w7~$pJtwEkvY>}9*IeJsr5Rk zi9TySa~^2g)I!I&%=Y-(*b*Tj56!_I1!<~D3i4(#)u(w&UGF~}++2)anyV4ghCMJ(w#YBN9ams>mgGZ*2_jqh%hyQzV4bkG zHzy%EJwpDbiZB19ieZ0KMct)>B%RcWhQFyIRSXi)Oz0x6eNI@QF3!uA;;jE)Q~T4S znC{!ulKmy2!uL=Jp%|CTC6m>rsTlNeAjD=pBu!(pM~FY3=(di$Lk)+VLiSVgygv5UeTlw%tT#`HS82>Ak4) 
zbyl~Mc;V+t&9i+lZG*whGp&(28BR=5FLz#i7|DR2@^k#16AiGmJqBs z;;O|gI`h{icPu&jZ@sT%%$Z`uSoAjJyH?SG2%aD+XjTI4o4(}lnG_;RKKJFaH81aQ1a4OFLhssJ7M>7O7HxlK9J0IC>-xp``f-0B@axse!i*EaG{n2+tx>99Ie}hIYuQhr zc?e|XxqFU&d>=)yf>TOiOLWFqK%$99eFHDBQOTw$za!nlSsJGIDshwF>=ZRPJVdns zqu4PWBkD&GrWPi#x>ivHdo&5DEb^|_L`ZI<)Sb7S!C{%1#2?x&6Qw>towrN<1diH zRIj-*kV1YAFf`8%sir}|@%gj2tTxGzvG#uVx#>3+i#cohmJ)W(6+C@55T9ZtmfQ zBWT7lO4&|anLVnN2NDhEw@zr&Sy{q4I^Q!v3f^5aTQb8Fl3|BDM$8y=FU9O(t9WV+ zxJp@rtN3v%E>=`@p(3lz#h*nZ?)1pP-KpE5E#GJko~PH3*?n#t4bdQ-&QucJ!z??y z9;i$!x=WfAj>BLYKB7sF0_mKlb+dXsR4(bc`f21Yn7Jwujdz!w*3veS*1;;S+7j#h zmWMOb3J3hP>Mo#^x{Aa9p6tyfH)#-%OwG1Iqgj7XRiM77DmweoGM1#Z)Sk}}|1}u* z_zOTuA&q)*Mmtk(cf0HoOu|BuwK7jvZE?PVKME+ zKEb0Z)<0_S^08DA006eqhumKbn>L>H^DX z8BzE%qOKw>VfKTBk15Pz!ZcpYbsU(|g_a+Nb4r7yXfe`Lx_o=~aqY*5|M2=cJO^-u z2Z@BE*Wt2*{yl(KqoeQ zV2^}S-v}Vv&quWm-HPJ}Ru)l8k+To~t?<}5jDMmiBiGv3=9Kd}S=)|IkoU|R>hOp0 zXunBOvaR@+SuWFmGTCSeTr{j_T3#c(nkJZd?YO0rl1m!r79D!I#p2f6yx6hq= z{^PA(8~a&kWeJ<3m}7hYsWt9DBF9WP2!2P}`dGY!YX}>XJvH)KlqTHV!Iy%T2YxSc zIz!xd!E968%j4!JTDL?Di3XNIVviA0nA*s;o zYHq|z9;MaF50x1UgZQCNW%qkvWP>I^>6O=Jh`|n`O3TmHEhYhj;+cXGv1&>raw z2J;H*HI@=ZOqt(TC9TLP&8K3`qFt8gg{v<@H`?ki#PPbixVv~ba#&Ra2c_jP-}aAp zy?mMg?(MB$YsGH+za&1F5WPJ_!_1g#prP7#4b#F&B%ou@(hO`XE$@0WYEYs%?s41@|G!OgSTkzBQQZ}S0P)H0;9_l z-2jbwB@5qfuh=bf^Pdb0!&&M_Vy}LU!)CxBviQt3y#<6t0Ts1Bd0}~m8$VUHk!A$* zs9{PNe%*H4cZ}s@A(R{{!d#$5`s7u0vG0YR!_J!{03nknsOycM6c(PNHQfGox*Y$i zAV~3>p~Sv-7^s}$-EE(N2-77jaiYHVr+tk0+Te!E8N8%D>RtybE+zMAE%l(SZ4aEq)Vwcj4R)M{35%4H;-MV# zGt7^X^NfIikC8y4#!v)A-hR)5c7b^%$ER5Z^mr+6>dlPq82U*-yZ~=*3^wO4jK*EN zr?9%P$$6xQyM-o+m_|Yk$N^|QtVs`;3rbx69+sNrAu+CGsT)CLS%L&$ZE#QTTO)Pf zJ{o|FlbbnN3SmPBBBYglfbS9ph7-bF9KoS!ydbOQ0lavZ!i5P$UR2b#*Sof`OkQd< z32hw`s)1*!4qk;$fIM|ja~zh1Xg;h3g1B(E%wOT=d|L=L(C}|WCTNfj-(@ykwR2eC zPZS?s_uII7UYvcT-UTk!${_o*ba4o`b&oTC+Mcn#Ue-w?^v;s)@15SNc@KIrAYluM zA#J9PnN(69q_;~L$a;Az;xHXDRzsl}pY4ZZl?m?Y<5lQgr-WE{6?EUS+1qE5sJCgU z{B8G{$h@32XciRlXhXPNUFomo9u_cZyBPnZkk55{P5}X z8l}XQ=ZbhcLY%QhAh!9}M<6u6A*_|gNH49=DHHUk*W~@`L4GG;7ScTp{W6C8>rpr8 zVbU|wL{WY+x3x-Rl&%HNe2J=Hgf-*Xi~w`c)LAA@)qTl7=*@hlm25U9JsLH`6yvo} zPnQls*w42h?(?4$z)QW=U*xx}@JU6r!JnY+S^&vdIXYXq!6VX{9VZMGgqalXo}jZ( z*LGuCCM^+i5}J0_W4;;}fAX(n96Z^p7bZ>cT&1P6un)^RJ5<|epsiVKc+rGt&v-UG zgdhR$=sS1~NR4};l_i&Ab?}+k2_?zFA_y5ZG8?!pIxQ6Kgdk|=#V559e$QVqXbs^? 
zs}KnPRv?4Q6^ru7Aj5$SIeMpWla0Y-jS~%#5~3xwRV3QKyauF>oB!9Z2 zEDt;q@;iNt70*B`BOz0diuNl0W|9a7R z3+?L2U!cZXxd;@wsiBCm0ls8Cy}=0Fq>F945q=-(7?%6xoS(g&r?TI6@AGMuS}k^X zHf!3U@?ic4tQZQJ%(h4hS6&oAv$h2sFI6~o)qtIvrRrJog)I!TI2`~0ede$I)A<|k zJ9kShyO%;J?dk%v)3ylgAA7PZw}U$ ziTxh8cjja>%!eHqi$sZjupRzW;^6K2c1NAJGX-8nFx(e3EQ=#~GT+dl z#R2uiDPz4@&_)Px0(l2pS^j_x9eHqPkHIO879}DbFRj~VR?S)8+&_ITqAQQPgh(4xEHhoN;2Y!p%`?)>3;gdqv-8Rp@Q2@E>mWg##tYXnZu>`y;ZZf?S0SCKDnnA zWL9WH^cNS`RG2muafWOHka}M(;AL)i{UIPt0m=ACYc}l>DpDa@$rcnV=zz|>D^s*S z16r@3k;+KlyDrg-tR|Pgd0Jd#ayG9SsuUOf!ZMEBEZ?LC^kiHw_vS&pd^@~9EO!&a ztgaYhlEByciwMv-m8q<+d8>Fn{`51?j2{{KdxRmQSca5vfaEhw(Pu$r!DMM|j)xaZ zB)2~PhtFljK%>}Xp>tC>%qfRu{>z4nVgHq}pScEf3W*KW13w}q2b+C3;QFyyOC71> z=24SAnG@=p$E%b-Q}WqrRf+t(LSVs!wyC{N&+b!TuQid&?A4(U^NzoaS}Y~Ocmnmd zJN)1{1`iP*a~RGL{sX2lgDx(e!jJ5f*m0PxkhXWGEYDN!a^j38L^i1m#4Mjq*WX)yS6 zVj@>6voGzv76}$E4^^WOCi((loUAFR<7v!y8`)CVi_xH`Djo%7u&{6897`FZ8ue^y|x@r z!E2JWrG@vfa~d3VyCuC9>mLCLb$j+1a_MfxnCOPpq%sLn>WXqDs<$xja(uW*)B>2Ks+6?Z0>BILT^vR}}un`?lQC@Yg_iJ-`55D^Re1CJ0BTEPiq-Cj>xH*}U<&1Pr$iMC_ zr(5Jv`@bia9N!a5H_@U(jV_b=C1ZPr2G45SYocCr#rpz1|}p| zJe%QwN^BrSbTSc@+1vu>XGsOF*GOm5y6002VNH_cL}{~Ebr zTydbF8kbY{SqU6A3!4?2(^}kiSwrhnKuC=+z=(JKuz-ec{a?4XCN8%~f`{h$GRmuz-=OJ){rC|$TUrQ}v4OjC&mUEuxNq@~CT0*F_=nj>guBFbU%2m_XBi&1H|Ejb2 zl2#sw8`7%}>$iujcs1f~O^9V3JkQmsX-AEhG-yZ*(b3pQN1)CFTN#oVk_iGW1+r*2 zoQnqy!42<6!U1gnclmwV!vf`}#Azb1zI+FGK8G2RloO-u1cKY$idbN6Y!gEpW@ zVT~eqR((P-Ahl{*3M|+@#e6nhb>h~0h@yEAfjHW9NjCiy*MTc;51y(OraDA@&_O_0 zKxgWOy=F*G^anW?b_Q@I;PtDBlGqk93P^MO~oOt3?-%IYeaPKaj} zGe(#-S29wleJlKgv{a57UBJETN5YCvtK%rHv0RNd3`LxB1syu2v{`U~$v#rdC|tc6 z1y|3KBJm(Lr8dSLZ}()<71DhFgtnro=4bL68r&+&gSDl>A$Xp9NEM5#^Wl>Z0)iCI zlC%%hYNh#F{Z&hvI<;Zdf_*t6eM~jf8JAIQv8&9l=leP)-g2%UdAE#Bisr0})m`dwMvvW7*$p9qK>U z`v266{odN}@3oHCGyTsfj~n2hZq{G@a$dd6RIgF=Fa3hIh5-Bf_f&BR#NZoTgI}ds%e#p{K#7Ko2{Eo1(17aX) z?E({5!)&o+1_+#7G$gw9gX?#v=}1!u>3FhzYdk>}7kLr4nSS4FelaVEU5zk0DsiTKG0snlf4Dc`KGos<&N854>vfRQIrUZi zj;|kRuVW7U}fAkY!Xh#h_@c^o3^dJawVhW+?;*_>9To#4+ zSt6_B&Zdts1X)m$@wGiJQt&0BT1 z+l#`~5TM&iWY@#FHeMmRFgF8w`TZzMcRr3(}H?iY; z$)98j+0yfYdp9YpsgAy&1`M|N!x~qUYeOGsKjjD(k)OTvB0Z{>+Yja>srRz@0|2A* zxzqLb9xxp2j|JL3DCc>KPZWlWdl`PVAq>moNv(VE$Sj>HMlp2XFwzjB@i2(C0>8{# zz5~O;xM3cQw=Ilx5L4uf`mS;Lj~a0Tsm!g5qsWUL+enz&j~n`(tgF<#Gfd)!$_vw+ za31XEIFD^bf&aj(Zp>ZXZaXf!?)4bRQZV&^ilXR(I1td04uOR9-lC7#kx)x&g(KkW zF-UA2Y;!;o3pRu}w28U4Ab||^Ce6j`^A1c*E0I+!cjkDTUjlk8L#{P+Q#U!)4vF2C zN2A>#Azbp~6bnn?2Ab)w&GAHu0&-3!9)YRBH?ctsUv4|OP})B9w#C+-Cg^6;Sa>BM zGR>Qc#SSc<}S0LPypVY6H( zgXLk9dQUUQl@TWa+G4|Xvui}V4- zeIH03h-;iN3)2u<4mt5HlHvmn_e&8Q`5fU)A@^z{xSx>Jcs0I`glsY>>XTBTq~zzH z{*RLMO5mKmV!y+^e({R|atGa(HDndHhaDkRs^3(Tl`_uL;f7zDr68(f1+x&uwvr8) znW)dF8w5G}Rr0yNNH#L^mkA7jQ}E5O>N2|v(i-}nh}fFf>#N3H#;oW9IK!YX=J&uO zVP8N4jro98PD%bft?7&|q$w6C*l7h?c!OCKJHP}M=3zlIJ;p#p85a>Gn&d)qp&|$=!g7*$1Jk|(W;A_yaACB|Eev49&wflit4L3xd z@MleMC?~oCc0lK19FwG#$;{|7(_vJ$8^42f>$H1DdaG?NSua}%T*S|fTm`%ZE9OHh zD1N(4WL4`k0dxf2R&6k=>pN=+(r_5KqFyBF7hf&_kN1TK9?gUSfXmA|>6Dsmc}zx8 zO*G5YO;5zS3U7GwiY6X;z(M!)y6A@_r{)=}`gQo(vddRN%9}&n(2cn|Hztfqq6G4L zh6Hg#wHW)!FB3(Sf0Q#zPS}1e*81to8Wwne2(B*rE++nctG0OGr)^5J@;(Y8x&S4H z1B4b0PCGMlN)aR*tDpDakA2P9RDHLA#OYjQ_N0eidR+e^jYT#38+RP!FV%860 zdd*=?!4$;-@}MaCWlnAd3?M~8cS`1aWKWG`I11U`{m(&CWEWkUDeJm|;Xv5^Z^hT9 z7Q7ty9M4|VN+b<&2~wh3w`&Truv7@}vgla=s<)9{WyQwPDsWMW>kL)yh-xJu*hng}C*K!85Dx zk1j(Zk(j9FRr2raBzISQym>Y57*va6Y>~h>bOa%sv>~G{lMS1J(NZKl@=F-ND`QOI z{9}N9FNRgCC(t04LL1w2BMlmIuMWSLT&yVq<+%ba(xO_<9GgeB^H)b)?oyQ zt&dMtYcYJ}JTW_eS@yH|X!H%n!EoE!_@>R)*RM$RmdP~L0t7kFH8|vXIMCa^q(;Uv zXeTcO4(U|y8s($})A_+n;=#<<3_`)A@c6uSkA#V4pUeS)}T)|k=cOIj>B|-;PGVryt0H^Fz 
z6=+a+;?@m0LCP+y`YbegOA+RYt5td5pC2;{3`-`JigB0i0HHo^cgM82>v-w&+!onT zO7tO*A#h=bW}zl%ZV$*V2^hF z`2&O&axQaFu2>aUY}c%${b&@Gh^)7kHLZjX#guzEKKh82xTt}^#=}Odu$jsxUIDMK z1d*dyhAk}>NCVf330>K1_NYHVc>_l`a-(QzxviXh4!Op$3vw!#^r?`PecJ&Q47TuG z+Xq7(?VF*^{*0mJ3L{AX?MjFE8%$hf@HR@jJ zw~ATfWhjceU#0^eC_CFilhJLc^`i z>~ViTE{kAI7Bi3NC;)lSzTypSzs2&u--j%xLP96gLwJQc3TC%(Hz0S^t;HtqPf}gL zdZ7%?mNSwxIyAOuk0xxecfeiFB-VROQEkCiC(kooLu`PF?JZ9IMk0f*$EA@N>4A6aXfvn@i{`#KJw`Uatn@KP|cx2JMEceRPP!a6-I0aC*!O{12a9egb+bsHbRwib`3UxQOJy+&E8B1 zspiK4lXnajxtBXND$4gBfx1v^bjGbjehb75-&?p3hThu5kK6H;kMQq%oVRx^et{Dc zxwT)ag?9itV0LY9*s!U&K8aN`r9&2D(0>6xoKm&|No@6f#xSwL7WorzE4^H_mi#MRR3gmc z930HG^2a7D^p}77d)lw*D-TrG>5#!Y6Ru3QPVaR_3xuh~$7j&Hr?S>zzEc$Z1*}`{ ztD=f_ms{D7sJ`IV@rlltpddr%-`e|;(cVA11UFw^bI2&I)rXH4vpST1??DA*KqYkl zAkAh?&ptt=9+1&nG;=>JZYiLwl@kgg`Et4Tj6$I<^wZ{pA5j?#s8iT32mTdw_VfjG zLP{0~}2xinIa#e+uh{|0=AHwM@vo&zAA*397(I0kI$X zmaSb!BEcflG08%fitZ^1j&soodlaV!E9^+_MuxTRbj_g^J89Bu?yk~QjdL%;e$=C? z2^%&M8p`;}9HnWDc710le;uK5N~K3kbvgTmar3MMAhhG zU1}7Xb+mUsunY?1zWYo!|1>-QZgMxp+1{m1VbRJs46}3aTarzHQHO@1(Ddx_%?0NE zeI^8uk{4}5)YgKOl>001mz&)rToJL@Y$a_U2{YKS6G;4=D{nUR=o9YH3TMUgb+;dt z)jPxI!K&PILj=6ckY)QUfa(Hq+@WJf0;!57km{Tj;fd`znP;M){fccqkP6Pl= zbk?uWS1$O%BVeB)FQb>>!;y4z1|iyytyi~apG;iL=eZC{|CLBYhJbpRThNmqjP;uA zR^`_G4?obpl67`;yS!fBDVPcfZ4f~)`Nw1Ge>k*@^q*kB>ZOkq)tHzMSC*nc24_>t z*SCUMj|<&!(zp$NON#Xmx+eRba19zqjXsD{VUw zaq$8erV!2GKMRgPGYBC|Zv&hUYNIyfiL^v<6MWxkMWbb0HQRn)VaEkIhg z8`p5yl3SDdRd$n5Mkb#A%VfX>mLpOdWd331hnpzbdF4dQEcP z0ArdNA2{cG4s$9jgoc1@j8HM&Nz!j;Yaod0(ZXDDAR72`6G>EUv{Y@f*kTv3b;AZz z>o=)2TjJ>RNsQ_s|3;QFze%)01e1f>+ai!GSdQ<~^G=2XzF)+_f(?{Ss@Qz*)_BF< zYo?T_`k7-~ae6c4I0>E4N})y4Eb*hBpkBo+H3xR4EanZZ_H+g8Q4DzV%rtlwvCEy`|Q;JJLl`}+xMP+yDxdm+AAw_&$;H9WBi8TmN^&n z-6P%cWo0$;I8>LgLBMD)xE()}*k$CMFzi}>44oa{b2bHNbnDU=o+hM-)^0_rJpSav z2GYEhXE4lo2o?^5oWQcF&pHZ*v1d2q}GQ%RYaX&CES~*E%9}TKhLgijJ|`F4Q}ei z24^2iB-5u6>BPR5Qt&R@3vkilpW08U0@3jHNts+72qSkp?=py-$96o-BX!F_jT_A1 zZKlaF7T+tzsn=^dP6Y4@9`vgx)eMnN~fYRMixroth@Ny zm}NQaPDnXesy<_1+XI)ZrD=DD6lt774{MK}&`xKimWku7-UZ9qm_+zQ9NQ2ajz(?`tTHmW9`_I+{Ru5V~ z{72EFhUo1P<{ful<{jyeem`Y^k7CUA`08uZ9lF^1qmaih_sEO)Ya!fr(9t33qt687 zr;kE3?Fx{9EYL`%l3Hpipi$x@2cGh1t{Hfi>KT8W)AMv`ovMa|Na{24}b z`N_YG!H--O*8$@_pZqbc0>8)4&YsBQ-V{DfqI*J4Q?KB#Mc{;U06q(Rzvv@KPZbQ| zUNJglQH))XI&SR7wO(igyvt6}a6aKPY?A@}l=s%V39qgTXHZTQCOwYQw2HG| zHq%GAl#=Z{-BA|i5*PDzAsblhmk6$K+q6sZcXtJX((`JG;9`Ri)+EGIGwebc*SgWX zD8daFwBntBN8|ZIX4eUgA*U0L+8UY)1^_+bS2^e$WMA4)uB1sE4fTs&fxcJVASyyV zW}OMjgb!BzL84^Jm+%mAMPNCO zW;1T}hDgWiVyn8E&(?hSBKb^l50x^WZZqkON9_<;svu`khXfyGOc5|m%3|`& zZyR+ULB|FHa+X@O*Yzrrg}>djzKg@kbNg{b-aDX|Xv|asQ3I=M=P&W67lxeBYlHIv zM^b`xg6bH{H%@IRbULtr8rSD^dz4t^wO3FK6_`-UoqY|fkUcy%@Fp%L+Fl({X#bg% zpV|AEGRlx7A&;TVWlm3@tgAKdKUVkXxEYcJf~v@xLXE3rk0F*_HSM)to1NM_4Ac^Q zue~Xyk-dVBqLDQ<4!BsVmE8O9k)FARq%@p_(C9MxHnZPE<~^3V9ZMbv47<=F7Cux zfJ5l8mFEF?HrMYzVd~~V;_R8tZVYtNjXe*W*+LBSjX&j<(SQJ*ClGz z)=t>L+uHe_n}38;J%H-_-SdocBbYwZjsnlOWxplveZ5!BT~=|p!!;H^7FADQ<4aU_ zXhu1+Nj}_ObuiJj^Xi~$G5l5`GF%x;3$^R>1?y%g4(~F*nuwF;&ob4qMjhZKff<=A zlKM!-^oy!wO^GffDQt1|$!4C?rC7Fn2Q>w*B_A_!@yzPZj6?MAPNZ=rR4MINe?sw& z@_uq%OkNM7h4Vsxk1&_!=JU{V?#low72 z2OzFr`b`55cvgS2B4rtQzCgYKdj{T98gL6PU_;a0nzisyX5()|)7LiM&o|-KVuLg@QwIq|@qE`#P z)ReX8R{ruhTU=&Zbx-9KKzh^HJVjb-U*ULy+>-V8aXA^Mexr=d*BP7wMm3tb3w0T|`}NG0M(>i?vG`GhV=)TKZH!YLy2SXoG^o1wX4 z6G~e4=AXgi+>ro4@Oatple)koTgoNvAPoo}6UR2CXh2o?YS)xt&}&Yj!7S9;sAaab z^R)A>3ZISSgHLN4AI9aKlVSRbPGIe9(eb3teyPIQjh=BML!y4{hixULA)^dE= z_x0yeOpRUWr7wFV-uW{aZabl_VUYvCo|NJwaypT>-t_Cv#R{E7@r)I24th^sDx##{ z4K%1lis<$l0KEy@Z_|4Re`_mN0|LU=1)T4xZOH;&IRGgtkb)^xC`zlwmnCy@xLGjl 
zm0yWqOfekH`icccVQ?>}=|3$;JMwq%mOEQ)j2^ZR`i4&{Xjtyhj;`IKChsrW-7lP- zuU3_X<&zT4g}usVl;*N%WwgC0qgnR&rR6DnltbZ(Xv8gqx0lJ0`*sXe5%_7X7Klh1 zt7xlV7fw}Ml?ln!HGmjMXl`G5ruegak;x4s4pHun~PJv zw;c-~ob~HZb}f1uOB6l`76MFr_#ON&<2}lbu8F%N`Aa&6f^bS&_}0P$83Ci|D|5oi}_4vv&vIryhnL^4E#x8VI!GbwKTg`#1-1 zBF1K<-KZ#7=`Sqec?g`Q&I8_(Y~JU!9Ixmn$u=RoZLW+SDuAv7 zaq+$2i~d3yo^$hqCJX4CmN#&Ko8}o~>`B{_KKR}aPH+luBv%k|+nWrq_P-X)dM5ID zV9tX4L_b^4-6_7u;gEvbVF-e|gj|t*ht@0KUQ&&9AvD=0KN7Q&@kim143VAoN_EDW z4`ib;N-P+xsTf4ipsz`Id41`&KHmlc%FOXpNl42w{1%yb{NV=psc?03xHUMQr{?Jv zJKzDCJ_x?s(jSV7#lhO8p4 z-lYV?NT}7tp$~e-g{&%GT6M9aBxkohT7YLOEkHeLf(D+lupUcjLLo;-ZkWIqv>LHg zDZ`In12^C)h2CXQ7adM9y5LX)oM^0s$A>rqIWTQoSrCPYd|Hm3;W*~ng|>o;-?+C%g3wXjN2<)9#*-@a$&da5h6Sa$a$aAvOYiD71w4C$&+ zKf=^P{DeNMkSzp<*SGwZ^H$6>4^iG=5K=*O`c1#>dz&J)sZd#8ME;hxW1nN)&03X7 zqF-*UUNP^T2<>WKMreTM!TS|3y22p zq56q`${dJewAV^s8-L^dL0l0vqWQp;O293E+3UnLQ0%zgnNw~BpvICk2nt%^tYCSg(kypcHnD?*{wT=3Xb{KNRnF%kuyayn(% z6h0-pmX#xZV?+QC^;YkS+hP6ZUART`_;+Sjrs)I8wyLMH@n9JW5FL=X z<)$FE-`3f9|1E~kA*_^g60VNbT z1Dy$k{!`aikrk;Fd>BH)#Gsf9omm3Ak!i%b7vfg42-EY3E;3=d zjeuB4`hx%q=W6*y&ZJ-vxwpImLb+bH4||*0>yZ=L@ivNzkhS0jVA*nDRPg?GI?!G| zXwgU)ds1yHnKA04U>Ko);w~siqUYv{;?~?pY?bT{EVi9D0tEE^6(HEDTYUx)lQ#7S zt!`%ek6NrLd95R{IfYp)fgzZjw)X`6fh$>W{ek^sOjAOnAafJyI`w#6HdC*5<10)V zAedFCKo1TK;}&{%)?>flw2`OU}1AvR%jszmt4Z!WeEx{$?$%F6S+CLUi({Hpw*$8UEJzg3jyC1Q;w-bB+p z_l2c2v(jNX*HeC0b3K(k^iI2eLPBW#G`q zJXus3a|+#&4_9mskO8(uzgt(vuEJo{&gmF44R7k`z1ASsKonBL9W_1LqnK4&$@B^~ z0CEX^Hd;8paxtYVX-q_WNQ_O}oJ{J27h#L6 z*@JP^Fi(WWY%-D%j`#|*af8Tw$E|=mWRvf`f!)se_BJ@zUJjV8wX?lqJwdLBnd56N zAQXy?j<97f()d7Vqwz;ZxPM5dQict%5wCMZ+2F!zX#FO&5C?uZ!Mr0oYBo~F z8>1r{_id1bnrt0k`i?MabXSzB17$#)f}b~56uRTTra>G>u7P1!uSDZZ$SeVNodOmHj z)7k_)rxN%c)>7hF&ZeMamc$68FA&>7@{8M2Zdb!Pje)yC&%zZ|;J&E{8ah)IT77WG z8TG=bEBIYB$0^(``S%*Sj%fS|>DA%8eU3t?=iT{4>s?~zT!u>Z1S(WZJYytXY59MF(LPq4I7##30lEs}B(C)G|f z&97pFOL@UOZ)RD2f~B9WjW96aLOKl+V)5BZp=NtmKpuaUvKS$@d=tPa;8}WxkcM@M zc(z^0tfp@(Ov-Ywyuu-G8ybSnUdXMp`NIh@&Enlia-UEScBfAApx)v+4hu-_!QnN_ zaIYp-z|B|r`4?86wMYf8-zRlbne_C=33u!1WYN8q&Ra*PQ`+ZaP*<#^Dom&1>Ehl; z60cJP5r=POF#NC%Fxlp}qE;ya(Ez$)B`OW7fahVi)XU`DLG;qz{xGM+iRRojj~c}( zU65VT4~N;nd3>eKvc+Bf9~|HSSeF2J%@V-BJK^!K^#G{#Klyi!0RG+VfBScF{`z;l z{_*c#O%`?l{JSLJCCSG)co|Rt)a6(oUd9UJgi${Lb!h@XT{?fFE_*&vmzF3zBy#1h zYK5dVW*C1_m*w{7lva8i!Rh?ICbe}mHA>6?)MaHz*GUkc#NoJu%(#RTb#30_ufayr z)^sZ~Ii$UUPvB77k91mo&Zb)57SV&=kE7QGL=O&;-v&%{l`j6ttV4Q?K5C@6-z=;( z=krUozPA<0?6kHr@QYqd(bXt&@P5=waT3}dOgpWOy4BS{Bd1(rv(8B#$qUq3KOUyZMfQtpz z7f3ucD4{F!jo=`e9}62*P2tvOhBxQqr}wHurGpXt028N(!JOU1rIx|6hx7gh=R=h7 zt%b~N9b83i#>e?~?t-IBHDhOw&iWp03v&f-b>z;^fDCoK!4$#8Y+yp-=wsibC8AsD zR;eg8zR;7DDAD2*H@JD(y2G+EyweTVFkotX@z<6K|1%x<mzQiQjIc zKHbPJ;8AO-D4A)(YjIeO=4F_+c`(6S3(@=C$TNI7;HW;)iRBOeR~L%bi4|6>B8q+3wv$E&I%a z1db!jL<6FiOdYwdq^d)`R@W2cdy0;raeM-#o>!lsSrHPO|Hc=7Q1QjZGdc;7cN1*cY~L0djuNGe zIE=#WQ&e}fCX_z5MZY2+k)5Y4P5~~{>@9?~#oRMe{zhX3u*0SJEHU()!!PMTQrJG6 z;FMIUS4-4*g>-6}N_$Qw9{3n{G@!6LVuQZ6!8*CHAw&MO=&ICfpW4Y((1$p(LwXOQq!& zzy{J3c4cNq|m(0(+tCUXbCLCl(W5h@Q3wN|ffob+pWS^8F&S+bYE>i7pQ^ zxc=3e9;parSlRRIo%kW6|A(-X5ML2u-y&J3iV-cgD9s&DFcsUj)H+?)S3Px@x$7Il zNwoTz66q*R)d^>Zc!;qWWosyDn4h3~u*xIrysZAn3Xm0D4%9)EnO@p)*Unci zx$^**l2a2|)E>NNhiGwW9iU ziXQ&3hfKI&7mk-JV3=qpA4;|!WAHYoJ_t45xbju^gy9nN)d&JXt%+(1t1YkeVePQP z7+T0AbT-j=kJmE3#|2ZZW@p+e6J#ZD6@4#T;8I8agEcaO#;ncvEJFE?%vp}Yen;4o zNHB~CYs`6-b+d~QC;wx!2G^ZuT1};q>lpMAWDkAB!GaDyd;JOWAE7j_J^$`Wi^)7- zV#STnCIdxJZBQp9BV` zB`S&2W58YS7fx~rPau=W)5{fw6;&2{+a0*G-gT`(ggSxGlJ|gi7Z*=72G$* zR8*&BkUzh9JuUJl+-8^81hC@ACji6LgPaVb5c7u6m@5PK&$e(%_mC7so1N>lDFYaK zNJul9FGK9J99Z2g3x2}FX7*Y(Yk=9qS~{a~ 
ze4r`7HrF8W#+V9|{S#|H38dQ|Ft~fwW-1}dNx(mEn-(C6Tb`YL93OFsFZ?0n4jHlj za9;SGVWnB7PScGX#PvOZ$3ov3xtQLsDXONXS%1$4p=V_6Nzxc-wao(hhtVi^OIE-M}7J})^{%eZiFHFgPKA@@D|Zc2=-2Sg(f zC~|KtlK;HVF3K0cQcg zN$H0F`5$u+d`r}o+;Qx5p;C)`Y(NV(>q1PJ)toVJVs?C5vZWF^Gi)U3JH0_h6XyI@ zLFibG@_qKFiYtliMuOoWm=cz-NBFgdGFolx&Gq`A)ez^T6zOe6cuQQA)C-sL9xb)# z0spyvPuq(Z@eTkk})p7QLF7nd%NsfY;PkOxJ zUAZm{U2;~oMtYps3svhc5}hb@Jq&L1 zucX>f+FxK92CBv07>k28alcGd@U@3TcIip9Vq0ZC2HGRZNp@^EV+H%k^DW@0C@?TE z*Ik{)4lt>z7t6KAa0$?L+Gyq}yI;##1yI6F;FH4RL{=^wS005mpe}eSM{gZ|nf8jz zccy_Ay^gm;no_`tlH7H3W!pEx*$GpAaPBow^L~L>4wmx%-|GFpDn}CjAIxO*W&>7w z@l&!)h5u4HQjqiS%8^=Nf_&-ip$<+Kq6`2td5fN4ItMh3mc3dwyf6Qw)=YI~e1!;= zGSCd?B7<^Dvvh6%FLH%(+=0AUUraF+<+dHbU4SdtM#4B=nNoy!507hW}6J2wC#q z&Jk@LX<8GXZ|4;R=TrZ5j?@7=ykaSj@H%7X`2e%3?`@|FRS053w!D;xtjD;X`6F}mxuLgwSd3$> z7WbE}Bpm^@yWx*_^i=Qj%U{1*a}Tb3!*UPG(j9rS`bAr!&WAemTUd-r5kELCQLfBR zi&HsDZITrNH|vgcmWV58Z8qB6a5-~J)7Bgf0(#;tHj5(R$h8JVwOoQk4{|rI9V5&k zmxR|cqBL6GMXl$vWKfFYo6I%pG`VTskF_v2s#&!P+2<&|9a_FX_WcThxg_5%!bFlK zQf#k&BqNWM6vN$O?3;XtH!J7ML`A4Fk@ASbX*T=m43~j)$-aG{cJLc#)S!f(){L=n zQ@6Qd#SLt_#sKq-d&D|il5=wK2XGHzpQvlN!JUAZs&fRrw3Ezr=nyjgiXV}}S%b_< z23YDSouZHFlj$y-8R?_)aL@&vZ9Qn>Z30uz(qcbcNkP#nz=TPP(jmq@kEP;!iE(B7 z?mpDF=bzv1f{AkM3T1fHc}~zVM;MaS98sT7DH@m`8}}2Ga;*u|M1(EYG_NA0Mv%1a zxG=XBVojti!`oBW#f~Xh8WLk!`DPhb!6NntcPPF(|I)f(2gsO3uPlnLnr1&!Dz4ME@?)L z{SE!X2OrW9Sv2F{%77F*^0sUsBK>cU$j^_cAkYEcDCx(lJtiVPb6m2(-LX&{TQqOo zMLTjE5M!4DOKh~uutP-`@GRI?L-YzK=LS0`+|WrbTF3$Kc?j7=7pd<%#`iM?*K73k z0Cy?MRF717A`cZ!7c#2Ev)Ml@ZTH?O&B9wONS!0p#kO_V>u7=JMsPxKhRV~#vd7@B z^Q(6E`TWI>(k(A`;)9ULR|`w4uqX9W@kxr3TWDI55tV&I!;K|DB+vLtPL3g70{R`$VL5p)|v_VtNC+&TTz{;G=@J(JNaLn%1tG4+c>N({3 z?BB5XC(8{Q%is&Vme*EKB5A*H_nV$i?T2WL=Zj9aYSEhn?PZ%ZtPqM%lK8V~znfCC z6iTo}bV5}?YVf{rB-Ki-|499Ff&N;Vv^BGauZqWMH-=k6g0>+y>)w6A3$%V)3=5c* z5&thXuD?qA-(>WASM7hWaUuMZmSknD)&Bn>Ey=FhM$pBmu@m)b*<~;USY_}Tx6x&UbN2GtQDg5HdP5@%k_k`v$MAA$8lA~&_ zehp}VvN$R{1p*Ss`jl3z00u4uwEc`rq-*V&I#p8XZ1A);Cg39Wkmo9g*xU+YvP(|HQ;q$r4qVR5(~al$_^aY`h%^+M}0*4vVx^93JS(eV&5idxFaB zHl)qPkE7Z(2w4xgRGHc7luEi^PYJaO3~bac@UIOW%_{+p-dwkc+9FSOU({HM8JpI1 z-Tb8SwowzR3_IYzOwoObxf2UkYorr8UScxkFM_{;5j9N0Zx z&&pfDn!_F-6OYfr-SG20pmmZ0`P)DTSYHfCaquM=L4(CI$zCRJm6Rjc5wJ`)S5Ejb z27#}ZZAspQJxa5%jsJD>08Is5)owND0)%pC<~vP>OiLX~z+TN;5yv{C7~)T?J+nx( zKU2q*!t<_CgpF6$Ke}KD)`}V|S`(4%E8>VjKwPD+T^%>#ATb!pCJ(o{zo}WLNs-2` z?_O;zpqN0iXUhypycRE5FUk;z^PSnMxL}z8)3VpikgV5=U0JenN&f&T(o2&fc6z4X zf+1MYOB0$TVCN%`IU;aNjL{k}E7@Po)bZPutKM+AKVEO)upQQnEF;sI7k&i^Ra_3Z zS=fJZG#WsU*$dpM`8LqZZsdgykVoM5(%+3#Fs3AG>yHr>nhZ_HDr_>GQot>qZ6SX< zdeECPX{bdn+^7gIREK>NNf&S}^)DxEcq4Cs?}L)h2PG<|pQXX`9eThBMGMzvk2`rr zz>1;MO>z-&1gHx3A(Qf6m80XC=OL(P!7*33-F0Yo->-4hbwIy>Uk`kykSM=r%h6m) zX&4G6mD(w^^J^oX$e%0J{j3UtYqWstWnw2IQ%F*1Xra}a(E+YyFKeid3K%y^;8cs| z)M85E4}$y@Ps~#1(PK6GTXL3OQOLc``nq9%;_S%;9TfVWZybPXm5-6<;*c=~Hicau z1Ed8l!5t6}^H>IeCA$x!M0T%78b?|KcnYm0zgs}u7`PoS*qSD`d(X$U|#Y0ZD#c#}~Lp)02Frqy)rtMdFA$+3Mcb_+Mq zK3x3%6vJueWN!_Dgt{|NKtq3ehK7I%{rY*(KF`ib$qDrw=q2z7yc>&-)~_Y1oVJ7% zOyD2Xk|ETDr^bN2(CIk{_r@=!d@hr|c;xH`aJSTB7^?0(Dr^NMwz7Rh#FLWV=#^~V zK(_E}%SjItxIvHergX#;Fp*91&r5XlzDrb>bufDBS82w+JpeY^98s7oMxL;1#mC_3 zk7I^3RTq!(EX>LLZPC|nnXWSrrocWN`+c*^6xHf+2pS?wpDw~BW!d8@I@#C@kZ0&W zWFvVklBAMZRfvM*{U)fZ_QO<^N>GM7zZx%;zJMlRiE0YEO8+WJXVi1YS%8;6>ND<6 z|Go~g0-f!^lp`vy%demJ<`Pkp1zZM=1Q?60-vL^MRwRf)WW0P>9XcRdO11sgOMjdp zhL=&Ew+(@4Iz^T)qMb-8mxe;gj|@jlsY;3-3;M?bFr-D4Z>og>;s+~P=WqlpXj$Xm z3$A_+lKEbdxIB-r{EKW^V42*tVLuzi8vBM(HDQ++Y!zql5XgmGQ8}io>?YNl6r)~M z^n}T2AGFz!Q8MQmoa}um=c%ti_2{UHc6#OEM1Q4Dz<_uu=xh5RA@AE#`t^vmv%nKM(q5ME4`jjUW5KCX)8523`tA;!w 
zald|Mr5!nE*R>|?iBvh(45q)2?DxUtVu43?MDTsD-`;t%I&=iT0%UUC_O)h=Q?8Kq zex>}@(Y=!p0O*g65rDgL4o8iwN+O%l<~LENZvnX4_I}W*cE?UN1*tv!cEYg$=f(Po zyLXq#o>Z5>z#_LX6mbMnmPOoG*jP2tP@)4_HnunJg-^h4`z%+>no+s)4b#^ASM~yJ zzqdx72fT`8Y;bbssBIC|dmxz21H7@FGVC{lO%RD->sz?DDJLZjtLYrREm7%SIA`Wc zYxT!8rfZ9LigQiTG6M^5)@`{fIDbXK5xTgMn|8J*k zL`@(dp~%JHC5=P%(tmqB$p2?Csr_dUy8yW+8$hpz`+w8xLGriPO8GQWw-*-kx2_3)+^U_>OVN`==)Yk=l z)Z4VD@E5SgjjUE!NLGl$BE%1CLr!+*S9jx%)*4@Puu4_;I!UDSJ@M}0JBP}guXBCn zlygoi&c({JIaU%}-1sdy>O*xOVbXSiIX5C&4Wx{z@C|~fX}cn(AoumyjO+q*ui1|Y zMm3lq3JHdOs9#|(2LEd8-5sLK2CgM4r~oI<4`Bgr)pbU$W!a?)UzJMjoZ4vEKd$mwW;4m$GDU@V=ATy+@2@H=t11 zW&iWy5(ZkfXV_16k6a`ESWzl)z+()gs;fgiqKx}(+KT!q*(9?t&MX29U1HwEp$;R% zqPhw?pAjC=RtTJGKmt<_q_*XcZ*dSDg!_3ontgZ9t6xfX0&=a_U>;OGQRc z_FAS%<0llS0Bl<9qQY>UC<(M#muELOD`6%|EH|HNzli5`MHc4W%Af5*=M(|Of_Y<@ zoJ3@jFY*E}e{NDpVP>C+^7a=Vq2RlC$au%41u<(dB`BcMjA&eT3e03s zH((}QDK%S4#LvME|%q! zRB737;JG}6Pqabjd8~rB0lyTMM>bKn=8MvRKeIPC5VWBt$-^4W8Eev!3O0vGli&4+ zvuI*3FbJ4d1+1eTqUx#Hl-xQX?7gSw8<79mEbyPq`KlC(_8eh%!k=^T444wX#o?8)NJYcpc{C`PSh45optKfu?foEJyxlvJ zN+O^6ZFH`>$@0w`G}}Fp9WaZ*{f~FFnO&X#bw@h}2qv}uUv{*`(GKnE3bA2n6IWLy zxv7wetB-l{?gN39Y${+sX@C?xNim2a3I*u-ArA-s7zm7F6Bs$=rUfY?sOt?FX@$g? zA=iRavr%9D^^T|f3ZpKrs0w5PkyNyy zqc&p!0K-p*FaTh9JehaaqbDC*5(M&d6ThpUfyQw!`D4;|%rV04 z>D+GKTOHxNEYVlyRn%X_vBceYPK+&WvfJ;c|GqI+o~FP&qCA@V{D<$SB#b)oYAN+f+3bQD;iWq&Q)lnphJhbeG?a!0#-EraO zqL9-N?XuO%qQ-f?F@|l)U0TV!7IT!fAJd(6xACm<-X1QGXI^GPtNTJBaoGT8*y^nh z0W|!c_OCecCiFTQo`ifF{0@P4inGSsOBt&(@3ms8B0H$eW~J27Md?ZsgRsK|8`8WT zs9{NbP+QQa0T^P{Ff*O!zB0Q%NF1;~k)QTGII9B*mIa1K8I!n(!REsV^~2J^`>1y{ zYVZH(5Bo!x&8D-cZAB}2L0Hq`f2&ngvs_P!%R2^iQLaNa`Fj97_u8!v#npsp5@?T# zT#y0)Pwh_Vek(Ut+xt69gauFDjd~y6epQW|q3x!QFoTDOqC35C!9&*m0pMX;tC!y{vbRXlc ziFCy%p(J(drU3=@ST9^Gk@$da(&0(fTdp$8)g5JN1hVgVNWP5L4o^=YkW0X8G5Joz zd2-t{AcKWw7Zysg`t`$N*n$kXI`o>`h;xAGu24v{&=~v4NfE9*Lz-`12KXu&+41DC z3MP$WEU~#VK1fG;xElK8dnG}Zkz1vd!)80@biqKIXcDNuTgc#0^arh4WpFpROa2>a z&6<4zXz(E7 zI-G}iGJMJeG|q@G!lw95K(<0Do>D4Xw6f0rDwmV#x*71QTgbuCWExZ|@b2M>&&?k= zsho7TxnJ9IjdK|B$croU)Z*YvMx`_|KYzXHQ>2wtJDscqPsjWLdden$2}+8Xc~Y>u z05^Rwn5+YZo6GTv$r3{%Jxg~{g##A(`06j#!)WYD$vVZ*MR2XBsFJD9#U$J{$ON)3 zek5UZk|ceKotn|aM8}S+2$v+Vrf$$Udt?1@rEy5xZ=WN7M4CIZ{QKjkoAEmhvyyl7I2H3$1(&VTyV!>;Dp^bP zIgFvmX0_04^G+WZrjxRHNu*|5g(pg)itKov`BZVZDT^5nyUWS0?PT7_w644$+59aA zv!(82b{0fCS{;`jHj?`atdVhk1pdRCF4j3X>NY*K(fk}<1>xxq=KF{7%LV4d+!8I= z=*NMbUo&s9jQtJ$#WkiJ25^lLHC*JtPG3GB zTe^kFwOs%LHSYJJa(*PAJ$2S9X-fIqh4vmErNzbqdn|w7XLBe4B-?c8E~QOK0LixG zFUgkvF?s4*L@Xua@Uy25K(bXs|E{#sW62LKBgvIz;+SGmnB(2B9Y&UJKSW`r0ayl{ zYfg&)wG5nDTI9tgXH(w~Pw(|#dkA;;X21q~%YpwOz5Sa_h2_rkttV#d^B4SNe5-vf z1NEq4$}gYGz<{&2Np93etkG`a-=K)K$lqlE5EMB>11tk9)+GRr?Z1`*hETdu|0zml z>ZhP5^sjSyB|t!w)SoWm7yDTegd`ntOC%6TxvzEHaXd&+EDjJ@y4k&i;*x@#%)WZ~ zjQig+tfX%Rob5>7(4W6NupU;n)u0;4mf^EcjO}S?kiM+8zTMwXxM@u@Rb->yTy}yL zs<<`Rx*FhWQ}=|EOOF!n15F0}m>cowJbPSoNIt{r(f4J_Ky%0YDVma`+hO`7ATjH> z>yC4ex#BVDG6l%#wD$ zyax6ERThC&*HIZ={ff47grOiAZS;EV6t+xNlD9dHf^Qw)p5f>)c8(=*+;K#HY0-B` z7<&O(Qh-gE_<>DW@Ep&ZX$_OZE^Hnaqt3!_S6cKj-zQXL27@RY>q$YGTcpMshy6|U z>?E{#0-+OAL5oIm4_GyD#R5a~YFBf#uZoMLvSoFvV>mHpxLGAc#|THWNKex>Cf$&V zZHGS%b{NV4t??nkK}bJNbZCDO31D5Cxi&ngtttQ+*64+rCa+NSFm9B|{8&>zrh+6EFfUQ{SgEdQ zp3welfyR6*B<-eeKz<8%Z;=zC6 zzjr57gmec>!`Tw7!80zn?IQ>;_lhE@htw1Go$Zw}h&=%?ly?xxXl5(pyY6`Vn)EXJZ2)v>9GF^&+{1r|jOtNt0dxM&uv}XHC zogHrPvTzTpVJAC%y;e3EzlY@gYG|m5VLi5>IdacA^yj5Z8vv*<g`&q8oYGwUMErgE0xKIjgq zrb95SIbHLl#$>30qw6kBfj7!^)Gaa31>>wpMi%Yp*#%A+q5vdwrFuquO zwaJEr)LH5{%cOrAz`mZKg@Ny3GU-U&jT^&d`JK&}(`gD#ufgB4;yD^mtf58GyZN_@ zQ723M1Z0at<|XFxWKCFd#klAyEz3`hIr?=z^wyI_mct~#nU 
zT7Ctlo;A`YdFkUFhJLJVv}FyoCzt&a)9WsUu>=?sVzU+v0zb<3cLSGe6T_BPEQ9!A zf8MM_ITf*JM4J5a(HujH)-@aOhM#lQqy@SB&myBFgL+l(^DX?c!~+*ktqV?8^7c)o z^-{{S>rrNhauDm;Lfg+Z_ZVmfP4nhlww^kNKcwgQe6<0rty@~96E}^+m-;kT><2WYE1OD zQ6j6XfDKP{Xq->QA4}TDpgrsKO05_E17w#R`Vp|?qW*jHHT&NMcdVrVJ&JiQpn(1` z@aTW~!+ZcWOo>5u`!R+l&h077S6l=E4&pe=&XYm;&hGZ~93(^>4!SqQU+#}$l3~1A z&LmpY((?6*Su1w@?ZG*aDH^XzmEChBwd_hUW;F4^J!u(qAw&g?K>jg#^k>8{aL+md z$~9D&c52ka049$C#UFNLhlX(~!kMX9@0JC`icAN|DlO*u)>2R{@?=ezvW%@+o}xFq zXLm<$%U0$Zq+-T?6GNmgi>kGw?AgKY79pHMfRN^)4Q__)Wbpx}V})=x^|!U_CAdEm zD$&7Y<^4k=Bc!>(V-~NM4@I*xtBnAY$HJsHH2}i;>RN+)VaWZXDX?TMAX4L4;Y$Kt zpE?!RE&(z$BPUe5f?)S&zlN<+@huP#6g$A=p)zR!fXF8ya}L)yp^V^9p?R#qRS3%D z4LU8G>HW#i3-*~dBu2EQG8hhgBYshIwxA%Mw9Q?P@as+Hf9H8R8Z3IqB;s{K6p%J# zom-VhVTX?gaF>v(G9lPU>Z6_VucDiHRwr$(CZQHifLB}2I-0A0i*Is+qI_s>e^JQ1%V{X)c&N(u2 zjd5MSL>StVaXn;10ytE8?Gn!8hfP$~N|Y=Eq-h~57mWPL-|e;PUKnCrsutcN?u;H8 zDKx?o3LJ_*WgRWc(H((?dnT{f%a$XAm1eh1UVck4d$o1qz((N0ht?nl5^suB}NVW6SfQTI^os5)}i>3*L( zRbj7OxrCr>Jm16>D3v|za#Z@o@UWKXMDPVmv`2JUCOY0!3|#yNA!anyMs;8Zc)(3` z+`#o^wSZD)+gsNZ2ypUr+e zIEiHSHbUGXms}WSx*OFu_rloD>;5njRy)bsR#m0FJ>=B}k)zANuGNX{aHo;c17iUg zRh$7*Zy6m8C4CF{0mrxm((i?4y2oEpRnKo>mU6W}Wsv_&^82rzL=G#x!CE5sTx7DY zo^BVQ%N<}JO^d)FoBwzYDTN{7wK)P$>itHAc zhq})20aAZcI0T5j0^}f1s%?I{=!(0|mMxz%4bD;UaC&M_iD|;RDgh7>Fq|D%K|BzQ z2BA?4gIUCrf+wc?<%`Nyd7kw;Ld!)XR2Hf2S>p5U%L^Bd8^w4;1%gFJk0m~m#_bo; zcI%vnuN;m7VJYfcj8{n%vM`>ENUi^iUm^kBn(It=?;;EM7Gf2LoEDk08%w)6G zbW2+&?4XleGke`?=XhSHWX5GP@U1XvtWwiH{!4tV(a&Cb$~Y>meZk!%-m$CZ3S#qp&peCZ=6h*;6_q468|Q-x z4&Go$b!j0s_G=g9m7uY>C7EZYOCAo{7K3ptr}vh3dE3K!+#&dK-sAFHc|fpN5aQVM zs9E)~_aW@rV#G6NIwv`OALZcg!2R)T#KyoEC0w!}hNC(L(ZOf-H@mq-j|I13mp6BI zF@E4tVaN$xoM;Ii>giC(Xrk>LO`c{TxNuMRG5hQYp~7S_$aTlJY=Uaz?fyuzp!~EN zxoi0I=%O;X1pL>8D?e|$N{w1RGUo~_LIxsPIbMpegOkd)M-E3SkWAK1m<-lUgx!6< z=(Qlb^scPS*$!rX79e)vw+8Yw{OYR-82)^xHRU+N!dYuwX(gOC><0T($rSe$dbiz9~iZ81xnz7@EXde%%54PBtV zU}!@)d`xcL7Q+ij(zE{qBP@X||C=uRMs~!2hXMP{$tsbRwi=x_JSqAX5Tc<>;}CTX zGQzRU^eDb+Ry1`c*HIcxbz9ylF+;J@#dh3swxj6U+ZmKLmgykX?J~@(W}QfNg^#(E z06ZDx#Q@>F2Bg;;_6NhOCZHyReRfCOGW`JL`&>X1CL;1L@{67xXh>pGMDSAegMW6d zUV)&{NNI<`zXL<;0Dv8UnoQ5M;gTgDWmPC=-x57%l;!a0)!)e)oRK0LV8iN1t#cIJ z6uPk_G_F-80jvptK@s_P!;1V4&{Y!YvU5=n>#zH?)3|Yg>#{)?SalF}8Efsge`w}q zZPgkAkUBj7M(PxCkGR#@fhq<&4>W0YzZ>2t=ryWxSMl3^Sm6Mpg`nQ`O(@(3~PHRJ9=C2991bTK#4^K2mxJ< zY#NBy%kY{x!gD{3gH4e(U^l>a0$J~zt2eN`r4kC`+tnPi zZ*qLh-d>(ck_8)z$V7kVY^)N@YA{BM^SyD^FU}}oJ;-gn;7zKCK9m#9-baO^wP2%W zjDc{V4tOR!I9oQG4Ou11#LlwLzJ?qEC;XxqsN+mJS=x$cT5dYU$=hYQm%B(jM|nY! 
z>kD-)xTCCTCA|Sb8t1)<7%KTVoOgwvBpsT8u1(m-(!{x{Cc;wG?pjSVgQ+a34YzB4 zoslKJyHPMgp=cP_*jZB)^xiE4e1JV0u`5(ptHvn}MX^&q_~ud+1zpJ!lM?T7BZa-!Q;jC4ysb^BfUrf~B6pSI~L|e6!yad$; z$L~PeoE7X5khkpwa)pz@ciZwz>P*lVx}8dsR-(xx_+@des}0SD>3-FuT6NY48;r_L z{HSof#g~X_(&b!8tyKnRRWNUj6_9?Z$pO)$K!jJHI8(+TXyc@d;doeCJzstY;xd##TAlaWv_X?U690xlK(WI82~oeHd6 zG@N)6V1L1(RHi?|ypc;8QMlS)pSP%;jVe9TF&k0+lNFAP$j0f{5Epl29`-Z{0C_5% zecGoRp`Ih}xER-#L@&&ynj~x;*V%r0PU!Az*_z7~gukwKG^e2rXR7M5>l>x}``!_1%+y92jux>E^tVsVRArhjpWuEOT z3bTCvvj!j`M!;y=RzU7(02MvU&`ZEd1?u@B&qVeP2PClxjQtj3gDb{w=!+O{Lj;fz zjbMJ-{M>uuDj(-!`kRY7fRoEuN)cNRUN5=oxDoYFc%NI-puD&gB02zq_@CxvbHN{q z&y|2VSwv9QY|k??^ry1yVp&N(Bg{mw-9hex|&Kc_73;*abexo%H+H@V>yjwF=iw4YL%IKFKO4Een5Jgn~XRovZg-RjnhYr z(|y_fQv)eGXfRkt;3u#;7<|x*j2l={Q>y8_$0#eve68t@T>mwf6xFM(tV$y5o`3pAQefi}d?*4t(-XdN-K_7j#J;t?hWm(72;+vwu zz$b*z_p#&_&znCd{-v7Acf;@A^RnJAnax;&LWlU9Ah5<*Ni*U=uu?6Vb+B5{ z_fVN=i(BnC=2JBhZC-qtuiST=U`*8xmVwzrToa76>Rc*YGC!$YHbFJa)`fcR1ms%} z)h1K}R$P+IG>LuVnb%A=;b)v<_PcgdX_ZQ4KtW@NL)5U&zJP9@eZg@*Yfp{DV&;^m zXV9D7hwwy=oTgwp?-K$0E!ZxLKJO(Vy-owZnsGuyu6Qou=JRIqaF*wms_;JO*kNyX z&hNFWnp;VO2&;%?!#nZ}4BAD#a~j|tON zcn~Foh5rag1>5h8R9&rHly)4OdrgkW;m!-cX))1fJL6n}ddNxLHrhV73T5$UG%vLs z{l2!}rq(<~b=w}60i-@_wb;Ew@l+va-n#PRM9q_FbDg(s2a{xW1`rq#L^m#0IcO>C z%2+z$79oV9)dSoBfi==by6#R36Qz@g_RW^^dw5{ler6a;I*yQ_7&BXup1{DAd^W+r zFB9J7>|=URt&094z~a^#odcBSs=n_c9kQM{Bqs&^siG%0_nnFgS6Bm?oDhnTY*8iY zW~!PTDnlp&59jUz1vq|T54$LL5K{82Ru?^ibPgzxnRxSiOgTvT@aKE9o3Cgwuqumz zDv#R=U&NQ-_aHT3vv(6U+GQdz`H?$vokD0aV+4XKZ(WB`=gi{`ZtvaS>ya|yl);`B zt&z!P+YYdqbkDLb($?vh1fN}JVA7I#VikYwPQP~ilW`_AEu~I_D??55)p)y@K6(jDelO^t$xcfd4r8DUY)k09QJ#OV zG{EW?j_^ehIsE_}RELFBx6TiE@II`zv^NLFel#qD0DC2HCEAIozMiGENS7|1!*#^l zK01`WNv-;4VIB@dz*rp1Wnyz@;wf{;Bg|T)MtCWO#8`7&vPrj8#572`Hh>&}=rsTL zAmq6Zr4fXem`aFxiVl4*hSQ*bq&xL1gEwLdXb8HW_bHKBg2XsBETil<_fQfMBmpGk z$a7SE`+EMwH+JDQXE_Ogzqg)$+O`NQ6OSE?iQ_>a_X|avF0`TRS7%IJGxb$tsmy%8 z{t!-+OO2GiiyUL1i@eTs47@KY)_=bSude&x%7I~Shik=%2U;MDBr`2cIp#LYxes~( zpzXer1B%eAxujY0A>q})O=26Emd}(QNH$@!yD224I62_^K5IrGo@N1Dt_)R69I*NA z^Q03O4(lPXzLrksM*&smc`7@X9U@IU0^nWt7y?qQa?(7Uz8^vATntvJ!VGbhoFr7m zYgBTD3a=LE*oMY;0@hvMGgmocj-%KF16#p!JIhOVTa3+q+7Ep#!Z%uCLzWgW;tifjOr<`Gr{`kSj zeW94$KH)#9K7S9&unT{wT)7BZ0kwnDkbPXX7?5({6Yd$-wvXbNYC8e#p~XV;fS3v* zVO&g0u0?yT- zUQ{Nw*b=P(#wdn01BJ;rq@{*D>97cxlf}zT+NIcKVXR8jS{>T8(0;=%dg4*yP{3^Z z``(bPn<>Q^E_$7x{HFT6@D~!Vc)G^qa>eSUM~TAd@NZGD!*oEw&hcgL96HOT&z@IR z8T!A20WrCrQx0OU=)){?J@D*;RqJ>7X14ORH|e5RX@8czfq^E)(*cbVz}_*a@c)%O zsQk$f#O;gz*G`cdx_Ti&2K)EbRZVLnr#dkx0`9nh$KU#5&j2X=zM#OKCj_f?T`(CT z6y}@7>0edbsIj5)tY2|5QjTNz5#YNl3M?gAgf*%1v9m7*Ge#SdfEmP08Jg1{8pl>v zUJW(H8s*274y;fKgie&!W!w)uAjy=eS57r<+68zQ({>rurgC&8hVN{ieSlR`poqj+XAgrAB8V7 zC5JR=r%a)BxBfHgm|9~jw{%LUWrILK8a1D8nGffA5oFb_U}j%Le7+sGbv{Xf-d#>{ z^5FCs#P86|fn8m!m+aW4=f}cH0bMCa?@2Gbm(6yZ<*}VkzwOK$!rVHVJ%(y6yp5cV zm^s_mv`Jz6F&Kj&o<4{)m_@caH#=G_Y86U(q>r(_v z%wxYB5Z{PJwo>OH5qb4cuSZx+!zFv5DRt(^NYO#6-LMJ1>fD}*G*?OpnQO#(8$6y5 z(OGj=!-6vDD9cz=<{AT?*bYc1uyOLR4Dl*A{>6-qb|0xvSXNYRxz1dNBF*ROuJD&U z?GNFJ`$xli{O&L#t0Q`(adE!Vhgbw2Fec}ftipbN6t^N0^(u*%S7y9 zFTa{C6QYi2*%0*I&1P#_Zkxdakq7}Zj3aofG7xdn_<6aMD!Ah7qn-P@s9nqyJgXEO z`|dLI$32DA(E{r}hso!A^o_?-C;Q55Iw(pfN9Ey@CAIdglY)T7YHh+cjki&RqVfft z6Q>U5VVzZY%0{i4$(JUC1ouDR5ucEd)SH}a@4XwtW6$rFW!c(hz6m2pAf@B>w`_1i zQj;^`^~->FMBEr^PRJq?s;N?A&0>#f499MNZ`<;bc}U;Wa4o(UzXO{?0VI?p(3b6I zP(}(mCZUX2H$62(D?KXt|Ds#!dPxVP>PW$Rl^9*+%>+9mX7ZA>UrMtS91h8aU-%*^ydv@woxj z#I3RbWRv6utU&i&Sg&YPnAtvPMLFcL|57!0BYB3`;Q;<#xE^g3h^Gwr>Gbi)`}#_@ zV2oIu>v6uK@k{cXT@N5_2U|CvaR5v>Y9g%_<1(}h z`}81d?$pmcDUw#9cYf<6!gF(sNg4ULium}**c3!GA?3j3>`=9+0h?U+s$s2P{6bYy 
zRb%f_iJ*X0BfiDY(%5HzN#JzH2M3$C-)oc9IcFm#fWeAMQozN%PgNR}v1)sj)gxRF zt}{mT7JgdRzipy2A@S+XeO1@}jj47n7E+1-yGqZyFvWUkO`}B4;Eja?Bd00CvBm%V zjvl%v5%3?m)1BPJbIZqf9&rbs7rF}j^{P{RA1tPjXgRNfaiJvdfPmZ~>qccxzl3%n zR*Nbzz9aJ}Z+cm-n7y9$`d^X;@!e%}F`^jCgTCn_ zTqu+=fj*tl=X?PI*2B z3Heogn=cxeE7UHjd{c9jcFk(QjElr({}~sh9y%O1C}4?e#I9xlNHK(`fjzRt&n~H1 zmc_WuR|E)Pv@+A30C-IRmK%WWc;H=vLDd+mj6pWm8p;#m3T%w5?mmCYZqx380#s@e z>c~k01-S-GR(KSI>e|5L_9;F4(|VI8q#^36nOr)Nl9jmdTbj;^C_OMd%prLOKy~{a zA|DPy5*pc1TY&!$)eUett#m&xXKhtuxK!-DB@q-*bL6*h$8BADF{jAO&=!2wGsKx3 z78rD`M5+FAxsCl?T`k8xXKZ$$E`g`TTGtgzfcM5h_4TZ8g}%m(2oPgh)9(LyrnpXZ zo|pNAckRbN03@U-MhD03>|WO}g~#34bIb{eu+6eU&mC`ETkyku$)A^bNI+FHbH%YYa z6qnD;E_n4O$xL_qVirq;R}STB)Ksy{RTYWinrf**Yq3z~MgXVW+&x8pgx!y1yLvpA zcfQkFl+~_&`bJ1)l;B`M{?3}3yHxE(*MP_i?T|EXFuRT;f1jRk z>t#*1!US8sIS+cl$a8Hh7=ZbOxv2xOJ}i2MAm_o{`31&A8mwKV!z7WYF9agcbTSTX&H}Cw51VG?MNUCy6O`JxF*WbR> zvDBWYKJ)xLXJerT(qspQs+}#iJ2Yl%^M@+<-i+&#aP&^k>7(tA=saKf@tlYA;%6y(P z*W-Ic0H~`iI4oG*s;7aUxU&6He_e0tf<;Z#*no}|L9HO8jYpUpsBrX|VY7PdlLYsg za0%ICCj9qN_5Hl99?IuMaXVBDx=o26D0*(mjoD34#-8^Ov@l|QQYRwUa9e)|zkS=o zNx1x$j&8Yk3@UN>=C?utNUiz2HRIty0aIYo!+M~vq1L(c1w$;G1D5E509Qy02RR%hxg)|ow6Qf6`tC&d(E%Xo0Wf;X*y#u) z-W(QS3C_Z33XzV46|r>(n7zPH2<6J3`^5oxI5XSK;;v1Z+|Gc^KVvf}7&~Q4?dxb+ zP&D30+mbI@nKZ4;Kg(8U<4#yC#tU>E>RVU7(pJjf`gkvtRnSl-Rp(|Z=P965d=Gbu2}2rrL}G8Yj#$=fd< zqMr7kx+g4pTV&-s>9%~;BI*Y=BVwBZZIE@1Vg1Qs@crZ|`U8Y+@ea`P{%^ae{}qC2 zh4eH1L@Vq+CEEVoMLqlv0{MTLVE8WrIUvE%Qk=6~QbNIglaq%dQHg=z-vn}&VE};~ zT&GOMPd-nVkQgPw$Bc!Cw4nn@IWeoG>G11kba@#?K(#~QNd{U*k_)S3I-{(kBp|w+ zC05O_8LjfsVQNCgiKg*i35J={mJNh!<}jLeF0QrTwH0h^jNx>{XM#Ua08;CtLxkS2 ziH^lJyzOO7zY$Qg3J2F0T?@c0o)&3~srY(xGc~8!*#u*iqVsbWpBdY7Yr?cO=9aJ; zII8gyiG7-QLrwrxIdb9i0>~`i>v>QKOZprD;WFhn00E)%e$L{ht=ISvHF>0b5XI8| z%*E}ID#S^EiFP*RHW!#iCn8qb@d<=?_OK2)or4#4=km5R7qlB$d>fWIL?gq8v!0n+ zi%#`?ar_|yc__y--c52rcw+uAf9?Kmp8i3OpMaXO+j63ZoA|x!RSH&5U&O%vDzhXa zEr=@vtIB^P_$fCqYRTATpmPWGW;1-;>ib-+aysl&IonaSh@Tvi4Z>@uTs6y-?f> zHP?wAcp{XP3$~gai_!IDjh*5;)Wja z7ExP+aXrO1*fA^?w@BD1LFqS+ zicJ>wGYRUIOX&j#%lOqG*gI@<$xjQt`6cXiKwra5lR9p?8R;hR_J~&IAV|Z~W=Wba zpfHnj2SAp4wY3TN#fqe1);%Z;2AmFZO~*4@D&&J#9q1j6smw;&*yC_e&uYi#LA^_$ zU7+7hpCvgB4UqduL07(XXD)q>acO~8!2^M8BK*BXpp9F^GA;@~JK-qMgo+p)taAcw z>ONHLL5pus{*j{Re{Jt`i_kId<9Ma0y`z7p;L8EC0q*T=C{mzvN9m(O)n^5IbGVy- zN@W>IUcWzR-6`k_Db02P_x*!h)snO)z#HJea_pKym zI!Yc!3I);F*ToEZ=|OnTq976F2{yqfyP!p?E#=GX^d|1^4>{&Mb^X&uYOJ3)j#3L$ zhX)1Lnvu^L@)jkGy+M1~b?P`YN7ja&Ny4FI7rhhh!S?y`EK4!TB2v1W0!V}T|MtEAcr8~P`qmeo!wfKL{2U#En$df>Dm4M2nGHIPo z|CJ5u!2CSUYAmy#&{9IZEVHa^Ggs3oGoY?CSZst5zE%PjyjbBQ=&iS%r9K`)oZKP& z%UX<nqQ1=XHL_#aONQ?jWVuYs=Te1C6fEL^`+tGia93PSn#9n(IvIT5~<7 zf!Q}dX!|h_Ge%mFr|fX>ED;x(S+m83BHi|=o~(r?)w$vPbz3K+Z?PdVB*FbG5CtiX zK*2c(Oe)f*g)U-uWIuUN5DyX1(eb94(7s&TelvzF#1oY@mnU6gH z;sX18?WX1Vq;54-k6?ImFa!X#xO^YG1^2kSBzoZpw$x+*`t7$x6KhF}soyvM$7M(a z^fzEN#M-l1knohgTbXg)Z=j#2-xe4L?h5ixeqE;O_;=Z}uk4nJ%be5*1sz)gF*j;f z(eUpFDQnIU!8_`KpfeE4!EZWyb16d9U#WM%trmMS*F@gsui##e&VI?mPku=d_rqTl z>dr>HcOU8xw9!EGTuHY>=_sv1aO-}i)E-zF=Oo1D%T+eaIrCOO1Vn9u@k_{xCb;Jm z&u+y#LiAPCgpJcDpVJTM8I2K%Pn1xwD-!x0&k_Tpr`Qg0+c)Ce7Q&nczI6b1fW6f{ zk7q)0(COp@Yx#O9E1zFx)`l$>(}0yOsb)e&bqr?M#X9FU_lj4`u@q*VPvyF9H9hH0 z`mr~DM@t$G5cY#OLjD$xGNdU|GqJGJHgq79_BgJ+H>1|TfXT)z9+Sj* zX*CsP%QebC>HA`uzEvUFW9d1XMR`($g#cG^nNpmQH4C<(y7!{5k@`mwcFeWD7~92a zsOXJ@uBMV6m(xgNU&vTe;8? 
zCI6wg{ZHb5fLZ1NNEJnX;zPoJO@MzeQTOa*U`rj~6oWOS+VQgk6D1A}^NnF^0fbGZ zFr@y%o&;s-Uco||3A6&8f`MDtr3BX^)TesXW&9dFzWOg;7u=e1oT%EPHvSPb*Qc8} z6nx=N^|HTdYEe(C4gdsA-y+e|{txm0wImn`gSCcbK(0!L$Zd`*Fu*?2##1S9 z0|H_~`uiAx`4UTg1;Gd~5QB%1e~MX&V!#lCiobzSEgfnnGM&!H z*_^71^cr~EdO@plZ&KBwQn>hH-c$9{*<8EgSL%gpQ?YZW{nBj*g|!>9*R;>sWXIK2_7UpZfIgyA`u*Iu+-R3_0O-QI<9D&Qm=HwcT0R-5 z>cnH5IX4hs<6|p(H7HrC1CPs`4GQIzmW$)b3+NPVmGfYGbSmBLgSU3_^)UR)E-c*O zui71cBA5ZlfqCzWWp5lmXK03)PNDZghCLi;BO|uGOx=7oVWOaW#;0W2>tks}Mj4c? zR)&3?SPzXLjc6Yx{-l&VoXaa|IN0rhRe66W=H}?!*Dp=C;F9Z)a%j;P3j{4W5~)(5 zLIbHv$}2|kB$lx!Dmej0u@WgR5M#3~((@cgLNLUtP1M<)BU1Ac*qs2W@C<3D*+=?7xj>N%$H;bMvJrw-Q>%p zk&}fFH;4!9F+g4vO7%0&5pWypD=H1u2{T3FS2lpoCBE1NFM=IABU|6m$oa$n67yUKO+cdYw+)dZ||AJEU@%A)P*f9}hw%dC6?55>IknbAmFG zyI&#JeNe27PO8~vV2ea+kdyx_oa#5as!&o{!#)8^6%16#nn1T)HBk@3)^-Ix#TN7| zWd2cWnAT3NBUe{Ohrpj_`{SE~z2Ma(F4X--qqH%+Q^-!o(E3l8B-JE@Na;gnU+E_i zDw2`KjwI`*N4$?HwJ%;Mqre@p8GVf?hlCXZGv$c73M>TNvtM7brHIgAOf;BXWm#d6 z7X1PD)VW&b=AkXsd5?h0N__GyvzY70NGQDq_eoD;M@4gt`r1Z-<9YJyEIkjLQCp<~ zCcU;WlnQ&1V@`oma0%q3+z4N~G2@CYB$0NJFQmvlz9`abR1oet){}V^WED`bR(FQ4 zVN8JE5q-d-0^d)Y9z#mzQ_X=ki8C8n=re_1XxDu00`Fs`so~owc^gsO*WsegbpdVQ z#>~Xlix}St(PHT^JgAwr0#GU3he5XHR|Pv7uy;1Wg#f9F!M;5TUoSY_m)O$Qh2YQ8 zp46*Cs+b=6Uu$PHfoLgzUo+P2JU+^lhX|4~bXN-C70XYkWOuIVP)h66;`#~oLK1Hz z>Fzp$TW7vHs=`uz9iu-j5+{P(#~Kl8$Y#Tuuk~`h8LfK=M=z|w>K?R>nIzg@#e2IHau1ZOwr)dQ8m8laS zz}1V#*ky|PS!KD4j%$_LJ||?f2~pV~moeNJeRF!L_E1=41Z0oaT?B56*|^KDdVY5!u$i<^v=_MK`cRu;4p@%`i55eJ zUh^*eW(G-7;eQ9}D|+UU=^Y&+wzUf5!kQ{I69d-;Aik>#X^?WOWAP$8G)!qH{~*BH z(^MT)YU`szLBBQmwyqYzAxt z`r^7oKik&+Cn52tX~lgX@>ZxM&FUuI>Wzog#!MIfN8p*!)P@y4GVYjeWkv_=B*dn2 z^n<~fRl-uMpW`#7ZfN?t=1g@GOtg1BZZOSjwTVvm&Bnr;U~k4Mi;idnslel!{KR!d ze*T@$yL_(8WOupcB9d|mR#_!|>GLT*tVdNm#KL#g+N$@lH_zWpEGjRa$7c8j%{WJ* zQSeHqUh0o8i-(6I&fSYZLPWBaPA*C`9W*~nA&5ex0lxtPYueNK9)5RZVZD=qDV5dh z-H-y8(7kyUmJiH<7W?{(?74jlgUAkcLxT;CPaUZCoyzj@62ki({d8ey4bYLn{`Vc( z!3t^Z-b(EGC*-bY_@9m}M+aR!7cgoj{%6$OApr~o8Z&Uu-9Uh`wWT2y4IF}iW756T z!-FK&b6~8f{mIn@SyXb4ik=2f3JC2RN|!kYPpT`D!v}>i!cTgL&?%Tqfa;@XR4{c3 zvvC~$K0Q^S@D-aBoH&7v1O~v>Ro{u*gP!{J$<BO*4N?JYRV$|09gCf~l7nOA26|%9)amCJXvu3|EGo9fB9K{J2{! 
zy)esJ2QW@e{bQVZbFIOCsKfnbrJFaSyPw373|u&ZF?iFMOAt@r$pzi1u@`2Q>J7}A zT5t>m#5xa{Fspwk0@87}(Ik+1DJk=^oZXL0GiLUZuVJDz*RG_(ns=t0|!&Pj5jg~?c*$Y>y_0n>(JMdFQk zDcx<|O08Y&-WcG#VxNBOE_tb8d19_C#$BG1VT{j-m{iM3%SM*IsYdXRBrJBSDA72g2`q18#z@St}( zZs6t}f;F4l%(woiuLM@B2g8Lfm)<3ljUl?!dbq#pCJq@SCZGLShiwl~2bt0JSig~x zTQWsA2JlS7jL@{et)cw5?vN?{lX6&MrF|IngC!B|STSmrczL!f3-xe4q0HZC(Z=#Q zXo})(Z;{C8#X~0hQJ5IU1^ayc2U530=_D4CwPBY36>%KcL;Md00;v zeTQT1?8jXVNp!O&LWfUU22(y1DDFdxz6~@evtg7t4Z*8TbH$02g@jPjjMkV}ow@kd zDau-iv{EYX6lKF&Cn6vXn7~S~k5@S&y39hJ|7}Mh+W@Y*t2b38X9^s*I{; zMo(cKS%JczsNXNSNh5w;htHdlUji&y!4rSXVOaIo&nVp&(q&7utIB{D|9cq~@p{^H z4X8lN_Te4qX4i146ZIk%M`>l{)JE{1@$}QGBwZYX4$=9terDPW&uF4=ukF62Us|G^ zjLcdz&!ps}oCtl|_nULVzRVv)t*^#TAVjTKIO;IgJfh3Zm6^IeYfa@D`>dN-8#nPN z)~_q?u8D4dZY9m%$Ff#pnt|=i%J{b{r{mdTa|L!(g4<`8ahaAEE#N>~81JHR{YTAreN(SrJM0$U~Yx3&>BK)CeVFA8KUa*y)U>nF_nz} zgY%o&Ct1vxfOY+3*`9#=m^N<`uG|Zx$X~H`AfV97c70$q#me1BQWcSHc58xCy~>vq zEZR#TY2-l4ztNokQ*4(lb%Wl z=)|*uJ}<3E+>!Stk<;=}eJFOp-2rU!_HVp-4+;UlUT)I7;pEu~@2!;=a$9Q$C1PNwf$zs-g8vfFOVa518t9?D|2*jS_XCAv zcrAn(gdS?rC^!G*O^ozp6NG6YB(=`4&l~)L;PbM;YoT*QZ?pWEj<+cRq0ZHma9_b< zpKqR0s+NNgLA6jr5vfKgc|Y=JJXNjRnP=YPY+zE~(f}9#_-+3qT>ML*vp=XN^*n;i z29-a1FFjd+x!M(&@^b4o!yu6Atk&dMYbxJ$9Aj0vV{{CuPQ#xJqwCUQ$6E&0S_o>v zJOKNIHK+v+svSH+HNf8!9;+yu^iUW%3|q%&nm=CP;dgc*{I(!R_n5CB$pA~k@;e=+ z>d*m*2FV9hVmW|&f>&)9&cNDVZ1?#-rsW#4j3_S?=8wICFSrSL&Irk3taD$C9iuPO z9?xnR>4Z5OuTvc4~|-C|d9`%e8zdVIG1g?N||(7ho4kGmJ#-s=CG zx);}!X%Z2SCVVIMm#F>G^|SexdiD7#v58oCLVbXuq)kJoy5iE(99SfJz5hY+{vWo} zHz<`^okW15^M1yz_4Qp^n+$~Va!$Y zj@hm>`YFaKV3)MuA=CXf+XBVv8N4#pFnE4+3Xg*!T*H`j*B*lU(ZQNCUIuK{=E`d}}9{}b&BHnFX$eQs6m z>c`M1k~T3gG-6|{hqDl9sLx|rAP8-O{#({Ajp5c!8nvEZ8ZA~uq#3XCjQAIpczeB6 zEvzS{h?}$pjMXIK1BbY1W6rz@f<3j2+vK-wOZw4cm4VZYAeJ#Mx;iM0(2MKQZK|G6YS+T?ase6)`*%&{~uRY?^nW&#VMed`Ourd$Ep|l~)Xs1Rsxtmpc zV(M-8Qnv|;tZ;jtf2#{&SC9r@H$`PWHmIZBCKGK`$Mv8N@OuI2}ThE>`=BX+p~wp=Tf$Hw>GfqMUj})!$IKH*37| z{WlW^2%R2}?^F)AzB|FuZ9y?|)RObtmVxhAFAi!h6FBHr^*mnwcBFJ3sFE4#Q~QSO z^{{yCJ0oh~7oTyIDbSzdXqqZg*dUgGG;sjK2H~} z_h|_?i{NgU*brx%)+`- z%v*zD{PKCA#nzx_nzik$9}=6xazLI@_qQ8hG;)<@VI*V@lE>9jqTOk$JdZjdT@kKe zW|l1V?th+zq+jc9vA#~5ml#MqZ0k9}@&>6BvM_vgWKq^YNbfR#R?3lBmT#WjYC>hg z8_-Z7u>IXBARGz8Vc3P2mBQ+PKp(`{$kNGVhMB~IWqD3c)(WA7_xA&{^ST*vOvmX1 z$--b$!-*}w@EOl$HEvFBRn_y7XLz|icr}JRpfXS%DRSrkrr1-Ya}p7h*{fe^KM1%W zc&3_bt}$`O*r%J`{zxNa=6sAZ0`&@>4JR zxaXh*XX{GyW#_(4s8m{&8DR{olmNL0^7AB&9XgTC; zhU9zN1gP!LW=AaH9Y&cDb<1bj%QNEHD4daiz_6~iUwh-r&(80ZctMU@@F~>p0!f)y zYAsP_=(uI3tu{M5f*M-mTYdeiOv-MFnip*F!cK*rxedR*+5IZfC{@B1)lx>f>ul68 z=6^bGewA>y%6>+zqdGU%{1q3wG~PG5S#XvG#Z@?vntCv$HeQ%mGWDAsVkO% zCk4LcLEo8}=bYBU|Dfgi*LbDX2qGcfL!>6%-91Lf zkgkDUBF%u28zo&sI;3I9hY(SQf`9_@edhObJCVu9YU+S-1<0CrG<>fog;TAU9UThgG+TUC^M#oD_6P3h(R zMzIWk!|5vDEHiqb#@}K>Z=vHuLBiD)-=zLy=zp)S=$RVm1<+z-s22P*uj5SwnArJ( z>i@c&ePDu**Z;4N=l(D3gD|QT(E(3FZrsL-h9Gb0P(0?#6aEBxMJ}a6;O#7A`x7R! 
z2mh%KCn92%{2MT`C%`@!a)C;L-DucCbj260WH5gmJ0p7&H6RJ_qh%P7LJd*;35pxm zLaS>y-hB}IoBOIU()(#6++8>&UB6Cxm?*kI!$k3Q7kGE?>a8%xt^zusHAd5R+cE^iQ4_Z{?U1$$fsT-kvrYC0$4rARNMW;_A%#CW_5prH+{=z8&{_{w#l8Y0&qmxTM<=_`JN%<7EnQLqBh5TV}^*+e5Y>gLu z{|fBg9&wSep-AgsR@M65+qZ64hr(CC^S(XR6ID?V?&gmhygTk($ONWEWU$mNUpW+INB`FA!=3~S z8d7OhKFS+eTi6P#*QoU>sELQCyT^6yg}tC6CPfH*1Vxge>=d|Ct!@`;+IiEFQSQ-R zS)Bc#Vv8O&Z80o5nY-u6@QYWqWhj57^fN!*`UTK%f}CM#eaIFtSj)3Poc`@_gmg;477mbHw+wZ>{?m8YWb+6h%zVz*mP0Wz5!Zm@~)uoG)0@Enn)3 z(o!XaxH82tTKmt#)i!t|>+nc(hmXCMib=caNGN_X9PY9HYdT4{XpJ zr}@rsJrU}*tk91CjgI66!O~e#fgOC~mYpf+5~J@tvdxtZ0u)6gvW;UgXPyF_|16VK zoM^2mSN<6P5u2Nc@qwCW1*zUjSu<+nLhG8v4uU_pcSw!VMjHXIXBoNJzyYo5JS$#a z9e~b-_1kME^d@JmK8)k*3bH`yTSg?Zq#$yYcfy0fM{bda$*Z&2lV8JlDfW5QpA!fH zzh9s}*UIp+&BntmDN31Pz-q#%B$1LzYODCRI$rf3TZ4nrFDX35_MUGm&bTfb?Ox{n z;-hr^ZaTWy6J_ugB-?ALfaw`CDJ7! zU1p&6F?_FIR;5PYNH?lG<^z`ZHN1dr7V{w;^Np&oiXjlncwZva?_C{~dQd*e>`78Q z#W=hDM7M`Spgh7U)NfMGzy83(1yEk8uU|s8KXe*Dl=XhNFSpy{1{GcU`&v-O*EeSk zKCj+3U%s?C+o=^g^jm)OO8mF~^S9m~e>vl`zCxPgLagFkTcbI-N69HEXma z!ZB0ih%5RbsY+lwF%e#V0+YszX9+Us^BG4MA^|K8j6bk}DHg04_=wBaG;!9F{u2XFU{u zvrxQ1JmUegcSHIelps5J>DAQ}r9YZLxZrC`2A?POpv&3dh{))O*tXy`Gi}|np7B|Q z=dYPlPx(p6LT$)x?-2lNyZ)R5)vgRmkXx-heZu(Ay(U=ykqIYPf;bha$>Q|iL}}oW zgW>Fgl60ESYurpM^}@N9o$?yGy@d<)t>3Nc&oK3wNqElPI7q!;e5B#uMCs?e6sB(X z8PaFZIxqpSW@-Chz*?|l9$w9PHjhyS{Y}l5`BIY7dubiws5sU#r`m7sJ3@Cx ztxNOs`L}jFsvxI$_$Ac80%Hv|>fPb^a0&OzeppCeDp{K|>~Ef2bBVbMym}L6%Ooa~ zs6)>fKQn5jnsK`q%yoCiW<*`jH_9EDOP1gD5n~425|_T&s1HWD9&*^KrNoxaEB71- zM7H#kYdPxmN*8cw>rnSoXUfr+AG(;8tZ5{x%FtN1n;=1wBNHrqi1-_UmWL-TyOQ-WODNBWs*(P|KHBowQuJ9iraFpAf#c4*M+l zEYvy(Uz3-ws9v?s8yo% zJfqv;h$Hrqf_|xW!gW3`+GPW0@Zo4Vr#UlOMSj)f^UuRb%(?D6(t;mY`Vwx z$l?H=ex0X1RqQrs$ihY$1$YL@0vW&e2eFWa-YMPV>JUU-%9VRsWj+O7ZzSHFQS3vM za`AcFoy8r+NC(Ht2JRr^e)*V@j}xAt*R|WLr8_uh)rmMV9&UzMrgz2itUe6=ye3~_ zB!sP_lR;>*LA=C@cZ#Mj2@=#LZE4EvhC^t04Pd_lMP!jUlPpsv3Ntx)!|mjcZ2n{$ zrs|N7z08y6FO)<+lQ^;O4qhZo=6I$J3>~N&7 zm`}RCux_%KXe1S$PW}w1+0s;q@OWuAM4Oy(%Vl)%v4f$1ETD^)j$o7h3yJ&RP0s&U zNjOKE>%s(yW$ogF0|5s80C^&U+4y&pQ~s@7U$@*23_=cjB^|i+QcC+Xfsh=arPXpg za48UpM{DU^s=R7bh;GD~w(6`w!jH{@`5rS*-*^tc{jy<}?UjTks@M=w4e$sgvLS+c zdg*4ciupfAUJrl7?2Di8b{Ojjw_e`Y7a&L}Xj+S8q1Y^Xrm(TeEM#1H+2#LZpaNmGnBUKhQO*B(_S z`NpQzHyd|F?$L1^_P~+=YD}=Kk|84EQzRUYHb;i*5SCCiH6BrFboo&irUz8( zXQJg)6!yThfcy*AL=%2URrF{y*vrwiIG)!*S6A&Wz=KasE0w8eG2Jc;y81TRTE+SvlXJ9l1BJ`!q zSN!ppv7LxX(EqVF*5)0{pY`K@7-UJb$;YLXcwM6m!$A$@Z^~ntxK|R zgvbum@fwQK(fz~EcE&$H^DH0LcU;1zcfb|OANq~r{5J1xx^>R*o)s?+ZW-)sG4?w7 zuC1(}zre{QwA_}Y@v2_5(awUuZOi}bOLKnGR!#;IIbSc#v#p~OF}>?-n-O{&?xq(( zQddErsJvd*)tqWMgC_cGqP!Kz)xb$BRb0Jm9hW6?T@SwG&GorU`@qJX>(eLNdiZMW zj=?fP^wqjGu!zZuY*vlXC|k;)5AGT3iM1?k`l)mia}RP-(bGgIJV@5a&n>s2h%1G{5#zKP*kO__>^|%=E6*& zpr%Wiq!o|E&1ke`qBV7(tG7nDykA8HwUrv7Rv5m9;nlvN1u2`~){=+G-;&^1A@?-m z(>E8&f3k!aZ-$udqe7Hx)iGAOMb5fofbYuV$HyW=dE$~p8OD#^7O;?B-&OaK8pH$E zga>V>n&_uTO@bPnbj%^9qU%3v*X7BDrzguz_A*@X7rvNL9-&qj-Sh@It1ngaFxDTU zZ_M6mv`aXt?DV2fk{)SNg0$MqmyIE%NYOHyZxqm(|qFr+IIGz`YNYUDn z=0Q-ez=N(sGE9WobG6z^-m|nKHj4SP;uxEr-l19IjVIwM!aX#K4ME0^^*hov!1~B? 
zZnE5NQ$B8zhk3qOTr%9CZe8f`R{rmIG2)}3cbo1RtHWm;Zzxt;c%rwmH!v&>=4y>} zg%<5AsR`dg)(58YhF8q^r8hD`Z)P6Jih&;SE zc|l*GUgM8oHP50t!IlYFycF85ZP%+?ltN!f-Fdc=GvusMM{3NSz<%znU9%_QWk+|e z3A3RV!legVeK}saBkQzV38b-uom~M!h$o{2I;k&5s1&-vfkP73Fg5V`DwMc*nH8TO zGGF_XuNKgee)4pQ6&%YZ{x*~O@pd?5m8vHRYD;>P3yPWAI(xeS4Z#T27TGO@d`7S< zG6p3GFMMsXpg&WuEveH*JtY8%n0)pZ7sqza!{8oaWlNN}7mIRo>-f%9i!_0!s0*aM zVYf~lCXHfzuUty~{zXB>kma$ezvx^w%diPy#$ssB5J9~>rp4#*;*rXBH0D8sd*TM- YtAq@#t4{O4Iq$XV_Se2tS1;NB0r)k{A;pUNlMqi3TrfOi)g0JfMk&i!pG;gW*QR zSN$<>-ZDu~_3yn`ul`l_@@A$RjIjY$T?XGZcxAQMU)%HO6l2R)XO4W_2+s<5AgXeJ zv1*Du1Y^}h9(f<8T|i#>Ch&X6YYzi|kG#PL{u=qd85m9>Kd=}4zac-|qWQK=z;F)P z1Fa)JBR|~)eWS>`PlDzLkq^LAJ&pY8HQ=9+58q_0aU5A*0{>p*`Ii|RzKoo{0s2lL zAG;soFCm}U#Ms~#)`?-+7 z@DcJ?2SM*XlpwT}IXJsMyYyiG3|07Y{VY6fW@tqIxbveAVBbT z7=caLP?h#tAR{XZXh>suqC=%isl&1>q7-r#yh<)6N?UGSq?RU)%C*-(?ud6nW5Rfb zrwQ3`=F@7Sm$t2i&qXlTn_Mto2<weElfV^ZcT(_Oj$Mr5 z5uQoiSvv~0Ss2EhAyG*hsnl>^rLhfrN=KdZ&O@(uL|C3!!@cuJ!BrKO^;vpuH8|Yn zvkHL&g7;E{YRyUChTT$o9WKfhP4DQJXNJ6B3e$*AW8M<}ygR+}{QGT0#43}v42XyO z3WNL@%BtuIgCd0;&q}|n($OqBv`2HRtNQ=$Y@6=mT4%|R`~&<9^2Al(zP9qz4dC6# z&piwL67v3wlnM_Q`vLex3VNX)BNxJ zP2*2?fp;N)I7sK8evHol{0Q}*-%R<-VXA-mE;|34kLiAXboyRE{smfmrLWCi`3|_R zE&ee?vwIla(hXq9F%c)=g!9q8p>&c;u5b% zu}N_&#cdR~Q=FtYMR5nwNSF1fMvrRrmIPCc9@XekjULtLQH>te=uwSyohH@Tq#B!4 zV{^&ett2s}ZG^THnj|zuXh+^KnENAE$sbY_17*AG3LT0T_N0cEZK-AKqs3R91b$JB Q3|!Lx0kjb`A-5aRQ0=V}T356){e`xF9&&hqih5lOHqVJi9G~xG9d0_)VaitIVpQ(MAk7 zL}-0rA?cWPZz|Bm-n5nVV|*ScO0+t&K2^XKq=U=2&xLV384>t6t8jP!wd_@CnYM(P^R_TYM}jdJ{+n!iUD~upJMq}c zqN~!P%h3k{O05-ioGjH7CFbW2CPw2rn8wP|(EQxsHx&)9HlPGl7Zr8kUIZIMJ&aLL z%4gk|!n#ho6h(E4r#glEdb*O!)VXfWqK_4bQaeSke`M>@^Dwyv&zEG<_XIv?&dApun`06z4T9r*7V^XF~er zMcP~IdPrQ*r-k-mdHSvf(r%HRQSVEmCI`#?sd{SEL)G6Cva~Hu%gEuB>w2Sh>9L-5 z?C&t4aLaip-CIu*np@1!cITQ~ZGM+1ZtQ`J)`UpBac4d##0&oX`~?tK?&x+D007+) B8leCH diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/html/00Index.html b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/html/00Index.html deleted file mode 100644 index 13e5f70ce..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/html/00Index.html +++ /dev/null @@ -1,93 +0,0 @@ - - -R: Correct signal outliers - - - - diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/html/R.css b/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/html/R.css deleted file mode 100644 index 2ef6cd609..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/delphiBackfillCorrection/html/R.css +++ /dev/null @@ -1,120 +0,0 @@ -@media screen { - .container { - padding-right: 10px; - padding-left: 10px; - margin-right: auto; - margin-left: auto; - max-width: 900px; - } -} - -.rimage img { /* from knitr - for examples and demos */ - width: 96%; - margin-left: 2%; -} - -.katex { font-size: 1.1em; } - -code { - color: inherit; - background: inherit; -} - -body { - line-height: 1.4; - background: white; - color: black; -} - -a:link { - background: white; - color: blue; -} - -a:visited { - background: white; - color: rgb(50%, 0%, 50%); -} - -h1 { - background: white; - color: rgb(55%, 55%, 55%); - font-family: monospace; - font-size: 1.4em; /* x-large; */ - text-align: center; -} - -h2 { - background: white; - color: rgb(40%, 40%, 40%); - font-family: monospace; - font-size: 1.2em; /* large; */ - text-align: center; -} - -h3 { - background: white; - color: rgb(40%, 40%, 40%); - font-family: monospace; - font-size: 1.2em; /* large; */ -} - -h4 { - 
background: white; - color: rgb(40%, 40%, 40%); - font-family: monospace; - font-style: italic; - font-size: 1.2em; /* large; */ -} - -h5 { - background: white; - color: rgb(40%, 40%, 40%); - font-family: monospace; -} - -h6 { - background: white; - color: rgb(40%, 40%, 40%); - font-family: monospace; - font-style: italic; -} - -img.toplogo { - width: 4em; - vertical-align: middle; -} - -img.arrow { - width: 30px; - height: 30px; - border: 0; -} - -span.acronym { - font-size: small; -} - -span.env { - font-family: monospace; -} - -span.file { - font-family: monospace; -} - -span.option{ - font-family: monospace; -} - -span.pkg { - font-weight: bold; -} - -span.samp{ - font-family: monospace; -} - -div.vignettes a:hover { - background: rgb(85%, 85%, 85%); -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/startup.Rs b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/startup.Rs deleted file mode 100644 index 8ad6d2508..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/startup.Rs +++ /dev/null @@ -1,4 +0,0 @@ -## A custom startup file for tests -## Run as if a system Rprofile, so no packages, no assignments -options(useFancyQuotes = FALSE) - diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat.R deleted file mode 100644 index 83f3bb312..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat.R +++ /dev/null @@ -1,4 +0,0 @@ -library(testthat) -library(delphiBackfillCorrection) - -test_check("delphiBackfillCorrection", stop_on_warning = FALSE) diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat.Rout.fail b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat.Rout.fail deleted file mode 100644 index 7824a543f..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat.Rout.fail +++ /dev/null @@ -1,33 +0,0 @@ - -R version 4.2.0 (2022-04-22) -- "Vigorous Calisthenics" -Copyright (C) 2022 The R Foundation for Statistical Computing -Platform: x86_64-pc-linux-gnu (64-bit) - -R is free software and comes with ABSOLUTELY NO WARRANTY. -You are welcome to redistribute it under certain conditions. -Type 'license()' or 'licence()' for distribution details. - -R is a collaborative project with many contributors. -Type 'contributors()' for more information and -'citation()' on how to cite R or R packages in publications. - -Type 'demo()' for some demos, 'help()' for on-line help, or -'help.start()' for an HTML browser interface to help. -Type 'q()' to quit R. - -> library(testthat) -> library(delphiBackfillCorrection) -> -> test_check("delphiBackfillCorrection", stop_on_warning = FALSE) -[ FAIL 1 | WARN 0 | SKIP 0 | PASS 250 ] - -== Failed tests ================================================================ --- Failure (test-preprocessing.R:109:3): testing adding columns for each week of a month -- -all(...) 
is not TRUE - -`actual`: FALSE -`expected`: TRUE - -[ FAIL 1 | WARN 0 | SKIP 0 | PASS 250 ] -Error: Test failures -Execution halted diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/helper-relativize.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/helper-relativize.R deleted file mode 100644 index 3d62d6a7f..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/helper-relativize.R +++ /dev/null @@ -1,13 +0,0 @@ -## Helper functions to relativize paths to the testing directory, so tests can -## be run via R CMD CHECK and do not depend on the current working directory -## being tests/testthat/. - -library(testthat) - -relativize_params <- function(params) { - params$export_dir <- test_path(params$export_dir) - params$cache_dir <- test_path(params$cache_dir) - params$input_dir <- test_path(params$input_dir) - - return(params) -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/params-run.json.template b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/params-run.json.template deleted file mode 100644 index f2224855a..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/params-run.json.template +++ /dev/null @@ -1,8 +0,0 @@ -{ - "training_end_date": "2022-01-01", - "training_days": 7, - "ref_lag": 3, - "input_dir": "./input", - "export_dir": "./output", - "cache_dir": "./cache" -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/params-test.json.template b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/params-test.json.template deleted file mode 100644 index fb8309e94..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/params-test.json.template +++ /dev/null @@ -1,3 +0,0 @@ -{ - "input_dir": "./test.tempt" -} diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-beta_prior_estimation.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-beta_prior_estimation.R deleted file mode 100644 index 59ea2beda..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-beta_prior_estimation.R +++ /dev/null @@ -1,130 +0,0 @@ -context("Testing helper functions for beta prior estimation") - -# Constants -indicator <- "chng" -signal <- "outpatient" -geo_level <- "state" -signal_suffix <- "" -lambda <- 0.1 -geo <- "pa" -value_type <- "fraction" -model_save_dir <- "./cache" -training_end_date <- as.Date("2022-01-01") - -# Generate Test Data -main_covariate <- c("log_value_7dav") -null_covariates <- c("value_raw_num", "value_raw_denom", - "value_7dav_num", "value_7dav_denom", - "value_prev_7dav_num", "value_prev_7dav_denom") -dayofweek_covariates <- c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", - "Fri_ref", "Sat_ref") -response <- "log_value_target" - -set.seed(2022) -train_beta_vs <- log(rbeta(1000, 2, 5)) -test_beta_vs <- log(rbeta(50, 2, 5)) -train_data <- data.frame(log_value_7dav = train_beta_vs, - log_value_target = train_beta_vs) -train_data$value_target_num <- exp(train_beta_vs) * 100 -train_data$value_target_denom <- 100 -test_data <- data.frame(log_value_7dav = test_beta_vs, - log_value_target = test_beta_vs) -for (cov in null_covariates){ - train_data[[cov]] <- 0 - test_data[[cov]] <- 0 -} -for (cov in c(dayofweek_covariates, "Sun_ref")){ - train_data[[cov]] <- 1 - test_data[[cov]] <- 1 -} -prior_test_data <- test_data -covariates <- c(main_covariate, 
dayofweek_covariates) - - - -test_that("testing the sum of squared error", { - fit <- c(0, 1, 0) - actual <- c(1, 1, 1) - - expected <- 1^2 + 1^2 - computed <- delta(fit, actual) - expect_equal(expected, computed) -}) - - -test_that("testing the squared error objection function given the beta prior", { - theta <- c(log(1), log(2)) - x <- qbeta(TAUS, 1, 2) - - expected <-0 - computed <- objective(theta, x, TAUS) - expect_equal(expected, computed) -}) - - -test_that("testing the prior estimation", { - dw <- "Sat_ref" - priors <- est_priors(train_data, prior_test_data, geo, value_type, dw, TAUS, - covariates, response, LP_SOLVER, lambda, - indicator, signal, geo_level, signal_suffix, - training_end_date, model_save_dir) - alpha <- priors[2] - beta <- priors[1] - alpha - expect_true((alpha > 1) & (alpha < 3)) - expect_true((beta > 4) & (beta < 6)) - - for (idx in 1:length(TAUS)) { - tau <- TAUS[idx] - model_file_name <- generate_filename(indicator, signal, - geo_level, signal_suffix, lambda, - geo=geo, dw=dw, tau=tau, - value_type=value_type, - training_end_date=training_end_date, - beta_prior_mode=TRUE) - model_path <- file.path(model_save_dir, model_file_name) - expect_true(file.exists(model_path)) - file.remove(model_path) - } -}) - - -test_that("testing the fraction adjustment with pseudo counts", { - value_raw <- frac_adj_with_pseudo(train_data, NULL, 1, 100, "value_raw_num", "value_raw_denom") - expect_true(all(value_raw == 1/100)) - - dw <- "Sat_ref" - value_raw <- frac_adj_with_pseudo(train_data, dw, 1, 100, "value_raw_num", "value_raw_denom") - expect_true(all(value_raw == 1/100)) -}) - - -test_that("testing the main beta prior adjustment function", { - set.seed(1) - updated_data <- frac_adj(train_data, test_data, prior_test_data, - indicator, signal, geo_level, signal_suffix, - lambda, value_type, geo, - training_end_date, model_save_dir, - taus = TAUS, lp_solver = LP_SOLVER) - updated_train_data <- updated_data[[1]] - updated_test_data <- updated_data[[2]] - - for (dw in c(dayofweek_covariates, "Sun_ref")){ - for (idx in 1:length(TAUS)) { - tau <- TAUS[idx] - model_file_name <- generate_filename(indicator, signal, - geo_level, signal_suffix, lambda, - geo=geo, dw=dw, tau=tau, - value_type=value_type, - training_end_date=training_end_date, - beta_prior_mode=TRUE) - model_path <- file.path(model_save_dir, model_file_name) - expect_true(file.exists(model_path)) - file.remove(model_path) - } - } - - expect_true(unique(updated_train_data$value_raw) == unique(updated_test_data$value_raw)) - expect_true(all(updated_train_data$value_raw < 3/(3+4))) - expect_true(all(updated_train_data$value_raw > 1/(1+6))) -}) - diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-io.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-io.R deleted file mode 100644 index 07636e140..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-io.R +++ /dev/null @@ -1,118 +0,0 @@ -library(arrow) - -context("Testing io helper functions") - -# Constants -indicator <- "chng" -signal <- "outpatient" -geo_level <- "state" -signal_suffix <- "" -lambda <- 0.1 -geo <- "pa" -value_type <- "fraction" -date_format = "%Y%m%d" -training_end_date <- as.Date("2022-01-01") - -create_dir_not_exist("./input") -create_dir_not_exist("./output") -create_dir_not_exist("./cache") - -test_that("testing exporting the output file", { - params <- read_params("params-run.json", "params-run.json.template") - - test_data <- data.frame(test=TRUE) - 
coef_data <- data.frame(test=TRUE) - - export_test_result(test_data, coef_data, indicator, signal, - geo_level, signal_suffix, lambda, - training_end_date, - value_type, params$export_dir) - prediction_file <- file.path(params$export_dir, "prediction_2022-01-01_chng_outpatient_state_lambda0.1_fraction.csv") - coefs_file <- file.path(params$export_dir, "coefs_2022-01-01_chng_outpatient_state_lambda0.1_fraction.csv") - - expect_true(file.exists(prediction_file)) - expect_true(file.exists(coefs_file)) - - # Remove - file.remove(prediction_file) - file.remove(coefs_file) - file.remove("params-run.json") -}) - - -test_that("testing creating file name pattern", { - params <- read_params("params-run.json", "params-run.json.template") - - daily_pattern <- create_name_pattern(indicator, signal, "daily") - rollup_pattern <- create_name_pattern(indicator, signal, "rollup") - - # Create test files - daily_data <- data.frame(test=TRUE) - daily_file_name <- file.path(params$input_dir, - str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")) - write_parquet(daily_data, daily_file_name) - - rollup_file_name <- file.path(params$input_dir, - str_interp("chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")) - rollup_data <- data.frame(test=TRUE) - write_parquet(rollup_data, rollup_file_name) - - - filtered_daily_file <- list.files( - params$input_dir, pattern = daily_pattern, full.names = TRUE) - expect_equal(filtered_daily_file, daily_file_name) - - filtered_rollup_file <- list.files( - params$input_dir, pattern = rollup_pattern, full.names = TRUE) - expect_equal(filtered_rollup_file, rollup_file_name) - - file.remove(daily_file_name) - file.remove(rollup_file_name) - file.remove("params-run.json") -}) - - -test_that("testing the filtration of the files for training and predicting", { - params <- read_params("params-run.json", "params-run.json.template") - - daily_files_list <- c(file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY-15, date_format)}.parquet")), - file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")), - file.path(params$input_dir, str_interp("chng_outpatient_as_of_${format(TODAY, date_format)}.parquet"))) - daily_valid_files <- subset_valid_files(daily_files_list, "daily", params) - expect_equal(daily_valid_files, daily_files_list[2]) - - rollup_files_list <- c(file.path(params$input_dir, str_interp( - "chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY-11, date_format)}.parquet")), - file.path(params$input_dir, str_interp( - "chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, date_format)}.parquet")), - file.path(params$input_dir, str_interp( - "chng_outpatient_from_${format(TODAY, date_format)}_to_${format(TODAY+3, date_format)}.parquet"))) - rollup_valid_files <- subset_valid_files(rollup_files_list, "rollup", params) - expect_equal(rollup_valid_files, rollup_files_list[2]) - - file.remove("params-run.json") -}) - -test_that("testing fetching list of files for training and predicting", { - params <- read_params("params-run.json", "params-run.json.template") - - daily_data <- data.frame(test=TRUE) - daily_file_name <- file.path(params$input_dir, - str_interp("chng_outpatient_as_of_${format(TODAY-5, date_format)}.parquet")) - write_parquet(daily_data, daily_file_name) - - rollup_file_name <- file.path(params$input_dir, - str_interp("chng_outpatient_from_${format(TODAY-15, date_format)}_to_${format(TODAY, 
date_format)}.parquet")) - rollup_data <- data.frame(test=TRUE) - write_parquet(rollup_data, rollup_file_name) - - - files <- get_files_list(indicator, signal, params) - expect_true(all(files == c(daily_file_name, rollup_file_name))) - - file.remove(daily_file_name) - file.remove(rollup_file_name) - file.remove("params-run.json") -}) - - diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-model.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-model.R deleted file mode 100644 index 2a1221344..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-model.R +++ /dev/null @@ -1,173 +0,0 @@ -context("Testing the helper functions for modeling") - -# Constants -indicator <- "chng" -signal <- "outpatient" -geo_level <- "state" -signal_suffix <- "" -lambda <- 0.1 -test_lag <- 1 -model_save_dir <- "./cache" -geo <- "pa" -value_type <- "fraction" -training_end_date <- as.Date("2022-01-01") - -# Generate Test Data -main_covariate <- c("log_value_7dav") -null_covariates <- c("value_raw_num", "value_raw_denom", - "value_7dav_num", "value_7dav_denom", - "value_prev_7dav_num", "value_prev_7dav_denom") -dayofweek_covariates <- c("Mon_ref", "Tue_ref", "Wed_ref", "Thurs_ref", - "Fri_ref", "Sat_ref") -response <- "log_value_target" -train_beta_vs <- log(rbeta(1000, 2, 5)) -test_beta_vs <- log(rbeta(61, 2, 5)) -train_data <- data.frame(log_value_7dav = train_beta_vs, - log_value_target = train_beta_vs) -train_data$value_target_num <- exp(train_beta_vs) * 100 -train_data$value_target_denom <- 100 -test_data <- data.frame(log_value_7dav = test_beta_vs, - log_value_target = test_beta_vs) -for (cov in null_covariates){ - train_data[[cov]] <- 0 - test_data[[cov]] <- 0 -} -for (cov in c(dayofweek_covariates, "Sun_ref")){ - train_data[[cov]] <- 1 - test_data[[cov]] <- 1 -} -covariates <- c(main_covariate, dayofweek_covariates) - - -test_that("testing the generation of model filename prefix", { - model_file_name <- generate_filename(indicator, signal, - geo_level, signal_suffix, lambda) - expected <- "chng_outpatient_state_lambda0.1.model" - expect_equal(model_file_name, expected) -}) - -test_that("testing the evaluation", { - for (tau in TAUS){ - test_data[[paste0("predicted_tau", as.character(tau))]] <- log(quantile(exp(train_beta_vs), tau)) - } - result <- evaluate(test_data, TAUS) - expect_true(mean(result$wis) < 0.3) -}) - -test_that("testing generating or loading the model", { - # Check the model that does not exist - tau = 0.5 - model_file_name <- generate_filename(indicator, signal, - geo_level, signal_suffix, lambda, - geo=geo, test_lag=test_lag, tau=tau) - model_path <- file.path(model_save_dir, model_file_name) - expect_true(!file.exists(model_path)) - - # Generate the model and check again - obj <- get_model(model_path, train_data, covariates, tau, - lambda, LP_SOLVER, train_models=TRUE) - expect_true(file.exists(model_path)) - created <- file.info(model_path)$ctime - - # Check that the model was not generated again. 
- obj <- get_model(model_path, train_data, covariates, tau, - lambda, LP_SOLVER, train_models=FALSE) - expect_equal(file.info(model_path)$ctime, created) - - expect_silent(file.remove(model_path)) -}) - -test_that("testing model training and testing", { - result <- model_training_and_testing(train_data, test_data, TAUS, covariates, - LP_SOLVER, lambda, test_lag, - geo, value_type, model_save_dir, - indicator, signal, - geo_level, signal_suffix, - training_end_date, - train_models = TRUE, - make_predictions = TRUE) - test_result <- result[[1]] - coef_df <- result[[2]] - - for (tau in TAUS){ - cov <- paste0("predicted_tau", as.character(tau)) - expect_true(cov %in% colnames(test_result)) - - model_file_name <- generate_filename(indicator, signal, - geo_level, signal_suffix, lambda, - geo=geo, test_lag=test_lag, tau=tau, - training_end_date=training_end_date) - model_path <- file.path(model_save_dir, model_file_name) - expect_true(file.exists(model_path)) - - expect_silent(file.remove(model_path)) - } - - for (cov in covariates){ - cov <- paste(cov, "coef", sep="_") - expect_true(cov %in% colnames(coef_df)) - } -}) - -test_that("testing adding square root scale", { - expect_error(result <- add_sqrtscale(train_data, test_data, 1, "value_raw"), - "value raw does not exist in training data!") - - train_data$value_raw <- rbeta(nrow(train_data), 2, 5) - expect_error(result <- add_sqrtscale(train_data, test_data, 1, "value_raw"), - "value raw does not exist in testing data!") - - test_data$value_raw <- rbeta(nrow(test_data), 2, 5) - expect_silent(result <- add_sqrtscale(train_data, test_data, 1, "value_raw")) - - new_train_data <- result[[1]] - new_test_data <- result[[2]] - sqrtscales <- result[[3]] - expect_true(length(sqrtscales) == 4) - for (cov in sqrtscales){ - expect_true(cov %in% colnames(new_train_data)) - expect_true(cov %in% colnames(new_test_data)) - } - expect_true(all(rowSums(new_train_data[sqrtscales]) %in% c(0, 1))) - expect_true(all(rowSums(new_test_data[sqrtscales]) %in% c(0, 1))) - - for (i in 0:2){ - m_l <- max(new_train_data[new_train_data[[paste0("sqrty", as.character(i))]] == 1, "value_raw"]) - m_r <- min(new_train_data[new_train_data[[paste0("sqrty", as.character(i+1))]] == 1, "value_raw"]) - expect_true(m_l <= m_r) - } - -}) - -test_that("testing data filteration", { - train_data$lag <- rep(0:60, nrow(train_data))[1:nrow(train_data)] - test_data$lag <- rep(0:60, nrow(test_data))[1:nrow(test_data)] - - # When test lag is small - test_lag <- 5 - result <- data_filteration(test_lag, train_data, test_data, 2) - train_df <- result[[1]] - test_df <- result[[2]] - expect_true(max(train_df$lag) == test_lag+2) - expect_true(min(train_df$lag) == test_lag-2) - expect_true(all(test_df$lag == test_lag)) - - # When test lag is large - test_lag <- 48 - result <- data_filteration(test_lag, train_data, test_data, 2) - train_df <- result[[1]] - test_df <- result[[2]] - expect_true(max(test_df$lag) == test_lag+7) - expect_true(min(test_df$lag) == test_lag-6) - - # Make sure that all lags are tested - included_lags = c() - for (test_lag in c(1:14, 21, 35, 51)){ - result <- data_filteration(test_lag, train_data, test_data, 2) - test_df <- result[[2]] - included_lags <- c(included_lags, unique(test_df$lag)) - } - expect_true(all(1:60 %in% included_lags)) -}) - - diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-preprocessing.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-preprocessing.R deleted file mode 100644 index 
8bde8c68e..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-preprocessing.R +++ /dev/null @@ -1,132 +0,0 @@ -context("Testing preprocessing helper functions") - -refd_col <- "time_value" -lag_col <- "lag" -value_col <- "Counts_Products_Denom" -min_refd <- as.Date("2022-01-01") -max_refd <- as.Date("2022-01-07") -ref_lag <- 7 -fake_df <- data.frame(time_value = c(as.Date("2022-01-03"), as.Date("2022-01-03"), - as.Date("2022-01-03"), as.Date("2022-01-03"), - as.Date("2022-01-04"), as.Date("2022-01-04"), - as.Date("2022-01-04"), as.Date("2022-01-05"), - as.Date("2022-01-05")), - lag = c(0, 1, 3, 7, 0, 6, 7, 0, 7), - Counts_Products_Denom=c(100, 200, 500, 1000, 0, 200, 220, 50, 300)) -wd <- c("Mon", "Tue", "Wed", "Thurs", "Fri", "Sat") -wm <- c("W1_issue", "W2_issue", "W3_issue") - - -test_that("testing rows filling for missing lags", { - # Make sure all reference date have enough rows for updates - df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) - n_refds <- as.numeric(max_refd - min_refd)+1 - - expect_equal(nrow(df_new), n_refds*(ref_lag+31)) - expect_equal(df_new %>% drop_na(), fake_df) -}) - - -test_that("testing NA filling for missing udpates", { - # Make sure all the updates are valid integers - - # Assuming the input data does not have enough rows for consecutive lags - expect_error(fill_missing_updates(fake_df, value_col, refd_col, lag_col), - "Risk exists in forward filling") - - # Assuming the input data is already prepared - df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) - n_refds <- as.numeric(max_refd - min_refd)+1 - backfill_df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) - - expect_equal(nrow(backfill_df), n_refds*(ref_lag+31)) - - for (d in seq(min_refd, max_refd, by="day")) { - expect_true(all(diff(backfill_df[backfill_df[,refd_col]==d, "value_raw"])>=0 )) - } -}) - - -test_that("testing the calculation of 7-day moving average", { - df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) - df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) - df$issue_date <- df[[refd_col]] + df[[lag_col]] - pivot_df <- df[order(df$issue_date, decreasing=FALSE), ] %>% - pivot_wider(id_cols=refd_col, names_from="issue_date", - values_from="value_raw") - pivot_df[is.na(pivot_df)] = 0 - backfill_df <- get_7dav(pivot_df, refd_col) - - - output <- backfill_df[backfill_df[[refd_col]] == as.Date("2022-01-07"), "value_raw"] - expected <- colSums(pivot_df[, -1]) / 7 - expect_true(all(output == expected)) -}) - -test_that("testing the data shifting", { - shifted_df <- add_shift(fake_df, 1, refd_col) - shifted_df[, refd_col] <- as.Date(shifted_df[, refd_col]) - 1 - - expect_equal(fake_df, shifted_df) -}) - - -test_that("testing adding columns for each day of a week", { - df_new <- add_dayofweek(fake_df, refd_col, "_ref", wd) - - expect_equal(ncol(fake_df) + 7, ncol(df_new)) - expect_true(all(rowSums(df_new[, -c(1:ncol(fake_df))]) == 1)) - expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-03"), "Mon_ref"] == 1)) - expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-05"), "Wed_ref"] == 1)) -}) - - -test_that("testing the calculation of week of a month", { - expect_equal(get_weekofmonth(as.Date("2021-12-31")), 1) - expect_equal(get_weekofmonth(as.Date("2022-01-01")), 1) - expect_equal(get_weekofmonth(as.Date("2022-01-02")), 1) - expect_equal(get_weekofmonth(as.Date("2022-01-09")), 2) - - 
expect_equal(get_weekofmonth(as.Date("2022-09-01")), 1) - expect_equal(get_weekofmonth(as.Date("2022-09-04")), 2) - expect_equal(get_weekofmonth(as.Date("2022-09-24")), 4) - expect_equal(get_weekofmonth(as.Date("2022-09-25")), 1) - - expect_equal(get_weekofmonth(as.Date("2022-10-01")), 1) - expect_equal(get_weekofmonth(as.Date("2022-10-02")), 1) - expect_equal(get_weekofmonth(as.Date("2022-10-09")), 2) - expect_equal(get_weekofmonth(as.Date("2022-10-16")), 3) - expect_equal(get_weekofmonth(as.Date("2022-10-23")), 4) - expect_equal(get_weekofmonth(as.Date("2022-10-30")), 1) - -}) - -test_that("testing adding columns for each week of a month", { - df_new <- add_weekofmonth(fake_df, refd_col, wm) - - expect_equal(ncol(fake_df) + 3, ncol(df_new)) - expect_true(all(rowSums(df_new[, -c(1:ncol(fake_df))]) == 1)) - expect_true(all(df_new[df_new[[refd_col]] == as.Date("2022-01-03"), "W2_issue"] == 1)) -}) - - -test_that("testing adding 7 day avg and target", { - df_new <- fill_rows(fake_df, refd_col, lag_col, min_refd, max_refd, ref_lag) - backfill_df <- fill_missing_updates(df_new, value_col, refd_col, lag_col) - df_new <- add_7davs_and_target(backfill_df, "value_raw", refd_col, lag_col, ref_lag) - - # Existing columns: - # time_value: reference date - # value_raw: raw counts - # lag: number of days between issue date and reference date - # Added columns - # issue_date: report/issue date - # value_7dav: 7day avg of the raw counts - # value_prev_7dav: 7day avg of the counts from -14 days to -8 days - # value_target: updated counts on the target date - # target_date: the date ref_lag days after the reference date - # and 5 log columns - expect_equal(ncol(df_new), 3 + 10) - expect_equal(nrow(df_new), 7 * (ref_lag + 30 + 1)) -}) - diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-utils.R b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-utils.R deleted file mode 100644 index a733f2a1d..000000000 --- a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/test-utils.R +++ /dev/null @@ -1,136 +0,0 @@ -context("Testing utils helper functions") - -test_that("testing create directory if not exist", { - # If not exists - path = "test.test" - create_dir_not_exist(path) - expect_true(file.exists(path)) - - # If already exists - create_dir_not_exist(path) - expect_true(file.exists(path)) - - # Remove - unlink(path, recursive = TRUE) - expect_true(!file.exists(path)) -}) - - -test_that("testing number of available issue dates for training", { - start_date <- as.Date("2022-01-01") - end_date <- as.Date("2022-01-09") - training_days = 10 - issue_date <- seq(start_date, end_date, by = "days") - expect_warning(training_days_check(issue_date, training_days = training_days), - "Only 9 days are available at most for training.") - - end_date <- as.Date("2022-01-10") - training_days = 10 - issue_date <- seq(start_date, end_date, by = "days") - expect_silent(training_days_check(issue_date, training_days = training_days)) -}) - -test_that("testing get the top200 populous counties", { - counties <- get_populous_counties() - - expect_true(length(counties) == 200) - expect_true("06037" %in% counties) -}) - -test_that("testing read parameters", { - # No input file - expect_error(read_params(path = "params-test.json", template_path = "params-test.json.template", - train_models = TRUE, make_predictions = TRUE), - "input_dir must be set in `params` and exist") - - # Check parameters - params <- read_json("params-test.json", simplifyVector = TRUE) - # 
Check initialization - expect_true(!("export_dir" %in% names(params))) - expect_true(!("cache_dir" %in% names(params))) - - expect_true(!("parallel" %in% names(params))) - expect_true(!("parallel_max_cores" %in% names(params))) - - - expect_true(!("taus" %in% names(params))) - expect_true(!("lambda" %in% names(params))) - expect_true(!("lp_solver" %in% names(params))) - expect_true(!("lag_pad" %in% names(params))) - - expect_true(!("taus" %in% names(params))) - expect_true(!("lambda" %in% names(params))) - expect_true(!("lp_solver" %in% names(params))) - - expect_true(!("num_col" %in% names(params))) - expect_true(!("denom_col" %in% names(params))) - expect_true(!("geo_levels" %in% names(params))) - expect_true(!("value_types" %in% names(params))) - - expect_true(!("training_days" %in% names(params))) - expect_true(!("ref_lag" %in% names(params))) - expect_true(!("testing_window" %in% names(params))) - expect_true(!("test_dates" %in% names(params))) - - # Create input file - path = "test.tempt" - create_dir_not_exist(path) - expect_silent(params <- read_params(path = "params-test.json", - template_path = "params-test.json.template", - train_models = TRUE, make_predictions = TRUE)) - unlink(path, recursive = TRUE) - - - expect_true("export_dir" %in% names(params)) - expect_true("cache_dir" %in% names(params)) - - expect_true("parallel" %in% names(params)) - expect_true("parallel_max_cores" %in% names(params)) - - - expect_true("taus" %in% names(params)) - expect_true("lambda" %in% names(params)) - expect_true("lp_solver" %in% names(params)) - - expect_true("taus" %in% names(params)) - expect_true("lambda" %in% names(params)) - expect_true("lp_solver" %in% names(params)) - expect_true("lag_pad" %in% names(params)) - - expect_true("num_col" %in% names(params)) - expect_true("denom_col" %in% names(params)) - expect_true("geo_levels" %in% names(params)) - expect_true("value_types" %in% names(params)) - - expect_true("training_days" %in% names(params)) - expect_true("ref_lag" %in% names(params)) - expect_true("testing_window" %in% names(params)) - expect_true("test_dates" %in% names(params)) - - expect_true(params$export_dir == "./receiving") - expect_true(params$cache_dir == "./cache") - - expect_true(params$parallel == FALSE) - expect_true(params$parallel_max_cores == .Machine$integer.max) - - expect_true(all(params$taus == TAUS)) - expect_true(params$lambda == LAMBDA) - expect_true(params$lp_solver == LP_SOLVER) - expect_true(params$lag_pad == LAG_PAD) - - expect_true(params$num_col == "num") - expect_true(params$denom_col == "denom") - expect_true(all(params$geo_levels == c("state", "county"))) - expect_true(all(params$value_types == c("count", "fraction"))) - - expect_true(params$training_days == TRAINING_DAYS) - expect_true(params$ref_lag == REF_LAG) - expect_true(params$testing_window == TESTING_WINDOW) - start_date <- TODAY - params$testing_window - end_date <- TODAY - 1 - expect_true(all(params$test_dates == seq(start_date, end_date, by="days"))) - - expect_silent(file.remove("params-test.json")) -}) - - diff --git a/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/testthat-problems.rds b/backfill_corrections/delphiBackfillCorrection.Rcheck/tests/testthat/testthat-problems.rds deleted file mode 100644 index 4ae5544a52e5d7530bcee31464fee74828076082..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 16934 zcmaL8c|26@8$bS3Peqb7QkW1bBq@?*iiCvhyD3X5q>?Sm`BchQBt_Oq$X2FFmcq1(78&2CHcK|i=n^asjtIr!CyQ}B5P5tik 
zMp3jV^#`F!6vrSp*!2b>J2HvRJ)3Zs0MdI~7In#>ZZ)^xZPPn@Yf3Y3Q=H6fcbXm+ zSnofsu)W>RuS7|m@n~f975A_JNAt1Yz#G%ObG$`xn3jRbr&|{$YD6HKDMmM zVg->>*(zsJI#&*fh8jqXVirvsor67?QlAzy-*gZBXU7gMR)8?uMMKb!jumtMD@GJ1 z11^LT=OQ1&Ca@CBA>>_$0Yv}-T>ko|rJ`E8PPwx&P~J%luzE+uOU{ zoi4_I>uzu2zkHheKWBMMT|ZQ2bJ8NCK8EiJ@Y3#7KgXDGHJyc%9z7(V*ul#z4d+An z@`$dRYCr-}qaRjZ!U0KLP-34hjz;Pb*sqOHbY~i58+hoB5@{^L*~XM#<&RerGVcWB zPC3Y(n+Lgj4UoGF>?b+9tqORt-4gpP0pIu3m;K6`r5mU}Y>+c@5C7T4fA}s?J?i!7 zIkZHM#^<5A$&|O4@&;4iUgDP@vshkD!tOdB%R0Yy2b95sYV@q{+w%K@t0G_mb7V&; z%^KaO$BbflZt42~bI{`%?`04j; zNQ-KiQ~sh5uO$?MwN(0H90$M5CDO@6wGEAnTe^PpyKb$hza_4hy9C=y0{e4AHt^kc zw^_7F4X7%IP;D>${IGOh=iwA5rebPQ9WJMixG1tz6f$2^uK2D) zSkNEOHCW|#EZOM@W3G0D{Ez#hGMqo?Fpd`Ayt1(GAC@%vOs81OF5!YFGv zU@&71-Vb4iZZ93xd>cM<|{9%H?j+R6K-Z=~`e)6;(4fGo3Rp`odwmX^%w}q{vc6$=zX|MF3PK?UU|)OKf$I zo8kfRRK;yAI|MCh3xI_t0Gw6yEFM`mYxEPHg+^K@iBHU??7CB7%D*u$j%~KEyhKy5 zRY>8EQMF^LRUb1_>CMTJa+Br@(Ty^^qS;6R({NDQsH6e3AA+`G+`)C$U|T1`L) zV3FvNJXjYg5xEF1B1-%Ypyn|t8oH(RWj*<4>8>IkSKEah-x%Cof~zdcG~6{PAP6m1}-~rAdOC-94(gy?)K!_LZg=YHoL| z>0NvaSRjbyc;}KtyYUjDcs^c*7;b;2#88WdOR1Ke!hJeKg7?z;gw0)Vg=f1tzS>_= z=cJV=fqxi=lnj)5rC}OY5j%we8y*-?UcYdn4#d7%ap~S$(oxSmyI4o2 zB&EcYTiqRurK~Bl$?enpo5i`1v%P$ZczU(bNt?9{HNU=-~VT4 zlmFop>HleY%k=)DGWbelayDTiT5ZdsRc}wMhdFuR&dxAGlT0)LCADR%7dc7AXRC}_ zMVW*c=u{JkN5}@fAW%6fKf16V$S4G{=$2_ihdl*Qr^_ z&aiD~OT3q$Lkm5ezOU;GZ!vZG* zHtU|H(_W_|rs;H^r1P9l_E@XF)la2sQsoBfMMGO8mKb2ELc}CPGyXQOclJ8l`*meB zB@0(|8IhCSpCe!#jB@^YDpk^mG)3DxhRSN2MBeVlG}qwtJIK>IJa~YM69g(ru?VBv6d%r}QndaM-X>{^ZKk!MqRG?Z#GyQ0DU zO%da((&gqV>{@Z3EF*?~@x_#|1pY!oSXFol6)Yb~B^B`tE+8R=q6_KA=h6uea5ecY z>F^?I=a*ef>ip76=$v1C8IdcS?4=aWud#@{6`(JtZDmy!5VqnFig|f~>8!UO0P;rh zdu7g0Qp4or zS<#k^c>$%K9zOf~qr<9VCSbo8vA+~1?S2OO)8OUdqv|=4Tc2hSBSchy)VrPvR_A$; z#m%*{}pUr3U*?hjf&;JjQTtIIC1_J<79j!+I From 4f8c44653ad6456040c220e0d4b9cea74722ac79 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 29 Sep 2022 10:46:51 -0400 Subject: [PATCH 141/145] remove local testing .Rd and imports --- .../delphiBackfillCorrection/DESCRIPTION | 1 - .../delphiBackfillCorrection/NAMESPACE | 4 -- .../man/main_local.Rd | 55 ------------------- .../man/run_backfill_local.Rd | 55 ------------------- 4 files changed, 115 deletions(-) delete mode 100644 backfill_corrections/delphiBackfillCorrection/man/main_local.Rd delete mode 100644 backfill_corrections/delphiBackfillCorrection/man/run_backfill_local.Rd diff --git a/backfill_corrections/delphiBackfillCorrection/DESCRIPTION b/backfill_corrections/delphiBackfillCorrection/DESCRIPTION index d74c639a2..5c83c72f7 100644 --- a/backfill_corrections/delphiBackfillCorrection/DESCRIPTION +++ b/backfill_corrections/delphiBackfillCorrection/DESCRIPTION @@ -13,7 +13,6 @@ Depends: R (>= 3.5.0), Imports: dplyr, - plyr, readr, tibble, stringr, diff --git a/backfill_corrections/delphiBackfillCorrection/NAMESPACE b/backfill_corrections/delphiBackfillCorrection/NAMESPACE index 133d2a5b7..70c1e92d6 100644 --- a/backfill_corrections/delphiBackfillCorrection/NAMESPACE +++ b/backfill_corrections/delphiBackfillCorrection/NAMESPACE @@ -14,11 +14,9 @@ export(frac_adj) export(frac_adj_with_pseudo) export(get_7dav) export(main) -export(main_local) export(model_training_and_testing) export(read_data) export(run_backfill) -export(run_backfill_local) import(covidcast) importFrom(arrow,read_parquet) importFrom(dplyr,"%>%") @@ -42,9 +40,7 @@ importFrom(lubridate,make_date) importFrom(lubridate,month) importFrom(lubridate,year) importFrom(parallel,detectCores) -importFrom(plyr,rbind.fill) 
importFrom(quantgen,quantile_lasso) -importFrom(readr,read_csv) importFrom(readr,write_csv) importFrom(rlang,.data) importFrom(rlang,.env) diff --git a/backfill_corrections/delphiBackfillCorrection/man/main_local.Rd b/backfill_corrections/delphiBackfillCorrection/man/main_local.Rd deleted file mode 100644 index ae6ef023f..000000000 --- a/backfill_corrections/delphiBackfillCorrection/man/main_local.Rd +++ /dev/null @@ -1,55 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tooling.R -\name{main_local} -\alias{main_local} -\title{Main function to correct a single local signal} -\usage{ -main_local( - input_dir, - export_dir, - test_start_date, - test_end_date, - num_col, - denom_col, - value_type = c("count", "fraction"), - training_days = TRAINING_DAYS, - testing_window = TESTING_WINDOW, - lambda = LAMBDA, - ref_lag = REF_LAG, - lp_solver = LP_SOLVER -) -} -\arguments{ -\item{input_dir}{path to the directory containing input data} - -\item{export_dir}{path to directory to save output to} - -\item{test_start_date}{Date or string in the format "YYYY-MM-DD" to start -making predictions on} - -\item{test_end_date}{Date or string in the format "YYYY-MM-DD" to stop -making predictions on} - -\item{num_col}{name of numerator column in the input dataframe} - -\item{denom_col}{name of denominator column in the input dataframe} - -\item{value_type}{string describing signal type. Either "count" or "fraction".} - -\item{training_days}{integer number of days to use for training} - -\item{testing_window}{the testing window used for saving the runtime. Could -set it to be 1 if time allows} - -\item{lambda}{the level of lasso penalty} - -\item{ref_lag}{max lag to use for training} - -\item{lp_solver}{string specifying the lp solver to use in -Quantgen fitting. Either "glpk" or "gurobi". For faster -optimization, use Gurobi (requires separate installation -of the `gurobi` package).} -} -\description{ -Main function to correct a single local signal -} diff --git a/backfill_corrections/delphiBackfillCorrection/man/run_backfill_local.Rd b/backfill_corrections/delphiBackfillCorrection/man/run_backfill_local.Rd deleted file mode 100644 index 6ee6bce71..000000000 --- a/backfill_corrections/delphiBackfillCorrection/man/run_backfill_local.Rd +++ /dev/null @@ -1,55 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tooling.R -\name{run_backfill_local} -\alias{run_backfill_local} -\title{Corrected estimates from a single local signal} -\usage{ -run_backfill_local( - df, - export_dir, - test_date_list, - value_cols, - value_type, - taus = TAUS, - test_lags = TEST_LAGS, - training_days = TRAINING_DAYS, - testing_window = TESTING_WINDOW, - ref_lag = REF_LAG, - lambda = LAMBDA, - lp_solver = LP_SOLVER -) -} -\arguments{ -\item{df}{Data Frame of aggregated counts within a single location -reported for each reference date and issue date.} - -\item{export_dir}{path to directory to save output to} - -\item{test_date_list}{Date vector of dates to make predictions for} - -\item{value_cols}{character vector of numerator and/or denominator field names} - -\item{value_type}{string describing signal type. Either "count" or "fraction".} - -\item{taus}{numeric vector of quantiles to be predicted. Values -must be between 0 and 1.} - -\item{test_lags}{integer vector of number of days ago to predict for} - -\item{training_days}{integer number of days to use for training} - -\item{testing_window}{the testing window used for saving the runtime. 
Could -set it to be 1 if time allows} - -\item{ref_lag}{max lag to use for training} - -\item{lambda}{the level of lasso penalty} - -\item{lp_solver}{string specifying the lp solver to use in -Quantgen fitting. Either "glpk" or "gurobi". For faster -optimization, use Gurobi (requires separate installation -of the `gurobi` package).} -} -\description{ -Corrected estimates from a single local signal -} From c482b2e5bcde46717dae616f22234fd6a1108a7e Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Thu, 29 Sep 2022 11:38:50 -0400 Subject: [PATCH 142/145] fix "no visible binding" warnings --- .../delphiBackfillCorrection/R/main.R | 12 ++++++------ .../delphiBackfillCorrection/R/utils.R | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/backfill_corrections/delphiBackfillCorrection/R/main.R b/backfill_corrections/delphiBackfillCorrection/R/main.R index 788eed7f5..70f3f301d 100644 --- a/backfill_corrections/delphiBackfillCorrection/R/main.R +++ b/backfill_corrections/delphiBackfillCorrection/R/main.R @@ -41,7 +41,7 @@ run_backfill <- function(df, params, training_end_date, if (geo_level == "county") { # Keep only 200 most populous (within the US) counties top_200_geos <- get_populous_counties() - df <- filter(df, geo_value %in% top_200_geos) + df <- filter(df, .data$geo_value %in% top_200_geos) } test_data_list <- list() @@ -55,11 +55,11 @@ run_backfill <- function(df, params, training_end_date, } } - group_dfs <- group_split(df, geo_value) + group_dfs <- group_split(df, .data$geo_value) # Build model for each location for (subdf in group_dfs) { - geo <- group_df$geo_value[1] + geo <- subdf$geo_value[1] min_refd <- min(subdf[[refd_col]]) max_refd <- max(subdf[[refd_col]]) subdf <- fill_rows(subdf, refd_col, lag_col, min_refd, max_refd) @@ -114,9 +114,9 @@ run_backfill <- function(df, params, training_end_date, geo_prior_test_data = combined_df %>% filter(.data$issue_date > min(params$test_dates) - 7) %>% filter(.data$issue_date <= max(params$test_dates)) - updated_data <- frac_adj(train_data, test_data, prior_test_data, + updated_data <- frac_adj(train_data, test_data, geo_prior_test_data, indicator, signal, geo_level, signal_suffix, - lambda, value_type, geo, + params$lambda, value_type, geo, training_end_date, params$cache_dir, train_models = params$train_models, make_predictions = params$make_predictions) @@ -173,7 +173,7 @@ run_backfill <- function(df, params, training_end_date, coef_combined <- bind_rows(coef_list[[key]]) export_test_result(test_combined, coef_combined, indicator, signal, - geo_level, signal_suffix, lambda, + geo_level, signal_suffix, params$lambda, training_end_date, value_type, export_dir=params$export_dir) } diff --git a/backfill_corrections/delphiBackfillCorrection/R/utils.R b/backfill_corrections/delphiBackfillCorrection/R/utils.R index fdcaf42e4..8aac34f33 100644 --- a/backfill_corrections/delphiBackfillCorrection/R/utils.R +++ b/backfill_corrections/delphiBackfillCorrection/R/utils.R @@ -158,7 +158,7 @@ get_populous_counties <- function() { dplyr::select(pop = .data$POPESTIMATE2019, fips = .data$FIPS) %>% # Drop megacounties (states) filter(!endsWith(.data$fips, "000")) %>% - arrange(desc(pop)) %>% + arrange(desc(.data$pop)) %>% pull(.data$fips) %>% head(n=200) ) From ee6d459912babdf91dbb2ac8bb068deb20eb56ac Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Thu, 29 Sep 2022 13:04:43 -0400 Subject: [PATCH 143/145] Update 
backfill_corrections/delphiBackfillCorrection/R/main.R Co-authored-by: nmdefries <42820733+nmdefries@users.noreply.github.com> --- backfill_corrections/delphiBackfillCorrection/R/main.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backfill_corrections/delphiBackfillCorrection/R/main.R b/backfill_corrections/delphiBackfillCorrection/R/main.R index 70f3f301d..ae0fee9ee 100644 --- a/backfill_corrections/delphiBackfillCorrection/R/main.R +++ b/backfill_corrections/delphiBackfillCorrection/R/main.R @@ -201,7 +201,7 @@ main <- function(params) { if (params$train_models) { # Remove all the stored models files_list <- list.files(params$cache_dir, pattern="*.model", full.names = TRUE) - file.remove(file.path(mydir, files_list)) + file.remove(files_list) } training_end_date <- as.Date(readLines( From c29665c96780f5add8974d510aa2f7c578e6c169 Mon Sep 17 00:00:00 2001 From: Jingjing Tang <31444565+jingjtang@users.noreply.github.com> Date: Thu, 29 Sep 2022 14:07:55 -0400 Subject: [PATCH 144/145] Update backfill_corrections/README.md Co-authored-by: Katie Mazaitis --- backfill_corrections/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backfill_corrections/README.md b/backfill_corrections/README.md index bacccd81b..6b7967c65 100644 --- a/backfill_corrections/README.md +++ b/backfill_corrections/README.md @@ -101,7 +101,7 @@ Required columns with fixed column names: - lag: the number of days between issue date and the reference date - issue_date: issue date/report, required if lag is not available -Required columns without fixed column names: +Required columns without fixed column names (column names must be specified in [TODO]): - num_col: the column for the number of reported counts of the numerator. e.g. the number of COVID claims counts according to insurance data. From 94ed22b0141ce3eda5f0949f2df483984f822d75 Mon Sep 17 00:00:00 2001 From: Nat DeFries <42820733+nmdefries@users.noreply.github.com> Date: Fri, 30 Sep 2022 11:06:26 -0400 Subject: [PATCH 145/145] remove tooling.R comments from readme --- backfill_corrections/README.md | 8 -------- 1 file changed, 8 deletions(-) diff --git a/backfill_corrections/README.md b/backfill_corrections/README.md index 6b7967c65..1cf5ca07a 100644 --- a/backfill_corrections/README.md +++ b/backfill_corrections/README.md @@ -21,14 +21,6 @@ To execute the module and produce the output datasets (by default, in Rscript run.R ``` -The functions in `tooling.R` are provided as a user-friendly way to run -backfill corrections on any dataset that the user has on hand. This local -processing can be done by running the following from this directory: - -``` -Rscript correct_local_signal.R -``` - Default values are provided for most parameters; `input_dir`, `test_start_date`, and `test_end_date` must be provided as command line arguments.
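
For readers skimming the series: PATCH 142/145 above fixes the `R CMD check` "no visible binding for global variable" notes by referring to data-frame columns through the rlang `.data` pronoun inside dplyr verbs. The snippet below is a self-contained sketch of that pattern only; the toy data frame, its column values, and the `top_geos` vector are invented for illustration and are not part of the package.

```r
# Sketch of the pattern from PATCH 142/145: use the rlang .data pronoun inside
# dplyr verbs so R CMD check does not flag bare column names as undefined
# globals. The toy data below is illustrative only.
library(dplyr)

toy <- tibble::tibble(
  geo_value = c("42003", "36061", "42003"),
  value     = c(10, 20, 30)
)
top_geos <- "42003"

toy %>%
  filter(.data$geo_value %in% top_geos) %>%  # instead of bare geo_value
  arrange(desc(.data$value))                 # instead of bare value
```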
Correct signal outliers

Documentation for package ‘delphiBackfillCorrection’ version 1.0

Help Pages
add_7davs_and_target: Add 7dav and target to the data. The target is the updates made ref_lag days after the first release.
add_dayofweek: Add one-hot encoding for day-of-week info in terms of reference and issue date.
add_params_for_dates: Add params related to dates.
add_shift: Used for data shifting in terms of reference date.
add_sqrtscale: Add columns to indicate the scale of the value at the square-root level.
add_weekofmonth: Add one-hot encoding for week-of-month info in terms of issue date.
create_dir_not_exist: Create a directory if it does not already exist.
create_name_pattern: Create a pattern to match input files of a given type and signal.
data_filteration: Filter training and testing data with different lags.
delta: Sum of squared error.
est_priors: Main function for the beta prior approach. Estimate the priors of the beta distribution based on data for a certain day of the week.
evaluate: Evaluate the test results based on the WIS score. The WIS score calculation is based on the weighted_interval_score function from the 'evalcast' package from Delphi.
export_test_result: Export the result to a customized directory.
fill_missing_updates: Get a pivot table, filling NAs. If there is no update on issue date D but previous reports exist for an issue date D_p < D, all the dates between [D_p, D] are filled with the value reported on date D_p. If there is no update for any previous issue date, fill in with 0.
fill_rows: Re-index and fill NAs to make sure all reference dates have enough rows for updates.
frac_adj: Update the fraction using the beta prior approach.
frac_adj_with_pseudo: Update the fraction based on the pseudo counts for numerators and denominators.
generate_filename: Construct the filename for a model with given parameters.
get_7dav: Calculate the 7-day moving average for each issue date. The 7dav for date D reported on issue date D_i is the average from D-7 to D-1 (a minimal sketch of this convention follows the index).
get_files_list: List valid input files.
get_model: Train a model using quantile regression with lasso penalty, or load it from disk.
get_populous_counties: Subset the list of counties to those included in the 200 most populous in the US.
get_weekofmonth: Get week-of-month info according to a date.
main: Perform backfill correction on all desired signals and geo levels (a hypothetical usage sketch follows at the end of this index).
main_local: Main function to correct a single local signal.
model_training_and_testing: Fetch a model and use it to generate predictions/perform corrections.
objective: Generate the objective function.
read_data: Read a parquet file into a dataframe.
read_params: Return the params file as an R list.
run_backfill: Get backfill-corrected estimates for a single signal + geo combination.
run_backfill_local: Corrected estimates from a single local signal.
subset_valid_files: Return file names only if they contain data to be used in training.
training_days_check: Check available training days.
validity_checks: Check input data for validity.
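
The `get_7dav` entry above pins down the convention that the 7-day average attached to reference date D is the mean of the values for D-7 through D-1, a trailing window that excludes D itself. The sketch below only illustrates that windowing convention on invented data; it is not the package's implementation, which additionally computes the average separately for each issue date.

```r
# Illustration of the 7dav windowing convention only (not the package code):
# the average for reference date D uses the values from D-7 through D-1.
toy <- data.frame(
  time_value = as.Date("2022-01-01") + 0:9,
  value      = c(5, 7, 6, 8, 9, 4, 3, 6, 7, 5)
)

toy$value_7dav <- vapply(toy$time_value, function(d) {
  window <- toy$value[toy$time_value >= d - 7 & toy$time_value <= d - 1]
  if (length(window) == 0) NA_real_ else mean(window)
}, numeric(1))

toy  # the first row has no prior days, so its 7dav is NA
```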
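
Finally, a hypothetical sketch of how the two top-level entry points in the index (`read_params` and `main`) might be composed interactively. The `main(params)` signature matches the hunk context shown in PATCH 143/145 above; the argument to `read_params()` (the path to a JSON parameter file, cf. `params.json.template` in the repository) is an assumption, so treat this as orientation rather than documented usage.

```r
# Hypothetical usage sketch; the read_params() argument is assumed, not documented here.
library(delphiBackfillCorrection)

params <- read_params("params.json")  # returns the params file as an R list
main(params)  # backfill correction for all desired signals and geo levels
```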