diff --git a/activitysim/abm/models/settings_checker.py b/activitysim/abm/models/settings_checker.py index 907e7e999..2c387fdcf 100644 --- a/activitysim/abm/models/settings_checker.py +++ b/activitysim/abm/models/settings_checker.py @@ -322,7 +322,7 @@ def try_load_model_settings( ) -> tuple[PydanticBase | None, Exception | None]: msg = f"Attempting to load model settings for {model_name} via {model_settings_class.__name__} and {model_settings_file}" - logger.info(msg) + logger.debug(msg) file_logger.info(msg) try: @@ -348,7 +348,7 @@ class InputCheckerSettings(PydanticBase): ) result = model_settings, None msg = f"Successfully loaded model settings from {model_settings_file}" - logger.info(msg) + logger.debug(msg) except Exception as e: result = None, e return result @@ -358,12 +358,12 @@ def try_load_spec( model_name: str, model_settings: PydanticBase, spec_file: str, state: State ) -> tuple[DataFrame | None, Exception | None]: msg = f"Attempting to load SPEC for {model_name} via {model_settings.__class__.__name__}" - logger.info(msg) + logger.debug(msg) file_logger.info(msg) try: result = state.filesystem.read_model_spec(spec_file), None msg = f"Successfully loaded model SPEC from {spec_file}" - logger.info(msg) + logger.debug(msg) file_logger.info(msg) except Exception as e: # always return a dataframe @@ -375,13 +375,13 @@ def try_load_coefs( model_name: str, model_settings: PydanticBase, coefs_file: str, state: State ) -> tuple[DataFrame, Optional[Exception]]: msg = f"Attempting to load COEFFICIENTS for {model_name} via {model_settings.__class__.__name__}" - logger.info(msg) + logger.debug(msg) file_logger.info(msg) try: result = state.filesystem.read_model_coefficients(file_name=coefs_file), None msg = f"Successfully loaded model Coefficients from {coefs_file}" - logger.info(msg) + logger.debug(msg) file_logger.info(msg) except Exception as e: result = None, e @@ -423,7 +423,7 @@ def try_eval_spec_coefs( else: result = eval_coefficients(state, spec, coefs, 
estimator=None), None msg = f"Successfully evaluated coefficients for {model_name}" - logger.info(msg) + logger.debug(msg) file_logger.info(msg) except Exception as e: result = None, e diff --git a/activitysim/abm/models/trip_destination.py b/activitysim/abm/models/trip_destination.py index 6584134ef..c07b9ede3 100644 --- a/activitysim/abm/models/trip_destination.py +++ b/activitysim/abm/models/trip_destination.py @@ -820,7 +820,7 @@ def compute_logsums( adds od_logsum and dp_logsum columns to trips (in place) """ trace_label = tracing.extend_trace_label(trace_label, "compute_logsums") - logger.info("Running %s with %d samples", trace_label, destination_sample.shape[0]) + logger.debug("Running %s with %d samples", trace_label, destination_sample.shape[0]) # chunk usage is uniform so better to combine chunk_tag = "trip_destination.compute_logsums" @@ -977,7 +977,7 @@ def trip_destination_simulate( alt_dest_col_name = model_settings.ALT_DEST_COL_NAME - logger.info("Running trip_destination_simulate with %d trips", len(trips)) + logger.debug("Running trip_destination_simulate with %d trips", len(trips)) skims = skim_hotel.sample_skims(presample=False) @@ -1102,7 +1102,7 @@ def choose_trip_destination( ) trips = trips[~dropped_trips] - t0 = print_elapsed_time("%s.trip_destination_sample" % trace_label, t0) + t0 = print_elapsed_time("%s.trip_destination_sample" % trace_label, t0, debug=True) if trips.empty: return pd.Series(index=trips.index).to_frame("choice"), None @@ -1124,7 +1124,7 @@ def choose_trip_destination( destination_sample["od_logsum"] = 0.0 destination_sample["dp_logsum"] = 0.0 - t0 = print_elapsed_time("%s.compute_logsums" % trace_label, t0) + t0 = print_elapsed_time("%s.compute_logsums" % trace_label, t0, debug=True) destinations = trip_destination_simulate( state, @@ -1155,7 +1155,9 @@ def choose_trip_destination( else: destination_sample = None - t0 = print_elapsed_time("%s.trip_destination_simulate" % trace_label, t0) + t0 = print_elapsed_time( + 
"%s.trip_destination_simulate" % trace_label, t0, debug=True + ) return destinations, destination_sample @@ -1484,7 +1486,9 @@ def run_trip_destination( else: None - logger.info("Running %s with %d trips", nth_trace_label, nth_trips.shape[0]) + logger.debug( + "Running %s with %d trips", nth_trace_label, nth_trips.shape[0] + ) # - choose destination for nth_trips, segmented by primary_purpose choices_list = [] @@ -1649,7 +1653,7 @@ def trip_destination( estimator.write_table(state.get_dataframe("land_use"), "landuse", append=False) estimator.write_model_settings(model_settings, model_settings_file_name) - logger.info("Running %s with %d trips", trace_label, trips_df.shape[0]) + logger.debug("Running %s with %d trips", trace_label, trips_df.shape[0]) trips_df, save_sample_df = run_trip_destination( state, diff --git a/activitysim/abm/models/util/canonical_ids.py b/activitysim/abm/models/util/canonical_ids.py index ab2623916..e3246a623 100644 --- a/activitysim/abm/models/util/canonical_ids.py +++ b/activitysim/abm/models/util/canonical_ids.py @@ -255,7 +255,7 @@ def canonical_tours(state: workflow.State): ) non_mandatory_channels = enumerate_tour_types(non_mandatory_tour_flavors) - logger.info(f"Non-Mandatory tour flavors used are {non_mandatory_tour_flavors}") + logger.debug(f"Non-Mandatory tour flavors used are {non_mandatory_tour_flavors}") # ---- mandatory_channels mtf_model_settings_file_name = "mandatory_tour_frequency.yaml" @@ -271,7 +271,7 @@ def canonical_tours(state: workflow.State): ) mandatory_channels = enumerate_tour_types(mandatory_tour_flavors) - logger.info(f"Mandatory tour flavors used are {mandatory_tour_flavors}") + logger.debug(f"Mandatory tour flavors used are {mandatory_tour_flavors}") # ---- atwork_subtour_channels atwork_model_settings_file_name = "atwork_subtour_frequency.yaml" @@ -288,7 +288,7 @@ def canonical_tours(state: workflow.State): ) atwork_subtour_channels = enumerate_tour_types(atwork_subtour_flavors) - logger.info(f"Atwork 
subtour flavors used are {atwork_subtour_flavors}") + logger.debug(f"Atwork subtour flavors used are {atwork_subtour_flavors}") # we need to distinguish between subtours of different work tours # (e.g. eat1_1 is eat subtour for parent work tour 1 and eat1_2 is for work tour 2) @@ -317,7 +317,7 @@ def canonical_tours(state: workflow.State): joint_tour_flavors = determine_flavors_from_alts_file( jtf_alts, provided_joint_flavors, default_joint_flavors ) - logger.info(f"Joint tour flavors used are {joint_tour_flavors}") + logger.debug(f"Joint tour flavors used are {joint_tour_flavors}") joint_tour_channels = enumerate_tour_types(joint_tour_flavors) joint_tour_channels = ["j_%s" % c for c in joint_tour_channels] @@ -343,7 +343,7 @@ def canonical_tours(state: workflow.State): school_escort_flavors = {"escort": 2 * num_escortees} school_escort_channels = enumerate_tour_types(school_escort_flavors) school_escort_channels = ["se_%s" % c for c in school_escort_channels] - logger.info(f"School escort tour flavors used are {school_escort_flavors}") + logger.debug(f"School escort tour flavors used are {school_escort_flavors}") sub_channels = sub_channels + school_escort_channels diff --git a/activitysim/abm/models/util/tour_destination.py b/activitysim/abm/models/util/tour_destination.py index 2032d21b9..d99803bd7 100644 --- a/activitysim/abm/models/util/tour_destination.py +++ b/activitysim/abm/models/util/tour_destination.py @@ -92,18 +92,18 @@ def _destination_sample( coefficients_file_name=model_settings.COEFFICIENTS, ) - logger.info("running %s with %d tours", trace_label, len(choosers)) + logger.debug("running %s with %d tours", trace_label, len(choosers)) sample_size = model_settings.SAMPLE_SIZE if estimator and model_settings.ESTIMATION_SAMPLE_SIZE >= 0: sample_size = model_settings.ESTIMATION_SAMPLE_SIZE - logger.info( + logger.debug( f"Estimation mode for {trace_label} using sample size of {sample_size}" ) if state.settings.disable_destination_sampling: sample_size = 
0 - logger.info( + logger.debug( f"SAMPLE_SIZE set to 0 for {trace_label} because disable_destination_sampling is set" ) @@ -562,7 +562,7 @@ def destination_presample( trace_label = tracing.extend_trace_label(trace_label, "presample") chunk_tag = "tour_destination.presample" - logger.info(f"{trace_label} location_presample") + logger.debug(f"{trace_label} location_presample") alt_dest_col_name = model_settings.ALT_DEST_COL_NAME assert DEST_TAZ != alt_dest_col_name @@ -652,7 +652,7 @@ def run_destination_sample( ) if pre_sample_taz: - logger.info( + logger.debug( "Running %s destination_presample with %d tours" % (trace_label, len(tours)) ) @@ -740,7 +740,7 @@ def run_destination_logsums( how="left", ) - logger.info("Running %s with %s rows", trace_label, len(choosers)) + logger.debug("Running %s with %s rows", trace_label, len(choosers)) state.tracing.dump_df(DUMP, persons_merged, trace_label, "persons_merged") state.tracing.dump_df(DUMP, choosers, trace_label, "choosers") @@ -832,7 +832,7 @@ def run_destination_simulate( constants = model_settings.CONSTANTS - logger.info("Running tour_destination_simulate with %d persons", len(choosers)) + logger.debug("Running tour_destination_simulate with %d persons", len(choosers)) # create wrapper with keys for this lookup - in this case there is a home_zone_id in the choosers # and a zone_id in the alternatives which get merged during interaction diff --git a/activitysim/abm/models/util/tour_od.py b/activitysim/abm/models/util/tour_od.py index a74309be8..5ec9dd493 100644 --- a/activitysim/abm/models/util/tour_od.py +++ b/activitysim/abm/models/util/tour_od.py @@ -155,7 +155,7 @@ def _od_sample( else: alt_col_name = alt_od_col_name - logger.info("running %s with %d tours", trace_label, len(choosers)) + logger.debug("running %s with %d tours", trace_label, len(choosers)) sample_size = model_settings.SAMPLE_SIZE if state.settings.disable_destination_sampling or ( @@ -163,7 +163,7 @@ def _od_sample( ): # FIXME interaction_sample 
will return unsampled complete alternatives # with probs and pick_count - logger.info( + logger.debug( ( "Estimation mode for %s using unsampled alternatives " "short_circuit_choices" @@ -607,7 +607,7 @@ def od_presample( trace_label = tracing.extend_trace_label(trace_label, "presample") chunk_tag = "tour_od.presample" - logger.info(f"{trace_label} od_presample") + logger.debug(f"{trace_label} od_presample") alt_od_col_name = get_od_id_col(ORIG_MAZ, DEST_TAZ) @@ -711,7 +711,7 @@ def run_od_sample( ) if pre_sample_taz: - logger.info( + logger.debug( "Running %s destination_presample with %d tours" % (trace_label, len(tours)) ) @@ -780,7 +780,7 @@ def run_od_logsums( choosers[origin_id_col].astype(str) + "_" + choosers[dest_id_col].astype(str) ) - logger.info("Running %s with %s rows", trace_label, len(choosers)) + logger.debug("Running %s with %s rows", trace_label, len(choosers)) state.tracing.dump_df(DUMP, choosers, trace_label, "choosers") @@ -989,7 +989,7 @@ def run_od_simulate( constants = model_settings.CONSTANTS - logger.info("Running tour_destination_simulate with %d persons", len(choosers)) + logger.debug("Running tour_destination_simulate with %d persons", len(choosers)) # create wrapper with keys for this lookup - in this case there is an origin ID # column and a destination ID columns in the alternatives table. 
diff --git a/activitysim/abm/models/util/tour_scheduling.py b/activitysim/abm/models/util/tour_scheduling.py index 0a7c6675d..80474db59 100644 --- a/activitysim/abm/models/util/tour_scheduling.py +++ b/activitysim/abm/models/util/tour_scheduling.py @@ -140,7 +140,7 @@ def run_tour_scheduling( if estimators: timetable.begin_transaction(list(estimators.values())) - logger.info(f"Running {trace_label} with %d tours", len(chooser_tours)) + logger.debug(f"Running {trace_label} with %d tours", len(chooser_tours)) choices = vts.vectorize_tour_scheduling( state, chooser_tours, diff --git a/activitysim/abm/models/util/vectorize_tour_scheduling.py b/activitysim/abm/models/util/vectorize_tour_scheduling.py index d4593c21f..85efdaa70 100644 --- a/activitysim/abm/models/util/vectorize_tour_scheduling.py +++ b/activitysim/abm/models/util/vectorize_tour_scheduling.py @@ -161,7 +161,7 @@ def _compute_logsums( mandatory=False, ) choosers = alt_tdd.join(tours_merged, how="left", rsuffix="_chooser") - logger.info( + logger.debug( f"{trace_label} compute_logsums for {choosers.shape[0]} choosers {alt_tdd.shape[0]} alts" ) @@ -194,7 +194,7 @@ def _compute_logsums( if preprocessor_settings: simulate.set_skim_wrapper_targets(choosers, skims) - logger.info( + logger.debug( f"{trace_label} start preprocessing prior to compute_logsums for {choosers.shape[0]} choosers {alt_tdd.shape[0]} alts" ) expressions.assign_columns( @@ -204,7 +204,7 @@ def _compute_logsums( locals_dict=locals_dict, trace_label=trace_label, ) - logger.info( + logger.debug( f"{trace_label} end preprocessing prior to compute_logsums for {choosers.shape[0]} choosers {alt_tdd.shape[0]} alts" ) @@ -426,7 +426,7 @@ def compute_tour_scheduling_logsums( ) chunk_sizer.log_df(trace_label, "deduped_alt_tdds", deduped_alt_tdds) - logger.info( + logger.debug( f"{trace_label} compute_logsums " f"deduped_alt_tdds reduced number of rows by " f"{round(100 * (len(alt_tdd) - len(deduped_alt_tdds)) / len(alt_tdd), 2)}% " @@ -758,7 +758,7 @@ 
def _schedule_tours( """ - logger.info( + logger.debug( "%s schedule_tours running %d tour choices" % (tour_trace_label, len(tours)) ) @@ -908,7 +908,7 @@ def schedule_tours( logger.info("schedule_tours %s tours not monotonic_increasing - sorting df") tours = tours.sort_index() - logger.info( + logger.debug( "%s schedule_tours running %d tour choices" % (tour_trace_label, len(tours)) ) diff --git a/activitysim/core/assign.py b/activitysim/core/assign.py index 870904db6..fe39bfc44 100644 --- a/activitysim/core/assign.py +++ b/activitysim/core/assign.py @@ -360,7 +360,7 @@ def rng_lognormal(random_draws, mu, sigma, broadcast=True, scale=False): ) if trace_label: - logger.info(f"{trace_label}.assign_variables {target} = {expression}") + logger.debug(f"{trace_label}.assign_variables {target} = {expression}") if is_temp_singular(target) or is_throwaway(target): try: diff --git a/activitysim/core/chunk.py b/activitysim/core/chunk.py index c7a3d4d7a..a9064ca40 100644 --- a/activitysim/core/chunk.py +++ b/activitysim/core/chunk.py @@ -1224,7 +1224,7 @@ def adaptive_chunked_choosers( # The adaptive chunking logic is expensive and sometimes results # in needless data copying. So we short circuit it entirely # when chunking is disabled. 
- logger.info(f"Running chunkless with {len(choosers)} choosers") + logger.debug(f"Running chunkless with {len(choosers)} choosers") yield 0, choosers, trace_label, ChunkSizer( state, "chunkless", trace_label, 0, 0, state.settings.chunk_training_mode ) @@ -1250,7 +1250,7 @@ def adaptive_chunked_choosers( assert num_choosers > 0 assert chunk_size >= 0 - logger.info( + logger.debug( f"{trace_label} Running adaptive_chunked_choosers with {num_choosers} choosers" ) @@ -1275,7 +1275,7 @@ def adaptive_chunked_choosers( # grab the next chunk based on current rows_per_chunk chooser_chunk = choosers[offset : offset + rows_per_chunk] - logger.info( + logger.debug( f"Running chunk {i} of {estimated_number_of_chunks or '?'} " f"with {len(chooser_chunk)} of {num_choosers} choosers" ) @@ -1342,7 +1342,7 @@ def adaptive_chunked_choosers_and_alts( # The adaptive chunking logic is expensive and sometimes results # in needless data copying. So we short circuit it entirely # when chunking is disabled. - logger.info(f"Running chunkless with {len(choosers)} choosers") + logger.debug(f"Running chunkless with {len(choosers)} choosers") chunk_sizer = ChunkSizer( state, "chunkless", trace_label, 0, 0, state.settings.chunk_training_mode ) @@ -1372,7 +1372,7 @@ def adaptive_chunked_choosers_and_alts( alternatives.index[last_repeat] ) - logger.info( + logger.debug( f"{trace_label} Running adaptive_chunked_choosers_and_alts " f"with {num_choosers} choosers and {num_alternatives} alternatives" ) @@ -1442,7 +1442,7 @@ def adaptive_chunked_choosers_and_alts( chooser_chunk.index == np.unique(alternative_chunk.index.values) ).all() - logger.info( + logger.debug( f"Running chunk {i} of {estimated_number_of_chunks or '?'} " f"with {len(chooser_chunk)} of {num_choosers} choosers" ) @@ -1481,7 +1481,7 @@ def adaptive_chunked_choosers_by_chunk_id( # The adaptive chunking logic is expensive and sometimes results # in needless data copying. So we short circuit it entirely # when chunking is disabled. 
- logger.info(f"Running chunkless with {len(choosers)} choosers") + logger.debug(f"Running chunkless with {len(choosers)} choosers") chunk_sizer = ChunkSizer( state, "chunkless", trace_label, 0, 0, state.settings.chunk_training_mode ) @@ -1519,7 +1519,7 @@ def adaptive_chunked_choosers_by_chunk_id( choosers["chunk_id"].between(offset, offset + rows_per_chunk - 1) ] - logger.info( + logger.debug( f"{trace_label} Running chunk {i} of {estimated_number_of_chunks or '?'} " f"with {rows_per_chunk} of {num_choosers} choosers" ) diff --git a/activitysim/core/flow.py b/activitysim/core/flow.py index d3e83e72b..9bfff5816 100644 --- a/activitysim/core/flow.py +++ b/activitysim/core/flow.py @@ -77,7 +77,7 @@ def mark(self, tag, ping=True, logger=None, suffix=""): self._time_log.append((tag, timedelta(seconds=elapsed))) self._time_point = now if logger is not None: - logger.info( + logger.debug( "elapsed time {0} {1} {2}".format( tag, timedelta(seconds=elapsed), @@ -94,7 +94,7 @@ def mark(self, tag, ping=True, logger=None, suffix=""): else: self.aggregate_timing[tag] += elapsed - def summary(self, logger, tag, level=20, suffix=None): + def summary(self, logger, tag, level=10, suffix=None): gross_elaspsed = time.time() - self._time_start if suffix: msg = f"{tag} in {timedelta(seconds=gross_elaspsed)}: ({suffix})\n" diff --git a/activitysim/core/interaction_sample_simulate.py b/activitysim/core/interaction_sample_simulate.py index 9cdea3292..8704674b5 100644 --- a/activitysim/core/interaction_sample_simulate.py +++ b/activitysim/core/interaction_sample_simulate.py @@ -133,7 +133,7 @@ def _interaction_sample_simulate( # assert alternatives.index.name == choosers.index.name # asserting the index names are the same tells us nothing about the underlying data so why? 
- logger.info( + logger.debug( f"{trace_label} start merging choosers and alternatives to create interaction_df" ) @@ -169,7 +169,7 @@ def _interaction_sample_simulate( ) interaction_df = alternatives.join(choosers, how="left", rsuffix="_chooser") - logger.info( + logger.debug( f"{trace_label} end merging choosers and alternatives to create interaction_df" ) diff --git a/activitysim/core/interaction_simulate.py b/activitysim/core/interaction_simulate.py index d0af58e77..0473a5809 100644 --- a/activitysim/core/interaction_simulate.py +++ b/activitysim/core/interaction_simulate.py @@ -83,7 +83,7 @@ def eval_interaction_utilities( start_time = time.time() trace_label = tracing.extend_trace_label(trace_label, "eval_interaction_utils") - logger.info("Running eval_interaction_utilities on %s rows" % df.shape[0]) + logger.debug("Running eval_interaction_utilities on %s rows" % df.shape[0]) sharrow_enabled = state.settings.sharrow if compute_settings is None: @@ -91,7 +91,7 @@ def eval_interaction_utilities( if compute_settings.sharrow_skip: sharrow_enabled = False - logger.info(f"{trace_label} sharrow_enabled is {sharrow_enabled}") + logger.debug(f"{trace_label} sharrow_enabled is {sharrow_enabled}") trace_eval_results = None @@ -633,11 +633,11 @@ def to_series(x): raise # enter debugger now to see what's up timelogger.mark("sharrow interact test", True, logger, trace_label) - logger.info(f"utilities.dtypes {trace_label}\n{utilities.dtypes}") + logger.debug(f"utilities.dtypes {trace_label}\n{utilities.dtypes}") end_time = time.time() timelogger.summary(logger, "TIMING interact_simulate.eval_utils") - logger.info( + logger.debug( f"interact_simulate.eval_utils runtime: {timedelta(seconds=end_time - start_time)} {trace_label}" ) diff --git a/activitysim/core/simulate.py b/activitysim/core/simulate.py index 6cd4a51f6..fe22ca9ec 100644 --- a/activitysim/core/simulate.py +++ b/activitysim/core/simulate.py @@ -861,7 +861,7 @@ def eval_utilities( chunk_sizer.log_df(trace_label, 
"utilities", None) end_time = time.time() - logger.info( + logger.debug( f"simulate.eval_utils runtime: {timedelta(seconds=end_time - start_time)} {trace_label}" ) timelogger.summary(logger, "simulate.eval_utils timing") diff --git a/activitysim/core/tracing.py b/activitysim/core/tracing.py index 88bf0fc16..8c9fb61bd 100644 --- a/activitysim/core/tracing.py +++ b/activitysim/core/tracing.py @@ -147,12 +147,12 @@ def print_summary(label, df, describe=False, value_counts=False): if value_counts: n = 10 - logger.info( + logger.debug( "%s top %s value counts:\n%s" % (label, n, df.value_counts().nlargest(n)) ) if describe: - logger.info("%s summary:\n%s" % (label, df.describe())) + logger.debug("%s summary:\n%s" % (label, df.describe())) def write_df_csv( diff --git a/activitysim/core/util.py b/activitysim/core/util.py index 429d3eee0..aeb09ae16 100644 --- a/activitysim/core/util.py +++ b/activitysim/core/util.py @@ -722,9 +722,9 @@ def drop_unused_columns( custom_chooser_lines = inspect.getsource(custom_chooser) unique_variables_in_spec.update(re.findall(pattern, custom_chooser_lines)) - logger.info("Dropping unused variables in chooser table") + logger.debug("Dropping unused variables in chooser table") - logger.info( + logger.debug( "before dropping, the choosers table has {} columns: {}".format( len(choosers.columns), choosers.columns ) @@ -733,7 +733,7 @@ def drop_unused_columns( # keep only variables needed for spec choosers = choosers[[c for c in choosers.columns if c in unique_variables_in_spec]] - logger.info( + logger.debug( "after dropping, the choosers table has {} columns: {}".format( len(choosers.columns), choosers.columns ) diff --git a/docs/dev-guide/logging.md b/docs/dev-guide/logging.md index f2a5a3db7..b00451c46 100644 --- a/docs/dev-guide/logging.md +++ b/docs/dev-guide/logging.md @@ -69,3 +69,67 @@ handlers: if_sub_task: WARNING if_not_sub_task: NOTSET ``` + +## Logging levels +Python's built-in `logging` module that includes five levels of logging, 
which are (in order +of increasing severity): `DEBUG`, `INFO`, `WARNING`, `ERROR`, and `CRITICAL`. One can set the +minimum level to display messages in both the console window as well as the output logfile +within `logging.yaml` in the model settings. For example, if the block of code below were +inside the `logging.yaml` file, then the console window and output activitysim.log file would +print every logging message at the level of `INFO` and above: + +```yaml +loggers: +  activitysim: +    level: INFO +    handlers: [console, logfile] +    propagate: false +``` + +However, if a model run were to crash and the user wanted to print all of the `DEBUG` messages +in order to diagnose what was causing the crash, they would need to change the `level` within +the logging settings: + +```yaml +loggers: +  activitysim: +    level: DEBUG +    handlers: [console, logfile] +    propagate: false +``` + +The following guidelines demonstrate how each level is used within ActivitySim: + +### Debug (Level 10) +The `DEBUG` message indicates detailed information that would be of interest to a user while +debugging a model. The information reported at this level can include: +- Runtimes of specific steps of model components, such as the time to run each of sampling, +  logsum computation, and simulation in destination choice +- Table attributes at various stages of processing, such as the size or columns +- Evaluations of preprocessor or specification expressions +- General repetitive messages that can be used to narrow down exactly where an error is occurring + +### Info (Level 20) +The `INFO` message reports general information about the status of the model run, +particularly where the system is in the model flow. The information reported at this level +can include: +- Beginning and ending of a model step +- Intermediate stages of a longer step. For example, in trip destination, the trip number and +  segment will be reported at this level. 
+ +### Warning (Level 30) +The `WARNING` message notifies the user of a potential issue that they should be aware of, +but doesn't result in the model system failing. The information reported at this level can include: +- Future changes to dependencies +- ActivitySim needing to force certain travel behavior due to such behavior not working + +### Error (Level 40) +The `ERROR` message gives the user information that is causing an error in a model step. The +information reported at this level can include: +- More detailed issues on what could be causing an error message that wouldn't be shown in the + traceback message + +### Critical (Level 50) +The `CRITICAL` message gives the user information that is causing a critical error in a model step. +The information reported at this level can include: +- Reporting to the user on the teardown of a subprocess \ No newline at end of file