From 4e92192f1f98ad0bdb3e28514b46c71c077f8341 Mon Sep 17 00:00:00 2001 From: awaelchli Date: Thu, 22 Sep 2022 00:38:02 +0200 Subject: [PATCH 01/30] Remove deprecated callback hooks --- .../trainer/configuration_validator.py | 43 ------ .../deprecated_api/test_remove_1-8.py | 133 ------------------ 2 files changed, 176 deletions(-) diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/pytorch_lightning/trainer/configuration_validator.py index 023ccb09bd974..4202849b4f984 100644 --- a/src/pytorch_lightning/trainer/configuration_validator.py +++ b/src/pytorch_lightning/trainer/configuration_validator.py @@ -222,49 +222,6 @@ def _check_deprecated_callback_hooks(trainer: "pl.Trainer") -> None: if is_overridden(method_name="on_init_end", instance=callback): rank_zero_deprecation("The `on_init_end` callback hook was deprecated in v1.6 and will be removed in v1.8.") - if is_overridden(method_name="on_configure_sharded_model", instance=callback): - rank_zero_deprecation( - "The `on_configure_sharded_model` callback hook was deprecated in" - " v1.6 and will be removed in v1.8. Use `setup()` instead." - ) - if is_overridden(method_name="on_before_accelerator_backend_setup", instance=callback): - rank_zero_deprecation( - "The `on_before_accelerator_backend_setup` callback hook was deprecated in" - " v1.6 and will be removed in v1.8. Use `setup()` instead." - ) - if is_overridden(method_name="on_load_checkpoint", instance=callback): - rank_zero_deprecation( - f"`{callback.__class__.__name__}.on_load_checkpoint` will change its signature and behavior in v1.8." - " If you wish to load the state of the callback, use `load_state_dict` instead." - " In v1.8 `on_load_checkpoint(..., checkpoint)` will receive the entire loaded" - " checkpoint dictionary instead of callback state." 
- ) - - for hook, alternative_hook in ( - ["on_batch_start", "on_train_batch_start"], - ["on_batch_end", "on_train_batch_end"], - ): - if is_overridden(method_name=hook, instance=callback): - rank_zero_deprecation( - f"The `Callback.{hook}` hook was deprecated in v1.6 and" - f" will be removed in v1.8. Please use `Callback.{alternative_hook}` instead." - ) - for hook, alternative_hook in ( - ["on_epoch_start", "on__epoch_start"], - ["on_epoch_end", "on__epoch_end"], - ): - if is_overridden(method_name=hook, instance=callback): - rank_zero_deprecation( - f"The `Callback.{hook}` hook was deprecated in v1.6 and" - f" will be removed in v1.8. Please use `Callback.{alternative_hook}` instead." - ) - for hook in ("on_pretrain_routine_start", "on_pretrain_routine_end"): - if is_overridden(method_name=hook, instance=callback): - rank_zero_deprecation( - f"The `Callback.{hook}` hook has been deprecated in v1.6 and" - " will be removed in v1.8. Please use `Callback.on_fit_start` instead." - ) - def _check_precision_plugin_checkpoint_hooks(trainer: "pl.Trainer") -> None: if is_overridden(method_name="on_save_checkpoint", instance=trainer.precision_plugin, parent=PrecisionPlugin): diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py index f0910d57828c9..9bae227dac295 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py @@ -156,58 +156,6 @@ def test_v1_8_0_deprecated_lightning_optimizers(): assert trainer.lightning_optimizers == {} -def test_v1_8_0_remove_on_batch_start_end(tmpdir): - class TestCallback(Callback): - def on_batch_start(self, *args, **kwargs): - print("on_batch_start") - - model = BoringModel() - trainer = Trainer( - callbacks=[TestCallback()], - fast_dev_run=True, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `Callback.on_batch_start` hook was deprecated in v1.6 and will be removed in v1.8" - 
): - trainer.fit(model) - - class TestCallback(Callback): - def on_batch_end(self, *args, **kwargs): - print("on_batch_end") - - trainer = Trainer( - callbacks=[TestCallback()], - fast_dev_run=True, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `Callback.on_batch_end` hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - -def test_v1_8_0_on_configure_sharded_model(tmpdir): - class TestCallback(Callback): - def on_configure_sharded_model(self, trainer, model): - print("Configuring sharded model") - - model = BoringModel() - - trainer = Trainer( - callbacks=[TestCallback()], - max_epochs=1, - fast_dev_run=True, - enable_progress_bar=False, - logger=False, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `on_configure_sharded_model` callback hook was deprecated in v1.6 and will be removed in v1.8." - ): - trainer.fit(model) - - def test_v1_8_0_remove_on_epoch_start_end_lightning_module(tmpdir): class CustomModel(BoringModel): def on_epoch_start(self, *args, **kwargs): @@ -270,28 +218,6 @@ def on_pretrain_routine_end(self, *args, **kwargs): trainer.fit(model) -def test_v1_8_0_on_before_accelerator_backend_setup(tmpdir): - class TestCallback(Callback): - def on_before_accelerator_backend_setup(self, *args, **kwargs): - print("on_before_accelerator_backend called.") - - model = BoringModel() - - trainer = Trainer( - callbacks=[TestCallback()], - max_epochs=1, - fast_dev_run=True, - enable_progress_bar=False, - logger=False, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `on_before_accelerator_backend_setup` callback hook was deprecated in v1.6" - " and will be removed in v1.8" - ): - trainer.fit(model) - - def test_v1_8_0_logger_agg_parameters(): class CustomLogger(Logger): @rank_zero_only @@ -358,42 +284,6 @@ def agg_and_log_metrics(self, metrics, step): Trainer(logger=[logger2, logger3]) -def test_v1_8_0_callback_on_pretrain_routine_start_end(tmpdir): - 
class TestCallback(Callback): - def on_pretrain_routine_start(self, trainer, pl_module): - print("on_pretrain_routine_start called.") - - model = BoringModel() - - trainer = Trainer( - callbacks=[TestCallback()], - fast_dev_run=True, - enable_progress_bar=False, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `Callback.on_pretrain_routine_start` hook has been deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - class TestCallback(Callback): - def on_pretrain_routine_end(self, trainer, pl_module): - print("on_pretrain_routine_end called.") - - model = BoringModel() - - trainer = Trainer( - callbacks=[TestCallback()], - fast_dev_run=True, - enable_progress_bar=False, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `Callback.on_pretrain_routine_end` hook has been deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - @pytest.mark.flaky(reruns=3) @pytest.mark.parametrize(["action", "expected"], [("a", [3, 1]), ("b", [2]), ("c", [1])]) def test_simple_profiler_iterable_durations(tmpdir, action: str, expected: list): @@ -617,29 +507,6 @@ def test_deprecated_mc_save_checkpoint(): mc.save_checkpoint(trainer) -def test_v1_8_0_callback_on_load_checkpoint_hook(tmpdir): - class TestCallbackLoadHook(Callback): - def on_load_checkpoint(self, trainer, pl_module, callback_state): - print("overriding on_load_checkpoint") - - model = BoringModel() - trainer = Trainer( - callbacks=[TestCallbackLoadHook()], - max_epochs=1, - fast_dev_run=True, - enable_progress_bar=False, - logger=False, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="`TestCallbackLoadHook.on_load_checkpoint` will change its signature and behavior in v1.8." - " If you wish to load the state of the callback, use `load_state_dict` instead." - r" In v1.8 `on_load_checkpoint\(..., checkpoint\)` will receive the entire loaded" - " checkpoint dictionary instead of callback state." 
- ): - trainer.fit(model) - - def test_v1_8_0_callback_on_save_checkpoint_hook(tmpdir): class TestCallbackSaveHookReturn(Callback): def on_save_checkpoint(self, trainer, pl_module, checkpoint): From 5467354471a5312106b249daa32dee7bb37aeb7a Mon Sep 17 00:00:00 2001 From: awaelchli Date: Thu, 22 Sep 2022 00:44:30 +0200 Subject: [PATCH 02/30] changelog --- src/pytorch_lightning/CHANGELOG.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 0dbf13e4936b8..62b60fa4e6f35 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -198,6 +198,15 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Removed the deprecated way to set the distributed backend via the environment variable `PL_TORCH_DISTRIBUTED_BACKEND`, in favor of setting the `process_group_backend` in the strategy constructor ([#14693](https://github.com/Lightning-AI/lightning/pull/14693)) +- Removed deprecated callback hooks Remove deprecated callback hooks ([#14834](https://github.com/Lightning-AI/lightning/pull/14834)) + * `Callback.on_configure_sharded_model` in favor of `Callback.setup` + * `Callback.on_before_accelerator_backend_setup` in favor of `Callback.setup` + * `Callback.on_batch_start` in favor of `Callback.on_train_batch_start` + * `Callback.on_batch_end` in favor of `Callback.on_train_batch_end` + * `Callback.on_epoch_start` in favor of `Callback.on_{train,validation,test}_epoch_start` + * `Callback.on_epoch_end` in favor of `Callback.on_{train,validation,test}_epoch_end` + * `Callback.on_pretrain_routine_{start,end}` in favor of `Callback.on_fit_start` + ### Fixed From a7d36dd24c7d8bdb21481b04c3b3a0df8cfe296d Mon Sep 17 00:00:00 2001 From: awaelchli Date: Thu, 22 Sep 2022 00:45:41 +0200 Subject: [PATCH 03/30] revert --- .../trainer/configuration_validator.py | 8 +++++++ .../deprecated_api/test_remove_1-8.py | 23 +++++++++++++++++++ 2 
files changed, 31 insertions(+) diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/pytorch_lightning/trainer/configuration_validator.py index 4202849b4f984..778380158bcbf 100644 --- a/src/pytorch_lightning/trainer/configuration_validator.py +++ b/src/pytorch_lightning/trainer/configuration_validator.py @@ -222,6 +222,14 @@ def _check_deprecated_callback_hooks(trainer: "pl.Trainer") -> None: if is_overridden(method_name="on_init_end", instance=callback): rank_zero_deprecation("The `on_init_end` callback hook was deprecated in v1.6 and will be removed in v1.8.") + if is_overridden(method_name="on_load_checkpoint", instance=callback): + rank_zero_deprecation( + f"`{callback.__class__.__name__}.on_load_checkpoint` will change its signature and behavior in v1.8." + " If you wish to load the state of the callback, use `load_state_dict` instead." + " In v1.8 `on_load_checkpoint(..., checkpoint)` will receive the entire loaded" + " checkpoint dictionary instead of callback state." 
+ ) + def _check_precision_plugin_checkpoint_hooks(trainer: "pl.Trainer") -> None: if is_overridden(method_name="on_save_checkpoint", instance=trainer.precision_plugin, parent=PrecisionPlugin): diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py index 9bae227dac295..520a1cdedd7cb 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py @@ -507,6 +507,29 @@ def test_deprecated_mc_save_checkpoint(): mc.save_checkpoint(trainer) +def test_v1_8_0_callback_on_load_checkpoint_hook(tmpdir): + class TestCallbackLoadHook(Callback): + def on_load_checkpoint(self, trainer, pl_module, callback_state): + print("overriding on_load_checkpoint") + + model = BoringModel() + trainer = Trainer( + callbacks=[TestCallbackLoadHook()], + max_epochs=1, + fast_dev_run=True, + enable_progress_bar=False, + logger=False, + default_root_dir=tmpdir, + ) + with pytest.deprecated_call( + match="`TestCallbackLoadHook.on_load_checkpoint` will change its signature and behavior in v1.8." + " If you wish to load the state of the callback, use `load_state_dict` instead." + r" In v1.8 `on_load_checkpoint\(..., checkpoint\)` will receive the entire loaded" + " checkpoint dictionary instead of callback state." 
+ ): + trainer.fit(model) + + def test_v1_8_0_callback_on_save_checkpoint_hook(tmpdir): class TestCallbackSaveHookReturn(Callback): def on_save_checkpoint(self, trainer, pl_module, checkpoint): From 599deb579e6d15ed85e6b02df1a00cafb26b5bd2 Mon Sep 17 00:00:00 2001 From: awaelchli Date: Thu, 22 Sep 2022 00:47:31 +0200 Subject: [PATCH 04/30] removal --- src/pytorch_lightning/callbacks/callback.py | 70 --------------------- 1 file changed, 70 deletions(-) diff --git a/src/pytorch_lightning/callbacks/callback.py b/src/pytorch_lightning/callbacks/callback.py index cf57c5c2f7847..f16c85b7cf92e 100644 --- a/src/pytorch_lightning/callbacks/callback.py +++ b/src/pytorch_lightning/callbacks/callback.py @@ -56,22 +56,6 @@ def _generate_state_key(self, **kwargs: Any) -> str: """ return f"{self.__class__.__qualname__}{repr(kwargs)}" - def on_configure_sharded_model(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - r""" - .. deprecated:: v1.6 - This callback hook was deprecated in v1.6 and will be removed in v1.8. Use `setup()` instead. - - Called before configure sharded model. - """ - - def on_before_accelerator_backend_setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - r""" - .. deprecated:: v1.6 - This callback hook was deprecated in v1.6 and will be removed in v1.8. Use ``setup()`` instead. - - Called before accelerator is being setup. - """ - def setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: str) -> None: """Called when fit, validate, test, predict, or tune begins.""" @@ -146,42 +130,6 @@ def on_predict_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.Lightning def on_predict_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs: List[Any]) -> None: """Called when the predict epoch ends.""" - def on_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - r""" - .. 
deprecated:: v1.6 - This callback hook was deprecated in v1.6 and will be removed in v1.8. Use - ``on__epoch_start`` instead. - - Called when either of train/val/test epoch begins. - """ - - def on_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - r""" - .. deprecated:: v1.6 - This callback hook was deprecated in v1.6 and will be removed in v1.8. Use - ``on__epoch_end`` instead. - - Called when either of train/val/test epoch ends. - """ - - def on_batch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - r""" - .. deprecated:: v1.6 - This callback hook was deprecated in v1.6 and will be removed in v1.8. Use - ``on_train_batch_start`` instead. - - Called when the training batch begins. - """ - - def on_batch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - r""" - .. deprecated:: v1.6 - This callback hook was deprecated in v1.6 and will be removed in v1.8. Use - ``on_train_batch_end`` instead. - - Called when the training batch ends. - """ - def on_validation_batch_start( self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", batch: Any, batch_idx: int, dataloader_idx: int ) -> None: @@ -236,24 +184,6 @@ def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") def on_train_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: """Called when the train ends.""" - def on_pretrain_routine_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - r""" - .. deprecated:: v1.6 - - This callback hook was deprecated in v1.6 and will be removed in v1.8. Use ``on_fit_start`` instead. - - Called when the pretrain routine begins. - """ - - def on_pretrain_routine_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: - r""" - .. deprecated:: v1.6 - - This callback hook was deprecated in v1.6 and will be removed in v1.8. Use ``on_fit_start`` instead. - - Called when the pretrain routine ends. 
- """ - def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: """Called when the validation loop begins.""" From 9723db9c1394644bb6e11da89433799662d8518f Mon Sep 17 00:00:00 2001 From: awaelchli Date: Thu, 22 Sep 2022 00:57:20 +0200 Subject: [PATCH 05/30] Remove deprecated on_load/save_checkpoint behavior --- src/pytorch_lightning/callbacks/callback.py | 25 +++---------------- .../trainer/configuration_validator.py | 7 ------ src/pytorch_lightning/trainer/trainer.py | 22 +++------------- 3 files changed, 7 insertions(+), 47 deletions(-) diff --git a/src/pytorch_lightning/callbacks/callback.py b/src/pytorch_lightning/callbacks/callback.py index cf57c5c2f7847..7309837797c4f 100644 --- a/src/pytorch_lightning/callbacks/callback.py +++ b/src/pytorch_lightning/callbacks/callback.py @@ -293,7 +293,7 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None: def on_save_checkpoint( self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: Dict[str, Any] - ) -> Optional[dict]: + ) -> None: r""" Called when saving a checkpoint to give you a chance to store anything else you might want to save. @@ -301,18 +301,10 @@ def on_save_checkpoint( trainer: the current :class:`~pytorch_lightning.trainer.Trainer` instance. pl_module: the current :class:`~pytorch_lightning.core.module.LightningModule` instance. checkpoint: the checkpoint dictionary that will be saved. - - Returns: - None or the callback state. Support for returning callback state will be removed in v1.8. - - .. deprecated:: v1.6 - Returning a value from this method was deprecated in v1.6 and will be removed in v1.8. - Implement ``Callback.state_dict`` instead to return state. - In v1.8 ``Callback.on_save_checkpoint`` can only return None. 
""" def on_load_checkpoint( - self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", callback_state: Dict[str, Any] + self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: Dict[str, Any] ) -> None: r""" Called when loading a model checkpoint, use to reload state. @@ -320,18 +312,7 @@ def on_load_checkpoint( Args: trainer: the current :class:`~pytorch_lightning.trainer.Trainer` instance. pl_module: the current :class:`~pytorch_lightning.core.module.LightningModule` instance. - callback_state: the callback state returned by ``on_save_checkpoint``. - - Note: - The ``on_load_checkpoint`` won't be called with an undefined state. - If your ``on_load_checkpoint`` hook behavior doesn't rely on a state, - you will still need to override ``on_save_checkpoint`` to return a ``dummy state``. - - .. deprecated:: v1.6 - This callback hook will change its signature and behavior in v1.8. - If you wish to load the state of the callback, use ``Callback.load_state_dict`` instead. - In v1.8 ``Callback.on_load_checkpoint(checkpoint)`` will receive the entire loaded - checkpoint dictionary instead of only the callback state from the checkpoint. + checkpoint: the full checkpoint dictionary that got loaded by the Trainer. """ def on_before_backward(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", loss: Tensor) -> None: diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/pytorch_lightning/trainer/configuration_validator.py index 023ccb09bd974..fdb12b4c2998f 100644 --- a/src/pytorch_lightning/trainer/configuration_validator.py +++ b/src/pytorch_lightning/trainer/configuration_validator.py @@ -232,13 +232,6 @@ def _check_deprecated_callback_hooks(trainer: "pl.Trainer") -> None: "The `on_before_accelerator_backend_setup` callback hook was deprecated in" " v1.6 and will be removed in v1.8. Use `setup()` instead." 
) - if is_overridden(method_name="on_load_checkpoint", instance=callback): - rank_zero_deprecation( - f"`{callback.__class__.__name__}.on_load_checkpoint` will change its signature and behavior in v1.8." - " If you wish to load the state of the callback, use `load_state_dict` instead." - " In v1.8 `on_load_checkpoint(..., checkpoint)` will receive the entire loaded" - " checkpoint dictionary instead of callback state." - ) for hook, alternative_hook in ( ["on_batch_start", "on_train_batch_start"], diff --git a/src/pytorch_lightning/trainer/trainer.py b/src/pytorch_lightning/trainer/trainer.py index 14659bbd00a70..be504d7111992 100644 --- a/src/pytorch_lightning/trainer/trainer.py +++ b/src/pytorch_lightning/trainer/trainer.py @@ -1609,11 +1609,7 @@ def _call_callbacks_state_dict(self) -> Dict[str, dict]: return callback_state_dicts def _call_callbacks_on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: - """Called when saving a model checkpoint, calls every callback's `on_save_checkpoint` hook. - - Will be removed in v1.8: If state is returned, we insert the callback state into - ``checkpoint["callbacks"][Callback.state_key]``. It overrides ``state_dict`` if already present. - """ + """Called when saving a model checkpoint, calls every callback's `on_save_checkpoint` hook.""" pl_module = self.lightning_module if pl_module: prev_fx_name = pl_module._current_fx_name @@ -1621,14 +1617,7 @@ def _call_callbacks_on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None for callback in self.callbacks: with self.profiler.profile(f"[Callback]{callback.state_key}.on_save_checkpoint"): - state = callback.on_save_checkpoint(self, self.lightning_module, checkpoint) - if state: - rank_zero_deprecation( - f"Returning a value from `{callback.__class__.__name__}.on_save_checkpoint` is deprecated in v1.6" - " and will be removed in v1.8. Please override `Callback.state_dict`" - " to return state to be saved." 
- ) - checkpoint["callbacks"][callback.state_key] = state + callback.on_save_checkpoint(self, self.lightning_module, checkpoint) if pl_module: # restore current_fx when nested context @@ -1661,11 +1650,8 @@ def _call_callbacks_on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None ) for callback in self.callbacks: - state = callback_states.get(callback.state_key, callback_states.get(callback._legacy_state_key)) - if state: - state = deepcopy(state) - with self.profiler.profile(f"[Callback]{callback.state_key}.on_load_checkpoint"): - callback.on_load_checkpoint(self, self.lightning_module, state) + with self.profiler.profile(f"[Callback]{callback.state_key}.on_load_checkpoint"): + callback.on_load_checkpoint(self, self.lightning_module, checkpoint) if pl_module: # restore current_fx when nested context From e600d3f9640ec60e9b78555289f9e64c9dc2e9d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Thu, 22 Sep 2022 01:30:18 +0200 Subject: [PATCH 06/30] Finish removal --- docs/source-pytorch/extensions/callbacks.rst | 9 --- .../app_components/python/component_tracer.py | 1 + src/lightning_app/utilities/introspection.py | 8 --- src/pytorch_lightning/CHANGELOG.md | 2 +- .../callbacks/lambda_function.py | 8 --- src/pytorch_lightning/core/hooks.py | 42 ------------- .../loops/dataloader/evaluation_loop.py | 6 +- .../loops/epoch/training_epoch_loop.py | 4 -- src/pytorch_lightning/loops/fit_loop.py | 9 +-- .../trainer/configuration_validator.py | 29 --------- .../logger_connector/fx_validator.py | 16 ----- src/pytorch_lightning/trainer/trainer.py | 11 ---- .../deprecated_api/test_remove_1-8.py | 62 ------------------- tests/tests_pytorch/models/test_hooks.py | 38 ------------ tests/tests_pytorch/models/test_restore.py | 8 +-- .../logging_/test_eval_loop_logging.py | 16 +---- .../trainer/logging_/test_logger_connector.py | 17 ----- .../trainer/logging_/test_loop_logging.py | 4 -- .../logging_/test_train_loop_logging.py | 16 +---- 19 files changed, 9 
insertions(+), 297 deletions(-) diff --git a/docs/source-pytorch/extensions/callbacks.rst b/docs/source-pytorch/extensions/callbacks.rst index 72f02fadb6af6..d8b01e7a86054 100644 --- a/docs/source-pytorch/extensions/callbacks.rst +++ b/docs/source-pytorch/extensions/callbacks.rst @@ -152,12 +152,6 @@ state_key Hooks ===== -on_configure_sharded_model -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. automethod:: pytorch_lightning.callbacks.Callback.on_configure_sharded_model - :noindex: - setup ^^^^^ @@ -266,9 +260,6 @@ on_predict_epoch_end .. automethod:: pytorch_lightning.callbacks.Callback.on_predict_epoch_end :noindex: -.. automethod:: pytorch_lightning.callbacks.Callback.on_epoch_end - :noindex: - on_validation_batch_start ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/examples/app_components/python/component_tracer.py b/examples/app_components/python/component_tracer.py index 9edc48cf51a29..4baca5b380c99 100644 --- a/examples/app_components/python/component_tracer.py +++ b/examples/app_components/python/component_tracer.py @@ -27,6 +27,7 @@ def on_train_start(self, trainer, pl_module) -> None: print("Even the Lightning Work is available and state transfer works !") print(self.lightning_work) + # FIXME def on_batch_end(self, trainer, *_) -> None: # On every batch end, collects some information. 
# This is communicated automatically to the rest of the app, diff --git a/src/lightning_app/utilities/introspection.py b/src/lightning_app/utilities/introspection.py index 856f6d6ea84a8..9f04dd4bb985d 100644 --- a/src/lightning_app/utilities/introspection.py +++ b/src/lightning_app/utilities/introspection.py @@ -88,8 +88,6 @@ class LightningModuleVisitor(LightningVisitor): "on_fit_end", "on_load_checkpoint", "on_save_checkpoint", - "on_pretrain_routine_start", - "on_pretrain_routine_end", "on_test_batch_start", "on_test_batch_end", "on_test_epoch_start", @@ -184,18 +182,12 @@ class LightningCallbackVisitor(LightningVisitor): "on_validation_epoch_end", "on_test_epoch_start", "on_test_epoch_end", - "on_epoch_start", - "on_epoch_end", - "on_batch_start", "on_validation_batch_start", "on_validation_batch_end", "on_test_batch_start", "on_test_batch_end", - "on_batch_end", "on_train_start", "on_train_end", - "on_pretrain_routine_start", - "on_pretrain_routine_end", "on_validation_start", "on_validation_end", "on_test_start", diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 62b60fa4e6f35..d56bc80792af5 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -198,7 +198,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Removed the deprecated way to set the distributed backend via the environment variable `PL_TORCH_DISTRIBUTED_BACKEND`, in favor of setting the `process_group_backend` in the strategy constructor ([#14693](https://github.com/Lightning-AI/lightning/pull/14693)) -- Removed deprecated callback hooks Remove deprecated callback hooks ([#14834](https://github.com/Lightning-AI/lightning/pull/14834)) +- Removed deprecated callback hooks ([#14834](https://github.com/Lightning-AI/lightning/pull/14834)) * `Callback.on_configure_sharded_model` in favor of `Callback.setup` * `Callback.on_before_accelerator_backend_setup` in favor of `Callback.setup` * `Callback.on_batch_start` in favor of `Callback.on_train_batch_start` diff --git a/src/pytorch_lightning/callbacks/lambda_function.py b/src/pytorch_lightning/callbacks/lambda_function.py index a37122cb2aa04..e08e03d7566ff 100644 --- a/src/pytorch_lightning/callbacks/lambda_function.py +++ b/src/pytorch_lightning/callbacks/lambda_function.py @@ -40,9 +40,7 @@ class LambdaCallback(Callback): def __init__( self, - on_before_accelerator_backend_setup: Optional[Callable] = None, setup: Optional[Callable] = None, - on_configure_sharded_model: Optional[Callable] = None, teardown: Optional[Callable] = None, on_init_start: Optional[Callable] = None, on_init_end: Optional[Callable] = None, @@ -58,18 +56,12 @@ def __init__( on_validation_epoch_end: Optional[Callable] = None, on_test_epoch_start: Optional[Callable] = None, on_test_epoch_end: Optional[Callable] = None, - on_epoch_start: Optional[Callable] = None, - on_epoch_end: Optional[Callable] = None, - on_batch_start: Optional[Callable] = None, on_validation_batch_start: Optional[Callable] = None, on_validation_batch_end: Optional[Callable] = None, on_test_batch_start: Optional[Callable] = None, on_test_batch_end: Optional[Callable] = None, - on_batch_end: Optional[Callable] = None, on_train_start: Optional[Callable] = None, on_train_end: Optional[Callable] = None, - 
on_pretrain_routine_start: Optional[Callable] = None, - on_pretrain_routine_end: Optional[Callable] = None, on_validation_start: Optional[Callable] = None, on_validation_end: Optional[Callable] = None, on_test_start: Optional[Callable] = None, diff --git a/src/pytorch_lightning/core/hooks.py b/src/pytorch_lightning/core/hooks.py index 86b3d3f92e9c8..7831c94e61d90 100644 --- a/src/pytorch_lightning/core/hooks.py +++ b/src/pytorch_lightning/core/hooks.py @@ -63,32 +63,6 @@ def on_predict_start(self) -> None: def on_predict_end(self) -> None: """Called at the end of predicting.""" - def on_pretrain_routine_start(self) -> None: - """Called at the beginning of the pretrain routine (between fit and train start). - - - fit - - pretrain_routine start - - pretrain_routine end - - training_start - - .. deprecated:: v1.6 - :meth:`on_pretrain_routine_start` has been deprecated in v1.6 and will be removed in v1.8. - Use ``on_fit_start`` instead. - """ - - def on_pretrain_routine_end(self) -> None: - """Called at the end of the pretrain routine (between fit and train start). - - - fit - - pretrain_routine start - - pretrain_routine end - - training_start - - .. deprecated:: v1.6 - :meth:`on_pretrain_routine_end` has been deprecated in v1.6 and will be removed in v1.8. - Use ``on_fit_start`` instead. - """ - def on_train_batch_start(self, batch: Any, batch_idx: int) -> Optional[int]: """Called in the training loop before anything happens for that batch. @@ -189,22 +163,6 @@ def on_predict_model_eval(self) -> None: """Sets the model to eval during the predict loop.""" self.trainer.model.eval() - def on_epoch_start(self) -> None: - """Called when either of train/val/test epoch begins. - - .. deprecated:: v1.6 - :meth:`on_epoch_start` has been deprecated in v1.6 and will be removed in v1.8. - Use ``on__epoch_start`` instead. - """ - - def on_epoch_end(self) -> None: - """Called when either of train/val/test epoch ends. - - .. 
deprecated:: v1.6 - :meth:`on_epoch_end` has been deprecated in v1.6 and will be removed in v1.8. - Use ``on__epoch_end`` instead. - """ - def on_train_epoch_start(self) -> None: """Called in the training loop at the very beginning of the epoch.""" diff --git a/src/pytorch_lightning/loops/dataloader/evaluation_loop.py b/src/pytorch_lightning/loops/dataloader/evaluation_loop.py index bbd269b7045fd..627c4233f55bb 100644 --- a/src/pytorch_lightning/loops/dataloader/evaluation_loop.py +++ b/src/pytorch_lightning/loops/dataloader/evaluation_loop.py @@ -267,10 +267,8 @@ def _on_evaluation_end(self, *args: Any, **kwargs: Any) -> None: self.trainer._logger_connector.reset_results() def _on_evaluation_epoch_start(self, *args: Any, **kwargs: Any) -> None: - """Runs ``on_epoch_start`` and ``on_{validation/test}_epoch_start`` hooks.""" + """Runs the ``on_{validation/test}_epoch_start`` hooks.""" self.trainer._logger_connector.on_epoch_start() - self.trainer._call_callback_hooks("on_epoch_start", *args, **kwargs) - self.trainer._call_lightning_module_hook("on_epoch_start", *args, **kwargs) hook_name = "on_test_epoch_start" if self.trainer.testing else "on_validation_epoch_start" self.trainer._call_callback_hooks(hook_name, *args, **kwargs) @@ -295,8 +293,6 @@ def _on_evaluation_epoch_end(self) -> None: self.trainer._call_callback_hooks(hook_name) self.trainer._call_lightning_module_hook(hook_name) - self.trainer._call_callback_hooks("on_epoch_end") - self.trainer._call_lightning_module_hook("on_epoch_end") self.trainer._logger_connector.on_epoch_end() @staticmethod diff --git a/src/pytorch_lightning/loops/epoch/training_epoch_loop.py b/src/pytorch_lightning/loops/epoch/training_epoch_loop.py index 0877bb7347ff7..858c71e44c59f 100644 --- a/src/pytorch_lightning/loops/epoch/training_epoch_loop.py +++ b/src/pytorch_lightning/loops/epoch/training_epoch_loop.py @@ -200,9 +200,6 @@ def advance(self, data_fetcher: AbstractDataFetcher) -> None: # type: ignore[ov 
self._warning_cache.warn("train_dataloader yielded None. If this was on purpose, ignore this warning...") batch_output = [] else: - # hook - self.trainer._call_callback_hooks("on_batch_start") - # hook self.trainer._call_callback_hooks("on_train_batch_start", batch, batch_idx) response = self.trainer._call_lightning_module_hook("on_train_batch_start", batch, batch_idx) @@ -232,7 +229,6 @@ def advance(self, data_fetcher: AbstractDataFetcher) -> None: # type: ignore[ov self.trainer._call_callback_hooks("on_train_batch_end", batch_end_outputs, batch, batch_idx) self.trainer._call_lightning_module_hook("on_train_batch_end", batch_end_outputs, batch, batch_idx) - self.trainer._call_callback_hooks("on_batch_end") self.trainer._logger_connector.on_batch_end() self.batch_progress.increment_completed() diff --git a/src/pytorch_lightning/loops/fit_loop.py b/src/pytorch_lightning/loops/fit_loop.py index 48a5d1ef124e2..45ad5a2a61a30 100644 --- a/src/pytorch_lightning/loops/fit_loop.py +++ b/src/pytorch_lightning/loops/fit_loop.py @@ -219,8 +219,7 @@ def on_run_start(self) -> None: # type: ignore[override] self.trainer._call_strategy_hook("on_train_start") def on_advance_start(self) -> None: # type: ignore[override] - """Prepares the dataloader for training and calls the hooks ``on_epoch_start`` and - ``on_train_epoch_start``""" + """Prepares the dataloader for training and calls the hook ``on_train_epoch_start``""" model = self.trainer.lightning_module # reset train dataloader @@ -245,9 +244,6 @@ def on_advance_start(self) -> None: # type: ignore[override] self.trainer._logger_connector.on_epoch_start() - self.trainer._call_callback_hooks("on_epoch_start") - self.trainer._call_lightning_module_hook("on_epoch_start") - self.trainer._call_callback_hooks("on_train_epoch_start") self.trainer._call_lightning_module_hook("on_train_epoch_start") @@ -298,9 +294,6 @@ def on_advance_end(self) -> None: self.trainer._call_callback_hooks("on_train_epoch_end") 
self.trainer._call_lightning_module_hook("on_train_epoch_end") - self.trainer._call_callback_hooks("on_epoch_end") - self.trainer._call_lightning_module_hook("on_epoch_end") - - self.trainer._logger_connector.on_epoch_end() if self.epoch_loop._num_ready_batches_reached(): diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/pytorch_lightning/trainer/configuration_validator.py index 778380158bcbf..bb3e541bb6787 100644 --- a/src/pytorch_lightning/trainer/configuration_validator.py +++ b/src/pytorch_lightning/trainer/configuration_validator.py @@ -48,12 +48,8 @@ def verify_loop_configurations(trainer: "pl.Trainer") -> None: __verify_batch_transfer_support(trainer) _check_deprecated_callback_hooks(trainer) - # TODO: Delete on_epoch_start/on_epoch_end hooks in v1.8 - _check_on_epoch_start_end(model) # TODO: Delete CheckpointHooks off PrecisionPlugin in v1.8 - _check_precision_plugin_checkpoint_hooks(trainer) - # TODO: Delete on_pretrain_routine_start/end hooks in v1.8 - _check_on_pretrain_routine(model) # TODO: Delete CheckpointHooks off LightningDataModule in v1.8 _check_datamodule_checkpoint_hooks(trainer) @@ -188,31 +184,6 @@ def __check_training_step_requires_dataloader_iter(model: "pl.LightningModule") ) -# TODO: Remove on_epoch_start/on_epoch_end hooks in v1.8 -def _check_on_epoch_start_end(model: "pl.LightningModule") -> None: - hooks = ( - ("on_epoch_start", "on_<train/validation/test>_epoch_start"), - ("on_epoch_end", "on_<train/validation/test>_epoch_end"), - ) - - for hook, alternative_hook in hooks: - if is_overridden(hook, model): - rank_zero_deprecation( - f"The `LightningModule.{hook}` hook was deprecated in v1.6 and" - f" will be removed in v1.8. Please use `LightningModule.{alternative_hook}` instead."
- ) - - -def _check_on_pretrain_routine(model: "pl.LightningModule") -> None: - hooks = (("on_pretrain_routine_start", "on_fit_start"), ("on_pretrain_routine_end", "on_fit_start")) - for hook, alternative_hook in hooks: - if is_overridden(hook, model): - rank_zero_deprecation( - f"The `LightningModule.{hook}` hook was deprecated in v1.6 and" - f" will be removed in v1.8. Please use `LightningModule.{alternative_hook}` instead." - ) - - def _check_deprecated_callback_hooks(trainer: "pl.Trainer") -> None: for callback in trainer.callbacks: if is_overridden(method_name="on_init_start", instance=callback): diff --git a/src/pytorch_lightning/trainer/connectors/logger_connector/fx_validator.py b/src/pytorch_lightning/trainer/connectors/logger_connector/fx_validator.py index 87ff7428103b5..46b76c4c22d69 100644 --- a/src/pytorch_lightning/trainer/connectors/logger_connector/fx_validator.py +++ b/src/pytorch_lightning/trainer/connectors/logger_connector/fx_validator.py @@ -26,8 +26,6 @@ class _LogOptions(TypedDict): default_on_epoch: bool functions = { - "on_before_accelerator_backend_setup": None, - "on_configure_sharded_model": None, "on_before_backward": _LogOptions( allowed_on_step=(False, True), allowed_on_epoch=(False, True), default_on_step=True, default_on_epoch=False ), @@ -77,8 +75,6 @@ class _LogOptions(TypedDict): "on_test_end": None, "on_predict_start": None, "on_predict_end": None, - "on_pretrain_routine_start": None, - "on_pretrain_routine_end": None, "on_train_epoch_start": _LogOptions( allowed_on_step=(False,), allowed_on_epoch=(True,), default_on_step=False, default_on_epoch=True ), @@ -99,21 +95,9 @@ class _LogOptions(TypedDict): ), "on_predict_epoch_start": None, "on_predict_epoch_end": None, - "on_epoch_start": _LogOptions( - allowed_on_step=(False,), allowed_on_epoch=(True,), default_on_step=False, default_on_epoch=True - ), - "on_epoch_end": _LogOptions( - allowed_on_step=(False,), allowed_on_epoch=(True,), default_on_step=False, default_on_epoch=True 
- ), "on_before_batch_transfer": None, "transfer_batch_to_device": None, "on_after_batch_transfer": None, - "on_batch_start": _LogOptions( - allowed_on_step=(False, True), allowed_on_epoch=(False, True), default_on_step=True, default_on_epoch=False - ), - "on_batch_end": _LogOptions( - allowed_on_step=(False, True), allowed_on_epoch=(False, True), default_on_step=True, default_on_epoch=False - ), "on_train_batch_start": _LogOptions( allowed_on_step=(False, True), allowed_on_epoch=(False, True), default_on_step=True, default_on_epoch=False ), diff --git a/src/pytorch_lightning/trainer/trainer.py b/src/pytorch_lightning/trainer/trainer.py index 14659bbd00a70..afa423fcad0d8 100644 --- a/src/pytorch_lightning/trainer/trainer.py +++ b/src/pytorch_lightning/trainer/trainer.py @@ -1101,7 +1101,6 @@ def _run( # ---------------------------- # SET UP TRAINING # ---------------------------- - self._call_callback_hooks("on_before_accelerator_backend_setup") log.detail(f"{self.__class__.__name__}: setting up strategy environment") self.strategy.setup_environment() self.__setup_profiler() @@ -1262,15 +1261,6 @@ def _pre_training_routine(self): # register signals self._signal_connector.register_signal_handlers() - # -------------------------- - # Pre-train - # -------------------------- - self._call_callback_hooks("on_pretrain_routine_start") - self._call_lightning_module_hook("on_pretrain_routine_start") - - self._call_callback_hooks("on_pretrain_routine_end") - self._call_lightning_module_hook("on_pretrain_routine_end") - def _run_train(self) -> None: self._pre_training_routine() @@ -1463,7 +1453,6 @@ def _call_configure_sharded_model(self) -> None: materialize_module(self.lightning_module) self._call_lightning_module_hook("configure_sharded_model") - self._call_callback_hooks("on_configure_sharded_model") def _call_teardown_hook(self) -> None: fn = self.state.fn._setup_fn diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py 
b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py index 520a1cdedd7cb..8b226ea4fbb27 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py @@ -156,68 +156,6 @@ def test_v1_8_0_deprecated_lightning_optimizers(): assert trainer.lightning_optimizers == {} -def test_v1_8_0_remove_on_epoch_start_end_lightning_module(tmpdir): - class CustomModel(BoringModel): - def on_epoch_start(self, *args, **kwargs): - print("on_epoch_start") - - model = CustomModel() - trainer = Trainer( - fast_dev_run=True, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `LightningModule.on_epoch_start` hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - class CustomModel(BoringModel): - def on_epoch_end(self, *args, **kwargs): - print("on_epoch_end") - - trainer = Trainer( - fast_dev_run=True, - default_root_dir=tmpdir, - ) - - model = CustomModel() - with pytest.deprecated_call( - match="The `LightningModule.on_epoch_end` hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - -def test_v1_8_0_remove_on_pretrain_routine_start_end_lightning_module(tmpdir): - class CustomModel(BoringModel): - def on_pretrain_routine_start(self, *args, **kwargs): - print("foo") - - model = CustomModel() - trainer = Trainer( - fast_dev_run=True, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `LightningModule.on_pretrain_routine_start` hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - class CustomModel(BoringModel): - def on_pretrain_routine_end(self, *args, **kwargs): - print("foo") - - trainer = Trainer( - fast_dev_run=True, - default_root_dir=tmpdir, - ) - - model = CustomModel() - with pytest.deprecated_call( - match="The `LightningModule.on_pretrain_routine_end` hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - def 
test_v1_8_0_logger_agg_parameters(): class CustomLogger(Logger): @rank_zero_only diff --git a/tests/tests_pytorch/models/test_hooks.py b/tests/tests_pytorch/models/test_hooks.py index 158371a3097c5..910460873585c 100644 --- a/tests/tests_pytorch/models/test_hooks.py +++ b/tests/tests_pytorch/models/test_hooks.py @@ -290,7 +290,6 @@ def _auto_train_batch( dict(name="on_before_batch_transfer", args=(ANY, 0)), dict(name="transfer_batch_to_device", args=(ANY, device, 0)), dict(name="on_after_batch_transfer", args=(ANY, 0)), - dict(name="Callback.on_batch_start", args=(trainer, model)), dict(name="Callback.on_train_batch_start", args=(trainer, model, ANY, i)), dict(name="on_train_batch_start", args=(ANY, i)), dict(name="forward", args=(ANY,)), @@ -333,7 +332,6 @@ def _auto_train_batch( ), dict(name="Callback.on_train_batch_end", args=(trainer, model, dict(loss=ANY), ANY, i)), dict(name="on_train_batch_end", args=(dict(loss=ANY), ANY, i)), - dict(name="Callback.on_batch_end", args=(trainer, model)), ] ) return out @@ -348,7 +346,6 @@ def _manual_train_batch(trainer, model, batches, device=torch.device("cpu"), **k dict(name="on_before_batch_transfer", args=(ANY, 0)), dict(name="transfer_batch_to_device", args=(ANY, device, 0)), dict(name="on_after_batch_transfer", args=(ANY, 0)), - dict(name="Callback.on_batch_start", args=(trainer, model)), dict(name="Callback.on_train_batch_start", args=(trainer, model, ANY, i)), dict(name="on_train_batch_start", args=(ANY, i)), dict(name="forward", args=(ANY,)), @@ -368,7 +365,6 @@ def _manual_train_batch(trainer, model, batches, device=torch.device("cpu"), **k dict(name="training_step_end", args=(dict(loss=ANY),)), dict(name="Callback.on_train_batch_end", args=(trainer, model, dict(loss=ANY), ANY, i)), dict(name="on_train_batch_end", args=(dict(loss=ANY), ANY, i)), - dict(name="Callback.on_batch_end", args=(trainer, model)), ] ) return out @@ -377,16 +373,12 @@ def _manual_train_batch(trainer, model, batches, 
device=torch.device("cpu"), **k def _eval_epoch(fn, trainer, model, batches, key, device=torch.device("cpu")): outputs = {key: ANY} return [ - dict(name="Callback.on_epoch_start", args=(trainer, model)), - dict(name="on_epoch_start"), dict(name=f"Callback.on_{fn}_epoch_start", args=(trainer, model)), dict(name=f"on_{fn}_epoch_start"), *HookedModel._eval_batch(fn, trainer, model, batches, key, device=device), dict(name=f"{fn}_epoch_end", args=([outputs] * batches,)), dict(name=f"Callback.on_{fn}_epoch_end", args=(trainer, model)), dict(name=f"on_{fn}_epoch_end"), - dict(name="Callback.on_epoch_end", args=(trainer, model)), - dict(name="on_epoch_end"), ] @staticmethod @@ -506,20 +498,14 @@ def training_step(self, batch, batch_idx): dict(name="Callback.on_init_end", args=(trainer,)), dict(name="configure_callbacks"), dict(name="prepare_data"), - dict(name="Callback.on_before_accelerator_backend_setup", args=(trainer, model)), # DeepSpeed needs the batch size to figure out throughput logging *([dict(name="train_dataloader")] if kwargs.get("strategy") == "deepspeed" else []), dict(name="Callback.setup", args=(trainer, model), kwargs=dict(stage="fit")), dict(name="setup", kwargs=dict(stage="fit")), dict(name="configure_sharded_model"), - dict(name="Callback.on_configure_sharded_model", args=(trainer, model)), dict(name="configure_optimizers"), dict(name="Callback.on_fit_start", args=(trainer, model)), dict(name="on_fit_start"), - dict(name="Callback.on_pretrain_routine_start", args=(trainer, model)), - dict(name="on_pretrain_routine_start"), - dict(name="Callback.on_pretrain_routine_end", args=(trainer, model)), - dict(name="on_pretrain_routine_end"), dict(name="Callback.on_sanity_check_start", args=(trainer, model)), dict(name="val_dataloader"), dict(name="train", args=(False,)), @@ -538,8 +524,6 @@ def training_step(self, batch, batch_idx): dict(name="train_dataloader"), dict(name="Callback.on_train_start", args=(trainer, model)), dict(name="on_train_start"), - 
dict(name="Callback.on_epoch_start", args=(trainer, model)), - dict(name="on_epoch_start"), dict(name="Callback.on_train_epoch_start", args=(trainer, model)), dict(name="on_train_epoch_start"), *model._train_batch(trainer, model, train_batches, device=device, **kwargs), @@ -560,8 +544,6 @@ def training_step(self, batch, batch_idx): dict(name="Callback.on_save_checkpoint", args=(trainer, model, saved_ckpt)), dict(name="on_save_checkpoint", args=(saved_ckpt,)), dict(name="on_train_epoch_end"), - dict(name="Callback.on_epoch_end", args=(trainer, model)), - dict(name="on_epoch_end"), dict(name="Callback.on_train_end", args=(trainer, model)), dict(name="on_train_end"), dict(name="Callback.on_fit_end", args=(trainer, model)), @@ -624,7 +606,6 @@ def test_trainer_model_hook_system_fit_no_val_and_resume_max_epochs(tmpdir): dict(name="Callback.on_init_end", args=(trainer,)), dict(name="configure_callbacks"), dict(name="prepare_data"), - dict(name="Callback.on_before_accelerator_backend_setup", args=(trainer, model)), dict(name="Callback.setup", args=(trainer, model), kwargs=dict(stage="fit")), dict(name="setup", kwargs=dict(stage="fit")), dict(name="on_load_checkpoint", args=(loaded_ckpt,)), @@ -635,16 +616,10 @@ def test_trainer_model_hook_system_fit_no_val_and_resume_max_epochs(tmpdir): dict(name="configure_optimizers"), dict(name="Callback.on_fit_start", args=(trainer, model)), dict(name="on_fit_start"), - dict(name="Callback.on_pretrain_routine_start", args=(trainer, model)), - dict(name="on_pretrain_routine_start"), - dict(name="Callback.on_pretrain_routine_end", args=(trainer, model)), - dict(name="on_pretrain_routine_end"), dict(name="train", args=(True,)), dict(name="train_dataloader"), dict(name="Callback.on_train_start", args=(trainer, model)), dict(name="on_train_start"), - dict(name="Callback.on_epoch_start", args=(trainer, model)), - dict(name="on_epoch_start"), dict(name="Callback.on_train_epoch_start", args=(trainer, model)), 
dict(name="on_train_epoch_start"), *model._train_batch(trainer, model, 2, current_epoch=1, current_batch=0), @@ -654,8 +629,6 @@ def test_trainer_model_hook_system_fit_no_val_and_resume_max_epochs(tmpdir): dict(name="Callback.on_save_checkpoint", args=(trainer, model, saved_ckpt)), dict(name="on_save_checkpoint", args=(saved_ckpt,)), dict(name="on_train_epoch_end"), - dict(name="Callback.on_epoch_end", args=(trainer, model)), - dict(name="on_epoch_end"), dict(name="Callback.on_train_end", args=(trainer, model)), dict(name="on_train_end"), dict(name="Callback.on_fit_end", args=(trainer, model)), @@ -719,7 +692,6 @@ def test_trainer_model_hook_system_fit_no_val_and_resume_max_steps(tmpdir): dict(name="Callback.on_init_end", args=(trainer,)), dict(name="configure_callbacks"), dict(name="prepare_data"), - dict(name="Callback.on_before_accelerator_backend_setup", args=(trainer, model)), dict(name="Callback.setup", args=(trainer, model), kwargs=dict(stage="fit")), dict(name="setup", kwargs=dict(stage="fit")), dict(name="on_load_checkpoint", args=(loaded_ckpt,)), @@ -730,16 +702,10 @@ def test_trainer_model_hook_system_fit_no_val_and_resume_max_steps(tmpdir): dict(name="configure_optimizers"), dict(name="Callback.on_fit_start", args=(trainer, model)), dict(name="on_fit_start"), - dict(name="Callback.on_pretrain_routine_start", args=(trainer, model)), - dict(name="on_pretrain_routine_start"), - dict(name="Callback.on_pretrain_routine_end", args=(trainer, model)), - dict(name="on_pretrain_routine_end"), dict(name="train", args=(True,)), dict(name="train_dataloader"), dict(name="Callback.on_train_start", args=(trainer, model)), dict(name="on_train_start"), - dict(name="Callback.on_epoch_start", args=(trainer, model)), - dict(name="on_epoch_start"), dict(name="Callback.on_train_epoch_start", args=(trainer, model)), dict(name="on_train_epoch_start"), *model._train_batch(trainer, model, steps_after_reload, current_batch=1), @@ -749,8 +715,6 @@ def 
test_trainer_model_hook_system_fit_no_val_and_resume_max_steps(tmpdir): dict(name="Callback.on_save_checkpoint", args=(trainer, model, saved_ckpt)), dict(name="on_save_checkpoint", args=(saved_ckpt,)), dict(name="on_train_epoch_end"), - dict(name="Callback.on_epoch_end", args=(trainer, model)), - dict(name="on_epoch_end"), dict(name="Callback.on_train_end", args=(trainer, model)), dict(name="on_train_end"), dict(name="Callback.on_fit_end", args=(trainer, model)), @@ -802,7 +766,6 @@ def test_trainer_model_hook_system_eval(tmpdir, batches, verb, noun, dataloader, dict(name="Callback.on_init_end", args=(trainer,)), dict(name="configure_callbacks"), dict(name="prepare_data"), - dict(name="Callback.on_before_accelerator_backend_setup", args=(trainer, model)), dict(name="Callback.setup", args=(trainer, model), kwargs=dict(stage=verb)), dict(name="setup", kwargs=dict(stage=verb)), dict(name="configure_sharded_model"), @@ -832,7 +795,6 @@ def test_trainer_model_hook_system_predict(tmpdir): dict(name="Callback.on_init_end", args=(trainer,)), dict(name="configure_callbacks"), dict(name="prepare_data"), - dict(name="Callback.on_before_accelerator_backend_setup", args=(trainer, model)), dict(name="Callback.setup", args=(trainer, model), kwargs=dict(stage="predict")), dict(name="setup", kwargs=dict(stage="predict")), dict(name="configure_sharded_model"), diff --git a/tests/tests_pytorch/models/test_restore.py b/tests/tests_pytorch/models/test_restore.py index 39f623c73736d..ec0d08cb84b19 100644 --- a/tests/tests_pytorch/models/test_restore.py +++ b/tests/tests_pytorch/models/test_restore.py @@ -321,7 +321,7 @@ def test_try_resume_from_non_existing_checkpoint(tmpdir): class CaptureCallbacksBeforeTraining(Callback): callbacks = [] - def on_pretrain_routine_end(self, trainer, pl_module): + def on_fit_start(self, trainer, pl_module): self.callbacks = deepcopy(trainer.callbacks) @@ -347,15 +347,13 @@ def get_trainer_args(): # initial training trainer = Trainer(**get_trainer_args()) 
- with pytest.deprecated_call(match="`Callback.on_pretrain_routine_end` hook has been deprecated in v1.6"): - trainer.fit(model, datamodule=dm) + trainer.fit(model, datamodule=dm) callbacks_before_resume = deepcopy(trainer.callbacks) # resumed training trainer = Trainer(**get_trainer_args()) - with pytest.deprecated_call(match="`Callback.on_pretrain_routine_end` hook has been deprecated in v1.6"): - trainer.fit(model, datamodule=dm, ckpt_path=str(tmpdir / "last.ckpt")) + trainer.fit(model, datamodule=dm, ckpt_path=str(tmpdir / "last.ckpt")) assert len(callbacks_before_resume) == len(callback_capture.callbacks) diff --git a/tests/tests_pytorch/trainer/logging_/test_eval_loop_logging.py b/tests/tests_pytorch/trainer/logging_/test_eval_loop_logging.py index 45f363caba621..20c5323907027 100644 --- a/tests/tests_pytorch/trainer/logging_/test_eval_loop_logging.py +++ b/tests/tests_pytorch/trainer/logging_/test_eval_loop_logging.py @@ -308,12 +308,6 @@ def on_validation_start(self, _, pl_module): pl_module, "on_validation_start", on_steps=[False], on_epochs=[True], prob_bars=self.choices ) - def on_epoch_start(self, trainer, pl_module): - if trainer.validating: - self.make_logging( - pl_module, "on_epoch_start", on_steps=[False], on_epochs=[True], prob_bars=self.choices - ) - def on_validation_epoch_start(self, _, pl_module): self.make_logging( pl_module, "on_validation_epoch_start", on_steps=[False], on_epochs=[True], prob_bars=self.choices @@ -328,10 +322,6 @@ def on_validation_batch_end(self, _, pl_module, *__): prob_bars=self.choices, ) - def on_epoch_end(self, trainer, pl_module): - if trainer.validating: - self.make_logging(pl_module, "on_epoch_end", on_steps=[False], on_epochs=[True], prob_bars=self.choices) - def on_validation_epoch_end(self, _, pl_module): self.make_logging( pl_module, "on_validation_epoch_end", on_steps=[False], on_epochs=[True], prob_bars=self.choices @@ -353,17 +343,13 @@ def validation_step(self, batch, batch_idx): max_epochs=1, 
callbacks=[cb], ) - # TODO: Update this test in v1.8 (#11578) - with pytest.deprecated_call(match="`Callback.on_epoch_start` hook was deprecated in v1.6"): - trainer.fit(model) + trainer.fit(model) assert cb.call_counter == { "on_validation_batch_end": 4, "on_validation_start": 1, - "on_epoch_start": 1, "on_validation_epoch_start": 1, "on_validation_epoch_end": 1, - "on_epoch_end": 1, } def get_expected(on_epoch, values): diff --git a/tests/tests_pytorch/trainer/logging_/test_logger_connector.py b/tests/tests_pytorch/trainer/logging_/test_logger_connector.py index 266710abf9434..8d0c7c9d14d0e 100644 --- a/tests/tests_pytorch/trainer/logging_/test_logger_connector.py +++ b/tests/tests_pytorch/trainer/logging_/test_logger_connector.py @@ -37,22 +37,14 @@ def test_fx_validator(): "on_before_backward", "on_after_backward", "on_before_optimizer_step", - "on_batch_end", - "on_batch_start", - "on_before_accelerator_backend_setup", "on_before_zero_grad", - "on_epoch_end", - "on_epoch_start", "on_fit_end", - "on_configure_sharded_model", "on_fit_start", "on_init_end", "on_init_start", "on_exception", "on_load_checkpoint", "load_state_dict", - "on_pretrain_routine_end", - "on_pretrain_routine_start", "on_sanity_check_end", "on_sanity_check_start", "state_dict", @@ -86,17 +78,13 @@ def test_fx_validator(): } not_supported = { - "on_before_accelerator_backend_setup", "on_fit_end", "on_fit_start", - "on_configure_sharded_model", "on_init_end", "on_init_start", "on_exception", "on_load_checkpoint", "load_state_dict", - "on_pretrain_routine_end", - "on_pretrain_routine_start", "on_sanity_check_end", "on_sanity_check_start", "on_predict_batch_end", @@ -207,14 +195,10 @@ def test_fx_validator_integration(tmpdir): """Tries to log inside all `LightningModule` and `Callback` hooks to check any expected errors.""" not_supported = { None: "`self.trainer` reference is not registered", - "on_before_accelerator_backend_setup": "You can't", "setup": "You can't", "configure_sharded_model": 
"You can't", - "on_configure_sharded_model": "You can't", "configure_optimizers": "You can't", "on_fit_start": "You can't", - "on_pretrain_routine_start": "You can't", - "on_pretrain_routine_end": "You can't", "train_dataloader": "You can't", "val_dataloader": "You can't", "on_before_batch_transfer": "You can't", @@ -256,7 +240,6 @@ def test_fx_validator_integration(tmpdir): not_supported.update( { # `lightning_module` ref is now present from the `fit` call - "on_before_accelerator_backend_setup": "You can't", "test_dataloader": "You can't", "on_test_model_eval": "You can't", "on_test_model_train": "You can't", diff --git a/tests/tests_pytorch/trainer/logging_/test_loop_logging.py b/tests/tests_pytorch/trainer/logging_/test_loop_logging.py index 66c7bdcd25cf3..3251d4d2aa5ef 100644 --- a/tests/tests_pytorch/trainer/logging_/test_loop_logging.py +++ b/tests/tests_pytorch/trainer/logging_/test_loop_logging.py @@ -60,8 +60,6 @@ def _make_assertion(model, hooks, result_mock, on_step, on_epoch, extra_kwargs): "optimizer_zero_grad", "training_step", "training_step_end", - "on_batch_start", - "on_batch_end", "on_train_batch_start", "on_train_batch_end", ] @@ -72,8 +70,6 @@ def _make_assertion(model, hooks, result_mock, on_step, on_epoch, extra_kwargs): "on_train_start", "on_train_epoch_start", "on_train_epoch_end", - "on_epoch_start", - "on_epoch_end", "training_epoch_end", ] all_logging_hooks = all_logging_hooks - set(hooks) diff --git a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py index 6a2feae352c3b..898d2ee1e1525 100644 --- a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py +++ b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py @@ -284,9 +284,6 @@ def make_logging(self, pl_module, func_name, on_steps, on_epochs, prob_bars): def on_train_start(self, _, pl_module): self.make_logging(pl_module, "on_train_start", on_steps=[False], on_epochs=[True], 
prob_bars=self.choices) - def on_epoch_start(self, _, pl_module): - self.make_logging(pl_module, "on_epoch_start", on_steps=[False], on_epochs=[True], prob_bars=self.choices) - def on_train_epoch_start(self, _, pl_module): self.make_logging( pl_module, "on_train_epoch_start", on_steps=[False], on_epochs=[True], prob_bars=self.choices @@ -307,9 +304,6 @@ def on_train_epoch_end(self, _, pl_module): pl_module, "on_train_epoch_end", on_steps=[False], on_epochs=[True], prob_bars=self.choices ) - def on_epoch_end(self, _, pl_module): - self.make_logging(pl_module, "on_epoch_end", on_steps=[False], on_epochs=[True], prob_bars=self.choices) - class TestModel(BoringModel): seen_losses = [] @@ -330,9 +324,7 @@ def training_step(self, batch, batch_idx): callbacks=[cb], ) - # TODO: Update this test in v1.8 (#11578) - with pytest.deprecated_call(match="`Callback.on_epoch_start` hook was deprecated in v1.6"): - trainer.fit(model) + trainer.fit(model) # Make sure the func_name output equals the average from all logged values when on_epoch true assert trainer.progress_bar_callback.get_metrics(trainer, model)["train_loss"] == model.seen_losses[-1] @@ -340,12 +332,10 @@ def training_step(self, batch, batch_idx): assert cb.call_counter == { "on_train_start": 1, - "on_epoch_start": 1, "on_train_epoch_start": 1, "on_train_batch_start": 2, "on_train_batch_end": 2, "on_train_epoch_end": 1, - "on_epoch_end": 1, } def get_expected(on_epoch, values): @@ -535,9 +525,6 @@ def on_train_epoch_start(self, trainer, pl_module): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): self.log("on_train_batch_end", 3) - def on_epoch_end(self, trainer, pl_module): - self.log("on_epoch_end", 4) - def on_train_epoch_end(self, trainer, pl_module): self.log("on_train_epoch_end", 5) @@ -559,7 +546,6 @@ def on_train_epoch_end(self, trainer, pl_module): "on_train_start": 1, "on_train_epoch_start": 2, "on_train_batch_end": 3, - "on_epoch_end": 4, "on_train_epoch_end": 5, } assert 
trainer.callback_metrics == expected From 8e7b96518727da797647c3558480d2cbc42a79f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Thu, 22 Sep 2022 14:45:28 +0200 Subject: [PATCH 07/30] Fix missed tests --- examples/app_components/python/component_tracer.py | 3 +-- tests/tests_pytorch/models/test_hooks.py | 4 ---- .../trainer/logging_/test_train_loop_logging.py | 5 +---- 3 files changed, 2 insertions(+), 10 deletions(-) diff --git a/examples/app_components/python/component_tracer.py b/examples/app_components/python/component_tracer.py index 4baca5b380c99..a3fa69ebf8227 100644 --- a/examples/app_components/python/component_tracer.py +++ b/examples/app_components/python/component_tracer.py @@ -27,8 +27,7 @@ def on_train_start(self, trainer, pl_module) -> None: print("Even the Lightning Work is available and state transfer works !") print(self.lightning_work) - # FIXME - def on_batch_end(self, trainer, *_) -> None: + def on_train_batch_end(self, trainer, *_) -> None: # On every batch end, collects some information. # This is communicated automatically to the rest of the app, # so you can track your training in real time in the Lightning App UI. 
diff --git a/tests/tests_pytorch/models/test_hooks.py b/tests/tests_pytorch/models/test_hooks.py index 910460873585c..2e89eef537e97 100644 --- a/tests/tests_pytorch/models/test_hooks.py +++ b/tests/tests_pytorch/models/test_hooks.py @@ -612,7 +612,6 @@ def test_trainer_model_hook_system_fit_no_val_and_resume_max_epochs(tmpdir): dict(name="Callback.on_load_checkpoint", args=(trainer, model, {"foo": True})), dict(name="Callback.load_state_dict", args=({"foo": True},)), dict(name="configure_sharded_model"), - dict(name="Callback.on_configure_sharded_model", args=(trainer, model)), dict(name="configure_optimizers"), dict(name="Callback.on_fit_start", args=(trainer, model)), dict(name="on_fit_start"), @@ -698,7 +697,6 @@ def test_trainer_model_hook_system_fit_no_val_and_resume_max_steps(tmpdir): dict(name="Callback.on_load_checkpoint", args=(trainer, model, {"foo": True})), dict(name="Callback.load_state_dict", args=({"foo": True},)), dict(name="configure_sharded_model"), - dict(name="Callback.on_configure_sharded_model", args=(trainer, model)), dict(name="configure_optimizers"), dict(name="Callback.on_fit_start", args=(trainer, model)), dict(name="on_fit_start"), @@ -769,7 +767,6 @@ def test_trainer_model_hook_system_eval(tmpdir, batches, verb, noun, dataloader, dict(name="Callback.setup", args=(trainer, model), kwargs=dict(stage=verb)), dict(name="setup", kwargs=dict(stage=verb)), dict(name="configure_sharded_model"), - dict(name="Callback.on_configure_sharded_model", args=(trainer, model)), *(hooks if batches else []), dict(name="Callback.teardown", args=(trainer, model), kwargs=dict(stage=verb)), dict(name="teardown", kwargs=dict(stage=verb)), @@ -798,7 +795,6 @@ def test_trainer_model_hook_system_predict(tmpdir): dict(name="Callback.setup", args=(trainer, model), kwargs=dict(stage="predict")), dict(name="setup", kwargs=dict(stage="predict")), dict(name="configure_sharded_model"), - dict(name="Callback.on_configure_sharded_model", args=(trainer, model)), 
dict(name="predict_dataloader"), dict(name="train", args=(False,)), dict(name="on_predict_model_eval"), diff --git a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py index 898d2ee1e1525..8a44b7e131644 100644 --- a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py +++ b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py @@ -537,10 +537,7 @@ def on_train_epoch_end(self, trainer, pl_module): enable_model_summary=False, callbacks=[LoggingCallback()], ) - - # TODO: Update this test in v1.8 (#11578) - with pytest.deprecated_call(match="`Callback.on_epoch_end` hook was deprecated in v1.6"): - trainer.fit(model) + trainer.fit(model) expected = { "on_train_start": 1, From f80814482ee39efdb40e4d7e51cf8c59abb52bd5 Mon Sep 17 00:00:00 2001 From: otaj Date: Tue, 27 Sep 2022 14:45:59 +0200 Subject: [PATCH 08/30] fixed botched merge --- .../deprecated_api/test_remove_1-8.py | 172 ------------------ 1 file changed, 172 deletions(-) diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py index 3ccd916ccf9d7..eea1a3c9f1765 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py @@ -67,178 +67,6 @@ def test_v1_8_0_deprecated_lightning_optimizers(): assert trainer.lightning_optimizers == {} -def test_v1_8_0_remove_on_batch_start_end(tmpdir): - class TestCallback(Callback): - def on_batch_start(self, *args, **kwargs): - print("on_batch_start") - - model = BoringModel() - trainer = Trainer( - callbacks=[TestCallback()], - fast_dev_run=True, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `Callback.on_batch_start` hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - class TestCallback(Callback): - def on_batch_end(self, *args, **kwargs): - print("on_batch_end") - - trainer = Trainer( - 
callbacks=[TestCallback()], - fast_dev_run=True, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `Callback.on_batch_end` hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - -def test_v1_8_0_on_configure_sharded_model(tmpdir): - class TestCallback(Callback): - def on_configure_sharded_model(self, trainer, model): - print("Configuring sharded model") - - model = BoringModel() - - trainer = Trainer( - callbacks=[TestCallback()], - max_epochs=1, - fast_dev_run=True, - enable_progress_bar=False, - logger=False, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `on_configure_sharded_model` callback hook was deprecated in v1.6 and will be removed in v1.8." - ): - trainer.fit(model) - - -def test_v1_8_0_remove_on_epoch_start_end_lightning_module(tmpdir): - class CustomModel(BoringModel): - def on_epoch_start(self, *args, **kwargs): - print("on_epoch_start") - - model = CustomModel() - trainer = Trainer( - fast_dev_run=True, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `LightningModule.on_epoch_start` hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - class CustomModel(BoringModel): - def on_epoch_end(self, *args, **kwargs): - print("on_epoch_end") - - trainer = Trainer( - fast_dev_run=True, - default_root_dir=tmpdir, - ) - - model = CustomModel() - with pytest.deprecated_call( - match="The `LightningModule.on_epoch_end` hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - -def test_v1_8_0_remove_on_pretrain_routine_start_end_lightning_module(tmpdir): - class CustomModel(BoringModel): - def on_pretrain_routine_start(self, *args, **kwargs): - print("foo") - - model = CustomModel() - trainer = Trainer( - fast_dev_run=True, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `LightningModule.on_pretrain_routine_start` hook was deprecated in v1.6 and will be removed in 
v1.8" - ): - trainer.fit(model) - - class CustomModel(BoringModel): - def on_pretrain_routine_end(self, *args, **kwargs): - print("foo") - - trainer = Trainer( - fast_dev_run=True, - default_root_dir=tmpdir, - ) - - model = CustomModel() - with pytest.deprecated_call( - match="The `LightningModule.on_pretrain_routine_end` hook was deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - -def test_v1_8_0_on_before_accelerator_backend_setup(tmpdir): - class TestCallback(Callback): - def on_before_accelerator_backend_setup(self, *args, **kwargs): - print("on_before_accelerator_backend called.") - - model = BoringModel() - - trainer = Trainer( - callbacks=[TestCallback()], - max_epochs=1, - fast_dev_run=True, - enable_progress_bar=False, - logger=False, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `on_before_accelerator_backend_setup` callback hook was deprecated in v1.6" - " and will be removed in v1.8" - ): - trainer.fit(model) - - -def test_v1_8_0_callback_on_pretrain_routine_start_end(tmpdir): - class TestCallback(Callback): - def on_pretrain_routine_start(self, trainer, pl_module): - print("on_pretrain_routine_start called.") - - model = BoringModel() - - trainer = Trainer( - callbacks=[TestCallback()], - fast_dev_run=True, - enable_progress_bar=False, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `Callback.on_pretrain_routine_start` hook has been deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - class TestCallback(Callback): - def on_pretrain_routine_end(self, trainer, pl_module): - print("on_pretrain_routine_end called.") - - model = BoringModel() - - trainer = Trainer( - callbacks=[TestCallback()], - fast_dev_run=True, - enable_progress_bar=False, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="The `Callback.on_pretrain_routine_end` hook has been deprecated in v1.6 and will be removed in v1.8" - ): - trainer.fit(model) - - def 
test_v1_8_0_datamodule_checkpointhooks(): class CustomBoringDataModuleSave(BoringDataModule): def on_save_checkpoint(self, checkpoint): From 94c5e030ae429e44430045e57cbbb63996e436de Mon Sep 17 00:00:00 2001 From: otaj Date: Tue, 27 Sep 2022 15:15:51 +0200 Subject: [PATCH 09/30] fixed botched merge v2 --- tests/tests_pytorch/deprecated_api/test_remove_1-8.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py index eea1a3c9f1765..86b58f5cda180 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py @@ -59,14 +59,6 @@ def test_v_1_8_0_deprecated_device_stats_monitor_prefix_metric_keys(): prefix_metric_keys({"foo": 1.0}, "bar") -def test_v1_8_0_deprecated_lightning_optimizers(): - trainer = Trainer() - with pytest.deprecated_call( - match="Trainer.lightning_optimizers` is deprecated in v1.6 and will be removed in v1.8" - ): - assert trainer.lightning_optimizers == {} - - def test_v1_8_0_datamodule_checkpointhooks(): class CustomBoringDataModuleSave(BoringDataModule): def on_save_checkpoint(self, checkpoint): From f5a749e4e917309ffbe247def8d5ada832ee9baa Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 30 Sep 2022 08:37:26 +0000 Subject: [PATCH 10/30] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/pytorch_lightning/trainer/configuration_validator.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/pytorch_lightning/trainer/configuration_validator.py index aaf30c55eeb82..ad44b50800192 100644 --- a/src/pytorch_lightning/trainer/configuration_validator.py +++ b/src/pytorch_lightning/trainer/configuration_validator.py @@ -195,4 +195,3 @@ def _check_deprecated_callback_hooks(trainer: "pl.Trainer") -> 
None: " In v1.8 `on_load_checkpoint(..., checkpoint)` will receive the entire loaded" " checkpoint dictionary instead of callback state." ) - From 2c954c75e7dda5332d68c2183dda235c26e732b1 Mon Sep 17 00:00:00 2001 From: awaelchli Date: Fri, 30 Sep 2022 18:34:55 +0200 Subject: [PATCH 11/30] fix tests --- tests/tests_pytorch/callbacks/test_lambda_function.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/tests_pytorch/callbacks/test_lambda_function.py b/tests/tests_pytorch/callbacks/test_lambda_function.py index d7816a4f3988a..a9b020ca004bb 100644 --- a/tests/tests_pytorch/callbacks/test_lambda_function.py +++ b/tests/tests_pytorch/callbacks/test_lambda_function.py @@ -49,7 +49,7 @@ def call(hook, *_, **__): callbacks=[LambdaCallback(**hooks_args)], ) with pytest.deprecated_call( - match="`on_configure_sharded_model` callback hook was deprecated in v1.6 and will be removed in v1.8" + match="`LambdaCallback.on_load_checkpoint` will change its signature and behavior in v1.8" ): trainer.fit(model) ckpt_path = trainer.checkpoint_callback.best_model_path @@ -65,15 +65,15 @@ def call(hook, *_, **__): callbacks=[LambdaCallback(**hooks_args)], ) with pytest.deprecated_call( - match="`on_configure_sharded_model` callback hook was deprecated in v1.6 and will be removed in v1.8" + match="`LambdaCallback.on_load_checkpoint` will change its signature and behavior in v1.8" ): trainer.fit(model, ckpt_path=ckpt_path) with pytest.deprecated_call( - match="`on_configure_sharded_model` callback hook was deprecated in v1.6 and will be removed in v1.8" + match="`LambdaCallback.on_load_checkpoint` will change its signature and behavior in v1.8" ): trainer.test(model) with pytest.deprecated_call( - match="`on_configure_sharded_model` callback hook was deprecated in v1.6 and will be removed in v1.8" + match="`LambdaCallback.on_load_checkpoint` will change its signature and behavior in v1.8" ): trainer.predict(model) From 
758da2bb84c15ca50355ed0053f7afd35d722790 Mon Sep 17 00:00:00 2001
From: awaelchli
Date: Sat, 8 Oct 2022 20:03:29 +0200
Subject: [PATCH 12/30] convert warnings to error

---
 .../trainer/configuration_validator.py        |  63 ++++++-
 .../deprecated_api/test_remove_1-8.py         | 157 +++++++++++++++++-
 2 files changed, 216 insertions(+), 4 deletions(-)

diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/pytorch_lightning/trainer/configuration_validator.py
index 6d6fcd512e2cb..1823619950df6 100644
--- a/src/pytorch_lightning/trainer/configuration_validator.py
+++ b/src/pytorch_lightning/trainer/configuration_validator.py
@@ -46,7 +46,12 @@ def verify_loop_configurations(trainer: "pl.Trainer") -> None:
         __verify_eval_loop_configuration(trainer, model, "predict")
 
     __verify_batch_transfer_support(trainer)
+    # TODO: Delete this check in v2.0
     _check_deprecated_callback_hooks(trainer)
+    # TODO: Delete this check in v2.0
+    _check_on_epoch_start_end(model)
+    # TODO: Delete this check in v2.0
+    _check_on_pretrain_routine(model)
 
 
 def __verify_train_val_loop_configuration(trainer: "pl.Trainer", model: "pl.LightningModule") -> None:
@@ -179,12 +184,66 @@ def __check_training_step_requires_dataloader_iter(model: "pl.LightningModule")
         )
+
+
+def _check_on_epoch_start_end(model: "pl.LightningModule") -> None:
+    hooks = (
+        ("on_epoch_start", "on_<train/validation/test>_epoch_start"),
+        ("on_epoch_end", "on_<train/validation/test>_epoch_end"),
+    )
+
+    for hook, alternative_hook in hooks:
+        if callable(getattr(model, hook, None)):
+            raise RuntimeError(
+                f"The `LightningModule.{hook}` hook was removed in v1.8. Please use"
+                f" `LightningModule.{alternative_hook}` instead."
+            )
+
+
+def _check_on_pretrain_routine(model: "pl.LightningModule") -> None:
+    hooks = (("on_pretrain_routine_start", "on_fit_start"), ("on_pretrain_routine_end", "on_fit_start"))
+    for hook, alternative_hook in hooks:
+        if callable(getattr(model, hook, None)):
+            raise RuntimeError(
+                f"The `LightningModule.{hook}` hook was removed in v1.8. 
Please use"
+                f" `LightningModule.{alternative_hook}` instead."
+            )
+
+
 def _check_deprecated_callback_hooks(trainer: "pl.Trainer") -> None:
     for callback in trainer.callbacks:
+        if callable(getattr(callback, "on_configure_sharded_model", None)):
+            raise RuntimeError(
+                "The `on_configure_sharded_model` callback hook was removed in v1.8. Use `setup()` instead."
+            )
+        if callable(getattr(callback, "on_before_accelerator_backend_setup", None)):
+            raise RuntimeError(
+                "The `on_before_accelerator_backend_setup` callback hook was removed in v1.8. Use `setup()` instead."
+            )
         if is_overridden(method_name="on_load_checkpoint", instance=callback):
             rank_zero_deprecation(
-                f"`{callback.__class__.__name__}.on_load_checkpoint` will change its signature and behavior in v1.8."
+                f"`{callback.__class__.__name__}.on_load_checkpoint` will change its signature and behavior in v2.0."
                 " If you wish to load the state of the callback, use `load_state_dict` instead."
-                " In v1.8 `on_load_checkpoint(..., checkpoint)` will receive the entire loaded"
+                " In v2.0 `on_load_checkpoint(..., checkpoint)` will receive the entire loaded"
                 " checkpoint dictionary instead of callback state."
             )
+
+        for hook, alternative_hook in (
+            ["on_batch_start", "on_train_batch_start"],
+            ["on_batch_end", "on_train_batch_end"],
+        ):
+            if callable(getattr(callback, hook, None)):
+                raise RuntimeError(
+                    f"The `Callback.{hook}` hook was removed in v1.8. Please use `Callback.{alternative_hook}` instead."
+                )
+        for hook, alternative_hook in (
+            ["on_epoch_start", "on_<train/validation/test>_epoch_start"],
+            ["on_epoch_end", "on_<train/validation/test>_epoch_end"],
+        ):
+            if callable(getattr(callback, hook, None)):
+                raise RuntimeError(
+                    f"The `Callback.{hook}` hook was removed in v1.8. Please use `Callback.{alternative_hook}` instead."
+                )
+        for hook in ("on_pretrain_routine_start", "on_pretrain_routine_end"):
+            if callable(getattr(callback, hook, None)):
+                raise RuntimeError(
+                    f"The `Callback.{hook}` hook was removed in v1.8. 
Please use `Callback.on_fit_start` instead." + ) diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py index 1aeef083985f1..682bc56bf5489 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py @@ -21,6 +21,159 @@ from pytorch_lightning.demos.boring_classes import BoringModel +def test_v1_8_0_remove_on_batch_start_end(tmpdir): + class TestCallback(Callback): + def on_batch_start(self, *args, **kwargs): + print("on_batch_start") + + model = BoringModel() + trainer = Trainer( + callbacks=[TestCallback()], + fast_dev_run=True, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `Callback.on_batch_start` hook was removed in v1.8"): + trainer.fit(model) + + class TestCallback(Callback): + def on_batch_end(self, *args, **kwargs): + print("on_batch_end") + + trainer = Trainer( + callbacks=[TestCallback()], + fast_dev_run=True, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `Callback.on_batch_end` hook was removed in v1.8"): + trainer.fit(model) + + +def test_v1_8_0_on_configure_sharded_model(tmpdir): + class TestCallback(Callback): + def on_configure_sharded_model(self, trainer, model): + print("Configuring sharded model") + + model = BoringModel() + + trainer = Trainer( + callbacks=[TestCallback()], + max_epochs=1, + fast_dev_run=True, + enable_progress_bar=False, + logger=False, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `on_configure_sharded_model` callback hook was removed in v1.8."): + trainer.fit(model) + + +def test_v1_8_0_remove_on_epoch_start_end_lightning_module(tmpdir): + class CustomModel(BoringModel): + def on_epoch_start(self, *args, **kwargs): + print("on_epoch_start") + + model = CustomModel() + trainer = Trainer( + fast_dev_run=True, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The 
`LightningModule.on_epoch_start` hook was removed in v1.8"): + trainer.fit(model) + + class CustomModel(BoringModel): + def on_epoch_end(self, *args, **kwargs): + print("on_epoch_end") + + trainer = Trainer( + fast_dev_run=True, + default_root_dir=tmpdir, + ) + + model = CustomModel() + with pytest.raises(RuntimeError, match="The `LightningModule.on_epoch_end` hook was removed in v1.8"): + trainer.fit(model) + + +def test_v1_8_0_remove_on_pretrain_routine_start_end_lightning_module(tmpdir): + class CustomModel(BoringModel): + def on_pretrain_routine_start(self, *args, **kwargs): + print("foo") + + model = CustomModel() + trainer = Trainer( + fast_dev_run=True, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `LightningModule.on_pretrain_routine_start` hook was removed in v1.8"): + trainer.fit(model) + + class CustomModel(BoringModel): + def on_pretrain_routine_end(self, *args, **kwargs): + print("foo") + + trainer = Trainer( + fast_dev_run=True, + default_root_dir=tmpdir, + ) + + model = CustomModel() + with pytest.raises(RuntimeError, match="The `LightningModule.on_pretrain_routine_end` hook was removed in v1.8"): + trainer.fit(model) + + +def test_v1_8_0_on_before_accelerator_backend_setup(tmpdir): + class TestCallback(Callback): + def on_before_accelerator_backend_setup(self, *args, **kwargs): + print("on_before_accelerator_backend called.") + + model = BoringModel() + + trainer = Trainer( + callbacks=[TestCallback()], + max_epochs=1, + fast_dev_run=True, + enable_progress_bar=False, + logger=False, + default_root_dir=tmpdir, + ) + with pytest.raises( + RuntimeError, match="The `on_before_accelerator_backend_setup` callback hook was removed in v1.8" + ): + trainer.fit(model) + + +def test_v1_8_0_callback_on_pretrain_routine_start_end(tmpdir): + class TestCallback(Callback): + def on_pretrain_routine_start(self, trainer, pl_module): + print("on_pretrain_routine_start called.") + + model = BoringModel() + + trainer = Trainer( + 
callbacks=[TestCallback()], + fast_dev_run=True, + enable_progress_bar=False, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `Callback.on_pretrain_routine_start` hook was removed in v1.8"): + trainer.fit(model) + + class TestCallback(Callback): + def on_pretrain_routine_end(self, trainer, pl_module): + print("on_pretrain_routine_end called.") + + model = BoringModel() + + trainer = Trainer( + callbacks=[TestCallback()], + fast_dev_run=True, + enable_progress_bar=False, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `Callback.on_pretrain_routine_end` hook was removed in v1.8."): + trainer.fit(model) + + def test_deprecated_mc_save_checkpoint(): mc = ModelCheckpoint() trainer = Trainer() @@ -45,9 +198,9 @@ def on_load_checkpoint(self, trainer, pl_module, callback_state): default_root_dir=tmpdir, ) with pytest.deprecated_call( - match="`TestCallbackLoadHook.on_load_checkpoint` will change its signature and behavior in v1.8." + match="`TestCallbackLoadHook.on_load_checkpoint` will change its signature and behavior in v2.0." " If you wish to load the state of the callback, use `load_state_dict` instead." - r" In v1.8 `on_load_checkpoint\(..., checkpoint\)` will receive the entire loaded" + r" In v2.0 `on_load_checkpoint\(..., checkpoint\)` will receive the entire loaded" " checkpoint dictionary instead of callback state." 
): trainer.fit(model) From 34b9d5a9902db6f120d0ff90940651d75c162e7a Mon Sep 17 00:00:00 2001 From: awaelchli Date: Sat, 8 Oct 2022 20:06:38 +0200 Subject: [PATCH 13/30] move tests to 2.0 file --- .../deprecated_api/test_remove_1-8.py | 236 ------------------ .../deprecated_api/test_remove_2-0.py | 218 +++++++++++++++- 2 files changed, 217 insertions(+), 237 deletions(-) delete mode 100644 tests/tests_pytorch/deprecated_api/test_remove_1-8.py diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py deleted file mode 100644 index 682bc56bf5489..0000000000000 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py +++ /dev/null @@ -1,236 +0,0 @@ -# Copyright The PyTorch Lightning team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Test deprecated functionality which will be removed in v1.8.0.""" -from unittest import mock - -import pytest - -from pytorch_lightning import Callback, Trainer -from pytorch_lightning.callbacks import ModelCheckpoint -from pytorch_lightning.demos.boring_classes import BoringModel - - -def test_v1_8_0_remove_on_batch_start_end(tmpdir): - class TestCallback(Callback): - def on_batch_start(self, *args, **kwargs): - print("on_batch_start") - - model = BoringModel() - trainer = Trainer( - callbacks=[TestCallback()], - fast_dev_run=True, - default_root_dir=tmpdir, - ) - with pytest.raises(RuntimeError, match="The `Callback.on_batch_start` hook was removed in v1.8"): - trainer.fit(model) - - class TestCallback(Callback): - def on_batch_end(self, *args, **kwargs): - print("on_batch_end") - - trainer = Trainer( - callbacks=[TestCallback()], - fast_dev_run=True, - default_root_dir=tmpdir, - ) - with pytest.raises(RuntimeError, match="The `Callback.on_batch_end` hook was removed in v1.8"): - trainer.fit(model) - - -def test_v1_8_0_on_configure_sharded_model(tmpdir): - class TestCallback(Callback): - def on_configure_sharded_model(self, trainer, model): - print("Configuring sharded model") - - model = BoringModel() - - trainer = Trainer( - callbacks=[TestCallback()], - max_epochs=1, - fast_dev_run=True, - enable_progress_bar=False, - logger=False, - default_root_dir=tmpdir, - ) - with pytest.raises(RuntimeError, match="The `on_configure_sharded_model` callback hook was removed in v1.8."): - trainer.fit(model) - - -def test_v1_8_0_remove_on_epoch_start_end_lightning_module(tmpdir): - class CustomModel(BoringModel): - def on_epoch_start(self, *args, **kwargs): - print("on_epoch_start") - - model = CustomModel() - trainer = Trainer( - fast_dev_run=True, - default_root_dir=tmpdir, - ) - with pytest.raises(RuntimeError, match="The `LightningModule.on_epoch_start` hook was removed in v1.8"): - trainer.fit(model) - - class CustomModel(BoringModel): - def on_epoch_end(self, 
*args, **kwargs): - print("on_epoch_end") - - trainer = Trainer( - fast_dev_run=True, - default_root_dir=tmpdir, - ) - - model = CustomModel() - with pytest.raises(RuntimeError, match="The `LightningModule.on_epoch_end` hook was removed in v1.8"): - trainer.fit(model) - - -def test_v1_8_0_remove_on_pretrain_routine_start_end_lightning_module(tmpdir): - class CustomModel(BoringModel): - def on_pretrain_routine_start(self, *args, **kwargs): - print("foo") - - model = CustomModel() - trainer = Trainer( - fast_dev_run=True, - default_root_dir=tmpdir, - ) - with pytest.raises(RuntimeError, match="The `LightningModule.on_pretrain_routine_start` hook was removed in v1.8"): - trainer.fit(model) - - class CustomModel(BoringModel): - def on_pretrain_routine_end(self, *args, **kwargs): - print("foo") - - trainer = Trainer( - fast_dev_run=True, - default_root_dir=tmpdir, - ) - - model = CustomModel() - with pytest.raises(RuntimeError, match="The `LightningModule.on_pretrain_routine_end` hook was removed in v1.8"): - trainer.fit(model) - - -def test_v1_8_0_on_before_accelerator_backend_setup(tmpdir): - class TestCallback(Callback): - def on_before_accelerator_backend_setup(self, *args, **kwargs): - print("on_before_accelerator_backend called.") - - model = BoringModel() - - trainer = Trainer( - callbacks=[TestCallback()], - max_epochs=1, - fast_dev_run=True, - enable_progress_bar=False, - logger=False, - default_root_dir=tmpdir, - ) - with pytest.raises( - RuntimeError, match="The `on_before_accelerator_backend_setup` callback hook was removed in v1.8" - ): - trainer.fit(model) - - -def test_v1_8_0_callback_on_pretrain_routine_start_end(tmpdir): - class TestCallback(Callback): - def on_pretrain_routine_start(self, trainer, pl_module): - print("on_pretrain_routine_start called.") - - model = BoringModel() - - trainer = Trainer( - callbacks=[TestCallback()], - fast_dev_run=True, - enable_progress_bar=False, - default_root_dir=tmpdir, - ) - with pytest.raises(RuntimeError, 
match="The `Callback.on_pretrain_routine_start` hook was removed in v1.8"): - trainer.fit(model) - - class TestCallback(Callback): - def on_pretrain_routine_end(self, trainer, pl_module): - print("on_pretrain_routine_end called.") - - model = BoringModel() - - trainer = Trainer( - callbacks=[TestCallback()], - fast_dev_run=True, - enable_progress_bar=False, - default_root_dir=tmpdir, - ) - with pytest.raises(RuntimeError, match="The `Callback.on_pretrain_routine_end` hook was removed in v1.8."): - trainer.fit(model) - - -def test_deprecated_mc_save_checkpoint(): - mc = ModelCheckpoint() - trainer = Trainer() - with mock.patch.object(trainer, "save_checkpoint"), pytest.deprecated_call( - match=r"ModelCheckpoint.save_checkpoint\(\)` was deprecated in v1.6" - ): - mc.save_checkpoint(trainer) - - -def test_v1_8_0_callback_on_load_checkpoint_hook(tmpdir): - class TestCallbackLoadHook(Callback): - def on_load_checkpoint(self, trainer, pl_module, callback_state): - print("overriding on_load_checkpoint") - - model = BoringModel() - trainer = Trainer( - callbacks=[TestCallbackLoadHook()], - max_epochs=1, - fast_dev_run=True, - enable_progress_bar=False, - logger=False, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="`TestCallbackLoadHook.on_load_checkpoint` will change its signature and behavior in v2.0." - " If you wish to load the state of the callback, use `load_state_dict` instead." - r" In v2.0 `on_load_checkpoint\(..., checkpoint\)` will receive the entire loaded" - " checkpoint dictionary instead of callback state." 
- ): - trainer.fit(model) - - -def test_v1_8_0_callback_on_save_checkpoint_hook(tmpdir): - class TestCallbackSaveHookReturn(Callback): - def on_save_checkpoint(self, trainer, pl_module, checkpoint): - return {"returning": "on_save_checkpoint"} - - class TestCallbackSaveHookOverride(Callback): - def on_save_checkpoint(self, trainer, pl_module, checkpoint): - print("overriding without returning") - - model = BoringModel() - trainer = Trainer( - callbacks=[TestCallbackSaveHookReturn()], - max_epochs=1, - fast_dev_run=True, - enable_progress_bar=False, - logger=False, - default_root_dir=tmpdir, - ) - trainer.fit(model) - with pytest.deprecated_call( - match="Returning a value from `TestCallbackSaveHookReturn.on_save_checkpoint` is deprecated in v1.6" - " and will be removed in v1.8. Please override `Callback.state_dict`" - " to return state to be saved." - ): - trainer.save_checkpoint(tmpdir + "/path.ckpt") - - trainer.callbacks = [TestCallbackSaveHookOverride()] - trainer.save_checkpoint(tmpdir + "/pathok.ckpt") diff --git a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py index 548c7feec41e1..fe460cf9db4a3 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py @@ -17,7 +17,8 @@ import pytest import pytorch_lightning -from pytorch_lightning import Trainer +from pytorch_lightning import Callback, Trainer +from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.demos.boring_classes import BoringModel from tests_pytorch.callbacks.test_callbacks import OldStatefulCallback from tests_pytorch.helpers.runif import RunIf @@ -84,3 +85,218 @@ def test_v2_0_resume_from_checkpoint_trainer_constructor(tmpdir): trainer = Trainer(resume_from_checkpoint="trainer_arg_path") with pytest.raises(FileNotFoundError, match="Checkpoint at fit_arg_ckpt_path not found. 
Aborting training."): trainer.fit(model, ckpt_path="fit_arg_ckpt_path") + + +def test_v2_0_remove_on_batch_start_end(tmpdir): + class TestCallback(Callback): + def on_batch_start(self, *args, **kwargs): + print("on_batch_start") + + model = BoringModel() + trainer = Trainer( + callbacks=[TestCallback()], + fast_dev_run=True, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `Callback.on_batch_start` hook was removed in v1.8"): + trainer.fit(model) + + class TestCallback(Callback): + def on_batch_end(self, *args, **kwargs): + print("on_batch_end") + + trainer = Trainer( + callbacks=[TestCallback()], + fast_dev_run=True, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `Callback.on_batch_end` hook was removed in v1.8"): + trainer.fit(model) + + +def test_v2_0_on_configure_sharded_model(tmpdir): + class TestCallback(Callback): + def on_configure_sharded_model(self, trainer, model): + print("Configuring sharded model") + + model = BoringModel() + + trainer = Trainer( + callbacks=[TestCallback()], + max_epochs=1, + fast_dev_run=True, + enable_progress_bar=False, + logger=False, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `on_configure_sharded_model` callback hook was removed in v1.8."): + trainer.fit(model) + + +def test_v2_0_remove_on_epoch_start_end_lightning_module(tmpdir): + class CustomModel(BoringModel): + def on_epoch_start(self, *args, **kwargs): + print("on_epoch_start") + + model = CustomModel() + trainer = Trainer( + fast_dev_run=True, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `LightningModule.on_epoch_start` hook was removed in v1.8"): + trainer.fit(model) + + class CustomModel(BoringModel): + def on_epoch_end(self, *args, **kwargs): + print("on_epoch_end") + + trainer = Trainer( + fast_dev_run=True, + default_root_dir=tmpdir, + ) + + model = CustomModel() + with pytest.raises(RuntimeError, match="The `LightningModule.on_epoch_end` hook 
was removed in v1.8"): + trainer.fit(model) + + +def test_v2_0_remove_on_pretrain_routine_start_end_lightning_module(tmpdir): + class CustomModel(BoringModel): + def on_pretrain_routine_start(self, *args, **kwargs): + print("foo") + + model = CustomModel() + trainer = Trainer( + fast_dev_run=True, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `LightningModule.on_pretrain_routine_start` hook was removed in v1.8"): + trainer.fit(model) + + class CustomModel(BoringModel): + def on_pretrain_routine_end(self, *args, **kwargs): + print("foo") + + trainer = Trainer( + fast_dev_run=True, + default_root_dir=tmpdir, + ) + + model = CustomModel() + with pytest.raises(RuntimeError, match="The `LightningModule.on_pretrain_routine_end` hook was removed in v1.8"): + trainer.fit(model) + + +def test_v2_0_on_before_accelerator_backend_setup(tmpdir): + class TestCallback(Callback): + def on_before_accelerator_backend_setup(self, *args, **kwargs): + print("on_before_accelerator_backend called.") + + model = BoringModel() + + trainer = Trainer( + callbacks=[TestCallback()], + max_epochs=1, + fast_dev_run=True, + enable_progress_bar=False, + logger=False, + default_root_dir=tmpdir, + ) + with pytest.raises( + RuntimeError, match="The `on_before_accelerator_backend_setup` callback hook was removed in v1.8" + ): + trainer.fit(model) + + +def test_v2_0_callback_on_pretrain_routine_start_end(tmpdir): + class TestCallback(Callback): + def on_pretrain_routine_start(self, trainer, pl_module): + print("on_pretrain_routine_start called.") + + model = BoringModel() + + trainer = Trainer( + callbacks=[TestCallback()], + fast_dev_run=True, + enable_progress_bar=False, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `Callback.on_pretrain_routine_start` hook was removed in v1.8"): + trainer.fit(model) + + class TestCallback(Callback): + def on_pretrain_routine_end(self, trainer, pl_module): + print("on_pretrain_routine_end called.") + + 
model = BoringModel() + + trainer = Trainer( + callbacks=[TestCallback()], + fast_dev_run=True, + enable_progress_bar=False, + default_root_dir=tmpdir, + ) + with pytest.raises(RuntimeError, match="The `Callback.on_pretrain_routine_end` hook was removed in v1.8."): + trainer.fit(model) + + +def test_deprecated_mc_save_checkpoint(): + mc = ModelCheckpoint() + trainer = Trainer() + with mock.patch.object(trainer, "save_checkpoint"), pytest.deprecated_call( + match=r"ModelCheckpoint.save_checkpoint\(\)` was deprecated in v1.6" + ): + mc.save_checkpoint(trainer) + + +def test_v2_0_callback_on_load_checkpoint_hook(tmpdir): + class TestCallbackLoadHook(Callback): + def on_load_checkpoint(self, trainer, pl_module, callback_state): + print("overriding on_load_checkpoint") + + model = BoringModel() + trainer = Trainer( + callbacks=[TestCallbackLoadHook()], + max_epochs=1, + fast_dev_run=True, + enable_progress_bar=False, + logger=False, + default_root_dir=tmpdir, + ) + with pytest.deprecated_call( + match="`TestCallbackLoadHook.on_load_checkpoint` will change its signature and behavior in v2.0." + " If you wish to load the state of the callback, use `load_state_dict` instead." + r" In v2.0 `on_load_checkpoint\(..., checkpoint\)` will receive the entire loaded" + " checkpoint dictionary instead of callback state." 
+ ): + trainer.fit(model) + + +def test_v2_0_callback_on_save_checkpoint_hook(tmpdir): + class TestCallbackSaveHookReturn(Callback): + def on_save_checkpoint(self, trainer, pl_module, checkpoint): + return {"returning": "on_save_checkpoint"} + + class TestCallbackSaveHookOverride(Callback): + def on_save_checkpoint(self, trainer, pl_module, checkpoint): + print("overriding without returning") + + model = BoringModel() + trainer = Trainer( + callbacks=[TestCallbackSaveHookReturn()], + max_epochs=1, + fast_dev_run=True, + enable_progress_bar=False, + logger=False, + default_root_dir=tmpdir, + ) + trainer.fit(model) + with pytest.deprecated_call( + match="Returning a value from `TestCallbackSaveHookReturn.on_save_checkpoint` is deprecated in v1.6" + " and will be removed in v1.8. Please override `Callback.state_dict`" + " to return state to be saved." + ): + trainer.save_checkpoint(tmpdir + "/path.ckpt") + + trainer.callbacks = [TestCallbackSaveHookOverride()] + trainer.save_checkpoint(tmpdir + "/pathok.ckpt") From 7c2c526c8bfddf2419c8d029befacf225b43fe91 Mon Sep 17 00:00:00 2001 From: awaelchli Date: Sat, 8 Oct 2022 20:12:24 +0200 Subject: [PATCH 14/30] fix tests --- tests/tests_pytorch/callbacks/test_lambda_function.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/tests_pytorch/callbacks/test_lambda_function.py b/tests/tests_pytorch/callbacks/test_lambda_function.py index a9b020ca004bb..d946379270857 100644 --- a/tests/tests_pytorch/callbacks/test_lambda_function.py +++ b/tests/tests_pytorch/callbacks/test_lambda_function.py @@ -49,7 +49,7 @@ def call(hook, *_, **__): callbacks=[LambdaCallback(**hooks_args)], ) with pytest.deprecated_call( - match="`LambdaCallback.on_load_checkpoint` will change its signature and behavior in v1.8" + match="`LambdaCallback.on_load_checkpoint` will change its signature and behavior in v2.0" ): trainer.fit(model) ckpt_path = trainer.checkpoint_callback.best_model_path @@ -65,15 +65,15 @@ def call(hook, 
*_, **__): callbacks=[LambdaCallback(**hooks_args)], ) with pytest.deprecated_call( - match="`LambdaCallback.on_load_checkpoint` will change its signature and behavior in v1.8" + match="`LambdaCallback.on_load_checkpoint` will change its signature and behavior in v2.0" ): trainer.fit(model, ckpt_path=ckpt_path) with pytest.deprecated_call( - match="`LambdaCallback.on_load_checkpoint` will change its signature and behavior in v1.8" + match="`LambdaCallback.on_load_checkpoint` will change its signature and behavior in v2.0" ): trainer.test(model) with pytest.deprecated_call( - match="`LambdaCallback.on_load_checkpoint` will change its signature and behavior in v1.8" + match="`LambdaCallback.on_load_checkpoint` will change its signature and behavior in v2.0" ): trainer.predict(model) From e64e14c59b89af3ace385bdd8bd3cc70c5298649 Mon Sep 17 00:00:00 2001 From: awaelchli Date: Mon, 10 Oct 2022 10:20:23 +0200 Subject: [PATCH 15/30] raise error --- src/pytorch_lightning/trainer/trainer.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/pytorch_lightning/trainer/trainer.py b/src/pytorch_lightning/trainer/trainer.py index ce0d0b5a11a86..dcdef712bcb12 100644 --- a/src/pytorch_lightning/trainer/trainer.py +++ b/src/pytorch_lightning/trainer/trainer.py @@ -1362,7 +1362,13 @@ def _call_callbacks_on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None for callback in self.callbacks: with self.profiler.profile(f"[Callback]{callback.state_key}.on_save_checkpoint"): - callback.on_save_checkpoint(self, self.lightning_module, checkpoint) + state = callback.on_save_checkpoint(self, self.lightning_module, checkpoint) + if state is not None: + raise ValueError( + f"Returning a value from `{callback.__class__.__name__}.on_save_checkpoint` was deprecated in v1.6" + f" and is no longer supported as of v1.8. Please override `Callback.state_dict` to return state" + f" to be saved." 
+ ) if pl_module: # restore current_fx when nested context From 582a23329d8f22970dbe9d09df18ebd368d3beb0 Mon Sep 17 00:00:00 2001 From: awaelchli Date: Mon, 10 Oct 2022 10:25:39 +0200 Subject: [PATCH 16/30] error on legacy argument --- .../trainer/configuration_validator.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/pytorch_lightning/trainer/configuration_validator.py index 334771640a521..6cb244e4ea7e9 100644 --- a/src/pytorch_lightning/trainer/configuration_validator.py +++ b/src/pytorch_lightning/trainer/configuration_validator.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import inspect + import pytorch_lightning as pl from lightning_lite.utilities.warnings import PossibleUserWarning from pytorch_lightning.accelerators.ipu import IPUAccelerator @@ -221,6 +223,16 @@ def _check_deprecated_callback_hooks(trainer: "pl.Trainer") -> None: " v1.6 and will be removed in v1.8. Use `setup()` instead." ) + has_legacy_argument = "callback_state" in inspect.signature(callback.on_load_checkpoint).parameters + if is_overridden(method_name="on_load_checkpoint", instance=callback) and has_legacy_argument: + raise TypeError( + f"`{callback.__class__.__name__}.on_load_checkpoint` has changed its signature and behavior in v1.8." + " If you wish to load the state of the callback, use `load_state_dict` instead." + " As of 1.8, `on_load_checkpoint(..., checkpoint)` receives the entire loaded" + " checkpoint dictionary instead of the callback state. To continue using this hook and avoid this error" + " message, rename the `callback_state` argument to `checkpoint`." 
+ ) + for hook, alternative_hook in ( ["on_batch_start", "on_train_batch_start"], ["on_batch_end", "on_train_batch_end"], From 7e37d9531df7283e0c027fc0f4ac641580565dcc Mon Sep 17 00:00:00 2001 From: awaelchli Date: Mon, 10 Oct 2022 10:30:52 +0200 Subject: [PATCH 17/30] error instead of deprecation --- .../trainer/configuration_validator.py | 1 + src/pytorch_lightning/trainer/trainer.py | 1 + .../deprecated_api/test_remove_1-8.py | 53 ------------------- .../deprecated_api/test_remove_2-0.py | 51 +++++++++++++++++- 4 files changed, 52 insertions(+), 54 deletions(-) diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/pytorch_lightning/trainer/configuration_validator.py index 6cb244e4ea7e9..a93e0ed19406b 100644 --- a/src/pytorch_lightning/trainer/configuration_validator.py +++ b/src/pytorch_lightning/trainer/configuration_validator.py @@ -225,6 +225,7 @@ def _check_deprecated_callback_hooks(trainer: "pl.Trainer") -> None: has_legacy_argument = "callback_state" in inspect.signature(callback.on_load_checkpoint).parameters if is_overridden(method_name="on_load_checkpoint", instance=callback) and has_legacy_argument: + # TODO: Remove this error message in v2.0 raise TypeError( f"`{callback.__class__.__name__}.on_load_checkpoint` has changed its signature and behavior in v1.8." " If you wish to load the state of the callback, use `load_state_dict` instead." 
diff --git a/src/pytorch_lightning/trainer/trainer.py b/src/pytorch_lightning/trainer/trainer.py index dcdef712bcb12..130613e6fd791 100644 --- a/src/pytorch_lightning/trainer/trainer.py +++ b/src/pytorch_lightning/trainer/trainer.py @@ -1364,6 +1364,7 @@ def _call_callbacks_on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None with self.profiler.profile(f"[Callback]{callback.state_key}.on_save_checkpoint"): state = callback.on_save_checkpoint(self, self.lightning_module, checkpoint) if state is not None: + # TODO: Remove this error message in v2.0 raise ValueError( f"Returning a value from `{callback.__class__.__name__}.on_save_checkpoint` was deprecated in v1.6" f" and is no longer supported as of v1.8. Please override `Callback.state_dict` to return state" diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py index c2fdc632418ae..ff4b80f8898bd 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py @@ -200,56 +200,3 @@ def test_deprecated_mc_save_checkpoint(): match=r"ModelCheckpoint.save_checkpoint\(\)` was deprecated in v1.6" ): mc.save_checkpoint(trainer) - - -def test_v1_8_0_callback_on_load_checkpoint_hook(tmpdir): - class TestCallbackLoadHook(Callback): - def on_load_checkpoint(self, trainer, pl_module, callback_state): - print("overriding on_load_checkpoint") - - model = BoringModel() - trainer = Trainer( - callbacks=[TestCallbackLoadHook()], - max_epochs=1, - fast_dev_run=True, - enable_progress_bar=False, - logger=False, - default_root_dir=tmpdir, - ) - with pytest.deprecated_call( - match="`TestCallbackLoadHook.on_load_checkpoint` will change its signature and behavior in v1.8." - " If you wish to load the state of the callback, use `load_state_dict` instead." - r" In v1.8 `on_load_checkpoint\(..., checkpoint\)` will receive the entire loaded" - " checkpoint dictionary instead of callback state." 
- ): - trainer.fit(model) - - -def test_v1_8_0_callback_on_save_checkpoint_hook(tmpdir): - class TestCallbackSaveHookReturn(Callback): - def on_save_checkpoint(self, trainer, pl_module, checkpoint): - return {"returning": "on_save_checkpoint"} - - class TestCallbackSaveHookOverride(Callback): - def on_save_checkpoint(self, trainer, pl_module, checkpoint): - print("overriding without returning") - - model = BoringModel() - trainer = Trainer( - callbacks=[TestCallbackSaveHookReturn()], - max_epochs=1, - fast_dev_run=True, - enable_progress_bar=False, - logger=False, - default_root_dir=tmpdir, - ) - trainer.fit(model) - with pytest.deprecated_call( - match="Returning a value from `TestCallbackSaveHookReturn.on_save_checkpoint` is deprecated in v1.6" - " and will be removed in v1.8. Please override `Callback.state_dict`" - " to return state to be saved." - ): - trainer.save_checkpoint(tmpdir + "/path.ckpt") - - trainer.callbacks = [TestCallbackSaveHookOverride()] - trainer.save_checkpoint(tmpdir + "/pathok.ckpt") diff --git a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py index 548c7feec41e1..be42121eb034a 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py @@ -17,7 +17,7 @@ import pytest import pytorch_lightning -from pytorch_lightning import Trainer +from pytorch_lightning import Callback, Trainer from pytorch_lightning.demos.boring_classes import BoringModel from tests_pytorch.callbacks.test_callbacks import OldStatefulCallback from tests_pytorch.helpers.runif import RunIf @@ -84,3 +84,52 @@ def test_v2_0_resume_from_checkpoint_trainer_constructor(tmpdir): trainer = Trainer(resume_from_checkpoint="trainer_arg_path") with pytest.raises(FileNotFoundError, match="Checkpoint at fit_arg_ckpt_path not found. 
Aborting training."): trainer.fit(model, ckpt_path="fit_arg_ckpt_path") + + +def test_v1_2_0_callback_on_load_checkpoint_hook(tmpdir): + class TestCallbackLoadHook(Callback): + def on_load_checkpoint(self, trainer, pl_module, callback_state): + print("overriding on_load_checkpoint") + + model = BoringModel() + trainer = Trainer( + callbacks=[TestCallbackLoadHook()], + max_epochs=1, + fast_dev_run=True, + enable_progress_bar=False, + logger=False, + default_root_dir=tmpdir, + ) + with pytest.raises( + TypeError, match="`TestCallbackLoadHook.on_load_checkpoint` has changed its signature and behavior in v1.8." + ): + trainer.fit(model) + + +def test_v1_2_0_callback_on_save_checkpoint_hook(tmpdir): + class TestCallbackSaveHookReturn(Callback): + def on_save_checkpoint(self, trainer, pl_module, checkpoint): + return {"returning": "on_save_checkpoint"} + + class TestCallbackSaveHookOverride(Callback): + def on_save_checkpoint(self, trainer, pl_module, checkpoint): + print("overriding without returning") + + model = BoringModel() + trainer = Trainer( + callbacks=[TestCallbackSaveHookReturn()], + max_epochs=1, + fast_dev_run=True, + enable_progress_bar=False, + logger=False, + default_root_dir=tmpdir, + ) + trainer.fit(model) + with pytest.raises( + ValueError, + match="Returning a value from `TestCallbackSaveHookReturn.on_save_checkpoint` was deprecated in v1.6 and is no longer supported as of v1.8", + ): + trainer.save_checkpoint(tmpdir + "/path.ckpt") + + trainer.callbacks = [TestCallbackSaveHookOverride()] + trainer.save_checkpoint(tmpdir + "/pathok.ckpt") From 5440902b8328e605aa43ac79211a06f0bff4acb9 Mon Sep 17 00:00:00 2001 From: awaelchli Date: Mon, 10 Oct 2022 10:35:11 +0200 Subject: [PATCH 18/30] changelog --- src/pytorch_lightning/CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 724ccca7cbdd8..9b91171d37e62 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ 
b/src/pytorch_lightning/CHANGELOG.md @@ -126,6 +126,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - HPC checkpoints are now loaded automatically only in slurm environment when no specific value for `ckpt_path` has been set ([#14911](https://github.com/Lightning-AI/lightning/pull/14911)) +- The `Callback.on_load_checkpoint` now gets the full checkpoint dictionary and the `callback_state` argument was renamed `checkpoint` ([#14835](https://github.com/Lightning-AI/lightning/pull/14835)) + + ### Deprecated - Deprecated `LightningDeepSpeedModule` ([#14000](https://github.com/Lightning-AI/lightning/pull/14000)) @@ -302,6 +305,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Removed the deprecated `LightningDataModule.on_save/load_checkpoint` hooks ([#14909](https://github.com/Lightning-AI/lightning/pull/14909)) +- Removed support for returning a value in `Callback.on_save_checkpoint` in favor of implementing `Callback.state_dict` ([#14835](https://github.com/Lightning-AI/lightning/pull/14835)) + + + ### Fixed - Fixed an issue with `LightningLite.setup()` not setting the `.device` attribute correctly on the returned wrapper ([#14822](https://github.com/Lightning-AI/lightning/pull/14822)) From 5952296b2a1a521590bb52dcd595b992d067a6ed Mon Sep 17 00:00:00 2001 From: awaelchli Date: Mon, 10 Oct 2022 10:41:01 +0200 Subject: [PATCH 19/30] fix mypy --- src/pytorch_lightning/callbacks/pruning.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/pytorch_lightning/callbacks/pruning.py b/src/pytorch_lightning/callbacks/pruning.py index 7698b23741f63..ad5f8776c56b4 100644 --- a/src/pytorch_lightning/callbacks/pruning.py +++ b/src/pytorch_lightning/callbacks/pruning.py @@ -423,9 +423,7 @@ def move_to_cpu(tensor: Tensor) -> Tensor: return apply_to_collection(state_dict, Tensor, move_to_cpu) - def on_save_checkpoint( - self, trainer: "pl.Trainer", pl_module: 
LightningModule, checkpoint: Dict[str, Any] - ) -> Optional[dict]: + def on_save_checkpoint(self, trainer: "pl.Trainer", pl_module: LightningModule, checkpoint: Dict[str, Any]) -> None: if self._make_pruning_permanent: rank_zero_debug("`ModelPruning.on_save_checkpoint`. Pruning is made permanent for this checkpoint") # manually prune the weights so training can keep going with the same buffers From 4681d79610e78b24bbf6c5fe65ffd66c623c7827 Mon Sep 17 00:00:00 2001 From: awaelchli Date: Mon, 10 Oct 2022 10:42:06 +0200 Subject: [PATCH 20/30] flake --- tests/tests_pytorch/deprecated_api/test_remove_2-0.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py index be42121eb034a..a1dc78c32e445 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py @@ -127,7 +127,10 @@ def on_save_checkpoint(self, trainer, pl_module, checkpoint): trainer.fit(model) with pytest.raises( ValueError, - match="Returning a value from `TestCallbackSaveHookReturn.on_save_checkpoint` was deprecated in v1.6 and is no longer supported as of v1.8", + match=( + "Returning a value from `TestCallbackSaveHookReturn.on_save_checkpoint` was deprecated in v1.6 and is" + " no longer supported as of v1.8" + ), ): trainer.save_checkpoint(tmpdir + "/path.ckpt") From ee5b55a1a4804e84220116c1b3350abea99fe32f Mon Sep 17 00:00:00 2001 From: awaelchli Date: Mon, 10 Oct 2022 11:11:59 +0200 Subject: [PATCH 21/30] remove old tests --- .../tests_pytorch/callbacks/test_callbacks.py | 84 ------------------- 1 file changed, 84 deletions(-) diff --git a/tests/tests_pytorch/callbacks/test_callbacks.py b/tests/tests_pytorch/callbacks/test_callbacks.py index d8664c6a1b5b2..c8dca41305267 100644 --- a/tests/tests_pytorch/callbacks/test_callbacks.py +++ b/tests/tests_pytorch/callbacks/test_callbacks.py @@ -133,44 +133,6 @@ def 
test_resume_callback_state_saved_by_type_stateful(tmpdir): assert callback.state == 111 -class OldStatefulCallbackHooks(Callback): - def __init__(self, state): - self.state = state - - @property - def state_key(self): - return type(self) - - def on_save_checkpoint(self, trainer, pl_module, checkpoint): - return {"state": self.state} - - def on_load_checkpoint(self, trainer, pl_module, callback_state): - self.state = callback_state["state"] - - -def test_resume_callback_state_saved_by_type_hooks(tmpdir): - """Test that a legacy checkpoint that didn't use a state key before can still be loaded, using deprecated - on_save/load_checkpoint signatures.""" - # TODO: remove old on_save/load_checkpoint signature support in v1.8 - # in favor of Stateful and new on_save/load_checkpoint signatures - # on_save_checkpoint() -> dict, on_load_checkpoint(callback_state) - # will become - # on_save_checkpoint() -> None and on_load_checkpoint(checkpoint) - model = BoringModel() - callback = OldStatefulCallbackHooks(state=111) - trainer = Trainer(default_root_dir=tmpdir, max_steps=1, callbacks=[callback]) - with pytest.deprecated_call(): - trainer.fit(model) - ckpt_path = Path(trainer.checkpoint_callback.best_model_path) - assert ckpt_path.exists() - - callback = OldStatefulCallbackHooks(state=222) - trainer = Trainer(default_root_dir=tmpdir, max_steps=2, callbacks=[callback]) - with pytest.deprecated_call(): - trainer.fit(model, ckpt_path=ckpt_path) - assert callback.state == 111 - - def test_resume_incomplete_callbacks_list_warning(tmpdir): model = BoringModel() callback0 = ModelCheckpoint(monitor="epoch") @@ -198,49 +160,3 @@ def test_resume_incomplete_callbacks_list_warning(tmpdir): ) with no_warning_call(UserWarning, match="Please add the following callbacks:"): trainer.fit(model, ckpt_path=ckpt_path) - - -class AllStatefulCallback(Callback): - def __init__(self, state): - self.state = state - - @property - def state_key(self): - return type(self) - - def state_dict(self): - 
return {"new_state": self.state} - - def load_state_dict(self, state_dict): - assert state_dict == {"old_state_precedence": 10} - self.state = state_dict["old_state_precedence"] - - def on_save_checkpoint(self, trainer, pl_module, checkpoint): - return {"old_state_precedence": 10} - - def on_load_checkpoint(self, trainer, pl_module, callback_state): - assert callback_state == {"old_state_precedence": 10} - self.old_state_precedence = callback_state["old_state_precedence"] - - -def test_resume_callback_state_all(tmpdir): - """Test on_save/load_checkpoint state precedence over state_dict/load_state_dict until v1.8 removal.""" - # TODO: remove old on_save/load_checkpoint signature support in v1.8 - # in favor of Stateful and new on_save/load_checkpoint signatures - # on_save_checkpoint() -> dict, on_load_checkpoint(callback_state) - # will become - # on_save_checkpoint() -> None and on_load_checkpoint(checkpoint) - model = BoringModel() - callback = AllStatefulCallback(state=111) - trainer = Trainer(default_root_dir=tmpdir, max_steps=1, callbacks=[callback]) - with pytest.deprecated_call(): - trainer.fit(model) - ckpt_path = Path(trainer.checkpoint_callback.best_model_path) - assert ckpt_path.exists() - - callback = AllStatefulCallback(state=222) - trainer = Trainer(default_root_dir=tmpdir, max_steps=2, callbacks=[callback]) - with pytest.deprecated_call(): - trainer.fit(model, ckpt_path=ckpt_path) - assert callback.state == 10 - assert callback.old_state_precedence == 10 From 861cce1f3f25992e6c95a424ded379aa81d7af44 Mon Sep 17 00:00:00 2001 From: awaelchli Date: Mon, 10 Oct 2022 11:17:58 +0200 Subject: [PATCH 22/30] update tests --- tests/tests_pytorch/callbacks/test_lambda_function.py | 1 - tests/tests_pytorch/models/test_hooks.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/tests_pytorch/callbacks/test_lambda_function.py b/tests/tests_pytorch/callbacks/test_lambda_function.py index d7816a4f3988a..14a7bc54efcf6 100644 --- 
a/tests/tests_pytorch/callbacks/test_lambda_function.py +++ b/tests/tests_pytorch/callbacks/test_lambda_function.py @@ -36,7 +36,6 @@ def call(hook, *_, **__): hooks = get_members(Callback) - {"state_dict", "load_state_dict"} hooks_args = {h: partial(call, h) for h in hooks} - hooks_args["on_save_checkpoint"] = lambda *_: [checker.add("on_save_checkpoint")] model = CustomModel() diff --git a/tests/tests_pytorch/models/test_hooks.py b/tests/tests_pytorch/models/test_hooks.py index e6ef33883b1a4..a33d2a2a7117c 100644 --- a/tests/tests_pytorch/models/test_hooks.py +++ b/tests/tests_pytorch/models/test_hooks.py @@ -637,7 +637,7 @@ def test_trainer_model_hook_system_fit_no_val_and_resume_max_epochs(tmpdir): dict(name="Callback.setup", args=(trainer, model), kwargs=dict(stage="fit")), dict(name="setup", kwargs=dict(stage="fit")), dict(name="on_load_checkpoint", args=(loaded_ckpt,)), - dict(name="Callback.on_load_checkpoint", args=(trainer, model, {"foo": True})), + dict(name="Callback.on_load_checkpoint", args=(trainer, model, loaded_ckpt)), dict(name="Callback.load_state_dict", args=({"foo": True},)), dict(name="configure_sharded_model"), dict(name="Callback.on_configure_sharded_model", args=(trainer, model)), @@ -726,7 +726,7 @@ def test_trainer_model_hook_system_fit_no_val_and_resume_max_steps(tmpdir): dict(name="Callback.setup", args=(trainer, model), kwargs=dict(stage="fit")), dict(name="setup", kwargs=dict(stage="fit")), dict(name="on_load_checkpoint", args=(loaded_ckpt,)), - dict(name="Callback.on_load_checkpoint", args=(trainer, model, {"foo": True})), + dict(name="Callback.on_load_checkpoint", args=(trainer, model, loaded_ckpt)), dict(name="Callback.load_state_dict", args=({"foo": True},)), dict(name="configure_sharded_model"), dict(name="Callback.on_configure_sharded_model", args=(trainer, model)), From e812b5abbec7c42a722fd0460612d9ac0078ecc6 Mon Sep 17 00:00:00 2001 From: awaelchli Date: Mon, 10 Oct 2022 11:19:22 +0200 Subject: [PATCH 23/30] address review 
--- src/pytorch_lightning/trainer/configuration_validator.py | 2 +- tests/tests_pytorch/deprecated_api/test_remove_2-0.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/pytorch_lightning/trainer/configuration_validator.py index a93e0ed19406b..f363abf92fe21 100644 --- a/src/pytorch_lightning/trainer/configuration_validator.py +++ b/src/pytorch_lightning/trainer/configuration_validator.py @@ -226,7 +226,7 @@ def _check_deprecated_callback_hooks(trainer: "pl.Trainer") -> None: has_legacy_argument = "callback_state" in inspect.signature(callback.on_load_checkpoint).parameters if is_overridden(method_name="on_load_checkpoint", instance=callback) and has_legacy_argument: # TODO: Remove this error message in v2.0 - raise TypeError( + raise RuntimeError( f"`{callback.__class__.__name__}.on_load_checkpoint` has changed its signature and behavior in v1.8." " If you wish to load the state of the callback, use `load_state_dict` instead." " As of 1.8, `on_load_checkpoint(..., checkpoint)` receives the entire loaded" diff --git a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py index a1dc78c32e445..1ffe0e1732991 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py @@ -101,7 +101,7 @@ def on_load_checkpoint(self, trainer, pl_module, callback_state): default_root_dir=tmpdir, ) with pytest.raises( - TypeError, match="`TestCallbackLoadHook.on_load_checkpoint` has changed its signature and behavior in v1.8." + RuntimeError, match="`TestCallbackLoadHook.on_load_checkpoint` has changed its signature and behavior in v1.8." 
): trainer.fit(model) From 301cc7fbb037205678b58e83fde4318d6983622f Mon Sep 17 00:00:00 2001 From: awaelchli Date: Mon, 10 Oct 2022 12:15:52 +0200 Subject: [PATCH 24/30] version number --- .../deprecated_api/test_remove_2-0.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py index 2cea9986bf22d..c2d6b1c19692b 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py @@ -47,7 +47,7 @@ def test_v2_0_0_deprecated_ipus(_, monkeypatch): _ = Trainer(ipus=4) -def test_v2_0_resume_from_checkpoint_trainer_constructor(tmpdir): +def test_v2_0_0_resume_from_checkpoint_trainer_constructor(tmpdir): # test resume_from_checkpoint still works until v2.0 deprecation model = BoringModel() callback = OldStatefulCallback(state=111) @@ -87,7 +87,7 @@ def test_v2_0_resume_from_checkpoint_trainer_constructor(tmpdir): trainer.fit(model, ckpt_path="fit_arg_ckpt_path") -def test_v2_0_callback_on_load_checkpoint_hook(tmpdir): +def test_v2_0_0_callback_on_load_checkpoint_hook(tmpdir): class TestCallbackLoadHook(Callback): def on_load_checkpoint(self, trainer, pl_module, callback_state): print("overriding on_load_checkpoint") @@ -107,7 +107,7 @@ def on_load_checkpoint(self, trainer, pl_module, callback_state): trainer.fit(model) -def test_v2_0_callback_on_save_checkpoint_hook(tmpdir): +def test_v2_0_0_callback_on_save_checkpoint_hook(tmpdir): class TestCallbackSaveHookReturn(Callback): def on_save_checkpoint(self, trainer, pl_module, checkpoint): return {"returning": "on_save_checkpoint"} @@ -139,7 +139,7 @@ def on_save_checkpoint(self, trainer, pl_module, checkpoint): trainer.save_checkpoint(tmpdir + "/pathok.ckpt") -def test_v2_0_remove_on_batch_start_end(tmpdir): +def test_v2_0_0_remove_on_batch_start_end(tmpdir): class TestCallback(Callback): def on_batch_start(self, *args, 
**kwargs): print("on_batch_start") @@ -166,7 +166,7 @@ def on_batch_end(self, *args, **kwargs): trainer.fit(model) -def test_v2_0_on_configure_sharded_model(tmpdir): +def test_v2_0_0_on_configure_sharded_model(tmpdir): class TestCallback(Callback): def on_configure_sharded_model(self, trainer, model): print("Configuring sharded model") @@ -185,7 +185,7 @@ def on_configure_sharded_model(self, trainer, model): trainer.fit(model) -def test_v2_0_remove_on_epoch_start_end_lightning_module(tmpdir): +def test_v2_0_0_remove_on_epoch_start_end_lightning_module(tmpdir): class CustomModel(BoringModel): def on_epoch_start(self, *args, **kwargs): print("on_epoch_start") @@ -212,7 +212,7 @@ def on_epoch_end(self, *args, **kwargs): trainer.fit(model) -def test_v2_0_remove_on_pretrain_routine_start_end_lightning_module(tmpdir): +def test_v2_0_0_remove_on_pretrain_routine_start_end_lightning_module(tmpdir): class CustomModel(BoringModel): def on_pretrain_routine_start(self, *args, **kwargs): print("foo") @@ -239,7 +239,7 @@ def on_pretrain_routine_end(self, *args, **kwargs): trainer.fit(model) -def test_v2_0_on_before_accelerator_backend_setup(tmpdir): +def test_v2_0_0_on_before_accelerator_backend_setup(tmpdir): class TestCallback(Callback): def on_before_accelerator_backend_setup(self, *args, **kwargs): print("on_before_accelerator_backend called.") @@ -260,7 +260,7 @@ def on_before_accelerator_backend_setup(self, *args, **kwargs): trainer.fit(model) -def test_v2_0_callback_on_pretrain_routine_start_end(tmpdir): +def test_v2_0_0_callback_on_pretrain_routine_start_end(tmpdir): class TestCallback(Callback): def on_pretrain_routine_start(self, trainer, pl_module): print("on_pretrain_routine_start called.") From 2d16b86ebeb3c1e5b3e7424d5cd64306ff4d5efe Mon Sep 17 00:00:00 2001 From: awaelchli Date: Mon, 10 Oct 2022 13:11:14 +0200 Subject: [PATCH 25/30] fix bad merge --- .../deprecated_api/test_remove_2-0.py | 52 ------------------- 1 file changed, 52 deletions(-) diff --git 
a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py index ea927980bce01..c2d6b1c19692b 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py @@ -139,58 +139,6 @@ def on_save_checkpoint(self, trainer, pl_module, checkpoint): trainer.save_checkpoint(tmpdir + "/pathok.ckpt") -def test_v2_0_0_callback_on_load_checkpoint_hook(tmpdir): - class TestCallbackLoadHook(Callback): - def on_load_checkpoint(self, trainer, pl_module, callback_state): - print("overriding on_load_checkpoint") - - model = BoringModel() - trainer = Trainer( - callbacks=[TestCallbackLoadHook()], - max_epochs=1, - fast_dev_run=True, - enable_progress_bar=False, - logger=False, - default_root_dir=tmpdir, - ) - with pytest.raises( - RuntimeError, match="`TestCallbackLoadHook.on_load_checkpoint` has changed its signature and behavior in v1.8." - ): - trainer.fit(model) - - -def test_v2_0_0_callback_on_save_checkpoint_hook(tmpdir): - class TestCallbackSaveHookReturn(Callback): - def on_save_checkpoint(self, trainer, pl_module, checkpoint): - return {"returning": "on_save_checkpoint"} - - class TestCallbackSaveHookOverride(Callback): - def on_save_checkpoint(self, trainer, pl_module, checkpoint): - print("overriding without returning") - - model = BoringModel() - trainer = Trainer( - callbacks=[TestCallbackSaveHookReturn()], - max_epochs=1, - fast_dev_run=True, - enable_progress_bar=False, - logger=False, - default_root_dir=tmpdir, - ) - trainer.fit(model) - with pytest.raises( - ValueError, - match=( - "Returning a value from `TestCallbackSaveHookReturn.on_save_checkpoint` was deprecated in v1.6 and is" - " no longer supported as of v1.8" - ), - ): - trainer.save_checkpoint(tmpdir + "/path.ckpt") - - trainer.callbacks = [TestCallbackSaveHookOverride()] - trainer.save_checkpoint(tmpdir + "/pathok.ckpt") - - def test_v2_0_0_remove_on_batch_start_end(tmpdir): class 
TestCallback(Callback): def on_batch_start(self, *args, **kwargs): From 0db92d831c3751fed6ba7477345bb9b3996e7519 Mon Sep 17 00:00:00 2001 From: awaelchli Date: Mon, 10 Oct 2022 13:14:27 +0200 Subject: [PATCH 26/30] fix tests checking for old deprecation warnings --- .../callbacks/test_lambda_function.py | 22 ++++--------------- .../trainer/logging_/test_logger_connector.py | 9 +++----- 2 files changed, 7 insertions(+), 24 deletions(-) diff --git a/tests/tests_pytorch/callbacks/test_lambda_function.py b/tests/tests_pytorch/callbacks/test_lambda_function.py index dbc97d11de5ff..a3091a23bf47c 100644 --- a/tests/tests_pytorch/callbacks/test_lambda_function.py +++ b/tests/tests_pytorch/callbacks/test_lambda_function.py @@ -13,8 +13,6 @@ # limitations under the License. from functools import partial -import pytest - from pytorch_lightning import seed_everything, Trainer from pytorch_lightning.callbacks import Callback, LambdaCallback from pytorch_lightning.demos.boring_classes import BoringModel @@ -47,10 +45,7 @@ def call(hook, *_, **__): limit_val_batches=1, callbacks=[LambdaCallback(**hooks_args)], ) - with pytest.deprecated_call( - match="`LambdaCallback.on_load_checkpoint` will change its signature and behavior in v2.0" - ): - trainer.fit(model) + trainer.fit(model) ckpt_path = trainer.checkpoint_callback.best_model_path # raises KeyboardInterrupt and loads from checkpoint @@ -63,17 +58,8 @@ def call(hook, *_, **__): limit_predict_batches=1, callbacks=[LambdaCallback(**hooks_args)], ) - with pytest.deprecated_call( - match="`LambdaCallback.on_load_checkpoint` will change its signature and behavior in v2.0" - ): - trainer.fit(model, ckpt_path=ckpt_path) - with pytest.deprecated_call( - match="`LambdaCallback.on_load_checkpoint` will change its signature and behavior in v2.0" - ): - trainer.test(model) - with pytest.deprecated_call( - match="`LambdaCallback.on_load_checkpoint` will change its signature and behavior in v2.0" - ): - trainer.predict(model) + 
trainer.fit(model, ckpt_path=ckpt_path) + trainer.test(model) + trainer.predict(model) assert checker == hooks diff --git a/tests/tests_pytorch/trainer/logging_/test_logger_connector.py b/tests/tests_pytorch/trainer/logging_/test_logger_connector.py index 3c69f88f99b04..3c68bb38ccc8e 100644 --- a/tests/tests_pytorch/trainer/logging_/test_logger_connector.py +++ b/tests/tests_pytorch/trainer/logging_/test_logger_connector.py @@ -226,8 +226,7 @@ def test_fx_validator_integration(tmpdir): limit_predict_batches=1, callbacks=callback, ) - with pytest.deprecated_call(match="was deprecated in"): - trainer.fit(model) + trainer.fit(model) not_supported.update( { @@ -238,8 +237,7 @@ def test_fx_validator_integration(tmpdir): "on_test_end": "You can't", } ) - with pytest.deprecated_call(match="was deprecated in"): - trainer.test(model, verbose=False) + trainer.test(model, verbose=False) not_supported.update({k: "result collection is not registered yet" for k in not_supported}) not_supported.update( @@ -255,8 +253,7 @@ def test_fx_validator_integration(tmpdir): "on_predict_end": "result collection is not registered yet", } ) - with pytest.deprecated_call(match="was deprecated in"): - trainer.predict(model) + trainer.predict(model) @RunIf(min_cuda_gpus=2) From 8efb6e9895e8bd602a7ad2c835e6e1135ceaf2f0 Mon Sep 17 00:00:00 2001 From: awaelchli Date: Mon, 10 Oct 2022 13:21:55 +0200 Subject: [PATCH 27/30] one more --- .../callbacks/model_checkpoint.py | 17 +++++------------ .../deprecated_api/test_remove_2-0.py | 7 ++++--- 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/src/pytorch_lightning/callbacks/model_checkpoint.py b/src/pytorch_lightning/callbacks/model_checkpoint.py index 62001d50b1c85..beb457407eaa1 100644 --- a/src/pytorch_lightning/callbacks/model_checkpoint.py +++ b/src/pytorch_lightning/callbacks/model_checkpoint.py @@ -351,19 +351,12 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None: self.best_model_path = state_dict["best_model_path"] - 
def save_checkpoint(self, trainer: "pl.Trainer") -> None: # pragma: no-cover - """Performs the main logic around saving a checkpoint. - - This method runs on all ranks. It is the responsibility of `trainer.save_checkpoint` to correctly handle the - behaviour in distributed training, i.e., saving only on rank 0 for data parallel use cases. - """ - rank_zero_deprecation( - f"`{self.__class__.__name__}.save_checkpoint()` was deprecated in v1.6 and will be removed in v1.8." - " Instead, you can use `trainer.save_checkpoint()` to manually save a checkpoint." + def save_checkpoint(self, trainer: "pl.Trainer") -> None: + raise NotImplementedError( + f"`{self.__class__.__name__}.save_checkpoint()` was deprecated in v1.6 and is no longer supported" + f" as of 1.8. Please use `trainer.save_checkpoint()` to manually save a checkpoint. This method will be" + f" removed completely in v2.0." ) - monitor_candidates = self._monitor_candidates(trainer) - self._save_topk_checkpoint(trainer, monitor_candidates) - self._save_last_checkpoint(trainer, monitor_candidates) def _save_topk_checkpoint(self, trainer: "pl.Trainer", monitor_candidates: Dict[str, Tensor]) -> None: if self.save_top_k == 0: diff --git a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py index c2d6b1c19692b..9457f264fd5be 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py @@ -292,10 +292,11 @@ def on_pretrain_routine_end(self, trainer, pl_module): trainer.fit(model) -def test_deprecated_mc_save_checkpoint(): +def test_v2_0_0_deprecated_mc_save_checkpoint(): mc = ModelCheckpoint() trainer = Trainer() - with mock.patch.object(trainer, "save_checkpoint"), pytest.deprecated_call( - match=r"ModelCheckpoint.save_checkpoint\(\)` was deprecated in v1.6" + with mock.patch.object(trainer, "save_checkpoint"), pytest.raises( + NotImplementedError, + match=r"ModelCheckpoint.save_checkpoint\(\)` 
was deprecated in v1.6 and is no longer supported as of 1.8.", ): mc.save_checkpoint(trainer) From 7635128ab9f9b2c48185f07b952e8a88e0e642c1 Mon Sep 17 00:00:00 2001 From: awaelchli Date: Mon, 10 Oct 2022 13:24:32 +0200 Subject: [PATCH 28/30] remove unused imports --- src/pytorch_lightning/callbacks/model_checkpoint.py | 2 +- src/pytorch_lightning/trainer/configuration_validator.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pytorch_lightning/callbacks/model_checkpoint.py b/src/pytorch_lightning/callbacks/model_checkpoint.py index beb457407eaa1..256f913659529 100644 --- a/src/pytorch_lightning/callbacks/model_checkpoint.py +++ b/src/pytorch_lightning/callbacks/model_checkpoint.py @@ -39,7 +39,7 @@ from lightning_lite.utilities.types import _PATH from pytorch_lightning.callbacks import Checkpoint from pytorch_lightning.utilities.exceptions import MisconfigurationException -from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_info, rank_zero_warn +from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_warn from pytorch_lightning.utilities.types import STEP_OUTPUT log = logging.getLogger(__name__) diff --git a/src/pytorch_lightning/trainer/configuration_validator.py b/src/pytorch_lightning/trainer/configuration_validator.py index a5fa80378b8a3..b14f955312cdd 100644 --- a/src/pytorch_lightning/trainer/configuration_validator.py +++ b/src/pytorch_lightning/trainer/configuration_validator.py @@ -20,7 +20,7 @@ from pytorch_lightning.trainer.states import TrainerFn from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.utilities.model_helpers import is_overridden -from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_warn +from pytorch_lightning.utilities.rank_zero import rank_zero_warn from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature From 498e981175e9ded8c070cfe5cdbac24f626c65e4 Mon Sep 
17 00:00:00 2001 From: awaelchli Date: Mon, 10 Oct 2022 16:17:35 +0200 Subject: [PATCH 29/30] undo app change --- examples/app_components/python/component_tracer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/app_components/python/component_tracer.py b/examples/app_components/python/component_tracer.py index a3fa69ebf8227..9edc48cf51a29 100644 --- a/examples/app_components/python/component_tracer.py +++ b/examples/app_components/python/component_tracer.py @@ -27,7 +27,7 @@ def on_train_start(self, trainer, pl_module) -> None: print("Even the Lightning Work is available and state transfer works !") print(self.lightning_work) - def on_train_batch_end(self, trainer, *_) -> None: + def on_batch_end(self, trainer, *_) -> None: # On every batch end, collects some information. # This is communicated automatically to the rest of the app, # so you can track your training in real time in the Lightning App UI. From ed2db31c3aa595a941e4e75701311d0349dc68cd Mon Sep 17 00:00:00 2001 From: awaelchli Date: Mon, 10 Oct 2022 16:57:38 +0200 Subject: [PATCH 30/30] reset app changes --- src/lightning_app/utilities/introspection.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/lightning_app/utilities/introspection.py b/src/lightning_app/utilities/introspection.py index 9f04dd4bb985d..856f6d6ea84a8 100644 --- a/src/lightning_app/utilities/introspection.py +++ b/src/lightning_app/utilities/introspection.py @@ -88,6 +88,8 @@ class LightningModuleVisitor(LightningVisitor): "on_fit_end", "on_load_checkpoint", "on_save_checkpoint", + "on_pretrain_routine_start", + "on_pretrain_routine_end", "on_test_batch_start", "on_test_batch_end", "on_test_epoch_start", @@ -182,12 +184,18 @@ class LightningCallbackVisitor(LightningVisitor): "on_validation_epoch_end", "on_test_epoch_start", "on_test_epoch_end", + "on_epoch_start", + "on_epoch_end", + "on_batch_start", "on_validation_batch_start", "on_validation_batch_end", "on_test_batch_start", 
"on_test_batch_end", + "on_batch_end", "on_train_start", "on_train_end", + "on_pretrain_routine_start", + "on_pretrain_routine_end", "on_validation_start", "on_validation_end", "on_test_start",