
Commit b7959e3

Remove deprecated checkpoint_callback flag in Trainer (#13027)
* Removed lines pertinent to `checkpoint_callback`
* Removed the `checkpoint_callback` flag
* Updated CHANGELOG
* Removed the deprecation test for the `checkpoint_callback` argument
* Updated line in simple_classif_training.py
* Updated docs
* Updated simple_classif_training.py, removing `enable_checkpointing`
1 parent 3f78c4c commit b7959e3

6 files changed: 5 additions, 32 deletions

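At the API level, the change is a straight rename that finishes the v1.5 deprecation cycle. A minimal sketch of the migration (the flag names come from the diff below; everything else is illustrative):

```python
from pytorch_lightning import Trainer

# Pre-1.7, deprecated since v1.5:
#   trainer = Trainer(checkpoint_callback=False)

# From this commit on, only the replacement flag exists:
trainer = Trainer(enable_checkpointing=False)
```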

CHANGELOG.md

Lines changed: 3 additions & 0 deletions
```diff
@@ -112,6 +112,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Removed
 
+- Removed the deprecated `checkpoint_callback` argument from the `Trainer` constructor ([#13027](https://github.com/PyTorchLightning/pytorch-lightning/pull/13027))
+
+
 - Removed the deprecated `TestTubeLogger` ([#12859](https://github.com/PyTorchLightning/pytorch-lightning/pull/12859))
 
 
```

docs/source/common/trainer.rst

Lines changed: 0 additions & 6 deletions
```diff
@@ -535,12 +535,6 @@ Example::
     # run val loop every 10 training epochs
     trainer = Trainer(check_val_every_n_epoch=10)
 
-checkpoint_callback
-^^^^^^^^^^^^^^^^^^^
-
-.. warning:: `checkpoint_callback` has been deprecated in v1.5 and will be removed in v1.7.
-   To disable checkpointing, pass ``enable_checkpointing = False`` to the Trainer instead.
-
 
 default_root_dir
 ^^^^^^^^^^^^^^^^
```
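The deleted docs section pointed users at `enable_checkpointing`. For anyone who relied on the old boolean to customize checkpointing, the idiomatic replacement is an explicit `ModelCheckpoint` callback; a hedged sketch (the `dirpath`/`monitor` values are placeholders, not taken from this commit):

```python
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint

# An explicit callback supersedes both the removed flag and the default.
checkpoint = ModelCheckpoint(dirpath="checkpoints/", monitor="val_loss")
trainer = Trainer(callbacks=[checkpoint])
```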

legacy/simple_classif_training.py

Lines changed: 0 additions & 1 deletion
```diff
@@ -156,7 +156,6 @@ def main_train(dir_path, max_epochs: int = 20):
         default_root_dir=dir_path,
         gpus=int(torch.cuda.is_available()),
         precision=(16 if torch.cuda.is_available() else 32),
-        checkpoint_callback=True,
         callbacks=[stopping],
         min_epochs=3,
         max_epochs=max_epochs,
```

pytorch_lightning/trainer/connectors/callback_connector.py

Lines changed: 2 additions & 11 deletions
```diff
@@ -43,7 +43,6 @@ def __init__(self, trainer):
     def on_trainer_init(
         self,
         callbacks: Optional[Union[List[Callback], Callback]],
-        checkpoint_callback: Optional[bool],
         enable_checkpointing: bool,
         enable_progress_bar: bool,
         process_position: int,
@@ -71,7 +70,7 @@ def on_trainer_init(
 
         # configure checkpoint callback
         # pass through the required args to figure out defaults
-        self._configure_checkpoint_callbacks(checkpoint_callback, enable_checkpointing)
+        self._configure_checkpoint_callbacks(enable_checkpointing)
 
         # configure the timer callback.
         # responsible to stop the training when max_time is reached.
@@ -133,15 +132,7 @@ def _configure_accumulated_gradients(
         self.trainer.accumulate_grad_batches = grad_accum_callback.get_accumulate_grad_batches(0)
         self.trainer.accumulation_scheduler = grad_accum_callback
 
-    def _configure_checkpoint_callbacks(self, checkpoint_callback: Optional[bool], enable_checkpointing: bool) -> None:
-        if checkpoint_callback is not None:
-            rank_zero_deprecation(
-                f"Setting `Trainer(checkpoint_callback={checkpoint_callback})` is deprecated in v1.5 and will "
-                f"be removed in v1.7. Please consider using `Trainer(enable_checkpointing={checkpoint_callback})`."
-            )
-            # if both are set then checkpoint only if both are True
-            enable_checkpointing = checkpoint_callback and enable_checkpointing
-
+    def _configure_checkpoint_callbacks(self, enable_checkpointing: bool) -> None:
         if self.trainer.checkpoint_callbacks:
             if not enable_checkpointing:
                 raise MisconfigurationException(
```
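With the deprecation branch gone, `_configure_checkpoint_callbacks` keeps only the guard visible at the end of the hunk: a user-supplied checkpoint callback combined with checkpointing disabled is a hard error. A sketch of that expected behaviour (not a test shipped in this commit):

```python
import pytest
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.utilities.exceptions import MisconfigurationException

# A user-defined ModelCheckpoint contradicts enable_checkpointing=False,
# so the connector raises rather than silently dropping one of the two.
with pytest.raises(MisconfigurationException):
    Trainer(enable_checkpointing=False, callbacks=[ModelCheckpoint()])
```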

pytorch_lightning/trainer/trainer.py

Lines changed: 0 additions & 9 deletions
```diff
@@ -132,7 +132,6 @@ class Trainer(
     def __init__(
         self,
         logger: Union[Logger, Iterable[Logger], bool] = True,
-        checkpoint_callback: Optional[bool] = None,
         enable_checkpointing: bool = True,
         callbacks: Optional[Union[List[Callback], Callback]] = None,
         default_root_dir: Optional[str] = None,
@@ -234,13 +233,6 @@ def __init__(
             callbacks: Add a callback or list of callbacks.
                 Default: ``None``.
 
-            checkpoint_callback: If ``True``, enable checkpointing.
-                Default: ``None``.
-
-                .. deprecated:: v1.5
-                    ``checkpoint_callback`` has been deprecated in v1.5 and will be removed in v1.7.
-                    Please consider using ``enable_checkpointing`` instead.
-
             enable_checkpointing: If ``True``, enable checkpointing.
                 It will configure a default ModelCheckpoint callback if there is no user-defined ModelCheckpoint in
                 :paramref:`~pytorch_lightning.trainer.trainer.Trainer.callbacks`.
@@ -514,7 +506,6 @@ def __init__(
         # Declare attributes to be set in _callback_connector on_trainer_init
         self._callback_connector.on_trainer_init(
             callbacks,
-            checkpoint_callback,
             enable_checkpointing,
             enable_progress_bar,
             process_position,
```
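The surviving `enable_checkpointing` docstring describes the default path: when no user-defined `ModelCheckpoint` is passed, the connector configures one. A hedged illustration of that behaviour (assuming PL 1.x defaults; nothing here is added by this commit):

```python
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint

trainer = Trainer(enable_checkpointing=True)  # True is the default

# The connector added a default ModelCheckpoint to the callback list.
assert any(isinstance(cb, ModelCheckpoint) for cb in trainer.callbacks)
```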

tests/deprecated_api/test_remove_1-7.py

Lines changed: 0 additions & 5 deletions
```diff
@@ -154,11 +154,6 @@ def test_v1_7_0_deprecate_lightning_distributed(tmpdir):
     _ = LightningDistributed()
 
 
-def test_v1_7_0_checkpoint_callback_trainer_constructor(tmpdir):
-    with pytest.deprecated_call(match=r"Setting `Trainer\(checkpoint_callback=True\)` is deprecated in v1.5"):
-        _ = Trainer(checkpoint_callback=True)
-
-
 def test_v1_7_0_deprecate_on_post_move_to_device(tmpdir):
     class TestModel(BoringModel):
         def on_post_move_to_device(self):
```
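Since the keyword itself is gone from `Trainer.__init__`, passing it now fails at argument binding rather than through a deprecation warning. If one wanted to pin that down, a speculative sketch (no such test is added by this commit):

```python
import pytest
from pytorch_lightning import Trainer

# `checkpoint_callback` is no longer a parameter, so Python rejects it
# as an unexpected keyword argument.
with pytest.raises(TypeError):
    Trainer(checkpoint_callback=True)
```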
