
Commit 5c74d9d

Merge branch 'master' into refactor/remove-broken-code
2 parents c18cc53 + d4bcafa commit 5c74d9d

File tree: 6 files changed, +27 −218 lines changed


src/pytorch_lightning/CHANGELOG.md

Lines changed: 6 additions & 0 deletions
@@ -84,6 +84,12 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Removed the legacy and unused `Trainer.get_deprecated_arg_names()` ([#14415](https://github.com/Lightning-AI/lightning/pull/14415))
 
 
+- Removed the deprecated `on_train_batch_end(outputs)` format when multiple optimizers are used and TBPTT is enabled ([#14373](https://github.com/PyTorchLightning/pytorch-lightning/pull/14373))
+
+
+- Removed the deprecated `training_epoch_end(outputs)` format when multiple optimizers are used and TBPTT is enabled ([#14373](https://github.com/PyTorchLightning/pytorch-lightning/pull/14373))
+
+
 - Removed the experimental `pytorch_lightning.utiltiies.meta` functions in favor of built-in https://github.com/pytorch/torchdistx support ([#13868](https://github.com/Lightning-AI/lightning/pull/13868))
 
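For context, the two entries above refer to the output layout the hooks receive from v1.8 on when several optimizers are combined with truncated backpropagation through time. The snippet below is only an illustration of that nesting convention (placeholder dicts, no Lightning imports); it is not code from this commit.

```python
# Illustration of the nesting convention named in the changelog entries above.
# The dict values are placeholders; only the shapes matter.
n_optimizers, tbptt_steps, n_batches = 2, 2, 3

# on_train_batch_end(outputs, ...): outputs[tbptt_step][optimizer_idx]
batch_end_outputs = [
    [{"loss": 0.0}, {"loss": 0.1}],  # split 0, one entry per optimizer
    [{"loss": 0.2}, {"loss": 0.3}],  # split 1
]
assert len(batch_end_outputs) == tbptt_steps
assert len(batch_end_outputs[0]) == n_optimizers

# training_epoch_end(outputs): outputs[batch_idx][tbptt_step][optimizer_idx]
epoch_end_outputs = [batch_end_outputs for _ in range(n_batches)]
assert len(epoch_end_outputs) == n_batches
assert len(epoch_end_outputs[0]) == tbptt_steps
assert len(epoch_end_outputs[0][0]) == n_optimizers
```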

src/pytorch_lightning/loops/epoch/training_epoch_loop.py

Lines changed: 2 additions & 37 deletions
@@ -22,7 +22,7 @@
 from pytorch_lightning import loops  # import as loops to avoid circular imports
 from pytorch_lightning.loops.batch import TrainingBatchLoop
 from pytorch_lightning.loops.batch.training_batch_loop import _OUTPUTS_TYPE as _BATCH_OUTPUTS_TYPE
-from pytorch_lightning.loops.utilities import _get_active_optimizers, _is_max_limit_reached, _v1_8_output_format
+from pytorch_lightning.loops.utilities import _get_active_optimizers, _is_max_limit_reached
 from pytorch_lightning.trainer.connectors.logger_connector.result import _ResultCollection
 from pytorch_lightning.trainer.progress import BatchProgress, SchedulerProgress
 from pytorch_lightning.trainer.supporters import CombinedLoader
@@ -31,7 +31,7 @@
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.utilities.fetching import AbstractDataFetcher, DataLoaderIterDataFetcher
 from pytorch_lightning.utilities.model_helpers import is_overridden
-from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_warn
+from pytorch_lightning.utilities.rank_zero import rank_zero_warn
 from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
 from pytorch_lightning.utilities.warnings import WarningCache
 
@@ -342,24 +342,6 @@ def _prepare_outputs_training_batch_end(
         )
 
         array = np.array(batch_output, dtype=object)
-        # TODO: remove in v1.8
-        if (
-            num_optimizers > 1
-            and lightning_module.truncated_bptt_steps > 0
-            and is_overridden("on_train_batch_end", lightning_module)
-            and not _v1_8_output_format(lightning_module.on_train_batch_end)
-        ):
-            rank_zero_deprecation(
-                "You are training with multiple optimizers AND truncated backpropagation through time enabled."
-                " The current format of the `on_train_batch_end(outputs, ...)` is a 2d list with sizes"
-                " (n_optimizers, tbptt_steps), however, this has been deprecated and will change in version v1.8 to"
-                " (tbptt_steps, n_optimizers). You can update your code by adding the following parameter to your"
-                " hook signature: `on_train_batch_end(outputs, ..., new_format=True)`."
-            )
-            # (tbptt_steps, n_opt) -> (n_opt, tbptt_steps)
-            if array.ndim == 1:
-                array = np.expand_dims(array, 1)
-            array = array.transpose((1, 0))
         # squeeze all single-element dimensions
         array = array.squeeze()
         array = array.tolist()
@@ -384,23 +366,6 @@ def _prepare_outputs_training_epoch_end(
         )
 
         array = _recursive_pad(batch_outputs)
-        # TODO: remove in v1.8
-        if (
-            num_optimizers > 1
-            and lightning_module.truncated_bptt_steps > 0
-            and not _v1_8_output_format(lightning_module.on_train_epoch_end)
-        ):
-            rank_zero_deprecation(
-                "You are training with multiple optimizers AND truncated backpropagation through time enabled."
-                " The current format of the `training_epoch_end(outputs)` is a 3d list with sizes"
-                " (n_optimizers, n_batches, tbptt_steps), however, this has been deprecated and will change in version"
-                " v1.8 to (n_batches, tbptt_steps, n_optimizers). You can update your code by adding the following"
-                " parameter to your hook signature: `training_epoch_end(outputs, new_format=True)`."
-            )
-            # (n_batches, tbptt_steps, n_opt) -> (n_opt, n_batches, tbptt_steps)
-            if array.ndim == 2:
-                array = np.expand_dims(array, 2)
-            array = array.transpose((2, 0, 1))
         # squeeze all single-element dimensions
         array = array.squeeze()
         array = array.tolist()
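The blocks removed above were the backward-compatibility shims: the loop now always builds `on_train_batch_end` outputs as (tbptt_steps, n_optimizers), and the shim only transposed that array back to the pre-1.8 (n_optimizers, tbptt_steps) layout for hooks that had not opted in. A standalone NumPy sketch of that transposition, for illustration only and not part of this commit:

```python
import numpy as np

tbptt_steps, n_optimizers = 3, 2
# Outputs as the loop builds them: one row per tbptt split, one column per optimizer.
new_format = np.array(
    [[{"step": s, "opt": o} for o in range(n_optimizers)] for s in range(tbptt_steps)],
    dtype=object,
)

# What the deprecated branch did for old-style hooks: (tbptt_steps, n_opt) -> (n_opt, tbptt_steps).
old_format = new_format.transpose((1, 0))
assert old_format.shape == (n_optimizers, tbptt_steps)
assert old_format[1][2] == new_format[2][1]
```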

src/pytorch_lightning/loops/utilities.py

Lines changed: 1 addition & 8 deletions
@@ -11,11 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import inspect
 from collections import OrderedDict
 from contextlib import contextmanager
 from functools import lru_cache
-from typing import Any, Callable, Generator, List, Optional, Sequence, Tuple
+from typing import Any, Generator, List, Optional, Sequence, Tuple
 
 import numpy as np
 import torch
@@ -216,12 +215,6 @@ def _reset_progress(loop: Loop) -> None:
         _reset_progress(v)
 
 
-# TODO: remove in v1.8
-def _v1_8_output_format(fx: Callable) -> bool:
-    parameters = inspect.signature(fx).parameters
-    return "new_format" in parameters and parameters["new_format"].default is True
-
-
 def _set_sampler_epoch(dataloader: DataLoader, epoch: int) -> None:
     """Calls the ``set_epoch`` method on either the sampler or the batch sampler of the given dataloader.
 
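The helper deleted above is small enough to restate in full: it inspected a hook's signature for a `new_format` parameter defaulting to `True` to decide whether the user had opted into the v1.8 output layout. Re-created here as a standalone sketch purely to show the mechanism; the example hooks are hypothetical:

```python
import inspect
from typing import Callable


def _v1_8_output_format(fx: Callable) -> bool:
    # True only if the hook signature carries `new_format=True` as a default.
    parameters = inspect.signature(fx).parameters
    return "new_format" in parameters and parameters["new_format"].default is True


def legacy_hook(outputs):  # no opt-in: old layout, a deprecation warning was raised
    ...


def opted_in_hook(outputs, new_format=True):  # opt-in: new layout, no warning
    ...


assert not _v1_8_output_format(legacy_hook)
assert _v1_8_output_format(opted_in_hook)
```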

tests/tests_pytorch/deprecated_api/test_remove_1-8.py

Lines changed: 0 additions & 50 deletions
@@ -38,7 +38,6 @@
 from pytorch_lightning.utilities.apply_func import move_data_to_device
 from pytorch_lightning.utilities.imports import _TORCHTEXT_LEGACY
 from pytorch_lightning.utilities.rank_zero import rank_zero_only, rank_zero_warn
-from tests_pytorch.deprecated_api import no_deprecated_call
 from tests_pytorch.helpers.runif import RunIf
 from tests_pytorch.helpers.torchtext_utils import get_dummy_torchtext_data_iterator
 
@@ -584,55 +583,6 @@ def test_v1_8_0_weights_save_path(tmpdir):
         _ = trainer.weights_save_path
 
 
-def test_deprecated_epoch_outputs_format(tmpdir):
-    class DeprecationModel(BoringModel):
-        def __init__(self):
-            super().__init__()
-            self.truncated_bptt_steps = 1
-
-        def training_step(self, batch, batch_idx, optimizer_idx, hiddens):
-            output = super().training_step(batch, batch_idx)
-            output["hiddens"] = hiddens
-            return output
-
-        def tbptt_split_batch(self, batch, split_size):
-            return [batch, batch]
-
-        def training_epoch_end(self, outputs):
-            ...
-
-        def on_train_batch_end(self, outputs, batch, batch_idx) -> None:
-            ...
-
-        def configure_optimizers(self):
-            return [torch.optim.Adam(self.parameters()), torch.optim.Adam(self.parameters())]
-
-    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
-    model = DeprecationModel()
-    batch_match = r"on_train_batch_end.*will change in version v1.8 to \(tbptt_steps, n_optimizers\)"
-    with pytest.deprecated_call(match=batch_match):
-        trainer.fit(model)
-
-    class DeprecationModel2(DeprecationModel):
-        def on_train_batch_end(self, *args, new_format=True):
-            ...
-
-    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
-    model = DeprecationModel()
-    epoch_match = r"training_epoch_end.*will change in version v1.8 to \(n_batches, tbptt_steps, n_optimizers\)"
-    with pytest.deprecated_call(match=epoch_match):
-        trainer.fit(model)
-
-    class NoDeprecationModel(DeprecationModel2):
-        def training_epoch_end(self, outputs, new_format=True):
-            ...
-
-    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
-    model = NoDeprecationModel()
-    with no_deprecated_call(match="will change in version v1.8.*new_format=True"):
-        trainer.fit(model)
-
-
 @pytest.mark.flaky(reruns=3)
 @pytest.mark.parametrize(["action", "expected"], [("a", [3, 1]), ("b", [2]), ("c", [1])])
 def test_simple_profiler_iterable_durations(tmpdir, action: str, expected: list):
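The deleted test exercised the warning path with `pytest.deprecated_call` and the project-local `no_deprecated_call` helper. For readers unfamiliar with the pattern, a minimal, Lightning-free example of asserting that a call emits a `DeprecationWarning` (the `legacy_api` function is a made-up stand-in):

```python
import warnings

import pytest


def legacy_api():
    # Stand-in for a deprecated code path.
    warnings.warn("legacy_api will change in version v1.8", DeprecationWarning)


def test_legacy_api_warns():
    with pytest.deprecated_call(match="will change in version v1.8"):
        legacy_api()
```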

tests/tests_pytorch/loops/epoch/test_training_epoch_loop.py

Lines changed: 17 additions & 98 deletions
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from unittest import mock
 from unittest.mock import patch
 
 import pytest
@@ -20,7 +19,6 @@
 from pytorch_lightning.demos.boring_classes import BoringModel
 from pytorch_lightning.loops import TrainingEpochLoop
 from pytorch_lightning.trainer.trainer import Trainer
-from tests_pytorch.deprecated_api import no_deprecated_call
 
 _out00 = {"loss": 0.0}
 _out01 = {"loss": 0.1}
@@ -33,43 +31,33 @@
 
 
 class TestPrepareOutputs:
-    def prepare_outputs(self, fn, tbptt_splits, new_format, batch_outputs, num_optimizers, automatic_optimization):
+    def prepare_outputs(self, fn, tbptt_splits, batch_outputs, num_optimizers, automatic_optimization):
         lightning_module = LightningModule()
-        lightning_module.on_train_batch_end = lambda *_: None  # override to trigger the deprecation message
         lightning_module.automatic_optimization = automatic_optimization
         lightning_module.truncated_bptt_steps = tbptt_splits
-        match = "will change in version v1.8.*new_format=True"
-        will_warn = tbptt_splits and num_optimizers > 1 and not new_format
-        ctx_manager = pytest.deprecated_call if will_warn else no_deprecated_call
-        with ctx_manager(match=match):
-            with mock.patch(
-                "pytorch_lightning.loops.epoch.training_epoch_loop._v1_8_output_format", return_value=new_format
-            ):
-                return fn(
-                    batch_outputs,
-                    lightning_module=lightning_module,
-                    num_optimizers=num_optimizers,  # does not matter for manual optimization
-                )
+        return fn(
+            batch_outputs,
+            lightning_module=lightning_module,
+            num_optimizers=num_optimizers,  # does not matter for manual optimization
+        )
 
     def prepare_outputs_training_epoch_end(
-        self, tbptt_splits, new_format, batch_outputs, num_optimizers, automatic_optimization=True
+        self, tbptt_splits, batch_outputs, num_optimizers, automatic_optimization=True
     ):
         return self.prepare_outputs(
             TrainingEpochLoop._prepare_outputs_training_epoch_end,
             tbptt_splits,
-            new_format,
             batch_outputs,
             num_optimizers,
             automatic_optimization=automatic_optimization,
         )
 
     def prepare_outputs_training_batch_end(
-        self, tbptt_splits, new_format, batch_outputs, num_optimizers, automatic_optimization=True
+        self, tbptt_splits, batch_outputs, num_optimizers, automatic_optimization=True
    ):
         return self.prepare_outputs(
             TrainingEpochLoop._prepare_outputs_training_batch_end,
             tbptt_splits,
-            new_format,
             batch_outputs,
             num_optimizers,
             automatic_optimization=automatic_optimization,
@@ -97,53 +85,19 @@ def prepare_outputs_training_batch_end(
             ),
             # 1 batch, tbptt with 2 splits (uneven)
             (1, 2, [[{0: _out00}, {0: _out01}], [{0: _out03}]], [[_out00, _out01], [_out03]]),
-        ],
-    )
-    @pytest.mark.parametrize("new_format", (False, True))
-    def test_prepare_outputs_training_epoch_end_automatic(
-        self, num_optimizers, tbptt_splits, batch_outputs, expected, new_format
-    ):
-        """Test that the loop converts the nested lists of outputs to the format that the `training_epoch_end` hook
-        currently expects in the case of automatic optimization."""
-        assert (
-            self.prepare_outputs_training_epoch_end(tbptt_splits, new_format, batch_outputs, num_optimizers) == expected
-        )
-
-    @pytest.mark.parametrize(
-        "num_optimizers,tbptt_splits,batch_outputs,expected",
-        [
-            # 3 batches, tbptt with 2 splits, 2 optimizers alternating
-            (
-                2,
-                2,
-                [[{0: _out00}, {0: _out01}], [{1: _out10}, {1: _out11}], [{0: _out02}, {0: _out03}]],
-                [[[_out00, _out01], [], [_out02, _out03]], [[], [_out10, _out11], []]],
-            )
-        ],
-    )
-    def test_prepare_outputs_training_epoch_end_automatic_old_format(
-        self, num_optimizers, tbptt_splits, batch_outputs, expected
-    ):
-        assert self.prepare_outputs_training_epoch_end(tbptt_splits, False, batch_outputs, num_optimizers) == expected
-
-    @pytest.mark.parametrize(
-        "num_optimizers,tbptt_splits,batch_outputs,expected",
-        [
             # 3 batches, tbptt with 2 splits, 2 optimizers alternating
             (
                 2,
                 2,
                 [[{0: _out00}, {0: _out01}], [{1: _out10}, {1: _out11}], [{0: _out02}, {0: _out03}]],
                 [[[_out00], [_out01]], [[_out10], [_out11]], [[_out02], [_out03]]],
-            )
+            ),
         ],
     )
-    def test_prepare_outputs_training_epoch_end_automatic_new_format(
-        self, num_optimizers, tbptt_splits, batch_outputs, expected
-    ):
+    def test_prepare_outputs_training_epoch_end_automatic(self, num_optimizers, tbptt_splits, batch_outputs, expected):
         """Test that the loop converts the nested lists of outputs to the format that the `training_epoch_end` hook
         currently expects in the case of automatic optimization."""
-        assert self.prepare_outputs_training_epoch_end(tbptt_splits, True, batch_outputs, num_optimizers) == expected
+        assert self.prepare_outputs_training_epoch_end(tbptt_splits, batch_outputs, num_optimizers) == expected
 
     @pytest.mark.parametrize(
         "batch_outputs,expected",
@@ -160,14 +114,10 @@ def test_prepare_outputs_training_epoch_end_automatic_new_format(
             ([[_out00, _out01], [_out02, _out03], [], [_out10]], [[_out00, _out01], [_out02, _out03], [_out10]]),
         ],
     )
-    @pytest.mark.parametrize("new_format", (False, True))
-    def test_prepare_outputs_training_epoch_end_manual(self, batch_outputs, expected, new_format):
+    def test_prepare_outputs_training_epoch_end_manual(self, batch_outputs, expected):
         """Test that the loop converts the nested lists of outputs to the format that the `training_epoch_end` hook
         currently expects in the case of manual optimization."""
-        assert (
-            self.prepare_outputs_training_epoch_end(0, new_format, batch_outputs, -1, automatic_optimization=False)
-            == expected
-        )
+        assert self.prepare_outputs_training_epoch_end(0, batch_outputs, -1, automatic_optimization=False) == expected
 
     @pytest.mark.parametrize(
         "num_optimizers,tbptt_splits,batch_end_outputs,expected",
@@ -180,47 +130,17 @@ def test_prepare_outputs_training_epoch_end_manual(self, batch_outputs, expected
             (2, 0, [{0: _out00, 1: _out01}], [_out00, _out01]),
             # tbptt with 2 splits
             (1, 2, [{0: _out00}, {0: _out01}], [_out00, _out01]),
+            # 2 optimizers, tbptt with 2 splits
+            (2, 2, [{0: _out00, 1: _out01}, {0: _out10, 1: _out11}], [[_out00, _out01], [_out10, _out11]]),
         ],
     )
-    @pytest.mark.parametrize("new_format", (False, True))
     def test_prepare_outputs_training_batch_end_automatic(
-        self, num_optimizers, tbptt_splits, batch_end_outputs, expected, new_format
-    ):
-        """Test that the loop converts the nested lists of outputs to the format that the `on_train_batch_end` hook
-        currently expects in the case of automatic optimization."""
-
-        assert (
-            self.prepare_outputs_training_batch_end(tbptt_splits, new_format, batch_end_outputs, num_optimizers)
-            == expected
-        )
-
-    @pytest.mark.parametrize(
-        "num_optimizers,tbptt_splits,batch_end_outputs,expected",
-        # 2 optimizers, tbptt with 2 splits
-        [(2, 2, [{0: _out00, 1: _out01}, {0: _out10, 1: _out11}], [[_out00, _out10], [_out01, _out11]])],
-    )
-    def test_prepare_outputs_training_batch_end_automatic_old_format(
        self, num_optimizers, tbptt_splits, batch_end_outputs, expected
     ):
         """Test that the loop converts the nested lists of outputs to the format that the `on_train_batch_end` hook
         currently expects in the case of automatic optimization."""
-        assert (
-            self.prepare_outputs_training_batch_end(tbptt_splits, False, batch_end_outputs, num_optimizers) == expected
-        )
 
-    @pytest.mark.parametrize(
-        "num_optimizers,tbptt_splits,batch_end_outputs,expected",
-        # 2 optimizers, tbptt with 2 splits
-        [(2, 2, [{0: _out00, 1: _out01}, {0: _out10, 1: _out11}], [[_out00, _out01], [_out10, _out11]])],
-    )
-    def test_prepare_outputs_training_batch_end_automatic_new_format(
-        self, num_optimizers, tbptt_splits, batch_end_outputs, expected
-    ):
-        """Test that the loop converts the nested lists of outputs to the format that the `on_train_batch_end` hook
-        currently expects in the case of automatic optimization."""
-        assert (
-            self.prepare_outputs_training_batch_end(tbptt_splits, True, batch_end_outputs, num_optimizers) == expected
-        )
+        assert self.prepare_outputs_training_batch_end(tbptt_splits, batch_end_outputs, num_optimizers) == expected
 
     @pytest.mark.parametrize(
         "batch_end_outputs,expected",
@@ -237,8 +157,7 @@ def test_prepare_outputs_training_batch_end_manual(self, batch_end_outputs, expe
         """Test that the loop converts the nested lists of outputs to the format that the `on_train_batch_end` hook
         currently expects in the case of manual optimization."""
         assert (
-            self.prepare_outputs_training_batch_end(0, False, batch_end_outputs, -1, automatic_optimization=False)
-            == expected
+            self.prepare_outputs_training_batch_end(0, batch_end_outputs, -1, automatic_optimization=False) == expected
         )
 
 
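The new parametrized case above (2 optimizers, tbptt with 2 splits) encodes the post-1.8 contract: the loop receives one dict per tbptt split keyed by optimizer index and keeps the (tbptt_steps, n_optimizers) ordering. A plain-Python restatement of that mapping, with placeholder values standing in for the `_out10`/`_out11` constants defined in the test module; the comprehension is illustrative, not the loop's actual implementation:

```python
# Placeholder step outputs; only the structure mirrors the test case above.
_out00, _out01 = {"loss": 0.0}, {"loss": 0.1}
_out10, _out11 = {"loss": 1.0}, {"loss": 1.1}

raw = [{0: _out00, 1: _out01}, {0: _out10, 1: _out11}]  # one dict per tbptt split
expected = [[_out00, _out01], [_out10, _out11]]  # (tbptt_steps, n_optimizers)

# Illustrative conversion: keep split order, expand each dict by optimizer index.
prepared = [[split[opt_idx] for opt_idx in sorted(split)] for split in raw]
assert prepared == expected
```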
