
Commit 2bc0566

add simplefsdp's autobucketing pass entry
1 parent 714cc5b commit 2bc0566

File tree

2 files changed: +22 −20 lines

torchtitan/experiments/auto_parallel/README.md

Lines changed: 4 additions & 0 deletions

@@ -4,4 +4,8 @@ requires installing git@github.com:pytorch-labs/autoparallel.git
 
 `CONFIG_FILE="./torchtitan/models/llama3/train_configs/debug_model.toml" ./run_train.sh --model.name llama3_auto_parallel --parallelism.tensor_parallel_degree 4`
 
+Use simplefsdp's autobucketing pass:
+
+`CONFIG_FILE="./torchtitan/models/llama3/train_configs/debug_model.toml" ./run_train.sh --model.name llama3_auto_parallel --parallelism.tensor_parallel_degree 4 --experimental.enable_simplefsdp_passes --training.compile`
+
 (or llama3-8b.toml)
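An aside on the two new flags: torchtitan parses dotted CLI options into a nested job config, so `--experimental.enable_simplefsdp_passes` surfaces as `job_config.experimental.enable_simplefsdp_passes` (the gate checked in `train.py` below), and `--training.compile` matters because the passes are `torch._inductor` configuration that only takes effect under `torch.compile`. A minimal sketch of that mapping, using simplified hypothetical dataclasses rather than torchtitan's real `JobConfig`:

```python
# Hypothetical, simplified stand-ins for torchtitan's JobConfig: dotted CLI
# flags like --experimental.enable_simplefsdp_passes become nested config
# fields, which train.py reads as shown in the diff below.
from dataclasses import dataclass, field

@dataclass
class Experimental:
    enable_simplefsdp_passes: bool = False  # --experimental.enable_simplefsdp_passes

@dataclass
class Training:
    compile: bool = False  # --training.compile

@dataclass
class JobConfig:
    experimental: Experimental = field(default_factory=Experimental)
    training: Training = field(default_factory=Training)

job_config = JobConfig()
job_config.experimental.enable_simplefsdp_passes = True
job_config.training.compile = True
assert job_config.experimental.enable_simplefsdp_passes  # the gate in train.py
```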

torchtitan/train.py

Lines changed: 18 additions & 20 deletions

@@ -8,6 +8,7 @@
 import os
 import time
 from datetime import timedelta
+from functools import partial
 from typing import Any, Generator, Iterable, Optional
 
 import torch
@@ -125,32 +126,29 @@ def __init__(self, job_config: JobConfig):
 
         # allow configuring inductor comms optimizations from torchtitan commandline
         if job_config.experimental.enable_simplefsdp_passes:
-            try:
-                from torch._inductor.simple_fsdp.bucket import bucket_fsdp_all_gather_concat_on_scheduler_ir
-            except ImportError:
-                print("Must use pytorch from unlanded https://github.com/pytorch/pytorch/pull/160282, e.g. torchtitan_conda_prod:5e4101faa448c2ee6b62ddd76ee08e8c")
-                raise
-
-            # Configs from Ruisi
+            # enable simplefsdp's autobucketing and reorder passes (original code in https://github.com/pytorch/pytorch/pull/160282)
+            from autoparallel.auto_bucketing import (
+                simple_fsdp_autobucketing_reordering_pass,
+                simplefsdp_autobucketing_config,
+            )
 
-            # set to 0.1 if you want to make bucketing more efficient with mixed dtype collectives
-            torch._inductor.config.simplefsdp.relax_ratio = 0
             torch._inductor.config.allow_buffer_reuse = False
-            torch._inductor.config.simplefsdp.estimate_ir = False
-            torch._inductor.config.simplefsdp.estimate_verbose = False
-            torch._inductor.config.simplefsdp.save_estimation_path = "/mnt/mffuse/cache_ruisi/estimation_mast_"+job_config.model.flavor+".pkl"
-            # set to True after the first communication estimation results are saved. This would reduce decision making time.
-            torch._inductor.config.simplefsdp.load_cache = False
-            torch._inductor.config.simplefsdp.enable_bucket_ir = True
-            torch._inductor.config.simplefsdp.enable_reorder_ir = True
-            torch._inductor.config.simplefsdp.simplefsdp_only = False # False for 2d True for 1d
-            torch._inductor.config.simplefsdp.peak_memory_offset = 0
-            torch._inductor.config.simplefsdp.bucketing_type = "auto"
+            torch._inductor.config.reorder_for_peak_memory = False
+            torch._inductor.config.reorder_for_compute_comm_overlap = True
+            simplefsdp_autobucketing_config.save_estimation_path = (
+                "/tmp/estimation_mast.pkl"
+            )
+            simple_fsdp_autobucketing_reordering_pass = partial(
+                simple_fsdp_autobucketing_reordering_pass,
+                configs=simplefsdp_autobucketing_config,
+            )
+            torch._inductor.config.reorder_for_compute_comm_overlap_passes = [
+                simple_fsdp_autobucketing_reordering_pass
+            ]
 
             # Don't use both sets of passes at the same time!
             torch._inductor.config.bucket_all_gathers_fx = "none"
             torch._inductor.config.bucket_reduce_scatters_fx = "none"
-            torch._inductor.config.reorder_for_compute_comm_overlap = False
         else:
            torch._inductor.config.bucket_all_gathers_fx = (
                job_config.experimental.bucket_all_gathers_fx