21 changes: 13 additions & 8 deletions examples/images/dreambooth/train_dreambooth_colossalai.py
@@ -153,7 +153,8 @@ def parse_args(input_args=None):
         "--gradient_accumulation_steps",
         type=int,
         default=1,
-        help="Number of updates steps to accumulate before performing a backward/update pass.",
+        help=
+        "Number of updates steps to accumulate before performing a backward/update pass. If using Gemini, it must be 1",
     )
     parser.add_argument(
         "--gradient_checkpointing",
@@ -361,6 +362,9 @@ def main(args):
     else:
         colossalai.launch_from_torch(config={}, seed=args.seed)

+    local_rank = gpc.get_local_rank(ParallelMode.DATA)
+    world_size = gpc.get_world_size(ParallelMode.DATA)
+
     if args.with_prior_preservation:
         class_images_dir = Path(args.class_data_dir)
         if not class_images_dir.exists():
@@ -388,7 +392,7 @@ def main(args):
             for example in tqdm(
                 sample_dataloader,
                 desc="Generating class images",
-                disable=not gpc.get_local_rank(ParallelMode.DATA) == 0,
+                disable=not local_rank == 0,
             ):
                 images = pipeline(example["prompt"]).images

@@ -400,7 +404,7 @@ def main(args):
             del pipeline

     # Handle the repository creation
-    if gpc.get_local_rank(ParallelMode.DATA) == 0:
+    if local_rank == 0:
         if args.push_to_hub:
             if args.hub_model_id is None:
                 repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
@@ -465,8 +469,9 @@ def main(args):
     if args.gradient_checkpointing:
         unet.enable_gradient_checkpointing()

+    assert args.gradient_accumulation_steps == 1, "if using ColossalAI gradient_accumulation_steps must be set to 1."
     if args.scale_lr:
-        args.learning_rate = args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * gpc.get_world_size(ParallelMode.DATA)
+        args.learning_rate = args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * world_size

     unet = gemini_zero_dpp(unet, args.placement)

@@ -555,7 +560,7 @@ def collate_fn(examples):
     args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

     # Train!
-    total_batch_size = args.train_batch_size * gpc.get_world_size(ParallelMode.DATA) * args.gradient_accumulation_steps
+    total_batch_size = args.train_batch_size * world_size * args.gradient_accumulation_steps

     logger.info("***** Running training *****", ranks=[0])
     logger.info(f" Num examples = {len(train_dataset)}", ranks=[0])
@@ -567,7 +572,7 @@ def collate_fn(examples):
     logger.info(f" Total optimization steps = {args.max_train_steps}", ranks=[0])

     # Only show the progress bar once on each machine.
-    progress_bar = tqdm(range(args.max_train_steps), disable=not gpc.get_local_rank(ParallelMode.DATA) == 0)
+    progress_bar = tqdm(range(args.max_train_steps), disable=not local_rank == 0)
     progress_bar.set_description("Steps")
     global_step = 0

@@ -644,7 +649,7 @@ def collate_fn(examples):
             if global_step % args.save_steps == 0:
                 torch.cuda.synchronize()
                 torch_unet = get_static_torch_model(unet)
-                if gpc.get_local_rank(ParallelMode.DATA) == 0:
+                if local_rank == 0:
                     pipeline = DiffusionPipeline.from_pretrained(
                         args.pretrained_model_name_or_path,
                         unet=torch_unet,
@@ -659,7 +664,7 @@ def collate_fn(examples):
     torch.cuda.synchronize()
     unet = get_static_torch_model(unet)

-    if gpc.get_local_rank(ParallelMode.DATA) == 0:
+    if local_rank == 0:
         pipeline = DiffusionPipeline.from_pretrained(
             args.pretrained_model_name_or_path,
             unet=unet,
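For reference, a minimal sketch of the pattern this diff applies: the data-parallel rank and world size are queried once right after launch and cached in local variables, instead of calling gpc.get_local_rank / gpc.get_world_size at every use site (rank-0-only work, learning-rate scaling, total batch size). The sketch assumes ColossalAI's legacy gpc / ParallelMode API as imported in the script; everything outside the two cached variables is illustrative and not part of the PR.

import colossalai
from colossalai.core import global_context as gpc
from colossalai.context import ParallelMode

# Illustrative only: launch the distributed context (expects torchrun env vars),
# then cache the data-parallel rank and world size for later reuse.
colossalai.launch_from_torch(config={})

local_rank = gpc.get_local_rank(ParallelMode.DATA)   # this process's rank in the data-parallel group
world_size = gpc.get_world_size(ParallelMode.DATA)   # number of data-parallel processes

if local_rank == 0:
    print(f"data-parallel world size: {world_size}")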