From a302af792571eedc949e7ec848e9cbf30a555030 Mon Sep 17 00:00:00 2001
From: flybird11111 <1829166702@qq.com>
Date: Sat, 2 Dec 2023 19:43:03 +0800
Subject: [PATCH 1/6] add prepare_dataloader to GeminiPlugin and pass tp_size in llama2 examples

---
 colossalai/booster/plugin/gemini_plugin.py | 50 ++++++++++++++++++++++
 examples/language/llama2/benchmark.py      |  3 +-
 examples/language/llama2/finetune.py       |  4 +-
 3 files changed, 54 insertions(+), 3 deletions(-)

diff --git a/colossalai/booster/plugin/gemini_plugin.py b/colossalai/booster/plugin/gemini_plugin.py
index 261080dc9d20..a1cce1dd52cd 100644
--- a/colossalai/booster/plugin/gemini_plugin.py
+++ b/colossalai/booster/plugin/gemini_plugin.py
@@ -1,9 +1,11 @@
 import gc
 import logging
 import os
+import random
 from pathlib import Path
 from typing import Callable, Iterator, List, Optional, Tuple
 
+import numpy as np
 import torch
 import torch.distributed as dist
 import torch.nn as nn
@@ -11,6 +13,7 @@
 from torch.optim import Optimizer
 from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
 from torch.utils.data import DataLoader
+from torch.utils.data.distributed import DistributedSampler
 
 from colossalai.checkpoint_io import CheckpointIndexFile, CheckpointIO, GeneralCheckpointIO
 from colossalai.checkpoint_io.utils import (
@@ -448,6 +451,53 @@ def control_device(self) -> bool:
     def supported_devices(self) -> List[str]:
         return ["cuda", "npu"]
 
+
+    def prepare_dataloader(
+        self, dataset, batch_size, shuffle=False, seed=1024, drop_last=False, pin_memory=False, num_workers=0, **kwargs
+    ):
+        r"""
+        Prepare a dataloader for distributed training. The dataset is wrapped in a
+        `torch.utils.data.DataLoader` that draws indices from a `torch.utils.data.DistributedSampler`.
+
+
+        Args:
+            dataset (`torch.utils.data.Dataset`): The dataset to be loaded.
+            batch_size (int): The number of samples in each batch.
+            shuffle (bool, optional): Whether to shuffle the dataset. Defaults to False.
+            seed (int, optional): Random worker seed for sampling, defaults to 1024.
+            drop_last (bool, optional): Set to True to drop the last incomplete batch, if the dataset size
+                is not divisible by the batch size. If False and the size of the dataset is not divisible by
+                the batch size, then the last batch will be smaller, defaults to False.
+            pin_memory (bool, optional): Whether to pin memory address in CPU memory. Defaults to False.
+            num_workers (int, optional): Number of worker processes for this dataloader. Defaults to 0.
+            kwargs (dict): optional parameters for ``torch.utils.data.DataLoader``, more details can be found in
+                `DataLoader <https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader>`_.
+
+        Returns:
+            :class:`torch.utils.data.DataLoader`: A DataLoader used for training or testing.
+        """
+        _kwargs = kwargs.copy()
+        sampler = DistributedSampler(
+            dataset, num_replicas=self.pg_mesh.size(DP_AXIS), rank=self.pg_mesh.coordinate(DP_AXIS), shuffle=shuffle
+        )
+
+        # Deterministic dataloader
+        def seed_worker(worker_id):
+            worker_seed = seed
+            np.random.seed(worker_seed)
+            torch.manual_seed(worker_seed)
+            random.seed(worker_seed)
+
+        return DataLoader(
+            dataset,
+            batch_size=batch_size,
+            sampler=sampler,
+            worker_init_fn=seed_worker,
+            drop_last=drop_last,
+            pin_memory=pin_memory,
+            num_workers=num_workers,
+            **_kwargs,
+        )
 
     def configure(
         self,
diff --git a/examples/language/llama2/benchmark.py b/examples/language/llama2/benchmark.py
index d7a79a0221ca..20f4379dcc31 100644
--- a/examples/language/llama2/benchmark.py
+++ b/examples/language/llama2/benchmark.py
@@ -93,9 +93,10 @@ def empty_init():
             shard_param_frac=args.shard_param_frac,
             offload_optim_frac=args.offload_optim_frac,
             offload_param_frac=args.offload_param_frac,
+            tp_size=args.tp,
         )
     elif args.plugin == "gemini_auto":
-        plugin = GeminiPlugin(placement_policy="auto", precision="bf16", warmup_non_model_data_ratio=args.warmup_ratio)
+        plugin = GeminiPlugin(placement_policy="auto", precision="bf16", warmup_non_model_data_ratio=args.warmup_ratio, tp_size=args.tp)
     elif args.plugin == "fsdp":
         if use_empty_init:
             plugin = TorchFSDPPlugin(
diff --git a/examples/language/llama2/finetune.py b/examples/language/llama2/finetune.py
index f7708b1a38ab..017e4610d3c0 100644
--- a/examples/language/llama2/finetune.py
+++ b/examples/language/llama2/finetune.py
@@ -143,10 +143,10 @@ def main():
     # Initialize Booster
     # ==============================
     if args.plugin == "gemini":
-        plugin = GeminiPlugin(precision=args.mixed_precision, initial_scale=2**16, max_norm=args.grad_clip)
+        plugin = GeminiPlugin(precision=args.mixed_precision, initial_scale=2**16, max_norm=args.grad_clip, tp_size=args.tp)
     elif args.plugin == "gemini_auto":
         plugin = GeminiPlugin(
-            precision=args.mixed_precision, placement_policy="auto", initial_scale=2**16, max_norm=args.grad_clip
+            precision=args.mixed_precision, placement_policy="auto", initial_scale=2**16, max_norm=args.grad_clip, tp_size=args.tp
         )
     elif args.plugin == "zero2":
         plugin = LowLevelZeroPlugin(

From 93e41eb05b9f372bfb3e76fe7d32489b36d2ad6b Mon Sep 17 00:00:00 2001
From: flybird11111 <1829166702@qq.com>
Date: Mon, 4 Dec 2023 10:57:55 +0800
Subject: [PATCH 2/6] shard the Gemini dataloader across ZeRO and extra data-parallel ranks

---
 colossalai/booster/plugin/gemini_plugin.py | 6 +++++-
 examples/language/llama2/benchmark.py      | 4 +++-
 examples/language/llama2/finetune.py       | 4 ++--
 3 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/colossalai/booster/plugin/gemini_plugin.py b/colossalai/booster/plugin/gemini_plugin.py
index a1cce1dd52cd..d65a10e954f7 100644
--- a/colossalai/booster/plugin/gemini_plugin.py
+++ b/colossalai/booster/plugin/gemini_plugin.py
@@ -477,8 +477,12 @@ def prepare_dataloader(
             :class:`torch.utils.data.DataLoader`: A DataLoader used for training or testing.
         """
         _kwargs = kwargs.copy()
+        zero_world_size = self.pg_mesh.size(ZERO_AXIS)
+        extra_dp_world_size = self.pg_mesh.size(DP_AXIS)
+        zero_ranks = self.pg_mesh.coordinate(ZERO_AXIS)
+        extra_dp_ranks = self.pg_mesh.coordinate(DP_AXIS)
         sampler = DistributedSampler(
-            dataset, num_replicas=self.pg_mesh.size(DP_AXIS), rank=self.pg_mesh.coordinate(DP_AXIS), shuffle=shuffle
+            dataset, num_replicas=zero_world_size * extra_dp_world_size, rank=zero_ranks + extra_dp_ranks, shuffle=shuffle
         )
 
         # Deterministic dataloader
diff --git a/examples/language/llama2/benchmark.py b/examples/language/llama2/benchmark.py
index 20f4379dcc31..daf7d2fd4b0b 100644
--- a/examples/language/llama2/benchmark.py
+++ b/examples/language/llama2/benchmark.py
@@ -72,6 +72,7 @@ def main():
     parser.add_argument("--offload_optim_frac", type=float, default=0.0, help="Offload optim fraction. Only for gemini")
     parser.add_argument("--offload_param_frac", type=float, default=0.0, help="Offload param fraction. Only for gemini")
     parser.add_argument("--tp", type=int, default=1, help="Tensor parallel size")
+    parser.add_argument("--extra_dp", type=int, default=1, help="Extra data parallel size, used for Gemini")
     parser.add_argument("--pp", type=int, default=1, help="Pipeline parallel size")
     parser.add_argument("--mbs", type=int, default=1)
     parser.add_argument("--zero", type=int, default=0)
@@ -94,9 +95,10 @@ def empty_init():
             offload_optim_frac=args.offload_optim_frac,
             offload_param_frac=args.offload_param_frac,
             tp_size=args.tp,
+            extra_dp_size=args.extra_dp,
         )
     elif args.plugin == "gemini_auto":
-        plugin = GeminiPlugin(placement_policy="auto", precision="bf16", warmup_non_model_data_ratio=args.warmup_ratio, tp_size=args.tp)
+        plugin = GeminiPlugin(placement_policy="auto", precision="bf16", warmup_non_model_data_ratio=args.warmup_ratio, tp_size=args.tp, extra_dp_size=args.extra_dp)
     elif args.plugin == "fsdp":
         if use_empty_init:
             plugin = TorchFSDPPlugin(
diff --git a/examples/language/llama2/finetune.py b/examples/language/llama2/finetune.py
index 017e4610d3c0..f7708b1a38ab 100644
--- a/examples/language/llama2/finetune.py
+++ b/examples/language/llama2/finetune.py
@@ -143,10 +143,10 @@ def main():
     # Initialize Booster
     # ==============================
     if args.plugin == "gemini":
-        plugin = GeminiPlugin(precision=args.mixed_precision, initial_scale=2**16, max_norm=args.grad_clip, tp_size=args.tp)
+        plugin = GeminiPlugin(precision=args.mixed_precision, initial_scale=2**16, max_norm=args.grad_clip)
     elif args.plugin == "gemini_auto":
         plugin = GeminiPlugin(
-            precision=args.mixed_precision, placement_policy="auto", initial_scale=2**16, max_norm=args.grad_clip, tp_size=args.tp
+            precision=args.mixed_precision, placement_policy="auto", initial_scale=2**16, max_norm=args.grad_clip
         )
     elif args.plugin == "zero2":
         plugin = LowLevelZeroPlugin(

From a2e5bced90d4321818143ac81f772bba7046a1d1 Mon Sep 17 00:00:00 2001
From: flybird11111 <1829166702@qq.com>
Date: Mon, 4 Dec 2023 13:22:53 +0800
Subject: [PATCH 3/6] fix the global rank passed to DistributedSampler

---
 colossalai/booster/plugin/gemini_plugin.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/colossalai/booster/plugin/gemini_plugin.py b/colossalai/booster/plugin/gemini_plugin.py
index d65a10e954f7..6622b6dc144e 100644
--- a/colossalai/booster/plugin/gemini_plugin.py
+++ b/colossalai/booster/plugin/gemini_plugin.py
@@ -479,10 +479,10 @@ def prepare_dataloader(
         _kwargs = kwargs.copy()
         zero_world_size = self.pg_mesh.size(ZERO_AXIS)
         extra_dp_world_size = self.pg_mesh.size(DP_AXIS)
-        zero_ranks = self.pg_mesh.coordinate(ZERO_AXIS)
-        extra_dp_ranks = self.pg_mesh.coordinate(DP_AXIS)
+        zero_rank = self.pg_mesh.coordinate(ZERO_AXIS)
+        extra_dp_rank = self.pg_mesh.coordinate(DP_AXIS)
         sampler = DistributedSampler(
-            dataset, num_replicas=zero_world_size * extra_dp_world_size, rank=zero_ranks + extra_dp_ranks, shuffle=shuffle
+            dataset, num_replicas=zero_world_size * extra_dp_world_size, rank=zero_rank * extra_dp_world_size + extra_dp_rank, shuffle=shuffle
         )
 
         # Deterministic dataloader

From b482263f134cec8bc5246f25f7c86ea8e22bfca9 Mon Sep 17 00:00:00 2001
From: flybird11111 <1829166702@qq.com>
Date: Thu, 7 Dec 2023 11:01:13 +0800
Subject: [PATCH 4/6] test ci

---
 .github/workflows/build_on_pr.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build_on_pr.yml b/.github/workflows/build_on_pr.yml
index e2114d43bcd0..bf41808cfa5e 100644
--- a/.github/workflows/build_on_pr.yml
+++ b/.github/workflows/build_on_pr.yml
@@ -208,7 +208,7 @@ jobs:
       - name: Execute Unit Testing
         run: |
-          CURL_CA_BUNDLE="" PYTHONPATH=$PWD pytest -m "not largedist" --testmon --testmon-forceselect --testmon-cov=. --durations=10 tests/
+          CURL_CA_BUNDLE="" PYTHONPATH=$PWD pytest -m "not largedist" --testmon --testmon-forceselect --testmon-cov=. --durations=10 tests/test_booster/test_plugin/test_low_level_zero_plugin.py
        env:
          DATA: /data/scratch/cifar-10
          NCCL_SHM_DISABLE: 1

From f1cef20663a22882a5a304b4079d0002d356edf9 Mon Sep 17 00:00:00 2001
From: flybird11111 <1829166702@qq.com>
Date: Thu, 7 Dec 2023 14:14:54 +0800
Subject: [PATCH 5/6] revert the temporary CI change and shrink the gptj test config

---
 .github/workflows/build_on_pr.yml        | 2 +-
 tests/kit/model_zoo/transformers/gptj.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/build_on_pr.yml b/.github/workflows/build_on_pr.yml
index bf41808cfa5e..e2114d43bcd0 100644
--- a/.github/workflows/build_on_pr.yml
+++ b/.github/workflows/build_on_pr.yml
@@ -208,7 +208,7 @@ jobs:
       - name: Execute Unit Testing
         run: |
-          CURL_CA_BUNDLE="" PYTHONPATH=$PWD pytest -m "not largedist" --testmon --testmon-forceselect --testmon-cov=. --durations=10 tests/test_booster/test_plugin/test_low_level_zero_plugin.py
+          CURL_CA_BUNDLE="" PYTHONPATH=$PWD pytest -m "not largedist" --testmon --testmon-forceselect --testmon-cov=. --durations=10 tests/
        env:
          DATA: /data/scratch/cifar-10
          NCCL_SHM_DISABLE: 1
diff --git a/tests/kit/model_zoo/transformers/gptj.py b/tests/kit/model_zoo/transformers/gptj.py
index 263978512a02..9eefbb43dad8 100644
--- a/tests/kit/model_zoo/transformers/gptj.py
+++ b/tests/kit/model_zoo/transformers/gptj.py
@@ -61,7 +61,7 @@ def data_gen_for_sequence_classification():
 
 config = transformers.GPTJConfig(
     n_layer=2,
-    n_head=16,
+    n_head=4,
     vocab_size=50258,
     attn_pdrop=0,
     embd_pdrop=0,

From 5a92d44330b8532918dafc8181ee1490e5bef734 Mon Sep 17 00:00:00 2001
From: flybird11111 <1829166702@qq.com>
Date: Mon, 11 Dec 2023 15:38:54 +0800
Subject: [PATCH 6/6] update the supported PyTorch version in the docs

---
 README.md                                       | 2 +-
 docs/README-zh-Hans.md                          | 2 +-
 docs/source/en/get_started/installation.md      | 2 +-
 docs/source/zh-Hans/get_started/installation.md | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 04a349337ec0..5455005917a6 100644
--- a/README.md
+++ b/README.md
@@ -372,7 +372,7 @@ Please visit our [documentation](https://www.colossalai.org/) and [examples](htt
 ## Installation
 
 Requirements:
-- PyTorch >= 1.11 (PyTorch 2.x in progress)
+- PyTorch >= 1.11 and PyTorch <= 2.1
 - Python >= 3.7
 - CUDA >= 11.0
 - [NVIDIA GPU Compute Capability](https://developer.nvidia.com/cuda-gpus) >= 7.0 (V100/RTX20 and higher)
diff --git a/docs/README-zh-Hans.md b/docs/README-zh-Hans.md
index c6f15b6d6c60..7bb4a414b7d4 100644
--- a/docs/README-zh-Hans.md
+++ b/docs/README-zh-Hans.md
@@ -368,7 +368,7 @@ Colossal-AI 为您提供了一系列并行组件。我们的目标是让您的
 
 环境要求:
 
-- PyTorch >= 1.11 (PyTorch 2.x 正在适配中)
+- PyTorch >= 1.11 并且 PyTorch <= 2.1
 - Python >= 3.7
 - CUDA >= 11.0
 - [NVIDIA GPU Compute Capability](https://developer.nvidia.com/cuda-gpus) >= 7.0 (V100/RTX20 and higher)
diff --git a/docs/source/en/get_started/installation.md b/docs/source/en/get_started/installation.md
index 6fc4ce2c922a..18607a34cf65 100644
--- a/docs/source/en/get_started/installation.md
+++ b/docs/source/en/get_started/installation.md
@@ -1,7 +1,7 @@
 # Setup
 
 Requirements:
-- PyTorch >= 1.11 (PyTorch 2.x in progress)
+- PyTorch >= 1.11 and PyTorch <= 2.1
 - Python >= 3.7
 - CUDA >= 11.0
 - [NVIDIA GPU Compute Capability](https://developer.nvidia.com/cuda-gpus) >= 7.0 (V100/RTX20 and higher)
diff --git a/docs/source/zh-Hans/get_started/installation.md b/docs/source/zh-Hans/get_started/installation.md
index a6c88672b907..e75e42530fc1 100755
--- a/docs/source/zh-Hans/get_started/installation.md
+++ b/docs/source/zh-Hans/get_started/installation.md
@@ -2,7 +2,7 @@
 环境要求:
 
-- PyTorch >= 1.11 (PyTorch 2.x 正在适配中)
+- PyTorch >= 1.11 并且 PyTorch <= 2.1
 - Python >= 3.7
 - CUDA >= 11.0
 - [NVIDIA GPU Compute Capability](https://developer.nvidia.com/cuda-gpus) >= 7.0 (V100/RTX20 and higher)
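
Note on the sampler rank computed in PATCH 2/6 and corrected in PATCH 3/6: the Gemini
plugin's device mesh has a ZeRO axis and an extra data-parallel axis, so the dataset
must be sharded across zero_world_size * extra_dp_world_size replicas, each with a
unique DistributedSampler rank. Summing the two coordinates collides (coordinates
(0, 1) and (1, 0) both yield rank 1, so two replicas would read the same shard),
while the row-major flattening in PATCH 3/6 assigns every replica a distinct rank.
A minimal standalone sketch of the fixed mapping; the mesh sizes below are
illustrative, not taken from the patches:

    # Row-major flattening of a (zero, extra_dp) mesh coordinate into the
    # unique global rank handed to DistributedSampler, as in PATCH 3/6.
    zero_world_size = 2  # illustrative mesh sizes, not from the patches
    extra_dp_world_size = 4

    def global_dp_rank(zero_rank: int, extra_dp_rank: int) -> int:
        return zero_rank * extra_dp_world_size + extra_dp_rank

    # Every coordinate maps to a distinct rank in [0, 8) ...
    ranks = [
        global_dp_rank(z, e) for z in range(zero_world_size) for e in range(extra_dp_world_size)
    ]
    assert sorted(ranks) == list(range(zero_world_size * extra_dp_world_size))

    # ... whereas the summed form from PATCH 2/6 collides, e.g. (0, 1) vs. (1, 0):
    assert 0 + 1 == 1 + 0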
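
For reference, a rough end-to-end sketch of how the options added by these patches fit
together, assuming a torchrun launch on 8 GPUs and an already-built dataset named
train_dataset (the dataset name and all sizes here are assumptions for illustration,
not taken from the patches):

    # Illustrative wiring of tp_size / extra_dp_size and prepare_dataloader.
    import colossalai
    from colossalai.booster import Booster
    from colossalai.booster.plugin import GeminiPlugin

    colossalai.launch_from_torch(config={})  # assumes launch via torchrun
    # With 8 processes: tp_size * extra_dp_size * zero size = 2 * 2 * 2 = 8
    plugin = GeminiPlugin(precision="bf16", tp_size=2, extra_dp_size=2)
    booster = Booster(plugin=plugin)
    # Shards train_dataset (assumed to exist) across the zero * extra_dp replicas
    dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8, shuffle=True, seed=1024)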