diff --git a/.compatibility b/.compatibility index c8ac4083d2a2..32da32be5521 100644 --- a/.compatibility +++ b/.compatibility @@ -1,3 +1,3 @@ 1.12.0-11.3.0 -1.11.0-11.3.0 -1.10.1-11.3.0 +1.13.0-11.6.0 +2.0.0-11.7.0 diff --git a/.github/workflows/README.md b/.github/workflows/README.md index f40f4cc86d1b..3fad7e36f14c 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -43,10 +43,18 @@ I will provide the details of each workflow below. | Workflow Name | File name | Description | | ---------------------- | -------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `Build on PR` | `build_on_pr.yml` | This workflow is triggered when a PR changes essential files. It will run all the unit tests in the repository with 4 GPUs. | +| `Build on PR` | `build_on_pr.yml` | This workflow is triggered when a PR changes essential files and a branch is created/deleted. It will run all the unit tests in the repository with 4 GPUs. | | `Build on Schedule` | `build_on_schedule.yml` | This workflow will run the unit tests everyday with 8 GPUs. The result is sent to Lark. | | `Report test coverage` | `report_test_coverage.yml` | This PR will put up a comment to report the test coverage results when `Build` is done. | +To reduce the average time of the unit test on PR, `Build on PR` workflow manages testmon cache. + +1. When creating a new branch, it copies `cache/main/.testmondata*` to `cache//`. +2. When creating a new PR or change the base branch of a PR, it copies `cache//.testmondata*` to `cache/_pull//`. +3. When running unit tests for each PR, it restores testmon cache from `cache/_pull//`. After the test, it stores the cache back to `cache/_pull//`. +4. When a PR is closed, if it's merged, it copies `cache/_pull//.testmondata*` to `cache//`. Otherwise, it just removes `cache/_pull/`. +5. When a branch is deleted, it removes `cache/`. + ### Example Test | Workflow Name | File name | Description | diff --git a/.github/workflows/build_on_pr.yml b/.github/workflows/build_on_pr.yml index a5a17d176c9d..8b2253e57cfb 100644 --- a/.github/workflows/build_on_pr.yml +++ b/.github/workflows/build_on_pr.yml @@ -2,7 +2,7 @@ name: Build on PR on: pull_request: - types: [synchronize, opened, reopened] + types: [synchronize, opened, reopened, ready_for_review, closed, edited] branches: - "main" - "develop" @@ -18,11 +18,63 @@ on: - "!tests/**.md" # ignore doc change - "pytest.ini" # test config change - "setup.py" # install command change + create: + delete: jobs: + prepare_cache: + name: Prepare testmon cache + if: | + github.event_name == 'create' && + github.event.ref_type == 'branch' && + github.event.repository.full_name == 'hpcaitech/ColossalAI' + runs-on: [self-hosted, gpu] + container: + image: hpcaitech/pytorch-cuda:1.12.0-11.3.0 + options: --rm + timeout-minutes: 5 + defaults: + run: + shell: bash + steps: + - name: Copy testmon cache + run: | # branch name may contain slash, we need to replace it with space + export REF_BRANCH=$(echo ${{ github.event.ref }} | sed "s/\// /") + if [ -d /github/home/testmon_cache/${MAIN_BRANCH} ]; then + [ ! 
-z "$(ls -A /github/home/testmon_cache/${MAIN_BRANCH})" ] && cp -p -r /github/home/testmon_cache/${MAIN_BRANCH} "/github/home/testmon_cache/${REF_BRANCH}" + fi + env: + MAIN_BRANCH: ${{ github.event.master_branch }} + + prepare_cache_for_pr: + name: Prepare testmon cache for PR + if: | + github.event_name == 'pull_request' && + (github.event.action == 'opened' || github.event.action == 'reopened' || (github.event.action == 'edited' && github.event.changes.base != null)) && + github.event.pull_request.base.repo.full_name == 'hpcaitech/ColossalAI' + runs-on: [self-hosted, gpu] + container: + image: hpcaitech/pytorch-cuda:1.12.0-11.3.0 + options: --rm + timeout-minutes: 5 + defaults: + run: + shell: bash + steps: + - name: Copy testmon cache + run: | # branch name may contain slash, we need to replace it with space + export BASE=$(echo ${{ github.event.pull_request.base.ref }} | sed "s/\// /") + if [ -d "/github/home/testmon_cache/${BASE}" ]; then + [ ! -z "$(ls -A "/github/home/testmon_cache/${BASE}")" ] && mkdir -p /github/home/testmon_cache/_pull && cp -p -r "/github/home/testmon_cache/${BASE}" /github/home/testmon_cache/_pull/${PR_NUMBER} + fi + env: + PR_NUMBER: ${{ github.event.number }} + detect: name: Detect file change if: | + github.event_name == 'pull_request' && + (github.event.action == 'synchronize' || github.event.action == 'opened' || github.event.action == 'reopened' || github.event.action == 'ready_for_review') && github.event.pull_request.draft == false && github.event.pull_request.base.repo.full_name == 'hpcaitech/ColossalAI' outputs: @@ -135,9 +187,11 @@ jobs: - name: Restore Testmon Cache run: | - if [ -d /github/home/testmon_cache ]; then - [ ! -z "$(ls -A /github/home/testmon_cache)" ] && cp -p -r /github/home/testmon_cache/.testmondata* /__w/ColossalAI/ColossalAI/ + if [ -d /github/home/testmon_cache/_pull/${PR_NUMBER} ]; then + [ ! -z "$(ls -A /github/home/testmon_cache/_pull/${PR_NUMBER})" ] && cp -p -r /github/home/testmon_cache/_pull/${PR_NUMBER}/.testmondata* /__w/ColossalAI/ColossalAI/ fi + env: + PR_NUMBER: ${{ github.event.number }} - name: Execute Unit Testing run: | @@ -149,8 +203,10 @@ jobs: - name: Store Testmon Cache run: | - [ -d /github/home/testmon_cache ] || mkdir /github/home/testmon_cache - cp -p -r /__w/ColossalAI/ColossalAI/.testmondata* /github/home/testmon_cache/ + mkdir -p /github/home/testmon_cache/_pull/${PR_NUMBER} + cp -p -r /__w/ColossalAI/ColossalAI/.testmondata* /github/home/testmon_cache/_pull/${PR_NUMBER}/ + env: + PR_NUMBER: ${{ github.event.number }} - name: Collate artifact env: @@ -188,3 +244,54 @@ jobs: with: name: report path: report/ + + store_cache: + name: Store testmon cache for PR + if: | + github.event_name == 'pull_request' && + github.event.action == 'closed' && + github.event.pull_request.base.repo.full_name == 'hpcaitech/ColossalAI' + runs-on: [self-hosted, gpu] + container: + image: hpcaitech/pytorch-cuda:1.12.0-11.3.0 + options: --rm + timeout-minutes: 5 + defaults: + run: + shell: bash + steps: + - name: Store testmon cache if possible + if: github.event.pull_request.merged == true + run: | # branch name may contain slash, we need to replace it with space + export BASE=$(echo ${{ github.event.pull_request.base.ref }} | sed "s/\// /") + if [ -d /github/home/testmon_cache/_pull/${PR_NUMBER} ]; then + [ ! 
-z "$(ls -A /github/home/testmon_cache/_pull/${PR_NUMBER})" ] && cp -p -r /github/home/testmon_cache/_pull/${PR_NUMBER}/.testmondata* "/github/home/testmon_cache/${BASE}/" + fi + env: + PR_NUMBER: ${{ github.event.pull_request.number }} + + - name: Remove testmon cache + run: | + rm -rf /github/home/testmon_cache/_pull/${PR_NUMBER} + env: + PR_NUMBER: ${{ github.event.pull_request.number }} + + remove_cache: + name: Remove testmon cache + if: | + github.event_name == 'delete' && + github.event.ref_type == 'branch' && + github.event.repository.full_name == 'hpcaitech/ColossalAI' + runs-on: [self-hosted, gpu] + container: + image: hpcaitech/pytorch-cuda:1.12.0-11.3.0 + options: --rm + timeout-minutes: 5 + defaults: + run: + shell: bash + steps: + - name: Remove testmon cache + run: | # branch name may contain slash, we need to replace it with space + export BASE=$(echo ${{ github.event.ref }} | sed "s/\// /") + rm -rf "/github/home/testmon_cache/${BASE}" diff --git a/.github/workflows/release_docker_after_publish.yml b/.github/workflows/release_docker_after_publish.yml index 22698ca192ed..6c8df9730b0d 100644 --- a/.github/workflows/release_docker_after_publish.yml +++ b/.github/workflows/release_docker_after_publish.yml @@ -23,8 +23,11 @@ jobs: run: | version=$(cat version.txt) tag=hpcaitech/colossalai:$version + latest=hpcaitech/colossalai:latest docker build --build-arg http_proxy=http://172.17.0.1:7890 --build-arg https_proxy=http://172.17.0.1:7890 --build-arg VERSION=v${version} -t $tag ./docker + docker tag $tag $latest echo "tag=${tag}" >> $GITHUB_OUTPUT + echo "latest=${latest}" >> $GITHUB_OUTPUT - name: Log in to Docker Hub uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9 @@ -36,6 +39,7 @@ jobs: id: docker-push run: | docker push ${{ steps.build.outputs.tag }} + docker push ${{ steps.build.outputs.latest }} notify: name: Notify Lark via webhook diff --git a/.github/workflows/run_chatgpt_examples.yml b/.github/workflows/run_chatgpt_examples.yml index 9d9d3a007851..129bf7ed3270 100644 --- a/.github/workflows/run_chatgpt_examples.yml +++ b/.github/workflows/run_chatgpt_examples.yml @@ -20,7 +20,7 @@ jobs: runs-on: [self-hosted, gpu] container: image: hpcaitech/pytorch-cuda:1.12.0-11.3.0 - options: --gpus all --rm -v /data/scratch/github_actions/chat:/data/scratch/github_actions/chat + options: --gpus all --rm -v /data/scratch/github_actions/chat:/data/scratch/github_actions/chat --shm-size=10.24gb timeout-minutes: 30 defaults: run: diff --git a/.github/workflows/scripts/generate_leaderboard_and_send_to_lark.py b/.github/workflows/scripts/generate_leaderboard_and_send_to_lark.py index 16b8957c1d88..d8f6c8fe309e 100644 --- a/.github/workflows/scripts/generate_leaderboard_and_send_to_lark.py +++ b/.github/workflows/scripts/generate_leaderboard_and_send_to_lark.py @@ -38,7 +38,7 @@ def plot_bar_chart(x: List[Any], y: List[Any], xlabel: str, ylabel: str, title: def get_issue_pull_request_comments(github_token: str, since: str) -> Dict[str, int]: """ - Retrive the issue/PR comments made by our members in the last 7 days. + Retrieve the issue/PR comments made by our members in the last 7 days. Args: github_token (str): GitHub access token for API calls @@ -89,7 +89,7 @@ def get_issue_pull_request_comments(github_token: str, since: str) -> Dict[str, def get_discussion_comments(github_token, since) -> Dict[str, int]: """ - Retrive the discussion comments made by our members in the last 7 days. + Retrieve the discussion comments made by our members in the last 7 days. 
This is only available via the GitHub GraphQL API. Args: @@ -194,7 +194,7 @@ def _call_graphql_api(query): discussion_updated_at = datetime.strptime(discussion['updatedAt'], "%Y-%m-%dT%H:%M:%SZ") # check if the updatedAt is within the last 7 days - # if yes, add it to dicussion_numbers + # if yes, add it to discussion_numbers if discussion_updated_at > since: if discussion['authorAssociation'] != 'MEMBER': discussion_numbers.append(discussion['number']) @@ -207,14 +207,14 @@ def _call_graphql_api(query): # update cursor cursor = edges[-1]['cursor'] - # get the dicussion comments and replies made by our member + # get the discussion comments and replies made by our member user_engagement_count = {} - for dicussion_number in discussion_numbers: + for discussion_number in discussion_numbers: cursor = None num_per_request = 10 while True: - query = _generate_comment_reply_count_for_discussion(dicussion_number, num_per_request, cursor) + query = _generate_comment_reply_count_for_discussion(discussion_number, num_per_request, cursor) data = _call_graphql_api(query) # get the comments @@ -249,7 +249,7 @@ def _call_graphql_api(query): reply = reply_edge['node'] if reply['authorAssociation'] == 'MEMBER': # check if the updatedAt is within the last 7 days - # if yes, add it to dicussion_numbers + # if yes, add it to discussion_numbers reply_updated_at = datetime.strptime(reply['updatedAt'], "%Y-%m-%dT%H:%M:%SZ") if reply_updated_at > since: member_name = reply['author']['login'] diff --git a/applications/Chat/benchmarks/ray/1mmt_dummy.py b/applications/Chat/benchmarks/ray/1mmt_dummy.py new file mode 100644 index 000000000000..9e8f36cefc4f --- /dev/null +++ b/applications/Chat/benchmarks/ray/1mmt_dummy.py @@ -0,0 +1,178 @@ +import argparse +import os +import socket +from functools import partial + +import ray +import torch +from coati.quant import llama_load_quant, low_resource_init +from coati.ray.detached_trainer_ppo import DetachedPPOTrainer +from coati.ray.experience_maker_holder import ExperienceMakerHolder +from coati.ray.utils import ( + get_actor_from_args, + get_critic_from_args, + get_receivers_per_sender, + get_reward_model_from_args, + get_strategy_from_args, +) +from torch.utils.data import DataLoader +from transformers import AutoConfig, AutoTokenizer +from transformers.modeling_utils import no_init_weights + + +def get_free_port(): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(('', 0)) + return s.getsockname()[1] + + +def get_local_ip(): + with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: + s.connect(('8.8.8.8', 80)) + return s.getsockname()[0] + + +def main(args): + master_addr = str(get_local_ip()) + # trainer_env_info + trainer_port = str(get_free_port()) + env_info_trainers = [{ + 'local_rank': '0', + 'rank': str(rank), + 'world_size': str(args.num_trainers), + 'master_port': trainer_port, + 'master_addr': master_addr + } for rank in range(args.num_trainers)] + + # maker_env_info + maker_port = str(get_free_port()) + env_info_maker = { + 'local_rank': '0', + 'rank': '0', + 'world_size': '1', + 'master_port': maker_port, + 'master_addr': master_addr + } + + # configure tokenizer + tokenizer = AutoTokenizer.from_pretrained(args.pretrain) + tokenizer.pad_token = tokenizer.eos_token + + def model_fn(): + actor_cfg = AutoConfig.from_pretrained(args.pretrain) + critic_cfg = AutoConfig.from_pretrained(args.critic_pretrain) + actor = get_actor_from_args(args.model, config=actor_cfg).requires_grad_(False).half().cuda() + critic = 
get_critic_from_args(args.critic_model, config=critic_cfg).requires_grad_(False).half().cuda() + reward_model = get_reward_model_from_args(args.critic_model, + config=critic_cfg).requires_grad_(False).half().cuda() + if args.initial_model_quant_ckpt is not None and args.model == 'llama': + # quantize initial model + with low_resource_init(), no_init_weights(): + initial_model = get_actor_from_args(args.model, config=actor_cfg) + initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits, + args.quant_group_size).cuda().requires_grad_(False) + else: + initial_model = get_actor_from_args(args.model, config=actor_cfg).requires_grad_(False).half().cuda() + return actor, critic, reward_model, initial_model + + # configure Experience Maker + experience_holder_ref = ExperienceMakerHolder.options(name="maker0", num_gpus=1, max_concurrency=2).remote( + detached_trainer_name_list=[f'trainer{i}' for i in range(args.num_trainers)], + strategy_fn=partial(get_strategy_from_args, args.maker_strategy), + model_fn=model_fn, + env_info=env_info_maker, + kl_coef=0.1, + debug=args.debug, + # sync_models_from_trainers=True, + # generation kwargs: + max_length=512, + do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + eval_performance=True, + use_cache=True, + ) + + def trainer_model_fn(): + actor = get_actor_from_args(args.model, config=AutoConfig.from_pretrained(args.pretrain)).half().cuda() + critic = get_critic_from_args(args.critic_model, + config=AutoConfig.from_pretrained(args.critic_pretrain)).half().cuda() + return actor, critic + + # configure Trainer + trainer_refs = [ + DetachedPPOTrainer.options(name=f"trainer{i}", num_gpus=1, max_concurrency=2).remote( + experience_maker_holder_name_list=[ + f'maker{x}' for x in get_receivers_per_sender(i, args.num_trainers, 1, allow_idle_sender=True) + ], + strategy_fn=partial(get_strategy_from_args, args.trainer_strategy), + model_fn=trainer_model_fn, + env_info=env_info_trainer, + train_batch_size=args.train_batch_size, + buffer_limit=16, + eval_performance=True, + debug=args.debug, + ) for i, env_info_trainer in enumerate(env_info_trainers) + ] + + dataset_size = args.experience_batch_size * 4 + + def data_gen_fn(): + input_ids = torch.randint(tokenizer.vocab_size, (256,), device=torch.cuda.current_device()) + attn_mask = torch.ones_like(input_ids) + return {'input_ids': input_ids, 'attention_mask': attn_mask} + + def build_dataloader(size): + dataset = [data_gen_fn() for _ in range(size)] + dataloader = DataLoader(dataset, batch_size=args.experience_batch_size) + return dataloader + + # uncomment this function if sync_models_from_trainers is True + # ray.get([ + # trainer_ref.sync_models_to_remote_makers.remote() + # for trainer_ref in trainer_refs + # ]) + + wait_tasks = [] + + wait_tasks.append( + experience_holder_ref.workingloop.remote(partial(build_dataloader, dataset_size), + num_steps=args.experience_steps)) + + total_steps = args.experience_batch_size * args.experience_steps // (args.num_trainers * args.train_batch_size) + for trainer_ref in trainer_refs: + wait_tasks.append(trainer_ref.fit.remote(total_steps, args.update_steps, args.train_epochs)) + + ray.get(wait_tasks) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--num_trainers', type=int, default=1) + parser.add_argument('--trainer_strategy', + choices=[ + 'naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2', 
'colossalai_gemini_cpu', + 'colossalai_zero2_cpu' + ], + default='naive') + parser.add_argument('--maker_strategy', choices=['naive'], default='naive') + parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) + parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) + parser.add_argument('--pretrain', type=str, default=None) + parser.add_argument('--critic_pretrain', type=str, default=None) + parser.add_argument('--experience_steps', type=int, default=4) + parser.add_argument('--experience_batch_size', type=int, default=8) + parser.add_argument('--train_epochs', type=int, default=1) + parser.add_argument('--update_steps', type=int, default=2) + parser.add_argument('--train_batch_size', type=int, default=8) + parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") + + parser.add_argument('--initial_model_quant_ckpt', type=str, default=None) + parser.add_argument('--quant_bits', type=int, default=4) + parser.add_argument('--quant_group_size', type=int, default=128) + parser.add_argument('--debug', action='store_true') + args = parser.parse_args() + ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": dict(os.environ)}) + main(args) diff --git a/applications/Chat/benchmarks/ray/mmmt_dummy.py b/applications/Chat/benchmarks/ray/mmmt_dummy.py new file mode 100644 index 000000000000..46a0062893b8 --- /dev/null +++ b/applications/Chat/benchmarks/ray/mmmt_dummy.py @@ -0,0 +1,189 @@ +import argparse +import os +import socket +from functools import partial + +import ray +import torch +from coati.quant import llama_load_quant, low_resource_init +from coati.ray.detached_trainer_ppo import DetachedPPOTrainer +from coati.ray.experience_maker_holder import ExperienceMakerHolder +from coati.ray.utils import ( + get_actor_from_args, + get_critic_from_args, + get_receivers_per_sender, + get_reward_model_from_args, + get_strategy_from_args, +) +from torch.utils.data import DataLoader +from transformers import AutoConfig, AutoTokenizer +from transformers.modeling_utils import no_init_weights + + +def get_free_port(): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(('', 0)) + return s.getsockname()[1] + + +def get_local_ip(): + with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: + s.connect(('8.8.8.8', 80)) + return s.getsockname()[0] + + +def main(args): + master_addr = str(get_local_ip()) + # trainer_env_info + trainer_port = str(get_free_port()) + env_info_trainers = [{ + 'local_rank': '0', + 'rank': str(rank), + 'world_size': str(args.num_trainers), + 'master_port': trainer_port, + 'master_addr': master_addr + } for rank in range(args.num_trainers)] + + # maker_env_info + maker_port = str(get_free_port()) + env_info_makers = [{ + 'local_rank': '0', + 'rank': str(rank), + 'world_size': str(args.num_makers), + 'master_port': maker_port, + 'master_addr': master_addr + } for rank in range(args.num_makers)] + + # configure tokenizer + tokenizer = AutoTokenizer.from_pretrained(args.pretrain) + tokenizer.pad_token = tokenizer.eos_token + + def model_fn(): + actor_cfg = AutoConfig.from_pretrained(args.pretrain) + critic_cfg = AutoConfig.from_pretrained(args.critic_pretrain) + actor = get_actor_from_args(args.model, config=actor_cfg).requires_grad_(False).half().cuda() + critic = get_critic_from_args(args.critic_model, config=critic_cfg).requires_grad_(False).half().cuda() + reward_model = get_reward_model_from_args(args.critic_model, + 
config=critic_cfg).requires_grad_(False).half().cuda() + if args.initial_model_quant_ckpt is not None and args.model == 'llama': + # quantize initial model + with low_resource_init(), no_init_weights(): + initial_model = get_actor_from_args(args.model, config=actor_cfg) + initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits, + args.quant_group_size).cuda().requires_grad_(False) + else: + initial_model = get_actor_from_args(args.model, config=actor_cfg).requires_grad_(False).half().cuda() + return actor, critic, reward_model, initial_model + + # configure Experience Maker + experience_holder_refs = [ + ExperienceMakerHolder.options(name=f"maker{i}", num_gpus=1, max_concurrency=2).remote( + detached_trainer_name_list=[ + f'trainer{x}' + for x in get_receivers_per_sender(i, args.num_makers, args.num_trainers, allow_idle_sender=False) + ], + strategy_fn=partial(get_strategy_from_args, args.maker_strategy), + model_fn=model_fn, + env_info=env_info_maker, + kl_coef=0.1, + debug=args.debug, + # sync_models_from_trainers=True, + # generation kwargs: + max_length=512, + do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + eval_performance=True, + use_cache=True, + ) + for i, env_info_maker in enumerate(env_info_makers) + ] + + def trainer_model_fn(): + actor = get_actor_from_args(args.model, config=AutoConfig.from_pretrained(args.pretrain)).half().cuda() + critic = get_critic_from_args(args.critic_model, + config=AutoConfig.from_pretrained(args.critic_pretrain)).half().cuda() + return actor, critic + + # configure Trainer + trainer_refs = [ + DetachedPPOTrainer.options(name=f"trainer{i}", num_gpus=1, max_concurrency=2).remote( + experience_maker_holder_name_list=[ + f"maker{x}" + for x in get_receivers_per_sender(i, args.num_trainers, args.num_makers, allow_idle_sender=True) + ], + strategy_fn=partial(get_strategy_from_args, args.trainer_strategy), + model_fn=trainer_model_fn, + env_info=env_info_trainer, + train_batch_size=args.train_batch_size, + buffer_limit=16, + eval_performance=True, + debug=args.debug, + ) + for i, env_info_trainer in enumerate(env_info_trainers) + ] + + dataset_size = args.experience_batch_size * 4 + + def data_gen_fn(): + input_ids = torch.randint(tokenizer.vocab_size, (256,), device=torch.cuda.current_device()) + attn_mask = torch.ones_like(input_ids) + return {'input_ids': input_ids, 'attention_mask': attn_mask} + + def build_dataloader(size): + dataset = [data_gen_fn() for _ in range(size)] + dataloader = DataLoader(dataset, batch_size=args.experience_batch_size) + return dataloader + + # uncomment this function if sync_models_from_trainers is True + # ray.get([ + # trainer_ref.sync_models_to_remote_makers.remote() + # for trainer_ref in trainer_refs + # ]) + + wait_tasks = [] + + for experience_holder_ref in experience_holder_refs: + wait_tasks.append( + experience_holder_ref.workingloop.remote(partial(build_dataloader, dataset_size), + num_steps=args.experience_steps)) + + total_steps = args.experience_batch_size * args.experience_steps * \ + args.num_makers // (args.num_trainers * args.train_batch_size) + for trainer_ref in trainer_refs: + wait_tasks.append(trainer_ref.fit.remote(total_steps, args.update_steps, args.train_epochs)) + + ray.get(wait_tasks) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--num_makers', type=int, default=1) + parser.add_argument('--num_trainers', type=int, default=1) + 
parser.add_argument('--trainer_strategy', + choices=[ + 'naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu', + 'colossalai_zero2_cpu' + ], + default='naive') + parser.add_argument('--maker_strategy', choices=['naive'], default='naive') + parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) + parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) + parser.add_argument('--pretrain', type=str, default=None) + parser.add_argument('--critic_pretrain', type=str, default=None) + parser.add_argument('--experience_steps', type=int, default=4) + parser.add_argument('--experience_batch_size', type=int, default=8) + parser.add_argument('--train_epochs', type=int, default=1) + parser.add_argument('--update_steps', type=int, default=2) + parser.add_argument('--train_batch_size', type=int, default=8) + parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") + + parser.add_argument('--initial_model_quant_ckpt', type=str, default=None) + parser.add_argument('--quant_bits', type=int, default=4) + parser.add_argument('--quant_group_size', type=int, default=128) + parser.add_argument('--debug', action='store_true') + args = parser.parse_args() + ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": dict(os.environ)}) + main(args) diff --git a/applications/Chat/coati/dataset/prompt_dataset.py b/applications/Chat/coati/dataset/prompt_dataset.py index 5858052c836a..0bdcbbc5928e 100644 --- a/applications/Chat/coati/dataset/prompt_dataset.py +++ b/applications/Chat/coati/dataset/prompt_dataset.py @@ -35,14 +35,14 @@ def __init__(self, logger.info(f"Limiting dataset to {max_datasets_size} examples.") list_data_dict = list_data_dict[:max_datasets_size] - for data_dict in list_data_dict: - token = tokenizer(data_dict["instruction"], - return_tensors='pt', - max_length=max_length, - padding='max_length', - truncation=True) - for k, tensor in token.items(): - self.keyed_prompt[k].extend(tensor.to(torch.cuda.current_device()).unbind()) + instructions = [data_dict["instruction"] for data_dict in list_data_dict] + tokens = tokenizer(instructions, + return_tensors='pt', + max_length=max_length, + padding='max_length', + truncation=True) + for k, tensor in tokens.items(): + self.keyed_prompt[k] = tensor.to(torch.cuda.current_device()).unbind() def __len__(self): return len(self.keyed_prompt["input_ids"]) diff --git a/applications/Chat/coati/dataset/sft_dataset.py b/applications/Chat/coati/dataset/sft_dataset.py index 3e2453468bbc..3702d00cc609 100644 --- a/applications/Chat/coati/dataset/sft_dataset.py +++ b/applications/Chat/coati/dataset/sft_dataset.py @@ -74,21 +74,18 @@ def __getitem__(self, idx): return dict(input_ids=self.input_ids[idx], labels=self.labels[idx]) -def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer, max_length: int) -> Dict: +def _tokenize_fn(strings: Sequence[str], + tokenizer: transformers.PreTrainedTokenizer, + max_length: int + ) -> Dict[str, torch.Tensor]: """Tokenize a list of strings.""" - tokenized_list = [ - tokenizer( - text, - return_tensors="pt", - padding="longest", - max_length=max_length, - truncation=True, - ) for text in strings - ] - input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list] - input_ids_lens = labels_lens = [ - tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list - ] + tokenized_list = tokenizer( + strings, 
return_tensors="pt", padding="longest", + max_length=max_length, truncation=True + ) + input_ids = labels = tokenized_list["input_ids"] + input_ids_lens = labels_lens = \ + tokenized_list["input_ids"].ne(tokenizer.pad_token_id).sum(dim=-1) return dict( input_ids=input_ids, labels=labels, @@ -105,7 +102,10 @@ def preprocess( ) -> Dict: """Preprocess the data by tokenizing.""" examples = [s + t for s, t in zip(sources, targets)] - examples_tokenized, sources_tokenized = [_tokenize_fn(strings, tokenizer, max_length) for strings in (examples, sources)] + examples_tokenized, sources_tokenized = [ + _tokenize_fn(strings, tokenizer, max_length) + for strings in (examples, sources) + ] input_ids = examples_tokenized["input_ids"] labels = copy.deepcopy(input_ids) for label, source_len in zip(labels, sources_tokenized["input_ids_lens"]): diff --git a/applications/Chat/coati/experience_maker/naive.py b/applications/Chat/coati/experience_maker/naive.py index 94546eeb28e7..e5bb029e63d0 100644 --- a/applications/Chat/coati/experience_maker/naive.py +++ b/applications/Chat/coati/experience_maker/naive.py @@ -1,5 +1,6 @@ import torch -from coati.models.utils import compute_reward, normalize +from coati.models.generation import generate_with_actor +from coati.models.utils import calc_action_log_probs, compute_reward, normalize from .base import Experience, ExperienceMaker @@ -16,13 +17,16 @@ def make_experience(self, input_ids: torch.Tensor, **generate_kwargs) -> Experie self.initial_model.eval() self.reward_model.eval() - sequences, attention_mask, action_mask = self.actor.generate(input_ids, + sequences, attention_mask, action_mask = generate_with_actor(self.actor, + input_ids, return_action_mask=True, **generate_kwargs) num_actions = action_mask.size(1) - action_log_probs = self.actor(sequences, num_actions, attention_mask) - base_action_log_probs = self.initial_model(sequences, num_actions, attention_mask) + actor_output = self.actor(sequences, attention_mask) + action_log_probs = calc_action_log_probs(actor_output, sequences, num_actions) + base_model_output = self.initial_model(sequences, attention_mask) + base_action_log_probs = calc_action_log_probs(base_model_output, sequences, num_actions) value = self.critic(sequences, action_mask, attention_mask) r = self.reward_model(sequences, attention_mask) reward = compute_reward(r, self.kl_coef, action_log_probs, base_action_log_probs, action_mask=action_mask) diff --git a/applications/Chat/coati/models/base/__init__.py b/applications/Chat/coati/models/base/__init__.py index fe4152f2b760..c5f748a0c85a 100644 --- a/applications/Chat/coati/models/base/__init__.py +++ b/applications/Chat/coati/models/base/__init__.py @@ -1,3 +1,5 @@ +from typing import Union + import torch.nn as nn from .actor import Actor @@ -5,10 +7,10 @@ from .reward_model import RewardModel -def get_base_model(model: nn.Module) -> nn.Module: +def get_base_model(model: Union[Actor, Critic, RewardModel]) -> nn.Module: """Get the base model of our wrapper classes. - For Actor, it's base model is ``actor.model`` and it's usually a ``transformers.PreTrainedModel``. - For Critic and RewardModel, it's base model is itself. + For Actor, Critic and RewardModel, return ``model.model``, + it's usually a ``transformers.PreTrainedModel``. 
Args: model (nn.Module): model to get base model from @@ -16,9 +18,9 @@ def get_base_model(model: nn.Module) -> nn.Module: Returns: nn.Module: the base model """ - if isinstance(model, Actor): - return model.get_base_model() - return model + assert isinstance(model, (Actor, Critic, RewardModel)), \ + f'Expect Actor, Critic or RewardModel, got {type(model)}, use unwrap_model first.' + return model.model __all__ = ['Actor', 'Critic', 'RewardModel', 'get_base_model'] diff --git a/applications/Chat/coati/models/base/actor.py b/applications/Chat/coati/models/base/actor.py index 71fbf7bbae7d..2034d5cc81d4 100644 --- a/applications/Chat/coati/models/base/actor.py +++ b/applications/Chat/coati/models/base/actor.py @@ -1,12 +1,9 @@ -from typing import Optional, Tuple, Union +from typing import Optional import torch import torch.nn as nn -import torch.nn.functional as F -from ..generation import generate from ..lora import LoRAModule -from ..utils import log_probs_from_logits class Actor(LoRAModule): @@ -24,42 +21,16 @@ def __init__(self, model: nn.Module, lora_rank: int = 0, lora_train_bias: str = self.model = model self.convert_to_lora() - @torch.no_grad() - def generate( - self, - input_ids: torch.Tensor, - return_action_mask: bool = True, - **kwargs - ) -> Union[Tuple[torch.LongTensor, torch.LongTensor], Tuple[torch.LongTensor, torch.LongTensor, torch.BoolTensor]]: - sequences = generate(self.model, input_ids, **kwargs) - attention_mask = None - pad_token_id = kwargs.get('pad_token_id', None) - if pad_token_id is not None: - attention_mask = sequences.not_equal(pad_token_id).to(dtype=torch.long, device=sequences.device) - if not return_action_mask: - return sequences, attention_mask, None - input_len = input_ids.size(1) - eos_token_id = kwargs.get('eos_token_id', None) - if eos_token_id is None: - action_mask = torch.ones_like(sequences, dtype=torch.bool) - else: - # left padding may be applied, only mask action - action_mask = (sequences[:, input_len:] == eos_token_id).cumsum(dim=-1) == 0 - action_mask = F.pad(action_mask, (1 + input_len, -1), value=True) # include eos token and input - action_mask[:, :input_len] = False - action_mask = action_mask[:, 1:] - return sequences, attention_mask, action_mask[:, -(sequences.size(1) - input_len):] - def forward(self, - sequences: torch.LongTensor, - num_actions: int, - attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor: - """Returns action log probs + input_ids: torch.LongTensor, + attention_mask: Optional[torch.Tensor] = None, + **model_kwargs, # HACK: `generate` method may pass more kwargs + ) -> torch.Tensor: + """Returns model output. 
""" - output = self.model(sequences, attention_mask=attention_mask) - logits = output['logits'] - log_probs = log_probs_from_logits(logits[:, :-1, :], sequences[:, 1:]) - return log_probs[:, -num_actions:] - - def get_base_model(self): - return self.model + output = self.model( + input_ids, + attention_mask=attention_mask, + **model_kwargs + ) + return output diff --git a/applications/Chat/coati/models/generation.py b/applications/Chat/coati/models/generation.py index f57c9458a271..0156e2284e52 100644 --- a/applications/Chat/coati/models/generation.py +++ b/applications/Chat/coati/models/generation.py @@ -1,8 +1,10 @@ -from typing import Any, Callable, Optional +from typing import Any, Callable, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn +import torch.nn.functional as F + try: from transformers.generation_logits_process import ( @@ -55,9 +57,8 @@ def sample(model: nn.Module, unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) for _ in range(input_ids.size(1), max_length): - model_inputs = prepare_inputs_fn(input_ids, **model_kwargs) if prepare_inputs_fn is not None else { - 'input_ids': input_ids - } + model_inputs = prepare_inputs_fn(input_ids, **model_kwargs) \ + if prepare_inputs_fn is not None else {'input_ids': input_ids} outputs = model(**model_inputs) next_token_logits = outputs['logits'][:, -1, :] @@ -144,3 +145,35 @@ def generate(model: nn.Module, raise NotImplementedError else: raise ValueError("Unsupported generation mode") + + +@torch.no_grad() +def generate_with_actor(actor_model: nn.Module, + input_ids: torch.Tensor, + return_action_mask: bool = True, + **kwargs + ) -> Union[Tuple[torch.LongTensor, torch.LongTensor], + Tuple[torch.LongTensor, torch.LongTensor, torch.BoolTensor]]: + """Generate token sequence with actor model. Refer to `generate` for more details. 
+ """ + # generate sequences + sequences = generate(actor_model, input_ids, **kwargs) + + # calculate auxiliary tensors + attention_mask = None + pad_token_id = kwargs.get('pad_token_id', None) + if pad_token_id is not None: + attention_mask = sequences.not_equal(pad_token_id).to(dtype=torch.long, device=sequences.device) + if not return_action_mask: + return sequences, attention_mask, None + input_len = input_ids.size(1) + eos_token_id = kwargs.get('eos_token_id', None) + if eos_token_id is None: + action_mask = torch.ones_like(sequences, dtype=torch.bool) + else: + # left padding may be applied, only mask action + action_mask = (sequences[:, input_len:] == eos_token_id).cumsum(dim=-1) == 0 + action_mask = F.pad(action_mask, (1 + input_len, -1), value=True) # include eos token and input + action_mask[:, :input_len] = False + action_mask = action_mask[:, 1:] + return sequences, attention_mask, action_mask[:, -(sequences.size(1) - input_len):] diff --git a/applications/Chat/coati/models/lora.py b/applications/Chat/coati/models/lora.py index 0533a60dc532..2a9059e6901e 100644 --- a/applications/Chat/coati/models/lora.py +++ b/applications/Chat/coati/models/lora.py @@ -61,7 +61,13 @@ def T(w): if self.merge_weights and self.merged: # Make sure that the weights are not merged if self.r > 0: - self.weight.data -= T(self.lora_B @ self.lora_A) * self.scaling + if not hasattr(self, "lora_A") or not hasattr(self, "lora_B"): + # FIXME(csric): temporary fix + self.lora_A = nn.Parameter(self.weight.new_empty((self.r, self.in_features))) + self.lora_B = nn.Parameter(self.weight.new_empty((self.out_features, self.r))) + self.reset_parameters() + else: + self.weight.data -= T(self.lora_B @ self.lora_A) * self.scaling self.merged = False def eval(self): diff --git a/applications/Chat/coati/models/utils.py b/applications/Chat/coati/models/utils.py index 0ff13181fcd2..b9f15f894a1f 100644 --- a/applications/Chat/coati/models/utils.py +++ b/applications/Chat/coati/models/utils.py @@ -46,6 +46,25 @@ def log_probs_from_logits(logits: torch.Tensor, labels: torch.Tensor) -> torch.T return log_probs_labels.squeeze(-1) +def calc_action_log_probs(output: torch.Tensor, + sequences: torch.LongTensor, + num_actions: int + ) -> torch.Tensor: + """Calculate action log probs. + + Args: + output (torch.Tensor): Output tensor of Actor.forward. + sequences (torch.LongTensor): Input sequences. + num_actions (int): Number of actions. + + Returns: + torch.Tensor: Action log probs. 
+ """ + logits = output['logits'] + log_probs = log_probs_from_logits(logits[:, :-1, :], sequences[:, 1:]) + return log_probs[:, -num_actions:] + + def masked_mean(tensor: torch.Tensor, mask: torch.Tensor, dim: int = 1) -> torch.Tensor: tensor = tensor * mask tensor = tensor.sum(dim=dim) diff --git a/applications/Chat/coati/quant/__init__.py b/applications/Chat/coati/quant/__init__.py new file mode 100644 index 000000000000..a65a78d07bb8 --- /dev/null +++ b/applications/Chat/coati/quant/__init__.py @@ -0,0 +1,7 @@ +from .llama_gptq import load_quant as llama_load_quant +from .utils import low_resource_init + +__all__ = [ + 'llama_load_quant', + 'low_resource_init', +] diff --git a/applications/Chat/coati/quant/llama_gptq/__init__.py b/applications/Chat/coati/quant/llama_gptq/__init__.py new file mode 100644 index 000000000000..51c8d6316290 --- /dev/null +++ b/applications/Chat/coati/quant/llama_gptq/__init__.py @@ -0,0 +1,5 @@ +from .loader import load_quant + +__all__ = [ + 'load_quant', +] diff --git a/applications/Chat/coati/quant/llama_gptq/loader.py b/applications/Chat/coati/quant/llama_gptq/loader.py new file mode 100644 index 000000000000..5353dc8a2ea3 --- /dev/null +++ b/applications/Chat/coati/quant/llama_gptq/loader.py @@ -0,0 +1,26 @@ +import torch +import torch.nn as nn + +from .model_utils import find_layers +from .quant import make_quant + + +def load_quant(model: nn.Module, checkpoint: str, wbits: int, groupsize: int): + model = model.eval() + layers = find_layers(model) + + # ignore lm head + layers = find_layers(model) + for name in ['lm_head']: + if name in layers: + del layers[name] + + make_quant(model, layers, wbits, groupsize) + + if checkpoint.endswith('.safetensors'): + from safetensors.torch import load_file as safe_load + model.load_state_dict(safe_load(checkpoint)) + else: + model.load_state_dict(torch.load(checkpoint)) + + return model diff --git a/applications/Chat/coati/quant/llama_gptq/model_utils.py b/applications/Chat/coati/quant/llama_gptq/model_utils.py new file mode 100644 index 000000000000..62db171abb52 --- /dev/null +++ b/applications/Chat/coati/quant/llama_gptq/model_utils.py @@ -0,0 +1,13 @@ +# copied from https://github.com/qwopqwop200/GPTQ-for-LLaMa/blob/past/modelutils.py + +import torch +import torch.nn as nn + + +def find_layers(module, layers=[nn.Conv2d, nn.Linear], name=''): + if type(module) in layers: + return {name: module} + res = {} + for name1, child in module.named_children(): + res.update(find_layers(child, layers=layers, name=name + '.' 
+ name1 if name != '' else name1)) + return res diff --git a/applications/Chat/coati/quant/llama_gptq/quant.py b/applications/Chat/coati/quant/llama_gptq/quant.py new file mode 100644 index 000000000000..f7d5b7ce4bd8 --- /dev/null +++ b/applications/Chat/coati/quant/llama_gptq/quant.py @@ -0,0 +1,283 @@ +# copied from https://github.com/qwopqwop200/GPTQ-for-LLaMa/blob/past/quant.py + +import math + +import numpy as np +import torch +import torch.nn as nn + + +def quantize(x, scale, zero, maxq): + q = torch.clamp(torch.round(x / scale) + zero, 0, maxq) + return scale * (q - zero) + + +class Quantizer(nn.Module): + + def __init__(self, shape=1): + super(Quantizer, self).__init__() + self.register_buffer('maxq', torch.tensor(0)) + self.register_buffer('scale', torch.zeros(shape)) + self.register_buffer('zero', torch.zeros(shape)) + + def configure(self, bits, perchannel=False, sym=True, mse=False, norm=2.4, grid=100, maxshrink=.8): + self.maxq = torch.tensor(2**bits - 1) + self.perchannel = perchannel + self.sym = sym + self.mse = mse + self.norm = norm + self.grid = grid + self.maxshrink = maxshrink + + def find_params(self, x, weight=False): + dev = x.device + self.maxq = self.maxq.to(dev) + + shape = x.shape + if self.perchannel: + if weight: + x = x.flatten(1) + else: + if len(shape) == 4: + x = x.permute([1, 0, 2, 3]) + x = x.flatten(1) + if len(shape) == 3: + x = x.reshape((-1, shape[-1])).t() + if len(shape) == 2: + x = x.t() + else: + x = x.flatten().unsqueeze(0) + + tmp = torch.zeros(x.shape[0], device=dev) + xmin = torch.minimum(x.min(1)[0], tmp) + xmax = torch.maximum(x.max(1)[0], tmp) + + if self.sym: + xmax = torch.maximum(torch.abs(xmin), xmax) + tmp = xmin < 0 + if torch.any(tmp): + xmin[tmp] = -xmax[tmp] + tmp = (xmin == 0) & (xmax == 0) + xmin[tmp] = -1 + xmax[tmp] = +1 + + self.scale = (xmax - xmin) / self.maxq + if self.sym: + self.zero = torch.full_like(self.scale, (self.maxq + 1) / 2) + else: + self.zero = torch.round(-xmin / self.scale) + + if self.mse: + best = torch.full([x.shape[0]], float('inf'), device=dev) + for i in range(int(self.maxshrink * self.grid)): + p = 1 - i / self.grid + xmin1 = p * xmin + xmax1 = p * xmax + scale1 = (xmax1 - xmin1) / self.maxq + zero1 = torch.round(-xmin1 / scale1) if not self.sym else self.zero + q = quantize(x, scale1.unsqueeze(1), zero1.unsqueeze(1), self.maxq) + q -= x + q.abs_() + q.pow_(self.norm) + err = torch.sum(q, 1) + tmp = err < best + if torch.any(tmp): + best[tmp] = err[tmp] + self.scale[tmp] = scale1[tmp] + self.zero[tmp] = zero1[tmp] + if not self.perchannel: + if weight: + tmp = shape[0] + else: + tmp = shape[1] if len(shape) != 3 else shape[2] + self.scale = self.scale.repeat(tmp) + self.zero = self.zero.repeat(tmp) + + if weight: + shape = [-1] + [1] * (len(shape) - 1) + self.scale = self.scale.reshape(shape) + self.zero = self.zero.reshape(shape) + return + if len(shape) == 4: + self.scale = self.scale.reshape((1, -1, 1, 1)) + self.zero = self.zero.reshape((1, -1, 1, 1)) + if len(shape) == 3: + self.scale = self.scale.reshape((1, 1, -1)) + self.zero = self.zero.reshape((1, 1, -1)) + if len(shape) == 2: + self.scale = self.scale.unsqueeze(0) + self.zero = self.zero.unsqueeze(0) + + def quantize(self, x): + if self.ready(): + return quantize(x, self.scale, self.zero, self.maxq) + return x + + def enabled(self): + return self.maxq > 0 + + def ready(self): + return torch.all(self.scale != 0) + + +try: + import quant_cuda +except: + print('CUDA extension not installed.') + +# Assumes layer is perfectly divisible into 256 
* 256 blocks + + +class QuantLinear(nn.Module): + + def __init__(self, bits, groupsize, infeatures, outfeatures): + super().__init__() + if bits not in [2, 3, 4, 8]: + raise NotImplementedError("Only 2,3,4,8 bits are supported.") + self.infeatures = infeatures + self.outfeatures = outfeatures + self.bits = bits + if groupsize != -1 and groupsize < 32 and groupsize != int(math.pow(2, int(math.log2(groupsize)))): + raise NotImplementedError("groupsize supports powers of 2 greater than 32. (e.g. : 32,64,128,etc)") + groupsize = groupsize if groupsize != -1 else infeatures + self.groupsize = groupsize + self.register_buffer( + 'qzeros', torch.zeros((math.ceil(infeatures / groupsize), outfeatures // 256 * (bits * 8)), + dtype=torch.int)) + self.register_buffer('scales', torch.zeros((math.ceil(infeatures / groupsize), outfeatures))) + self.register_buffer('bias', torch.zeros(outfeatures)) + self.register_buffer('qweight', torch.zeros((infeatures // 256 * (bits * 8), outfeatures), dtype=torch.int)) + self._initialized_quant_state = False + + def pack(self, linear, scales, zeros): + scales = scales.t().contiguous() + zeros = zeros.t().contiguous() + scale_zeros = zeros * scales + self.scales = scales.clone() + if linear.bias is not None: + self.bias = linear.bias.clone() + + intweight = [] + for idx in range(self.infeatures): + g_idx = idx // self.groupsize + intweight.append( + torch.round((linear.weight.data[:, idx] + scale_zeros[g_idx]) / self.scales[g_idx]).to(torch.int)[:, + None]) + intweight = torch.cat(intweight, dim=1) + intweight = intweight.t().contiguous() + intweight = intweight.numpy().astype(np.uint32) + qweight = np.zeros((intweight.shape[0] // 256 * (self.bits * 8), intweight.shape[1]), dtype=np.uint32) + i = 0 + row = 0 + while row < qweight.shape[0]: + if self.bits in [2, 4, 8]: + for j in range(i, i + (32 // self.bits)): + qweight[row] |= intweight[j] << (self.bits * (j - i)) + i += 32 // self.bits + row += 1 + elif self.bits == 3: + for j in range(i, i + 10): + qweight[row] |= intweight[j] << (3 * (j - i)) + i += 10 + qweight[row] |= intweight[i] << 30 + row += 1 + qweight[row] |= (intweight[i] >> 2) & 1 + i += 1 + for j in range(i, i + 10): + qweight[row] |= intweight[j] << (3 * (j - i) + 1) + i += 10 + qweight[row] |= intweight[i] << 31 + row += 1 + qweight[row] |= (intweight[i] >> 1) & 0x3 + i += 1 + for j in range(i, i + 10): + qweight[row] |= intweight[j] << (3 * (j - i) + 2) + i += 10 + row += 1 + else: + raise NotImplementedError("Only 2,3,4,8 bits are supported.") + + qweight = qweight.astype(np.int32) + self.qweight = torch.from_numpy(qweight) + + zeros -= 1 + zeros = zeros.numpy().astype(np.uint32) + qzeros = np.zeros((zeros.shape[0], zeros.shape[1] // 256 * (self.bits * 8)), dtype=np.uint32) + i = 0 + col = 0 + while col < qzeros.shape[1]: + if self.bits in [2, 4, 8]: + for j in range(i, i + (32 // self.bits)): + qzeros[:, col] |= zeros[:, j] << (self.bits * (j - i)) + i += 32 // self.bits + col += 1 + elif self.bits == 3: + for j in range(i, i + 10): + qzeros[:, col] |= zeros[:, j] << (3 * (j - i)) + i += 10 + qzeros[:, col] |= zeros[:, i] << 30 + col += 1 + qzeros[:, col] |= (zeros[:, i] >> 2) & 1 + i += 1 + for j in range(i, i + 10): + qzeros[:, col] |= zeros[:, j] << (3 * (j - i) + 1) + i += 10 + qzeros[:, col] |= zeros[:, i] << 31 + col += 1 + qzeros[:, col] |= (zeros[:, i] >> 1) & 0x3 + i += 1 + for j in range(i, i + 10): + qzeros[:, col] |= zeros[:, j] << (3 * (j - i) + 2) + i += 10 + col += 1 + else: + raise NotImplementedError("Only 2,3,4,8 bits are 
supported.") + + qzeros = qzeros.astype(np.int32) + self.qzeros = torch.from_numpy(qzeros) + + def forward(self, x): + intermediate_dtype = torch.float32 + + if not self._initialized_quant_state: + # Do we even have a bias? Check for at least one non-zero element. + if self.bias is not None and bool(torch.any(self.bias != 0)): + # Then make sure it's the right type. + self.bias.data = self.bias.data.to(intermediate_dtype) + else: + self.bias = None + + outshape = list(x.shape) + outshape[-1] = self.outfeatures + x = x.reshape(-1, x.shape[-1]) + if self.bias is None: + y = torch.zeros(x.shape[0], outshape[-1], dtype=intermediate_dtype, device=x.device) + else: + y = self.bias.clone().repeat(x.shape[0], 1) + + output_dtype = x.dtype + x = x.to(intermediate_dtype) + if self.bits == 2: + quant_cuda.vecquant2matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize) + elif self.bits == 3: + quant_cuda.vecquant3matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize) + elif self.bits == 4: + quant_cuda.vecquant4matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize) + elif self.bits == 8: + quant_cuda.vecquant8matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize) + else: + raise NotImplementedError("Only 2,3,4,8 bits are supported.") + y = y.to(output_dtype) + return y.reshape(outshape) + + +def make_quant(module, names, bits, groupsize, name=''): + if isinstance(module, QuantLinear): + return + for attr in dir(module): + tmp = getattr(module, attr) + name1 = name + '.' + attr if name != '' else attr + if name1 in names: + setattr(module, attr, QuantLinear(bits, groupsize, tmp.in_features, tmp.out_features)) + for name1, child in module.named_children(): + make_quant(child, names, bits, groupsize, name + '.' + name1 if name != '' else name1) diff --git a/applications/Chat/coati/quant/utils.py b/applications/Chat/coati/quant/utils.py new file mode 100644 index 000000000000..01b8cff0add1 --- /dev/null +++ b/applications/Chat/coati/quant/utils.py @@ -0,0 +1,28 @@ +from contextlib import contextmanager + +import torch + + +def _noop(*args, **kwargs): + pass + + +@contextmanager +def low_resource_init(): + """This context manager disables weight initialization and sets the default float dtype to half. + """ + old_kaiming_uniform_ = torch.nn.init.kaiming_uniform_ + old_uniform_ = torch.nn.init.uniform_ + old_normal_ = torch.nn.init.normal_ + dtype = torch.get_default_dtype() + try: + torch.nn.init.kaiming_uniform_ = _noop + torch.nn.init.uniform_ = _noop + torch.nn.init.normal_ = _noop + torch.set_default_dtype(torch.half) + yield + finally: + torch.nn.init.kaiming_uniform_ = old_kaiming_uniform_ + torch.nn.init.uniform_ = old_uniform_ + torch.nn.init.normal_ = old_normal_ + torch.set_default_dtype(dtype) diff --git a/applications/Chat/coati/ray/README.md b/applications/Chat/coati/ray/README.md new file mode 100644 index 000000000000..228155a6855b --- /dev/null +++ b/applications/Chat/coati/ray/README.md @@ -0,0 +1,160 @@ +# Distributed PPO Training on Stage 3 + +## Detach Experience Makers and Trainers + +We can completely separate the trainers and makers. + +
+*(architecture diagram: experience makers and trainers run as separate Ray actors, exchanging experience and updated model parameters)*
+ +- The experience maker performs inference, produces experience, and remotely delivers it to the trainer (1). +- The trainer consumes experience to train models, and periodically transmits new model parameters to the maker (2.1, 2.2). +- Using an experience buffer to overlap transmission and computing. + +In this manner, each node will work continuously without model idle time, and different optimization strategies can be applied for inference and training to meet the needs of speed or storage. It is also helpful for scalability. + +`DetachedPPOTrainer` and `ExperienceMakerHolder` are Ray Actors (distinguished from Actor Model), representing Trainer and Experience Maker on the graph above, respectively. + +[More about Ray Core](https://docs.ray.io/en/latest/ray-core/walkthrough.html) + +## Usage + +See examples at `ColossalAI/application/Chat/examples/ray` + +### Setup Makers + +- define makers' environment variables : + + ```python + env_info_makers = [{ + 'local_rank': '0', + 'rank': str(rank), + 'world_size': str(num_makers), + 'master_port': maker_port, + 'master_addr': master_addr + } for rank in range(num_makers)] + + ``` +- define maker models : + ```python + def model_fn(): + actor = get_actor_from_args(...) + critic = get_critic_from_args(...) + reward_model = get_reward_model_from_args(...) + initial_model = get_actor_from_args(...) + return actor, critic, reward_model, initial_model + + ``` +- set experience_holder_refs : + + ```python + experience_holder_refs = [ + ExperienceMakerHolder.options( + name=f"maker_{i}", + num_gpus=1, + max_concurrency=2 + ).remote( + detached_trainer_name_list=[f"trainer_{x}" for x in target_trainers(...)], + model_fn=model_fn, + ...) + for i, env_info_maker in enumerate(env_info_makers) + ] + ``` + The names in the `detached_trainer_name_list` refer to the target trainers that the maker should send experience to. + We set a trainer's name the same as a maker, by `.options(name="str")`. See below. + +### Setup Trainers + +- define trainers' environment variables : + ```python + env_info_trainers = [{ + 'local_rank': '0', + 'rank': str(rank), + 'world_size': str(num_trainers), + 'master_port': trainer_port, + 'master_addr': master_addr + } for rank in range(num_trainers)] + ``` +- define trainer models : + + ```python + def trainer_model_fn(): + actor = get_actor_from_args(...) + critic = get_critic_from_args(...) + return actor, critic + ``` +- set trainer_refs : + ```python + trainer_refs = [ + DetachedPPOTrainer.options( + name=f"trainer{i}", + num_gpus=1, + max_concurrency=2 + ).remote( + experience_maker_holder_name_list=[f"maker{x}" for x in target_makers(...)], + model_fn = trainer_model_fn(), + ...) + for i, env_info_trainer in enumerate(env_info_trainers) + ] + ``` + The names in `experience_maker_holder_name_list` refer to the target makers that the trainer should send updated models to. + By setting `detached_trainer_name_list` and `experience_maker_holder_name_list`, we can customize the transmission graph. 
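+In the benchmark scripts (`benchmarks/ray/*_dummy.py`), these name lists are built with `coati.ray.utils.get_receivers_per_sender`. As a rough illustration only (the real helper may implement a different policy), a simple round-robin pairing could look like this:
+
+```python
+def round_robin_receivers(sender_idx: int, num_senders: int, num_receivers: int):
+    """Illustrative sketch: map one sender to its receivers in round-robin order."""
+    if num_receivers >= num_senders:
+        # each sender feeds every num_senders-th receiver
+        return [i for i in range(num_receivers) if i % num_senders == sender_idx]
+    # more senders than receivers: several senders share one receiver
+    return [sender_idx % num_receivers]
+
+# e.g. 2 makers sending to 4 trainers: maker0 -> trainer0/trainer2, maker1 -> trainer1/trainer3
+detached_trainer_name_list = [f"trainer{i}" for i in round_robin_receivers(0, 2, 4)]
+```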
+ +### Launch Jobs +- define data_loader : + ```python + def data_loader_fn(): + return torch.utils.data.DataLoader(dataset=dataset) + + ``` +- launch makers : + ```python + wait_tasks = [] + for experience_holder_ref in experience_holder_refs: + wait_tasks.append( + experience_holder_ref.workingloop.remote(data_loader_fn(), + num_steps=experience_steps)) + + ``` + +- launch trainers : + ```python + for trainer_ref in trainer_refs: + wait_tasks.append(trainer_ref.fit.remote(total_steps, update_steps, train_epochs)) + ``` + +- wait for completion : + ```python + ray.get(wait_tasks) + ``` + +## Flexible Structure + +We can deploy different strategies to makers and trainers. Here are a few possible topologies. + +### 2 Makers 1 Trainer +
+*(diagram omitted)*
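+With this topology, following the naming convention described above, both makers list the single trainer as their target and the trainer lists both makers (the names must match whatever you passed to `.options(name=...)`):
+
+```python
+# makers: maker0 and maker1 both send experience to trainer0
+detached_trainer_name_list = ["trainer0"]
+# trainer0 sends updated model parameters back to both makers
+experience_maker_holder_name_list = ["maker0", "maker1"]
+```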
+ + +### 2 Makers 2 Trainers +
+*(diagram omitted)*
+ +### Maker Inference Quantization +
+*(diagram omitted)*
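+The benchmark scripts load a GPTQ-quantized LLaMA checkpoint for the maker's initial model inside `model_fn`. Below is a condensed sketch adapted from them; the paths, bit width and group size are placeholders (see `--initial_model_quant_ckpt`, `--quant_bits` and `--quant_group_size` in `benchmarks/ray/*_dummy.py`):
+
+```python
+from coati.quant import llama_load_quant, low_resource_init
+from coati.ray.utils import get_actor_from_args
+from transformers import AutoConfig
+from transformers.modeling_utils import no_init_weights
+
+actor_cfg = AutoConfig.from_pretrained('path/to/llama')    # placeholder path
+with low_resource_init(), no_init_weights():
+    # build the model skeleton in half precision without running weight init
+    initial_model = get_actor_from_args('llama', config=actor_cfg)
+# load the GPTQ checkpoint (4 bits, group size 128) into the wrapped transformers model
+initial_model.model = llama_load_quant(initial_model.model, 'path/to/llama-4bit.pt',
+                                       4, 128).cuda().requires_grad_(False)
+```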
+ +### Tensor Parallel + +
+*(diagram omitted)*
+ +## TODO + +- [ ] Support LoRA +- [ ] Support TP & PP diff --git a/applications/Chat/coati/ray/__init__.py b/applications/Chat/coati/ray/__init__.py index 5802c05bc03f..e69de29bb2d1 100644 --- a/applications/Chat/coati/ray/__init__.py +++ b/applications/Chat/coati/ray/__init__.py @@ -1,2 +0,0 @@ -from .src.detached_replay_buffer import DetachedReplayBuffer -from .src.detached_trainer_ppo import DetachedPPOTrainer diff --git a/applications/Chat/coati/ray/callbacks/__init__.py b/applications/Chat/coati/ray/callbacks/__init__.py new file mode 100644 index 000000000000..5f5e488f383e --- /dev/null +++ b/applications/Chat/coati/ray/callbacks/__init__.py @@ -0,0 +1,9 @@ +from .base import MakerCallback, TrainerCallback +from .performance_evaluator import ExperienceMakerPerformanceEvaluator, TrainerPerformanceEvaluator + +__all__ = [ + "TrainerCallback", + "MakerCallback", + "ExperienceMakerPerformanceEvaluator", + "TrainerPerformanceEvaluator", +] diff --git a/applications/Chat/coati/ray/callbacks/base.py b/applications/Chat/coati/ray/callbacks/base.py new file mode 100644 index 000000000000..3306150a41ff --- /dev/null +++ b/applications/Chat/coati/ray/callbacks/base.py @@ -0,0 +1,66 @@ +from abc import ABC + +from coati.experience_maker import Experience + + +class TrainerCallback(ABC): + """ + Base callback class. It defines the interface for callbacks. + """ + + def on_fit_start(self) -> None: + pass + + def on_fit_end(self) -> None: + pass + + def on_episode_start(self, episode: int) -> None: + pass + + def on_episode_end(self, episode: int) -> None: + pass + + def on_epoch_start(self, epoch: int) -> None: + pass + + def on_epoch_end(self, epoch: int) -> None: + pass + + def on_batch_start(self) -> None: + pass + + def on_batch_end(self, metrics: dict, experience: Experience) -> None: + pass + + def on_update_start(self) -> None: + pass + + def on_update_end(self) -> None: + pass + + +class MakerCallback(ABC): + + def on_loop_start(self) -> None: + pass + + def on_loop_end(self) -> None: + pass + + def on_make_experience_start(self) -> None: + pass + + def on_make_experience_end(self, experience: Experience) -> None: + pass + + def on_send_start(self) -> None: + pass + + def on_send_end(self) -> None: + pass + + def on_batch_start(self) -> None: + pass + + def on_batch_end(self) -> None: + pass diff --git a/applications/Chat/coati/ray/callbacks/performance_evaluator.py b/applications/Chat/coati/ray/callbacks/performance_evaluator.py new file mode 100644 index 000000000000..cd3517609e7a --- /dev/null +++ b/applications/Chat/coati/ray/callbacks/performance_evaluator.py @@ -0,0 +1,212 @@ +from time import time +from typing import Optional + +import torch +import torch.distributed as dist +from coati.experience_maker import Experience + +from .base import MakerCallback, TrainerCallback + + +def get_world_size() -> int: + if dist.is_initialized(): + return dist.get_world_size() + return 1 + + +def print_rank_0(*args, **kwargs) -> None: + if not dist.is_initialized() or dist.get_rank() == 0: + print(*args, **kwargs) + + +@torch.no_grad() +def all_reduce_mean(x: float, world_size: int) -> float: + if world_size == 1: + return x + tensor = torch.tensor([x], device=torch.cuda.current_device()) + dist.all_reduce(tensor) + tensor = tensor / world_size + return tensor.item() + + +class Timer: + + def __init__(self) -> None: + self.start_time: Optional[float] = None + self.duration: float = 0. 
+ + def start(self) -> None: + self.start_time = time() + + def end(self) -> None: + self.duration += time() - self.start_time + + def reset(self) -> None: + self.duration = 0. + + +class ExperienceMakerPerformanceEvaluator(MakerCallback): + + def __init__(self, actor_num_params: int, critic_num_params: int, initial_model_num_params: int, + reward_model_num_params: int) -> None: + super().__init__() + self.world_size = get_world_size() + self.actor_num_params = actor_num_params + self.critic_num_params = critic_num_params + self.initial_model_num_params = initial_model_num_params + self.reward_model_num_params = reward_model_num_params + + self.batch_timer = Timer() + self.send_timer = Timer() + self.make_experience_timer = Timer() + self.total_samples: int = 0 + self.make_experience_flop: int = 0 + + print_rank_0( + f'ExperienceMaker actor: {actor_num_params/1024**3:.2f}B, critic: {critic_num_params/1024**3:.2f}B, initial model: {initial_model_num_params/1024**3:.2f}B, reward model: {reward_model_num_params/1024**3:.2f}B, world size: {self.world_size}' + ) + + def on_make_experience_start(self) -> None: + self.make_experience_timer.start() + + def on_make_experience_end(self, experience: Experience) -> None: + self.make_experience_timer.end() + + batch_size, seq_len = experience.sequences.shape + + self.total_samples += batch_size + + # actor generate + num_actions = experience.action_mask.size(1) + input_len = seq_len - num_actions + total_seq_len = (input_len + seq_len - 1) * num_actions / 2 + self.make_experience_flop += self.actor_num_params * batch_size * total_seq_len * 2 + # actor forward + self.make_experience_flop += self.actor_num_params * batch_size * seq_len * 2 + # critic forward + self.make_experience_flop += self.critic_num_params * batch_size * seq_len * 2 + # initial model forward + self.make_experience_flop += self.initial_model_num_params * batch_size * seq_len * 2 + # reward model forward + self.make_experience_flop += self.reward_model_num_params * batch_size * seq_len * 2 + + def on_send_start(self) -> None: + self.send_timer.start() + + def on_send_end(self) -> None: + self.send_timer.end() + + def on_batch_start(self) -> None: + self.batch_timer.start() + + def on_batch_end(self) -> None: + self.batch_timer.end() + + def on_loop_end(self) -> None: + avg_make_experience_duration = all_reduce_mean(self.make_experience_timer.duration, self.world_size) + avg_overall_duration = all_reduce_mean(self.batch_timer.duration, self.world_size) + avg_send_duration = all_reduce_mean(self.send_timer.duration, self.world_size) + + avg_throughput = self.total_samples * self.world_size / (avg_overall_duration + 1e-12) + avg_make_experience_tflops = self.make_experience_flop / 1e12 / (avg_make_experience_duration + 1e-12) + avg_time_per_sample = (avg_overall_duration + 1e-12) / (self.total_samples * self.world_size) + avg_make_experience_time_per_sample = (avg_make_experience_duration + 1e-12) / \ + (self.total_samples * self.world_size) + avg_send_time_per_sample = (avg_send_duration + 1e-12) / (self.total_samples * self.world_size) + + print_rank_0( + 'Making Experience Performance Summary:\n' + f'Throughput: {avg_throughput:.3f} samples/sec\n' + + f'TFLOPS per GPU: {avg_make_experience_tflops:.3f}\n' + + f'Sample time (overall): {avg_time_per_sample:.3f} s\n' + + f'Sample time (make experience): {avg_make_experience_time_per_sample:.3f} s, {avg_make_experience_time_per_sample/avg_time_per_sample*100:.2f}%\n' + + + f'Sample time (send): {avg_send_time_per_sample:.3f} s, 
{avg_send_time_per_sample/avg_time_per_sample*100:.2f}%\n' + ) + + +class TrainerPerformanceEvaluator(TrainerCallback): + + def __init__(self, + actor_num_params: int, + critic_num_params: int, + enable_grad_checkpoint: bool = False, + ignore_first_episodes: int = 1) -> None: + super().__init__() + self.world_size = get_world_size() + self.actor_num_params = actor_num_params + self.critic_num_params = critic_num_params + self.enable_grad_checkpoint = enable_grad_checkpoint + self.ignore_first_episodes = ignore_first_episodes + self.ignore_this_episode = False + + self.episode_timer = Timer() + self.batch_timer = Timer() + self.update_timer = Timer() + self.total_samples: int = 0 + self.learn_flop: int = 0 + + print_rank_0( + f'Trainer actor: {self.actor_num_params/1024**3:.2f}B, critic: {self.critic_num_params/1024**3:.2f}B, world size: {self.world_size}' + ) + + def on_episode_start(self, episodes: int) -> None: + self.ignore_this_episode = episodes < self.ignore_first_episodes + if self.ignore_this_episode: + return + self.episode_timer.start() + + def on_episode_end(self, episodes: int) -> None: + if self.ignore_this_episode: + return + self.episode_timer.end() + + def on_batch_start(self) -> None: + if self.ignore_this_episode: + return + self.batch_timer.start() + + def on_batch_end(self, metrics: dict, experience: Experience) -> None: + if self.ignore_this_episode: + return + self.batch_timer.end() + + batch_size, seq_len = experience.sequences.shape + + self.total_samples += batch_size + + # actor forward-backward, 3 means forward(1) + backward(2) + self.learn_flop += self.actor_num_params * batch_size * seq_len * 2 * (3 + int(self.enable_grad_checkpoint)) + # critic forward-backward + self.learn_flop += self.critic_num_params * batch_size * seq_len * 2 * (3 + int(self.enable_grad_checkpoint)) + + def on_update_start(self) -> None: + if self.ignore_this_episode: + return + self.update_timer.start() + + def on_update_end(self) -> None: + if self.ignore_this_episode: + return + self.update_timer.end() + + def on_fit_end(self) -> None: + if self.total_samples == 0: + print_rank_0('No samples are collected, skip trainer performance evaluation') + return + avg_train_duration = all_reduce_mean(self.batch_timer.duration, self.world_size) + avg_update_duration = all_reduce_mean(self.update_timer.duration, self.world_size) + avg_episode_duration = all_reduce_mean(self.episode_timer.duration, self.world_size) + + avg_throughput = self.total_samples * self.world_size / (avg_episode_duration + 1e-12) + avg_learn_tflops = self.learn_flop / 1e12 / (avg_train_duration + 1e-12) + avg_time_per_sample = (avg_episode_duration + 1e-12) / (self.total_samples * self.world_size) + avg_train_time_per_sample = (avg_train_duration + 1e-12) / (self.total_samples * self.world_size) + avg_update_time_per_sample = (avg_update_duration + 1e-12) / (self.total_samples * self.world_size) + + print_rank_0( + 'Learning Performance Summary:\n' + f'Throughput: {avg_throughput:.3f} samples/sec\n' + + f'TFLOPS per GPU: {avg_learn_tflops:.3f}\n' + f'Sample time (overall): {avg_time_per_sample:.3f} s\n' + + f'Sample time (train): {avg_train_time_per_sample:.3f} s, {avg_train_time_per_sample/avg_time_per_sample*100:.2f}%\n' + + + f'Sample time (update): {avg_update_time_per_sample:.3f} s, {avg_update_time_per_sample/avg_time_per_sample*100:.2f}%\n' + ) diff --git a/applications/Chat/coati/ray/src/detached_replay_buffer.py b/applications/Chat/coati/ray/detached_replay_buffer.py similarity index 62% rename from 
applications/Chat/coati/ray/src/detached_replay_buffer.py rename to applications/Chat/coati/ray/detached_replay_buffer.py index 18c8db388e88..2f765281178a 100644 --- a/applications/Chat/coati/ray/src/detached_replay_buffer.py +++ b/applications/Chat/coati/ray/detached_replay_buffer.py @@ -1,22 +1,24 @@ -import torch +import asyncio +import copy import random -from typing import List, Any -# from torch.multiprocessing import Queue -from ray.util.queue import Queue +from threading import Lock +from typing import Any, List + import ray -import asyncio +import torch from coati.experience_maker.base import Experience -from coati.replay_buffer.utils import BufferItem, make_experience_batch, split_experience_batch from coati.replay_buffer import ReplayBuffer -from threading import Lock -import copy +from coati.replay_buffer.utils import BufferItem, make_experience_batch, split_experience_batch +# from torch.multiprocessing import Queue +from ray.util.queue import Queue + class DetachedReplayBuffer: ''' - Detached replay buffer. Share Experience across workers on the same node. - Therefore a trainer node is expected to have only one instance. + Detached replay buffer. Share Experience across workers on the same node. + Therefore a trainer node is expected to have only one instance. It is ExperienceMakerHolder's duty to call append(exp) method, remotely. - + Args: sample_batch_size: Batch size when sampling. Exp won't enqueue until they formed a batch. tp_world_size: Number of workers in the same tp group @@ -24,31 +26,25 @@ class DetachedReplayBuffer: cpu_offload: Whether to offload experience to cpu when sampling. Defaults to True. ''' - def __init__(self, sample_batch_size: int, tp_world_size: int = 1, limit : int = 0, cpu_offload: bool = True) -> None: - self.cpu_offload = cpu_offload + def __init__(self, sample_batch_size: int, limit: int = 0) -> None: self.sample_batch_size = sample_batch_size self.limit = limit - self.items = Queue(self.limit, actor_options={"num_cpus":1}) - self.batch_collector : List[BufferItem] = [] + self.items = Queue(self.limit, actor_options={"num_cpus": 1}) + self.batch_collector: List[BufferItem] = [] + @torch.no_grad() + def append(self, experience: Experience) -> None: ''' - Workers in the same tp group share this buffer and need same sample for one step. - Therefore a held_sample should be returned tp_world_size times before it could be dropped. - worker_state records whether a worker got the held_sample + Expected to be called remotely. ''' - self.tp_world_size = tp_world_size - self.worker_state = [False] * self.tp_world_size - self.held_sample = None - self._worker_state_lock = Lock() + items = split_experience_batch(experience) + self.extend(items) @torch.no_grad() - def append(self, experience: Experience) -> None: + def extend(self, items: List[BufferItem]) -> None: ''' Expected to be called remotely. 
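+        Items are buffered until a full batch of sample_batch_size is collected, then enqueued as one batch.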
''' - if self.cpu_offload: - experience.to_device(torch.device('cpu')) - items = split_experience_batch(experience) self.batch_collector.extend(items) while len(self.batch_collector) >= self.sample_batch_size: items = self.batch_collector[:self.sample_batch_size] @@ -62,19 +58,10 @@ def clear(self) -> None: self.items = Queue(self.limit) self.worker_state = [False] * self.tp_world_size self.batch_collector = [] - + @torch.no_grad() - def sample(self, worker_rank = 0, to_device = "cpu") -> Experience: - self._worker_state_lock.acquire() - if not any(self.worker_state): - self.held_sample = self._sample_and_erase() - self.worker_state[worker_rank] = True - if all(self.worker_state): - self.worker_state = [False] * self.tp_world_size - ret = self.held_sample - else: - ret = copy.deepcopy(self.held_sample) - self._worker_state_lock.release() + def sample(self, worker_rank=0, to_device="cpu") -> Experience: + ret = self._sample_and_erase() ret.to_device(to_device) return ret @@ -85,4 +72,4 @@ def _sample_and_erase(self) -> Experience: def get_length(self) -> int: ret = self.items.qsize() - return ret \ No newline at end of file + return ret diff --git a/applications/Chat/coati/ray/detached_trainer_base.py b/applications/Chat/coati/ray/detached_trainer_base.py new file mode 100644 index 000000000000..ac2d35e9da19 --- /dev/null +++ b/applications/Chat/coati/ray/detached_trainer_base.py @@ -0,0 +1,179 @@ +import os +from abc import ABC, abstractmethod +from typing import Any, Callable, Dict, Iterable, List, Optional, Union + +import ray +import torch +from coati.experience_maker import Experience +from coati.replay_buffer.utils import BufferItem +from torch.utils.data import DataLoader +from tqdm import tqdm + +from .callbacks import TrainerCallback +from .detached_replay_buffer import DetachedReplayBuffer +from .utils import is_rank_0 + + +class DetachedTrainer(ABC): + ''' + Base class for detached rlhf trainers. + 'detach' means that the experience maker is detached compared to a normal Trainer. + Please set name attribute during init: + >>> trainer = DetachedTrainer.options(..., name = "xxx", ...).remote() + So an ExperienceMakerHolder can reach the detached_replay_buffer by Actor's name. 
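+    Makers push experiences into this trainer's DetachedReplayBuffer through the remote
+    methods buffer_append()/buffer_extend(); fit() consumes them and periodically calls
+    _update_remote_makers() to send updated weights back to the makers.
+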
+ Args: + detached_strategy (DetachedStrategy): the strategy to use for training + detached_replay_buffer_ref (ObjectRef[DetachedReplayBuffer]): the replay buffer to use for training + data_loader_pin_memory (bool, defaults to True): whether to pin memory for data loader + callbacks (List[Callback], defaults to []): the callbacks to call during training process + generate_kwargs (dict, optional): the kwargs to use while model generating + + ''' + + def __init__(self, + experience_maker_holder_name_list: List[str], + train_batch_size: int = 8, + buffer_limit: int = 0, + dataloader_pin_memory: bool = True, + callbacks: List[TrainerCallback] = [], + debug: bool = False) -> None: + super().__init__() + self.detached_replay_buffer = DetachedReplayBuffer(train_batch_size, limit=buffer_limit) + self.dataloader_pin_memory = dataloader_pin_memory + self.callbacks = callbacks + self.target_holder_name_list = experience_maker_holder_name_list + self.target_holder_list = [] + self._is_target_holder_initialized = False + self._debug = debug + + def update_target_holder_list(self): + # as the length of target_holder_list may be zero, we need to check it by a bool flag + if not self._is_target_holder_initialized: + for name in self.target_holder_name_list: + self.target_holder_list.append(ray.get_actor(name, namespace=os.environ["RAY_NAMESPACE"])) + self._is_target_holder_initialized = True + + @abstractmethod + def _update_remote_makers(self, fully_update: bool = False, **kwargs): + pass + + def sync_models_to_remote_makers(self, **kwargs): + self._update_remote_makers(fully_update=True, **kwargs) + + @abstractmethod + def training_step(self, experience: Experience) -> Dict[str, Any]: + pass + + def _learn(self, update_steps: int, train_epochs: int) -> None: + data = [] + # warmup + pbar = tqdm(range(update_steps), desc=f'Train epoch [1/{train_epochs}]', disable=not is_rank_0()) + self._on_epoch_start(0) + self._learn_epoch(pbar, data) + self._on_epoch_end(0) + # item is already a batch + dataloader = DataLoader(data, + batch_size=1, + shuffle=True, + pin_memory=self.dataloader_pin_memory, + collate_fn=lambda x: x[0]) + for epoch in range(1, train_epochs): + pbar = tqdm(dataloader, desc=f'Train epoch [{epoch + 1}/{train_epochs}]', disable=not is_rank_0()) + self._on_epoch_start(epoch) + self._learn_epoch(pbar, data) + self._on_epoch_end(epoch) + + def _learn_epoch(self, pbar: tqdm, data: List[Experience]) -> None: + is_warmup = len(data) == 0 + for x in pbar: + if self._debug: + print("[trainer] training step") + # sample a batch and then train to avoid waiting + experience = x if not is_warmup else self._buffer_sample() + experience.to_device(torch.cuda.current_device()) + self._on_batch_start() + metrics = self.training_step(experience) + self._on_batch_end(metrics, experience) + + if self._debug: + print("[trainer] step over") + experience.to_device("cpu") + if is_warmup: + data.append(experience) + pbar.set_postfix(metrics) + + def fit(self, total_steps: int, update_steps: int, train_epochs: int = 1) -> None: + self._on_fit_start() + for i in tqdm(range(total_steps // update_steps), desc='Trainer', disable=not is_rank_0()): + self._on_episode_start(i) + self._learn(update_steps, train_epochs) + self._on_update_start() + self._update_remote_makers() + self._on_update_end() + self._on_episode_end(i) + self._on_fit_end() + + @ray.method(concurrency_group="buffer_length") + def buffer_get_length(self): + # called by ExperienceMakerHolder + if self._debug: + print("[trainer] telling length") + return 
self.detached_replay_buffer.get_length() + + @ray.method(concurrency_group="buffer_append") + def buffer_append(self, experience: Experience): + # called by ExperienceMakerHolder + if self._debug: + print(f"[trainer] receiving exp.") + self.detached_replay_buffer.append(experience) + + @ray.method(concurrency_group="buffer_append") + def buffer_extend(self, items: List[BufferItem]): + # called by ExperienceMakerHolder + if self._debug: + print(f"[trainer] receiving exp.") + self.detached_replay_buffer.extend(items) + + @ray.method(concurrency_group="buffer_sample") + def _buffer_sample(self): + return self.detached_replay_buffer.sample() + + def _on_fit_start(self) -> None: + for callback in self.callbacks: + callback.on_fit_start() + + def _on_fit_end(self) -> None: + for callback in self.callbacks: + callback.on_fit_end() + + def _on_episode_start(self, episode: int) -> None: + for callback in self.callbacks: + callback.on_episode_start(episode) + + def _on_episode_end(self, episode: int) -> None: + for callback in self.callbacks: + callback.on_episode_end(episode) + + def _on_epoch_start(self, epoch: int) -> None: + for callback in self.callbacks: + callback.on_epoch_start(epoch) + + def _on_epoch_end(self, epoch: int) -> None: + for callback in self.callbacks: + callback.on_epoch_end(epoch) + + def _on_batch_start(self) -> None: + for callback in self.callbacks: + callback.on_batch_start() + + def _on_batch_end(self, metrics: dict, experience: Experience) -> None: + for callback in self.callbacks: + callback.on_batch_end(metrics, experience) + + def _on_update_start(self) -> None: + for callback in self.callbacks: + callback.on_update_start() + + def _on_update_end(self) -> None: + for callback in self.callbacks: + callback.on_update_end() diff --git a/applications/Chat/coati/ray/src/detached_trainer_ppo.py b/applications/Chat/coati/ray/detached_trainer_ppo.py similarity index 55% rename from applications/Chat/coati/ray/src/detached_trainer_ppo.py rename to applications/Chat/coati/ray/detached_trainer_ppo.py index 838e82d07f4a..5f0032716f93 100644 --- a/applications/Chat/coati/ray/src/detached_trainer_ppo.py +++ b/applications/Chat/coati/ray/detached_trainer_ppo.py @@ -1,24 +1,38 @@ -from typing import Any, Callable, Dict, List, Optional -import torch -from torch.optim import Adam +from typing import Any, Callable, Dict, List, Optional, Tuple +import ray +import torch from coati.experience_maker import Experience, NaiveExperienceMaker from coati.models.base import Actor, Critic -from coati.models.generation_utils import update_model_kwargs_fn from coati.models.loss import PolicyLoss, ValueLoss -from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy, Strategy from coati.trainer.callbacks import Callback +from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy, Strategy +from torch.optim import Adam from colossalai.nn.optimizer import HybridAdam -import ray - - -from .utils import is_rank_0, get_cuda_actor_critic_from_args, get_strategy_from_args, set_dist_env +from .callbacks import TrainerCallback, TrainerPerformanceEvaluator from .detached_trainer_base import DetachedTrainer - - -@ray.remote(concurrency_groups={"buffer_length": 1, "buffer_append":1, "buffer_sample":1,"model_io": 1, "compute": 1}) +from .lora_constructor import LoRAConstructor +from .utils import ( + get_actor_from_args, + get_critic_from_args, + get_model_numel, + get_rank, + get_strategy_from_args, + is_rank_0, + set_dist_env, + state_dict_to, +) + + 
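+# The concurrency groups below cap how many calls of each kind may run at the same time inside
+# this Ray actor, so buffer I/O, weight transfer ("model_io") and training ("compute") do not
+# block one another.
+# Rough construction sketch (names are illustrative only):
+#   DetachedPPOTrainer.options(name="trainer1", num_gpus=1).remote(
+#       experience_maker_holder_name_list=["maker1"],
+#       strategy_fn=lambda: NaiveStrategy(),
+#       model_fn=lambda: (actor, critic))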
+@ray.remote(concurrency_groups={ + "buffer_length": 1, + "buffer_append": 1, + "buffer_sample": 1, + "model_io": 1, + "compute": 1 +}) class DetachedPPOTrainer(DetachedTrainer): ''' Detached Trainer for PPO algorithm @@ -40,86 +54,102 @@ class DetachedPPOTrainer(DetachedTrainer): generate_kwargs (dict, optional): the kwargs to use while model generating ''' - def __init__(self, - experience_maker_holder_name_list: List[str], - strategy: str, - model: str, - env_info: Dict[str, str] = None, - pretrained: str = None, - lora_rank: int = 0, - train_batch_size: int = 8, - buffer_limit: int = 0, - buffer_cpu_offload: bool = True, - eps_clip: float = 0.2, - value_clip: float = 0.4, - experience_batch_size: int = 8, - max_epochs: int = 10, - dataloader_pin_memory: bool = True, - callbacks: List[Callback] = [], - **generate_kwargs) -> None: + def __init__( + self, + experience_maker_holder_name_list: List[str], + strategy_fn: Callable[[], Strategy], + model_fn: Callable[[], Tuple[Actor, Critic]], + env_info: Dict[str, str] = None, + train_batch_size: int = 8, + buffer_limit: int = 0, + eps_clip: float = 0.2, + value_clip: float = 0.4, + dataloader_pin_memory: bool = True, + callbacks: List[TrainerCallback] = [], + eval_performance: bool = False, + debug: bool = False, + update_lora_weights: bool = False, + ) -> None: # set environment variables if env_info: set_dist_env(env_info=env_info) # configure strategy - self.strategy = get_strategy_from_args(strategy) + self.strategy = strategy_fn() # configure models, loss and optimizers with self.strategy.model_init_context(): - self.actor, self.critic = get_cuda_actor_critic_from_args(model, pretrained, lora_rank) + self.actor, self.critic = model_fn() - if strategy != 'colossalai_gemini': - self.actor.to(torch.float16).to(torch.cuda.current_device()) - self.critic.to(torch.float16).to(torch.cuda.current_device()) + if eval_performance: + actor_numel = get_model_numel(self.actor) + critic_numel = get_model_numel(self.critic) + evaluator = TrainerPerformanceEvaluator(actor_numel, critic_numel) + callbacks = callbacks + [evaluator] - if strategy.startswith('colossalai'): - self.actor_optim = HybridAdam(self.actor.parameters(), lr=5e-6) - self.critic_optim = HybridAdam(self.critic.parameters(), lr=5e-6) + if isinstance(self.strategy, ColossalAIStrategy): + self.actor_optim = HybridAdam(self.actor.parameters(), lr=1e-7) + self.critic_optim = HybridAdam(self.critic.parameters(), lr=1e-7) else: - self.actor_optim = Adam(self.actor.parameters(), lr=5e-6) - self.critic_optim = Adam(self.critic.parameters(), lr=5e-6) + self.actor_optim = Adam(self.actor.parameters(), lr=1e-7) + self.critic_optim = Adam(self.critic.parameters(), lr=1e-7) (self.actor, self.actor_optim), (self.critic, self.critic_optim) = \ self.strategy.prepare((self.actor, self.actor_optim), (self.critic, self.critic_optim)) - generate_kwargs = _set_default_generate_kwargs(self.strategy, generate_kwargs, self.actor) + # configure trainer self.actor_loss_fn = PolicyLoss(eps_clip) self.critic_loss_fn = ValueLoss(value_clip) super().__init__(experience_maker_holder_name_list, train_batch_size=train_batch_size, buffer_limit=buffer_limit, - buffer_cpu_offload=buffer_cpu_offload, - experience_batch_size=experience_batch_size, - max_epochs=max_epochs, dataloader_pin_memory=dataloader_pin_memory, callbacks=callbacks, - **generate_kwargs) + debug=debug) + if self._debug: + print(f'[trainer{get_rank()}] will send state dict to {experience_maker_holder_name_list}') + + self._update_lora_weights = 
update_lora_weights @ray.method(concurrency_group="model_io") - def _update_remote_makers(self): + @torch.no_grad() + def _update_remote_makers(self, fully_update: bool = False, **config): # TODO: balance duties - if is_rank_0(): - self.update_target_holder_list(self.target_holder_name_list) + if not fully_update: + config['requires_grad_only'] = True + self.update_target_holder_list() + # mark start, ensure order + tasks = [] + for target_holder in self.target_holder_list: + tasks.append(target_holder.update_experience_maker.remote(chunk_start=True, fully_update=fully_update)) + ray.get(tasks) + # sending loop + tasks = [] + + for state_dict_shard in self._get_model_state_dict_shard(self.actor, fully_update=fully_update, **config): for target_holder in self.target_holder_list: - # TODO: reduce malloc - with torch.no_grad(): - ray.get(target_holder.update_experience_maker.remote(self._get_unwrapped_actor(), self._get_unwrapped_critic())) - - @ray.method(concurrency_group="model_io") - def initialize_remote_makers(self): - # TODO: balance duties - if is_rank_0(): - self.update_target_holder_list(self.target_holder_name_list) + tasks.append( + target_holder.update_experience_maker.remote( + new_actor_state_dict=state_dict_shard, + new_actor_lora_config_dict=self._get_model_lora_config_dict(self.actor), + fully_update=fully_update)) + # sending loop + for state_dict_shard in self._get_model_state_dict_shard(self.critic, fully_update=fully_update, **config): for target_holder in self.target_holder_list: - # TODO: reduce malloc - with torch.no_grad(): - ray.get(target_holder.initialize_experience_maker.remote(self._get_unwrapped_actor(), self._get_unwrapped_critic())) + tasks.append( + target_holder.update_experience_maker.remote( + new_critic_state_dict=state_dict_shard, + new_critic_lora_config_dict=self._get_model_lora_config_dict(self.critic), + fully_update=fully_update)) + ray.get(tasks) + # mark end + for target_holder in self.target_holder_list: + target_holder.update_experience_maker.remote(chunk_end=True, fully_update=fully_update) @ray.method(concurrency_group="compute") def training_step(self, experience: Experience) -> Dict[str, float]: self.actor.train() self.critic.train() - experience.to_device(torch.cuda.current_device()) num_actions = experience.action_mask.size(1) action_log_probs = self.actor(experience.sequences, num_actions, attention_mask=experience.attention_mask) actor_loss = self.actor_loss_fn(action_log_probs, @@ -155,38 +185,16 @@ def strategy_save_actor_optim(self, path: str, only_rank0: bool = False) -> None def strategy_save_critic_optim(self, path: str, only_rank0: bool = False) -> None: self.strategy.save_optimizer(self.critic_optim, path, only_rank0) - def _get_unwrapped_actor(self): - if False: - pass - elif isinstance(self.strategy, ColossalAIStrategy): - ret = Actor(self.strategy._unwrap_model(self.actor)) - return ret - elif isinstance(self.strategy, DDPStrategy): - return Actor(self.strategy._unwrap_actor(self.actor)) - elif isinstance(self.strategy, NaiveStrategy): - return self.actor - - def _get_unwrapped_critic(self): - if False: - pass - elif isinstance(self.strategy, ColossalAIStrategy): - ret = self.strategy._unwrap_model(self.critic) - return ret - elif isinstance(self.strategy, DDPStrategy): - return self.critic.module - elif isinstance(self.strategy, NaiveStrategy): - return self.critic - - -def _set_default_generate_kwargs(strategy: Strategy, generate_kwargs: dict, actor: Actor) -> None: - origin_model = strategy._unwrap_actor(actor) - 
new_kwargs = {**generate_kwargs} - # use huggingface models method directly - if 'prepare_inputs_fn' not in generate_kwargs and hasattr(origin_model, 'prepare_inputs_for_generation'): - new_kwargs['prepare_inputs_fn'] = origin_model.prepare_inputs_for_generation - - if 'update_model_kwargs_fn' not in generate_kwargs: - new_kwargs['update_model_kwargs_fn'] = update_model_kwargs_fn - - return new_kwargs - \ No newline at end of file + def _get_model_state_dict_shard(self, model: torch.nn.Module, fully_update=False, **config): + for state_dict in self.strategy.get_model_state_dict_shard(model, **config): + if not self._update_lora_weights or fully_update: + yield state_dict_to(state_dict) + else: + state_dict_lora, _ = LoRAConstructor.filter_state_dict_lora(state_dict) + yield state_dict_to(state_dict_lora) + + def _get_model_lora_config_dict(self, model: torch.nn.Module): + if not self._update_lora_weights: + return None + unwrapped_model = self.strategy.unwrap_model(model) + return LoRAConstructor.extract_lora_config(unwrapped_model) diff --git a/applications/Chat/coati/ray/example/1m1t.py b/applications/Chat/coati/ray/example/1m1t.py deleted file mode 100644 index a6527370505b..000000000000 --- a/applications/Chat/coati/ray/example/1m1t.py +++ /dev/null @@ -1,153 +0,0 @@ -import argparse -from copy import deepcopy - -import pandas as pd -import torch -from coati.trainer import PPOTrainer - - -from coati.ray.src.experience_maker_holder import ExperienceMakerHolder -from coati.ray.src.detached_trainer_ppo import DetachedPPOTrainer - -from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy -from coati.experience_maker import NaiveExperienceMaker -from torch.optim import Adam -from transformers import AutoTokenizer, BloomTokenizerFast -from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer - -from colossalai.nn.optimizer import HybridAdam - -import ray -import os -import socket - -def get_free_port(): - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.bind(('', 0)) - return s.getsockname()[1] - - -def get_local_ip(): - with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: - s.connect(('8.8.8.8', 80)) - return s.getsockname()[0] - -def main(args): - master_addr = str(get_local_ip()) - # trainer_env_info - trainer_port = str(get_free_port()) - env_info_trainer = {'local_rank' : '0', - 'rank' : '0', - 'world_size' : '1', - 'master_port' : trainer_port, - 'master_addr' : master_addr} - - # maker_env_info - maker_port = str(get_free_port()) - env_info_maker = {'local_rank' : '0', - 'rank' : '0', - 'world_size' : '1', - 'master_port' : maker_port, - 'master_addr' : master_addr} - - # configure tokenizer - if args.model == 'gpt2': - tokenizer = GPT2Tokenizer.from_pretrained('gpt2') - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'bloom': - tokenizer = BloomTokenizerFast.from_pretrained(args.pretrain) - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'opt': - tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") - else: - raise ValueError(f'Unsupported model "{args.model}"') - - # configure Trainer - trainer_ref = DetachedPPOTrainer.options(name="trainer1", num_gpus=1, max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1"], - strategy=args.trainer_strategy, - model=args.model, - env_info = env_info_trainer, - pretrained=args.pretrain, - lora_rank=args.lora_rank, - train_batch_size=args.train_batch_size, - buffer_limit=16, - experience_batch_size=args.experience_batch_size, - 
max_epochs=args.max_epochs, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - # configure Experience Maker - experience_holder_ref = ExperienceMakerHolder.options(name="maker1", num_gpus=1, max_concurrency=2).remote( - detached_trainer_name_list=["trainer1"], - strategy=args.maker_strategy, - env_info = env_info_maker, - experience_batch_size=args.experience_batch_size, - kl_coef=0.1, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - # trainer send its actor and critic to experience holders. - ray.get(trainer_ref.initialize_remote_makers.remote()) - - # configure sampler - dataset = pd.read_csv(args.prompt_path)['prompt'] - - def tokenize_fn(texts): - # MUST padding to max length to ensure inputs of all ranks have the same length - # Different length may lead to hang when using gemini, as different generation steps - batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) - return {k: v.cuda() for k, v in batch.items()} - - trainer_done_ref = trainer_ref.fit.remote(num_episodes=args.num_episodes, max_timesteps=args.max_timesteps, update_timesteps=args.update_timesteps) - num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * args.max_epochs + 3 # +3 for fault tolerance - maker_done_ref = experience_holder_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - - ray.get([trainer_done_ref, maker_done_ref]) - - # save model checkpoint after fitting - trainer_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) - # save optimizer checkpoint on all ranks - if args.need_optim_ckpt: - trainer_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()), - only_rank0=False) - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('prompt_path') - parser.add_argument('--trainer_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--maker_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt']) - parser.add_argument('--pretrain', type=str, default=None) - parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts.pt') - parser.add_argument('--need_optim_ckpt', type=bool, default=False) - parser.add_argument('--num_episodes', type=int, default=10) - parser.add_argument('--max_timesteps', type=int, default=10) - parser.add_argument('--update_timesteps', type=int, default=10) - parser.add_argument('--max_epochs', type=int, default=5) - parser.add_argument('--train_batch_size', type=int, default=8) - parser.add_argument('--experience_batch_size', type=int, default=8) - parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") - - parser.add_argument('--debug', action='store_true') - args = parser.parse_args() - ray.init(namespace=os.environ["RAY_NAMESPACE"]) - main(args) diff --git a/applications/Chat/coati/ray/example/1m1t.sh b/applications/Chat/coati/ray/example/1m1t.sh deleted file mode 100644 index f7c5054c800e..000000000000 --- a/applications/Chat/coati/ray/example/1m1t.sh +++ /dev/null @@ -1,23 +0,0 @@ 
-set_n_least_used_CUDA_VISIBLE_DEVICES() { - local n=${1:-"9999"} - echo "GPU Memory Usage:" - local FIRST_N_GPU_IDS=$(nvidia-smi --query-gpu=memory.used --format=csv \ - | tail -n +2 \ - | nl -v 0 \ - | tee /dev/tty \ - | sort -g -k 2 \ - | awk '{print $1}' \ - | head -n $n) - export CUDA_VISIBLE_DEVICES=$(echo $FIRST_N_GPU_IDS | sed 's/ /,/g') - echo "Now CUDA_VISIBLE_DEVICES is set to:" - echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES" -} - -set_n_least_used_CUDA_VISIBLE_DEVICES 2 - -export RAY_NAMESPACE="admin" - -python 1m1t.py "/path/to/prompts.csv" \ - --trainer_strategy colossalai_zero2 --maker_strategy naive --lora_rank 2 --pretrain "facebook/opt-350m" --model 'opt' \ - --num_episodes 10 --max_timesteps 10 --update_timesteps 10 \ - --max_epochs 10 --debug diff --git a/applications/Chat/coati/ray/example/1m2t.py b/applications/Chat/coati/ray/example/1m2t.py deleted file mode 100644 index 3883c364a8e0..000000000000 --- a/applications/Chat/coati/ray/example/1m2t.py +++ /dev/null @@ -1,186 +0,0 @@ -import argparse -from copy import deepcopy - -import pandas as pd -import torch -from coati.trainer import PPOTrainer - - -from coati.ray.src.experience_maker_holder import ExperienceMakerHolder -from coati.ray.src.detached_trainer_ppo import DetachedPPOTrainer - -from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy -from coati.experience_maker import NaiveExperienceMaker -from torch.optim import Adam -from transformers import AutoTokenizer, BloomTokenizerFast -from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer - -from colossalai.nn.optimizer import HybridAdam - -import ray -import os -import socket - - -def get_free_port(): - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.bind(('', 0)) - return s.getsockname()[1] - - -def get_local_ip(): - with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: - s.connect(('8.8.8.8', 80)) - return s.getsockname()[0] - -def main(args): - master_addr = str(get_local_ip()) - # trainer_env_info - trainer_port = str(get_free_port()) - env_info_trainer_1 = {'local_rank' : '0', - 'rank' : '0', - 'world_size' : '2', - 'master_port' : trainer_port, - 'master_addr' : master_addr} - env_info_trainer_2 = {'local_rank' : '0', - 'rank' : '1', - 'world_size' : '2', - 'master_port' : trainer_port, - 'master_addr' : master_addr} - # maker_env_info - maker_port = str(get_free_port()) - env_info_maker_1 = {'local_rank' : '0', - 'rank' : '0', - 'world_size' : '2', - 'master_port' : maker_port, - 'master_addr' : master_addr} - print([env_info_trainer_1, - env_info_trainer_2, - env_info_maker_1]) - ray.init(dashboard_port = 1145) - # configure tokenizer - if args.model == 'gpt2': - tokenizer = GPT2Tokenizer.from_pretrained('gpt2') - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'bloom': - tokenizer = BloomTokenizerFast.from_pretrained(args.pretrain) - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'opt': - tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") - else: - raise ValueError(f'Unsupported model "{args.model}"') - - # configure Trainer - trainer_1_ref = DetachedPPOTrainer.options(name="trainer1", namespace=os.environ["RAY_NAMESPACE"], num_gpus=1, max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1"], - strategy=args.trainer_strategy, - model=args.model, - env_info=env_info_trainer_1, - pretrained=args.pretrain, - lora_rank=args.lora_rank, - train_batch_size=args.train_batch_size, - buffer_limit=16, - 
experience_batch_size=args.experience_batch_size, - max_epochs=args.max_epochs, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - trainer_2_ref = DetachedPPOTrainer.options(name="trainer2", namespace=os.environ["RAY_NAMESPACE"], num_gpus=1, max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1"], - strategy=args.trainer_strategy, - model=args.model, - env_info=env_info_trainer_2, - pretrained=args.pretrain, - lora_rank=args.lora_rank, - train_batch_size=args.train_batch_size, - buffer_limit=16, - experience_batch_size=args.experience_batch_size, - max_epochs=args.max_epochs, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug= args.debug, - ) - - # configure Experience Maker - experience_holder_1_ref = ExperienceMakerHolder.options(name="maker1", namespace=os.environ["RAY_NAMESPACE"], num_gpus=1, max_concurrency=2).remote( - detached_trainer_name_list=["trainer1", "trainer2"], - strategy=args.maker_strategy, - env_info=env_info_maker_1, - experience_batch_size=args.experience_batch_size, - kl_coef=0.1, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - # trainer send its actor and critic to experience holders. - # TODO: balance duty - ray.get(trainer_1_ref.initialize_remote_makers.remote()) - - # configure sampler - dataset = pd.read_csv(args.prompt_path)['prompt'] - - def tokenize_fn(texts): - # MUST padding to max length to ensure inputs of all ranks have the same length - # Different length may lead to hang when using gemini, as different generation steps - batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) - return {k: v.cuda() for k, v in batch.items()} - - trainer_1_done_ref = trainer_1_ref.fit.remote(num_episodes=args.num_episodes, max_timesteps=args.max_timesteps, update_timesteps=args.update_timesteps) - trainer_2_done_ref = trainer_2_ref.fit.remote(num_episodes=args.num_episodes, max_timesteps=args.max_timesteps, update_timesteps=args.update_timesteps) - num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * args.max_epochs * 2 + 3 # +3 for fault tolerance - maker_1_done_ref = experience_holder_1_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - - ray.get([trainer_1_done_ref, trainer_2_done_ref, maker_1_done_ref]) - # save model checkpoint after fitting - trainer_1_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) - trainer_2_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) - # save optimizer checkpoint on all ranks - if args.need_optim_ckpt: - trainer_1_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()), - only_rank0=False) - trainer_2_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()), - only_rank0=False) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('prompt_path') - parser.add_argument('--trainer_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--maker_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], 
- default='naive') - parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt']) - parser.add_argument('--pretrain', type=str, default=None) - parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts.pt') - parser.add_argument('--need_optim_ckpt', type=bool, default=False) - parser.add_argument('--num_episodes', type=int, default=10) - parser.add_argument('--max_timesteps', type=int, default=10) - parser.add_argument('--update_timesteps', type=int, default=10) - parser.add_argument('--max_epochs', type=int, default=5) - parser.add_argument('--train_batch_size', type=int, default=8) - parser.add_argument('--experience_batch_size', type=int, default=8) - parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") - - parser.add_argument('--debug', action='store_true') - args = parser.parse_args() - main(args) diff --git a/applications/Chat/coati/ray/example/1m2t.sh b/applications/Chat/coati/ray/example/1m2t.sh deleted file mode 100644 index 669f4141026c..000000000000 --- a/applications/Chat/coati/ray/example/1m2t.sh +++ /dev/null @@ -1,23 +0,0 @@ -set_n_least_used_CUDA_VISIBLE_DEVICES() { - local n=${1:-"9999"} - echo "GPU Memory Usage:" - local FIRST_N_GPU_IDS=$(nvidia-smi --query-gpu=memory.used --format=csv \ - | tail -n +2 \ - | nl -v 0 \ - | tee /dev/tty \ - | sort -g -k 2 \ - | awk '{print $1}' \ - | head -n $n) - export CUDA_VISIBLE_DEVICES=$(echo $FIRST_N_GPU_IDS | sed 's/ /,/g') - echo "Now CUDA_VISIBLE_DEVICES is set to:" - echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES" -} - -set_n_least_used_CUDA_VISIBLE_DEVICES 2 - -export RAY_NAMESPACE="admin" - -python 1m2t.py "/path/to/prompts.csv" --model gpt2 \ - --maker_strategy naive --trainer_strategy ddp --lora_rank 2 \ - --num_episodes 10 --max_timesteps 10 --update_timesteps 10 \ - --max_epochs 10 #--debug \ No newline at end of file diff --git a/applications/Chat/coati/ray/example/2m1t.py b/applications/Chat/coati/ray/example/2m1t.py deleted file mode 100644 index b655de1ab1fa..000000000000 --- a/applications/Chat/coati/ray/example/2m1t.py +++ /dev/null @@ -1,140 +0,0 @@ -import argparse -from copy import deepcopy - -import pandas as pd -import torch -from coati.trainer import PPOTrainer - - -from coati.ray.src.experience_maker_holder import ExperienceMakerHolder -from coati.ray.src.detached_trainer_ppo import DetachedPPOTrainer - -from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy -from coati.experience_maker import NaiveExperienceMaker -from torch.optim import Adam -from transformers import AutoTokenizer, BloomTokenizerFast -from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer - -from colossalai.nn.optimizer import HybridAdam - -import ray -import os -import socket - - -def main(args): - # configure tokenizer - if args.model == 'gpt2': - tokenizer = GPT2Tokenizer.from_pretrained('gpt2') - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'bloom': - tokenizer = BloomTokenizerFast.from_pretrained(args.pretrain) - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'opt': - tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") - else: - raise ValueError(f'Unsupported model "{args.model}"') - - # configure Trainer - trainer_ref = DetachedPPOTrainer.options(name="trainer1", num_gpus=1, max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1", "maker2"], - strategy=args.trainer_strategy, - model=args.model, - pretrained=args.pretrain, - 
lora_rank=args.lora_rank, - train_batch_size=args.train_batch_size, - buffer_limit=16, - experience_batch_size=args.experience_batch_size, - max_epochs=args.max_epochs, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - # configure Experience Maker - experience_holder_1_ref = ExperienceMakerHolder.options(name="maker1", num_gpus=1, max_concurrency=2).remote( - detached_trainer_name_list=["trainer1"], - strategy=args.maker_strategy, - experience_batch_size=args.experience_batch_size, - kl_coef=0.1, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - experience_holder_2_ref = ExperienceMakerHolder.options(name="maker2", num_gpus=1, max_concurrency=2).remote( - detached_trainer_name_list=["trainer1"], - strategy=args.maker_strategy, - experience_batch_size=args.experience_batch_size, - kl_coef=0.1, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - # trainer send its actor and critic to experience holders. - ray.get(trainer_ref.initialize_remote_makers.remote()) - - # configure sampler - dataset = pd.read_csv(args.prompt_path)['prompt'] - - def tokenize_fn(texts): - # MUST padding to max length to ensure inputs of all ranks have the same length - # Different length may lead to hang when using gemini, as different generation steps - batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) - return {k: v.cuda() for k, v in batch.items()} - - trainer_done_ref = trainer_ref.fit.remote(num_episodes=args.num_episodes, max_timesteps=args.max_timesteps, update_timesteps=args.update_timesteps) - num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * args.max_epochs // 2 + 3 # +3 for fault tolerance - maker_1_done_ref = experience_holder_1_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - maker_2_done_ref = experience_holder_2_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - - ray.get([trainer_done_ref, maker_1_done_ref, maker_2_done_ref]) - - # save model checkpoint after fitting - trainer_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) - # save optimizer checkpoint on all ranks - if args.need_optim_ckpt: - trainer_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()), - only_rank0=False) - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('prompt_path') - parser.add_argument('--trainer_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--maker_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt']) - parser.add_argument('--pretrain', type=str, default=None) - parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts.pt') - parser.add_argument('--need_optim_ckpt', type=bool, default=False) - parser.add_argument('--num_episodes', type=int, default=10) - parser.add_argument('--max_timesteps', type=int, default=10) - parser.add_argument('--update_timesteps', type=int, 
default=10) - parser.add_argument('--max_epochs', type=int, default=5) - parser.add_argument('--train_batch_size', type=int, default=8) - parser.add_argument('--experience_batch_size', type=int, default=8) - parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") - - parser.add_argument('--debug', action='store_true') - args = parser.parse_args() - ray.init(namespace=os.environ["RAY_NAMESPACE"]) - main(args) diff --git a/applications/Chat/coati/ray/example/2m1t.sh b/applications/Chat/coati/ray/example/2m1t.sh deleted file mode 100644 index a207d4118d60..000000000000 --- a/applications/Chat/coati/ray/example/2m1t.sh +++ /dev/null @@ -1,23 +0,0 @@ -set_n_least_used_CUDA_VISIBLE_DEVICES() { - local n=${1:-"9999"} - echo "GPU Memory Usage:" - local FIRST_N_GPU_IDS=$(nvidia-smi --query-gpu=memory.used --format=csv \ - | tail -n +2 \ - | nl -v 0 \ - | tee /dev/tty \ - | sort -g -k 2 \ - | awk '{print $1}' \ - | head -n $n) - export CUDA_VISIBLE_DEVICES=$(echo $FIRST_N_GPU_IDS | sed 's/ /,/g') - echo "Now CUDA_VISIBLE_DEVICES is set to:" - echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES" -} - -set_n_least_used_CUDA_VISIBLE_DEVICES 3 - -export RAY_NAMESPACE="admin" - -python 2m1t.py "/path/to/prompts.csv" \ - --trainer_strategy naive --maker_strategy naive --lora_rank 2 --pretrain "facebook/opt-350m" --model 'opt' \ - --num_episodes 10 --max_timesteps 10 --update_timesteps 10 \ - --max_epochs 10 # --debug diff --git a/applications/Chat/coati/ray/example/2m2t.py b/applications/Chat/coati/ray/example/2m2t.py deleted file mode 100644 index 435c71915fc2..000000000000 --- a/applications/Chat/coati/ray/example/2m2t.py +++ /dev/null @@ -1,209 +0,0 @@ -import argparse -from copy import deepcopy - -import pandas as pd -import torch -from coati.trainer import PPOTrainer - - -from coati.ray.src.experience_maker_holder import ExperienceMakerHolder -from coati.ray.src.detached_trainer_ppo import DetachedPPOTrainer - -from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy -from coati.experience_maker import NaiveExperienceMaker -from torch.optim import Adam -from transformers import AutoTokenizer, BloomTokenizerFast -from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer - -from colossalai.nn.optimizer import HybridAdam - -import ray -import os -import socket - - -def get_free_port(): - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.bind(('', 0)) - return s.getsockname()[1] - - -def get_local_ip(): - with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: - s.connect(('8.8.8.8', 80)) - return s.getsockname()[0] - -def main(args): - master_addr = str(get_local_ip()) - # trainer_env_info - trainer_port = str(get_free_port()) - env_info_trainer_1 = {'local_rank' : '0', - 'rank' : '0', - 'world_size' : '2', - 'master_port' : trainer_port, - 'master_addr' : master_addr} - env_info_trainer_2 = {'local_rank' : '0', - 'rank' : '1', - 'world_size' : '2', - 'master_port' : trainer_port, - 'master_addr' : master_addr} - # maker_env_info - maker_port = str(get_free_port()) - env_info_maker_1 = {'local_rank' : '0', - 'rank' : '0', - 'world_size' : '2', - 'master_port' : maker_port, - 'master_addr' : master_addr} - env_info_maker_2 = {'local_rank' : '0', - 'rank' : '1', - 'world_size' : '2', - 'master_port': maker_port, - 'master_addr' : master_addr} - print([env_info_trainer_1, - env_info_trainer_2, - env_info_maker_1, - env_info_maker_2]) - ray.init() - # configure tokenizer - if args.model == 'gpt2': - tokenizer = 
GPT2Tokenizer.from_pretrained('gpt2') - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'bloom': - tokenizer = BloomTokenizerFast.from_pretrained(args.pretrain) - tokenizer.pad_token = tokenizer.eos_token - elif args.model == 'opt': - tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") - else: - raise ValueError(f'Unsupported model "{args.model}"') - - # configure Trainer - trainer_1_ref = DetachedPPOTrainer.options(name="trainer1", namespace=os.environ["RAY_NAMESPACE"], num_gpus=1, max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1", "maker2"], - strategy=args.trainer_strategy, - model=args.model, - env_info=env_info_trainer_1, - pretrained=args.pretrain, - lora_rank=args.lora_rank, - train_batch_size=args.train_batch_size, - buffer_limit=16, - experience_batch_size=args.experience_batch_size, - max_epochs=args.max_epochs, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - trainer_2_ref = DetachedPPOTrainer.options(name="trainer2", namespace=os.environ["RAY_NAMESPACE"], num_gpus=1, max_concurrency=2).remote( - experience_maker_holder_name_list=["maker1", "maker2"], - strategy=args.trainer_strategy, - model=args.model, - env_info=env_info_trainer_2, - pretrained=args.pretrain, - lora_rank=args.lora_rank, - train_batch_size=args.train_batch_size, - buffer_limit=16, - experience_batch_size=args.experience_batch_size, - max_epochs=args.max_epochs, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - # configure Experience Maker - experience_holder_1_ref = ExperienceMakerHolder.options(name="maker1", namespace=os.environ["RAY_NAMESPACE"], num_gpus=1, max_concurrency=2).remote( - detached_trainer_name_list=["trainer1", "trainer2"], - strategy=args.maker_strategy, - env_info=env_info_maker_1, - experience_batch_size=args.experience_batch_size, - kl_coef=0.1, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - experience_holder_2_ref = ExperienceMakerHolder.options(name="maker2", namespace=os.environ["RAY_NAMESPACE"], num_gpus=1, max_concurrency=2).remote( - detached_trainer_name_list=["trainer1", "trainer2"], - strategy=args.maker_strategy, - env_info=env_info_maker_2, - experience_batch_size=args.experience_batch_size, - kl_coef=0.1, - #kwargs: - max_length=128, - do_sample=True, - temperature=1.0, - top_k=50, - pad_token_id=tokenizer.pad_token_id, - eos_token_id=tokenizer.eos_token_id, - debug=args.debug, - ) - - # trainer send its actor and critic to experience holders. 
- # TODO: balance duty - ray.get(trainer_1_ref.initialize_remote_makers.remote()) - - # configure sampler - dataset = pd.read_csv(args.prompt_path)['prompt'] - - def tokenize_fn(texts): - # MUST padding to max length to ensure inputs of all ranks have the same length - # Different length may lead to hang when using gemini, as different generation steps - batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) - return {k: v.cuda() for k, v in batch.items()} - - trainer_1_done_ref = trainer_1_ref.fit.remote(num_episodes=args.num_episodes, max_timesteps=args.max_timesteps, update_timesteps=args.update_timesteps) - trainer_2_done_ref = trainer_2_ref.fit.remote(num_episodes=args.num_episodes, max_timesteps=args.max_timesteps, update_timesteps=args.update_timesteps) - num_exp_per_maker = args.num_episodes * args.max_timesteps // args.update_timesteps * args.max_epochs + 3 # +3 for fault tolerance - maker_1_done_ref = experience_holder_1_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - maker_2_done_ref = experience_holder_2_ref.workingloop.remote(dataset, tokenize_fn, times=num_exp_per_maker) - - ray.get([trainer_1_done_ref, trainer_2_done_ref, maker_1_done_ref, maker_2_done_ref]) - # save model checkpoint after fitting - trainer_1_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) - trainer_2_ref.strategy_save_actor.remote(args.save_path, only_rank0=True) - # save optimizer checkpoint on all ranks - if args.need_optim_ckpt: - trainer_1_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()), - only_rank0=False) - trainer_2_ref.strategy_save_actor_optim.remote('actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()), - only_rank0=False) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('prompt_path') - parser.add_argument('--trainer_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--maker_strategy', - choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'], - default='naive') - parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt']) - parser.add_argument('--pretrain', type=str, default=None) - parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts.pt') - parser.add_argument('--need_optim_ckpt', type=bool, default=False) - parser.add_argument('--num_episodes', type=int, default=10) - parser.add_argument('--max_timesteps', type=int, default=10) - parser.add_argument('--update_timesteps', type=int, default=10) - parser.add_argument('--max_epochs', type=int, default=5) - parser.add_argument('--train_batch_size', type=int, default=8) - parser.add_argument('--experience_batch_size', type=int, default=8) - parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") - - parser.add_argument('--debug', action='store_true') - args = parser.parse_args() - main(args) diff --git a/applications/Chat/coati/ray/example/2m2t.sh b/applications/Chat/coati/ray/example/2m2t.sh deleted file mode 100644 index fb4024766c54..000000000000 --- a/applications/Chat/coati/ray/example/2m2t.sh +++ /dev/null @@ -1,23 +0,0 @@ -set_n_least_used_CUDA_VISIBLE_DEVICES() { - local n=${1:-"9999"} - echo "GPU Memory Usage:" - local FIRST_N_GPU_IDS=$(nvidia-smi --query-gpu=memory.used --format=csv \ - | tail -n +2 \ - | nl -v 0 \ - | tee /dev/tty \ - | sort -g -k 2 \ - | awk '{print $1}' 
\ - | head -n $n) - export CUDA_VISIBLE_DEVICES=$(echo $FIRST_N_GPU_IDS | sed 's/ /,/g') - echo "Now CUDA_VISIBLE_DEVICES is set to:" - echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES" -} - -set_n_least_used_CUDA_VISIBLE_DEVICES 2 - -export RAY_NAMESPACE="admin" - -python 2m2t.py "path/to/prompts.csv" \ - --maker_strategy naive --trainer_strategy colossalai_zero2 --lora_rank 2 \ - --num_episodes 10 --max_timesteps 10 --update_timesteps 10 \ - --max_epochs 10 --debug \ No newline at end of file diff --git a/applications/Chat/coati/ray/experience_maker_holder.py b/applications/Chat/coati/ray/experience_maker_holder.py new file mode 100644 index 000000000000..8551ef1eacef --- /dev/null +++ b/applications/Chat/coati/ray/experience_maker_holder.py @@ -0,0 +1,271 @@ +import os +import time +import tracemalloc +from copy import deepcopy +from threading import Lock +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union + +import ray +import torch +import torch.nn as nn +from coati.experience_maker import Experience, ExperienceMaker, NaiveExperienceMaker +from coati.models.base import Actor, Critic, RewardModel +from coati.replay_buffer.utils import BufferItem, make_experience_batch, split_experience_batch +from coati.trainer.callbacks import Callback +from coati.trainer.strategies import Strategy +from coati.trainer.strategies.sampler import DistributedSampler +from ray.exceptions import GetTimeoutError +from torch import Tensor +from tqdm import tqdm + +from .callbacks import ExperienceMakerPerformanceEvaluator, MakerCallback +from .utils import (get_model_numel, + get_rank, + get_world_size, + is_rank_0, + set_dist_env, + state_dict_to) +from .lora_constructor import LoRAConstructor + +@ray.remote(concurrency_groups={"experience_io": 1, "model_io": 1, "compute": 1}) +class ExperienceMakerHolder: + ''' + Args: + detached_trainer_name_list: str list to get ray actor handles + strategy: + kl_coef: the coefficient of kl divergence loss + sync_models_from_trainers: whether to sync models from trainers. If True, you must call sync_models_to_remote_makers() in trainers to sync models. 
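+        Example (illustrative sketch only; the trainer names, strategy and model
+        constructors below are placeholders, not part of this module):
+            >>> maker = ExperienceMakerHolder.options(name='maker0', num_gpus=1).remote(
+            ...     detached_trainer_name_list=['trainer0', 'trainer1'],
+            ...     strategy_fn=lambda: NaiveStrategy(),
+            ...     model_fn=lambda: (actor, critic, reward_model, initial_model),
+            ...     kl_coef=0.1)
+            >>> maker.workingloop.remote(dataloader_fn, num_steps=100)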
+ ''' + + def __init__( + self, + detached_trainer_name_list: List[str], + strategy_fn: Callable[[], Strategy], + # a function returns (actor, critic, reward_model, initial_model) + model_fn: Callable[[], Tuple[Actor, Critic, RewardModel, Actor]], + env_info: Dict[str, str] = None, + sync_models_from_trainers: bool = False, + buffer_cpu_offload: bool = True, + kl_coef: float = 0.1, + callbacks: List[MakerCallback] = [], + eval_performance: bool = False, + debug: bool = False, + update_lora_weights: bool = False, + **generate_kwargs): + # set environment variables + if env_info: + set_dist_env(env_info=env_info) + self.target_trainer_list = [] + assert len(detached_trainer_name_list) > 0 + self._detached_trainer_name_list = detached_trainer_name_list + self.strategy = strategy_fn() + self.buffer_cpu_offload = buffer_cpu_offload + self.kl_coef = kl_coef + # init models + with self.strategy.model_init_context(): + actor, critic, reward_model, initial_model = model_fn() + self.generate_kwargs = _set_default_generate_kwargs(generate_kwargs, actor) + if eval_performance: + actor_numel = get_model_numel(actor) + critic_numel = get_model_numel(critic) + initial_model_numel = get_model_numel(initial_model) + reward_model_numel = get_model_numel(reward_model) + evaluator = ExperienceMakerPerformanceEvaluator(actor_numel, critic_numel, initial_model_numel, + reward_model_numel) + callbacks = callbacks + [evaluator] + + actor, critic, reward_model, initial_model = self.strategy.prepare(actor, critic, reward_model, initial_model) + self.experience_maker = NaiveExperienceMaker(actor, critic, reward_model, initial_model, self.kl_coef) + self.callbacks = callbacks + + self._model_visit_lock = Lock() + + self._is_fully_initialized = not sync_models_from_trainers + + self._debug = debug + self._update_lora_weights = update_lora_weights + if self._update_lora_weights: + self.actor_lora_constructor = LoRAConstructor() + self.critic_lora_constructor = LoRAConstructor() + + self.target_auto_balance = False + + self._target_idx = 0 + + if self._debug: + print(f'[maker{get_rank()}] will send items to {self._detached_trainer_name_list}') + if not self._is_fully_initialized: + print(f'[maker{get_rank()}] Waiting for INIT') + + def _get_ready(self): + while not self._fully_initialized(): + time.sleep(1.0) + + def _fully_initialized(self): + return self._is_fully_initialized + + def _init_target_trainer_list(self): + if len(self.target_trainer_list) > 0: + return + for name in self._detached_trainer_name_list: + self.target_trainer_list.append(ray.get_actor(name, namespace=os.environ["RAY_NAMESPACE"])) + + # copy from ../trainer/base.py + @ray.method(concurrency_group="compute") + def _make_experience(self, inputs: Union[Tensor, Dict[str, Tensor]]) -> Experience: + if isinstance(inputs, Tensor): + return self.experience_maker.make_experience(inputs, **self.generate_kwargs) + elif isinstance(inputs, dict): + return self.experience_maker.make_experience(**inputs, **self.generate_kwargs) + else: + raise ValueError(f'Unsupported input type "{type(inputs)}"') + + @ray.method(concurrency_group="experience_io") + def _send_items(self, experience: Experience) -> None: + self._init_target_trainer_list() + items = split_experience_batch(experience) + items_per_trainer = [[] for _ in range(len(self.target_trainer_list))] + for item in items: + items_per_trainer[self._target_idx].append(item) + self._target_idx = (self._target_idx + 1) % len(self.target_trainer_list) + for i, target_trainer in 
enumerate(self.target_trainer_list): + if len(items_per_trainer[i]) > 0: + target_trainer.buffer_extend.remote(items_per_trainer[i]) + + def _inference_step(self, batch) -> None: + self._on_batch_start() + with self._model_visit_lock: + self._on_make_experience_start() + experience = self._make_experience(batch) + self._on_make_experience_end(experience) + self._on_send_start() + if self.buffer_cpu_offload: + experience.to_device('cpu') + self._send_items(experience) + self._on_send_end() + self._on_batch_end() + + def workingloop(self, dataloader_fn: Callable[[], Iterable], num_epochs: int = 1, num_steps: int = 0): + """Working loop of the experience maker. + + Args: + dataloader_fn (Callable[[], Iterable]): A function that returns a dataloader. + num_epochs (int, optional): Number of epochs to iterate over the dataloader. Defaults to 1. + num_steps (int, optional): Number of steps to iterate over the dataloader. If this value > 0, num_epochs is ignored. Defaults to 0. + """ + self._get_ready() + self._on_loop_start() + dataloader = dataloader_fn() + if num_steps > 0: + # ignore num epochs + it = iter(dataloader) + for _ in tqdm(range(num_steps), desc='ExperienceMaker', disable=not is_rank_0()): + try: + batch = next(it) + except StopIteration: + it = iter(dataloader) + batch = next(it) + self._inference_step(batch) + else: + with tqdm(total=num_epochs * len(dataloader), desc='ExperienceMaker', disable=not is_rank_0()) as pbar: + for _ in range(num_epochs): + for batch in dataloader: + self._inference_step(batch) + pbar.update() + self._on_loop_end() + + @ray.method(concurrency_group="model_io") + def update_experience_maker(self, + new_actor_state_dict: Dict[str, Any] = None, + new_actor_lora_config_dict: Dict[str, Any] = None, + new_critic_state_dict: Dict[str, Any] = None, + new_critic_lora_config_dict: Dict[str, Any] = None, + fully_update: bool = False, + chunk_start: bool = None, + chunk_end: bool = None): + ''' + Called by the trainer. + chunk_start: Set True on the first call, before any state_dict chunks are sent. + chunk_end: Set True on the last call, after all state_dict chunks have been sent.
+ fully_update: Set True if you want to sync models when initializing + + TODO: load_state_dict integrate with model-sharding strategy + ''' + _watch_memory = self._debug + if chunk_start: + if self._debug: + print("[maker] UPDATE ") + if _watch_memory: + tracemalloc.start() + self._model_visit_lock.acquire() + + with torch.no_grad(): + if new_actor_state_dict is not None: + if not self._update_lora_weights or fully_update: + self.experience_maker.actor.model.load_state_dict(new_actor_state_dict, strict=False) + else: + new_actor_state_dict = state_dict_to(new_actor_state_dict, device=torch.cuda.current_device()) + state_dict_increasae = self.actor_lora_constructor.reconstruct_increase(new_actor_state_dict, new_actor_lora_config_dict) + self.actor_lora_constructor.load_state_dict_increase(self.experience_maker.actor.model, state_dict_increasae) + if new_critic_state_dict is not None: + if not self._update_lora_weights or fully_update: + self.experience_maker.critic.load_state_dict(new_critic_state_dict, strict=False) + else: + new_critic_state_dict = state_dict_to(new_critic_state_dict, device=torch.cuda.current_device()) + state_dict_increasae = self.critic_lora_constructor.reconstruct_increase(new_critic_state_dict, new_critic_lora_config_dict) + self.critic_lora_constructor.load_state_dict_increase(self.experience_maker.critic, state_dict_increasae) + + # the lock must be released after both actor and critic being updated + if chunk_end: + self._model_visit_lock.release() + if _watch_memory: + current, peak = tracemalloc.get_traced_memory() + print(f"Current memory usage is {current / 10**6}MB; Peak was {peak / 10**6}MB") + tracemalloc.stop() + if fully_update: + self._is_fully_initialized = True + + def _on_make_experience_start(self) -> None: + for callback in self.callbacks: + callback.on_make_experience_start() + + def _on_make_experience_end(self, experience: Experience) -> None: + for callback in self.callbacks: + callback.on_make_experience_end(experience) + + def _on_loop_start(self) -> None: + for callback in self.callbacks: + callback.on_loop_start() + + def _on_loop_end(self) -> None: + for callback in self.callbacks: + callback.on_loop_end() + + def _on_send_start(self) -> None: + for callback in self.callbacks: + callback.on_send_start() + + def _on_send_end(self) -> None: + for callback in self.callbacks: + callback.on_send_end() + + def _on_batch_start(self) -> None: + for callback in self.callbacks: + callback.on_batch_start() + + def _on_batch_end(self) -> None: + for callback in self.callbacks: + callback.on_batch_end() + + +def _set_default_generate_kwargs(generate_kwargs: dict, actor: Actor) -> None: + origin_model = actor.model + new_kwargs = {**generate_kwargs} + # use huggingface models method directly + if 'prepare_inputs_fn' not in generate_kwargs and hasattr(origin_model, 'prepare_inputs_for_generation'): + new_kwargs['prepare_inputs_fn'] = origin_model.prepare_inputs_for_generation + + if 'update_model_kwargs_fn' not in generate_kwargs and hasattr(origin_model, '_update_model_kwargs_for_generation'): + new_kwargs['update_model_kwargs_fn'] = origin_model._update_model_kwargs_for_generation + + return new_kwargs diff --git a/applications/Chat/coati/ray/lora_constructor.py b/applications/Chat/coati/ray/lora_constructor.py new file mode 100644 index 000000000000..599a58248728 --- /dev/null +++ b/applications/Chat/coati/ray/lora_constructor.py @@ -0,0 +1,122 @@ +from typing import Any, Callable, Dict, List, Optional +from collections import OrderedDict +from 
dataclasses import dataclass + +import torch +import torch.nn as nn +from loralib.layers import LoRALayer +from coati.models.lora import LoraLinear + + +@dataclass +class LoRAConfig: + r: int = 0 + lora_alpha: int = 1 + lora_dropout: float = 0 + fan_in_fan_out: bool = False + + +class LoRAConstructor: + ''' + Tools for reconstructing a model from a remote LoRA model. + (Transfering only LoRA data costs much less!) + Usage: + Step 1 (Sender): + filter_state_dict_lora() + + Step 2 (Sender, Optional): + extract_lora_config() + + Step 3 (Sender): + send state_dict_lora and lora_config_dict + + Step 4 (Receiver): + reconstruct_increase() + + Step 5 (Receiver): + load_state_dict_increase() + + ''' + + def __init__(self): + self.lora_config_dict = None + + def register_lora_config(self, lora_config_dict: Dict[str, Any]): + self.lora_config_dict = lora_config_dict + + def reconstruct_increase(self, state_dict_lora: Dict[str, Any], lora_config_dict: Dict[str, Any]): + ''' + xxx.lora_A, xxx.lora_B -->> xxx.weight + Warning: the xxx.weight here is the increment actually. + ''' + if lora_config_dict is not None: + self.register_lora_config(lora_config_dict) + + state_dict_increasae = OrderedDict() + config_iter = iter(self.lora_config_dict.items()) + lora_A, lora_B, layer_prefix = None, None, None + for k, v in state_dict_lora.items(): + if k.rpartition('.')[-1] == 'lora_A': + lora_A = v + layer_prefix = k.rpartition('.')[0] + elif k.rpartition('.')[-1] == 'lora_B': + assert layer_prefix == k.rpartition('.')[0], "unmatched (lora_A, lora_B) pair" + layer_prefix_2, config = next(config_iter) + assert layer_prefix_2 == layer_prefix, "unmatched (state_dict, config_dict) pair" + lora_B = v + weight_data_increase = self._compute(lora_A, lora_B, config) + state_dict_increasae[layer_prefix + '.weight'] = weight_data_increase + lora_A, lora_B, layer_prefix = None, None, None + else: + raise ValueError('unexpected key') + return state_dict_increasae + + def _compute(self, lora_A, lora_B, config=LoRAConfig()): + def T(w): + return w.T if config.fan_in_fan_out else w + if config.r > 0: + scaling = config.lora_alpha / config.r + weight_data_increase = T(lora_B @ lora_A) * scaling + return weight_data_increase + return 0 + + def load_state_dict_increase(self, model: nn.Module, state_dict_increasae: Dict[str, Any]): + ''' + The final reconstruction step + ''' + # naive approach + model.load_state_dict({k: v + model.state_dict()[k] for k, v in state_dict_increasae.items()}, strict=False) + + @staticmethod + def filter_state_dict_lora(state_dict: Dict[str, Any], keep_non_lora=False): + ''' + if keep_non_lora, also return non_lora state_dict + ''' + state_dict_lora = OrderedDict() + state_dict_non_lora = OrderedDict() + for k, v in state_dict.items(): + if 'lora_A' in k or 'lora_B' in k: + state_dict_lora[k] = v + elif keep_non_lora: + state_dict_non_lora[k] = v + if keep_non_lora: + return state_dict_lora, state_dict_non_lora + else: + return state_dict_lora, None + + @staticmethod + def extract_lora_config(model: nn.Module) -> Dict[str, LoRAConfig]: + ''' + extract LoraLinear model. 
+ return OrderedDict(): name -> LoRAConfig + ''' + lora_config_dict = OrderedDict() + + for name, child in model.named_modules(): + if isinstance(child, LoraLinear): + lora_config_dict[name] = LoRAConfig(r=child.r, + lora_alpha=child.lora_alpha, + lora_dropout=child.lora_dropout, + fan_in_fan_out=child.fan_in_fan_out) + + return lora_config_dict diff --git a/applications/Chat/coati/ray/src/__init__.py b/applications/Chat/coati/ray/src/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/applications/Chat/coati/ray/src/detached_trainer_base.py b/applications/Chat/coati/ray/src/detached_trainer_base.py deleted file mode 100644 index f1ed1ec71499..000000000000 --- a/applications/Chat/coati/ray/src/detached_trainer_base.py +++ /dev/null @@ -1,121 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any, Callable, Dict, List, Optional, Union -from tqdm import tqdm -from coati.trainer.callbacks import Callback -from coati.experience_maker import Experience -import ray -import os - -from .detached_replay_buffer import DetachedReplayBuffer -from .utils import is_rank_0 - -class DetachedTrainer(ABC): - ''' - Base class for detached rlhf trainers. - 'detach' means that the experience maker is detached compared to a normal Trainer. - Please set name attribute during init: - >>> trainer = DetachedTrainer.options(..., name = "xxx", ...).remote() - So an ExperienceMakerHolder can reach the detached_replay_buffer by Actor's name. - Args: - detached_strategy (DetachedStrategy): the strategy to use for training - detached_replay_buffer_ref (ObjectRef[DetachedReplayBuffer]): the replay buffer to use for training - experience_batch_size (int, defaults to 8): the batch size to use for experience generation - max_epochs (int, defaults to 1): the number of epochs of training process - data_loader_pin_memory (bool, defaults to True): whether to pin memory for data loader - callbacks (List[Callback], defaults to []): the callbacks to call during training process - generate_kwargs (dict, optional): the kwargs to use while model generating - ''' - - def __init__(self, - experience_maker_holder_name_list: List[str], - train_batch_size: int = 8, - buffer_limit: int = 0, - buffer_cpu_offload: bool = True, - experience_batch_size: int = 8, - max_epochs: int = 1, - dataloader_pin_memory: bool = True, - callbacks: List[Callback] = [], - **generate_kwargs) -> None: - super().__init__() - self.detached_replay_buffer = DetachedReplayBuffer(train_batch_size, limit=buffer_limit, cpu_offload=buffer_cpu_offload) - self.experience_batch_size = experience_batch_size - self.max_epochs = max_epochs - self.dataloader_pin_memory = dataloader_pin_memory - self.callbacks = callbacks - self.generate_kwargs = generate_kwargs - self.target_holder_name_list = experience_maker_holder_name_list - self.target_holder_list = [] - - def update_target_holder_list(self, experience_maker_holder_name_list): - self.target_holder_name_list = experience_maker_holder_name_list - self.target_holder_list = [] - for name in self.target_holder_name_list: - self.target_holder_list.append(ray.get_actor(name, namespace=os.environ["RAY_NAMESPACE"])) - - @abstractmethod - def _update_remote_makers(self): - pass - - @abstractmethod - def training_step(self, experience: Experience) -> Dict[str, Any]: - pass - - def _learn(self): - pbar = tqdm(range(self.max_epochs), desc='Train epoch', disable=not is_rank_0()) - for _ in pbar: - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - 
print("[trainer] sampling exp") - experience = self._buffer_sample() - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - print("[trainer] training step") - metrics = self.training_step(experience) - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - print("[trainer] step over") - pbar.set_postfix(metrics) - - def fit(self, num_episodes: int = 50000, max_timesteps: int = 500, update_timesteps: int = 5000) -> None: - self._on_fit_start() - for episode in range(num_episodes): - self._on_episode_start(episode) - for timestep in tqdm(range(max_timesteps // update_timesteps), - desc=f'Episode [{episode+1}/{num_episodes}]', - disable=not is_rank_0()): - self._learn() - self._update_remote_makers() - self._on_episode_end(episode) - self._on_fit_end() - - @ray.method(concurrency_group="buffer_length") - def buffer_get_length(self): - # called by ExperienceMakerHolder - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - print("[trainer] telling length") - return self.detached_replay_buffer.get_length() - - @ray.method(concurrency_group="buffer_append") - def buffer_append(self, experience: Experience): - # called by ExperienceMakerHolder - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - # print(f"[trainer] receiving exp. Current buffer length: {self.detached_replay_buffer.get_length()}") - print(f"[trainer] receiving exp.") - self.detached_replay_buffer.append(experience) - - @ray.method(concurrency_group="buffer_sample") - def _buffer_sample(self): - return self.detached_replay_buffer.sample() - - def _on_fit_start(self) -> None: - for callback in self.callbacks: - callback.on_fit_start() - - def _on_fit_end(self) -> None: - for callback in self.callbacks: - callback.on_fit_end() - - def _on_episode_start(self, episode: int) -> None: - for callback in self.callbacks: - callback.on_episode_start(episode) - - def _on_episode_end(self, episode: int) -> None: - for callback in self.callbacks: - callback.on_episode_end(episode) diff --git a/applications/Chat/coati/ray/src/experience_maker_holder.py b/applications/Chat/coati/ray/src/experience_maker_holder.py deleted file mode 100644 index 0ae4e3125b70..000000000000 --- a/applications/Chat/coati/ray/src/experience_maker_holder.py +++ /dev/null @@ -1,172 +0,0 @@ -import torch -from typing import Any, Callable, Dict, List, Optional, Union -import ray -from ray.exceptions import GetTimeoutError -from torch import Tensor -import torch.nn as nn -from coati.models.base import Actor, Critic, RewardModel -from coati.trainer.strategies.sampler import DistributedSampler -from coati.trainer.strategies import Strategy -from coati.experience_maker import NaiveExperienceMaker, Experience, ExperienceMaker - -from copy import deepcopy -from threading import Lock -import time -import os - - -from .utils import is_rank_0, get_strategy_from_args, set_dist_env - - -@ray.remote(concurrency_groups={"experience_io": 1, "model_io": 1, "compute": 1}) -class ExperienceMakerHolder: - ''' - Args: - detached_trainer_name_list: str list to get ray actor handles - strategy: - experience_batch_size: batch size of generated experience - kl_coef: the coefficient of kl divergence loss - ''' - - def __init__(self, - detached_trainer_name_list: List[str], - strategy: str, - env_info: Dict[str, str] = None, - experience_batch_size: int = 8, - kl_coef: float = 0.1, - **generate_kwargs): - # set environment variables - if env_info: - set_dist_env(env_info=env_info) - 
self.target_trainer_list = [] - for name in detached_trainer_name_list: - self.target_trainer_list.append(ray.get_actor(name, namespace=os.environ["RAY_NAMESPACE"])) - self.strategy_str = strategy - self.strategy = get_strategy_from_args(strategy) - self.experience_batch_size = experience_batch_size - self.kl_coef = kl_coef - self.generate_kwargs = generate_kwargs - # Need a trainer to give an actor and a critic via initialize_experience_maker(...) - actor, critic, reward_model, initial_model = None, None, None, None - self.experience_maker = NaiveExperienceMaker(actor, critic, reward_model, initial_model, self.kl_coef) - self._model_visit_lock = Lock() - self.fully_initialized = False - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - print('[maker] Waiting for INIT') - - def _get_ready(self): - while not self.fully_initialized: - time.sleep(1.0) - - def update_target_trainer_list(self, detached_trainer_name_list): - self.target_trainer_list = [] - for name in detached_trainer_name_list: - self.target_trainer_list.append(ray.get_actor(name)) - - # copy from ../trainer/base.py - @ray.method(concurrency_group="compute") - def _make_experience(self, inputs: Union[Tensor, Dict[str, Tensor]]) -> Experience: - self._get_ready() - if isinstance(inputs, Tensor): - return self.experience_maker.make_experience(inputs, **self.generate_kwargs) - elif isinstance(inputs, dict): - return self.experience_maker.make_experience(**inputs, **self.generate_kwargs) - else: - raise ValueError(f'Unsupported input type "{type(inputs)}"') - - @ray.method(concurrency_group="experience_io") - def _send_experience(self, experience): - ''' - ignore it - - # choose a trainer that has the least experience batch in its detached_replay_buffer - chosen_trainer = None - min_length = None - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - print("[maker] choosing target trainer") - while chosen_trainer is None: - for target_trainer in self.target_trainer_list: - try: - temp_length = ray.get(target_trainer.buffer_get_length.remote(), timeout=0.1) - if min_length is None: - min_length = temp_length - chosen_trainer = target_trainer - else: - if temp_length < min_length: - min_length = temp_length - chosen_trainer = target_trainer - except GetTimeoutError: - pass - - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - print(f"[maker] sending exp to {chosen_trainer}") - chosen_trainer.buffer_append.remote(experience) - ''' - # - if not hasattr(self, "_target_idx"): - self._target_idx = 0 - chosen_trainer = self.target_trainer_list[self._target_idx] - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - print(f"[maker] sending exp to {chosen_trainer}") - chosen_trainer.buffer_append.remote(experience) - self._target_idx = (self._target_idx + 1) % len(self.target_trainer_list) - - def workingloop(self, dataset, tokenizer: Optional[Callable[[Any], dict]] = None, times=5000 * 50000): - self._get_ready() - sampler = self.strategy.setup_sampler(dataset) - for _ in range(times): - rand_prompts = sampler.sample(self.experience_batch_size) - if tokenizer is not None: - inputs = tokenizer(rand_prompts) - else: - inputs = rand_prompts - self._model_visit_lock.acquire() - experience = self._make_experience(inputs=inputs) - self._model_visit_lock.release() - self._send_experience(experience=experience) - - @ray.method(concurrency_group="model_io") - def initialize_experience_maker(self, init_actor: Actor, init_critic: Critic): 
- ''' - called by trainer. Only once. - ''' - # TODO: reduce malloc - if self.fully_initialized: - return - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - print('[maker] INIT') - with torch.no_grad(): - with self.strategy.model_init_context(): - actor = init_actor - critic = init_critic - initial_model = deepcopy(actor) - reward_model = RewardModel(deepcopy(critic.model), - deepcopy(critic.value_head)).to(torch.cuda.current_device()) - if self.strategy_str != 'colossalai_gemini': - actor.to(torch.float16).to(torch.cuda.current_device()) - critic.to(torch.float16).to(torch.cuda.current_device()) - initial_model.to(torch.float16).to(torch.cuda.current_device()) - reward_model.to(torch.float16).to(torch.cuda.current_device()) - - self.experience_maker.actor = self.strategy.prepare(actor) - self.experience_maker.critic = self.strategy.prepare(critic) - self.experience_maker.initial_model = self.strategy.prepare(initial_model) - self.experience_maker.reward_model = self.strategy.prepare(reward_model) - self.fully_initialized = True - - @ray.method(concurrency_group="model_io") - def update_experience_maker(self, new_actor: Actor, new_critic: Critic): - ''' - called by trainer - ''' - # TODO: reduce malloc - self._model_visit_lock.acquire() - with torch.no_grad(): - if 'debug' in self.generate_kwargs and self.generate_kwargs['debug'] == True: - print("[maker] UPDATE ") - if self.strategy_str != 'colossalai_gemini': - new_actor.to(torch.float16).to(torch.cuda.current_device()) - new_critic.to(torch.float16).to(torch.cuda.current_device()) - self.experience_maker.actor = self.strategy.prepare(new_actor) - self.experience_maker.critic = self.strategy.prepare(new_critic) - self._model_visit_lock.release() diff --git a/applications/Chat/coati/ray/src/pipeline_strategy.py b/applications/Chat/coati/ray/src/pipeline_strategy.py deleted file mode 100644 index 7ecb5d7d86d6..000000000000 --- a/applications/Chat/coati/ray/src/pipeline_strategy.py +++ /dev/null @@ -1,105 +0,0 @@ -# WIP - - -from coati.trainer.strategies import Strategy -from coati.trainer.strategies import NaiveStrategy -from coati.models.base import Actor, RewardModel, Critic - -import numpy as np -import torch -from torch._C._distributed_rpc import _is_current_rpc_agent_set - -import colossalai -from colossalai.pipeline.pipeline_process_group import ppg -from colossalai.pipeline.rpc._pipeline_schedule import OneFOneBPipelineEngine -from colossalai.fx import ColoTracer -from colossalai.fx.passes.adding_split_node_pass import balanced_split_pass, split_with_split_nodes_pass -from colossalai.pipeline.middleware.adaptor import get_fx_topology - - -import os -from functools import partial -import random - -rpc_is_initialized = _is_current_rpc_agent_set - -class PipelineModel(torch.nn.Module): - ''' - Actor has 2 kinds of jobs: forward and generate. 
- better to just pipeline the inner model - ''' - def __init__(self, - model: torch.nn.Module, - stage_num: int, - num_microbatches: int, - data_kwargs = None, - ): - super().__init__() - # create partition module - def create_partition_module(pp_rank:int, stage_num: int, model, data_kwargs): - model.eval() - tracer = ColoTracer() - meta_args = {k: v.to('meta') for k, v in data_kwargs.items()} - graph = tracer.trace(root=model, meta_args=meta_args) - gm = torch.fx.GraphModule(model, graph, model.__class__.__name__) - annotated_model = balanced_split_pass(gm, stage_num) - top_module, split_submodules = split_with_split_nodes_pass(annotated_model, merge_output=True) - topo = get_fx_topology(top_module) - for submodule in split_submodules: - if isinstance(submodule, torch.fx.GraphModule): - setattr(submodule, '_topo', topo) - return split_submodules[pp_rank + 1] - - def partition(model, data_kwargs: dict, pp_rank: int, chunk: int, stage_num: int): - partition = create_partition_module(pp_rank, stage_num, model, data_kwargs) - return partition - self.inference_engine = OneFOneBPipelineEngine( - partition_fn=partial(partition, model, data_kwargs), - stage_num=stage_num, - num_microbatches=num_microbatches, - device='cuda', - ) - - def forward(self, - **model_inputs): - return self.inference_engine.forward_backward(**model_inputs, forward_only=True) - - - -class PPStrategy(NaiveStrategy): - """ - Strategy for Pipeline inference (inference only!) - - master node only - """ - def __init__( - self, - seed: int = 42 - ): - self.seed = seed - super().__init__() - - - def setup_distributed(self) -> None: - colossalai.launch_from_torch({}, seed=self.seed) - ppg.set_global_info(rank = int(os.environ['RANK']), - world_size=int(os.environ['WORLD_SIZE']), - dp_degree=1, - tp_degree=1, - num_worker_threads=128, - device="cuda") - - def model_init_context(self): - return super().model_init_context() - - def setup_model(self, model: torch.nn.Module) -> torch.nn.Module: - if isinstance(model, Actor) or \ - isinstance(model, RewardModel) or \ - isinstance(model, Critic): - model.model = PipelineModel(model.model) - - def set_seed(self, seed: int) -> None: - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - diff --git a/applications/Chat/coati/ray/src/utils.py b/applications/Chat/coati/ray/src/utils.py deleted file mode 100644 index c750879b6d18..000000000000 --- a/applications/Chat/coati/ray/src/utils.py +++ /dev/null @@ -1,48 +0,0 @@ -import torch.distributed as dist -from typing import Any, Callable, Dict, List, Optional -from coati.models.bloom import BLOOMActor, BLOOMCritic -from coati.models.gpt import GPTActor, GPTCritic -from coati.models.opt import OPTActor, OPTCritic -from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy -import torch -import os - -def is_rank_0() -> bool: - return not dist.is_initialized() or dist.get_rank() == 0 - - -def get_cuda_actor_critic_from_args(model: str, pretrained: str = None, lora_rank=0): - if model == 'gpt2': - actor = GPTActor(pretrained=pretrained, lora_rank=lora_rank).to(torch.cuda.current_device()) - critic = GPTCritic(pretrained=pretrained, lora_rank=lora_rank).to(torch.cuda.current_device()) - elif model == 'bloom': - actor = BLOOMActor(pretrained=pretrained, lora_rank=lora_rank).to(torch.cuda.current_device()) - critic = BLOOMCritic(pretrained=pretrained, lora_rank=lora_rank).to(torch.cuda.current_device()) - elif model == 'opt': - actor = OPTActor(pretrained=pretrained, 
lora_rank=lora_rank).to(torch.cuda.current_device()) - critic = OPTCritic(pretrained=pretrained, lora_rank=lora_rank).to(torch.cuda.current_device()) - else: - raise ValueError(f'Unsupported model "{model}"') - return actor, critic - - -def get_strategy_from_args(strategy: str): - if strategy == 'naive': - strategy_ = NaiveStrategy() - elif strategy == 'ddp': - strategy_ = DDPStrategy() - elif strategy == 'colossalai_gemini': - strategy_ = ColossalAIStrategy(stage=3, placement_policy='cuda', initial_scale=2**5) - elif strategy == 'colossalai_zero2': - strategy_ = ColossalAIStrategy(stage=2, placement_policy='cuda') - else: - raise ValueError(f'Unsupported strategy "{strategy}"') - return strategy_ - - -def set_dist_env(env_info: Dict[str, str]): - os.environ["RANK"] = env_info['rank'] - os.environ["LOCAL_RANK"] = env_info['local_rank'] - os.environ["WORLD_SIZE"] = env_info['world_size'] - os.environ['MASTER_PORT'] = env_info['master_port'] - os.environ['MASTER_ADDR'] = env_info['master_addr'] diff --git a/applications/Chat/coati/ray/utils.py b/applications/Chat/coati/ray/utils.py new file mode 100644 index 000000000000..4361ee236771 --- /dev/null +++ b/applications/Chat/coati/ray/utils.py @@ -0,0 +1,152 @@ +import os +from typing import Any, Callable, Dict, List, Optional +from collections import OrderedDict + +import torch +import torch.distributed as dist +import torch.nn as nn +from coati.models.bloom import BLOOMRM, BLOOMActor, BLOOMCritic +from coati.models.gpt import GPTRM, GPTActor, GPTCritic +from coati.models.llama import LlamaActor, LlamaCritic, LlamaRM +from coati.models.opt import OPTRM, OPTActor, OPTCritic +from coati.models.roberta import RoBERTaActor, RoBERTaCritic, RoBERTaRM +from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy +from coati.utils import prepare_llama_tokenizer_and_embedding +from transformers import AutoTokenizer, BloomTokenizerFast, GPT2Tokenizer, LlamaTokenizer, RobertaTokenizer + + +def is_rank_0() -> bool: + return not dist.is_initialized() or dist.get_rank() == 0 + + +def get_rank() -> int: + return dist.get_rank() if dist.is_initialized() else 0 + + +def get_world_size() -> int: + return dist.get_world_size() if dist.is_initialized() else 1 + + +def get_actor_from_args(model: str, pretrained: str = None, config=None, lora_rank=0): + if model == 'gpt2': + actor = GPTActor(pretrained=pretrained, config=config, lora_rank=lora_rank) + elif model == 'bloom': + actor = BLOOMActor(pretrained=pretrained, config=config, lora_rank=lora_rank) + elif model == 'opt': + actor = OPTActor(pretrained=pretrained, config=config, lora_rank=lora_rank) + elif model == 'llama': + actor = LlamaActor(pretrained=pretrained, config=config, lora_rank=lora_rank) + elif model == 'roberta': + actor = RoBERTaActor(pretrained=pretrained, config=config, lora_rank=lora_rank) + else: + raise ValueError(f'Unsupported actor model "{model}"') + return actor + + +def get_critic_from_args(model: str, pretrained: str = None, config=None, lora_rank=0): + if model == 'gpt2': + critic = GPTCritic(pretrained=pretrained, lora_rank=lora_rank, config=config, use_action_mask=True) + elif model == 'bloom': + critic = BLOOMCritic(pretrained=pretrained, lora_rank=lora_rank, config=config, use_action_mask=True) + elif model == 'opt': + critic = OPTCritic(pretrained=pretrained, lora_rank=lora_rank, config=config, use_action_mask=True) + elif model == 'llama': + critic = LlamaCritic(pretrained=pretrained, lora_rank=lora_rank, config=config, use_action_mask=True) + elif model 
== 'roberta': + critic = RoBERTaCritic(pretrained=pretrained, lora_rank=lora_rank, config=config, use_action_mask=True) + else: + raise ValueError(f'Unsupported reward model "{model}"') + return critic + + +def get_reward_model_from_args(model: str, pretrained: str = None, config=None): + if model == 'gpt2': + reward_model = GPTRM(pretrained=pretrained, config=config) + elif model == 'bloom': + reward_model = BLOOMRM(pretrained=pretrained, config=config) + elif model == 'opt': + reward_model = OPTRM(pretrained=pretrained, config=config) + elif model == 'llama': + reward_model = LlamaRM(pretrained=pretrained, config=config) + elif model == 'roberta': + reward_model = RoBERTaRM(pretrained=pretrained, config=config) + else: + raise ValueError(f'Unsupported reward model "{model}"') + return reward_model + + +def get_strategy_from_args(strategy: str): + if strategy == 'naive': + strategy_ = NaiveStrategy() + elif strategy == 'ddp': + strategy_ = DDPStrategy() + elif strategy == 'colossalai_gemini': + strategy_ = ColossalAIStrategy(stage=3, placement_policy='cuda', initial_scale=2**5) + elif strategy == 'colossalai_zero2': + strategy_ = ColossalAIStrategy(stage=2, placement_policy='cuda') + elif strategy == 'colossalai_gemini_cpu': + strategy_ = ColossalAIStrategy(stage=3, placement_policy='cpu', initial_scale=2**5) + elif strategy == 'colossalai_zero2_cpu': + strategy_ = ColossalAIStrategy(stage=2, placement_policy='cpu') + else: + raise ValueError(f'Unsupported strategy "{strategy}"') + return strategy_ + + +def get_tokenizer_from_args(model: str, **kwargs): + if model == 'gpt2': + tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + elif model == 'bloom': + tokenizer = BloomTokenizerFast.from_pretrained('bigscience/bloom-560m') + elif model == 'opt': + tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") + elif model == 'llama': + pretrain_path = kwargs["pretrain"] + tokenizer = AutoTokenizer.from_pretrained(pretrain_path) + elif model == 'roberta': + tokenizer = RobertaTokenizer.from_pretrained("roberta-base") + else: + raise ValueError(f'Unsupported model "{model}"') + + tokenizer.pad_token = tokenizer.eos_token + return tokenizer + + +def set_dist_env(env_info: Dict[str, str]): + os.environ["RANK"] = env_info['rank'] + os.environ["LOCAL_RANK"] = env_info['local_rank'] + os.environ["WORLD_SIZE"] = env_info['world_size'] + os.environ['MASTER_PORT'] = env_info['master_port'] + os.environ['MASTER_ADDR'] = env_info['master_addr'] + + +def get_model_numel(model: nn.Module) -> int: + numel = sum(p.numel() for p in model.parameters()) + return numel + + +def get_receivers_per_sender(sender_idx: int, num_senders: int, num_receivers: int, allow_idle_sender: bool) -> list: + target_receivers = [] + if num_senders <= num_receivers or allow_idle_sender: + # a sender will send data to one or more than one receivers + # a receiver only has one sender + for i in range(num_receivers): + if i % num_senders == sender_idx: + target_receivers.append(i) + else: + # a sender will send data to one receiver + # a receiver may have more than one sender + target_receivers.append(sender_idx % num_receivers) + return target_receivers + + +def state_dict_to(state_dict: Dict[str, Any], + dtype: torch.dtype = torch.float16, + device: torch.device = torch.device('cpu')): + ''' + keep state_dict intact + ''' + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + new_state_dict[k] = v.to(dtype=dtype, device=device) + return new_state_dict diff --git a/applications/Chat/coati/trainer/ppo.py 
b/applications/Chat/coati/trainer/ppo.py index fe5ae48d9c2f..e2e44e62533e 100644 --- a/applications/Chat/coati/trainer/ppo.py +++ b/applications/Chat/coati/trainer/ppo.py @@ -3,8 +3,9 @@ import torch import torch.nn as nn from coati.experience_maker import Experience, NaiveExperienceMaker -from coati.models.base import Actor, Critic +from coati.models.base import Actor, Critic, get_base_model from coati.models.loss import GPTLMLoss, PolicyLoss, ValueLoss +from coati.models.utils import calc_action_log_probs from coati.replay_buffer import NaiveReplayBuffer from torch import Tensor from torch.optim import Optimizer @@ -165,7 +166,8 @@ def training_step(self, experience: Experience) -> Dict[str, float]: self.critic.train() # policy loss num_actions = experience.action_mask.size(1) - action_log_probs = self.actor(experience.sequences, num_actions, attention_mask=experience.attention_mask) + actor_output = self.actor(experience.sequences, attention_mask=experience.attention_mask) + action_log_probs = calc_action_log_probs(actor_output, experience.sequences, num_actions) actor_loss = self.actor_loss_fn(action_log_probs, experience.action_log_probs, experience.advantages, @@ -175,8 +177,8 @@ def training_step(self, experience: Experience) -> Dict[str, float]: if self.ptx_coef != 0: batch = next(iter(self.pretrain_dataloader)) batch = to_device(batch, self.device) - ptx_log_probs = self.actor.get_base_model()(batch['input_ids'], - attention_mask=batch['attention_mask'])['logits'] + ptx_log_probs = self.actor(batch['input_ids'], + attention_mask=batch['attention_mask'])['logits'] ptx_loss = self.ptx_loss_fn(ptx_log_probs, batch['labels']) actor_loss = ptx_loss * self.ptx_coef + actor_loss * (1 - self.ptx_coef) @@ -200,14 +202,15 @@ def training_step(self, experience: Experience) -> Dict[str, float]: return {'reward': experience.reward.mean().item()} -def _set_default_generate_kwargs(strategy: Strategy, generate_kwargs: dict, actor: Actor) -> None: - origin_model = strategy.unwrap_model(actor) +def _set_default_generate_kwargs(strategy: Strategy, generate_kwargs: dict, actor: Actor) -> Dict: + unwrapper_model = strategy.unwrap_model(actor) + hf_model = get_base_model(unwrapper_model) new_kwargs = {**generate_kwargs} # use huggingface models method directly - if 'prepare_inputs_fn' not in generate_kwargs and hasattr(origin_model, 'prepare_inputs_for_generation'): - new_kwargs['prepare_inputs_fn'] = origin_model.prepare_inputs_for_generation + if 'prepare_inputs_fn' not in generate_kwargs and hasattr(hf_model, 'prepare_inputs_for_generation'): + new_kwargs['prepare_inputs_fn'] = hf_model.prepare_inputs_for_generation - if 'update_model_kwargs_fn' not in generate_kwargs and hasattr(origin_model, '_update_model_kwargs_for_generation'): - new_kwargs['update_model_kwargs_fn'] = origin_model._update_model_kwargs_for_generation + if 'update_model_kwargs_fn' not in generate_kwargs and hasattr(hf_model, '_update_model_kwargs_for_generation'): + new_kwargs['update_model_kwargs_fn'] = hf_model._update_model_kwargs_for_generation return new_kwargs diff --git a/applications/Chat/coati/trainer/strategies/base.py b/applications/Chat/coati/trainer/strategies/base.py index b1452869179e..06f81f21ab26 100644 --- a/applications/Chat/coati/trainer/strategies/base.py +++ b/applications/Chat/coati/trainer/strategies/base.py @@ -4,7 +4,6 @@ import torch import torch.nn as nn -from coati.models.base import Actor, get_base_model from coati.replay_buffer import ReplayBuffer from torch.optim import Optimizer from 
torch.utils.data import DataLoader @@ -69,21 +68,16 @@ def prepare( Union[List[ModelOrModelOptimPair], ModelOrModelOptimPair]: Models or model-optimizer-pairs in the original order. """ - def prepare_model(model: nn.Module): - if isinstance(model, Actor): - return Actor(self.setup_model(model.get_base_model())) - return self.setup_model(model) - rets = [] for arg in models_or_model_optim_pairs: if isinstance(arg, tuple): assert len(arg) == 2, f'Expect (model, optimizer) pair, got a tuple with size "{len(arg)}"' model, optimizer = arg - model = prepare_model(model) - optimizer = self.setup_optimizer(optimizer, get_base_model(model)) + model = self.setup_model(model) + optimizer = self.setup_optimizer(optimizer, model) rets.append((model, optimizer)) elif isinstance(arg, nn.Module): - rets.append(prepare_model(arg)) + rets.append(self.setup_model(arg)) else: raise RuntimeError(f'Expect model or (model, optimizer) pair, got {type(arg)}') @@ -93,16 +87,15 @@ def prepare_model(model: nn.Module): @staticmethod def unwrap_model(model: nn.Module) -> nn.Module: - """Get the unwrapped model from a wrapped model. Useful for getting original huggingface model. - For Actor, it will unwrap `actor.model`. + """Get the unwrapped model from a wrapped model made by Strategy.prepare. Args: model (nn.Module): the model to unwrap Returns: - nn.Module: the original model (usually a huggingface model) + nn.Module: the original model """ - return get_base_model(model) + return model @abstractmethod def save_model(self, model: nn.Module, path: str, only_rank0: bool = True) -> None: @@ -130,3 +123,7 @@ def save_pretrained(self, only_rank0: bool = True, tokenizer: Optional[PreTrainedTokenizerBase] = None) -> None: pass + + @abstractmethod + def get_model_state_dict_shard(self, model: nn.Module, **config): + pass diff --git a/applications/Chat/coati/trainer/strategies/colossalai.py b/applications/Chat/coati/trainer/strategies/colossalai.py index 8aa302c77eee..fafd0918deaf 100644 --- a/applications/Chat/coati/trainer/strategies/colossalai.py +++ b/applications/Chat/coati/trainer/strategies/colossalai.py @@ -5,7 +5,6 @@ import torch.distributed as dist import torch.nn as nn import torch.optim as optim -from coati.models.base import get_base_model from torch.optim import Optimizer from transformers.tokenization_utils_base import PreTrainedTokenizerBase @@ -153,14 +152,13 @@ def optimizer_step(self, optimizer: optim.Optimizer, **kwargs) -> None: def save_model(self, model: nn.Module, path: str, only_rank0: bool = True) -> None: if only_rank0 and dist.get_rank() != 0 and self.stage != 3: return - base_model = get_base_model(model) if self.stage == 3: - assert isinstance(base_model, ZeroDDP) + assert isinstance(model, ZeroDDP) # for stage 3, state_dict() method should be called on every rank - state_dict = base_model.state_dict(only_rank_0=only_rank0) + state_dict = model.state_dict(only_rank_0=only_rank0) else: # only_rank0 is false or rank == 0 - state_dict = base_model.state_dict() + state_dict = model.state_dict() if only_rank0 and dist.get_rank() != 0: return torch.save(state_dict, path) @@ -172,11 +170,10 @@ def save_optimizer(self, optimizer: Optimizer, path: str, only_rank0: bool = Fal torch.save(optimizer.state_dict(), path) def unwrap_model(self, model: nn.Module) -> nn.Module: - base_model: Union[nn.Module, ZeroDDP] = get_base_model(model) if self.stage == 3: - assert isinstance(base_model, ZeroDDP) - return base_model.module - return base_model + assert isinstance(model, ZeroDDP) + return model.module + 
return model def save_pretrained(self, model: nn.Module, @@ -186,3 +183,15 @@ def save_pretrained(self, if self.stage == 3: raise RuntimeError('ColossalAI strategy with stage-3 does not support save_pretrained() now') super().save_pretrained(model, path, only_rank0, tokenizer) + + def get_model_state_dict_shard(self, model: nn.Module, **config): + if self.stage != 3: + yield from super().get_model_state_dict_shard(model, **config) + else: + # unwrapped_model = self._unwrap_model(model) + # for module in unwrapped_model.modules(): + # if isinstance(module, LoraLinear): + # module.merge_weights = True + # module.eval() + assert isinstance(model, ZeroDDP) + yield from model.state_dict_shard(max_shard_size=1024, only_rank_0=False) diff --git a/applications/Chat/coati/trainer/strategies/ddp.py b/applications/Chat/coati/trainer/strategies/ddp.py index 7910b57878f8..713d7b90c6f0 100644 --- a/applications/Chat/coati/trainer/strategies/ddp.py +++ b/applications/Chat/coati/trainer/strategies/ddp.py @@ -26,19 +26,8 @@ def __init__(self, seed: int = 42) -> None: super().__init__() def setup_distributed(self) -> None: - try: - rank = int(os.environ['RANK']) - local_rank = int(os.environ['LOCAL_RANK']) - world_size = int(os.environ['WORLD_SIZE']) - host = os.environ['MASTER_ADDR'] - port = int(os.environ['MASTER_PORT']) - except KeyError as e: - raise RuntimeError( - f"Could not find {e} in the torch environment, visit https://www.colossalai.org/ for more information on launching with torch" - ) - dist.init_process_group('nccl', init_method=f'tcp://[{host}]:{port}', world_size=world_size, rank=rank) + self._try_init_dist(force=True) self.set_seed(self.seed) - torch.cuda.set_device(local_rank) def set_seed(self, seed: int) -> None: random.seed(seed) @@ -80,8 +69,8 @@ def setup_sampler(self, dataset) -> DistributedSampler: return DistributedSampler(dataset, dist.get_world_size(), dist.get_rank()) def unwrap_model(self, model: nn.Module) -> nn.Module: - base_model: DDP = super().unwrap_model(model) - return base_model.module + assert isinstance(model, DDP) + return model.module def save_pretrained(self, model: nn.Module, diff --git a/applications/Chat/coati/trainer/strategies/naive.py b/applications/Chat/coati/trainer/strategies/naive.py index 4d94026ce932..202c480e06d9 100644 --- a/applications/Chat/coati/trainer/strategies/naive.py +++ b/applications/Chat/coati/trainer/strategies/naive.py @@ -1,10 +1,17 @@ -from typing import Any, Optional +import os +import sys +from collections import OrderedDict +from typing import Any, Dict, Optional import torch +import torch.distributed as dist import torch.nn as nn import torch.optim as optim from coati.models.base import get_base_model from coati.replay_buffer import ReplayBuffer +from coati.models.base import RewardModel +from coati.models.lora import LoraLinear +from coati.replay_buffer import ReplayBuffer from torch.optim import Optimizer from torch.utils.data import DataLoader from transformers.modeling_utils import PreTrainedModel @@ -13,6 +20,15 @@ from .base import Strategy +# TODO Move this to a util.py (Moving to ray.util introduces ringed import) +def get_grad_required_state_dict(model: nn.Module): + state_dict = OrderedDict() + for name, parameter in model.named_parameters(): + if parameter.requires_grad: + state_dict[name] = parameter.detach() + return state_dict + + class NaiveStrategy(Strategy): """ Strategy for single GPU. No parallelism is used. 
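For orientation, below is a minimal sketch of how a sender might consume `get_model_state_dict_shard` and stream the shards to a remote maker with the `chunk_start`/`chunk_end` protocol of `update_experience_maker`. The strategy object, maker handle, model variable, and shard size are assumptions for illustration, not code from this patch.

```python
# Illustrative sketch only (not part of this patch): `strategy` is a prepared
# Strategy, `maker` is an ExperienceMakerHolder actor handle, and `actor_model`
# is the wrapped actor model. A real sender would stream shards one by one
# instead of materializing the whole list.
shards = list(strategy.get_model_state_dict_shard(actor_model, shard_size=512 * 1024**2))
for i, shard in enumerate(shards):
    maker.update_experience_maker.remote(
        new_actor_state_dict=shard,
        fully_update=True,
        chunk_start=(i == 0),              # acquires the maker-side model lock
        chunk_end=(i == len(shards) - 1),  # releases the lock after the last shard
    )
```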
@@ -25,7 +41,7 @@ def optimizer_step(self, optimizer: optim.Optimizer, **kwargs) -> None: optimizer.step() def setup_distributed(self) -> None: - pass + self._try_init_dist(force=False) def setup_model(self, model: nn.Module) -> nn.Module: return model @@ -42,14 +58,13 @@ def setup_dataloader(self, replay_buffer: ReplayBuffer, pin_memory: bool = False collate_fn=replay_buffer.collate_fn) def save_model(self, model: nn.Module, path: str, only_rank0: bool = True) -> None: - base_model = get_base_model(model) - state_dict = base_model.state_dict() + state_dict = model.state_dict() torch.save(state_dict, path) def load_model(self, model: nn.Module, path: str, map_location: Any = None, strict: bool = True) -> None: - base_model = get_base_model(model) + unwrapped_model = self.unwrap_model(model) state_dict = torch.load(path, map_location=map_location) - base_model.load_state_dict(state_dict, strict=strict) + unwrapped_model.load_state_dict(state_dict, strict=strict) def save_optimizer(self, optimizer: Optimizer, path: str, only_rank0: bool = False) -> None: torch.save(optimizer.state_dict(), path) @@ -68,3 +83,45 @@ def save_pretrained(self, unwrapped_model.save_pretrained(path) if tokenizer is not None: tokenizer.save_pretrained(path) + + def get_model_state_dict_shard(self, model: nn.Module, **config): + # TODO: implement sharding on naive strategy + model = self.unwrap_model(model) + if 'requires_grad_only' in config and config['requires_grad_only'] == True: + state_dict = get_grad_required_state_dict(model) + else: + state_dict = model.state_dict() + + if 'shard_size' in config: + shard_size = config['shard_size'] + accumulate_size = 0 + state_dict_shard = OrderedDict() + for name, param in state_dict.items(): + state_dict_shard[name] = param + accumulate_size += param.numel() * param.element_size() + if accumulate_size >= shard_size: + accumulate_size = 0 + yield state_dict_shard + state_dict_shard = OrderedDict() + if accumulate_size > 0: + yield state_dict_shard + else: + yield state_dict + + def _try_init_dist(self, force: bool = False) -> None: + try: + rank = int(os.environ['RANK']) + local_rank = int(os.environ['LOCAL_RANK']) + world_size = int(os.environ['WORLD_SIZE']) + host = os.environ['MASTER_ADDR'] + port = int(os.environ['MASTER_PORT']) + dist.init_process_group('nccl', init_method=f'tcp://[{host}]:{port}', world_size=world_size, rank=rank) + torch.cuda.set_device(local_rank) + except KeyError as e: + if force: + raise RuntimeError( + f"Could not find {e} in the torch environment, visit https://www.colossalai.org/ for more information on launching with torch" + ) + except Exception as e: + if force: + raise e diff --git a/applications/Chat/coati/trainer/strategies/sampler.py b/applications/Chat/coati/trainer/strategies/sampler.py index d726fa640fa2..65e199dbf029 100644 --- a/applications/Chat/coati/trainer/strategies/sampler.py +++ b/applications/Chat/coati/trainer/strategies/sampler.py @@ -27,6 +27,7 @@ def __init__(self, dataset, num_replicas: int, rank: int) -> None: assert len(indices) == self.num_samples self.indices = indices + def sample(self, batch_size: int) -> list: sampled_indices = np.random.choice(self.indices, batch_size, replace=False) return [self.dataset[idx] for idx in sampled_indices] diff --git a/applications/Chat/evaluate/README.md b/applications/Chat/evaluate/README.md index e3510e3522f6..e4a50b11d41f 100644 --- a/applications/Chat/evaluate/README.md +++ b/applications/Chat/evaluate/README.md @@ -12,12 +12,14 @@ pip install -r requirements.txt ## 
Evaluation Pipeline -The whole evaluation pipeline consists of two methods: +The whole evaluation pipeline consists of three methods: 1. `GPT Evaluation`: evaluates model predictions using GPT models. * Compare the performance of two different models (battle). * Rate the model according to pre-defined metrics using prompting design. + * Rate the model according to pre-defined metrics with an additional reference answer using prompting design. 2. `Automatic Evaluation`: evaluates model predictions using automatic metrics. +3. `UniEval`: evaluates model predictions using UniEval models (English only). ### Evaluation Category @@ -65,7 +67,7 @@ GPT evaluation uses GPT models to evaluate the prediction of different models an | 切题
(Relevance) | 切题(1-5):答案内容是否切题,不答非所问,并且严格遵照题目要求。<br><br>Relevance (1-5): whether the content of the answer is relevant to the topic, does not answer the wrong question, and strictly follows the requirements of the topic. | 1. 阅读题目,确定题目所问的问题是什么,以及需要回答哪些方面的问题。<br>2. 阅读答案,确认答案是否直接回答了题目所问的问题。<br>3. 检查答案是否严格遵照了题目的要求,包括答题方式、答题长度、答题格式等等。<br>4. 根据以上因素综合评估答案的切题程度,并给出一个1到5的分数,其中5表示答案非常切题,而1表示答案完全没有切题。<br><br>1. Read the question to determine what the question asks and what aspects of the question need to be answered.<br>2. Read the answers to make sure that they directly answer the question asked.<br>3. Check that the answer follows the requirements of the question, including the way it is answered, the length of the answer, the format of the answer, etc.<br>4. Evaluate how relevant the answer is based on the above factors and give a score of 1 to 5, where 5 means the answer is very relevant and 1 means the answer is not relevant at all. |
| 创意性<br>(Creativity) | 创意性(1-5):某些头脑风暴问题可能需要答案具有创意,提出新的思路。<br><br>Creativity (1-5): Some brainstorming questions may require answers that are creative and suggest new ideas. | 1. 仔细阅读所提供的头脑风暴问题,确保你理解问题的要点和背景。<br>2. 根据你的知识和经验,判断所提供的答案是否可行。如果答案不可行,则创意性评分可能会受到影响。<br>3. 考虑答案中是否包含新颖的想法或独特的思路。答案可能与已知的解决方案有所重叠,但仍然可以被认为是有创意的,只要它提供了新的角度或方法来解决问题。<br>4. 根据答案的创意性,给出一个1到5的评分。如果答案缺乏创意,则应给出一个较低的评分。如果答案具有创意并提供了新的思路,应给出一个较高的评分。<br><br>1. Read the provided brainstorming questions carefully to make sure you understand the gist and context of the questions.<br>2. Based on your knowledge and experience, determine if the answers provided are feasible. If the answer is not feasible, the creativity score may be affected.<br>3. Consider whether the answer contains novel ideas or unique thoughts. An answer may overlap with a known solution and still be considered creative, as long as it offers a new perspective or approach to the problem.<br>4. Give a score of 1 to 5 depending on the creativity of the answer. If the answer lacks creativity, a lower score should be given. If the answer is creative and provides a new idea, a higher score should be given. |
| 实用性<br>(Practicality) | 实用性(1-5):某些头脑风暴问题可能需要答案提出实用的建议或解决方法。<br><br>Practicality (1-5): Some brainstorming questions may require answers to suggest practical suggestions or solutions. | 1. 仔细阅读所提供的头脑风暴问题,确保你理解问题的要点和背景。<br>2. 根据你的知识和经验,判断所提供的答案是否可行。如果答案不可行,则实用性评分可能会受到影响。<br>3. 考虑答案中提出的建议或解决方法是否实用并可行。答案可能看起来很好,但如果无法实现或应用,则实用性评分可能会受到影响。<br>4. 根据答案的实用性,给出一个1到5的评分。如果答案缺乏实用性,则应给出一个较低的评分。如果答案提出了实用的建议或解决方法,并且可以很好地解决问题,则应给出一个较高的评分。<br><br>1. Read the provided brainstorming questions carefully to make sure you understand the gist and context of the questions.<br>2. Based on your knowledge and experience, determine if the answers provided are feasible. If the answer is not feasible, the practicality score may be affected.<br>3. Consider whether the suggestions or solutions presented in the answer are practical and workable. The answer may look good, but if it cannot be implemented or applied, the practicality score may be affected.<br>4. Give a score of 1 to 5 depending on the practicality of the answer. If the answer lacks practicality, a lower score should be given. If the answer makes a practical suggestion or solution and solves the problem well, a higher score should be given. |
-| 正确性<br>(Correctness) | 正确性(1-5):答案应该符合常识、生活实际等等。<br><br>Correctness (1-5): The answer should be in line with common sense, life experience, etc. | 1. 仔细阅读所提供的头脑风暴问题,确保你理解问题的要点和背景。<br>2. 根据你的知识和经验,判断所提供的答案是否可行。如果答案不可行,则正确性评分可能会受到影响。<br>3. 考虑答案中所提供的信息是否正确、符合常识、生活实际等等。如果答案中存在明显的错误或不合理之处,则正确性评分可能会受到影响。<br>4. 根据答案的正确性,给出一个1到5的评分。如果答案存在明显的错误或不合理之处,则应给出一个较低的评分。如果答案正确、符合常识、生活实际等等,则应给出一个较高的评分。<br><br>1. Read the provided brainstorming questions carefully to make sure you understand the gist and context of the questions.<br>2. Based on your knowledge and experience, determine if the answers provided are feasible. If the answer is not feasible, the correctness score may be affected.<br>3. Consider whether the information provided in the answer is correct, consistent with common sense, real life, etc. If there are obvious errors or implausibilities in the answer, the correctness score may be affected.<br>4. Give a score of 1 to 5 depending on the correctness of the answer. If the answer contains obvious errors or unreasonable points, a lower score should be given. A higher score should be given if the answer is correct, consistent with common sense, real life, etc. |
+| 正确性<br>(Correctness) | 正确性(1-5):答案是否正确。<br><br>Correctness (1-5): whether the answer is correct or not. | 1. 仔细阅读题目,尝试自己回答该问题。<br>2. 检查答案的准确性。您可以使用已知的事实或研究来验证答案是否正确。如果答案是正确的,则可以将正确性得分为5分。如果答案是部分正确的,则可以给予适当的得分,例如2分、3分或4分。如果答案完全不正确,则只得1分。<br><br>1. Read the question carefully and try to answer the question yourself.<br>2. Check the correctness of the answer. You can use known facts or research to verify that the answer is correct. If the answer is correct, you can give a score of 5 for correctness. If the answer is partially correct, an appropriate score, such as 2, 3, or 4, may be given. If the answer is completely incorrect, only 1 point is awarded. |
| 自然<br>(Naturalness) | 自然(1-5):答案是否自然,并且符合问题给定的身份。<br><br>Naturalness (1-5): whether the answer is natural and fits the identity given by the question. | 1. 阅读题目,确定题目提供的身份信息。<br>2. 检查答案内容是否符合题目给定的身份。<br>3. 根据以上因素,对该回答的自然性进行打分,分数从1到5,其中1表示不自然,5表示非常自然,并符合问题给定的身份。<br><br>1. Read the question and determine the identity information provided in the question.<br>2. Check whether the content of the answer matches the identity given in the question.<br>3. Based on the above factors, score the naturalness of the response on a scale from 1 to 5, where 1 means unnatural and 5 means very natural and in accordance with the identity given in the question. |
| 参与感<br>(Engagingness) | 参与感(1-5):答案是否对前面的对话内容做出了恰当的反应,是否理解对话的语境和背景。<br><br>Engagingness (1-5): whether the answer responds appropriately to the content of the preceding conversation and whether it understands the context and background of the conversation. | 1. 阅读题目,确定对话的语境和背景。<br>2. 检查答案是否充分理解对话的语境和背景,能否自然地融入到对话中而不显得突兀。<br>3. 根据以上因素,对该回答的参与感进行打分,分数从1到5,其中1表示没有参与感,5表示非常有参与感,并且恰当地理解了对话的语境和背景。<br><br>1. Read the questions to determine the context and background of the dialogue.<br>2. Check that the answer fully understands the context and background of the conversation and that it fits naturally into the conversation without seeming abrupt.<br>3. Based on the above factors, rate the response's engagement on a scale from 1 to 5, where 1 means not engaged and 5 means very engaged and appropriately understands the context and background of the conversation. |
| 合理性<br>(Reasonableness) | 合理性(1-5):答案是否能够与前面的对话内容形成逻辑上的衔接,是否符合常理,能否在这个上下文中合理存在。<br><br>Reasonableness (1-5): Whether the answer can form a logical connection with the content of the previous dialogue, whether it is consistent with common sense, and whether it can reasonably exist in this context. | 1. 阅读题目,确定对话的主题以及问题期望的回答方向。<br>2. 判断答案是否能够与前面的对话内容形成逻辑上的衔接,是否符合常理,能否在这个上下文中合理存在。<br>3. 根据以上因素,对该回答的合理性进行打分,分数从1到5,其中1表示不合理,5表示非常合理,并且能够与前面的对话内容形成逻辑上的衔接,并符合常理。<br><br>1. Read the question and determine the topic of the conversation and the direction the question expects the answer to go.<br>2. Determine whether the answer can be logically connected to the preceding conversation, whether it makes common sense, and whether it can reasonably exist in this context.<br>3. Based on the above factors, rate the reasonableness of the answer on a scale from 1 to 5, where 1 means unreasonable and 5 means very reasonable and able to form a logical connection with the preceding dialogue content and consistent with common sense. |
@@ -75,7 +77,9 @@ GPT evaluation uses GPT models to evaluate the prediction of different models an
GPT models evaluate the quality of model predictions based on the given prompt words and gives a score between 1-5.

-> **NOTE:** Even for the same metric, the details of its prompt words and CoT(Chain-of-Thought) can differ based on which category you want to evaluate. For example, prompt words for metric `correctness` showed here is "The answer should be in line with common sense, life experience, etc."(this is for category `brainstorming`), but for category `extraction`, prompt words can be "Answers should extract the required information accurately and should not contain any incorrect or misleading information." You can find all the prompt words and CoT(Chain-of-Thought) in `prompt/evaluation_prompt`.
+> **NOTE 1:** Even for the same metric, the details of its prompt words and CoT(Chain-of-Thought) can differ based on which category you want to evaluate. For example, prompt words for metric `correctness` showed here is "Whether the answer is correct or not."(this is for category `classification`), but for category `extraction`, prompt words can be "Answers should extract the required information accurately and should not contain any incorrect or misleading information." You can find all the prompt words and CoT(Chain-of-Thought) in `prompt/evaluation_prompt`.
+
+> **NOTE 2:** To add customized metrics, you can refer to [FAQ](#faq).

#### Automatic Evaluation

@@ -85,7 +89,7 @@ There are two ways to obtain reference answers:
* For instruction coming from human-designed problems, the reference answers are generated by GPT-3.5, such as roleplay, chat.
* For instruction related with classic NLP problems, the reference answers are collected from open-sourced dataset with target answers, such as classification, extraction, summarization.

-There are 5 types of automatic evaluation metrics listed in the table below:
+There are 6 types of automatic evaluation metrics listed in the table below:

| Automatic Evaluation Metric | Description |
| :---------------------------------: | :----------------------------------------------------------- |
@@ -94,6 +98,25 @@ There are 5 types of automatic evaluation metrics listed in the table below:
| Distinct | Measure the diversity of generation text by counting the unique n-grams. |
| BERTScore | Measure the semantic similarity between tokens of predictions and references with BERT. |
| Precision<br>Recall<br>F1 Score | Measure the number of overlaps between prediction and reference (design for classification and extraction categories). |
+| CHRF | Measure the similarity of character n-grams between prediction and reference. |
+
+#### UniEval Evaluation
+
+UniEval converts all evaluation tasks of different dimensions(metrics) into Boolean QA problems and utilize the model to answer with “Yes” or “No”. Compared with similarity-based metrics such as ROUGE and BLEU, UniEval can achieve a more comprehensive evaluation. In addition, UniEval also demonstrates its ability to transfer to unseen dimensions and tasks.
+
+In our evaluation pipeline, two pre-trained UniEval evaluators are used. One is [unieval-sum](https://huggingface.co/MingZhong/unieval-sum) and the other is [unieval-dialog](https://huggingface.co/MingZhong/unieval-dialog). The two models can be used for the 3 tasks, `summarization`, `dialogue` and `data2text`. Each task has different evaluation dimensions.
+
+| UniEval Model | Task | Dimension(Metric) |
+| :------------: | :----------------- | :--- |
+| unieval-sum | summarization | coherence: whether the summary is coherent<br>consistency: whether the claim is consistent with the given document<br>fluency: whether the paragraph is fluent<br>relevance: whether the summary is relevant to the reference |
+| unieval-sum | data2text | naturalness: whether the utterance is fluent<br>informativeness: whether the utterance is informative according to the reference |
+| unieval-dialog | dialogue | naturalness: whether the response is natural in the dialogue<br>coherence: whether the response is coherent in the dialogue history<br>understandability: whether the response is understandable in the dialogue |
+
+> **NOTE 1:** Task "data2text" uses the same model as task "summarization".
+
+> **NOTE 2:** In UniEval paper, the `unieval-sum` model demonstrates the best transfer ability and so you can evaluate your customized metric with this model. Details of adding customized metrics can be found in [FAQ](#faq).
+
+> **NOTE 3:** We consider not including all metrics provided in UniEval in our pipeline because the data structure and content of the instructions we want to evaluate are not suitable for direct use of some UniEval metrics.

## Evaluation Process

@@ -215,19 +238,26 @@ The following is an example of a Chinese GPT evaluation prompt. In an evaluation

#### Configuration

-The following is an example of a Chinese config file. The configuration file can control how the pipeline evaluates the model. You need to specify GPT evaluation metrics and automatic metrics in key `GPT` and `Metrics`. You can find an example Chinese config file in `config`.
+The following is an example of a Chinese config file. The configuration file can control how the pipeline evaluates the model. You need to specify GPT evaluation metrics, automatic metrics and UniEval metrics in key `GPT`, `Metrics` and `UniEval`(English only). You can find an example English config file in `config`.

```json
{
-    "language": "cn",
+    "language": "en",
+    "path_for_UniEval": {
+        "summarization": "path to unieval-sum model",
+        "dialogue": "path to unieval-dialog model",
+        "data2text": "path to unieval-sum model"
+    },
    "category": {
        "brainstorming": {
-            "GPT": ["relevance", "creativity", "practicality", "correctness"],
-            "Metrics": ["Distinct"]
+            "GPT": ["relevance", "creativity", "practicality", "reasonableness"],
+            "Metrics": ["Distinct"],
+            "UniEval": ["summarization-fluency", "data2text-naturalness", "data2text-informativeness"]
        },
        "chat": {
            "GPT": [ "relevance", "naturalness", "engagingness", "reasonableness"],
-            "Metrics": ["Distinct"]
+            "Metrics": ["Distinct"],
+            "UniEval": ["dialogue-naturalness", "dialogue-coherence", "dialogue-understandability"]
        }
    }
}
```
@@ -235,27 +265,33 @@ The following is an example of a Chinese config file. The configuration file can
`"language"`: the language used to evaluate the model capability. We only support Chinese `"cn"` for now.

+`"path_for_UniEval"`: path to the UniEval model.
+
`"category"`: the category/categories needed to evaluate the model capability.

`"GPT"`: the metrics you want to use for GPT evaluation.

`"Metrics"`: the metrics you want to use for automatic metrics evaluation.

+`"UniEval"`: the metrics you want to use for UniEval metrics evaluation. The metric has to be in the `"{task}-{metric}"` format because different tasks have same metrics such as naturalness and coherence.
+
+You can remove the key such as `"Metrics"` to skip evaluating answers using its corresponding evaluation metrics.
+
You can create your config file based on available settings listed in following table.
-| "category" | "GPT" | "Metrics" | -| :--------------: | :---------------------: | :---------: | -| "brainstorming" | "language organization" | "BLEU" | -| "chat" | "relevance" | "ROUGE" | -| "classification" | "creativity" | "Distinct" | -| "closed_qa" | "practicality" | "BERTScore" | -| "extraction" | "correctness" | "Precision" | -| "generation" | "naturalness" | "Recall" | -| "open_qa" | "engagingness" | "F1 score" | -| "rewriting" | "reasonableness" | | -| "roleplay" | "diversity" | | -| "summarization" | "fidelity" | | -| | "conciseness" | | +| "category" | "GPT" | "Metrics" | "UniEval" | +| :--------------: | :---------------------: | :---------: | :--------------------------: | +| "brainstorming" | "language organization" | "BLEU" | "dialogue-naturalness" | +| "chat" | "relevance" | "ROUGE" | "dialogue-coherence" | +| "classification" | "creativity" | "Distinct" | "dialogue-understandability" | +| "closed_qa" | "practicality" | "BERTScore" | "data2text-naturalness" | +| "extraction" | "correctness" | "Precision" | "data2text-informativeness" | +| "generation" | "naturalness" | "Recall" | "summarization-coherence" | +| "open_qa" | "engagingness" | "F1 score" | "summarization-consistency" | +| "rewriting" | "reasonableness" | "CHRF" | "summarization-fluency" | +| "roleplay" | "diversity" | | "summarization-relevance" | +| "summarization" | "fidelity" | | | +| | "conciseness" | | | > **NOTE:** For categories which don't have standard answers such as `brainstorming`, you should avoid using automatic metrics such as `BLEU` and `ROUGE` which are based on similarity measures and you should use `Distinct` instead in your config file. @@ -278,6 +314,8 @@ python eval.py \ --openai_key "your openai key" \ ``` +If you want GPT evaluation with reference, you can add an argument `--gpt_with_reference`. + ## FAQ
How can I add a new GPT evaluation metric?

@@ -290,23 +328,36 @@ For example, if you want to add a new metric `persuasiveness` into category `bra
    "id": 1,
    "category": "brainstorming",
    "metrics": {
-        "persuasiveness": "说服力(1-5):XXX"
+        "persuasiveness": "persuasiveness(1-5):a short description for persuasiveness"
    },
    "CoT": {
-        "persuasiveness": "XXX\n\n说服力:"
+        "persuasiveness": "CoT for persuasiveness\n\npersuasiveness:"
    },
-    "prompt": "你是一个好助手。请你为下面“头脑风暴”问题的答案打分。\n\n问题如下:\n\n{question}\n\n答案如下:\n\n{answer}\n\n评分的指标如下:\n\n{metric}\n\n请你遵照以下的评分步骤:\n\n{steps}"
+    "prompt": "You are a good assistant. Please rate the given answer to the \"brainstorming\" question below.\n\nThe question is as follows:\n\n{question}\n\nThe answer is as follows:\n\n{answer}\n\nThe metric for evaluation is as follows:\n\n{metric}\n\nYou should follow the following evaluation steps:\n\n{steps}"
}
}
```
+
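For readers skimming the patch, the following minimal sketch (not part of the diff) shows how a custom `persuasiveness` entry like the one above would be combined with a question and a model answer. It reuses the `{question}`/`{answer}`/`{metric}`/`{steps}` placeholders from the `"prompt"` template and the same `.format(...)` pattern that appears in the `gpt_evaluate.py` changes; the `build_evaluation_prompt` helper and the sample question/answer strings are hypothetical and only serve to illustrate the wiring.

```python
# Hypothetical illustration (not part of the diff): how a custom "persuasiveness"
# prompt entry is turned into the final GPT evaluation prompt. The placeholder
# names match the "prompt" template shown in the FAQ example above.
prompt_entry = {
    "metrics": {"persuasiveness": "persuasiveness(1-5):a short description for persuasiveness"},
    "CoT": {"persuasiveness": "CoT for persuasiveness\n\npersuasiveness:"},
    "prompt": ("You are a good assistant. Please rate the given answer to the \"brainstorming\" "
               "question below.\n\nThe question is as follows:\n\n{question}\n\nThe answer is as "
               "follows:\n\n{answer}\n\nThe metric for evaluation is as follows:\n\n{metric}\n\n"
               "You should follow the following evaluation steps:\n\n{steps}"),
}


def build_evaluation_prompt(entry: dict, metric: str, question: str, answer: str) -> str:
    """Fill the prompt template with one question/answer pair and one metric.

    Mirrors the prompt["prompt"].format(...) call used by the evaluation script;
    the helper name itself is an assumption for this sketch.
    """
    return entry["prompt"].format(
        question=question,
        answer=answer,
        metric=entry["metrics"][metric],
        steps=entry["CoT"][metric],
    )


if __name__ == "__main__":
    # Made-up sample data, purely for demonstration.
    print(build_evaluation_prompt(
        prompt_entry,
        "persuasiveness",
        question="Propose ways to encourage recycling in an office.",
        answer="Offer small deposits that are refunded at collection points.",
    ))
```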
How can I add a new UniEval evaluation metric?
+
+For example, if you want to add a new metric `persuasiveness` into task `data2text`, you should add a Boolean QA question about the metric in function `add_question` in `unieval/utils.py`. Please do note that how effectively the model would evaluate this metric is unknown and you may need some experiments to test whether the model is capable of evaluating this metric.
+
+```python
+if task == 'data2text':
+    if dimension == 'persuasiveness':
+        cur_input = 'question: Is this a persuasive utterence utterance: ' + output[i]
+```
+
+
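As a companion to the snippet above, here is a minimal, self-contained sketch (not part of the diff) of how Boolean QA inputs for a customized dimension could be assembled for a batch of model outputs. The `build_qa_inputs` helper and its question wording are assumptions for illustration only; the authoritative construction is the `add_question` code in `unieval/utils.py` referenced above.

```python
# Hypothetical illustration (not from the diff): building Boolean QA inputs for a
# customized "persuasiveness" dimension, in the same spirit as the add_question
# snippet above. The exact question string used by the pipeline is the one shown
# in the FAQ; this wording is a paraphrase for demonstration.
from typing import List


def build_qa_inputs(dimension: str, outputs: List[str]) -> List[str]:
    """Turn each model output into a yes/no question string for a UniEval-style evaluator."""
    if dimension == "persuasiveness":
        return ["question: Is this a persuasive utterance? utterance: " + out for out in outputs]
    raise ValueError(f"Unsupported dimension: {dimension}")


if __name__ == "__main__":
    for qa_input in build_qa_inputs("persuasiveness", ["Recycling saves money and the planet."]):
        print(qa_input)
```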
+ ## To Do - [x] Add evaluation for English capability -- [ ] Support UniEval +- [x] Support UniEval - [x] Support GPT-4 evaluation +- [x] Support GPT evaluation with reference ## Citations @@ -327,4 +378,13 @@ For example, if you want to add a new metric `persuasiveness` into category `bra archivePrefix={arXiv}, primaryClass={cs.CL} } + +@misc{zhong2022unified, + title={Towards a Unified Multi-Dimensional Evaluator for Text Generation}, + author={Ming Zhong and Yang Liu and Da Yin and Yuning Mao and Yizhu Jiao and Pengfei Liu and Chenguang Zhu and Heng Ji and Jiawei Han}, + year={2022}, + eprint={2210.07197}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} ``` diff --git a/applications/Chat/evaluate/config/config_cn.json b/applications/Chat/evaluate/config/config_cn.json index a8c7ea8a3135..dffb66f6c3be 100644 --- a/applications/Chat/evaluate/config/config_cn.json +++ b/applications/Chat/evaluate/config/config_cn.json @@ -7,7 +7,7 @@ "relevance", "creativity", "practicality", - "correctness" + "reasonableness" ], "Metrics": [ "Distinct" @@ -34,7 +34,8 @@ "Metrics": [ "Precision", "Recall", - "F1 score" + "F1 score", + "CHRF" ] }, "closed_qa": { @@ -46,7 +47,8 @@ "Metrics": [ "BLEU", "ROUGE", - "BERTScore" + "BERTScore", + "CHRF" ] }, "extraction": { @@ -58,7 +60,8 @@ "Metrics": [ "Precision", "Recall", - "F1 score" + "F1 score", + "CHRF" ] }, "generation": { @@ -116,7 +119,8 @@ "Metrics": [ "BLEU", "ROUGE", - "BERTScore" + "BERTScore", + "CHRF" ] } } diff --git a/applications/Chat/evaluate/config/config_en.json b/applications/Chat/evaluate/config/config_en.json index 5b6272b97084..5238bd19f67e 100644 --- a/applications/Chat/evaluate/config/config_en.json +++ b/applications/Chat/evaluate/config/config_en.json @@ -1,5 +1,10 @@ { "language": "en", + "path_for_UniEval": { + "summarization": "path to unieval-sum", + "dialogue": "path to unieval-dialog", + "data2text": "path to unieval-sum" + }, "category": { "brainstorming": { "GPT": [ @@ -7,10 +12,15 @@ "relevance", "creativity", "practicality", - "correctness" + "reasonableness" ], "Metrics": [ "Distinct" + ], + "UniEval": [ + "summarization-fluency", + "data2text-naturalness", + "data2text-informativeness" ] }, "chat": { @@ -23,6 +33,14 @@ ], "Metrics": [ "Distinct" + ], + "UniEval": [ + "summarization-fluency", + "dialogue-naturalness", + "dialogue-coherence", + "dialogue-understandability", + "data2text-naturalness", + "data2text-informativeness" ] }, "classification": { @@ -34,7 +52,13 @@ "Metrics": [ "Precision", "Recall", - "F1 score" + "F1 score", + "CHRF" + ], + "UniEval": [ + "summarization-fluency", + "data2text-naturalness", + "data2text-informativeness" ] }, "closed_qa": { @@ -46,7 +70,13 @@ "Metrics": [ "BLEU", "ROUGE", - "BERTScore" + "BERTScore", + "CHRF" + ], + "UniEval": [ + "summarization-fluency", + "data2text-naturalness", + "data2text-informativeness" ] }, "extraction": { @@ -58,7 +88,13 @@ "Metrics": [ "Precision", "Recall", - "F1 score" + "F1 score", + "CHRF" + ], + "UniEval": [ + "summarization-fluency", + "data2text-naturalness", + "data2text-informativeness" ] }, "generation": { @@ -71,6 +107,11 @@ "BLEU", "ROUGE", "BERTScore" + ], + "UniEval": [ + "summarization-fluency", + "data2text-naturalness", + "data2text-informativeness" ] }, "open_qa": { @@ -81,6 +122,11 @@ ], "Metrics": [ "Distinct" + ], + "UniEval": [ + "summarization-fluency", + "data2text-naturalness", + "data2text-informativeness" ] }, "rewriting": { @@ -93,6 +139,11 @@ "BLEU", "ROUGE", "BERTScore" + ], + "UniEval": [ + "summarization-fluency", + 
"data2text-naturalness", + "data2text-informativeness" ] }, "roleplay": { @@ -104,6 +155,11 @@ ], "Metrics": [ "Distinct" + ], + "UniEval": [ + "summarization-fluency", + "data2text-naturalness", + "data2text-informativeness" ] }, "summarization": { @@ -116,7 +172,16 @@ "Metrics": [ "BLEU", "ROUGE", - "BERTScore" + "BERTScore", + "CHRF" + ], + "UniEval": [ + "summarization-coherence", + "summarization-consistency", + "summarization-fluency", + "summarization-relevance", + "data2text-naturalness", + "data2text-informativeness" ] } } diff --git a/applications/Chat/evaluate/eval.py b/applications/Chat/evaluate/eval.py index 8388d95f748a..e3fe0e9e091b 100644 --- a/applications/Chat/evaluate/eval.py +++ b/applications/Chat/evaluate/eval.py @@ -38,9 +38,14 @@ def main(args): raise Exception( "No prompt file for gpt evaluation provided. Please specify the prompt file for gpt evaluation!") + if args.gpt_model == "text-davinci-003" and args.gpt_with_reference: + raise Exception( + "GPT evaluation with reference is not supported for text-davinci-003. You should specify chat models such as gpt-3.5-turbo or gpt-4." + ) + # initialize evaluator evaluator = Evaluator(metrics_per_category, battle_prompt, gpt_evaluation_prompt, args.gpt_model, - config["language"]) + config["language"], config.get("path_for_UniEval", None), args.gpt_with_reference) if len(args.model_name_list) == 2: answers1 = jload(args.answer_file_list[0]) answers2 = jload(args.answer_file_list[1]) @@ -92,6 +97,10 @@ def main(args): default="gpt-3.5-turbo", choices=["text-davinci-003", "gpt-3.5-turbo", "gpt-4"], help='which GPT model to use for evaluation') + parser.add_argument('--gpt_with_reference', + default=False, + action="store_true", + help='whether to include reference answer in gpt evaluation') parser.add_argument('--save_path', type=str, default="results", help='path to save evaluation results') parser.add_argument('--openai_key', type=str, default=None, required=True, help='Your openai key') args = parser.parse_args() diff --git a/applications/Chat/evaluate/evaluator.py b/applications/Chat/evaluate/evaluator.py index 0bf55ca80d7c..3dd5fd6f2f23 100644 --- a/applications/Chat/evaluate/evaluator.py +++ b/applications/Chat/evaluate/evaluator.py @@ -4,6 +4,7 @@ import gpt_evaluate import metrics import pandas as pd +import unieval from utils import analyze_automatic_results, get_data_per_category, save_automatic_results @@ -15,13 +16,16 @@ class Evaluator(object): """ def __init__(self, params: Dict[str, Any], battle_prompt: Dict[str, Any], gpt_evaluation_prompt: Dict[str, Any], - gpt_model: str, language: str) -> None: + gpt_model: str, language: str, path_for_UniEval: Dict[str, str], gpt_with_reference: bool) -> None: self.params = params self.battle_prompt = battle_prompt self.gpt_evaluation_prompt = gpt_evaluation_prompt self.gpt_model = gpt_model self.language = language + self.path_for_UniEval = path_for_UniEval + self.gpt_with_reference = gpt_with_reference self.automatic_metric_stats = dict() + self.unieval_metric_stats = dict() self.gpt_evaluation_results = dict() self.battle_results = [] @@ -47,16 +51,18 @@ def switch(metric, language): return metrics.bleu_score(preds=predicts_list, targets=targets_list, language=language) elif metric == "ROUGE": return metrics.rouge_score(preds=predicts_list, targets=targets_list, language=language) - elif (metric == "Distinct"): + elif metric == "Distinct": return metrics.distinct_score(preds=predicts_list, language=language) - elif (metric == "BERTScore"): + elif metric == 
"BERTScore": return metrics.bert_score(preds=predicts_list, targets=targets_list, language=language) - elif (metric == "Precision"): + elif metric == "Precision": return metrics.precision(preds=predicts_list, targets=targets_list, language=language) - elif (metric == "Recall"): + elif metric == "Recall": return metrics.recall(preds=predicts_list, targets=targets_list, language=language) - elif (metric == "F1 score"): + elif metric == "F1 score": return metrics.F1_score(preds=predicts_list, targets=targets_list, language=language) + elif metric == "CHRF": + return metrics.chrf_score(preds=predicts_list, targets=targets_list, language=language) else: raise ValueError(f"Unexpected metric") @@ -69,6 +75,9 @@ def switch(metric, language): print(f"Category {category} specified in your config doesn't have corresponding answers!") continue + if self.params[category].get("Metrics", None) is None: + continue + category_metrics = self.params[category]["Metrics"] self.automatic_metric_stats[category] = {} @@ -80,12 +89,68 @@ def switch(metric, language): for metric in category_metrics: self.automatic_metric_stats[category].update(switch(metric=metric, language=self.language)) + # UniEval evaluation + # self.unieval_metric_stats's key is "task" instead of "category". + # Iterating "task" first will avoid repeated loading models because one task corresponds to one UniEval model. + # If key is "category", different models will be loaded for multiple times across categories because the user may require different task(models) to evaluate one category. + for category in self.params: + if len(answers_per_category[category]) == 0: + print(f"Category {category} specified in your config doesn't have corresponding answers!") + continue + + if self.params[category].get("UniEval", None) is None: + continue + + if self.params[category]["UniEval"] and self.language == "cn": + raise Exception( + "UniEval doesn't support Chinese! 
Please remove UniEval config in your Chinese config file.") + + category_metrics = self.params[category]["UniEval"] + + for task, metric in [tuple(category_metric.split("-")) for category_metric in category_metrics]: + if self.unieval_metric_stats.get(task, None) is None: + self.unieval_metric_stats[task] = {category: {metric: 0}} + elif self.unieval_metric_stats[task].get(category, None) is None: + self.unieval_metric_stats[task][category] = {metric: 0} + else: + self.unieval_metric_stats[task][category][metric] = 0 + + for task in self.unieval_metric_stats: + if self.path_for_UniEval is None: + raise Exception(f"Please specify the path for UniEval model in the config file!") + + if self.path_for_UniEval.get(task, None) is None: + raise Exception(f"Please specify the model path for task {task} in the config file!") + + print(f"Load UniEval model for task {task}.") + + uni_evaluator = unieval.get_evaluator(task, model_name_or_path=self.path_for_UniEval[task]) + for category in self.unieval_metric_stats[task]: + targets_list = [ + target["target"] if target["target"] else target["output"] + for target in targets_per_category[category] + ] + predicts_list = [answer["output"] for answer in answers_per_category[category]] + sources_list = [answer["instruction"] + answer["input"] for answer in answers_per_category[category]] + + data = unieval.convert_data_to_unieval_format(predicts_list, sources_list, targets_list) + scores = uni_evaluator.evaluate(data, + category, + dims=list(self.unieval_metric_stats[task][category].keys()), + overall=False) + avg_scores = unieval.calculate_average_score(scores) + + self.unieval_metric_stats[task][category].update(avg_scores) + # gpt evaluation for category in self.params: if len(answers_per_category[category]) == 0: print(f"Category {category} specified in your config doesn't have corresponding answers!") continue + if self.params[category].get("GPT", None) is None: + continue + category_metrics = self.params[category]["GPT"] prompt = self.gpt_evaluation_prompt.get(category, None) @@ -93,8 +158,14 @@ def switch(metric, language): print(f"No prompt for category {category}! Use prompt for category general now.") prompt = self.gpt_evaluation_prompt["general"] - self.gpt_evaluation_results[category] = gpt_evaluate.evaluate(answers_per_category[category], prompt, - category_metrics, category, self.gpt_model) + self.gpt_evaluation_results[category] = gpt_evaluate.evaluate( + answers_per_category[category], + prompt, + category_metrics, + category, + self.gpt_model, + self.language, + references=targets_per_category[category] if self.gpt_with_reference else None) def save(self, path: str, model_name_list: List[str]) -> None: """ @@ -106,29 +177,43 @@ def save(self, path: str, model_name_list: List[str]) -> None: save_path = os.path.join(path, "gpt_evaluate", "battle_results") gpt_evaluate.save_battle_results(self.battle_results, model_name_list[0], model_name_list[1], save_path) else: - # Save evaluation results for automatic metrics - automatic_base_save_path = os.path.join(path, "automatic_results") - automatic_results_save_path = os.path.join(automatic_base_save_path, "evaluation_results") - - save_automatic_results(model_name_list[0], self.automatic_metric_stats, automatic_results_save_path) - - # Save charts and csv. - automatic_analyses_save_path = os.path.join(automatic_base_save_path, "evaluation_analyses") - analyze_automatic_results(automatic_results_save_path, automatic_analyses_save_path) - - # Save evaluation results for GPT evaluation metrics. 
- gpt_base_save_path = os.path.join(path, "gpt_evaluate", "gpt_evaluate_results") - gpt_evaluation_results_save_path = os.path.join(gpt_base_save_path, "evaluation_results") - - all_evaluations = gpt_evaluate.save_gpt_evaluation_results(model_name_list[0], self.gpt_evaluation_results, - gpt_evaluation_results_save_path) - - # Start to calculate scores and save statistics. - gpt_evaluation_statistics_save_path = os.path.join(gpt_base_save_path, "evaluation_statistics") - gpt_evaluate.save_gpt_evaluation_statistics(model_name_list[0], all_evaluations, - gpt_evaluation_statistics_save_path) - - # Save charts and csv. - gpt_evaluation_analyses_save_path = os.path.join(gpt_base_save_path, "evaluation_analyses") - gpt_evaluate.analyze_gpt_evaluation_statistics(gpt_evaluation_statistics_save_path, - gpt_evaluation_analyses_save_path) + if self.automatic_metric_stats: + # Save evaluation results for automatic metrics + automatic_base_save_path = os.path.join(path, "automatic_results") + automatic_results_save_path = os.path.join(automatic_base_save_path, "evaluation_results") + + save_automatic_results(model_name_list[0], self.automatic_metric_stats, automatic_results_save_path) + + # Save charts and csv. + automatic_analyses_save_path = os.path.join(automatic_base_save_path, "evaluation_analyses") + analyze_automatic_results(automatic_results_save_path, automatic_analyses_save_path) + + if self.unieval_metric_stats: + # Save evaluation results for UniEval metrics + unieval_base_save_path = os.path.join(path, "unieval_results") + unieval_results_save_path = os.path.join(unieval_base_save_path, "evaluation_results") + + unieval.save_unieval_results(model_name_list[0], self.unieval_metric_stats, unieval_results_save_path) + + # Save charts and csv. + unieval_analyses_save_path = os.path.join(unieval_base_save_path, "evaluation_analyses") + unieval.analyze_unieval_results(unieval_results_save_path, unieval_analyses_save_path) + + if self.gpt_evaluation_results: + # Save evaluation results for GPT evaluation metrics. + gpt_base_save_path = os.path.join(path, "gpt_evaluate", "gpt_evaluate_results") + gpt_evaluation_results_save_path = os.path.join(gpt_base_save_path, "evaluation_results") + + all_evaluations = gpt_evaluate.save_gpt_evaluation_results(model_name_list[0], + self.gpt_evaluation_results, + gpt_evaluation_results_save_path) + + # Start to calculate scores and save statistics. + gpt_evaluation_statistics_save_path = os.path.join(gpt_base_save_path, "evaluation_statistics") + gpt_evaluate.save_gpt_evaluation_statistics(model_name_list[0], all_evaluations, + gpt_evaluation_statistics_save_path) + + # Save charts and csv. 
+ gpt_evaluation_analyses_save_path = os.path.join(gpt_base_save_path, "evaluation_analyses") + gpt_evaluate.analyze_gpt_evaluation_statistics(gpt_evaluation_statistics_save_path, + gpt_evaluation_analyses_save_path) diff --git a/applications/Chat/evaluate/gpt_evaluate.py b/applications/Chat/evaluate/gpt_evaluate.py index b433500dfa04..012f41ab0c41 100644 --- a/applications/Chat/evaluate/gpt_evaluate.py +++ b/applications/Chat/evaluate/gpt_evaluate.py @@ -13,6 +13,23 @@ import tqdm from utils import jdump, jload +ref_step_template = { + "en": + "Now please compare the answer with the {adjective} answer, determine whether the answer is able to achieve the same level of {metric}.\n\n", + "cn": + "请比较答案与上面的{adjective}答案,确定答案是否可以达到与该{adjective}答案同样水平的{metric}。\n\n" +} + +ref_answer_template_general = { + "en": "\nAn example answer with good quality is as follows:\n\n{answer}\n\n", + "cn": "\n一个优质的示例答案如下:\n\n{answer}\n\n" +} + +ref_answer_template_correctness = { + "en": "\nA correct answer is as follows:\n\n{answer}\n\n", + "cn": "\n标准答案如下:\n\n{answer}\n\n" +} + def get_battle_result(sys_prompt: str, user_prompt: str, id: int, max_tokens: int = 2048) -> Dict[str, Any]: """ @@ -233,18 +250,125 @@ def save_battle_results(evaluations: List[Dict], name1: str, name2: str, save_pa print(f"Model {name2} average score: {ans2_score/(len(evaluations)-invalid_count):.2f}") +def reference_template(metric: str, language: str, reference: Dict[str, Any]) -> str: + """ + Get prompt template for GPT evaluation with reference. + + Different languages have different prompt templates. + + Args: + metric: metric used in GPT evaluation with reference. + language: language for the template. + reference: the instruction that contains target answer. + + Returns: + Prompt template for GPT evaluation with reference. + """ + + step_to_add = ref_step_template[language] + + for_the_given_answer = "{metric} (1-5) (directly give the score for the given answer):" if language == "en" else "{metric} (1-5) (直接对给定答案打分)" + + # adjective is used to describe the word "answer" in the prompt. + adjective = "example" if language == "en" else "示例" + answer_to_add = ref_answer_template_general[language] + + # Only for correctness, we will provide a correct answer and so the adjective for "answer" will be "correct". The prompt words will be "a correct answer". + # In other cases, the prompt words will be "an example answer with good quality" by default. + if metric.lower() == "correctness": + adjective = "correct" if language == "en" else "标准" + answer_to_add = ref_answer_template_correctness[language] + + answer_to_add = answer_to_add.format(answer=reference["target"] if reference["target"] else reference["output"]) + step_to_add = step_to_add.format(metric=metric.lower(), + adjective=adjective) + for_the_given_answer.format(metric=metric) + + return answer_to_add + step_to_add + + +def fill_in_message(role: str, content: str) -> Dict[str, str]: + """ + Generate one formatted message to send through chat completion. + + Args: + role: the role of the author of this message. + content: the contents of the message. + + Returns: + One message to send through chat completion. + """ + + return {"role": role, "content": content} + + +def multiturn_chat_completion(user_messages: List[str], model: str, max_tokens: int = 1, turns=2) -> Dict[str, Any]: + """ + Do multi-turn chat completion. + + When turns == 1, it is a one-turn conversation for normal GPT evaluation. 
+ When turns == 2, it is a two-turn conversation which is used for GPT evaluation with reference answers. + + Args: + user_messages: messages user wants to send. + model: the model used to evaluate answers. + max_tokens: the maximum number of tokens to generate in the chat completion. + turns: the number of turns for conversation. + + Returns: + Last turn's response. + """ + + if len(user_messages) != turns: + raise Exception("The length of user messages should be equal to the turn number!") + + assistant_responses = [] + + for i in range(turns): + messages_to_send = [] + + for j in range(i): + messages_to_send.append(fill_in_message("user", user_messages[j])) + messages_to_send.append( + fill_in_message("assistant", assistant_responses[j]["choices"][0]["message"]["content"])) + + # Length of user messages == Length of assistant messages + 1 + # Because we always expect the api to response + messages_to_send.append(fill_in_message("user", user_messages[i])) + + response = openai.ChatCompletion.create( + model=model, + messages=messages_to_send, + temperature=0, + max_tokens=max_tokens, + ) + + # Avoid exceeding rate limits. + # You can comment this line if your request doesn't contain many tokens. + time.sleep(1) + + assistant_responses.append(response) + + return assistant_responses[-1] + + def get_gpt_evaluation_without_logprobs(prompt: Dict[str, Any], inst: Dict[str, Any], metrics: List[str], + language: str, + reference: Dict[str, Any] = None, model: str = "gpt-3.5-turbo", max_tokens: int = 2048) -> Dict[str, Any]: """ Use chat models(gpt-3.5-turbo or gpt-4) to evaluate one model answer. + Temprature is set to 0 to make the model more deterministic. + Args: prompt: a dictionary including prompt template, CoT and metrics. inst: the instruction that is needed to be evaluated. metrics: the metrics for evaluation. + language: language used to change the CoT(add one more step about comparing the given answer and reference) if reference is not None. + reference: the reference answer. model: the model used to evaluate answers. max_tokens: the maximum number of tokens to generate in the chat completion. 
@@ -254,7 +378,7 @@ def get_gpt_evaluation_without_logprobs(prompt: Dict[str, Any], MAX_API_RETRY = 3 - question = (inst["instruction"] if inst["input"] == "" else inst["instruction"] + " " + inst["input"]) + question = (inst["instruction"] if inst["input"] == "" else inst["instruction"] + "\n" + inst["input"]) answer = inst["output"] inst["evaluation"] = {} @@ -265,28 +389,34 @@ def get_gpt_evaluation_without_logprobs(prompt: Dict[str, Any], ) for i in range(MAX_API_RETRY): try: - response = openai.ChatCompletion.create( - model=model, - messages=[ - { - "role": - "user", - "content": - prompt["prompt"].format( - question=question, - answer=answer, - metric=prompt["metrics"][metric], - steps=prompt["CoT"][metric], - ), - }, - ], - temperature=0, - max_tokens=max_tokens, + prompt_reference = "" if reference is None else reference_template(metric, language, reference) + + prompt_1st_round = prompt["prompt"].format( + question=question, + answer=answer, + metric=prompt["metrics"][metric], + steps=prompt["CoT"][metric], ) + + if prompt_reference: + # Do a 2-round conversation + response = multiturn_chat_completion([prompt_1st_round, prompt_reference], + model, + max_tokens=max_tokens, + turns=2) + else: + response = multiturn_chat_completion([prompt_1st_round], model, max_tokens=max_tokens, turns=1) + inst["evaluation"][metric] = { "response": response["choices"][0]["message"]["content"], "logprobs": None, } + + # Prevent exceeding rate limits because we have multiple workers. + # But this will slow down the evaluation process. + # You can comment this line if your request doesn't contain many tokens. + time.sleep(len(metrics) * 0.5) + break except Exception as e: print(e) @@ -305,6 +435,8 @@ def get_gpt_evaluation_with_logprobs(prompt: Dict[str, Any], Use completion model(text-davinci-003) to evaluate one model answer. Only completion models can return log probabilities. + Temprature is set to 0 to make the model more deterministic. + Args: prompt: a dictionary including prompt template, CoT and metrics. inst: the instruction that is needed to be evaluated. @@ -317,7 +449,7 @@ def get_gpt_evaluation_with_logprobs(prompt: Dict[str, Any], MAX_API_RETRY = 3 - question = (inst["instruction"] if inst["input"] == "" else inst["instruction"] + " " + inst["input"]) + question = (inst["instruction"] if inst["input"] == "" else inst["instruction"] + "\n" + inst["input"]) answer = inst["output"] inst["evaluation"] = {} @@ -344,6 +476,12 @@ def get_gpt_evaluation_with_logprobs(prompt: Dict[str, Any], "response": response["choices"][0]["text"], "logprobs": response["choices"][0]["logprobs"]["top_logprobs"], } + + # Prevent exceeding rate limits because we have multiple workers. + # But this will slow down the evaluation process. + # You can comment this line if your request doesn't contain many tokens. + time.sleep(len(metrics) * 0.5) + break except Exception as e: print(e) @@ -354,7 +492,13 @@ def get_gpt_evaluation_with_logprobs(prompt: Dict[str, Any], return inst -def evaluate(answers: List[Dict], prompt: Dict[str, Any], metrics: List[str], category: str, model: str) -> List[Dict]: +def evaluate(answers: List[Dict], + prompt: Dict[str, Any], + metrics: List[str], + category: str, + model: str, + language: str, + references: List[Dict] = None) -> List[Dict]: """ Use GPT models to evaluate model answers and save evaluation results. @@ -364,6 +508,8 @@ def evaluate(answers: List[Dict], prompt: Dict[str, Any], metrics: List[str], ca metrics: metrics for GPT evaluation. 
category: the category of the model answers for evaluation. model: the specific GPT model used to evaluate answers. + language: language used in GPT evaluation + references: references for GPT evaluation Returns: Evaluations of the given answers. @@ -378,12 +524,19 @@ def evaluate(answers: List[Dict], prompt: Dict[str, Any], metrics: List[str], ca with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor: futures = [] - for inst in answers: + for idx, inst in enumerate(answers): # Completion models can return log probabilities. if model == "text-davinci-003": future = executor.submit(get_gpt_evaluation_with_logprobs, prompt, inst, metrics, 1) else: - future = executor.submit(get_gpt_evaluation_without_logprobs, prompt, inst, metrics, model, 1) + future = executor.submit(get_gpt_evaluation_without_logprobs, + prompt, + inst, + metrics, + language, + reference=None if references is None else references[idx], + model=model, + max_tokens=1) futures.append(future) @@ -599,7 +752,7 @@ def analyze_gpt_evaluation_statistics(statistics_path: str, save_path: str) -> N for category in tqdm.tqdm( frame_per_category.keys(), - desc=f"category: ", + desc=f"GPT evaluation: ", total=len(frame_per_category.keys()), ): data = pd.DataFrame(frame_per_category[category]) diff --git a/applications/Chat/evaluate/metrics.py b/applications/Chat/evaluate/metrics.py index 031f6fa83926..e220226ec041 100644 --- a/applications/Chat/evaluate/metrics.py +++ b/applications/Chat/evaluate/metrics.py @@ -4,6 +4,7 @@ import jieba from bert_score import score from nltk.translate.bleu_score import sentence_bleu +from nltk.translate.chrf_score import sentence_chrf from rouge_chinese import Rouge as Rouge_cn from rouge_score import rouge_scorer as Rouge_en from sklearn.metrics import f1_score, precision_score, recall_score @@ -40,6 +41,27 @@ def bleu_score(preds: List[str], targets: List[str], language: str) -> Dict[str, return bleu_scores +def chrf_score(preds: List[str], targets: List[str], language: str) -> Dict[str, float]: + """Calculate CHRF Score Metric in sentence level. + """ + chrf_score = {"chrf": 0} + cumulative_chrf = [] + + for pred, target in zip(preds, targets): + if language == "cn": + pred_list = ' '.join(jieba.cut(preprocessing_text(pred))).split() + target_list = ' '.join(jieba.cut(preprocessing_text(target))).split() + elif language == "en": + pred_list = preprocessing_text(pred).split() + target_list = preprocessing_text(target).split() + + cumulative_chrf.append(sentence_chrf(target_list, pred_list)) + + chrf_score["chrf"] = statistics.mean(cumulative_chrf) + + return chrf_score + + def rouge_cn_score(preds: List[str], targets: List[str]) -> Dict[str, float]: """Calculate Chinese ROUGE Score Metric diff --git a/applications/Chat/evaluate/prompt/evaluation_prompt/evaluation_prompt_cn.json b/applications/Chat/evaluate/prompt/evaluation_prompt/evaluation_prompt_cn.json index ee6caae32091..783f453cafdb 100644 --- a/applications/Chat/evaluate/prompt/evaluation_prompt/evaluation_prompt_cn.json +++ b/applications/Chat/evaluate/prompt/evaluation_prompt/evaluation_prompt_cn.json @@ -7,14 +7,14 @@ "relevance": "切题(1-5):答案内容是否切题,不答非所问,并且严格遵照题目要求。", "creativity": "创意性(1-5):某些头脑风暴问题可能需要答案具有创意,提出新的思路。", "practicality": "实用性(1-5):某些头脑风暴问题可能需要答案提出实用的建议或解决方法。", - "correctness": "正确性(1-5):答案应该符合常识、生活实际等等。" + "reasonableness": "合理性(1-5):答案应该符合常识、生活实际等等。" }, "CoT": { "language organization": "1. 阅读答案,并检查是否有语法错误、用词不当或其他显著的错误。\n2. 检查答案是否具有逻辑性,能够按照合理的顺序传达信息并且能够自圆其说。\n3. 确定答案是否与问题或主题相关,并且能够传达清晰的信息。\n4. 
检查答案是否连贯,是否使用适当的转换和过渡来保持句子和段落之间的连贯性。\n5. 检查答案是否具有明确的结构和组织方式,使得读者可以轻松理解信息的层次和结构。\n6. 根据以上因素综合评估答案的语言组织,并给出一个1到5的分数,其中5表示语言组织非常好,而1表示语言组织非常差。\n\n语言组织:", "relevance": "1. 阅读题目,确定题目所问的问题是什么,以及需要回答哪些方面的问题。\n2. 阅读答案,确认答案是否直接回答了题目所问的问题。\n3. 检查答案是否严格遵照了题目的要求,包括答题方式、答题长度、答题格式等等。\n4. 根据以上因素综合评估答案的切题程度,并给出一个1到5的分数,其中5表示答案非常切题,而1表示答案完全没有切题。\n\n切题:", "creativity": "1. 仔细阅读所提供的头脑风暴问题,确保你理解问题的要点和背景。\n2. 根据你的知识和经验,判断所提供的答案是否可行。如果答案不可行,则创意性评分可能会受到影响。\n3. 考虑答案中是否包含新颖的想法或独特的思路。答案可能与已知的解决方案有所重叠,但仍然可以被认为是有创意的,只要它提供了新的角度或方法来解决问题。\n4. 根据答案的创意性,给出一个1到5的评分。如果答案缺乏创意,则应给出一个较低的评分。如果答案具有创意并提供了新的思路,应给出一个较高的评分。\n\n创意性:", "practicality": "1. 仔细阅读所提供的头脑风暴问题,确保你理解问题的要点和背景。\n2. 根据你的知识和经验,判断所提供的答案是否可行。如果答案不可行,则实用性评分可能会受到影响。\n3. 考虑答案中提出的建议或解决方法是否实用并可行。答案可能看起来很好,但如果无法实现或应用,则实用性评分可能会受到影响。\n4. 根据答案的实用性,给出一个1到5的评分。如果答案缺乏实用性,则应给出一个较低的评分。如果答案提出了实用的建议或解决方法,并且可以很好地解决问题,则应给出一个较高的评分。\n\n实用性:", - "correctness": "1. 仔细阅读所提供的头脑风暴问题,确保你理解问题的要点和背景。\n2. 根据你的知识和经验,判断所提供的答案是否可行。如果答案不可行,则正确性评分可能会受到影响。\n3. 考虑答案中所提供的信息是否正确、符合常识、生活实际等等。如果答案中存在明显的错误或不合理之处,则正确性评分可能会受到影响。\n4. 根据答案的正确性,给出一个1到5的评分。如果答案存在明显的错误或不合理之处,则应给出一个较低的评分。如果答案正确、符合常识、生活实际等等,则应给出一个较高的评分。\n\n正确性:" + "reasonableness": "1. 仔细阅读所提供的头脑风暴问题,确保你理解问题的要点和背景。\n2. 根据你的知识和经验,判断所提供的答案是否可行。如果答案不可行,则合理性评分可能会受到影响。\n3. 考虑答案中所提供的信息是否合理、符合常识、生活实际等等。如果答案中存在明显的不合理之处,则合理性评分可能会受到影响。\n4. 根据答案的合理性,给出一个1到5的评分。如果答案存在明显的不合理之处,则应给出一个较低的评分。如果答案合理、符合常识、生活实际等等,则应给出一个较高的评分。\n\n合理性:" }, "prompt": "你是一个好助手。请你为下面“头脑风暴”问题的答案打分。\n\n问题如下:\n\n{question}\n\n答案如下:\n\n{answer}\n\n评分的指标如下:\n\n{metric}\n\n请你遵照以下的评分步骤:\n\n{steps}" }, diff --git a/applications/Chat/evaluate/prompt/evaluation_prompt/evaluation_prompt_en.json b/applications/Chat/evaluate/prompt/evaluation_prompt/evaluation_prompt_en.json index 0b2053746af2..2285b639427c 100644 --- a/applications/Chat/evaluate/prompt/evaluation_prompt/evaluation_prompt_en.json +++ b/applications/Chat/evaluate/prompt/evaluation_prompt/evaluation_prompt_en.json @@ -7,14 +7,14 @@ "relevance": "Relevance (1-5): whether the content of the answer is relevant to the topic, does not answer the wrong question, and strictly follows the requirements of the topic.", "creativity": "Creativity (1-5): Some brainstorming questions may require answers that are creative and suggest new ideas.", "practicality": "Practicality (1-5): Some brainstorming questions may require answers to suggest practical suggestions or solutions.", - "correctness": "Correctness (1-5): The answer should be in line with common sense, life experience, etc." + "reasonableness": "Reasonableness (1-5): The answer should be in line with common sense, life experience, etc." }, "CoT": { - "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the linguistic organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good linguistic organization and 1 indicates very poor linguistic organization.\n\nLanguage organization:", + "language organization": "1. 
Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the language organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good language organization and 1 indicates very poor language organization.\n\nLanguage organization:", "relevance": "1. Read the question to determine what the question asks and what aspects of the question need to be answered.\n2. Read the answers to make sure that they directly answer the question asked.\n3. Check that the answer follows the requirements of the question, including the way it is answered, the length of the answer, the format of the answer, etc.\n4. Evaluate how relevant the answer is based on the above factors and give a score of 1 to 5, where 5 means the answer is very relevant and 1 means the answer is not relevant at all.\n\nRelevance:", "creativity": "1. Read the provided brainstorming questions carefully to make sure you understand the gist and context of the questions.\n2. Based on your knowledge and experience, determine if the answers provided are feasible. If the answer is not feasible, the creativity score may be affected.\n3. Consider whether the answer contains novel ideas or unique thoughts. An answer may overlap with a known solution and still be considered creative, as long as it offers a new perspective or approach to the problem.\n4. Give a score of 1 to 5 depending on the creativity of the answer. If the answer lacks creativity, a lower score should be given. If the answer is creative and provides a new idea, a higher score should be given.\n\nCreativity:", "practicality": "1. Read the provided brainstorming questions carefully to make sure you understand the gist and context of the questions.\n2. Based on your knowledge and experience, determine if the answers provided are feasible. If the answer is not feasible, the practicality score may be affected.\n3. Consider whether the suggestions or solutions presented in the answer are practical and workable. The answer may look good, but if it cannot be implemented or applied, the practicality score may be affected.\n4. Give a score of 1 to 5 depending on the practicality of the answer. If the answer lacks practicality, a lower score should be given. If the answer makes a practical suggestion or solution and solves the problem well, a higher score should be given.\n\nPracticality:", - "correctness": "1. Read the provided brainstorming questions carefully to make sure you understand the gist and context of the questions.\n2. Based on your knowledge and experience, determine if the answers provided are feasible. If the answer is not feasible, the correctness score may be affected.\n3. Consider whether the information provided in the answer is correct, consistent with common sense, real life, etc. If there are obvious errors or implausibilities in the answer, the correctness score may be affected.\n4. 
Give a score of 1 to 5 depending on the correctness of the answer. If the answer contains obvious errors or unreasonable points, a lower score should be given. A higher score should be given if the answer is correct, consistent with common sense, real life, etc.\n\nCorrectness:" + "reasonableness": "1. Read the provided brainstorming questions carefully to make sure you understand the gist and context of the questions.\n2. Based on your knowledge and experience, determine if the answers provided are feasible. If the answer is not feasible, the reasonableness score may be affected.\n3. Consider whether the information provided in the answer is reasonable, consistent with common sense, real life, etc. If there are obvious errors or implausibilities in the answer, the reasonableness score may be affected.\n4. Give a score of 1 to 5 depending on the reasonableness of the answer. If the answer contains obvious errors or unreasonable points, a lower score should be given. A higher score should be given if the answer is reasonable, consistent with common sense, real life, etc.\n\nReasonableness:" }, "prompt": "You are a good assistant. Please rate the given answer to the \"brainstorming\" question below.\n\nThe question is as follows:\n\n{question}\n\nThe answer is as follows:\n\n{answer}\n\nThe metric for evaluation is as follows:\n\n{metric}\n\nYou should follow the following evaluation steps:\n\n{steps}" }, @@ -29,7 +29,7 @@ "reasonableness": "Reasonableness (1-5): Whether the answer can form a logical connection with the content of the previous dialogue, whether it is consistent with common sense, and whether it can reasonably exist in this context." }, "CoT": { - "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the linguistic organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good linguistic organization and 1 indicates very poor linguistic organization.\n\nLanguage organization:", + "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the language organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good language organization and 1 indicates very poor language organization.\n\nLanguage organization:", "relevance": "1. 
Read the question to determine what the question asks and what aspects of the question need to be answered.\n2. Read the answers to make sure that they directly answer the question asked.\n3. Check that the answer follows the requirements of the question, including the way it is answered, the length of the answer, the format of the answer, etc.\n4. Evaluate how relevant the answer is based on the above factors and give a score of 1 to 5, where 5 means the answer is very relevant and 1 means the answer is not relevant at all.\n\nRelevance:", "naturalness": "1. Read the question and determine the identity information provided in the question.\n2. Check whether the content of the answer matches the identity given in the question.\n3. Based on the above factors, score the naturalness of the response on a scale from 1 to 5, where 1 means unnatural and 5 means very natural and in accordance with the identity given in the question.\n\nNaturalness:", "engagingness": "1. Read the questions to determine the context and background of the dialogue.\n2. Check that the answer fully understands the context and background of the conversation and that it fits naturally into the conversation without seeming abrupt.\n3. Based on the above factors, rate the response's engagement on a scale from 1 to 5, where 1 means not engaged and 5 means very engaged and appropriately understands the context and background of the conversation.\n\nEngagingness:", @@ -46,7 +46,7 @@ "correctness": "Correctness (1-5): whether the answer is correct or not." }, "CoT": { - "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the linguistic organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good linguistic organization and 1 indicates very poor linguistic organization.\n\nLanguage organization:", + "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the language organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good language organization and 1 indicates very poor language organization.\n\nLanguage organization:", "relevance": "1. Read the question to determine what the question asks and what aspects of the question need to be answered.\n2. 
Read the answers to make sure that they directly answer the question asked.\n3. Check that the answer follows the requirements of the question, including the way it is answered, the length of the answer, the format of the answer, etc.\n4. Evaluate how relevant the answer is based on the above factors and give a score of 1 to 5, where 5 means the answer is very relevant and 1 means the answer is not relevant at all.\n\nRelevance:", "correctness": "1. Read the question carefully and try to answer the question yourself.\n2. Check the correctness of the answer. You can use known facts or research to verify that the answer is correct. If the answer is correct, you can give a score of 5 for correctness. If the answer is partially correct, an appropriate score, such as 2, 3, or 4, may be given. If the answer is completely incorrect, only 1 point is awarded.\n\nCorrectness:" }, @@ -61,7 +61,7 @@ "correctness": "Correctness (1-5): whether the answer is correct or not." }, "CoT": { - "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the linguistic organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good linguistic organization and 1 indicates very poor linguistic organization.\n\nLanguage organization:", + "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the language organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good language organization and 1 indicates very poor language organization.\n\nLanguage organization:", "relevance": "1. Read the question to determine what the question asks and what aspects of the question need to be answered.\n2. Read the answers to make sure that they directly answer the question asked.\n3. Check that the answer follows the requirements of the question, including the way it is answered, the length of the answer, the format of the answer, etc.\n4. Evaluate how relevant the answer is based on the above factors and give a score of 1 to 5, where 5 means the answer is very relevant and 1 means the answer is not relevant at all.\n\nRelevance:", "correctness": "1. Read the question carefully and try to answer the question by yourself.\n2. Check the correctness of the answer. You can use known facts or research to verify that the answer is correct. 
If the answer is correct, you can give a score of 5 for correctness. If the answer is partially correct, an appropriate score, such as 2, 3, or 4, may be assigned. If the answer is completely incorrect, only 1 point is awarded.\n\nCorrectness:" }, @@ -76,7 +76,7 @@ "correctness": "correctness (1-5): Answers should extract the required information accurately and should not contain any incorrect or misleading information." }, "CoT": { - "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the linguistic organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good linguistic organization and 1 indicates very poor linguistic organization.\n\nLanguage organization:", + "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the language organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good language organization and 1 indicates very poor language organization.\n\nLanguage organization:", "relevance": "1. Read the question to determine what the question asks and what aspects of the question need to be answered.\n2. Read the answers to make sure that they directly answer the question asked.\n3. Check that the answer follows the requirements of the question, including the way it is answered, the length of the answer, the format of the answer, etc.\n4. Evaluate how relevant the answer is based on the above factors and give a score of 1 to 5, where 5 means the answer is very relevant and 1 means the answer is not relevant at all.\n\nRelevance:", "correctness": "1. Read the questions carefully and identify the information that needs to be extracted from the material.\n2. Read the answer carefully and make sure it covers all the information that needs to be extracted.\n3. Use the material provided to verify the correctness of the response. If the response is inaccurate or contains incorrect or misleading information, a high score cannot be given.\n4. Check that the answer contains all the information required to be extracted and do not leave out any important details.\n5. 
Give a score between 1 and 5 based on the correctness and completeness of the response, with a score of 5 indicating a very accurate and complete response and a score of 1 indicating that the response barely extracts the required information.\n\nCorrectness:" }, @@ -91,7 +91,7 @@ "diversity": "Diversity (1-5): Whether the answers use beautiful language and have some creativity and imagination. However, answers should also be kept reasonable and moderate, not overly exaggerated or off-topic." }, "CoT": { - "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the linguistic organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good linguistic organization and 1 indicates very poor linguistic organization.\n\nLanguage organization:", + "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the language organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good language organization and 1 indicates very poor language organization.\n\nLanguage organization:", "relevance": "1. Read the question to determine what the question asks and what aspects of the question need to be answered.\n2. Read the answers to make sure that they directly answer the question asked.\n3. Check that the answer follows the requirements of the question, including the way it is answered, the length of the answer, the format of the answer, etc.\n4. Evaluate how relevant the answer is based on the above factors and give a score of 1 to 5, where 5 means the answer is very relevant and 1 means the answer is not relevant at all.\n\nRelevance:", "diversity": "1. Read the entire response carefully to ensure that you fully understand the content and theme expressed in the response.\n2. While reading the response, pay attention to the quality of the language, such as whether the wording is correct and the language is vivid.\n3. Check the creativity and imagination of the response to see if the response is engaging to read on.\n4. Check the reasonableness and appropriateness of the responses to see if the responses are exaggerated or off-topic.\n5. 
Rate the diversity on a scale of 1 to 5, with a 5 indicating a good quality response that is engaging to read and a 1 indicating a raw response or a question that is off-topic.\n\nDiversity:" }, @@ -106,7 +106,7 @@ "correctness": "Correctness (1-5): whether the answer is correct or not." }, "CoT": { - "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the linguistic organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good linguistic organization and 1 indicates very poor linguistic organization.\n\nLanguage organization:", + "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the language organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good language organization and 1 indicates very poor language organization.\n\nLanguage organization:", "relevance": "1. Read the question to determine what the question asks and what aspects of the question need to be answered.\n2. Read the answers to make sure that they directly answer the question asked.\n3. Check that the answer follows the requirements of the question, including the way it is answered, the length of the answer, the format of the answer, etc.\n4. Evaluate how relevant the answer is based on the above factors and give a score of 1 to 5, where 5 means the answer is very relevant and 1 means the answer is not relevant at all.\n\nRelevance:", "correctness": "1. Read the question carefully and try to answer the question yourself.\n2. Check the correctness of the answer. You can use known facts or research to verify that the answer is correct. If the answer is correct, you can give a score of 5 for correctness. If the answer is partially correct, an appropriate score, such as 2, 3, or 4, may be given. If the answer is completely incorrect, only 1 point is awarded.\n\nCorrectness:" }, @@ -121,7 +121,7 @@ "correctness": "Correctness (1-5): whether the answer is correct or not." }, "CoT": { - "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. 
Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the linguistic organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good linguistic organization and 1 indicates very poor linguistic organization.\n\nLanguage organization:", + "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the language organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good language organization and 1 indicates very poor language organization.\n\nLanguage organization:", "relevance": "1. Read the question to determine what the question asks and what aspects of the question need to be answered.\n2. Read the answers to make sure that they directly answer the question asked.\n3. Check that the answer follows the requirements of the question, including the way it is answered, the length of the answer, the format of the answer, etc.\n4. Evaluate how relevant the answer is based on the above factors and give a score of 1 to 5, where 5 means the answer is very relevant and 1 means the answer is not relevant at all.\n\nRelevance:", "correctness": "1. Read the question carefully and try to answer the question yourself.\n2. Check the correctness of the answer. You can use known facts or research to verify that the answer is correct. If the answer is correct, you can give a score of 5 for correctness. If the answer is partially correct, an appropriate score, such as 2, 3, or 4, may be assigned. If the answer is completely incorrect, only 1 point is awarded.\n\nCorrectness:" }, @@ -137,7 +137,7 @@ "creativity": "Creativity (1-5): The answers to the role-play questions need to be somewhat creative, but at the same time they need to adhere to the setting of the role." }, "CoT": { - "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. 
Evaluate the linguistic organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good linguistic organization and 1 indicates very poor linguistic organization.\n\nLanguage organization:", + "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the language organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good language organization and 1 indicates very poor language organization.\n\nLanguage organization:", "relevance": "1. Read the question to determine what the question asks and what aspects of the question need to be answered.\n2. Read the answers to make sure that they directly answer the question asked.\n3. Check that the answer follows the requirements of the question, including the way it is answered, the length of the answer, the format of the answer, etc.\n4. Evaluate how relevant the answer is based on the above factors and give a score of 1 to 5, where 5 means the answer is very relevant and 1 means the answer is not relevant at all.\n\nRelevance:", "fidelity": "1. Read the question carefully to understand how the character is set up and represented in the question, including aspects such as occupation, background, point of view, and personality.\n2. Read the question's request and confirm the details that need to be taken into account when answering the request.\n3. Compare the provided answer with the setting of the role and assess whether the answer can strictly adhere to the setting of the role.\n4. Combine the results of the above assessment to give a fidelity score ranging from 1 to 5, where a score of 1 means that the response does not match the persona at all, and a score of 5 means that the response fully complies with the persona and satisfies the given request.\n\nFidelity:", "creativity": "1. Read the question carefully to understand how the character is set up and represented in the question, including career, background, perspective, and personality.\n2. Evaluate whether the answer has unique ideas and suggestions that bring new ideas and insights to the questioner.\n3. Compare the creativity in the response to the setting of the persona and assess whether the response adheres to the setting and essential characteristics of the persona.\n4. Evaluate the quality of the responses in general and combine the results of the above assessment to give a creativity score ranging from 1 to 5, where a score of 1 indicates that the response lacks creativity and a score of 5 indicates that the response has unique ideas and suggestions and is able to adhere to the set-up of the persona.\n\nCreativity:" @@ -154,7 +154,7 @@ "conciseness": "Conciseness (1-5): answers should be concise and without redundant content." }, "CoT": { - "language organization": "1. 
Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the linguistic organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good linguistic organization and 1 indicates very poor linguistic organization.\n\nLanguage organization:", + "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the language organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good language organization and 1 indicates very poor language organization.\n\nLanguage organization:", "relevance": "1. Read the question to determine what the question asks and what aspects of the question need to be answered.\n2. Read the answers to make sure that they directly answer the question asked.\n3. Check that the answer follows the requirements of the question, including the way it is answered, the length of the answer, the format of the answer, etc.\n4. Evaluate how relevant the answer is based on the above factors and give a score of 1 to 5, where 5 means the answer is very relevant and 1 means the answer is not relevant at all.\n\nRelevance:", "correctness": "1. Read the material given in the question carefully to understand its content and main points.\n2. Assess whether the answer accurately summarizes the key points of the source material.\n3. assess whether the response contains all the key information in the source material.\n4. Based on the above steps, give a score of 1-5, where 1 means that the response does not accurately summarize the main points of the material and 5 means that the response completely accurately summarizes the main points of the material.\n\nCorrectness:", "conciseness": "1. Read the title and extract the main points of the material.\n2. Read the summary and note the main ideas and messages in it.\n3. Assess the length of the summary. A concise summary should usually convey key information within a few sentences or paragraphs, rather than lengthy paragraphs or essays.\n4. Check that the summary does not contain information that is not relevant to the main ideas or that is redundant.\n5. Make sure that the summary covers the key information in the material and that no important details have been omitted.\n6. 
Rate the summary on a scale of 1-5, where 5 means the summary is concise and free of redundancy, and 1 means the summary is lengthy or contains unnecessary information that is difficult to understand or remember. Based on your judgment, assign the appropriate score.\n\nConciseness:" @@ -170,7 +170,7 @@ "correctness": "Correctness (1-5): whether the answer is correct or not." }, "CoT": { - "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the linguistic organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good linguistic organization and 1 indicates very poor linguistic organization.\n\nLanguage organization:", + "language organization": "1. Read the answers and check for grammatical errors, poor word choice, or other significant mistakes.\n2. Check that the answer is logical, conveys the information in a logical order, and is self-explanatory.\n3. Determine if the answer is relevant to the question or topic and conveys a clear message.\n4. Check that the answer is coherent and that appropriate transitions and switches are used to maintain coherence between sentences and paragraphs.\n5. Check that the answer is clearly structured and organized in such a way that the reader can easily understand the hierarchy and structure of the information.\n6. Evaluate the language organization of the answer based on a combination of the above factors and give a score of 1 to 5, where 5 indicates very good language organization and 1 indicates very poor language organization.\n\nLanguage organization:", "relevance": "1. Read the question to determine what the question asks and what aspects of the question need to be answered.\n2. Read the answers to make sure that they directly answer the question asked.\n3. Check that the answer follows the requirements of the question, including the way it is answered, the length of the answer, the format of the answer, etc.\n4. Evaluate how relevant the answer is based on the above factors and give a score of 1 to 5, where 5 means the answer is very relevant and 1 means the answer is not relevant at all.\n\nRelevance:", "correctness": "1. Read the question carefully and try to answer the question yourself.\n2. Check the correctness of the answer. You can use known facts or research to verify that the answer is correct. If the answer is correct, you can give a score of 5 for correctness. If the answer is partially correct, an appropriate score, such as 2, 3, or 4, may be assigned. 
If the answer is completely incorrect, only 1 point is awarded.\n\nCorrectness:" }, diff --git a/applications/Chat/evaluate/unieval/__init__.py b/applications/Chat/evaluate/unieval/__init__.py new file mode 100644 index 000000000000..dad8d6ad09fa --- /dev/null +++ b/applications/Chat/evaluate/unieval/__init__.py @@ -0,0 +1,12 @@ +from .evaluator import get_evaluator +from .utils import ( + analyze_unieval_results, + calculate_average_score, + convert_data_to_unieval_format, + save_unieval_results, +) + +__all__ = [ + 'get_evaluator', 'convert_data_to_unieval_format', 'calculate_average_score', 'save_unieval_results', + 'analyze_unieval_results' +] diff --git a/applications/Chat/evaluate/unieval/evaluator.py b/applications/Chat/evaluate/unieval/evaluator.py new file mode 100644 index 000000000000..385425e4a576 --- /dev/null +++ b/applications/Chat/evaluate/unieval/evaluator.py @@ -0,0 +1,330 @@ +# MIT License + +# Copyright (c) 2022 Ming Zhong + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import numpy as np +from nltk import sent_tokenize + +from .scorer import UniEvaluator +from .utils import add_question + + +class SumEvaluator: + + def __init__(self, model_name_or_path, max_length=1024, device='cuda:0', cache_dir=None): + """ Set up evaluator for text summarization """ + self.scorer = UniEvaluator( + model_name_or_path='MingZhong/unieval-sum' if model_name_or_path == "" else model_name_or_path, + max_length=max_length, + device=device, + cache_dir=cache_dir) + self.task = 'summarization' + self.dimensions = ['coherence', 'consistency', 'fluency', 'relevance'] + + def evaluate(self, data, category, dims=None, overall=True): + """ + Get the scores of all the given dimensions + + category: The category to be evaluated. + + dims: A list of dimensions to be evaluated. If dims is None, SumEvaluator will evaluate + four dimensions: coherence, consistency, fluency, relevance. + + overall: indicates whether the overall score is to be calculated. + Overall score can be customized to a combination of scores based on different + dimensions. The default here is the average score of all the given dimensions. 
+ """ + n_data = len(data) + eval_scores = [{} for _ in range(n_data)] + + if dims == None: + eval_dims = self.dimensions + else: + assert isinstance(dims, list) + eval_dims = dims + + for dim in eval_dims: + # Calculate average sentence-level scores for 'consistency' and 'fluency' + if dim == 'consistency' or dim == 'fluency': + src_list, output_list = [], [] + n_sents = [] # the number of sentences in each generated summary + for i in range(n_data): + source = data[i]['source'] + system_outputs = sent_tokenize(data[i]['system_output']) + n_sents.append(len(system_outputs)) + for j in range(len(system_outputs)): + src_list.append(source) + output_list.append(system_outputs[j]) + input_list = add_question(dimension=dim, output=output_list, src=src_list, task=self.task) + sent_score = self.scorer.score(input_list, self.task, category, dim) + + # Get average score for each sample + start_idx = 0 + score = [] + for cur_n_sent in n_sents: + score.append(sum(sent_score[start_idx:start_idx + cur_n_sent]) / cur_n_sent) + start_idx += cur_n_sent + + # Calculate summary-level score for 'coherence' and 'relevance' + elif dim == 'coherence' or dim == 'relevance': + src_list, output_list, ref_list = [], [], [] + for i in range(n_data): + src_list.append(data[i]['source']) + output_list.append(data[i]['system_output']) + if dim == 'relevance': + ref_list.append(data[i]['reference']) + input_list = add_question(dimension=dim, output=output_list, src=src_list, ref=ref_list, task=self.task) + score = self.scorer.score(input_list, self.task, category, dim) + + # Please customize other dimensions here for summarization + else: + raise NotImplementedError('The input format for this dimension is still undefined. \ + Please customize it first.') + + for i in range(n_data): + eval_scores[i][dim] = score[i] + + # Customize your overall score here. + if overall == True: + for i in range(n_data): + eval_scores[i]['overall'] = np.mean(list(eval_scores[i].values())) + + return eval_scores + + +class DialogEvaluator: + + def __init__(self, model_name_or_path, max_length=1024, device='cuda:0', cache_dir=None): + """ Set up evaluator for dialogues """ + self.scorer = UniEvaluator( + model_name_or_path='MingZhong/unieval-dialog' if model_name_or_path == "" else model_name_or_path, + max_length=max_length, + device=device, + cache_dir=cache_dir) + self.task = 'dialogue' + self.dimensions = ['naturalness', 'coherence', 'engagingness', 'groundedness', 'understandability'] + + def evaluate(self, data, category, dims=None, overall=True): + """ + Get the scores of all the given dimensions + + category: The category to be evaluated. + + dims: A list of dimensions to be evaluated. If dims is None, DialogEvaluator will evaluate + five dimensions: naturalness, coherence, engagingness, groundedness and understandability. + + overall: indicates whether the overall score is to be calculated. + Overall score can be customized to a combination of scores based on different + dimensions. The default here is the average score of all the given dimensions. 
+ """ + n_data = len(data) + eval_scores = [{} for _ in range(n_data)] + + if dims == None: + eval_dims = self.dimensions + else: + assert isinstance(dims, list) + eval_dims = dims + + for dim in eval_dims: + # Calculate summation score for 'engagingness' + if dim == 'engagingness': + src_list, output_list, context_list = [], [], [] + n_sents = [] # the number of sentences in each generated response + for i in range(n_data): + source = data[i]['source'] + context = data[i]['context'] + system_outputs = sent_tokenize(data[i]['system_output']) + n_sents.append(len(system_outputs)) + for j in range(len(system_outputs)): + src_list.append(source) + context_list.append(context) + output_list.append(system_outputs[j]) + input_list = add_question(dimension=dim, + output=output_list, + src=src_list, + context=context_list, + task=self.task) + sent_score = self.scorer.score(input_list, self.task, category, dim) + + # Get the summation score for each sample + start_idx = 0 + score = [] + for cur_n_sent in n_sents: + score.append(sum(sent_score[start_idx:start_idx + cur_n_sent])) + start_idx += cur_n_sent + + # Calculate turn-level score for other dimensions + elif dim in ['naturalness', 'coherence', 'groundedness', 'understandability']: + src_list, output_list, context_list = [], [], [] + for i in range(n_data): + src_list.append(data[i]['source']) + output_list.append(data[i]['system_output']) + context_list.append(data[i]['context']) + input_list = add_question(dimension=dim, + output=output_list, + src=src_list, + context=context_list, + task=self.task) + score = self.scorer.score(input_list, self.task, category, dim) + + # Please customize other dimensions here for summarization + else: + raise NotImplementedError('The input format for this dimension is still undefined. \ + Please customize it first.') + + for i in range(n_data): + eval_scores[i][dim] = score[i] + + # Customize your overall score here. + if overall == True: + for i in range(n_data): + eval_scores[i]['overall'] = np.mean(list(eval_scores[i].values())) + + return eval_scores + + +class D2tEvaluator: + + def __init__(self, model_name_or_path, max_length=1024, device='cuda:0', cache_dir=None): + """ Set up evaluator for data-to-text """ + self.scorer = UniEvaluator( + model_name_or_path='MingZhong/unieval-sum' if model_name_or_path == "" else model_name_or_path, + max_length=max_length, + device=device, + cache_dir=cache_dir) + self.task = 'data2text' + self.dimensions = ['naturalness', 'informativeness'] + + def evaluate(self, data, category, dims=None, overall=True): + """ + Get the scores of all the given dimensions + + category: The category to be evaluated. + + dims: A list of dimensions to be evaluated. If dims is None, D2tEvaluator will evaluate + two dimensions: naturalness and informativeness. + + overall: indicates whether the overall score is to be calculated. + Overall score can be customized to a combination of scores based on different + dimensions. The default here is the average score of all the given dimensions. 
+ """ + n_data = len(data) + eval_scores = [{} for _ in range(n_data)] + + if dims == None: + eval_dims = self.dimensions + else: + assert isinstance(dims, list) + eval_dims = dims + + for dim in eval_dims: + output_list, ref_list = [], [] + for i in range(n_data): + output_list.append(data[i]['system_output']) + ref_list.append(data[i]['reference']) + + input_list = add_question(dimension=dim, output=output_list, ref=ref_list, task=self.task) + score = self.scorer.score(input_list, self.task, category, dim) + + for i in range(n_data): + eval_scores[i][dim] = score[i] + + # Customize your overall score here. + if overall == True: + for i in range(n_data): + eval_scores[i]['overall'] = np.mean(list(eval_scores[i].values())) + + return eval_scores + + +class FactEvaluator: + + def __init__(self, model_name_or_path, max_length=1024, device='cuda:0', cache_dir=None): + """ Set up evaluator for factual consistency detection """ + self.scorer = UniEvaluator( + model_name_or_path='MingZhong/unieval-fact' if model_name_or_path == "" else model_name_or_path, + max_length=max_length, + device=device, + cache_dir=cache_dir) + self.task = 'fact' + self.dim = 'consistency' + + def evaluate(self, data, category): + """ + Get the factual consistency score (only 1 dimension for this task) + + category: The category to be evaluated. + """ + n_data = len(data) + eval_scores = [{} for _ in range(n_data)] + + # Calculate average sentence-level scores for factual consistency + src_list, output_list = [], [] + n_sents = [] # the number of sentences in the claim + for i in range(n_data): + source = data[i]['source'] + system_outputs = sent_tokenize(data[i]['system_output']) + n_sents.append(len(system_outputs)) + for j in range(len(system_outputs)): + src_list.append(source) + output_list.append(system_outputs[j]) + input_list = add_question(dimension=self.dim, output=output_list, src=src_list, task=self.task) + sent_score = self.scorer.score(input_list, self.task, category, self.dim) + + # Get average score for each sample + start_idx = 0 + score = [] + for cur_n_sent in n_sents: + score.append(sum(sent_score[start_idx:start_idx + cur_n_sent]) / cur_n_sent) + start_idx += cur_n_sent + + for i in range(n_data): + eval_scores[i][self.dim] = score[i] + + return eval_scores + + +def get_evaluator(task, model_name_or_path="", max_length=1024, device='cuda:0', cache_dir=None): + assert task in ['summarization', 'dialogue', 'data2text', 'fact'] + if task == 'summarization': + return SumEvaluator(model_name_or_path=model_name_or_path, + max_length=max_length, + device=device, + cache_dir=cache_dir) + elif task == 'dialogue': + return DialogEvaluator(model_name_or_path=model_name_or_path, + max_length=max_length, + device=device, + cache_dir=cache_dir) + elif task == 'data2text': + return D2tEvaluator(model_name_or_path=model_name_or_path, + max_length=max_length, + device=device, + cache_dir=cache_dir) + elif task == 'fact': + return FactEvaluator(model_name_or_path=model_name_or_path, + max_length=max_length, + device=device, + cache_dir=cache_dir) + else: + raise NotImplementedError('Other tasks are not implemented, \ + please customize specific tasks here.') diff --git a/applications/Chat/evaluate/unieval/scorer.py b/applications/Chat/evaluate/unieval/scorer.py new file mode 100644 index 000000000000..2c70bb9f6ded --- /dev/null +++ b/applications/Chat/evaluate/unieval/scorer.py @@ -0,0 +1,101 @@ +# MIT License + +# Copyright (c) 2022 Ming Zhong + +# Permission is hereby granted, free of charge, to any person obtaining a 
copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import torch +import torch.nn as nn +from tqdm import tqdm +from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer + + +class UniEvaluator: + + def __init__(self, model_name_or_path, max_length=1024, device='cuda:0', cache_dir=None): + """ Set up model """ + self.device = device + self.max_length = max_length + + self.config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir) + self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, cache_dir=cache_dir) + self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path, config=self.config, cache_dir=cache_dir) + + self.model.eval() + self.model.to(device) + + self.softmax = nn.Softmax(dim=1) + + self.pos_id = self.tokenizer("Yes")["input_ids"][0] + self.neg_id = self.tokenizer("No")["input_ids"][0] + + def score(self, inputs, task, category, dim, batch_size=8): + """ + Get scores for the given samples. + final_score = positive_score / (positive_score + negative_score) + """ + + # The implementation of "forward" in T5 still requires decoder_input_ids. + # Therefore, we construct a random one-word target sequence. + # The content of the target has no effect on the final scores. 
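+ # Worked example of the scoring rule used below (illustrative numbers only, not taken from any real run): + # if the softmax over the first decoded token assigns P("Yes") = 0.8 and P("No") = 0.1, + # the final score for that input is 0.8 / (0.8 + 0.1) ≈ 0.89.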
+ tgts = ["No" for _ in range(len(inputs))] + + pos_score_list, neg_score_list = [], [] + for i in tqdm(range(0, len(inputs), batch_size), desc=f"{category}-({dim}-{task}): "): + src_list = inputs[i:i + batch_size] + tgt_list = tgts[i:i + batch_size] + try: + with torch.no_grad(): + encoded_src = self.tokenizer(src_list, + max_length=self.max_length, + truncation=True, + padding=True, + return_tensors='pt') + encoded_tgt = self.tokenizer(tgt_list, + max_length=self.max_length, + truncation=True, + padding=True, + return_tensors='pt') + + src_tokens = encoded_src['input_ids'].to(self.device) + src_mask = encoded_src['attention_mask'].to(self.device) + + tgt_tokens = encoded_tgt['input_ids'].to(self.device)[:, 0].unsqueeze(-1) + + output = self.model(input_ids=src_tokens, attention_mask=src_mask, labels=tgt_tokens) + logits = output.logits.view(-1, self.model.config.vocab_size) + + pos_score = self.softmax(logits)[:, self.pos_id] # Yes + neg_score = self.softmax(logits)[:, self.neg_id] # No + + cur_pos_score = [x.item() for x in pos_score] + cur_neg_score = [x.item() for x in neg_score] + pos_score_list += cur_pos_score + neg_score_list += cur_neg_score + + except RuntimeError: + print(f'source: {src_list}') + print(f'target: {tgt_list}') + exit(0) + + score_list = [] + for i in range(len(pos_score_list)): + score_list.append(pos_score_list[i] / (pos_score_list[i] + neg_score_list[i])) + + return score_list diff --git a/applications/Chat/evaluate/unieval/utils.py b/applications/Chat/evaluate/unieval/utils.py new file mode 100644 index 000000000000..a77505faa0d2 --- /dev/null +++ b/applications/Chat/evaluate/unieval/utils.py @@ -0,0 +1,248 @@ +# MIT License + +# Copyright (c) 2022 Ming Zhong + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import os +from typing import Dict + +import matplotlib.pyplot as plt +import pandas as pd +import seaborn as sns +import tqdm + + +def add_question(dimension, output, src=None, ref=None, context=None, task=None): + """ + Add questions to generate input in Bool-QA format for UniEval. + + dimension: specific dimension to be evaluated + src: source input for different NLG tasks. For example, source document for summarization + and dialogue history for dialogue response generation. + output: output text generated by the models + ref: human-annotataed groundtruth + context: the context needed to evaluate several specific dimension. For example, + additional factual information when evaluating engagingness and groundedness in dialogues. 
+ """ + + input_with_question = [] + for i in range(len(output)): + # For summarization + if task == 'summarization': + if dimension == 'fluency': + cur_input = 'question: Is this a fluent paragraph? paragraph: ' + output[i] + elif dimension == 'coherence': + cur_input = 'question: Is this a coherent summary to the document? summary: ' + output[ + i] + ' document: ' + src[i] + elif dimension == 'consistency': + cur_input = 'question: Is this claim consistent with the document? claim: ' + output[ + i] + ' document: ' + src[i] + elif dimension == 'relevance': + cur_input = 'question: Is this summary relevant to the reference? summary: ' + output[ + i] + ' reference: ' + ref[i] + else: + raise NotImplementedError( + 'The input format for this dimension is still undefined. Please customize it first.') + # For dialogues + elif task == 'dialogue': + if dimension == 'naturalness': + cur_input = 'question: Is this a natural response in the dialogue? response: ' + output[i] + elif dimension == 'coherence': + cur_input = 'question: Is this a coherent response given the dialogue history? response: '\ + + output[i] + ' dialogue history: ' + src[i] + elif dimension == 'engagingness': + cur_input = 'question: Is this an engaging and informative response according to the dialogue history and fact? response: '\ + + output[i] + ' dialogue history: ' + src[i] + ' fact: ' + context[i] + elif dimension == 'groundedness': + cur_input = 'question: Is this response consistent with knowledge in the fact? response: '\ + + output[i] + ' fact: ' + context[i] + elif dimension == 'understandability': + cur_input = 'question: Is this an understandable response in the dialogue? response: ' + output[i] + else: + raise NotImplementedError( + 'The input format for this dimension is still undefined. Please customize it first.') + # For data-to-text + elif task == 'data2text': + if dimension == 'naturalness': + cur_input = 'question: Is this a fluent utterance? utterance: ' + output[i] + elif dimension == 'informativeness': + cur_input = 'question: Is this sentence informative according to the reference? sentence: '\ + + output[i] + ' reference: ' + ref[i] + else: + raise NotImplementedError( + 'The input format for this dimension is still undefined. Please customize it first.') + # For factual consistency detection + elif task == 'fact': + if dimension == 'consistency': + cur_input = 'question: Is this claim consistent with the document? claim: ' + output[ + i] + ' document: ' + src[i] + else: + raise NotImplementedError('No other dimensions for the factual consistency detection task.') + # For new customized tasks + else: + raise NotImplementedError('Other tasks are not implemented, please customize specific tasks here.') + input_with_question.append(cur_input) + return input_with_question + + +def convert_data_to_unieval_format(output_list, src_list=None, ref_list=None): + """ + Convert the data into the unieval's format. + + output_list: a list of model output + + src_list: source input for different NLG tasks. 
For example, source document for summarization + and dialogue history for dialogue response generation + ref_list: human-annotated groundtruth + """ + json_data = [] + for i in range(len(output_list)): + cur = {} + cur['system_output'] = output_list[i] + if src_list is not None: + cur['source'] = src_list[i] + if ref_list is not None: + cur['reference'] = ref_list[i] + cur['context'] = "" + json_data.append(cur) + return json_data + + +def calculate_average_score(scores): + """ + Calculate average scores for different metrics + + scores: a list of scores for different metrics for each answer + + """ + metrics = {metric: 0 for metric in scores[0]} + + for score in scores: + for metric in score: + metrics[metric] += score[metric] + + for metric in metrics: + metrics[metric] /= len(scores) + + return metrics + + +def save_unieval_results(model_name: str, unieval_metric_stats: Dict[str, Dict], save_path: str) -> None: + """ + Save UniEval evaluation results of different categories for one model. + + """ + + if not os.path.exists(save_path): + os.makedirs(save_path) + + unieval_metric_stats_per_category = {} + for task, category_stat in unieval_metric_stats.items(): + for category, metric_stat in category_stat.items(): + if unieval_metric_stats_per_category.get(category, None) is None: + unieval_metric_stats_per_category[category] = {} + for metric, score in metric_stat.items(): + unieval_metric_stats_per_category[category][f"{metric}-{task}"] = score + + automatic_df = pd.DataFrame(unieval_metric_stats_per_category) + automatic_df.to_csv(os.path.join(save_path, f"{model_name}_results.csv"), index=True) + + +def read_unieval_results(results_path: str, file_name: str) -> Dict[str, Dict]: + """ + Read a csv file and return a dictionary which stores scores per metric. + + """ + + results = pd.read_csv(os.path.join(results_path, file_name), index_col=0) + + results_dict = {metric: {} for metric in list(results.index)} + for i, metric in enumerate(results_dict.keys()): + for j, category in enumerate(list(results.columns)): + if pd.isnull(results.iloc[i][j]): + continue + results_dict[metric][category] = results.iloc[i][j] + + return results_dict + + +def analyze_unieval_results(results_path: str, save_path: str) -> None: + """ + Analyze and visualize all csv files in the given folder. + + """ + + if not os.path.exists(results_path): + raise Exception(f'The given directory "{results_path}" doesn\'t exist! 
No results found!') + + all_statistics = {} + + for file_name in os.listdir(results_path): + if file_name.endswith("_results.csv"): + model_name = file_name.split("_results.csv")[0] + all_statistics[model_name] = read_unieval_results(results_path, file_name) + + if len(list(all_statistics.keys())) == 0: + raise Exception(f'There are no csv files in the given directory "{results_path}"!') + + frame_all = {"model": [], "category": [], "metric": [], "score": []} + frame_per_metric = {} + for model_name, model_statistics in all_statistics.items(): + for metric, metric_statistics in model_statistics.items(): + if frame_per_metric.get(metric) is None: + frame_per_metric[metric] = {"model": [], "category": [], "score": []} + + for category, category_score in metric_statistics.items(): + frame_all["model"].append(model_name) + frame_all["category"].append(category) + frame_all["metric"].append(metric) + frame_all["score"].append(category_score) + + frame_per_metric[metric]["model"].append(model_name) + frame_per_metric[metric]["category"].append(category) + frame_per_metric[metric]["score"].append(category_score) + + if not os.path.exists(save_path): + os.makedirs(save_path) + + frame_all = pd.DataFrame(frame_all) + frame_all.to_csv(os.path.join(save_path, "unieval_statistics.csv")) + + for metric in tqdm.tqdm( + frame_per_metric.keys(), + desc=f"UniEval metrics: ", + total=len(frame_per_metric.keys()), + ): + data = pd.DataFrame(frame_per_metric[metric]) + + sns.set() + fig = plt.figure(figsize=(16, 10)) + + fig = sns.barplot(x="category", y="score", hue="model", data=data, dodge=True) + fig.set_title( + f"Comparison between Different Models for Metric {metric.split('-')[0].title()} in Task {metric.split('-')[1].title()}" + ) + plt.xlabel("Evaluation Category") + plt.ylabel("Score") + + figure = fig.get_figure() + figure.savefig(os.path.join(save_path, f"{metric}.png"), dpi=400) + + plt.close() diff --git a/applications/Chat/evaluate/utils.py b/applications/Chat/evaluate/utils.py index 1f4069386fcd..fefe25f5e764 100644 --- a/applications/Chat/evaluate/utils.py +++ b/applications/Chat/evaluate/utils.py @@ -199,7 +199,7 @@ def analyze_automatic_results(results_path: str, save_path: str) -> None: for metric in tqdm.tqdm( frame_per_metric.keys(), - desc=f"metric: ", + desc=f"automatic metrics: ", total=len(frame_per_metric.keys()), ): data = pd.DataFrame(frame_per_metric[metric]) diff --git a/applications/Chat/examples/ray/1mmt_prompt.py b/applications/Chat/examples/ray/1mmt_prompt.py new file mode 100644 index 000000000000..afdd6a922cc7 --- /dev/null +++ b/applications/Chat/examples/ray/1mmt_prompt.py @@ -0,0 +1,175 @@ +import argparse +import os +import socket +from functools import partial + +import pandas as pd +import ray +import torch +from coati.quant import llama_load_quant, low_resource_init +from coati.ray.detached_trainer_ppo import DetachedPPOTrainer +from coati.ray.experience_maker_holder import ExperienceMakerHolder +from coati.ray.utils import ( + get_actor_from_args, + get_critic_from_args, + get_reward_model_from_args, + get_strategy_from_args, + get_tokenizer_from_args, +) +from torch.utils.data import DataLoader +from transformers import AutoConfig +from transformers.modeling_utils import no_init_weights + + +def get_free_port(): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(('', 0)) + return s.getsockname()[1] + + +def get_local_ip(): + with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: + s.connect(('8.8.8.8', 80)) + return s.getsockname()[0] + + 
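+# A hypothetical launch command for this script (paths and values are placeholders; the flags come from the +# argparse options further down, and ray.init() at the bottom of the file reads RAY_NAMESPACE from the environment): +# RAY_NAMESPACE=admin python 1mmt_prompt.py --prompt_path /path/to/prompts.csv --num_trainers 2 --model gpt2 --pretrain gpt2 --critic_pretrain gpt2 +# The prompt file is loaded with pd.read_csv(args.prompt_path)['prompt'], so it is expected to be a CSV with a "prompt" column.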
+def main(args): + master_addr = str(get_local_ip()) + # trainer_env_info + trainer_port = str(get_free_port()) + env_info_trainers = [{ + 'local_rank': '0', + 'rank': str(rank), + 'world_size': str(args.num_trainers), + 'master_port': trainer_port, + 'master_addr': master_addr + } for rank in range(args.num_trainers)] + + # maker_env_info + maker_port = str(get_free_port()) + env_info_maker = { + 'local_rank': '0', + 'rank': '0', + 'world_size': '1', + 'master_port': maker_port, + 'master_addr': master_addr + } + + # configure tokenizer + tokenizer = get_tokenizer_from_args(args.model) + + def trainer_model_fn(): + actor = get_actor_from_args(args.model, args.pretrain).half().cuda() + critic = get_critic_from_args(args.model, args.critic_pretrain).half().cuda() + return actor, critic + + # configure Trainer + trainer_refs = [ + DetachedPPOTrainer.options(name=f"trainer{i}", num_gpus=1, max_concurrency=2).remote( + experience_maker_holder_name_list=["maker1"], + strategy_fn=partial(get_strategy_from_args, args.trainer_strategy), + model_fn=trainer_model_fn, + env_info=env_info_trainer, + train_batch_size=args.train_batch_size, + buffer_limit=16, + eval_performance=True, + debug=args.debug, + update_lora_weights=not (args.lora_rank == 0), + ) for i, env_info_trainer in enumerate(env_info_trainers) + ] + + def model_fn(): + actor = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda() + critic = get_critic_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda() + reward_model = get_reward_model_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda() + if args.initial_model_quant_ckpt is not None and args.model == 'llama': + # quantize initial model + actor_cfg = AutoConfig.from_pretrained(args.pretrain) + with low_resource_init(), no_init_weights(): + initial_model = get_actor_from_args(args.model, config=actor_cfg) + initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits, + args.quant_group_size).cuda().requires_grad_(False) + else: + initial_model = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda() + return actor, critic, reward_model, initial_model + + # configure Experience Maker + experience_holder_ref = ExperienceMakerHolder.options(name="maker1", num_gpus=1, max_concurrency=2).remote( + detached_trainer_name_list=[f'trainer{i}' for i in range(args.num_trainers)], + strategy_fn=partial(get_strategy_from_args, args.maker_strategy), + model_fn=model_fn, + env_info=env_info_maker, + experience_batch_size=args.experience_batch_size, + kl_coef=0.1, + debug=args.debug, + update_lora_weights=not (args.lora_rank == 0), + # sync_models_from_trainers=True, + # generation kwargs: + max_length=512, + do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + eval_performance=True, + use_cache=True, + ) + + # uncomment this function if sync_models_from_trainers is True + # ray.get([ + # trainer_ref.sync_models_to_remote_makers.remote() + # for trainer_ref in trainer_refs + # ]) + + wait_tasks = [] + + total_steps = args.experience_batch_size * args.experience_steps // (args.num_trainers * args.train_batch_size) + for trainer_ref in trainer_refs: + wait_tasks.append(trainer_ref.fit.remote(total_steps, args.update_steps, args.train_epochs)) + + dataset_size = args.experience_batch_size * 4 + + def build_dataloader(): + + def tokenize_fn(texts): + batch = tokenizer(texts, 
return_tensors='pt', max_length=96, padding='max_length', truncation=True) + return {k: v.cuda() for k, v in batch.items()} + + dataset = pd.read_csv(args.prompt_path)['prompt'] + dataloader = DataLoader(dataset=dataset, batch_size=dataset_size, shuffle=True, collate_fn=tokenize_fn) + return dataloader + + wait_tasks.append(experience_holder_ref.workingloop.remote(build_dataloader, num_steps=args.experience_steps)) + + ray.get(wait_tasks) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--prompt_path', type=str, default=None) + parser.add_argument('--num_trainers', type=int, default=1) + parser.add_argument('--trainer_strategy', + choices=[ + 'naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu', + 'colossalai_zero2_cpu' + ], + default='naive') + parser.add_argument('--maker_strategy', choices=['naive'], default='naive') + parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) + parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) + parser.add_argument('--pretrain', type=str, default=None) + parser.add_argument('--critic_pretrain', type=str, default=None) + parser.add_argument('--experience_steps', type=int, default=4) + parser.add_argument('--experience_batch_size', type=int, default=8) + parser.add_argument('--train_epochs', type=int, default=1) + parser.add_argument('--update_steps', type=int, default=2) + parser.add_argument('--train_batch_size', type=int, default=8) + parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") + + parser.add_argument('--initial_model_quant_ckpt', type=str, default=None) + parser.add_argument('--quant_bits', type=int, default=4) + parser.add_argument('--quant_group_size', type=int, default=128) + parser.add_argument('--debug', action='store_true') + args = parser.parse_args() + ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": dict(os.environ)}) + main(args) diff --git a/applications/Chat/examples/ray/mmmt_prompt.py b/applications/Chat/examples/ray/mmmt_prompt.py new file mode 100644 index 000000000000..fa7b2bd7edfd --- /dev/null +++ b/applications/Chat/examples/ray/mmmt_prompt.py @@ -0,0 +1,189 @@ +import argparse +import os +import socket +from functools import partial + +import pandas as pd +import ray +import torch +from coati.quant import llama_load_quant, low_resource_init +from coati.ray.detached_trainer_ppo import DetachedPPOTrainer +from coati.ray.experience_maker_holder import ExperienceMakerHolder +from coati.ray.utils import ( + get_actor_from_args, + get_critic_from_args, + get_receivers_per_sender, + get_reward_model_from_args, + get_strategy_from_args, +) +from torch.utils.data import DataLoader +from transformers import AutoConfig, AutoTokenizer +from transformers.modeling_utils import no_init_weights + + +def get_free_port(): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(('', 0)) + return s.getsockname()[1] + + +def get_local_ip(): + with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: + s.connect(('8.8.8.8', 80)) + return s.getsockname()[0] + + +def main(args): + master_addr = str(get_local_ip()) + # trainer_env_info + trainer_port = str(get_free_port()) + env_info_trainers = [{ + 'local_rank': '0', + 'rank': str(rank), + 'world_size': str(args.num_trainers), + 'master_port': trainer_port, + 'master_addr': master_addr + } for rank in range(args.num_trainers)] + + # maker_env_info + maker_port = 
str(get_free_port()) + env_info_makers = [{ + 'local_rank': '0', + 'rank': str(rank), + 'world_size': str(args.num_makers), + 'master_port': maker_port, + 'master_addr': master_addr + } for rank in range(args.num_makers)] + + # configure tokenizer + tokenizer = AutoTokenizer.from_pretrained(args.pretrain) + tokenizer.pad_token = tokenizer.eos_token + + def model_fn(): + actor = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda() + critic = get_critic_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda() + reward_model = get_reward_model_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda() + if args.initial_model_quant_ckpt is not None and args.model == 'llama': + # quantize initial model + actor_cfg = AutoConfig.from_pretrained(args.pretrain) + with low_resource_init(), no_init_weights(): + initial_model = get_actor_from_args(args.model, config=actor_cfg) + initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits, + args.quant_group_size).cuda().requires_grad_(False) + else: + initial_model = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda() + return actor, critic, reward_model, initial_model + + # configure Experience Maker + experience_holder_refs = [ + ExperienceMakerHolder.options(name=f"maker{i}", num_gpus=1, max_concurrency=2).remote( + detached_trainer_name_list=[ + f'trainer{x}' + for x in get_receivers_per_sender(i, args.num_makers, args.num_trainers, allow_idle_sender=False) + ], + strategy_fn=partial(get_strategy_from_args, args.maker_strategy), + model_fn=model_fn, + env_info=env_info_maker, + kl_coef=0.1, + debug=args.debug, + update_lora_weights=not (args.lora_rank == 0), + # sync_models_from_trainers=True, + # generation kwargs: + max_length=512, + do_sample=True, + temperature=1.0, + top_k=50, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + eval_performance=True, + use_cache=True, + ) + for i, env_info_maker in enumerate(env_info_makers) + ] + + def trainer_model_fn(): + actor = get_actor_from_args(args.model, args.pretrain, lora_rank=args.lora_rank).half().cuda() + critic = get_critic_from_args(args.model, args.critic_pretrain, lora_rank=args.lora_rank).half().cuda() + return actor, critic + + # configure Trainer + trainer_refs = [ + DetachedPPOTrainer.options(name=f"trainer{i}", num_gpus=1, max_concurrency=2).remote( + experience_maker_holder_name_list=[ + f"maker{x}" + for x in get_receivers_per_sender(i, args.num_trainers, args.num_makers, allow_idle_sender=True) + ], + strategy_fn=partial(get_strategy_from_args, args.trainer_strategy), + model_fn=trainer_model_fn, + env_info=env_info_trainer, + train_batch_size=args.train_batch_size, + buffer_limit=16, + eval_performance=True, + debug=args.debug, + update_lora_weights=not (args.lora_rank == 0), + ) + for i, env_info_trainer in enumerate(env_info_trainers) + ] + + dataset_size = args.experience_batch_size * 4 + + def build_dataloader(): + + def tokenize_fn(texts): + batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) + return {k: v.cuda() for k, v in batch.items()} + + dataset = pd.read_csv(args.prompt_path)['prompt'] + dataloader = DataLoader(dataset=dataset, batch_size=dataset_size, shuffle=True, collate_fn=tokenize_fn) + return dataloader + + # uncomment this function if sync_models_from_trainers is True + # ray.get([ + # trainer_ref.sync_models_to_remote_makers.remote() + # 
for trainer_ref in trainer_refs + # ]) + + wait_tasks = [] + + for experience_holder_ref in experience_holder_refs: + wait_tasks.append(experience_holder_ref.workingloop.remote(build_dataloader, num_steps=args.experience_steps)) + + total_steps = args.experience_batch_size * args.experience_steps * \ + args.num_makers // (args.num_trainers * args.train_batch_size) + for trainer_ref in trainer_refs: + wait_tasks.append(trainer_ref.fit.remote(total_steps, args.update_steps, args.train_epochs)) + + ray.get(wait_tasks) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--prompt_path', type=str, default=None) + parser.add_argument('--num_makers', type=int, default=1) + parser.add_argument('--num_trainers', type=int, default=1) + parser.add_argument('--trainer_strategy', + choices=[ + 'naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu', + 'colossalai_zero2_cpu' + ], + default='naive') + parser.add_argument('--maker_strategy', choices=['naive'], default='naive') + parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) + parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) + parser.add_argument('--pretrain', type=str, default=None) + parser.add_argument('--critic_pretrain', type=str, default=None) + parser.add_argument('--experience_steps', type=int, default=4) + parser.add_argument('--experience_batch_size', type=int, default=8) + parser.add_argument('--train_epochs', type=int, default=1) + parser.add_argument('--update_steps', type=int, default=2) + parser.add_argument('--train_batch_size', type=int, default=8) + parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") + + parser.add_argument('--initial_model_quant_ckpt', type=str, default=None) + parser.add_argument('--quant_bits', type=int, default=4) + parser.add_argument('--quant_group_size', type=int, default=128) + parser.add_argument('--debug', action='store_true') + args = parser.parse_args() + + ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": dict(os.environ)}) + main(args) diff --git a/applications/Chat/examples/ray/requirements.txt b/applications/Chat/examples/ray/requirements.txt new file mode 100644 index 000000000000..e0275631807f --- /dev/null +++ b/applications/Chat/examples/ray/requirements.txt @@ -0,0 +1 @@ +ray diff --git a/applications/Chat/examples/ray/test_ci.sh b/applications/Chat/examples/ray/test_ci.sh new file mode 100755 index 000000000000..895f7de0fea9 --- /dev/null +++ b/applications/Chat/examples/ray/test_ci.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +set -xe +BASE=$(realpath $(dirname $0)) + +export RAY_NAMESPACE=admin +export DATA=/data/scratch/chatgpt/prompts.csv + +# install requirements +pip install -r ${BASE}/requirements.txt + +python ${BASE}/mmmt_prompt.py --prompt_path $DATA --num_makers 2 --num_trainers 2 --trainer_strategy colossalai_gemini --model opt --critic_model opt --pretrain facebook/opt-350m --critic_pretrain facebook/opt-125m --experience_batch_size 4 --train_batch_size 2 diff --git a/applications/Chat/examples/test_ci.sh b/applications/Chat/examples/test_ci.sh index 2b049163c801..ac3a9b507864 100755 --- a/applications/Chat/examples/test_ci.sh +++ b/applications/Chat/examples/test_ci.sh @@ -121,6 +121,17 @@ torchrun --standalone --nproc_per_node=2 ${BASE}/train_prompts.py --prompt_datas --rm_pretrain 'gpt2' \ --rm_path ${BASE}/rm_ckpt_gpt.pt \ --save_path ${BASE}/actor_checkpoint_prompts.pt + 
+torchrun --standalone --nproc_per_node=2 ${BASE}/train_prompts.py --prompt_dataset $PROMPT_PATH --pretrain_dataset $PRETRAIN_DATASET \ + --strategy colossalai_gemini --num_episodes 1 --max_timesteps 2 \ + --update_timesteps 2 --max_epochs 1 --train_batch_size 2 \ + --pretrain 'gpt2' --model gpt2 \ + --rm_pretrain 'gpt2' \ + --rm_path ${BASE}/rm_ckpt_gpt.pt \ + --save_path ${BASE}/actor_checkpoint_prompts.pt rm -rf ${BASE}/rm_ckpt_gpt.pt rm -rf ${BASE}/actor_checkpoint_prompts.pt + +# 3080 doesn't support P2P, skip this test +# cd ${BASE}/ray && bash test_ci.sh && cd ${BASE} diff --git a/applications/Chat/tests/test_checkpoint.py b/applications/Chat/tests/test_checkpoint.py index 4c05a3431699..d93a5c94d8ea 100644 --- a/applications/Chat/tests/test_checkpoint.py +++ b/applications/Chat/tests/test_checkpoint.py @@ -6,6 +6,7 @@ import torch import torch.distributed as dist from coati.models.gpt import GPTActor +from coati.models.utils import calc_action_log_probs from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy from transformers.models.gpt2.configuration_gpt2 import GPT2Config @@ -43,7 +44,8 @@ def run_test_checkpoint(strategy): def run_step(): data = get_data(BATCH_SIZE) action_mask = torch.ones_like(data['attention_mask'], dtype=torch.bool) - action_log_probs = actor(data['input_ids'], action_mask.size(1), data['attention_mask']) + actor_output = actor(data['input_ids'], data['attention_mask']) + action_log_probs = calc_action_log_probs(actor_output, data['input_ids'], action_mask.size(1)) loss = action_log_probs.sum() strategy.backward(loss, actor, actor_optim) strategy.optimizer_step(actor_optim) diff --git a/colossalai/booster/booster.py b/colossalai/booster/booster.py index 61d912157449..4a42e204982f 100644 --- a/colossalai/booster/booster.py +++ b/colossalai/booster/booster.py @@ -25,11 +25,11 @@ class Booster: Examples: ```python colossalai.launch(...) - plugin = GeminiPlugin(stage=3, ...) + plugin = GeminiPlugin(...) booster = Booster(precision='fp16', plugin=plugin) model = GPT2() - optimizer = Adam(model.parameters()) + optimizer = HybridAdam(model.parameters()) dataloader = Dataloader(Dataset) lr_scheduler = LinearWarmupScheduler() criterion = GPTLMLoss() diff --git a/colossalai/initialize.py b/colossalai/initialize.py index 5d3f3e5530cb..dc0df0517508 100644 --- a/colossalai/initialize.py +++ b/colossalai/initialize.py @@ -238,7 +238,7 @@ def initialize(model: nn.Module, loaded into gpc.config. Args: - model (:class:`torch.nn.Module` or Callbale): Your model instance or a function to build the model. + model (:class:`torch.nn.Module` or Callable): Your model instance or a function to build the model. optimizer (:class:`torch.optim.optimizer.Optimizer` or :class:`Type[torch.optim.optimizer]`): Your optimizer instance. criterion (:class:`torch.nn.modules.loss._Loss`, optional): Your criterion instance. diff --git a/colossalai/lazy/lazy_init.py b/colossalai/lazy/lazy_init.py index c1fda3c53865..76f550dc4392 100644 --- a/colossalai/lazy/lazy_init.py +++ b/colossalai/lazy/lazy_init.py @@ -37,7 +37,7 @@ # If your intent is to change the metadata of a Tensor (such as sizes / strides / storage / storage_offset) # without autograd tracking the change, remove the .data / .detach() call and wrap the change in a `with torch.no_grad():` block. 
# These ops cannot be unwrapped using .data -_CHANGE_META_OPS = ['_cudnn_rnn_flatten_weight', 'requires_grad_', '__get__', '__set__'] +_CHANGE_META_OPS = ['_cudnn_rnn_flatten_weight', 'requires_grad_', '__get__', '__set__', 'numel', 'size', 'dim'] _LEGACY_TENSOR_CONSTRUCTOR = { 'FloatTensor': torch.float, diff --git a/colossalai/nn/optimizer/cpu_adam.py b/colossalai/nn/optimizer/cpu_adam.py index 1ec8783c53d3..3a6d37103398 100644 --- a/colossalai/nn/optimizer/cpu_adam.py +++ b/colossalai/nn/optimizer/cpu_adam.py @@ -13,7 +13,7 @@ class CPUAdam(NVMeOptimizer): """Implements Adam algorithm. - Supports parameters updating on both GPU and CPU, depanding on the device of parameters. + Supports parameters updating on both GPU and CPU, depending on the device of parameters. But the parameters and gradients should on the same device: * Parameters on CPU and gradients on CPU is allowed. * Parameters on GPU and gradients on GPU is allowed. diff --git a/colossalai/nn/optimizer/hybrid_adam.py b/colossalai/nn/optimizer/hybrid_adam.py index 526071b06f95..84903ac36832 100644 --- a/colossalai/nn/optimizer/hybrid_adam.py +++ b/colossalai/nn/optimizer/hybrid_adam.py @@ -14,7 +14,7 @@ class HybridAdam(CPUAdam): """Implements Adam algorithm. - Supports parameters updating on both GPU and CPU, depanding on the device of parameters. + Supports parameters updating on both GPU and CPU, depending on the device of parameters. But the parameters and gradients should on the same device: * Parameters on CPU and gradients on CPU is allowed. * Parameters on GPU and gradients on GPU is allowed. diff --git a/colossalai/pipeline/pipelinable.py b/colossalai/pipeline/pipelinable.py index 9731530a6e15..79913987b7cc 100644 --- a/colossalai/pipeline/pipelinable.py +++ b/colossalai/pipeline/pipelinable.py @@ -83,7 +83,7 @@ def _post_init_method(self, module: torch.nn.Module, *args, **kwargs): for k, v in kwargs.items(): if isinstance(v, torch.nn.Module): v = self._layer_spec_dict[id(v)] - # (lyl)TODO: analyse ColoTensor as well + # (lyl)TODO: analyze ColoTensor as well modified_kwargs[k] = v # keep track of the module children @@ -117,7 +117,7 @@ def _post_init_method(self, module: torch.nn.Module, *args, **kwargs): def to_layer_list(self, exec_seq=None): """ Create a layer spec list and func list with execution sequence given by user. - If exec_seq is None, we will take the module initizing order as execution order. + If exec_seq is None, we will take the module initializing order as execution order. """ self._exec_seq = exec_seq @@ -177,7 +177,7 @@ def to_layer_list(self, exec_seq=None): def partition(self, num_chunks, pipeline_size, rank): """ - Partitioned model will be built respect to partion policy. + Partitioned model will be built respect to partition policy. The real module instance will be built in this method. 
""" if isinstance(self._policy, str): @@ -193,7 +193,7 @@ def partition(self, num_chunks, pipeline_size, rank): self.customized_parts = customized_partition(self._exec_seq) assert len(self.customized_parts) == gpc.get_world_size( ParallelMode.PIPELINE - ), f'World size is {gpc.get_world_size(ParallelMode.PIPELINE)}, but the number of partions is {len(self.customized_parts)}' + ), f'World size is {gpc.get_world_size(ParallelMode.PIPELINE)}, but the number of partitions is {len(self.customized_parts)}' parts = self.customized_parts[rank] else: raise ValueError("A string partition policy should be one of ['uniform', 'balanced', 'customized'].") diff --git a/colossalai/pipeline/rpc/_pipeline_base.py b/colossalai/pipeline/rpc/_pipeline_base.py index 2d7e25c82e7b..9e549df58214 100644 --- a/colossalai/pipeline/rpc/_pipeline_base.py +++ b/colossalai/pipeline/rpc/_pipeline_base.py @@ -123,7 +123,7 @@ def __init__(self, self.device = device self._initialize_outstanding_range() - # variable and const for context managment + # variable and const for context management self.outstanding = 0 self.forward_times = 0 self.backward_times = 0 @@ -226,7 +226,7 @@ def sync_global_worker_rrefs(self, pp_rank_to_worker_rref: Dict[int, PyRRef]) -> self.pp_rank_to_worker_rref = pp_rank_to_worker_rref # for some schedule need the other worker's info to initialise partition (like Chimera) - # construction of partition is executed after the registion of pp_rank_to_worker_rref + # construction of partition is executed after the registration of pp_rank_to_worker_rref self._initialize_partition() # res_use works for lifecycle counter, @@ -418,7 +418,7 @@ def subscribe_producer(self, microbatch_id: int, forward_only: bool): # On current PP middleware design for DAG, get_output_by_key used by _subscribe_producer # can only be executed once for every producer-consumer stage pair, which is necessary # to count the lifecycle of work_item. So, keeping the _subscribe_producer in the same - # lock of work_item queue operation gurantees the consistency of lifecycle counter. + # lock of work_item queue operation guarantees the consistency of lifecycle counter. work_item_from_producer = self._subscribe_producer(microbatch_id, forward_only) self.work_list[key] = work_item_from_producer self.work_list_condition_lock.notify_all() @@ -460,7 +460,7 @@ def subscribe_consumer(self, microbatch_id: int): # On current PP middleware design for DAG, get_output_by_key used by subscribe_consumer # can only be executed once for every producer-consumer stage pair, which is necessary # to count the lifecycle of work_item. So, keeping the subscribe_consumer in the same - # lock of work_item queue operation gurantees the consistency of lifecycle counter. + # lock of work_item queue operation guarantees the consistency of lifecycle counter. 
work_item_from_consumer = self._subscribe_consumer(microbatch_id) self.work_list[key] = work_item_from_consumer self.work_list_condition_lock.notify_all() @@ -508,7 +508,7 @@ def _get_producer_consumer(self) -> None: assert self.producer_stage_ids is None, f"all the producers of rank {rank} has been subscribed" assert self.consumer_stage_ids is None, f"all the consumers of rank {rank} has been subscribed" - # should be aranged in order, the order of the input of current forward + # should be arranged in order, the order of the input of current forward self.producer_stage_ids = self.get_producer_stage_ids() self.consumer_stage_ids = self.get_consumer_stage_ids() diff --git a/colossalai/pipeline/rpc/_pipeline_schedule.py b/colossalai/pipeline/rpc/_pipeline_schedule.py index 0d572231d378..6eda8f3b34b7 100644 --- a/colossalai/pipeline/rpc/_pipeline_schedule.py +++ b/colossalai/pipeline/rpc/_pipeline_schedule.py @@ -123,7 +123,7 @@ def _get_producer_consumer(self) -> None: assert self.producer_stage_ids is None, f"all the producers of rank {rank} has been subscribed" assert self.consumer_stage_ids is None, f"all the consumers of rank {rank} has been subscribed" - # should be aranged in order, the order of the input of current forward + # should be arranged in order, the order of the input of current forward self.producer_stage_ids = [] self.consumer_stage_ids = [] @@ -174,7 +174,7 @@ def _initialize_partition(self): else: # if it is down pipeline, create partition by origin method co_up_pp_worker_rref = self.pp_rank_to_worker_rref[pp_rank - stage_num] - # get the coresponding model state dict and wait for its init + # get the corresponding model state dict and wait for its init state_dict = co_up_pp_worker_rref.rpc_sync().get_partition_state_dict() super()._initialize_partition() self.module_partition.load_state_dict(state_dict) @@ -228,7 +228,7 @@ def _hook_before_step(self): stage_num = self.actual_stage_num co_pp_rank = (pp_rank + stage_num) % (2 * stage_num) - # if currrent pp_rank is not the first to do step + # if current pp_rank is not the first to do step # wait its previous pp_rank finish step grads = self.get_parameter_gradients() diff --git a/colossalai/pipeline/utils.py b/colossalai/pipeline/utils.py index df7226644a7a..ac8a3ad7d1db 100644 --- a/colossalai/pipeline/utils.py +++ b/colossalai/pipeline/utils.py @@ -113,7 +113,7 @@ def _binary_search(weights, num): def partition_uniform(num_items, pipeline_parallel_size, num_chunks): assert num_items % num_chunks == 0, \ - "Layer length should be divided by the number of chunks, otherwise parameter method is recomended" + "Layer length should be divided by the number of chunks, otherwise parameter method is recommended" logger = get_dist_logger() parts = [[] for _ in range(pipeline_parallel_size)] diff --git a/colossalai/tensor/d_tensor/comm_spec.py b/colossalai/tensor/d_tensor/comm_spec.py index 765d8ec1b01a..159125fa16db 100644 --- a/colossalai/tensor/d_tensor/comm_spec.py +++ b/colossalai/tensor/d_tensor/comm_spec.py @@ -28,7 +28,7 @@ class CommSpec: to determine the buffer shape, and logical_process_axis Argument: - comm_pattern(CollectiveCommPattern): decribe the communication method used in this spec. + comm_pattern(CollectiveCommPattern): describe the communication method used in this spec. process_groups_dict(Dict): A dict which contains the process groups used to apply this CommSpec. gather_dim(int, Optional): The gather_dim of the tensor will be gathered. shard_dim(int, Optional): The shard_dim of the tensor will be sharded. 
diff --git a/colossalai/tensor/d_tensor/sharding_spec.py b/colossalai/tensor/d_tensor/sharding_spec.py index 2ea0c4db89fd..565012b58a03 100644 --- a/colossalai/tensor/d_tensor/sharding_spec.py +++ b/colossalai/tensor/d_tensor/sharding_spec.py @@ -41,7 +41,7 @@ def __repr__(self): def _convert_str_to_shard_list(self, str_spec): ''' - Conver str_spec into shard_list. + Convert str_spec into shard_list. Argument: str_spec(str): dim spec in str type. @@ -58,7 +58,7 @@ def _convert_str_to_shard_list(self, str_spec): def build_difference_2d_dict(self): ''' - Build a difference maping for 2D device mesh case. It will be used to + Build a difference mapping for 2D device mesh case. It will be used to compute the difference between DimSpec pairs. ''' diff --git a/colossalai/tensor/param_op_hook.py b/colossalai/tensor/param_op_hook.py index 9c2e0d4adbf1..8ed8176d996a 100644 --- a/colossalai/tensor/param_op_hook.py +++ b/colossalai/tensor/param_op_hook.py @@ -164,7 +164,7 @@ def _get_grad_args(*args): for obj in args: if _is_grad_tensor(obj): return args, None - # otherwise, the first arguement should be a tuple of grad tensors + # otherwise, the first argument should be a tuple of grad tensors # if there is no grad tensor, the backward of PreFwdPostBwd can't be triggered arg_zero = args[0] if not isinstance(arg_zero, tuple): diff --git a/colossalai/tensor/process_group.py b/colossalai/tensor/process_group.py index f108bdc247f5..8d2e9a616d76 100644 --- a/colossalai/tensor/process_group.py +++ b/colossalai/tensor/process_group.py @@ -130,7 +130,7 @@ def set_cpu_groups(self): @property def has_cpu_groups(self) -> bool: """has_cpu_groups - If cpu groups have been initailized. + If cpu groups have been initialized. Returns: bool: cpu process groups have been initialized or not. diff --git a/colossalai/tensor/shape_consistency.py b/colossalai/tensor/shape_consistency.py index 0a840006f086..5bec552d69d5 100644 --- a/colossalai/tensor/shape_consistency.py +++ b/colossalai/tensor/shape_consistency.py @@ -252,7 +252,7 @@ def get_all_all_to_all_spec(self, source_spec: ShardingSpec, def get_all_shard_spec(self, source_spec: ShardingSpec, orig_cost_dict): ''' Get all valid sharding specs from source_spec with single shard operation, and - accumulate commucation cost on origin cost which will finally be used in auto sharding solver. + accumulate communication cost on origin cost which will finally be used in auto sharding solver. For the sharding operation, we just care about legal sharding dimensions. Argument: @@ -386,7 +386,7 @@ def get_all_mix_gather_spec(self, source_spec: ShardingSpec, def get_all_one_step_transform_spec(self, source_spec: ShardingSpec, orig_cost_dict) -> Dict[ShardingSpec, float]: ''' Get all valid sharding specs from source_spec with one step transform, and - accumulate commucation cost on origin cost which will finally be used in auto sharding solver. + accumulate communication cost on origin cost which will finally be used in auto sharding solver. Note: all-gather will eliminate a sharding dimension, all-to-all will keep sharding dimension same as before, and shard will add a sharding dimension. Therefore, the result of above operations are mutual exclusive, @@ -577,7 +577,7 @@ def shape_consistency(self, source_spec: ShardingSpec, Step3: Repeat above steps until the source spec transform to target spec. 
- During finding the transform path, commucation cost will be accumulated, and it + During finding the transform path, communication cost will be accumulated, and it will be finally used in auto parallel solver. Additionally, to avoid repeating the path search in runtime, we cached all solved path diff --git a/colossalai/tensor/sharding_spec.py b/colossalai/tensor/sharding_spec.py index bed320130ccd..406ad49097b5 100644 --- a/colossalai/tensor/sharding_spec.py +++ b/colossalai/tensor/sharding_spec.py @@ -45,7 +45,7 @@ def __repr__(self): def _convert_str_to_shard_list(self, str_spec): ''' - Conver str_spec into shard_list. + Convert str_spec into shard_list. Argument: str_spec(str): dim spec in str type. @@ -62,7 +62,7 @@ def _convert_str_to_shard_list(self, str_spec): def build_difference_2d_dict(self): ''' - Build a difference maping for 2D device mesh case. It will be used to + Build a difference mapping for 2D device mesh case. It will be used to compute the difference between DimSpec pairs. ''' @@ -166,7 +166,7 @@ class ShardingSpec: device_mesh(DeviceMesh): A logical view of a physical mesh. entire_shape(torch.Size): The entire shape of tensor before sharded. dim_partition_dict(Dict[int, List[int]], optional): The key is the dimension of tensor to be sharded, - and the value of the key decribe which logical axis will be sharded in that dimension. + and the value of the key describe which logical axis will be sharded in that dimension. sharding_sequence(List[_DimSpec], optional): A straight view of ShardingSpec looks like [R, R, S0, S1]. ''' diff --git a/colossalai/tensor/utils.py b/colossalai/tensor/utils.py index 6e30f97fef03..e7d51d099e02 100644 --- a/colossalai/tensor/utils.py +++ b/colossalai/tensor/utils.py @@ -77,7 +77,7 @@ def shard_simulator(target_pair, legal_sharding_dims): Argument: target_pair(Tuple[int, List[int]]): The first element is the dimension of tensor to be sharded, - and the second element decribes which logical axis will be sharded in that dimension. + and the second element describes which logical axis will be sharded in that dimension. ''' _, shard_list = target_pair shard_list_list = [] diff --git a/colossalai/trainer/_trainer.py b/colossalai/trainer/_trainer.py index 60bbc4eeee32..bfe1c403fd48 100644 --- a/colossalai/trainer/_trainer.py +++ b/colossalai/trainer/_trainer.py @@ -31,9 +31,9 @@ class Trainer: >>> # Initialize your engine, train_dataloader, test_dataloader, lr_scheduler >>> engine, train_dataloader, _, _ = colossalai.initialize(model, optimizer, criterion) >>> # Beginning training progress - >>> timier = ... + >>> timer = ... >>> logger = ... - >>> trainer = Trainer(engine=engine, logger=logger, timer=timier) + >>> trainer = Trainer(engine=engine, logger=logger, timer=timer) >>> # add hooks you would like to use here. >>> hook_list = [] >>> trainer.fit( @@ -56,7 +56,7 @@ def __init__( timer: MultiTimer = None, logger: DistributedLogger = None, ): - # training-ralated params + # training-related params self._engine = engine self._max_epochs = 0 self._cur_epoch = 0 @@ -118,7 +118,7 @@ def _set_current_step(self, epoch: int): self._cur_step = epoch * self._steps_per_epoch def _call_timer(self, action: str, item: str, *args, **kwargs) -> None: - """Call timer funciton with a given timer name. + """Call timer function with a given timer name. Args: action (str): Function to be called on timer. 
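The `shape_consistency` docstring touched above describes an iterative search: generate the one-step transform specs reachable from the current sharding spec, move to a candidate while accumulating its communication cost, and repeat until the target spec is reached, caching solved paths for reuse. The fragment below is only an abstract sketch of that loop, under the assumption that each step follows the lowest-cost candidate; `one_step_transforms` is a hypothetical stand-in for `get_all_one_step_transform_spec`, and the iteration bound is an added safeguard rather than part of the documented procedure:

```python
def greedy_transform_path(source_spec, target_spec, one_step_transforms, max_steps=10):
    """Follow one-step transforms until target_spec is reached (sketch only).

    one_step_transforms(spec) is assumed to return a dict mapping each
    candidate spec reachable in one step to the communication cost of that step.
    Returns the visited specs and the accumulated communication cost.
    """
    path, total_cost, current = [source_spec], 0.0, source_spec
    for _ in range(max_steps):
        if current == target_spec:
            return path, total_cost
        candidates = one_step_transforms(current)                     # all one-step specs
        current, cost = min(candidates.items(), key=lambda kv: kv[1])  # assumed: cheapest step
        total_cost += cost                                             # accumulate comm cost
        path.append(current)                                           # repeat from the new spec
    raise RuntimeError("target spec not reached within max_steps")
```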
diff --git a/colossalai/utils/data_sampler/data_parallel_sampler.py b/colossalai/utils/data_sampler/data_parallel_sampler.py index 945dc54b397a..2318e07a7f8d 100644 --- a/colossalai/utils/data_sampler/data_parallel_sampler.py +++ b/colossalai/utils/data_sampler/data_parallel_sampler.py @@ -1,6 +1,6 @@ #!/usr/bin/env python # -*- encoding: utf-8 -*- -# adpated from torch.utils.data.DistributedSampler +# adapted from torch.utils.data.DistributedSampler import math import random diff --git a/colossalai/utils/model/utils.py b/colossalai/utils/model/utils.py index f49607376439..21bc530934d3 100644 --- a/colossalai/utils/model/utils.py +++ b/colossalai/utils/model/utils.py @@ -70,7 +70,7 @@ def _init_subclass(cls, **kwargs): cls.__init__ = preprocess_after(cls.__init__) # Replace .__init__() for all existing subclasses of torch.nn.Module - # Excution self._post_init_method after the default init function. + # Execution self._post_init_method after the default init function. substitute_init_recursively(torch.nn.modules.module.Module, _enable_class, set()) # holding on to the current __init__subclass__ for exit diff --git a/colossalai/utils/profiler/legacy/comm_profiler.py b/colossalai/utils/profiler/legacy/comm_profiler.py index a4f5729c97ec..334f0113ee90 100644 --- a/colossalai/utils/profiler/legacy/comm_profiler.py +++ b/colossalai/utils/profiler/legacy/comm_profiler.py @@ -111,7 +111,7 @@ def append(s: str = None): res.append(sep) if self.warn_flag: - append("Warnning: there exists multiple communication operations in the same time. As a result, " + append("Warning: there exists multiple communication operations in the same time. As a result, " "the profiling result is not accurate.") if self.total_cuda_time == 0: @@ -123,12 +123,12 @@ def append(s: str = None): append("total number of calls: {}".format(self.total_count)) append("All events:") - seperation = '-' * 74 + separation = '-' * 74 row_format = '{:^10}' + '{:^12}' * 2 + '{:^16}' + '{:^12}' * 2 - append(seperation) + append(separation) append(row_format.format('Location', 'GPU time', 'Percentage', 'Comm volume', 'Bandwidth', 'Num of calls')) - append(seperation) + append(separation) show_list = sorted(self.ops_record.items(), key=lambda kv: -kv[1].self_cuda_time) for location, event in show_list: diff --git a/colossalai/utils/profiler/legacy/pcie_profiler.py b/colossalai/utils/profiler/legacy/pcie_profiler.py index 526222941ef9..8f812f5cfc7b 100644 --- a/colossalai/utils/profiler/legacy/pcie_profiler.py +++ b/colossalai/utils/profiler/legacy/pcie_profiler.py @@ -130,12 +130,12 @@ def append(s: str = None): append("Possible data transmission events in PCIE:") - seperation = '-' * 62 + separation = '-' * 62 row_format = '{:^10}' + '{:^12}' + '{:^16}' + '{:^12}' * 2 - append(seperation) + append(separation) append(row_format.format('Location', 'GPU time', 'Trans volume', 'Bandwidth', 'Num of calls')) - append(seperation) + append(separation) show_list = sorted(self.ops_record.items(), key=lambda kv: -kv[1].cuda_time) for location, event in show_list: diff --git a/colossalai/utils/profiler/legacy/prof_utils.py b/colossalai/utils/profiler/legacy/prof_utils.py index 87ad644a7ecc..2f7eee827651 100644 --- a/colossalai/utils/profiler/legacy/prof_utils.py +++ b/colossalai/utils/profiler/legacy/prof_utils.py @@ -32,9 +32,9 @@ def _format_memory(nbytes): return str(nbytes) + ' B' -def _format_bandwidth(volme: float or int, time_us: int): +def _format_bandwidth(volume: float or int, time_us: int): sec_div_mb = (1000.0 / 1024.0)**2 - mb_per_sec = 
volme / time_us * sec_div_mb + mb_per_sec = volume / time_us * sec_div_mb if mb_per_sec >= 1024.0: return '{:.3f} GB/s'.format(mb_per_sec / 1024.0) diff --git a/colossalai/utils/rank_recorder/README.md b/colossalai/utils/rank_recorder/README.md index e30a925d2a92..da8a6039d543 100644 --- a/colossalai/utils/rank_recorder/README.md +++ b/colossalai/utils/rank_recorder/README.md @@ -1,5 +1,5 @@ # Rank Recorder -This is a useful tool to get the records of certain functions in each rank. The records of each rank will dump into a json file after the end of multiple process program. You can parse and visualise the json file easily. +This is a useful tool to get the records of certain functions in each rank. The records of each rank will dump into a json file after the end of multiple process program. You can parse and visualize the json file easily. Before using the tool, you should ensure dist.is_initialized() return true before exit of program. @@ -20,7 +20,7 @@ with recorder(record_name, current_rank) as r: ``` ## Example -This is a demo to display kernel select in cuda and visualise the cost of several procedures in each rank. +This is a demo to display kernel select in cuda and visualize the cost of several procedures in each rank. ```python import time diff --git a/colossalai/utils/rank_recorder/rank_recorder.py b/colossalai/utils/rank_recorder/rank_recorder.py index c088ceeb2e87..40bb7e184a12 100644 --- a/colossalai/utils/rank_recorder/rank_recorder.py +++ b/colossalai/utils/rank_recorder/rank_recorder.py @@ -133,7 +133,7 @@ def merge_recode(self): with open(self.export_name + '.json', 'w', encoding='utf-8') as f: json.dump(recoders, f, ensure_ascii=False) - def visualise_record(self): + def visualize_record(self): with open(self.export_name + '.json', 'r', encoding='utf-8') as f: records = json.load(f) records = dict(records) @@ -171,7 +171,7 @@ def exit_worker(self): if rank == 1: # take the base time of rank 0 as standard self.merge_recode() - self.visualise_record() + self.visualize_record() recorder = Recorder() diff --git a/colossalai/zero/gemini/chunk/chunk.py b/colossalai/zero/gemini/chunk/chunk.py index a7682eaf62e9..51da9be2b1f8 100644 --- a/colossalai/zero/gemini/chunk/chunk.py +++ b/colossalai/zero/gemini/chunk/chunk.py @@ -416,7 +416,7 @@ def copy_tensor_to_chunk_slice(self, tensor: torch.Tensor, data_slice: torch.Ten Copy data slice to the memory space indexed by the input tensor in the chunk. Args: - tensor (torch.Tensor): the tensor used to retrive meta information + tensor (torch.Tensor): the tensor used to retrieve meta information data_slice (torch.Tensor): the tensor to be copied to the chunk """ # sanity check diff --git a/colossalai/zero/gemini/chunk/manager.py b/colossalai/zero/gemini/chunk/manager.py index 77368d06d255..38d34f14863e 100644 --- a/colossalai/zero/gemini/chunk/manager.py +++ b/colossalai/zero/gemini/chunk/manager.py @@ -157,7 +157,7 @@ def copy_tensor_to_chunk_slice(self, tensor: torch.Tensor, data: torch.Tensor) - Copy data to the chunk. 
Args: - tensor (torch.Tensor): the tensor used to retrive meta information + tensor (torch.Tensor): the tensor used to retrieve meta information data (torch.Tensor): the tensor to be copied to the chunk """ chunk = self.tensor_chunk_map[tensor] diff --git a/colossalai/zero/gemini/memory_tracer/chunk_memstats_collector.py b/colossalai/zero/gemini/memory_tracer/chunk_memstats_collector.py index f5eb05b4f22a..83903bbf4023 100644 --- a/colossalai/zero/gemini/memory_tracer/chunk_memstats_collector.py +++ b/colossalai/zero/gemini/memory_tracer/chunk_memstats_collector.py @@ -25,7 +25,7 @@ def __init__(self, chunk_manager: ChunkManager, memstats: Optional[MemStats] = N # override def record_model_data_volume(self) -> None: """ - record model data volumn on cuda and cpu. + record model data volume on cuda and cpu. """ if self._start_flag and not self.use_outside_memstats: cuda_mem = self._chunk_manager.total_mem['cuda'] diff --git a/colossalai/zero/gemini/memory_tracer/memory_monitor.py b/colossalai/zero/gemini/memory_tracer/memory_monitor.py index f8d99dbce7a4..4bb585677d5b 100644 --- a/colossalai/zero/gemini/memory_tracer/memory_monitor.py +++ b/colossalai/zero/gemini/memory_tracer/memory_monitor.py @@ -45,7 +45,7 @@ def clear(self): class AsyncMemoryMonitor(MemoryMonitor): """ - An Async Memory Monitor runing during computing. Sampling memory usage of the current GPU + An Async Memory Monitor running during computing. Sampling memory usage of the current GPU at interval of `1/(10**power)` sec. The idea comes from Runtime Memory Tracer of PatrickStar @@ -67,7 +67,7 @@ class AsyncMemoryMonitor(MemoryMonitor): async_mem_monitor.save('log.pkl') Args: - power (int, optional): the power of time interva. Defaults to 10. + power (int, optional): the power of time interval. Defaults to 10. .. 
_PatrickStar: Parallel Training of Pre-trained Models via Chunk-based Memory Management: https://arxiv.org/abs/2108.05818 diff --git a/colossalai/zero/gemini/memory_tracer/utils.py b/colossalai/zero/gemini/memory_tracer/utils.py index 6962c058110e..65f6ba775139 100644 --- a/colossalai/zero/gemini/memory_tracer/utils.py +++ b/colossalai/zero/gemini/memory_tracer/utils.py @@ -7,7 +7,7 @@ def colo_model_optimizer_usage(optim) -> Tuple[int, int]: """Trace the optimizer memory usage Args: - optim (ShardedOptimV2): an instance of ShardedOptimver + optim (ShardedOptimV2): an instance of ShardedOptimizer Returns: Tuple[int, int]: cuda/cpu memory usage in Byte diff --git a/colossalai/zero/gemini/utils.py b/colossalai/zero/gemini/utils.py index e52b5b836b0b..6f4a253b504b 100644 --- a/colossalai/zero/gemini/utils.py +++ b/colossalai/zero/gemini/utils.py @@ -73,7 +73,7 @@ def get_static_torch_model(zero_ddp_model, zero_ddp_model (ZeroDDP): a zero ddp model device (torch.device): the device of the final torch model dtype (torch.dtype): the dtype of the final torch model - only_rank_0 (bool): if True, only rank0 has the coverted torch model + only_rank_0 (bool): if True, only rank0 has the converted torch model Returns: torch.nn.Module: a static torch model used for saving checkpoints or numeric checks diff --git a/colossalai/zero/legacy/gemini/ophooks/utils.py b/colossalai/zero/legacy/gemini/ophooks/utils.py index 84e8298c1d51..f88ad2b00e9e 100644 --- a/colossalai/zero/legacy/gemini/ophooks/utils.py +++ b/colossalai/zero/legacy/gemini/ophooks/utils.py @@ -88,7 +88,7 @@ def register_ophooks_recursively(module: torch.nn.Module, ophook_list: List[BaseOpHook], name: str = "", filter_fn: Optional[Callable] = None): - r"""Recursilvely register pre/post hooks for all submodules in the module in FWD and BWD.""" + r"""Recursively register pre/post hooks for all submodules in the module in FWD and BWD.""" assert isinstance(module, torch.nn.Module) assert isinstance(ophook_list, (list, tuple)) assert len(ophook_list) > 0, 'expected at least 1 hook in the argument ophook_list but found 0' @@ -103,7 +103,7 @@ def register_ophooks_recursively(module: torch.nn.Module, if len(list(module.parameters(recurse=False))) == 0: return - # return from flitered module + # return from filtered module if filter_fn is not None and filter_fn(module): return diff --git a/colossalai/zero/legacy/gemini/tensor_utils.py b/colossalai/zero/legacy/gemini/tensor_utils.py index b7f23e0253fd..843e330ee2c6 100644 --- a/colossalai/zero/legacy/gemini/tensor_utils.py +++ b/colossalai/zero/legacy/gemini/tensor_utils.py @@ -77,7 +77,7 @@ def colo_model_data_tensor_move_inline(t: Union[StatefulTensor, torch.Tensor], t move a tensor to the target_device Args: t (Union[StatefulTensor, torch.Tensor]): the tensor be moved - target_device: a traget device, if type is int, it the index of cuda card. + target_device: a target device, if type is int, it the index of cuda card. """ if not isinstance(target_device, torch.device): target_device = torch.device(f'cuda:{target_device}') diff --git a/colossalai/zero/legacy/init_ctx/init_context.py b/colossalai/zero/legacy/init_ctx/init_context.py index a3fa46b38b5a..84e2d2f4f8e1 100644 --- a/colossalai/zero/legacy/init_ctx/init_context.py +++ b/colossalai/zero/legacy/init_ctx/init_context.py @@ -46,7 +46,7 @@ class ZeroInitContext(InsertPostInitMethodToModuleSubClasses): """A context to initialize model. 1. Convert the model to fp16. - 2. The paramaters of the module are adapted to type ShardedParameter. + 2. 
The parameters of the module are adapted to type ShardedParameter. 3. Shard the param and grad according to flags. Args: diff --git a/colossalai/zero/legacy/sharded_model/sharded_model_v2.py b/colossalai/zero/legacy/sharded_model/sharded_model_v2.py index be3842beb208..e7064277fb3c 100644 --- a/colossalai/zero/legacy/sharded_model/sharded_model_v2.py +++ b/colossalai/zero/legacy/sharded_model/sharded_model_v2.py @@ -69,7 +69,7 @@ class ShardedModelV2(nn.Module): If it's 'auto', they are moving dynamically based on CPU and CUDA memory usage. It will utilize heterogeneous memory space evenly and well. Note that 'auto' policy can only work well when no other processes use CUDA during your training. Defaults to 'cuda'. - gradient_predivide_factor (Optional[float], optional): Gradient is divived by this value before reduce-scatter. Defaults to 1.0. + gradient_predivide_factor (Optional[float], optional): Gradient is divided by this value before reduce-scatter. Defaults to 1.0. reuse_fp16_shard (bool, optional): Whether to reuse fp16 shard for param and grad. Enabling this can reduce GPU memory usage, but you have to make sure you disable it when using gradient accumulation. In this mode, grad will be fp16. Make sure your optimizer supports mixed precision (fp32 param and fp16 grad). @@ -205,7 +205,7 @@ def dump_memory_stats(self, filename: Optional[str] = 'dump_mem_stats.log') -> N exit(0) """ if self._use_memory_tracer: - self.logger.error(f'dump memort tracer collected information to a {filename}', ranks=[0]) + self.logger.error(f'dump memory tracer collected information to a {filename}', ranks=[0]) if gpc.get_global_rank() == 0: with open(filename, 'w+') as f: f.write(f'cuda reserved {torch.cuda.memory_reserved(get_current_device()) / 1e9} GB\n') @@ -385,7 +385,7 @@ def _save_grad(self, param: Parameter, grad: torch.Tensor): # make parameters point to gradient assert param.colo_attr.saved_grad.is_null( - ), 'Gradien accumulation is not supported when reuse_fp16_shard=True' + ), 'Gradient accumulation is not supported when reuse_fp16_shard=True' param.colo_attr.grad_payload_reset(grad.data) # release the memory of param diff --git a/colossalai/zero/low_level/_utils.py b/colossalai/zero/low_level/_utils.py index afc98e7a7f54..218f7603bc54 100644 --- a/colossalai/zero/low_level/_utils.py +++ b/colossalai/zero/low_level/_utils.py @@ -261,7 +261,7 @@ def sync_param(flat_tensor, tensor_list): share the same memory space. This function will update the tensor list so that they point to the same value. 
- :param flat_tensor: A flat tensor obtained by calling `torch._utils._unflatten_dense_tensors` on a tensor lsit + :param flat_tensor: A flat tensor obtained by calling `torch._utils._unflatten_dense_tensors` on a tensor list :param tensor_list: A list of tensors corresponding to the flattened tensor :type flat_tensor: torch.Tensor :type tensor_list: List[torch.Tensor] diff --git a/colossalai/zero/low_level/low_level_optim.py b/colossalai/zero/low_level/low_level_optim.py index d4d03e5b5fcd..ee03c0f0ae15 100644 --- a/colossalai/zero/low_level/low_level_optim.py +++ b/colossalai/zero/low_level/low_level_optim.py @@ -207,8 +207,8 @@ def __init__( for param in self._working_param_groups[group_id]: self._param_store.set_param_reduction_state(param, False) - # intialize communication stream for - # communication-compuation overlapping + # initialize communication stream for + # communication-computation overlapping if self._overlap_communication: self._comm_stream = torch.cuda.Stream() @@ -269,7 +269,7 @@ def _partition_param_list(self, param_list): params_per_rank = [[] for _ in range(self._world_size)] numel_per_rank = [0 for _ in range(self._world_size)] - # partititon the parameters in a greedy fashion + # partition the parameters in a greedy fashion sorted_params = sorted(param_list, key=lambda x: x.numel(), reverse=True) for param in sorted_params: # allocate this parameter to the rank with @@ -297,7 +297,7 @@ def _attach_reduction_hook(self): param_group = self._working_param_groups[group_id] for param in param_group: if param.requires_grad: - # determines the reduction destionation rank + # determines the reduction destination rank # this is only valid for stage 2 # dst_rank = None means using all-reduce # else using reduce diff --git a/docs/source/en/advanced_tutorials/parallelize_your_training_like_Megatron.md b/docs/source/en/advanced_tutorials/parallelize_your_training_like_Megatron.md index 22d52fb3cd1a..978ac32fc78e 100644 --- a/docs/source/en/advanced_tutorials/parallelize_your_training_like_Megatron.md +++ b/docs/source/en/advanced_tutorials/parallelize_your_training_like_Megatron.md @@ -141,16 +141,16 @@ for mn, module in model.named_modules(): if 'mlp.c_fc' in mn: if 'weight' in pn or 'bias' in pn: - split_param_col_tp1d(param, pg) # colmn slice + split_param_col_tp1d(param, pg) # column slice # keep the shape of the output from c_fc param.compute_spec.set_output_replicate(False) elif 'mlp.c_proj' in mn: if 'weight' in pn: split_param_row_tp1d(param, pg) # row slice elif 'wte' in mn or 'wpe' in mn: - split_param_col_tp1d(param, pg) # colmn slice + split_param_col_tp1d(param, pg) # column slice elif 'c_attn' in mn or 'c_proj' in mn: - split_param_col_tp1d(param, pg) # colmn slice + split_param_col_tp1d(param, pg) # column slice ``` The modified model is illustrated below. 
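The `_partition_param_list` hunk in `low_level_optim.py` above only corrects comments, but those comments outline the underlying algorithm: sort the parameters by element count, largest first, and always hand the next parameter to the rank that currently holds the fewest elements. A minimal, self-contained sketch of that greedy scheme (not the optimizer's actual method, which also keeps the per-rank lists on the optimizer instance) looks like this:

```python
import torch


def partition_params_greedy(param_list, world_size):
    """Greedy, largest-first partition of parameters across ranks.

    Visiting the biggest tensors first keeps the per-rank element counts
    close to balanced, which is the intent of _partition_param_list.
    """
    params_per_rank = [[] for _ in range(world_size)]
    numel_per_rank = [0] * world_size
    for param in sorted(param_list, key=lambda p: p.numel(), reverse=True):
        # allocate this parameter to the rank with the smallest load so far
        rank = numel_per_rank.index(min(numel_per_rank))
        params_per_rank[rank].append(param)
        numel_per_rank[rank] += param.numel()
    return params_per_rank, numel_per_rank


# toy check: five tensors, two ranks -> element counts of 10 and 10
params = [torch.empty(n) for n in (7, 5, 4, 3, 1)]
_, loads = partition_params_greedy(params, world_size=2)
print(loads)    # [10, 10]
```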
diff --git a/docs/source/en/features/zero_with_chunk.md b/docs/source/en/features/zero_with_chunk.md index d6f6f611a64c..1b27d64b6897 100644 --- a/docs/source/en/features/zero_with_chunk.md +++ b/docs/source/en/features/zero_with_chunk.md @@ -195,7 +195,7 @@ def get_data(batch_size, seq_len, vocab_size): Finally, we define a model which uses Gemini + ZeRO DDP and define our training loop, As we pre-train GPT in this example, we just use a simple language model loss: ```python -from torch.optim import Adam +from colossalai.nn.optimizer import HybridAdam from colossalai.booster import Booster from colossalai.zero import ColoInitContext @@ -211,7 +211,7 @@ def main(): # build criterion criterion = GPTLMLoss() - optimizer = Adam(model.parameters(), lr=0.001) + optimizer = HybridAdam(model.parameters(), lr=0.001) torch.manual_seed(123) default_pg = ProcessGroup(tp_degree=args.tp_degree) diff --git a/docs/source/zh-Hans/advanced_tutorials/parallelize_your_training_like_Megatron.md b/docs/source/zh-Hans/advanced_tutorials/parallelize_your_training_like_Megatron.md index c4131e593437..b4e0d18a2647 100644 --- a/docs/source/zh-Hans/advanced_tutorials/parallelize_your_training_like_Megatron.md +++ b/docs/source/zh-Hans/advanced_tutorials/parallelize_your_training_like_Megatron.md @@ -126,16 +126,16 @@ for mn, module in model.named_modules(): if 'mlp.c_fc' in mn: if 'weight' in pn or 'bias' in pn: - split_param_col_tp1d(param, pg) # colmn slice + split_param_col_tp1d(param, pg) # column slice # keep the shape of the output from c_fc param.compute_spec.set_output_replicate(False) elif 'mlp.c_proj' in mn: if 'weight' in pn: split_param_row_tp1d(param, pg) # row slice elif 'wte' in mn or 'wpe' in mn: - split_param_col_tp1d(param, pg) # colmn slice + split_param_col_tp1d(param, pg) # column slice elif 'c_attn' in mn or 'c_proj' in mn: - split_param_col_tp1d(param, pg) # colmn slice + split_param_col_tp1d(param, pg) # column slice ``` 修改后的模型如下图所示。 diff --git a/docs/source/zh-Hans/features/zero_with_chunk.md b/docs/source/zh-Hans/features/zero_with_chunk.md index 9030464ddf9a..9fe5601bbd1b 100644 --- a/docs/source/zh-Hans/features/zero_with_chunk.md +++ b/docs/source/zh-Hans/features/zero_with_chunk.md @@ -197,7 +197,7 @@ def get_data(batch_size, seq_len, vocab_size): 最后,使用booster注入 Gemini + ZeRO DDP 特性, 并定义训练循环。由于我们在这个例子中对GPT进行预训练,因此只使用了一个简单的语言模型损失函数: ```python -from torch.optim import Adam +from colossalai.nn.optimizer import HybridAdam from colossalai.booster import Booster from colossalai.zero import ColoInitContext @@ -213,7 +213,7 @@ def main(): # build criterion criterion = GPTLMLoss() - optimizer = Adam(model.parameters(), lr=0.001) + optimizer = HybridAdam(model.parameters(), lr=0.001) torch.manual_seed(123) default_pg = ProcessGroup(tp_degree=args.tp_degree) diff --git a/examples/community/roberta/README.md b/examples/community/roberta/README.md index 8aefa327a4b4..000fce63f35f 100644 --- a/examples/community/roberta/README.md +++ b/examples/community/roberta/README.md @@ -44,7 +44,7 @@ following the `README.md`, load the h5py generated by preprocess of step 1 to pr ## 3. Finetune -The checkpoint produced by this repo can replace `pytorch_model.bin` from [hfl/chinese-roberta-wwm-ext-large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large/tree/main) directly. Then use transfomers from Hugging Face to finetune downstream application. 
+The checkpoint produced by this repo can replace `pytorch_model.bin` from [hfl/chinese-roberta-wwm-ext-large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large/tree/main) directly. Then use transformers from Hugging Face to finetune downstream application. ## Contributors The example is contributed by AI team from [Moore Threads](https://www.mthreads.com/). If you find any problems for pretraining, please file an issue or send an email to yehua.zhang@mthreads.com. At last, welcome any form of contribution! diff --git a/examples/community/roberta/preprocessing/README.md b/examples/community/roberta/preprocessing/README.md index 17cc2f4dc22c..2ed747541280 100644 --- a/examples/community/roberta/preprocessing/README.md +++ b/examples/community/roberta/preprocessing/README.md @@ -25,10 +25,10 @@ Firstly, each file has multiple documents, and each document contains multiple s In this example, split 200G Corpus into 100 shard, and each shard is about 2G. The size of the shard is memory-dependent, taking into account the number of servers, the memory used by the tokenizer, and the memory used by the multi-process training to read the shard (n data parallel requires n\*shard_size memory). **To sum up, data preprocessing and model pretraining requires fighting with hardware, not just GPU.** ```python -python sentence_split.py --input_path /orginal_corpus --output_path /shard --shard 100 +python sentence_split.py --input_path /original_corpus --output_path /shard --shard 100 # This step takes a short time ``` -* `--input_path`: all original corpus, e.g., /orginal_corpus/0.json /orginal_corpus/1.json ... +* `--input_path`: all original corpus, e.g., /original_corpus/0.json /original_corpus/1.json ... * `--output_path`: all shard with split sentences, e.g., /shard/0.txt, /shard/1.txt ... * `--shard`: Number of shard, e.g., 10, 50, or 100 @@ -76,7 +76,7 @@ make * `--input_path`: location of all shard with split sentences, e.g., /shard/0.txt, /shard/1.txt ... * `--output_path`: location of all h5 with token_id, input_mask, segment_ids and masked_lm_positions, e.g., /h5/0.h5, /h5/1.h5 ... -* `--tokenizer_path`: tokenizer path contains huggingface tokenizer.json. Download config.json, special_tokens_map.json, vocab.txt and tokenzier.json from [hfl/chinese-roberta-wwm-ext-large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large/tree/main) +* `--tokenizer_path`: tokenizer path contains huggingface tokenizer.json. Download config.json, special_tokens_map.json, vocab.txt and tokenizer.json from [hfl/chinese-roberta-wwm-ext-large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large/tree/main) * `--backend`: python or c++, **specifies c++ can obtain faster preprocess speed** * `--dupe_factor`: specifies how many times the preprocessor repeats to create the input from the same article/document * `--worker`: number of process diff --git a/examples/community/roberta/pretraining/README.md b/examples/community/roberta/pretraining/README.md index c248fc1f5708..8abe48aa6c0e 100644 --- a/examples/community/roberta/pretraining/README.md +++ b/examples/community/roberta/pretraining/README.md @@ -13,7 +13,7 @@ bash run_pretrain.sh * `--bert_config`: config.json which represent model * `--mlm`: model type of backbone, bert or deberta_v2 -2. if resume training from earylier checkpoint, run the script below. +2. if resume training from earlier checkpoint, run the script below. 
```shell bash run_pretrain_resume.sh diff --git a/examples/community/roberta/pretraining/arguments.py b/examples/community/roberta/pretraining/arguments.py index 40210c4b1be7..e0702ceb59b0 100644 --- a/examples/community/roberta/pretraining/arguments.py +++ b/examples/community/roberta/pretraining/arguments.py @@ -46,7 +46,7 @@ def parse_args(): type=int, default=1, help="This param makes sure that a certain task is repeated for this time steps to \ - optimise on the back propogation speed with APEX's DistributedDataParallel") + optimize on the back propagation speed with APEX's DistributedDataParallel") parser.add_argument("--max_predictions_per_seq", "--max_pred", default=80, @@ -73,12 +73,12 @@ def parse_args(): help="location of saving checkpoint, which contains model and optimizer") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--vscode_debug', action='store_true', help="use vscode to debug") - parser.add_argument('--load_pretrain_model', default='', type=str, help="location of model's checkpoin") + parser.add_argument('--load_pretrain_model', default='', type=str, help="location of model's checkpoint") parser.add_argument( '--load_optimizer_lr', default='', type=str, - help="location of checkpoint, which contains optimerzier, learning rate, epoch, shard and global_step") + help="location of checkpoint, which contains optimizer, learning rate, epoch, shard and global_step") parser.add_argument('--resume_train', action='store_true', help="whether resume training from a early checkpoint") parser.add_argument('--mlm', default='bert', type=str, help="model type, bert or deberta") parser.add_argument('--checkpoint_activations', action='store_true', help="whether to use gradient checkpointing") diff --git a/examples/community/roberta/pretraining/model/bert.py b/examples/community/roberta/pretraining/model/bert.py index a5da1bea6f65..abdf925d0540 100644 --- a/examples/community/roberta/pretraining/model/bert.py +++ b/examples/community/roberta/pretraining/model/bert.py @@ -327,7 +327,7 @@ def forward( attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) - relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhld,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) diff --git a/examples/community/roberta/pretraining/run_pretraining.py b/examples/community/roberta/pretraining/run_pretraining.py index 9a6ffc1c5661..a72bdf775644 100644 --- a/examples/community/roberta/pretraining/run_pretraining.py +++ b/examples/community/roberta/pretraining/run_pretraining.py @@ -78,7 +78,7 @@ def main(): default_pg=shard_pg): config, model, numel = get_model(args, logger) - # asign running configurations + # assign running configurations gemini_config = None if args.distplan.startswith("CAI_ZeRO"): optim_config = dict(reduce_bucket_size=12 * 1024 * 1024, overlap_communication=True, verbose=True) diff --git a/examples/community/roberta/pretraining/utils/exp_util.py b/examples/community/roberta/pretraining/utils/exp_util.py index 0cdb56bad031..4a2c9d8a47ad 100644 --- a/examples/community/roberta/pretraining/utils/exp_util.py +++ 
b/examples/community/roberta/pretraining/utils/exp_util.py @@ -97,7 +97,7 @@ def throughput_calculator(numel, args, config, iteration_time, total_iterations, def synchronize(): if not torch.distributed.is_available(): return - if not torch.distributed.is_intialized(): + if not torch.distributed.is_initialized(): return world_size = torch.distributed.get_world_size() if world_size == 1: diff --git a/examples/community/roberta/pretraining/utils/global_vars.py b/examples/community/roberta/pretraining/utils/global_vars.py index 7b0c5a2be73d..9eef19e71614 100644 --- a/examples/community/roberta/pretraining/utils/global_vars.py +++ b/examples/community/roberta/pretraining/utils/global_vars.py @@ -110,7 +110,7 @@ def write(self, names, writer, iteration, normalizer=1.0, reset=False): """Write timers to a tensorboard writer""" # currently when using add_scalars, # torch.utils.add_scalars makes each timer its own run, which - # polutes the runs list, so we just add each as a scalar + # pollutes the runs list, so we just add each as a scalar assert normalizer > 0.0 for name in names: value = self.timers[name].elapsed(reset=reset) / normalizer diff --git a/examples/images/dreambooth/README.md b/examples/images/dreambooth/README.md index 7c117d841e24..ba4c1a71034a 100644 --- a/examples/images/dreambooth/README.md +++ b/examples/images/dreambooth/README.md @@ -37,7 +37,7 @@ The `text` include the tag `Teyvat`, `Name`,`Element`, `Weapon`, `Region`, `Mode ## Training -We provide the script `colossalai.sh` to run the training task with colossalai. Meanwhile, we also provided traditional training process of dreambooth, `dreambooth.sh`, for possible comparation. For instance, the script of training process for [stable-diffusion-v1-4] model can be modified into: +We provide the script `colossalai.sh` to run the training task with colossalai. Meanwhile, we also provide the traditional DreamBooth training script, `dreambooth.sh`, for comparison. For instance, the training script for the [stable-diffusion-v1-4] model can be modified as follows: ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" @@ -92,6 +92,29 @@ torchrun --nproc_per_node 2 train_dreambooth_colossalai.py \ --placement="cuda" ```
+## New API
+We have modified our previous implementation of Dreambooth with our new Booster API, which offers a more flexible and efficient way to train your model. The new API is more user-friendly and easier to use. You can find the new API in `train_dreambooth_colossalai.py`.
+We also offer a shell script, `test_ci.sh`, that walks you through all of the booster plugins.
+For more information about the Booster API, please refer to https://colossalai.org/docs/basics/booster_api/.
+
+## Performance
+
+| Strategy       | #GPU | Batch Size | GPU RAM(GB) | speedup |
+|:--------------:|:----:|:----------:|:-----------:|:-------:|
+| Traditional    | 1    | 16         | oom         | \       |
+| Traditional    | 1    | 8          | 61.81       | 1       |
+| torch_ddp      | 4    | 16         | oom         | \       |
+| torch_ddp      | 4    | 8          | 41.97       | 0.97    |
+| gemini         | 4    | 16         | 53.29       | \       |
+| gemini         | 4    | 8          | 29.36       | 2.00    |
+| low_level_zero | 4    | 16         | 52.80       | \       |
+| low_level_zero | 4    | 8          | 28.87       | 2.02    |
+
+The evaluation is performed on 4 Nvidia A100 GPUs with 80GB memory each, with GPU 0 & 1, 2 & 3 connected with NVLink.
+We fine-tuned the [stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) model at 512x512 resolution on the [Teyvat](https://huggingface.co/datasets/Fazzie/Teyvat) dataset and compared
+the memory cost and the throughput for the plugins.
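As a companion to the New API section above, the following is a minimal sketch of the booster training-loop pattern that the reworked script follows. It uses a toy module and hypothetical tensor shapes in place of the real UNet/text-encoder stack, so it illustrates the API flow rather than the actual contents of `train_dreambooth_colossalai.py`:

```python
import torch
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin
from colossalai.nn.optimizer import HybridAdam

# launched via torchrun, as in colossalai.sh
colossalai.launch_from_torch(config={})

# stand-in for the UNet; the real script builds the diffusion components instead
model = torch.nn.Linear(512, 512).cuda()
optimizer = HybridAdam(model.parameters(), lr=5e-6)

plugin = GeminiPlugin()            # or TorchDDPPlugin() / LowLevelZeroPlugin()
booster = Booster(plugin=plugin)
model, optimizer, *_ = booster.boost(model, optimizer)

for step in range(10):
    batch = torch.randn(4, 512, device='cuda')    # hypothetical batch
    loss = model(batch).pow(2).mean()             # placeholder loss
    booster.backward(loss, optimizer)             # replaces loss.backward()
    optimizer.step()
    optimizer.zero_grad()
```

The `--plugin` flag added to the script selects among these plugins in the same way.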
+ + ## Inference Once you have trained a model using above command, the inference can be done simply using the `StableDiffusionPipeline`. Make sure to include the `identifier`(e.g. `--instance_prompt="a photo of sks dog" ` in the above example) in your prompt. diff --git a/examples/images/dreambooth/colossalai.sh b/examples/images/dreambooth/colossalai.sh index 227d8b8bdb04..db4562dbc921 100755 --- a/examples/images/dreambooth/colossalai.sh +++ b/examples/images/dreambooth/colossalai.sh @@ -1,22 +1,18 @@ -export MODEL_NAME= -export INSTANCE_DIR= -export CLASS_DIR="path-to-class-images" -export OUTPUT_DIR="path-to-save-model" - -HF_DATASETS_OFFLINE=1 -TRANSFORMERS_OFFLINE=1 +HF_DATASETS_OFFLINE=1 +TRANSFORMERS_OFFLINE=1 DIFFUSERS_OFFLINE=1 -torchrun --nproc_per_node 2 --master_port=25641 train_dreambooth_colossalai.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --output_dir=$OUTPUT_DIR \ - --instance_prompt="a photo of a dog" \ +torchrun --nproc_per_node 4 --standalone train_dreambooth_colossalai.py \ + --pretrained_model_name_or_path="/data/dreambooth/diffuser/stable-diffusion-v1-4" \ + --instance_data_dir="/data/dreambooth/Teyvat/data" \ + --output_dir="./weight_output" \ + --instance_prompt="a picture of a dog" \ --resolution=512 \ + --plugin="gemini" \ --train_batch_size=1 \ - --gradient_accumulation_steps=1 \ --learning_rate=5e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --num_class_images=200 \ - --placement="cuda" \ + --test_run=True \ + --placement="auto" \ diff --git a/examples/images/dreambooth/dreambooth.sh b/examples/images/dreambooth/dreambooth.sh index e063bc8279c5..f6b8f5e1b87e 100644 --- a/examples/images/dreambooth/dreambooth.sh +++ b/examples/images/dreambooth/dreambooth.sh @@ -1,7 +1,7 @@ python train_dreambooth.py \ - --pretrained_model_name_or_path= ## Your Model Path \ - --instance_data_dir= ## Your Training Input Pics Path \ - --output_dir="path-to-save-model" \ + --pretrained_model_name_or_path="/data/dreambooth/diffuser/stable-diffusion-v1-4" \ + --instance_data_dir="/data/dreambooth/Teyvat/data" \ + --output_dir="./weight_output" \ --instance_prompt="a photo of a dog" \ --resolution=512 \ --train_batch_size=1 \ diff --git a/examples/images/dreambooth/test_ci.sh b/examples/images/dreambooth/test_ci.sh index e69de29bb2d1..21f45adae2a0 100644 --- a/examples/images/dreambooth/test_ci.sh +++ b/examples/images/dreambooth/test_ci.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -xe +pip install -r requirements.txt + +HF_DATASETS_OFFLINE=1 +TRANSFORMERS_OFFLINE=1 +DIFFUSERS_OFFLINE=1 + +# "torch_ddp" "torch_ddp_fp16" "low_level_zero" +for plugin in "gemini"; do + torchrun --nproc_per_node 4 --standalone train_dreambooth_colossalai.py \ + --pretrained_model_name_or_path="/data/dreambooth/diffuser/stable-diffusion-v1-4" \ + --instance_data_dir="/data/dreambooth/Teyvat/data" \ + --output_dir="./weight_output" \ + --instance_prompt="a picture of a dog" \ + --resolution=512 \ + --plugin=$plugin \ + --train_batch_size=1 \ + --learning_rate=5e-6 \ + --lr_scheduler="constant" \ + --lr_warmup_steps=0 \ + --test_run=True \ + --num_class_images=200 \ + --placement="auto" # "cuda" +done diff --git a/examples/images/dreambooth/train_dreambooth_colossalai.py b/examples/images/dreambooth/train_dreambooth_colossalai.py index d07febea0a84..888b28de8306 100644 --- a/examples/images/dreambooth/train_dreambooth_colossalai.py +++ b/examples/images/dreambooth/train_dreambooth_colossalai.py @@ -4,6 +4,7 @@ import os from pathlib import Path from 
typing import Optional +import shutil import torch import torch.nn.functional as F @@ -21,9 +22,12 @@ from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context as gpc from colossalai.logging import disable_existing_loggers, get_dist_logger +from colossalai.nn.optimizer import HybridAdam from colossalai.utils import get_current_device -from colossalai.zero import ColoInitContext, GeminiAdamOptimizer +from colossalai.zero import ColoInitContext from colossalai.zero.gemini import get_static_torch_model +from colossalai.booster import Booster +from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin disable_existing_loggers() logger = get_dist_logger() @@ -58,6 +62,13 @@ def parse_args(input_args=None): required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) + parser.add_argument( + "--externel_unet_path", + type=str, + default=None, + required=False, + help="Path to the externel unet model.", + ) parser.add_argument( "--revision", type=str, @@ -187,12 +198,19 @@ def parse_args(input_args=None): parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") + parser.add_argument("--test_run", default=False, help="Whether to use a smaller dataset for test run.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) + parser.add_argument('-p', + '--plugin', + type=str, + default='torch_ddp', + choices=['torch_ddp', 'torch_ddp_fp16', 'gemini', 'low_level_zero'], + help="plugin to use") parser.add_argument( "--logging_dir", type=str, @@ -250,6 +268,7 @@ def __init__( class_prompt=None, size=512, center_crop=False, + test=False, ): self.size = size self.center_crop = center_crop @@ -260,6 +279,8 @@ def __init__( raise ValueError("Instance images root doesn't exists.") self.instance_images_path = list(Path(instance_data_root).iterdir()) + if test: + self.instance_images_path = self.instance_images_path[:10] self.num_instance_images = len(self.instance_images_path) self.instance_prompt = instance_prompt self._length = self.num_instance_images @@ -339,18 +360,6 @@ def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: return f"{organization}/{model_id}" -# Gemini + ZeRO DDP -def gemini_zero_dpp(model: torch.nn.Module, placement_policy: str = "auto"): - from colossalai.nn.parallel import GeminiDDP - - model = GeminiDDP(model, - device=get_current_device(), - placement_policy=placement_policy, - pin_memory=True, - search_range_mb=64) - return model - - def main(args): if args.seed is None: colossalai.launch_from_torch(config={}) @@ -392,7 +401,7 @@ def main(args): images = pipeline(example["prompt"]).images for i, image in enumerate(images): - hash_image = hashlib.sha1(image.tobytes()).hexdigest() + hash_image = hashlib.sha256(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) @@ -452,12 +461,18 @@ def main(args): revision=args.revision, ) - logger.info(f"Loading UNet2DConditionModel from {args.pretrained_model_name_or_path}", ranks=[0]) - with ColoInitContext(device=get_current_device()): + + if args.externel_unet_path is None: + 
logger.info(f"Loading UNet2DConditionModel from {args.pretrained_model_name_or_path}", ranks=[0]) unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, - subfolder="unet", - revision=args.revision, - low_cpu_mem_usage=False) + subfolder="unet", + revision=args.revision, + low_cpu_mem_usage=False) + else: + logger.info(f"Loading UNet2DConditionModel from {args.externel_unet_path}", ranks=[0]) + unet = UNet2DConditionModel.from_pretrained(args.externel_unet_path, + revision=args.revision, + low_cpu_mem_usage=False) vae.requires_grad_(False) text_encoder.requires_grad_(False) @@ -468,10 +483,22 @@ def main(args): if args.scale_lr: args.learning_rate = args.learning_rate * args.train_batch_size * world_size - unet = gemini_zero_dpp(unet, args.placement) + # Use Booster API to use Gemini/Zero with ColossalAI + + booster_kwargs = {} + if args.plugin == 'torch_ddp_fp16': + booster_kwargs['mixed_precision'] = 'fp16' + if args.plugin.startswith('torch_ddp'): + plugin = TorchDDPPlugin() + elif args.plugin == 'gemini': + plugin = GeminiPlugin(placement_policy=args.placement, strict_ddp_mode=True, initial_scale=2 ** 5) + elif args.plugin == 'low_level_zero': + plugin = LowLevelZeroPlugin(initial_scale=2 ** 5) + + booster = Booster(plugin=plugin, **booster_kwargs) # config optimizer for colossalai zero - optimizer = GeminiAdamOptimizer(unet, lr=args.learning_rate, initial_scale=2**5, clipping_norm=args.max_grad_norm) + optimizer = HybridAdam(unet.parameters(), lr=args.learning_rate, initial_scale=2**5, clipping_norm=args.max_grad_norm) # load noise_scheduler noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") @@ -486,6 +513,7 @@ def main(args): tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, + test=args.test_run ) def collate_fn(examples): @@ -554,6 +582,8 @@ def collate_fn(examples): # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) + unet, optimizer, _, _, lr_scheduler = booster.boost(unet, optimizer, lr_scheduler=lr_scheduler) + # Train! 
total_batch_size = args.train_batch_size * world_size @@ -642,36 +672,24 @@ def collate_fn(examples): if global_step % args.save_steps == 0: torch.cuda.synchronize() - torch_unet = get_static_torch_model(unet) + save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") + booster.save_model(unet, os.path.join(save_path, "diffusion_pytorch_model.bin")) if local_rank == 0: - pipeline = DiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=torch_unet, - revision=args.revision, - ) - save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") - pipeline.save_pretrained(save_path) + if not os.path.exists(os.path.join(save_path, "config.json")): + shutil.copy(os.path.join(args.pretrained_model_name_or_path, "unet/config.json"), save_path) logger.info(f"Saving model checkpoint to {save_path}", ranks=[0]) if global_step >= args.max_train_steps: break - torch.cuda.synchronize() - unet = get_static_torch_model(unet) + booster.save_model(unet, os.path.join(args.output_dir, "diffusion_pytorch_model.bin")) + logger.info(f"Saving model checkpoint to {args.output_dir} on rank {local_rank}") if local_rank == 0: - pipeline = DiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=unet, - revision=args.revision, - ) - - pipeline.save_pretrained(args.output_dir) - logger.info(f"Saving model checkpoint to {args.output_dir}", ranks=[0]) - + if not os.path.exists(os.path.join(args.output_dir, "config.json")): + shutil.copy(os.path.join(args.pretrained_model_name_or_path, "unet/config.json"), args.output_dir) if args.push_to_hub: repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True) - if __name__ == "__main__": args = parse_args() main(args) diff --git a/examples/images/dreambooth/train_dreambooth_colossalai_lora.py b/examples/images/dreambooth/train_dreambooth_colossalai_lora.py index 6715b473a567..dce65ff514b7 100644 --- a/examples/images/dreambooth/train_dreambooth_colossalai_lora.py +++ b/examples/images/dreambooth/train_dreambooth_colossalai_lora.py @@ -4,6 +4,7 @@ import os from pathlib import Path from typing import Optional +import shutil import torch import torch.nn.functional as F @@ -23,9 +24,12 @@ from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context as gpc from colossalai.logging import disable_existing_loggers, get_dist_logger +from colossalai.nn.optimizer import HybridAdam from colossalai.utils import get_current_device from colossalai.zero import ColoInitContext, GeminiAdamOptimizer from colossalai.zero.gemini import get_static_torch_model +from colossalai.booster import Booster +from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin disable_existing_loggers() logger = get_dist_logger() @@ -60,6 +64,13 @@ def parse_args(input_args=None): required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) + parser.add_argument( + "--externel_unet_path", + type=str, + default=None, + required=False, + help="Path to the externel unet model.", + ) parser.add_argument( "--revision", type=str, @@ -195,6 +206,12 @@ def parse_args(input_args=None): default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) + parser.add_argument('-p', + '--plugin', + type=str, + default='torch_ddp', + choices=['torch_ddp', 'torch_ddp_fp16', 'gemini', 'low_level_zero'], + help="plugin to use") parser.add_argument( "--logging_dir", type=str, @@ -341,18 +358,6 @@ def 
get_full_repo_name(model_id: str, organization: Optional[str] = None, token: return f"{organization}/{model_id}" -# Gemini + ZeRO DDP -def gemini_zero_dpp(model: torch.nn.Module, placement_policy: str = "auto"): - from colossalai.nn.parallel import GeminiDDP - - model = GeminiDDP(model, - device=get_current_device(), - placement_policy=placement_policy, - pin_memory=True, - search_range_mb=64) - return model - - def main(args): if args.seed is None: colossalai.launch_from_torch(config={}) @@ -394,7 +399,7 @@ def main(args): images = pipeline(example["prompt"]).images for i, image in enumerate(images): - hash_image = hashlib.sha1(image.tobytes()).hexdigest() + hash_image = hashlib.sha256(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) @@ -454,32 +459,42 @@ def main(args): revision=args.revision, ) - logger.info(f"Loading UNet2DConditionModel from {args.pretrained_model_name_or_path}", ranks=[0]) - with ColoInitContext(device=get_current_device()): + + if args.externel_unet_path is None: + logger.info(f"Loading UNet2DConditionModel from {args.pretrained_model_name_or_path}", ranks=[0]) unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, - subfolder="unet", - revision=args.revision, - low_cpu_mem_usage=False) - unet.requires_grad_(False) - - # Set correct lora layers - lora_attn_procs = {} - for name in unet.attn_processors.keys(): - cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim - if name.startswith("mid_block"): - hidden_size = unet.config.block_out_channels[-1] - elif name.startswith("up_blocks"): - block_id = int(name[len("up_blocks.")]) - hidden_size = list(reversed(unet.config.block_out_channels))[block_id] - elif name.startswith("down_blocks"): - block_id = int(name[len("down_blocks.")]) - hidden_size = unet.config.block_out_channels[block_id] - - lora_attn_procs[name] = LoRACrossAttnProcessor(hidden_size=hidden_size, - cross_attention_dim=cross_attention_dim) - - unet.set_attn_processor(lora_attn_procs) - lora_layers = AttnProcsLayers(unet.attn_processors) + subfolder="unet", + revision=args.revision, + low_cpu_mem_usage=False) + else: + logger.info(f"Loading UNet2DConditionModel from {args.externel_unet_path}", ranks=[0]) + unet = UNet2DConditionModel.from_pretrained(args.externel_unet_path, + revision=args.revision, + low_cpu_mem_usage=False) + unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, + subfolder="unet", + revision=args.revision, + low_cpu_mem_usage=False) + unet.requires_grad_(False) + + # Set correct lora layers + lora_attn_procs = {} + for name in unet.attn_processors.keys(): + cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim + if name.startswith("mid_block"): + hidden_size = unet.config.block_out_channels[-1] + elif name.startswith("up_blocks"): + block_id = int(name[len("up_blocks.")]) + hidden_size = list(reversed(unet.config.block_out_channels))[block_id] + elif name.startswith("down_blocks"): + block_id = int(name[len("down_blocks.")]) + hidden_size = unet.config.block_out_channels[block_id] + + lora_attn_procs[name] = LoRACrossAttnProcessor(hidden_size=hidden_size, + cross_attention_dim=cross_attention_dim) + + unet.set_attn_processor(lora_attn_procs) + lora_layers = AttnProcsLayers(unet.attn_processors) vae.requires_grad_(False) text_encoder.requires_grad_(False) @@ -490,10 +505,22 @@ def main(args): 
if args.scale_lr: args.learning_rate = args.learning_rate * args.train_batch_size * world_size - unet = gemini_zero_dpp(unet, args.placement) + # Use Booster API to use Gemini/Zero with ColossalAI + + booster_kwargs = {} + if args.plugin == 'torch_ddp_fp16': + booster_kwargs['mixed_precision'] = 'fp16' + if args.plugin.startswith('torch_ddp'): + plugin = TorchDDPPlugin() + elif args.plugin == 'gemini': + plugin = GeminiPlugin(placement_policy='cuda', strict_ddp_mode=True, initial_scale=2 ** 5) + elif args.plugin == 'low_level_zero': + plugin = LowLevelZeroPlugin(initial_scale=2 ** 5) + + booster = Booster(plugin=plugin, **booster_kwargs) # config optimizer for colossalai zero - optimizer = GeminiAdamOptimizer(unet, lr=args.learning_rate, initial_scale=2**5, clipping_norm=args.max_grad_norm) + optimizer = HybridAdam(unet.parameters(), lr=args.learning_rate, initial_scale=2**5, clipping_norm=args.max_grad_norm) # load noise_scheduler noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") @@ -576,6 +603,8 @@ def collate_fn(examples): # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) + unet, optimizer, _, _, lr_scheduler = booster.boost(unet, optimizer, lr_scheduler=lr_scheduler) + # Train! total_batch_size = args.train_batch_size * world_size @@ -664,27 +693,24 @@ def collate_fn(examples): if global_step % args.save_steps == 0: torch.cuda.synchronize() - torch_unet = get_static_torch_model(unet) + save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") + booster.save_model(unet, os.path.join(save_path, "diffusion_pytorch_model.bin")) if local_rank == 0: - save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") - torch_unet = torch_unet.to(torch.float32) - torch_unet.save_attn_procs(save_path) + if not os.path.exists(os.path.join(save_path, "config.json")): + shutil.copy(os.path.join(args.pretrained_model_name_or_path, "unet/config.json"), save_path) logger.info(f"Saving model checkpoint to {save_path}", ranks=[0]) if global_step >= args.max_train_steps: break - torch.cuda.synchronize() - torch_unet = get_static_torch_model(unet) + booster.save_model(unet, os.path.join(args.output_dir, "diffusion_pytorch_model.bin")) + logger.info(f"Saving model checkpoint to {args.output_dir} on rank {local_rank}") if local_rank == 0: - torch_unet = torch_unet.to(torch.float32) - torch_unet.save_attn_procs(save_path) - logger.info(f"Saving model checkpoint to {args.output_dir}", ranks=[0]) - + if not os.path.exists(os.path.join(args.output_dir, "config.json")): + shutil.copy(os.path.join(args.pretrained_model_name_or_path, "unet/config.json"), args.output_dir) if args.push_to_hub: repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True) - if __name__ == "__main__": args = parse_args() main(args) diff --git a/examples/images/vit/README.md b/examples/images/vit/README.md index 4423d85d19e0..7c4147b76457 100644 --- a/examples/images/vit/README.md +++ b/examples/images/vit/README.md @@ -1,61 +1,28 @@ -# Vision Transformer with ColoTensor +## Overview -# Overview +Vision Transformer is a class of Transformer model tailored for computer vision tasks. It was first proposed in paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) and achieved SOTA results on various tasks at that time. -In this example, we will run Vision Transformer with ColoTensor. 
+In our example, we are using pretrained weights of ViT loaded from HuggingFace. +We adapt the ViT training code to ColossalAI by leveraging [Boosting API](https://colossalai.org/docs/basics/booster_api) loaded with a chosen plugin, where each plugin corresponds to a specific kind of training strategy. This example supports plugins including TorchDDPPlugin, LowLevelZeroPlugin, and GeminiPlugin. -We use model **ViTForImageClassification** from Hugging Face [Link](https://huggingface.co/docs/transformers/model_doc/vit) for unit test. -You can change world size or decide whether use DDP in our code. +## Run Demo -We use model **vision_transformer** from timm [Link](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py) for training example. - -(2022/6/28) The default configuration now supports 2DP+2TP with gradient accumulation and checkpoint support. Zero is not supported at present. - -# Requirement - -Install colossalai version >= 0.1.11 - -## Unit test -To run unit test, you should install pytest, transformers with: -```shell -pip install pytest transformers +By running the following script: +```bash +bash run_demo.sh ``` +You will finetune a a [ViT-base](https://huggingface.co/google/vit-base-patch16-224) model on this [dataset](https://huggingface.co/datasets/beans), with more than 8000 images of bean leaves. This dataset is for image classification task and there are 3 labels: ['angular_leaf_spot', 'bean_rust', 'healthy']. -## Training example -To run training example with ViT-S, you should install **NVIDIA DALI** from [Link](https://docs.nvidia.com/deeplearning/dali/user-guide/docs/installation.html) for dataloader support. -You also need to install timm and titans for model/dataloader support with: -```shell -pip install timm titans -``` +The script can be modified if you want to try another set of hyperparameters or change to another ViT model with different size. -### Data preparation -You can download the ImageNet dataset from the [ImageNet official website](https://www.image-net.org/download.php). You should get the raw images after downloading the dataset. As we use **NVIDIA DALI** to read data, we use the TFRecords dataset instead of raw Imagenet dataset. This offers better speedup to IO. If you don't have TFRecords dataset, follow [imagenet-tools](https://github.com/ver217/imagenet-tools) to build one. +The demo code refers to this [blog](https://huggingface.co/blog/fine-tune-vit). -Before you start training, you need to set the environment variable `DATA` so that the script knows where to fetch the data for DALI dataloader. -```shell -export DATA=/path/to/ILSVRC2012 -``` -# How to run +## Run Benchmark -## Unit test -In your terminal -```shell -pytest test_vit.py +You can run benchmark for ViT model by running the following script: +```bash +bash run_benchmark.sh ``` - -This will evaluate models with different **world_size** and **use_ddp**. - -## Training example -Modify the settings in run.sh according to your environment. -For example, if you set `--nproc_per_node=8` in `run.sh` and `TP_WORLD_SIZE=2` in your config file, -data parallel size will be automatically calculated as 4. -Thus, the parallel strategy is set to 4DP+2TP. - -Then in your terminal -```shell -sh run.sh -``` - -This will start ViT-S training with ImageNet. +The script will test performance (throughput & peak memory usage) for each combination of hyperparameters. You can also play with this script to configure your own set of hyperparameters for testing. 
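For instance, a single benchmark run on an assumed 2-GPU machine with the `low_level_zero` plugin could be launched as below; all flag values are illustrative and correspond to the arguments defined in `args.py`.

```bash
torchrun \
  --standalone \
  --nproc_per_node 2 \
  vit_benchmark.py \
  --model_name_or_path "google/vit-base-patch16-224" \
  --plugin "low_level_zero" \
  --batch_size 32 \
  --max_train_steps 20
```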
\ No newline at end of file diff --git a/examples/images/vit/args.py b/examples/images/vit/args.py new file mode 100644 index 000000000000..e4a873a9eb52 --- /dev/null +++ b/examples/images/vit/args.py @@ -0,0 +1,124 @@ +from colossalai import get_default_parser + +def parse_demo_args(): + + parser = get_default_parser() + parser.add_argument( + "--model_name_or_path", + type=str, + default="google/vit-base-patch16-224", + help="Path to pretrained model or model identifier from huggingface.co/models." + ) + parser.add_argument( + "--output_path", + type=str, + default="./output_model.bin", + help="The path of your saved model after finetuning." + ) + parser.add_argument( + "--plugin", + type=str, + default="gemini", + help="Plugin to use. Valid plugins include 'torch_ddp','torch_ddp_fp16','gemini','low_level_zero'." + ) + parser.add_argument( + "--num_epoch", + type=int, + default=3, + help="Number of epochs." + ) + parser.add_argument( + "--batch_size", + type=int, + default=32, + help="Batch size (per dp group) for the training dataloader." + ) + parser.add_argument( + "--learning_rate", + type=float, + default=3e-4, + help="Initial learning rate (after the potential warmup period) to use." + ) + parser.add_argument( + "--warmup_ratio", + type=float, + default=0.3, + help="Ratio of warmup steps against total training steps." + ) + parser.add_argument( + "--weight_decay", + type=float, + default=0.1, + help="Weight decay to use." + ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="A seed for reproducible training." + ) + + args = parser.parse_args() + return args + +def parse_benchmark_args(): + + parser = get_default_parser() + + parser.add_argument( + "--model_name_or_path", + type=str, + default="google/vit-base-patch16-224", + help="Path to a pretrained model or model identifier from huggingface.co/models." + ) + parser.add_argument( + "--plugin", + type=str, + default="gemini", + help="Plugin to use. Valid plugins include 'torch_ddp','torch_ddp_fp16','gemini','low_level_zero'." + ) + parser.add_argument( + "--batch_size", + type=int, + default=8, + help="Batch size (per dp group) for the training dataloader." + ) + parser.add_argument( + "--num_labels", + type=int, + default=10, + help="Number of labels for classification." + ) + parser.add_argument( + "--learning_rate", + type=float, + default=5e-5, + help="Initial learning rate (after the potential warmup period) to use." + ) + parser.add_argument( + "--weight_decay", + type=float, + default=0.0, + help="Weight decay to use." + ) + parser.add_argument( + "--max_train_steps", + type=int, + default=20, + help="Total number of training steps to perform." + ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="A seed for reproducible training." + ) + parser.add_argument( + "--mem_cap", + type=int, + default=0, + help="Limit on the usage of space for each GPU (in GB)." 
+ ) + args = parser.parse_args() + + return args \ No newline at end of file diff --git a/examples/images/vit/configs/vit_1d_tp2.py b/examples/images/vit/configs/vit_1d_tp2.py deleted file mode 100644 index fbf399f2e50d..000000000000 --- a/examples/images/vit/configs/vit_1d_tp2.py +++ /dev/null @@ -1,32 +0,0 @@ -from colossalai.amp import AMP_TYPE - -# hyperparameters -# BATCH_SIZE is as per GPU -# global batch size = BATCH_SIZE x data parallel size -BATCH_SIZE = 256 -LEARNING_RATE = 3e-3 -WEIGHT_DECAY = 0.3 -NUM_EPOCHS = 300 -WARMUP_EPOCHS = 32 - -# model config -IMG_SIZE = 224 -PATCH_SIZE = 16 -HIDDEN_SIZE = 384 -DEPTH = 12 -NUM_HEADS = 6 -MLP_RATIO = 4 -NUM_CLASSES = 1000 -CHECKPOINT = False -SEQ_LENGTH = (IMG_SIZE // PATCH_SIZE)**2 + 1 # add 1 for cls token - -USE_DDP = True -TP_WORLD_SIZE = 2 -TP_TYPE = 'row' -parallel = dict(tensor=dict(mode="1d", size=TP_WORLD_SIZE),) - -fp16 = dict(mode=AMP_TYPE.NAIVE) -clip_grad_norm = 1.0 -gradient_accumulation = 8 - -LOG_PATH = "./log" diff --git a/examples/images/vit/configs/vit_1d_tp2_ci.py b/examples/images/vit/configs/vit_1d_tp2_ci.py deleted file mode 100644 index e491e4ada45e..000000000000 --- a/examples/images/vit/configs/vit_1d_tp2_ci.py +++ /dev/null @@ -1,32 +0,0 @@ -from colossalai.amp import AMP_TYPE - -# hyperparameters -# BATCH_SIZE is as per GPU -# global batch size = BATCH_SIZE x data parallel size -BATCH_SIZE = 8 -LEARNING_RATE = 3e-3 -WEIGHT_DECAY = 0.3 -NUM_EPOCHS = 3 -WARMUP_EPOCHS = 1 - -# model config -IMG_SIZE = 224 -PATCH_SIZE = 16 -HIDDEN_SIZE = 32 -DEPTH = 2 -NUM_HEADS = 4 -MLP_RATIO = 4 -NUM_CLASSES = 10 -CHECKPOINT = False -SEQ_LENGTH = (IMG_SIZE // PATCH_SIZE)**2 + 1 # add 1 for cls token - -USE_DDP = True -TP_WORLD_SIZE = 2 -TP_TYPE = 'row' -parallel = dict(tensor=dict(mode="1d", size=TP_WORLD_SIZE),) - -fp16 = dict(mode=AMP_TYPE.NAIVE) -clip_grad_norm = 1.0 -gradient_accumulation = 2 - -LOG_PATH = "./log_ci" diff --git a/examples/images/vit/data.py b/examples/images/vit/data.py new file mode 100644 index 000000000000..00fde707b173 --- /dev/null +++ b/examples/images/vit/data.py @@ -0,0 +1,32 @@ +import torch +from torch.utils.data import Dataset +from datasets import load_dataset + +class BeansDataset(Dataset): + + def __init__(self, image_processor, split='train'): + + super().__init__() + self.image_processor = image_processor + self.ds = load_dataset('beans')[split] + self.label_names = self.ds.features['labels'].names + self.num_labels = len(self.label_names) + self.inputs = [] + for example in self.ds: + self.inputs.append(self.process_example(example)) + + def __len__(self): + return len(self.inputs) + + def __getitem__(self, idx): + return self.inputs[idx] + + def process_example(self, example): + input = self.image_processor(example['image'], return_tensors='pt') + input['labels'] = example['labels'] + return input + + +def beans_collator(batch): + return {'pixel_values': torch.cat([data['pixel_values'] for data in batch], dim=0), + 'labels': torch.tensor([data['labels'] for data in batch], dtype=torch.int64)} diff --git a/examples/images/vit/requirements.txt b/examples/images/vit/requirements.txt index 1f69794ebe70..edad87ca380f 100644 --- a/examples/images/vit/requirements.txt +++ b/examples/images/vit/requirements.txt @@ -1,8 +1,6 @@ colossalai >= 0.1.12 torch >= 1.8.1 numpy>=1.24.1 -timm>=0.6.12 -titans>=0.0.7 tqdm>=4.61.2 -transformers>=4.25.1 -nvidia-dali-cuda110>=1.8.0 --extra-index-url https://developer.download.nvidia.com/compute/redist +transformers>=4.20.0 +datasets \ No newline at end of file 
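As a usage note for the new `data.py` above: the dataset and collator are meant to be fed into a dataloader. The sketch below builds them the same way `vit_train_demo.py` does, but with a plain PyTorch `DataLoader` and an assumed batch size, just to show the batch layout; the demo script itself uses `plugin.prepare_dataloader(...)` so that sampling is sharded across data-parallel ranks.

```python
from torch.utils.data import DataLoader
from transformers import ViTImageProcessor

from data import BeansDataset, beans_collator

# Processor and dataset, built as in vit_train_demo.py.
image_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
train_dataset = BeansDataset(image_processor, split="train")

# Plain dataloader for illustration; batch size 16 is arbitrary.
loader = DataLoader(train_dataset, batch_size=16, shuffle=True, collate_fn=beans_collator)

batch = next(iter(loader))
# pixel_values: [16, 3, 224, 224] float tensor; labels: [16] int64 tensor.
print(batch["pixel_values"].shape, batch["labels"].shape)
```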
diff --git a/examples/images/vit/run.sh b/examples/images/vit/run.sh deleted file mode 100644 index 84fe58f11a6a..000000000000 --- a/examples/images/vit/run.sh +++ /dev/null @@ -1,15 +0,0 @@ -export DATA=/data/scratch/imagenet/tf_records -export OMP_NUM_THREADS=4 - -# resume -# CUDA_VISIBLE_DEVICES=4,5,6,7 colossalai run \ -# --nproc_per_node 4 train.py \ -# --config configs/vit_1d_tp2.py \ -# --resume_from checkpoint/epoch_10 \ -# --master_port 29598 | tee ./out 2>&1 - -# train -CUDA_VISIBLE_DEVICES=4,5,6,7 colossalai run \ ---nproc_per_node 4 train.py \ ---config configs/vit_1d_tp2.py \ ---master_port 29598 | tee ./out 2>&1 diff --git a/examples/images/vit/run_benchmark.sh b/examples/images/vit/run_benchmark.sh new file mode 100644 index 000000000000..2487bf81ee2b --- /dev/null +++ b/examples/images/vit/run_benchmark.sh @@ -0,0 +1,27 @@ +set -xe +pip install -r requirements.txt + +export BS=8 +export MEMCAP=0 +export GPUNUM=1 + +for BS in 8 32 128 +do +for PLUGIN in "torch_ddp" "torch_ddp_fp16" "low_level_zero" "gemini" +do +for GPUNUM in 1 4 +do + +MODEL_PATH="google/vit-base-patch16-224" +torchrun \ + --standalone \ + --nproc_per_node ${GPUNUM} \ + vit_benchmark.py \ + --model_name_or_path ${MODEL_PATH} \ + --mem_cap ${MEMCAP} \ + --plugin ${PLUGIN} \ + --batch_size ${BS} + +done +done +done diff --git a/examples/images/vit/run_demo.sh b/examples/images/vit/run_demo.sh new file mode 100644 index 000000000000..2d140dd6e423 --- /dev/null +++ b/examples/images/vit/run_demo.sh @@ -0,0 +1,44 @@ +set -xe +pip install -r requirements.txt + +# model name or path +MODEL="google/vit-base-patch16-224" + +# path for saving model +OUTPUT_PATH="./output_model.bin" + +# plugin(training strategy) +# can only be one of "torch_ddp"/"torch_ddp_fp16"/"low_level_zero"/"gemini" +PLUGIN="gemini" + +# number of gpus to use +GPUNUM=4 + +# batch size per gpu +BS=16 + +# learning rate +LR="2e-4" + +# number of epoch +EPOCH=3 + +# weight decay +WEIGHT_DECAY=0.05 + +# ratio of warmup steps +WARMUP_RATIO=0.3 + +# run the script for demo +torchrun \ + --standalone \ + --nproc_per_node ${GPUNUM} \ + vit_train_demo.py \ + --model_name_or_path ${MODEL} \ + --output_path ${OUTPUT_PATH} \ + --plugin ${PLUGIN} \ + --batch_size ${BS} \ + --num_epoch ${EPOCH} \ + --learning_rate ${LR} \ + --weight_decay ${WEIGHT_DECAY} \ + --warmup_ratio ${WARMUP_RATIO} diff --git a/examples/images/vit/test_ci.sh b/examples/images/vit/test_ci.sh index 41d25ee23521..8606015c0397 100644 --- a/examples/images/vit/test_ci.sh +++ b/examples/images/vit/test_ci.sh @@ -1,9 +1,19 @@ -export OMP_NUM_THREADS=4 - +set -xe pip install -r requirements.txt -# train -colossalai run \ ---nproc_per_node 4 train.py \ ---config configs/vit_1d_tp2_ci.py \ ---dummy_data +BS=8 +for PLUGIN in "torch_ddp" "torch_ddp_fp16" "low_level_zero" "gemini" +do +for GPUNUM in 1 4 +do + +torchrun \ + --standalone \ + --nproc_per_node ${GPUNUM} \ + vit_benchmark.py \ + --model_name_or_path "google/vit-base-patch16-224" \ + --plugin ${PLUGIN} \ + --batch_size ${BS} + +done +done diff --git a/examples/images/vit/test_vit.py b/examples/images/vit/test_vit.py deleted file mode 100644 index c0ae35bca871..000000000000 --- a/examples/images/vit/test_vit.py +++ /dev/null @@ -1,160 +0,0 @@ -import os -import random - -import numpy as np -import pytest -import torch -from torch.nn.parallel import DistributedDataParallel as DDP -from vit import get_training_components - -import colossalai -from colossalai.context import ParallelMode -from colossalai.context.parallel_mode import 
ParallelMode -from colossalai.core import global_context as gpc -from colossalai.nn.parallel.data_parallel import ColoDDP -from colossalai.tensor import ComputePattern, ComputeSpec, DistSpecManager, ProcessGroup, ShardSpec -from colossalai.testing import rerun_if_address_is_in_use, spawn -from colossalai.utils.cuda import get_current_device -from colossalai.zero import ColoInitContext - - -def set_seed(seed): - random.seed(seed) - os.environ['PYTHONHASHSEED'] = str(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - torch.backends.cudnn.deterministic = True - - -def tensor_equal(A, B): - return torch.allclose(A, B, rtol=1e-3, atol=1e-1) - - -def tensor_shard_equal(tensor: torch.Tensor, shard: torch.Tensor): - assert tensor.ndim == shard.ndim - if tensor.shape == shard.shape: - return tensor_equal(tensor, shard) - else: - dims_not_eq = torch.nonzero(torch.tensor(tensor.shape) != torch.tensor(shard.shape)) - if dims_not_eq.numel() == 1: - # 1D shard - dim = dims_not_eq.item() - world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D) - rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D) - return tensor_equal(tensor.chunk(world_size, dim)[rank], shard) - else: - raise - - -# Only for all Linear, it's 1d_row split because Linear will be transposed when calculating. -# But for other layers, it's 1d_col split. -# Layernorm is not supported for now. -# patch_embeddings.projection has nn.Conv2d -# https://github.com/huggingface/transformers/blob/dcb08b99f44919425f8ba9be9ddcc041af8ec25e/src/transformers/models/vit/modeling_vit.py#L182 -def init_1d_row_for_linear_weight_spec(model, world_size: int): - pg = ProcessGroup(tp_degree=world_size) - spec = (ShardSpec([-1], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D)) - with DistSpecManager.no_grad(): - for n, p in model.named_parameters(): - if 'weight' in n and 'layernorm' not in n and 'embeddings.patch_embeddings.projection.weight' not in n: - p.set_process_group(pg) - p.set_tensor_spec(*spec) - - -# Similarly, it's col split for Linear but row split for others. 
-def init_1d_col_for_linear_weight_bias_spec(model, world_size: int): - pg = ProcessGroup(tp_degree=world_size) - spec = (ShardSpec([0], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D)) - with DistSpecManager.no_grad(): - for n, p in model.named_parameters(): - if ('weight' in n - or 'bias' in n) and 'layernorm' not in n and 'embeddings.patch_embeddings.projection' not in n: - p.set_process_group(pg) - p.set_tensor_spec(*spec) - - -def check_param_equal(model, torch_model): - for p, torch_p in zip(model.parameters(), torch_model.parameters()): - assert tensor_shard_equal(torch_p, p) - - -def check_grad_equal(model, torch_model): - for p, torch_p in zip(model.parameters(), torch_model.parameters()): - if (torch_p.grad.shape == p.grad.shape): - assert torch.allclose(torch_p.grad, p.grad, rtol=1e-3, atol=2.0) == True - else: - dims_not_eq = torch.nonzero(torch.tensor(torch_p.grad.shape) != torch.tensor(p.grad.shape)) - dim = dims_not_eq.item() - world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D) - rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D) - assert torch.allclose(torch_p.grad.chunk(world_size, dim)[rank], p.grad, rtol=1e-3, atol=2.0) == True - - -def run_vit(init_spec_func, use_ddp): - model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_training_components() - with ColoInitContext(device=get_current_device()): - model = model_builder() - model = model.cuda() - torch_model = model_builder().cuda() - if use_ddp: - model = ColoDDP(model) - torch_model = DDP(torch_model, - device_ids=[gpc.get_global_rank()], - process_group=gpc.get_group(ParallelMode.DATA)) - for torch_p, p in zip(torch_model.parameters(), model.parameters()): - torch_p.data.copy_(p) - - world_size = torch.distributed.get_world_size() - init_spec_func(model, world_size) - - check_param_equal(model, torch_model) - model.train() - torch_model.train() - set_seed(gpc.get_local_rank(ParallelMode.DATA)) - - optimizer = optimizer_class(model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0) - torch_optimizer = optimizer_class(torch_model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0) - - for i, image_dict in enumerate(train_dataloader): - if use_ddp: - model.zero_grad() - else: - optimizer.zero_grad() - logits = model(image_dict['pixel_values']) - torch_logits = torch_model(image_dict['pixel_values']) - assert tensor_equal(torch_logits.logits, logits.logits) - loss = criterion(logits.logits, image_dict['label']) - torch_loss = criterion(torch_logits.logits, image_dict['label']) - if use_ddp: - model.backward(loss) - else: - loss.backward() - torch_loss.backward() - check_grad_equal(model, torch_model) - optimizer.step() - torch_optimizer.step() - check_param_equal(model, torch_model) - break - - -def run_dist(rank, world_size, port, use_ddp): - if use_ddp and world_size == 1: - return - tp_world_size = world_size // 2 if use_ddp else world_size - config = dict(parallel=dict(tensor=dict(mode="1d", size=tp_world_size),)) - colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') - run_vit(init_1d_row_for_linear_weight_spec, use_ddp) - run_vit(init_1d_col_for_linear_weight_bias_spec, use_ddp) - - -@pytest.mark.dist -@pytest.mark.parametrize('world_size', [1, 4]) -@pytest.mark.parametrize('use_ddp', [False, True]) -@rerun_if_address_is_in_use() -def test_vit(world_size, use_ddp): - spawn(run_dist, world_size, use_ddp=use_ddp) - - -if __name__ == '__main__': - test_vit(1, False) diff --git 
a/examples/images/vit/train.py b/examples/images/vit/train.py deleted file mode 100644 index b42cf2bedc6b..000000000000 --- a/examples/images/vit/train.py +++ /dev/null @@ -1,174 +0,0 @@ -import os - -import torch -import torch.distributed as dist -import torch.nn as nn -import torch.nn.functional as F -from timm.models.vision_transformer import _create_vision_transformer -from titans.dataloader.imagenet import build_dali_imagenet -from tqdm import tqdm -from vit import DummyDataLoader - -import colossalai -from colossalai.core import global_context as gpc -from colossalai.logging import disable_existing_loggers, get_dist_logger -from colossalai.nn import CrossEntropyLoss -from colossalai.nn._ops import * -from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR -from colossalai.nn.optimizer import HybridAdam -from colossalai.nn.parallel.data_parallel import ColoDDP -from colossalai.tensor import ComputePattern, ComputeSpec, DistSpecManager, ProcessGroup, ShardSpec -from colossalai.utils import get_current_device -from colossalai.zero import ColoInitContext - - -def init_1d_row_for_linear_weight_spec(model, world_size: int): - pg = ProcessGroup(tp_degree=world_size) - spec = (ShardSpec([-1], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D)) - with DistSpecManager.no_grad(): - for n, p in model.named_parameters(): - if 'weight' in n and 'norm' not in n and 'patch_embed.proj.weight' not in n: - p.set_process_group(pg) - p.set_tensor_spec(*spec) - - -# Similarly, it's col split for Linear but row split for others. -def init_1d_col_for_linear_weight_bias_spec(model, world_size: int): - pg = ProcessGroup(tp_degree=world_size) - spec = (ShardSpec([0], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D)) - with DistSpecManager.no_grad(): - for n, p in model.named_parameters(): - if ('weight' in n or 'bias' in n) and 'norm' not in n and ('patch_embed.proj.weight' not in n - and 'patch_embed.proj.bias' not in n): - p.set_process_group(pg) - p.set_tensor_spec(*spec) - - -def init_spec_func(model, tp_type): - world_size = torch.distributed.get_world_size() - if tp_type == 'row': - init_1d_row_for_linear_weight_spec(model, world_size) - elif tp_type == 'col': - init_1d_col_for_linear_weight_bias_spec(model, world_size) - else: - raise NotImplemented - - -def train_imagenet(): - - parser = colossalai.get_default_parser() - parser.add_argument('--resume_from', default=False, action='store_true') - parser.add_argument('--dummy_data', default=False, action='store_true') - - args = parser.parse_args() - colossalai.launch_from_torch(config=args.config) - use_ddp = gpc.config.USE_DDP - - disable_existing_loggers() - - logger = get_dist_logger() - if hasattr(gpc.config, 'LOG_PATH'): - if gpc.get_global_rank() == 0: - log_path = gpc.config.LOG_PATH - if not os.path.exists(log_path): - os.mkdir(log_path) - logger.log_to_file(log_path) - - logger.info('Build data loader', ranks=[0]) - if not args.dummy_data: - root = os.environ['DATA'] - train_dataloader, test_dataloader = build_dali_imagenet(root, - train_batch_size=gpc.config.BATCH_SIZE, - test_batch_size=gpc.config.BATCH_SIZE) - else: - train_dataloader = DummyDataLoader(length=10, - batch_size=gpc.config.BATCH_SIZE, - category=gpc.config.NUM_CLASSES, - image_size=gpc.config.IMG_SIZE, - return_dict=False) - test_dataloader = DummyDataLoader(length=5, - batch_size=gpc.config.BATCH_SIZE, - category=gpc.config.NUM_CLASSES, - image_size=gpc.config.IMG_SIZE, - return_dict=False) - - logger.info('Build model', ranks=[0]) - - model_kwargs = 
dict(img_size=gpc.config.IMG_SIZE, - patch_size=gpc.config.PATCH_SIZE, - embed_dim=gpc.config.HIDDEN_SIZE, - depth=gpc.config.DEPTH, - num_heads=gpc.config.NUM_HEADS, - mlp_ratio=gpc.config.MLP_RATIO, - num_classes=gpc.config.NUM_CLASSES, - drop_rate=0.1, - attn_drop_rate=0.1, - weight_init='jax') - - with ColoInitContext(device=get_current_device()): - model = _create_vision_transformer('vit_small_patch16_224', pretrained=False, **model_kwargs) - init_spec_func(model, gpc.config.TP_TYPE) - - world_size = torch.distributed.get_world_size() - model = ColoDDP(module=model, process_group=ProcessGroup(tp_degree=world_size)) - logger.info('Build criterion, optimizer, lr_scheduler', ranks=[0]) - optimizer = HybridAdam(model.parameters(), lr=gpc.config.LEARNING_RATE, weight_decay=gpc.config.WEIGHT_DECAY) - - criterion = CrossEntropyLoss() - lr_scheduler = CosineAnnealingWarmupLR(optimizer=optimizer, - total_steps=gpc.config.NUM_EPOCHS, - warmup_steps=gpc.config.WARMUP_EPOCHS) - - start_epoch = 0 - if args.resume_from: - load_model = torch.load(args.resume_from + '_model.pth') - start_epoch = load_model['epoch'] - model.load_state_dict(load_model['model']) - load_optim = torch.load(args.resume_from + '_optim_rank_{}.pth'.format(dist.get_rank())) - optimizer.load_state_dict(load_optim['optim']) - - for epoch in range(start_epoch, gpc.config.NUM_EPOCHS): - model.train() - for index, (x, y) in tqdm(enumerate(train_dataloader), total=len(train_dataloader), leave=False): - x, y = x.cuda(), y.cuda() - output = model(x) - loss = criterion(output, y) - loss = loss / gpc.config.gradient_accumulation - if use_ddp: - model.backward(loss) - else: - loss.backward() - if (index + 1) % gpc.config.gradient_accumulation == 0: - optimizer.step() - if use_ddp: - model.zero_grad() - else: - optimizer.zero_grad() - - logger.info( - f"Finish Train Epoch [{epoch+1}/{gpc.config.NUM_EPOCHS}] loss: {loss.item():.3f} lr: {optimizer.state_dict()['param_groups'][0]['lr']}", - ranks=[0]) - - model.eval() - test_loss = 0 - correct = 0 - test_sum = 0 - with torch.no_grad(): - for index, (x, y) in tqdm(enumerate(test_dataloader), total=len(test_dataloader), leave=False): - x, y = x.cuda(), y.cuda() - output = model(x) - test_loss += F.cross_entropy(output, y, reduction='sum').item() - pred = output.argmax(dim=1, keepdim=True) - correct += pred.eq(y.view_as(pred)).sum().item() - test_sum += y.size(0) - - test_loss /= test_sum - logger.info( - f"Finish Test Epoch [{epoch+1}/{gpc.config.NUM_EPOCHS}] loss: {test_loss:.3f} Accuracy: [{correct}/{test_sum}]({correct/test_sum:.3f})", - ranks=[0]) - - lr_scheduler.step() - - -if __name__ == '__main__': - train_imagenet() diff --git a/examples/images/vit/vit.py b/examples/images/vit/vit.py deleted file mode 100644 index f22e8ea90cec..000000000000 --- a/examples/images/vit/vit.py +++ /dev/null @@ -1,95 +0,0 @@ -from abc import ABC, abstractmethod - -import torch -import torch.nn as nn -from transformers import ViTConfig, ViTForImageClassification - -from colossalai.utils.cuda import get_current_device - - -class DummyDataGenerator(ABC): - - def __init__(self, length=10): - self.length = length - - @abstractmethod - def generate(self): - pass - - def __iter__(self): - self.step = 0 - return self - - def __next__(self): - if self.step < self.length: - self.step += 1 - return self.generate() - else: - raise StopIteration - - def __len__(self): - return self.length - - -class DummyDataLoader(DummyDataGenerator): - - def __init__(self, length=10, batch_size=4, channel=3, category=8, 
image_size=224, return_dict=True): - super().__init__(length) - self.batch_size = batch_size - self.channel = channel - self.category = category - self.image_size = image_size - self.return_dict = return_dict - - def generate(self): - image_dict = {} - image_dict['pixel_values'] = torch.rand( - self.batch_size, self.channel, self.image_size, self.image_size, device=get_current_device()) * 2 - 1 - image_dict['label'] = torch.randint(self.category, (self.batch_size,), - dtype=torch.int64, - device=get_current_device()) - if not self.return_dict: - return image_dict['pixel_values'], image_dict['label'] - return image_dict - - -class ViTCVModel(nn.Module): - - def __init__(self, - hidden_size=768, - num_hidden_layers=12, - num_attention_heads=12, - image_size=224, - patch_size=16, - num_channels=3, - num_labels=8, - checkpoint=False): - super().__init__() - self.checkpoint = checkpoint - self.model = ViTForImageClassification( - ViTConfig(hidden_size=hidden_size, - num_hidden_layers=num_hidden_layers, - num_attention_heads=num_attention_heads, - image_size=image_size, - patch_size=patch_size, - num_channels=num_channels, - num_labels=num_labels)) - if checkpoint: - self.model.gradient_checkpointing_enable() - - def forward(self, pixel_values): - return self.model(pixel_values=pixel_values) - - -def vit_base_s(checkpoint=True): - return ViTCVModel(checkpoint=checkpoint) - - -def vit_base_micro(checkpoint=True): - return ViTCVModel(hidden_size=32, num_hidden_layers=2, num_attention_heads=4, checkpoint=checkpoint) - - -def get_training_components(): - trainloader = DummyDataLoader() - testloader = DummyDataLoader() - return vit_base_micro, trainloader, testloader, torch.optim.Adam, torch.nn.functional.cross_entropy diff --git a/examples/images/vit/vit_benchmark.py b/examples/images/vit/vit_benchmark.py new file mode 100644 index 000000000000..11d480bba65f --- /dev/null +++ b/examples/images/vit/vit_benchmark.py @@ -0,0 +1,129 @@ +import time + +import torch +import transformers +from transformers import ViTConfig, ViTForImageClassification +import tqdm + +import colossalai +from colossalai.nn.optimizer import HybridAdam +from colossalai.logging import disable_existing_loggers, get_dist_logger +from colossalai.utils import get_current_device +from colossalai.booster import Booster +from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin +from colossalai.cluster import DistCoordinator + +from args import parse_benchmark_args + +def format_num(num: int, bytes=False): + """Scale bytes to its proper format, e.g. 
1253656 => '1.20MB'""" + factor = 1024 if bytes else 1000 + suffix = "B" if bytes else "" + for unit in ["", " K", " M", " G", " T", " P"]: + if num < factor: + return f"{num:.2f}{unit}{suffix}" + num /= factor + + +def get_data(batch_size, num_labels, num_channels=3, height=224, width=224): + pixel_values = torch.randn(batch_size, num_channels, height, width, device=torch.cuda.current_device(), dtype=torch.float) + labels = torch.randint(0, num_labels, (batch_size, ), device=torch.cuda.current_device(), dtype=torch.int64) + return pixel_values, labels + + +def colo_memory_cap(size_in_GB): + from colossalai.utils import colo_device_memory_capacity, colo_set_process_memory_fraction, get_current_device + cuda_capacity = colo_device_memory_capacity(get_current_device()) + if size_in_GB * (1024**3) < cuda_capacity: + colo_set_process_memory_fraction(size_in_GB * (1024**3) / cuda_capacity) + print(f"Limiting GPU memory usage to {size_in_GB} GB") + + +def main(): + + args = parse_benchmark_args() + + # Launch ColossalAI + colossalai.launch_from_torch(config={}, seed=args.seed) + coordinator = DistCoordinator() + world_size = coordinator.world_size + + # Manage loggers + disable_existing_loggers() + logger = get_dist_logger() + if coordinator.is_master(): + transformers.utils.logging.set_verbosity_info() + else: + transformers.utils.logging.set_verbosity_error() + + # Whether to set limit on memory capacity + if args.mem_cap > 0: + colo_memory_cap(args.mem_cap) + + # Build ViT model + config = ViTConfig.from_pretrained(args.model_name_or_path) + model = ViTForImageClassification(config) + logger.info(f"Finish loading model from {args.model_name_or_path}", ranks=[0]) + + # Enable gradient checkpointing + model.gradient_checkpointing_enable() + + # Set plugin + booster_kwargs = {} + if args.plugin == 'torch_ddp_fp16': + booster_kwargs['mixed_precision'] = 'fp16' + if args.plugin.startswith('torch_ddp'): + plugin = TorchDDPPlugin() + elif args.plugin == 'gemini': + plugin = GeminiPlugin(device=get_current_device(), + placement_policy='cpu', + pin_memory=True, + strict_ddp_mode=True, + initial_scale=2**5) + elif args.plugin == 'low_level_zero': + plugin = LowLevelZeroPlugin(initial_scale=2**5) + logger.info(f"Set plugin as {args.plugin}", ranks=[0]) + + # Set optimizer + optimizer = HybridAdam(model.parameters(), lr=(args.learning_rate * world_size)) + + # Set booster + booster = Booster(plugin=plugin, **booster_kwargs) + model, optimizer, _, _, _ = booster.boost(model, optimizer) + + + # Start training. 
+ logger.info(f"Start testing", ranks=[0]) + progress_bar = tqdm.tqdm(total=args.max_train_steps, desc="Training Step", disable=not coordinator.is_master()) + + torch.cuda.synchronize() + model.train() + start_time = time.time() + + for _ in range(args.max_train_steps): + + pixel_values, labels = get_data(args.batch_size, args.num_labels, 3, 224, 224) + optimizer.zero_grad() + outputs = model(pixel_values=pixel_values, labels=labels) + loss = outputs['loss'] + booster.backward(loss, optimizer) + optimizer.step() + + torch.cuda.synchronize() + progress_bar.update(1) + + # Compute Statistics + end_time = time.time() + throughput = "{:.4f}".format((world_size * args.max_train_steps * args.batch_size) / (end_time - start_time)) + max_mem = format_num(torch.cuda.max_memory_allocated(device=torch.cuda.current_device()), bytes=True) + + logger.info(f"Testing finished, " + f"batch size per gpu: {args.batch_size}, " + f"plugin: {args.plugin}, " + f"throughput: {throughput}, " + f"maximum memory usage per gpu: {max_mem}.", + ranks=[0]) + + +if __name__ == "__main__": + main() diff --git a/examples/images/vit/vit_train_demo.py b/examples/images/vit/vit_train_demo.py new file mode 100644 index 000000000000..3a739f10b5d0 --- /dev/null +++ b/examples/images/vit/vit_train_demo.py @@ -0,0 +1,177 @@ +import torch +import torch.distributed as dist +import transformers +from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor +from tqdm import tqdm + +import colossalai +from colossalai.nn.optimizer import HybridAdam +from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR +from colossalai.logging import disable_existing_loggers, get_dist_logger +from colossalai.utils import get_current_device +from colossalai.booster import Booster +from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin +from colossalai.cluster import DistCoordinator + +from args import parse_demo_args +from data import BeansDataset, beans_collator + + +def move_to_cuda(batch, device): + return {k: v.to(device) for k, v in batch.items()} + + +def train_epoch(epoch, model, optimizer, lr_scheduler, dataloader, booster, coordinator): + + torch.cuda.synchronize() + model.train() + + with tqdm(dataloader, desc=f'Epoch [{epoch + 1}]', disable=not coordinator.is_master()) as pbar: + + for batch in pbar: + + # Foward + optimizer.zero_grad() + batch = move_to_cuda(batch, torch.cuda.current_device()) + outputs = model(**batch) + loss = outputs['loss'] + + # Backward + booster.backward(loss, optimizer) + optimizer.step() + lr_scheduler.step() + + # Print batch loss + pbar.set_postfix({'loss': loss.item()}) + + +@torch.no_grad() +def evaluate_model(epoch, model, eval_dataloader, num_labels, coordinator): + + model.eval() + accum_loss = torch.zeros(1, device=get_current_device()) + total_num = torch.zeros(1, device=get_current_device()) + accum_correct = torch.zeros(1, device=get_current_device()) + + for batch in eval_dataloader: + batch = move_to_cuda(batch, torch.cuda.current_device()) + outputs = model(**batch) + val_loss, logits = outputs[:2] + accum_loss += (val_loss / len(eval_dataloader)) + if num_labels > 1: + preds = torch.argmax(logits, dim=1) + elif num_labels == 1: + preds = logits.squeeze() + + labels = batch["labels"] + total_num += batch["labels"].shape[0] + accum_correct += (torch.sum(preds == labels)) + + dist.all_reduce(accum_loss) + dist.all_reduce(total_num) + dist.all_reduce(accum_correct) + avg_loss = "{:.4f}".format(accum_loss.item()) + accuracy = 
"{:.4f}".format(accum_correct.item() / total_num.item()) + if coordinator.is_master(): + print(f"Evaluation result for epoch {epoch + 1}: \ + average_loss={avg_loss}, \ + accuracy={accuracy}.") + + + + +def main(): + + args = parse_demo_args() + + # Launch ColossalAI + colossalai.launch_from_torch(config={}, seed=args.seed) + coordinator = DistCoordinator() + world_size = coordinator.world_size + + # Manage loggers + disable_existing_loggers() + logger = get_dist_logger() + if coordinator.is_master(): + transformers.utils.logging.set_verbosity_info() + else: + transformers.utils.logging.set_verbosity_error() + + # Prepare Dataset + image_processor = ViTImageProcessor.from_pretrained(args.model_name_or_path) + train_dataset = BeansDataset(image_processor, split='train') + eval_dataset = BeansDataset(image_processor, split='validation') + + + # Load pretrained ViT model + config = ViTConfig.from_pretrained(args.model_name_or_path) + config.num_labels = train_dataset.num_labels + config.id2label = {str(i): c for i, c in enumerate(train_dataset.label_names)} + config.label2id = {c: str(i) for i, c in enumerate(train_dataset.label_names)} + model = ViTForImageClassification.from_pretrained(args.model_name_or_path, + config=config, + ignore_mismatched_sizes=True) + logger.info(f"Finish loading model from {args.model_name_or_path}", ranks=[0]) + + # Enable gradient checkpointing + model.gradient_checkpointing_enable() + + # Set plugin + booster_kwargs = {} + if args.plugin == 'torch_ddp_fp16': + booster_kwargs['mixed_precision'] = 'fp16' + if args.plugin.startswith('torch_ddp'): + plugin = TorchDDPPlugin() + elif args.plugin == 'gemini': + plugin = GeminiPlugin(device=get_current_device(), + placement_policy='cpu', + pin_memory=True, + strict_ddp_mode=True, + initial_scale=2**5) + elif args.plugin == 'low_level_zero': + plugin = LowLevelZeroPlugin(initial_scale=2**5) + logger.info(f"Set plugin as {args.plugin}", ranks=[0]) + + # Prepare dataloader + train_dataloader = plugin.prepare_dataloader(train_dataset, + batch_size=args.batch_size, + shuffle=True, + drop_last=True, + collate_fn=beans_collator) + eval_dataloader = plugin.prepare_dataloader(eval_dataset, + batch_size=args.batch_size, + shuffle=True, + drop_last=True, + collate_fn=beans_collator) + + # Set optimizer + optimizer = HybridAdam(model.parameters(), lr=(args.learning_rate * world_size), weight_decay=args.weight_decay) + + # Set lr scheduler + total_steps = len(train_dataloader) * args.num_epoch + num_warmup_steps = int(args.warmup_ratio * total_steps) + lr_scheduler = CosineAnnealingWarmupLR(optimizer=optimizer, + total_steps=(len(train_dataloader) * args.num_epoch), + warmup_steps=num_warmup_steps) + + # Set booster + booster = Booster(plugin=plugin, **booster_kwargs) + model, optimizer, _, train_dataloader, lr_scheduler = booster.boost(model=model, + optimizer=optimizer, + dataloader=train_dataloader, + lr_scheduler=lr_scheduler) + + # Finetuning + logger.info(f"Start finetuning", ranks=[0]) + for epoch in range(args.num_epoch): + train_epoch(epoch, model, optimizer, lr_scheduler, train_dataloader, booster, coordinator) + evaluate_model(epoch, model, eval_dataloader, eval_dataset.num_labels, coordinator) + logger.info(f"Finish finetuning", ranks=[0]) + + # Save the finetuned model + booster.save_model(model, args.output_path) + logger.info(f"Saving model checkpoint to {args.output_path}", ranks=[0]) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/examples/language/bert/README.md 
b/examples/language/bert/README.md new file mode 100644 index 000000000000..81c3f03fffca --- /dev/null +++ b/examples/language/bert/README.md @@ -0,0 +1,34 @@ +## Overview + +This directory includes two parts: using the Booster API to finetune Hugging Face BERT and ALBERT models, and benchmarking BERT and ALBERT models with different Booster plugins. + +## Finetune +``` +bash test_ci.sh +``` + +## Benchmark +``` +bash benchmark.sh +``` + +The benchmark currently reports these metrics: peak CUDA memory usage, throughput, and the number of model parameters. If you need custom metrics, you can add them to benchmark_utils.py. + +## Results + +### BERT + +| | max CUDA mem | throughput (samples/s) | params | +| :-----| -----------: | :--------: | :----: | +| ddp | 21.44 GB | 3.0 | 82M | +| ddp_fp16 | 16.26 GB | 11.3 | 82M | +| gemini | 11.0 GB | 12.9 | 82M | +| low_level_zero | 11.29 GB | 14.7 | 82M | + +### ALBERT +| | max CUDA mem | throughput (samples/s) | params | +| :-----| -----------: | :--------: | :----: | +| ddp | OOM | | | +| ddp_fp16 | OOM | | | +| gemini | 69.39 GB | 1.3 | 208M | +| low_level_zero | 56.89 GB | 1.4 | 208M | \ No newline at end of file diff --git a/examples/language/bert/benchmark.py b/examples/language/bert/benchmark.py new file mode 100644 index 000000000000..ae8b2269a534 --- /dev/null +++ b/examples/language/bert/benchmark.py @@ -0,0 +1,174 @@ +import argparse + +import torch +from benchmark_utils import benchmark +from torch.utils.data import DataLoader, Dataset +from transformers import ( + AlbertConfig, + AlbertForSequenceClassification, + BertConfig, + BertForSequenceClassification, + get_linear_schedule_with_warmup, +) + +import colossalai +from colossalai.booster import Booster +from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin +from colossalai.cluster import DistCoordinator +from colossalai.nn.optimizer import HybridAdam + +# ============================== +# Prepare Hyperparameters +# ============================== +NUM_EPOCHS = 3 +BATCH_SIZE = 32 +LEARNING_RATE = 2.4e-5 +WEIGHT_DECAY = 0.01 +WARMUP_FRACTION = 0.1 +SEQ_LEN = 512 +VOCAB_SIZE = 1000 +NUM_LABELS = 10 +DATASET_LEN = 1000 + + +class RandintDataset(Dataset): + + def __init__(self, dataset_length: int, sequence_length: int, vocab_size: int, n_class: int): + + self._sequence_length = sequence_length + self._vocab_size = vocab_size + self._n_class = n_class + self._dataset_length = dataset_length + self._datas = torch.randint( + low=0, + high=self._vocab_size, + size=(self._dataset_length, self._sequence_length,), + dtype=torch.long, + ) + self._labels = torch.randint(low=0, high=self._n_class, size=(self._dataset_length, 1), dtype=torch.long) + + def __len__(self): + return self._dataset_length + + def __getitem__(self, idx): + return self._datas[idx], self._labels[idx] + + +def main(): + # ============================== + # Parse Arguments + # ============================== + parser = argparse.ArgumentParser() + parser.add_argument('-t', '--task', default='mrpc', help="GLUE task to run") + parser.add_argument('-p', + '--plugin', + type=str, + default='torch_ddp', + choices=['torch_ddp', 'torch_ddp_fp16', 'gemini', 'low_level_zero'], + help="plugin to use") + parser.add_argument( + "--model_type", + type=str, + default="bert", + help="bert or albert", + ) + + args = parser.parse_args() + + # ============================== + # Launch Distributed Environment + # ============================== + colossalai.launch_from_torch(config={}, seed=42) + coordinator = DistCoordinator() + + # 
local_batch_size = BATCH_SIZE // coordinator.world_size + lr = LEARNING_RATE * coordinator.world_size + + # ============================== + # Instantiate Plugin and Booster + # ============================== + booster_kwargs = {} + if args.plugin == 'torch_ddp_fp16': + booster_kwargs['mixed_precision'] = 'fp16' + if args.plugin.startswith('torch_ddp'): + plugin = TorchDDPPlugin() + elif args.plugin == 'gemini': + plugin = GeminiPlugin(placement_policy='cuda', strict_ddp_mode=True, initial_scale=2**5) + elif args.plugin == 'low_level_zero': + plugin = LowLevelZeroPlugin(initial_scale=2**5) + + booster = Booster(plugin=plugin, **booster_kwargs) + + # ============================== + # Prepare Dataloader + # ============================== + + train_dataset = RandintDataset(dataset_length=DATASET_LEN, + sequence_length=SEQ_LEN, + vocab_size=VOCAB_SIZE, + n_class=NUM_LABELS) + train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE) + + # ==================================== + # Prepare model, optimizer + # ==================================== + # bert pretrained model + + if args.model_type == "bert": + cfg = BertConfig(vocab_size=VOCAB_SIZE, num_labels=NUM_LABELS) + model = BertForSequenceClassification(cfg) + elif args.model_type == "albert": + cfg = AlbertConfig(vocab_size=VOCAB_SIZE, num_labels=NUM_LABELS) + model = AlbertForSequenceClassification(cfg) + else: + raise RuntimeError + + # optimizer + no_decay = ["bias", "LayerNorm.weight"] + optimizer_grouped_parameters = [ + { + "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], + "weight_decay": WEIGHT_DECAY, + }, + { + "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], + "weight_decay": 0.0, + }, + ] + + optimizer = HybridAdam(optimizer_grouped_parameters, lr=lr, eps=1e-8) + + # lr scheduler + total_steps = len(train_dataloader) * NUM_EPOCHS + num_warmup_steps = int(WARMUP_FRACTION * total_steps) + lr_scheduler = get_linear_schedule_with_warmup( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=total_steps, + ) + + # criterion + criterion = lambda inputs: inputs[0] + + # ============================== + # Boost with ColossalAI + # ============================== + model, optimizer, _, _, lr_scheduler = booster.boost(model, optimizer, lr_scheduler=lr_scheduler) + + # ============================== + # Benchmark model + # ============================== + + results = benchmark(model, + booster, + optimizer, + lr_scheduler, + train_dataloader, + criterion=criterion, + epoch_num=NUM_EPOCHS) + + coordinator.print_on_master(results) + + +if __name__ == '__main__': + main() diff --git a/examples/language/bert/benchmark.sh b/examples/language/bert/benchmark.sh new file mode 100755 index 000000000000..9453d1373f2f --- /dev/null +++ b/examples/language/bert/benchmark.sh @@ -0,0 +1,9 @@ +#!/bin/bash +set -xe + +pip install -r requirements.txt + +for plugin in "torch_ddp" "torch_ddp_fp16" "gemini" "low_level_zero"; do + torchrun --standalone --nproc_per_node 2 benchmark.py --plugin $plugin --model_type "bert" + torchrun --standalone --nproc_per_node 2 benchmark.py --plugin $plugin --model_type "albert" +done diff --git a/examples/language/bert/benchmark_utils.py b/examples/language/bert/benchmark_utils.py new file mode 100644 index 000000000000..886017a41826 --- /dev/null +++ b/examples/language/bert/benchmark_utils.py @@ -0,0 +1,146 @@ +import inspect +from logging import getLogger +from time import time +from typing import Callable + +import 
torch +import yaml +from torch.optim.lr_scheduler import _LRScheduler as LRScheduler +from torch.utils.data import DataLoader +from tqdm import tqdm + +from colossalai.booster import Booster +from colossalai.cluster import DistCoordinator + +logger = getLogger("colossalai-booster-benchmark") +_INVALID = float("nan") + + +def format_num(num: int, bytes=False): + """Scale bytes to its proper format, e.g. 1253656 => '1.20MB'""" + factor = 1024 if bytes else 1000 + suffix = "B" if bytes else "" + for unit in ["", " K", " M", " G", " T", " P"]: + if num < factor: + return f"{num:.2f}{unit}{suffix}" + num /= factor + + +def _is_valid(val): + return val == val + + +def get_call_arg_names(module_or_fn): + if isinstance(module_or_fn, torch.nn.Module): + return inspect.getfullargspec(module_or_fn.forward)[0][1:] + return inspect.getfullargspec(module_or_fn)[0] + + +def measure_params(model): + num_params = _INVALID + + try: + num_params = sum(p.numel() for p in model.parameters() if p.requires_grad) + except AttributeError as e: + logger.error(f"Unable to measure model params due to error: {e}") + + return num_params + + +def warm_up( + model, + booster, + dataloader, + criterion, + optimizer, + lr_scheduler, + num_runs=10, +): + for i, data in enumerate(dataloader): + if i > num_runs: + break + inputs, labels = data[0].cuda(), data[1].cuda() + outputs = model(inputs, labels=labels) + loss = criterion(outputs) + booster.backward(loss, optimizer) + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + +def fmt(d: dict): + return yaml.dump(d) + + +def benchmark( + model: torch.nn.Module, + booster: Booster, + optimizer: torch.optim.Optimizer, + lr_scheduler: LRScheduler, + dataloader: DataLoader, + criterion: Callable = None, + warm_up_fn=warm_up, + epoch_num: int = 3, + batch_size: int = 32, + warm_up_steps: int = 3, +): + results = {} + model_device = torch.cuda.current_device() + + # Warm up + warm_up_fn( + model, + booster, + dataloader, + criterion, + optimizer, + lr_scheduler, + num_runs=warm_up_steps, + ) + # Measure params + params = measure_params(model) + if _is_valid(params): + results["params"] = format_num(params) + logger.info(f"Model parameters: {params} ({format_num(params)})") + + # Measure Allocated Memory and Throughput + memory = {} + throughput = {} + torch.cuda.reset_peak_memory_stats(device=model_device) + pre_mem = torch.cuda.memory_allocated(device=model_device) + + start_time = time() + + for epoch in range(epoch_num): + with tqdm(dataloader, desc=f'Epoch [{epoch + 1}/{epoch_num}]', + disable=not DistCoordinator().is_master()) as pbar: + for data in pbar: + inputs, labels = data[0].cuda(), data[1].cuda() + outputs = model(inputs, labels=labels) + loss = criterion(outputs) + booster.backward(loss, optimizer) + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + end_time = time() + + all_sample = epoch_num * len(dataloader) + + post_mem = torch.cuda.memory_allocated(device=model_device) + max_mem = torch.cuda.max_memory_allocated(device=model_device) + + memory[f"batch_size_{batch_size}"] = { + "cuda_pre_training_bytes": format_num(pre_mem, bytes=True), + "cuda_max_training_bytes": format_num(max_mem, bytes=True), + "cuda_post_training_bytes": format_num(post_mem, bytes=True), + } + logger.info(fmt({f"Memory results (batch_size={batch_size})": memory[f"batch_size_{batch_size}"]})) + + throughput[f"batch_size_{batch_size}"] = {"throughput:": "{:.1f}".format(all_sample * DistCoordinator().world_size / (end_time - start_time))} + 
logger.info(fmt({f"Throughput results (batch_size={batch_size})": throughput[f"batch_size_{batch_size}"]})) + + results["throughput"] = throughput + results["memory"] = memory + + return results diff --git a/examples/language/bert/data.py b/examples/language/bert/data.py new file mode 100644 index 000000000000..981cedcca8c2 --- /dev/null +++ b/examples/language/bert/data.py @@ -0,0 +1,127 @@ +import datasets +from transformers import AutoTokenizer, PreTrainedTokenizer + +from colossalai.booster.plugin.dp_plugin_base import DPPluginBase + + +class GLUEDataBuilder: + + task_text_field_map = { + "cola": ["sentence"], + "sst2": ["sentence"], + "mrpc": ["sentence1", "sentence2"], + "qqp": ["question1", "question2"], + "stsb": ["sentence1", "sentence2"], + "mnli": ["premise", "hypothesis"], + "qnli": ["question", "sentence"], + "rte": ["sentence1", "sentence2"], + "wnli": ["sentence1", "sentence2"], + "ax": ["premise", "hypothesis"], + } + + glue_task_num_labels = { + "cola": 2, + "sst2": 2, + "mrpc": 2, + "qqp": 2, + "stsb": 1, + "mnli": 3, + "qnli": 2, + "rte": 2, + "wnli": 2, + "ax": 3, + } + + loader_columns = [ + "datasets_idx", + "input_ids", + "token_type_ids", + "attention_mask", + "start_positions", + "end_positions", + "labels", + ] + + def __init__( + self, + model_name_or_path: str, + plugin: DPPluginBase, + task_name: str = "mrpc", + max_seq_length: int = 128, + train_batch_size: int = 32, + eval_batch_size: int = 32, + **kwargs, + ): + super().__init__() + self.model_name_or_path = model_name_or_path + self.task_name = task_name + self.max_seq_length = max_seq_length + self.train_batch_size = train_batch_size + self.eval_batch_size = eval_batch_size + self.plugin = plugin + + self.text_fields = self.task_text_field_map[task_name] + self.num_labels = self.glue_task_num_labels[task_name] + self.tokenizer: PreTrainedTokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path, use_fast=True) + self.setup() + + def setup(self): + self.dataset = datasets.load_dataset("glue", self.task_name) + + for split in self.dataset.keys(): + self.dataset[split] = self.dataset[split].map( + self.convert_to_features, + batched=True, + remove_columns=["label"], + ) + self.columns = [c for c in self.dataset[split].column_names if c in self.loader_columns] + self.dataset[split].set_format(type="torch", columns=self.columns) + + self.eval_splits = [x for x in self.dataset.keys() if "validation" in x] + + def prepare_data(self): + datasets.load_dataset("glue", self.task_name) + AutoTokenizer.from_pretrained(self.model_name_or_path, use_fast=True) + + def train_dataloader(self): + return self.plugin.prepare_dataloader(self.dataset["train"], + batch_size=self.train_batch_size, + shuffle=True, + drop_last=True) + + def val_dataloader(self): + if len(self.eval_splits) == 1: + return self.plugin.prepare_dataloader(self.dataset["validation"], batch_size=self.eval_batch_size) + elif len(self.eval_splits) > 1: + return [ + self.plugin.prepare_dataloader(self.dataset[x], batch_size=self.eval_batch_size) + for x in self.eval_splits + ] + + def test_dataloader(self): + if len(self.eval_splits) == 1: + return self.plugin.prepare_dataloader(self.dataset["test"], batch_size=self.eval_batch_size) + elif len(self.eval_splits) > 1: + return [ + self.plugin.prepare_dataloader(self.dataset[x], batch_size=self.eval_batch_size) + for x in self.eval_splits + ] + + def convert_to_features(self, example_batch): + + # Either encode single sentence or sentence pairs + if len(self.text_fields) > 1: + texts_or_text_pairs = 
list(zip(example_batch[self.text_fields[0]], example_batch[self.text_fields[1]])) + else: + texts_or_text_pairs = example_batch[self.text_fields[0]] + + # Tokenize the text/text pairs + features = self.tokenizer.batch_encode_plus(texts_or_text_pairs, + max_length=self.max_seq_length, + padding='max_length', + truncation=True) + + # Rename label to labels to make it easier to pass to model forward + features["labels"] = example_batch["label"] + + return features diff --git a/examples/language/bert/finetune.py b/examples/language/bert/finetune.py new file mode 100644 index 000000000000..b209ffde85a4 --- /dev/null +++ b/examples/language/bert/finetune.py @@ -0,0 +1,220 @@ +import argparse +from typing import List, Union + +import evaluate +import torch +import torch.distributed as dist +import torch.nn as nn +from data import GLUEDataBuilder +from torch.optim import Optimizer +from torch.utils.data import DataLoader +from tqdm import tqdm +from transformers import ( + AlbertForSequenceClassification, + AutoConfig, + BertForSequenceClassification, + get_linear_schedule_with_warmup, +) + +import colossalai +from colossalai.booster import Booster +from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin +from colossalai.cluster import DistCoordinator +from colossalai.nn.optimizer import HybridAdam +from colossalai.utils import get_current_device + +# ============================== +# Prepare Hyperparameters +# ============================== +NUM_EPOCHS = 3 +BATCH_SIZE = 32 +LEARNING_RATE = 2.4e-5 +WEIGHT_DECAY = 0.01 +WARMUP_FRACTION = 0.1 + + +def move_to_cuda(batch): + return {k: v.cuda() for k, v in batch.items()} + + +@torch.no_grad() +def evaluate_model(model: nn.Module, test_dataloader: Union[DataLoader, List[DataLoader]], num_labels: int, task_name: str, + eval_splits: List[str], coordinator: DistCoordinator): + metric = evaluate.load("glue", task_name, process_id=coordinator.rank, num_process=coordinator.world_size) + model.eval() + + def evaluate_subset(dataloader: DataLoader): + accum_loss = torch.zeros(1, device=get_current_device()) + for batch in dataloader: + batch = move_to_cuda(batch) + outputs = model(**batch) + val_loss, logits = outputs[:2] + accum_loss.add_(val_loss) + + if num_labels > 1: + preds = torch.argmax(logits, axis=1) + elif num_labels == 1: + preds = logits.squeeze() + + labels = batch["labels"] + + metric.add_batch(predictions=preds, references=labels) + + results = metric.compute() + dist.all_reduce(accum_loss.div_(len(dataloader))) + if coordinator.is_master(): + results['loss'] = accum_loss.item() / coordinator.world_size + return results + + if isinstance(test_dataloader, DataLoader): + return evaluate_subset(test_dataloader) + else: + assert len(test_dataloader) == len(eval_splits) + final_results = {} + for split, sub_loader in zip(eval_splits, test_dataloader): + results = evaluate_subset(sub_loader) + final_results.update({f'{k}_{split}': v for k, v in results.items()}) + return final_results + + +def train_epoch(epoch: int, model: nn.Module, optimizer: Optimizer, lr_scheduler, train_dataloader: DataLoader, + booster: Booster, coordinator: DistCoordinator): + model.train() + with tqdm(train_dataloader, desc=f'Epoch [{epoch + 1}/{NUM_EPOCHS}]', disable=not coordinator.is_master()) as pbar: + for batch in pbar: + # Forward pass + batch = move_to_cuda(batch) + outputs = model(**batch) + loss = outputs[0] + + # Backward and optimize + booster.backward(loss, optimizer) + optimizer.step() + optimizer.zero_grad() + 
lr_scheduler.step() + + # Print log info + pbar.set_postfix({'loss': loss.item()}) + + +def main(): + # ============================== + # Parse Arguments + # ============================== + parser = argparse.ArgumentParser() + parser.add_argument('-t', '--task', default='mrpc', help="GLUE task to run") + parser.add_argument('-p', + '--plugin', + type=str, + default='torch_ddp', + choices=['torch_ddp', 'torch_ddp_fp16', 'gemini', 'low_level_zero'], + help="plugin to use") + parser.add_argument( + "--model_type", + type=str, + default="bert", + help="bert or albert", + ) + parser.add_argument('--target_f1', type=float, default=None, help="target f1 score. Raise exception if not reached") + args = parser.parse_args() + + if args.model_type == 'bert': + model_name = "bert-base-uncased" + elif args.model_type == 'albert': + model_name = "albert-xxlarge-v2" + else: + raise RuntimeError + # ============================== + # Launch Distributed Environment + # ============================== + colossalai.launch_from_torch(config={}, seed=42) + coordinator = DistCoordinator() + + # local_batch_size = BATCH_SIZE // coordinator.world_size + lr = LEARNING_RATE * coordinator.world_size + + # ============================== + # Instantiate Plugin and Booster + # ============================== + booster_kwargs = {} + if args.plugin == 'torch_ddp_fp16': + booster_kwargs['mixed_precision'] = 'fp16' + if args.plugin.startswith('torch_ddp'): + plugin = TorchDDPPlugin() + elif args.plugin == 'gemini': + plugin = GeminiPlugin(placement_policy='cuda', strict_ddp_mode=True, initial_scale=2**5) + elif args.plugin == 'low_level_zero': + plugin = LowLevelZeroPlugin(initial_scale=2**5) + + booster = Booster(plugin=plugin, **booster_kwargs) + + # ============================== + # Prepare Dataloader + # ============================== + data_builder = GLUEDataBuilder(model_name, + plugin, + args.task, + train_batch_size=BATCH_SIZE, + eval_batch_size=BATCH_SIZE) + train_dataloader = data_builder.train_dataloader() + test_dataloader = data_builder.test_dataloader() + + # ==================================== + # Prepare model, optimizer + # ==================================== + # bert pretrained model + + cfg = AutoConfig.from_pretrained(model_name, num_labels=data_builder.num_labels) + if model_name == "bert-base-uncased": + model = BertForSequenceClassification.from_pretrained(model_name, config=cfg) + elif model_name == "albert-xxlarge-v2": + model = AlbertForSequenceClassification.from_pretrained(model_name, config=cfg) + else: + raise RuntimeError + + # optimizer + no_decay = ["bias", "LayerNorm.weight"] + optimizer_grouped_parameters = [ + { + "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], + "weight_decay": WEIGHT_DECAY, + }, + { + "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], + "weight_decay": 0.0, + }, + ] + + optimizer = HybridAdam(optimizer_grouped_parameters, lr=lr, eps=1e-8) + + # lr scheduler + total_steps = len(train_dataloader) * NUM_EPOCHS + num_warmup_steps = int(WARMUP_FRACTION * total_steps) + lr_scheduler = get_linear_schedule_with_warmup( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=total_steps, + ) + + # ============================== + # Boost with ColossalAI + # ============================== + model, optimizer, _, _, lr_scheduler = booster.boost(model, optimizer, lr_scheduler=lr_scheduler) + + # ============================== + # Train model + # ============================== + for 
epoch in range(NUM_EPOCHS): + train_epoch(epoch, model, optimizer, lr_scheduler, train_dataloader, booster, coordinator) + + results = evaluate_model(model, test_dataloader, data_builder.num_labels, args.task, data_builder.eval_splits, + coordinator) + + if coordinator.is_master(): + print(results) + if args.target_f1 is not None and 'f1' in results: + assert results['f1'] >= args.target_f1, f'f1 score {results["f1"]} is lower than target {args.target_f1}' + + +if __name__ == '__main__': + main() diff --git a/examples/language/bert/requirements.txt b/examples/language/bert/requirements.txt new file mode 100644 index 000000000000..377422c260ad --- /dev/null +++ b/examples/language/bert/requirements.txt @@ -0,0 +1,9 @@ +colossalai +evaluate +datasets +torch +tqdm +transformers +scipy +scikit-learn +ptflops diff --git a/examples/language/bert/run_gemini.sh b/examples/language/bert/run_gemini.sh deleted file mode 100644 index d791334e8c97..000000000000 --- a/examples/language/bert/run_gemini.sh +++ /dev/null @@ -1,22 +0,0 @@ -set -x -# distplan in ["CAI_ZeRO1", "CAI_ZeRO2", "CAI_Gemini", "Pytorch_DDP", "Pytorch_ZeRO"] -export DISTPLAN=${DISTPLAN:-"CAI_Gemini"} - -# The following options only valid when DISTPLAN="colossalai" -export GPUNUM=${GPUNUM:-1} -export PLACEMENT=${PLACEMENT:-"cpu"} -export BATCH_SIZE=${BATCH_SIZE:-16} - -# bert | albert -export MODEL_TYPE=${MODEL_TYPE:-"bert"} -export TRAIN_STEP=${TRAIN_STEP:-10} - -mkdir -p gemini_logs - -env CUDA_LAUNCH_BLOCKING=1 torchrun --standalone --nproc_per_node=${GPUNUM} ./train_bert_demo.py \ ---model_type=${MODEL_TYPE} \ ---batch_size=${BATCH_SIZE} \ ---placement=${PLACEMENT} \ ---distplan=${DISTPLAN} \ ---train_step=${TRAIN_STEP} \ -2>&1 | tee ./gemini_logs/${MODEL_TYPE}_${DISTPLAN}_gpu_${GPUNUM}_bs_${BATCH_SIZE}_${PLACEMENT}.log diff --git a/examples/language/bert/test_ci.sh b/examples/language/bert/test_ci.sh old mode 100644 new mode 100755 index 42c63fec50c0..7fc6daabb2f3 --- a/examples/language/bert/test_ci.sh +++ b/examples/language/bert/test_ci.sh @@ -1,2 +1,8 @@ -set -x -env GPUNUM=1 bash run_gemini.sh +#!/bin/bash +set -xe + +pip install -r requirements.txt + +for plugin in "torch_ddp" "torch_ddp_fp16" "gemini" "low_level_zero"; do + torchrun --standalone --nproc_per_node 4 finetune.py --target_f1 0.86 --plugin $plugin --model_type "bert" +done diff --git a/examples/language/bert/train_bert_demo.py b/examples/language/bert/train_bert_demo.py deleted file mode 100644 index 9a0278b2c711..000000000000 --- a/examples/language/bert/train_bert_demo.py +++ /dev/null @@ -1,331 +0,0 @@ -import os -from functools import partial -from time import time - -import psutil -import torch -from packaging import version -from torch import nn -from torch.nn.parallel import DistributedDataParallel as DDP -from transformers import AlbertConfig, AlbertForSequenceClassification, BertConfig, BertForSequenceClassification - -import colossalai -from colossalai.logging import disable_existing_loggers, get_dist_logger -from colossalai.nn.optimizer import HybridAdam -from colossalai.tensor import ColoParameter, ComputePattern, ComputeSpec, ProcessGroup, ReplicaSpec, ShardSpec -from colossalai.utils import get_current_device -from colossalai.zero import ColoInitContext, zero_model_wrapper, zero_optim_wrapper - -CAI_VERSION = colossalai.__version__ - - -def get_tflops(model_numel, batch_size, seq_len, step_time): - return model_numel * batch_size * seq_len * 8 / 1e12 / (step_time + 1e-12) - - -def get_profile_context(enable_flag, warmup_steps, active_steps, 
save_dir): - from contextlib import nullcontext - - from torch.profiler import ProfilerActivity, profile, schedule, tensorboard_trace_handler - if enable_flag: - return profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], - schedule=schedule(wait=0, warmup=warmup_steps, active=active_steps), - on_trace_ready=tensorboard_trace_handler(save_dir), - record_shapes=True, - profile_memory=True) - else: - - class DummyProfiler: - - def __init__(self): - self.step_number = 0 - - def step(self): - self.step_number += 1 - - return nullcontext(DummyProfiler()) - - -def get_time_stamp(): - import time - cur_time = time.strftime("%d-%H:%M", time.localtime()) - return cur_time - - -def get_bert_data(batch_size: int, sequence_length: int, vacob_size: int, n_class: int, device: torch.device): - input = torch.randint( - low=0, - high=vacob_size, - size=(batch_size, sequence_length), - device=device, - dtype=torch.long, - ) - label = torch.randint(low=0, high=n_class, size=(batch_size,), device=device, dtype=torch.long) - return input, label - - -def parse_args(): - parser = colossalai.get_default_parser() - parser.add_argument( - "--distplan", - type=str, - default='CAI_Gemini', - help="The distributed plan [colossalai, zero1, zero2, torch_ddp, torch_zero].", - ) - parser.add_argument( - "--placement", - type=str, - default='cpu', - help="Placement Policy for Gemini. Valid when using colossalai as dist plan.", - ) - parser.add_argument( - "--batch_size", - type=int, - default=8, - help="batch size per DP group of training.", - ) - parser.add_argument( - "--model_type", - type=str, - default="bert", - help="bert or albert", - ) - parser.add_argument( - "--train_step", - type=int, - default=10, - help="training iterations for test", - ) - - args = parser.parse_args() - return args - - -SEQ_LEN = 512 -VOCAB_SIZE = 1000 -NUM_LABELS = 10 - - -# Parameter Sharding Strategies for Tensor Parallelism -def split_param_single_dim_tp1d(dim: int, param: ColoParameter, pg: ProcessGroup): - spec = (ShardSpec([dim], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D)) - param.set_tensor_spec(*spec) - - -def split_param_row_tp1d(param: ColoParameter, pg: ProcessGroup): - split_param_single_dim_tp1d(0, param, pg) - - -def split_param_col_tp1d(param: ColoParameter, pg: ProcessGroup): - split_param_single_dim_tp1d(-1, param, pg) - - -def get_cpu_mem(): - return psutil.Process().memory_info().rss / 1024**2 - - -def get_gpu_mem(): - return torch.cuda.memory_allocated() / 1024**2 - - -def get_mem_info(prefix=''): - return f'{prefix}GPU memory usage: {get_gpu_mem():.2f} MB, CPU memory usage: {get_cpu_mem():.2f} MB' - - -def get_model_size(model: nn.Module): - total_numel = 0 - for module in model.modules(): - for p in module.parameters(recurse=False): - total_numel += p.numel() - return total_numel - - -def model_builder(args): - if args.model_type == "bert": - cfg = BertConfig(vocab_size=VOCAB_SIZE, num_labels=NUM_LABELS) - return BertForSequenceClassification(cfg) - elif args.model_type == "albert": - cfg = AlbertConfig(vocab_size=VOCAB_SIZE, num_labels=NUM_LABELS) - return AlbertForSequenceClassification(cfg) - else: - raise RuntimeError - - -def model_size_formatter(numel: int) -> str: - GB_SIZE = 10**9 - MB_SIZE = 10**6 - KB_SIZE = 10**3 - if numel >= GB_SIZE: - return f'{numel / GB_SIZE:.1f}B' - elif numel >= MB_SIZE: - return f'{numel / MB_SIZE:.1f}M' - elif numel >= KB_SIZE: - return f'{numel / KB_SIZE:.1f}K' - else: - return str(numel) - - -def set_cpu_maximum_parallelism(): - conf_str = 
torch.__config__.parallel_info() - inter_str = conf_str.split("hardware_concurrency() : ")[1] - max_concurrency = inter_str.split('\n')[0] - os.environ["OMP_NUM_THREADS"] = max_concurrency - print(f"environmental variable OMP_NUM_THREADS is set to {max_concurrency}.") - - -def main(): - # version check - # this example is supposed to work for versions greater than 0.2.0 - assert version.parse(CAI_VERSION) >= version.parse("0.2.0") - - set_cpu_maximum_parallelism() - args = parse_args() - - # if args.distplan not in ["colossalai", "torch_ddp", "torch_zero", "zero1", "zero2"]: - if args.distplan not in ["CAI_ZeRO1", "CAI_ZeRO2", "CAI_Gemini", "Pytorch_DDP", "Pytorch_ZeRO"]: - raise TypeError(f"{args.distplan} is error") - - # batch size per DP degree - BATCH_SIZE = args.batch_size - - NUM_STEPS = args.train_step - - WARMUP_STEPS = 1 - assert WARMUP_STEPS < NUM_STEPS, "warmup steps should smaller than the total steps" - assert (NUM_STEPS - WARMUP_STEPS) % 2 == 1, "the number of valid steps should be odd to take the median" - PROF_FLAG = False # The flag of profiling, False by default - - disable_existing_loggers() - colossalai.launch_from_torch(config={}) - - logger = get_dist_logger() - logger.info(f" {args.distplan}, batch size {BATCH_SIZE}", ranks=[0]) - - torch.manual_seed(123) - if args.distplan.startswith("CAI"): - # all param must use the same process group. - world_size = torch.distributed.get_world_size() - - # build a base-bert model - with ColoInitContext(device=get_current_device(), dtype=torch.half): - model = model_builder(args) - # model = BertForSequenceClassification(BertConfig(vocal_size = VOCAB_SIZE)) - - # asign running configurations - gemini_config = None - if args.distplan.startswith("CAI_ZeRO"): - optim_config = dict(reduce_bucket_size=12 * 1024 * 1024, overlap_communication=True, verbose=True) - elif args.distplan == "CAI_Gemini": - gemini_config = dict(strict_ddp_mode=True, - device=get_current_device(), - placement_policy=args.placement, - pin_memory=True, - hidden_dim=model.config.hidden_size, - search_range_mb=128) - optim_config = dict(gpu_margin_mem_ratio=0.) 
- else: - raise RuntimeError - - # build a highly optimized gpu/cpu optimizer - optimizer = HybridAdam(model.parameters(), lr=1e-3) - - if args.distplan == "CAI_ZeRO1": - zero_stage = 1 - elif args.distplan == "CAI_ZeRO2": - zero_stage = 2 - elif args.distplan == "CAI_Gemini": - zero_stage = 3 - else: - raise RuntimeError - - # wrap your model and optimizer - model = zero_model_wrapper(model, zero_stage, gemini_config) - optimizer = zero_optim_wrapper(model, optimizer, optim_config=optim_config) - - logger.info(get_mem_info(prefix='After init optim, '), ranks=[0]) - elif args.distplan.startswith("Pytorch"): - model = model_builder(args).cuda() - model = DDP(model) - if args.distplan.endswith("DDP"): - optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) - elif args.distplan.endswith("ZeRO"): - from torch.distributed.optim import ZeroRedundancyOptimizer - optimizer = ZeroRedundancyOptimizer(model.parameters(), optimizer_class=torch.optim.Adam, lr=1e-3) - else: - raise RuntimeError - - # model is shared after TP - numel = get_model_size(model) - logger.info(f"the size of testing model size is {model_size_formatter(numel)}.") - logger.info(get_mem_info(prefix='After init model, '), ranks=[0]) - - # Tflops_per_GPU = global_batch * global_numel * seq_len * 8 / #gpu - # = (batch_per_DP_group * dp_degree) * (numel * tp_degree) * seq_len * 8 / (tp_degree * dp_degree) - # = batch_per_DP_group * numel * seq_len * 8 - get_tflops_func = partial(get_tflops, numel, BATCH_SIZE, SEQ_LEN) - - torch.cuda.synchronize() - model.train() - tflops_list = [] - - def train_step(): - # we just use randomly generated data here - input_ids, labels = get_bert_data(BATCH_SIZE, - SEQ_LEN, - VOCAB_SIZE, - NUM_LABELS, - device=torch.cuda.current_device()) - optimizer.zero_grad() - - start = time() - outputs = model(input_ids, labels=labels) - loss, logits = outputs[:2] - torch.cuda.synchronize() - fwd_end = time() - fwd_time = fwd_end - start - logger.info(get_mem_info(prefix=f'[{n + 1}/{NUM_STEPS}] Forward '), ranks=[0]) - - if args.distplan.startswith("CAI"): - optimizer.backward(loss) - elif args.distplan.startswith("Pytorch"): - loss.backward() - else: - raise RuntimeError - - torch.cuda.synchronize() - bwd_end = time() - bwd_time = bwd_end - fwd_end - logger.info(get_mem_info(prefix=f'[{n + 1}/{NUM_STEPS}] Backward '), ranks=[0]) - - optimizer.step() - torch.cuda.synchronize() - optim_time = time() - bwd_end - step_time = time() - start - logger.info(get_mem_info(prefix=f'[{n + 1}/{NUM_STEPS}] Optimizer step '), ranks=[0]) - - step_tflops = get_tflops_func(step_time) - logger.info( - f"[{n + 1}/{NUM_STEPS}] Loss:{loss.item():.3f}, Step time: {step_time:.3f}s, TFLOPS: {get_tflops_func(step_time):.3f}, FWD time: {fwd_time:.3f}s, BWD time: {bwd_time:.3f}s, OPTIM time: {optim_time:.3f}s", - ranks=[0], - ) - if n >= WARMUP_STEPS: - tflops_list.append(step_tflops) - - demo_profiler = get_profile_context(PROF_FLAG, - WARMUP_STEPS, - NUM_STEPS - WARMUP_STEPS, - save_dir=f"profile/{get_time_stamp()}-demo") - - with demo_profiler as prof: - for n in range(NUM_STEPS): - train_step() - prof.step() - - tflops_list.sort() - median_index = ((NUM_STEPS - WARMUP_STEPS) >> 1) + WARMUP_STEPS - logger.info(f"Median TFLOPS is {tflops_list[median_index]:.3f}") - torch.cuda.synchronize() - - -if __name__ == '__main__': - main() diff --git a/examples/language/gpt/gemini/train_gpt_demo.py b/examples/language/gpt/gemini/train_gpt_demo.py index 92751c7e2f47..4b78624f0110 100644 --- a/examples/language/gpt/gemini/train_gpt_demo.py +++ 
b/examples/language/gpt/gemini/train_gpt_demo.py @@ -162,7 +162,7 @@ def tensor_parallelize(model: torch.nn.Module, pg: ProcessGroup): # shard it w.r.t tp pattern if 'mlp.c_fc' in mn: if 'weight' in pn or 'bias' in pn: - split_param_col_tp1d(param, pg) # colmn slice + split_param_col_tp1d(param, pg) # column slice # keep the shape of the output from c_fc param.compute_spec.set_output_replicate(False) else: @@ -173,9 +173,9 @@ def tensor_parallelize(model: torch.nn.Module, pg: ProcessGroup): else: param.set_dist_spec(ReplicaSpec()) elif 'wte' in mn or 'wpe' in mn: - split_param_col_tp1d(param, pg) # colmn slice + split_param_col_tp1d(param, pg) # column slice elif 'c_attn' in mn or 'c_proj' in mn: - split_param_col_tp1d(param, pg) # colmn slice + split_param_col_tp1d(param, pg) # column slice else: param.set_dist_spec(ReplicaSpec()) param.visited = True @@ -237,7 +237,7 @@ def main(): if args.tp_degree > 1: tensor_parallelize(model, tp_pg) - # asign running configurations + # assign running configurations if args.distplan == "CAI_ZeRO1": zero_stage = 1 elif args.distplan == "CAI_ZeRO2": diff --git a/examples/language/gpt/titans/model/embed.py b/examples/language/gpt/titans/model/embed.py index 6369b9f8c5a1..d825ae92a285 100644 --- a/examples/language/gpt/titans/model/embed.py +++ b/examples/language/gpt/titans/model/embed.py @@ -305,7 +305,7 @@ def forward(ctx, vocab_parallel_logits, target): @staticmethod def backward(ctx, grad_output): - # Retreive tensors from the forward path. + # Retrieve tensors from the forward path. softmax, target_mask, masked_target_1d = ctx.saved_tensors # All the inputs have softmax as their gradient. diff --git a/examples/language/opt/README.md b/examples/language/opt/README.md index c2fd254571c7..37e1ff4d9008 100644 --- a/examples/language/opt/README.md +++ b/examples/language/opt/README.md @@ -19,15 +19,35 @@ Meta recently released [Open Pretrained Transformer (OPT)](https://github.com/fa The following example of [Colossal-AI](https://github.com/hpcaitech/ColossalAI) demonstrates fine-tuning Casual Language Modelling at low cost. -We are using the pre-training weights of the OPT model provided by Hugging Face Hub on the raw WikiText-2 (no tokens were replaced before -the tokenization). This training script is adapted from the [HuggingFace Language Modelling examples](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling). ## Our Modifications -We adapt the OPT training code to ColossalAI by leveraging Gemini and ZeRO DDP. -## Quick Start -You can launch training by using the following bash script +We are using the pre-training weights of the OPT model provided by Hugging Face Hub on the raw WikiText-2 (no tokens were replaced before +the tokenization). + +We adapt the OPT training code to ColossalAI by leveraging [Boosting API](https://colossalai.org/docs/basics/booster_api) loaded with a chosen plugin, where each plugin corresponds to a specific kind of training strategy. This example supports plugins including TorchDDPPlugin, LowLevelZeroPlugin, and GeminiPlugin. + +## Run Demo +By running the following script: ```bash -bash ./run_gemini.sh +bash run_demo.sh ``` +You will finetune a [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) model on this [dataset](https://huggingface.co/datasets/hugginglearners/netflix-shows), which contains more than 8000 comments on Netflix shows. + +The script can be modified if you want to try another set of hyperparameters or change to another OPT model with different size. 
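As a quick reference, the snippet below is a minimal sketch of the plugin/Booster pattern that `opt_train_demo.py` follows: launch the distributed environment, pick a plugin, wrap the model and optimizer with `booster.boost`, then train with `booster.backward`. It is an illustration only, not the demo itself: the random batch and the output path are placeholder assumptions, and the real demo reads its hyperparameters from `args.py` and builds its dataloader from the Netflix-shows dataset in `data.py`.

```python
# Minimal sketch of the plugin/Booster workflow (placeholders, not the full demo).
import colossalai
import torch
from transformers import AutoConfig, OPTForCausalLM

from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin
from colossalai.nn.optimizer import HybridAdam
from colossalai.utils import get_current_device


def run():
    # Launch one process per GPU (e.g. under torchrun) and seed everything.
    colossalai.launch_from_torch(config={}, seed=42)

    # Choose a plugin; swapping this object is all it takes to change the training strategy.
    plugin = GeminiPlugin(device=get_current_device(),
                          placement_policy='cpu',
                          pin_memory=True,
                          strict_ddp_mode=True,
                          initial_scale=2**5)

    config = AutoConfig.from_pretrained("facebook/opt-350m")
    model = OPTForCausalLM.from_pretrained("facebook/opt-350m", config=config)
    optimizer = HybridAdam(model.parameters(), lr=5e-5)

    # Boost model and optimizer so the plugin's strategy (DDP / ZeRO / Gemini) takes effect.
    booster = Booster(plugin=plugin)
    model, optimizer, _, _, _ = booster.boost(model, optimizer)

    # One illustrative step on random token ids; the demo iterates over a real dataloader.
    input_ids = torch.randint(0, config.vocab_size, (4, 128), device=torch.cuda.current_device())
    outputs = model(input_ids=input_ids, attention_mask=torch.ones_like(input_ids),
                    labels=input_ids, use_cache=False)
    booster.backward(outputs['loss'], optimizer)
    optimizer.step()
    optimizer.zero_grad()

    booster.save_model(model, "./output_model.bin")    # placeholder output path


if __name__ == "__main__":
    run()
```

Launched with `torchrun --standalone --nproc_per_node <GPUNUM>`, this is the same pattern the demo script uses; `run_demo.sh` only supplies the hyperparameters.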
+ +The demo code is adapted from this [blog](https://medium.com/geekculture/fine-tune-eleutherai-gpt-neo-to-generate-netflix-movie-descriptions-in-only-47-lines-of-code-40c9b4c32475) and the [HuggingFace Language Modelling examples](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling). + + + +## Run Benchmark + +You can run benchmark for OPT model by running the following script: +```bash +bash run_benchmark.sh +``` +The script will test performance (throughput & peak memory usage) for each combination of hyperparameters. You can also play with this script to configure your set of hyperparameters for testing. + + + diff --git a/examples/language/opt/args.py b/examples/language/opt/args.py new file mode 100644 index 000000000000..16730be7ebea --- /dev/null +++ b/examples/language/opt/args.py @@ -0,0 +1,120 @@ +from colossalai import get_default_parser + + +def parse_demo_args(): + + parser = get_default_parser() + parser.add_argument( + "--model_name_or_path", + type=str, + default="facebook/opt-350m", + help="Path to pretrained model or model identifier from huggingface.co/models." + ) + parser.add_argument( + "--output_path", + type=str, + default="./output_model.bin", + help="The path of your saved model after finetuning." + ) + parser.add_argument( + "--plugin", + type=str, + default="gemini", + help="Plugin to use. Valid plugins include 'torch_ddp','torch_ddp_fp16','gemini','low_level_zero'." + ) + parser.add_argument( + "--num_epoch", + type=int, + default=10, + help="Number of epochs." + ) + parser.add_argument( + "--batch_size", + type=int, + default=32, + help="Batch size (per dp group) for the training dataloader." + ) + parser.add_argument( + "--learning_rate", + type=float, + default=5e-5, + help="Initial learning rate (after the potential warmup period) to use." + ) + parser.add_argument( + "--warmup_ratio", + type=float, + default=0.1, + help="Ratio of warmup steps against total training steps." + ) + parser.add_argument( + "--weight_decay", + type=float, + default=0.01, + help="Weight decay to use." + ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="A seed for reproducible training." + ) + + args = parser.parse_args() + return args + + + +def parse_benchmark_args(): + + parser = get_default_parser() + parser.add_argument( + "--model_name_or_path", + type=str, + default="facebook/opt-125m", + help="Path to pretrained model or model identifier from huggingface.co/models." + ) + parser.add_argument( + "--plugin", + type=str, + default="gemini", + help="Plugin to use. Valid plugins include 'torch_ddp','torch_ddp_fp16','gemini','low_level_zero'." + ) + parser.add_argument( + "--batch_size", + type=int, + default=32, + help="Batch size (per dp group) for the training dataloader." + ) + parser.add_argument( + "--learning_rate", + type=float, + default=5e-5, + help="Initial learning rate (after the potential warmup period) to use." + ) + parser.add_argument( + "--weight_decay", + type=float, + default=0.0, + help="Weight decay to use." + ) + parser.add_argument( + "--max_train_steps", + type=int, + default=20, + help="Total number of training steps to perform." + ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="A seed for reproducible training." + ) + parser.add_argument( + "--mem_cap", + type=int, + default=0, + help="Limit on the usage of space for each GPU (in GB)." 
+ ) + args = parser.parse_args() + + return args \ No newline at end of file diff --git a/examples/language/opt/benchmark.sh b/examples/language/opt/benchmark.sh deleted file mode 100644 index 0d04b5e9b33c..000000000000 --- a/examples/language/opt/benchmark.sh +++ /dev/null @@ -1,21 +0,0 @@ -export BS=16 -export MEMCAP=0 -export MODEL="6.7b" -export GPUNUM=1 - -for MODEL in "6.7b" "13b" "1.3b" -do -for GPUNUM in 8 1 -do -for BS in 16 24 32 8 -do -for MEMCAP in 0 40 -do -pkill -9 torchrun -pkill -9 python - -env BS=$BS MEM_CAP=$MEMCAP MODEL=$MODEL GPUNUM=$GPUNUM bash ./run_gemini.sh -done -done -done -done diff --git a/examples/language/opt/data.py b/examples/language/opt/data.py new file mode 100644 index 000000000000..6cfffb5fc95b --- /dev/null +++ b/examples/language/opt/data.py @@ -0,0 +1,37 @@ +import torch +from torch.utils.data import Dataset +from datasets import load_dataset + + +class NetflixDataset(Dataset): + + def __init__(self, tokenizer): + + super().__init__() + + self.tokenizer = tokenizer + self.input_ids = [] + self.attn_masks = [] + self.labels = [] + self.txt_list = netflix_descriptions = load_dataset("hugginglearners/netflix-shows", split="train")['description'] + self.max_length = max([len(self.tokenizer.encode(description)) for description in netflix_descriptions]) + + for txt in self.txt_list: + encodings_dict = self.tokenizer('' + txt + '', + truncation=True, + max_length=self.max_length, + padding="max_length") + self.input_ids.append(torch.tensor(encodings_dict['input_ids'])) + self.attn_masks.append(torch.tensor(encodings_dict['attention_mask'])) + + def __len__(self): + return len(self.input_ids) + + def __getitem__(self, idx): + return self.input_ids[idx], self.attn_masks[idx] + + +def netflix_collator(data): + return {'input_ids': torch.stack([x[0] for x in data]), + 'attention_mask': torch.stack([x[1] for x in data]), + 'labels': torch.stack([x[0] for x in data])} diff --git a/examples/language/opt/opt_benchmark.py b/examples/language/opt/opt_benchmark.py new file mode 100755 index 000000000000..2d69036b50c6 --- /dev/null +++ b/examples/language/opt/opt_benchmark.py @@ -0,0 +1,137 @@ +import time + +import torch +import transformers +from transformers import AutoConfig, OPTForCausalLM +from transformers.utils.versions import require_version +import tqdm + +import colossalai +from colossalai.nn.optimizer import HybridAdam +from colossalai.logging import disable_existing_loggers, get_dist_logger +from colossalai.tensor import ProcessGroup, ShardSpec +from colossalai.utils import get_current_device +from colossalai.zero import ColoInitContext +from colossalai.booster import Booster +from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin +from colossalai.cluster import DistCoordinator + +from args import parse_benchmark_args + +require_version("transformers>=4.20.0", "To fix: pip install -r requirements.txt") + + +def format_num(num: int, bytes=False): + """Scale bytes to its proper format, e.g. 
1253656 => '1.20MB'""" + factor = 1024 if bytes else 1000 + suffix = "B" if bytes else "" + for unit in ["", " K", " M", " G", " T", " P"]: + if num < factor: + return f"{num:.2f}{unit}{suffix}" + num /= factor + + +def get_data(batch_size, seq_len, vocab_size): + input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device=torch.cuda.current_device()) + attention_mask = torch.ones_like(input_ids) + return input_ids, attention_mask + + +def colo_memory_cap(size_in_GB): + from colossalai.utils import colo_device_memory_capacity, colo_set_process_memory_fraction, get_current_device + cuda_capacity = colo_device_memory_capacity(get_current_device()) + if size_in_GB * (1024**3) < cuda_capacity: + colo_set_process_memory_fraction(size_in_GB * (1024**3) / cuda_capacity) + print(f"Limiting GPU memory usage to {size_in_GB} GB") + + +def main(): + + args = parse_benchmark_args() + + # Launch ColossalAI + colossalai.launch_from_torch(config={}, seed=args.seed) + coordinator = DistCoordinator() + world_size = coordinator.world_size + + # Manage loggers + disable_existing_loggers() + logger = get_dist_logger() + if coordinator.is_master(): + transformers.utils.logging.set_verbosity_info() + else: + transformers.utils.logging.set_verbosity_error() + + # Whether to set limit of memory capacity + if args.mem_cap > 0: + colo_memory_cap(args.mem_cap) + + # Build OPT model + config = AutoConfig.from_pretrained(args.model_name_or_path) + model = OPTForCausalLM(config=config) + logger.info(f"Finish loading model from {args.model_name_or_path}", ranks=[0]) + + # Enable gradient checkpointing + model.gradient_checkpointing_enable() + + # Set plugin + booster_kwargs = {} + if args.plugin == 'torch_ddp_fp16': + booster_kwargs['mixed_precision'] = 'fp16' + if args.plugin.startswith('torch_ddp'): + plugin = TorchDDPPlugin() + elif args.plugin == 'gemini': + plugin = GeminiPlugin(device=get_current_device(), + placement_policy='cpu', + pin_memory=True, + strict_ddp_mode=True, + initial_scale=2**5) + elif args.plugin == 'low_level_zero': + plugin = LowLevelZeroPlugin(initial_scale=2**5) + logger.info(f"Set plugin as {args.plugin}", ranks=[0]) + + # Set optimizer + optimizer = HybridAdam(model.parameters(), lr=args.learning_rate) + + # Set booster + booster = Booster(plugin=plugin, **booster_kwargs) + model, optimizer, _, _, _ = booster.boost(model, optimizer) + + SEQ_LEN = 1024 + VOCAB_SIZE = 50257 + + # Start training. 
+ logger.info(f"Start testing", ranks=[0]) + progress_bar = tqdm.tqdm(total=args.max_train_steps, desc="Training Step", disable=not coordinator.is_master()) + + torch.cuda.synchronize() + model.train() + start_time = time.time() + + for _ in range(args.max_train_steps): + + input_ids, attn_mask = get_data(args.batch_size, SEQ_LEN, VOCAB_SIZE) + optimizer.zero_grad() + outputs = model(input_ids=input_ids, attention_mask=attn_mask, labels=input_ids, use_cache=False) + loss = outputs['loss'] + booster.backward(loss, optimizer) + optimizer.step() + + torch.cuda.synchronize() + progress_bar.update(1) + + # Compute Statistics + end_time = time.time() + throughput = "{:.4f}".format((world_size * args.max_train_steps * args.batch_size) / (end_time - start_time)) + max_mem = format_num(torch.cuda.max_memory_allocated(device=torch.cuda.current_device()), bytes=True) + + logger.info(f"Testing finished, " + f"batch size per gpu: {args.batch_size}, " + f"plugin: {args.plugin}, " + f"throughput: {throughput}, " + f"maximum memory usage per gpu: {max_mem}.", + ranks=[0]) + + +if __name__ == "__main__": + main() diff --git a/examples/language/opt/opt_train_demo.py b/examples/language/opt/opt_train_demo.py new file mode 100644 index 000000000000..fa7feca9c9a9 --- /dev/null +++ b/examples/language/opt/opt_train_demo.py @@ -0,0 +1,142 @@ +import time + +import torch +import datasets +import transformers +from transformers import AutoConfig, OPTForCausalLM, AutoTokenizer +from transformers import get_linear_schedule_with_warmup +from transformers.utils.versions import require_version +from tqdm import tqdm + +import colossalai +from colossalai.nn.optimizer import HybridAdam +from colossalai.logging import disable_existing_loggers, get_dist_logger +from colossalai.tensor import ProcessGroup, ShardSpec +from colossalai.utils import get_current_device +from colossalai.zero import ColoInitContext +from colossalai.booster import Booster +from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin +from colossalai.cluster import DistCoordinator + +from args import parse_demo_args +from data import NetflixDataset, netflix_collator + +require_version("datasets>=1.8.0", "To fix: pip install -r requirements.txt") +require_version("transformers>=4.20.0", "To fix: pip install -r requirements.txt") + + +def move_to_cuda(batch, device): + return {k: v.to(device) for k, v in batch.items()} + + +def train_epoch(epoch, model, optimizer, lr_scheduler, dataloader, booster, coordinator): + + torch.cuda.synchronize() + model.train() + + with tqdm(dataloader, desc=f'Epoch [{epoch + 1}]', disable=not coordinator.is_master()) as pbar: + + for batch in pbar: + + # Forward + optimizer.zero_grad() + batch = move_to_cuda(batch, torch.cuda.current_device()) + + outputs = model(use_cache=False, **batch) + loss = outputs['loss'] + + # Backward + booster.backward(loss, optimizer) + optimizer.step() + lr_scheduler.step() + + # Print batch loss + pbar.set_postfix({'loss': loss.item()}) + + +def main(): + + args = parse_demo_args() + + # Launch ColossalAI + colossalai.launch_from_torch(config={}, seed=args.seed) + coordinator = DistCoordinator() + world_size = coordinator.world_size + + # Manage loggers + disable_existing_loggers() + logger = get_dist_logger() + if coordinator.is_master(): + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_info() + else: + datasets.utils.logging.set_verbosity_error() + transformers.utils.logging.set_verbosity_error() + + # Build OPT model + 
config = AutoConfig.from_pretrained(args.model_name_or_path) + model = OPTForCausalLM.from_pretrained(args.model_name_or_path, config=config) + logger.info(f"Finish loading model from {args.model_name_or_path}", ranks=[0]) + + # Enable gradient checkpointing + model.gradient_checkpointing_enable() + + # Set plugin + booster_kwargs = {} + if args.plugin == 'torch_ddp_fp16': + booster_kwargs['mixed_precision'] = 'fp16' + if args.plugin.startswith('torch_ddp'): + plugin = TorchDDPPlugin() + elif args.plugin == 'gemini': + plugin = GeminiPlugin(device=get_current_device(), + placement_policy='cpu', + pin_memory=True, + strict_ddp_mode=True, + initial_scale=2**5) + elif args.plugin == 'low_level_zero': + plugin = LowLevelZeroPlugin(initial_scale=2**5) + logger.info(f"Set plugin as {args.plugin}", ranks=[0]) + + # Prepare tokenizer and dataloader + tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) + dataset = NetflixDataset(tokenizer) + dataloader = plugin.prepare_dataloader(dataset, + batch_size=args.batch_size, + shuffle=True, + drop_last=True, + collate_fn=netflix_collator) + + # Set optimizer + optimizer = HybridAdam(model.parameters(), + lr=(args.learning_rate * world_size), + weight_decay=args.weight_decay) + + # Set lr scheduler + total_steps = len(dataloader) * args.num_epoch + num_warmup_steps = int(args.warmup_ratio * total_steps) + lr_scheduler = get_linear_schedule_with_warmup( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=len(dataloader) * args.num_epoch + ) + + # Set booster + booster = Booster(plugin=plugin, **booster_kwargs) + model, optimizer, _, dataloader, lr_scheduler = booster.boost(model=model, + optimizer=optimizer, + dataloader=dataloader, + lr_scheduler=lr_scheduler) + + # Start finetuning + logger.info(f"Start finetuning", ranks=[0]) + for epoch in range(args.num_epoch): + train_epoch(epoch, model, optimizer, lr_scheduler, dataloader, booster, coordinator) + + # Finish training and evaluate + logger.info(f"Finish finetuning", ranks=[0]) + booster.save_model(model, args.output_path) + logger.info(f"Saving model checkpoint to {args.output_path}", ranks=[0]) + + +if __name__ == "__main__": + main() diff --git a/examples/language/opt/requirements.txt b/examples/language/opt/requirements.txt index 137a69e80498..4422216e6a1c 100644 --- a/examples/language/opt/requirements.txt +++ b/examples/language/opt/requirements.txt @@ -1,2 +1,4 @@ colossalai >= 0.1.12 torch >= 1.8.1 +datasets >= 1.8.0 +transformers >= 4.20.0 \ No newline at end of file diff --git a/examples/language/opt/run_benchmark.sh b/examples/language/opt/run_benchmark.sh new file mode 100644 index 000000000000..76c5e8601989 --- /dev/null +++ b/examples/language/opt/run_benchmark.sh @@ -0,0 +1,30 @@ +set -xe +pip install -r requirements.txt + +export BS=32 +export MEMCAP=0 +export GPUNUM=1 + +# acceptable values include `125m`, `350m`, `1.3b`, `2.7b`, `6.7b`, `13b`, `30b`, `66b` +export MODEL="125m" + +for BS in 8 32 128 +do +for PLUGIN in "torch_ddp" "torch_ddp_fp16" "low_level_zero" "gemini" +do +for GPUNUM in 1 4 +do + +MODLE_PATH="facebook/opt-${MODEL}" +torchrun \ + --standalone \ + --nproc_per_node ${GPUNUM} \ + opt_benchmark.py \ + --model_name_or_path ${MODLE_PATH} \ + --mem_cap ${MEMCAP} \ + --plugin ${PLUGIN} \ + --batch_size ${BS} + +done +done +done diff --git a/examples/language/opt/run_demo.sh b/examples/language/opt/run_demo.sh new file mode 100644 index 000000000000..0c9759c34039 --- /dev/null +++ b/examples/language/opt/run_demo.sh @@ -0,0 +1,44 @@ +set -xe 
+pip install -r requirements.txt + +# model name or path +MODEL="facebook/opt-350m" + +# path for saving model +OUTPUT_PATH="./output_model.bin" + +# plugin(training strategy) +# can only be one of "torch_ddp"/"torch_ddp_fp16"/"low_level_zero"/"gemini" +PLUGIN="gemini" + +# number of gpus to use +GPUNUM=4 + +# batch size per gpu +BS=16 + +# learning rate +LR="5e-5" + +# number of epoch +EPOCH=10 + +# weight decay +WEIGHT_DECAY=0.01 + +# ratio of warmup steps +WARMUP_RATIO=0.1 + +# run the script for demo +torchrun \ + --standalone \ + --nproc_per_node ${GPUNUM} \ + opt_train_demo.py \ + --model_name_or_path ${MODEL} \ + --output_path ${OUTPUT_PATH} \ + --plugin ${PLUGIN} \ + --batch_size ${BS} \ + --num_epoch ${EPOCH} \ + --learning_rate ${LR} \ + --weight_decay ${WEIGHT_DECAY} \ + --warmup_ratio ${WARMUP_RATIO} diff --git a/examples/language/opt/run_gemini.sh b/examples/language/opt/run_gemini.sh deleted file mode 100644 index 73f231292a13..000000000000 --- a/examples/language/opt/run_gemini.sh +++ /dev/null @@ -1,28 +0,0 @@ -set -x -export BS=${BS:-16} -export MEMCAP=${MEMCAP:-0} -# Acceptable values include `125m`, `350m`, `1.3b`, `2.7b`, `6.7b`, `13b`, `30b`, `66b`. For `175b` -export MODEL=${MODEL:-"125m"} -export GPUNUM=${GPUNUM:-1} -export USE_SHARD_INIT=${USE_SHARD_INIT:-"false"} - -# make directory for logs -mkdir -p ./logs - -if [ ${USE_SHARD_INIT} = "true" ]; then - USE_SHARD_INIT="--shardinit" -else - USE_SHARD_INIT="" -fi - -export MODLE_PATH="facebook/opt-${MODEL}" - -# HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 -torchrun \ - --nproc_per_node ${GPUNUM} \ - --master_port 19198 \ - train_gemini_opt.py \ - --mem_cap ${MEMCAP} \ - --model_name_or_path ${MODLE_PATH} \ - ${USE_SHARD_INIT} \ - --batch_size ${BS} 2>&1 | tee ./logs/colo_${MODEL}_bs_${BS}_cap_${MEMCAP}_gpu_${GPUNUM}.log diff --git a/examples/language/opt/test_ci.sh b/examples/language/opt/test_ci.sh index 317f602cda3c..fa14f52b70d2 100644 --- a/examples/language/opt/test_ci.sh +++ b/examples/language/opt/test_ci.sh @@ -1,4 +1,19 @@ -for GPUNUM in 2 1 +set -xe +pip install -r requirements.txt + +BS=4 +for PLUGIN in "torch_ddp" "torch_ddp_fp16" "low_level_zero" "gemini" do -env BS=2 MODEL="125m" GPUNUM=$GPUNUM bash ./run_gemini.sh +for GPUNUM in 1 4 +do + +torchrun \ + --standalone \ + --nproc_per_node ${GPUNUM} \ + opt_benchmark.py \ + --model_name_or_path "facebook/opt-125m" \ + --plugin ${PLUGIN} \ + --batch_size ${BS} + +done done diff --git a/examples/language/opt/train_gemini_opt.py b/examples/language/opt/train_gemini_opt.py deleted file mode 100755 index 3614b689de26..000000000000 --- a/examples/language/opt/train_gemini_opt.py +++ /dev/null @@ -1,233 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2021 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) -on a text file or a dataset without using HuggingFace Trainer. 
- -Here is the full list of checkpoints on the hub that can be fine-tuned by this script: -https://huggingface.co/models?filter=text-generation -""" -# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. - -import time -from functools import partial - -import datasets -import torch -import torch.distributed as dist -import transformers -from transformers import CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, OPTForCausalLM -from transformers.utils.versions import require_version - -import colossalai -from colossalai.logging import disable_existing_loggers, get_dist_logger -from colossalai.tensor import ProcessGroup, ShardSpec -from colossalai.utils import get_current_device -from colossalai.zero import ColoInitContext, GeminiAdamOptimizer, GeminiDDP - - -def get_data(batch_size, seq_len, vocab_size): - input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device=torch.cuda.current_device()) - attention_mask = torch.ones_like(input_ids) - return input_ids, attention_mask - - -require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") - -MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys()) -MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) - - -def get_time_stamp(): - torch.cuda.synchronize() - return time.time() - - -def get_tflops(model_numel, batch_size, seq_len, step_time): - return model_numel * batch_size * seq_len * 8 / 1e12 / (step_time + 1e-12) - - -def parse_args(): - parser = colossalai.get_default_parser() - parser.add_argument( - "--model_name_or_path", - type=str, - help="Path to pretrained model or model identifier from huggingface.co/models.", - required=True, - ) - parser.add_argument( - "--config_name", - type=str, - default=None, - help="Pretrained config name or path if not the same as model_name", - ) - parser.add_argument( - "--batch_size", - type=int, - default=8, - help="Batch size (per dp group) for the training dataloader.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=5e-5, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") - parser.add_argument( - "--max_train_steps", - type=int, - default=20, - help="Total number of training steps to perform.", - ) - parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") - parser.add_argument( - "--model_type", - type=str, - default=None, - help="Model type to use if training from scratch.", - choices=MODEL_TYPES, - ) - parser.add_argument( - "--shardinit", - action="store_true", - help="Initialize the model with tensor parallel", - ) - parser.add_argument("--mem_cap", type=int, default=0, help="use mem cap") - parser.add_argument("--init_in_cpu", action='store_true', default=False, help="init training model in cpu") - args = parser.parse_args() - - return args - - -def colo_memory_cap(size_in_GB): - from colossalai.utils import colo_device_memory_capacity, colo_set_process_memory_fraction, get_current_device - cuda_capacity = colo_device_memory_capacity(get_current_device()) - if size_in_GB * (1024**3) < cuda_capacity: - colo_set_process_memory_fraction(size_in_GB * (1024**3) / cuda_capacity) - print("Using {} GB of GPU memory".format(size_in_GB)) - - -def main(): - args = parse_args() - disable_existing_loggers() - colossalai.launch_from_torch({}) - logger = get_dist_logger() - is_main_process = 
dist.get_rank() == 0 - - if is_main_process: - datasets.utils.logging.set_verbosity_warning() - transformers.utils.logging.set_verbosity_info() - else: - datasets.utils.logging.set_verbosity_error() - transformers.utils.logging.set_verbosity_error() - - if args.mem_cap > 0: - colo_memory_cap(args.mem_cap) - - # If passed along, set the training seed now. - if args.seed is not None: - torch.mannul_seed(args.seed) - logger.info(f"Rank {dist.get_rank()}: random seed is set to {args.seed}") - - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. - - # Load pretrained model - # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. - if args.config_name: - config = AutoConfig.from_pretrained(args.config_name) - elif args.model_name_or_path: - config = AutoConfig.from_pretrained(args.model_name_or_path) - else: - config = CONFIG_MAPPING[args.model_type]() - logger.warning("You are instantiating a new config instance from scratch.") - logger.info("Model config has been created", ranks=[0]) - - if args.init_in_cpu: - init_dev = torch.device('cpu') - else: - init_dev = get_current_device() - - # shard init parameters - if args.shardinit: - logger.info("Sharding initialization !", ranks=[0]) - else: - logger.info("Skipping sharding initialization", ranks=[0]) - - world_size = torch.distributed.get_world_size() - shard_pg = ProcessGroup(tp_degree=world_size) if args.shardinit else None - default_dist_spec = ShardSpec([-1], [world_size]) if args.shardinit else None - - # build model - if args.model_name_or_path is None: - logger.info("Train a new model from scratch", ranks=[0]) - with ColoInitContext(device=init_dev, - dtype=torch.half, - default_dist_spec=default_dist_spec, - default_pg=shard_pg): - model = OPTForCausalLM(config) - else: - logger.info("Finetune a pre-trained model", ranks=[0]) - with ColoInitContext(device=init_dev, - dtype=torch.half, - default_dist_spec=default_dist_spec, - default_pg=shard_pg): - model = OPTForCausalLM.from_pretrained(args.model_name_or_path, - from_tf=bool(".ckpt" in args.model_name_or_path), - config=config, - local_files_only=False) - - # enable gradient checkpointing - model.gradient_checkpointing_enable() - - numel = sum([p.numel() for p in model.parameters()]) - PLACEMENT_POLICY = 'cpu' - model = GeminiDDP(model, - device=get_current_device(), - placement_policy=PLACEMENT_POLICY, - pin_memory=True, - strict_ddp_mode=args.shardinit) - optimizer = GeminiAdamOptimizer(model, lr=args.learning_rate, initial_scale=2**14, gpu_margin_mem_ratio=0.0) - - SEQ_LEN = 1024 - VOCAB_SIZE = 50257 - - get_tflops_func = partial(get_tflops, numel, args.batch_size, SEQ_LEN) - - model.train() - for step in range(args.max_train_steps): - st_time = time.time() - input_ids, attn_mask = get_data(args.batch_size, SEQ_LEN, VOCAB_SIZE) - - outputs = model(input_ids=input_ids, attention_mask=attn_mask, labels=input_ids, use_cache=False) - loss = outputs['loss'] - optimizer.backward(loss) - - optimizer.step() - optimizer.zero_grad() - torch.cuda.synchronize() - step_time = time.time() - st_time - step_tflops = get_tflops_func(step_time) - - logger.info("step {} finished, Tflops {}".format(step, step_tflops), ranks=[0]) - - logger.info("Training finished", ranks=[0]) - - -if __name__ == "__main__": - main() diff --git a/examples/language/palm/README.md 
b/examples/language/palm/README.md index 486bf240f89c..3ff3939d63d4 100644 --- a/examples/language/palm/README.md +++ b/examples/language/palm/README.md @@ -43,6 +43,9 @@ palm = PaLM( ) ``` +## New API +We have modified our previous implementation of PaLM with our new Booster API, which offers a more flexible, efficient, and user-friendly way to train your model. You can find the new usage in train.py. We also provide a shell script test_ci.sh that runs the example with each of the Booster plugins. For more information about the Booster API, please refer to https://colossalai.org/docs/basics/booster_api/. + ## Test on Enwik8 ```bash diff --git a/examples/language/palm/run.sh b/examples/language/palm/run.sh index 7a533509e009..2a846e81a9a7 100644 --- a/examples/language/palm/run.sh +++ b/examples/language/palm/run.sh @@ -3,9 +3,11 @@ export DISTPAN="colossalai" # The following options only valid when DISTPAN="colossalai" export TPDEGREE=1 -export GPUNUM=1 +export GPUNUM=4 export PLACEMENT='cpu' export USE_SHARD_INIT=False -export BATCH_SIZE=4 +export BATCH_SIZE=1 -env OMP_NUM_THREADS=12 torchrun --standalone --nproc_per_node=${GPUNUM} --master_port 29501 train.py --tp_degree=${TPDEGREE} --batch_size=${BATCH_SIZE} --placement ${PLACEMENT} --shardinit ${USE_SHARD_INIT} --distplan ${DISTPAN} 2>&1 | tee run.log +env OMP_NUM_THREADS=12 torchrun --standalone --nproc_per_node=${GPUNUM} --master_port 29501 train.py \ +--dummy_data=True --tp_degree=${TPDEGREE} --batch_size=${BATCH_SIZE} --plugin='gemini' \ +--placement ${PLACEMENT} --shardinit ${USE_SHARD_INIT} --distplan ${DISTPAN} 2>&1 | tee run.log diff --git a/examples/language/palm/test_ci.sh b/examples/language/palm/test_ci.sh index f21095578077..4de6a44e5bf7 100644 --- a/examples/language/palm/test_ci.sh +++ b/examples/language/palm/test_ci.sh @@ -4,6 +4,6 @@ for BATCH_SIZE in 2 do for GPUNUM in 1 4 do -env OMP_NUM_THREADS=12 torchrun --standalone --nproc_per_node=${GPUNUM} --master_port 29501 train.py --dummy_data=True --batch_size=${BATCH_SIZE} 2>&1 | tee run.log +env OMP_NUM_THREADS=12 torchrun --standalone --nproc_per_node=${GPUNUM} train.py --dummy_data=True --batch_size=${BATCH_SIZE} --plugin='gemini' 2>&1 | tee run.log done done diff --git a/examples/language/palm/train.py b/examples/language/palm/train.py index b16da1c7744a..a0600db1bc5b 100644 --- a/examples/language/palm/train.py +++ b/examples/language/palm/train.py @@ -9,6 +9,8 @@ import torch.optim as optim import tqdm from packaging import version + +from colossalai.nn import HybridAdam from palm_pytorch import PaLM from palm_pytorch.autoregressive_wrapper import AutoregressiveWrapper from torch.utils.data import DataLoader, Dataset @@ -18,6 +20,8 @@ from colossalai.tensor import ColoParameter, ComputePattern, ComputeSpec, ProcessGroup, ReplicaSpec, ShardSpec from colossalai.utils import MultiTimer, get_current_device from colossalai.zero import ColoInitContext, GeminiAdamOptimizer, ZeroDDP +from colossalai.booster import Booster +from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin # constants @@ -58,6 +62,12 @@ def parse_args(): help= "Shard the tensors when init the model to shrink peak memory size on the assigned device.
Valid when using colossalai as dist plan.", ) + parser.add_argument('-p', + '--plugin', + type=str, + default='torch_ddp', + choices=['torch_ddp', 'torch_ddp_fp16', 'gemini', 'low_level_zero'], + help="plugin to use") parser.add_argument( "--batch_size", type=int, @@ -101,28 +111,6 @@ def get_model_size(model: nn.Module): return total_numel -# Gemini + ZeRO DDP -def gemini_zero_dpp(model: torch.nn.Module, pg: ProcessGroup, placement_policy: str = "auto"): - cai_version = colossalai.__version__ - if version.parse(cai_version) > version.parse("0.1.10"): - from colossalai.nn.parallel import GeminiDDP - model = GeminiDDP(model, - device=get_current_device(), - placement_policy=placement_policy, - pin_memory=True, - search_range_mb=32) - elif version.parse(cai_version) <= version.parse("0.1.10") and version.parse(cai_version) >= version.parse("0.1.9"): - from colossalai.gemini import ChunkManager, GeminiManager - chunk_size = ChunkManager.search_chunk_size(model, 64 * 1024**2, 32) - gemini_manager = GeminiManager(placement_policy, chunk_manager) - chunk_manager = ChunkManager(chunk_size, - pg, - enable_distributed_storage=True, - init_device=GeminiManager.get_default_device(placement_policy)) - model = ZeroDDP(model, gemini_manager) - else: - raise NotImplemented(f"CAI version {cai_version} is not supported") - return model # Parameter Sharding Strategies for Tensor Parallelism @@ -152,15 +140,15 @@ def tensor_parallelize(model: torch.nn.Module, pg: ProcessGroup): continue param.set_dist_spec(ReplicaSpec()) if 'net.0' in mn: - split_param_col_tp1d(param, pg) # colmn slice + split_param_col_tp1d(param, pg) # column slice elif 'to_q' in mn: - split_param_col_tp1d(param, pg) # colmn slice + split_param_col_tp1d(param, pg) # column slice elif 'to_kv' in mn: split_param_row_tp1d(param, pg) # row slice elif 'to_out' in mn: split_param_row_tp1d(param, pg) # row slice elif '1.1' in mn: - split_param_col_tp1d(param, pg) # colmn slice + split_param_col_tp1d(param, pg) # column slice elif '1.2' in mn: split_param_row_tp1d(param, pg) # row slice else: @@ -218,6 +206,18 @@ def __len__(self): if args.distplan == "colossalai": # instantiate GPT-like decoder model + booster_kwargs = {} + if args.plugin == 'torch_ddp_fp16': + booster_kwargs['mixed_precision'] = 'fp16' + if args.plugin.startswith('torch_ddp'): + plugin = TorchDDPPlugin() + elif args.plugin == 'gemini': + plugin = GeminiPlugin(placement_policy=args.placement, strict_ddp_mode=True, initial_scale=2 ** 5) + elif args.plugin == 'low_level_zero': + plugin = LowLevelZeroPlugin(initial_scale=2 ** 5) + logger.info(f"plugin: {plugin}") + booster = Booster(plugin=plugin, **booster_kwargs) + default_pg = ProcessGroup(tp_degree=args.tp_degree) default_dist_spec = ShardSpec([-1], [args.tp_degree]) if args.shardinit else None ctx = ColoInitContext(device='cpu', default_dist_spec=default_dist_spec, default_pg=default_pg) @@ -228,12 +228,12 @@ def __len__(self): pg = default_pg tensor_parallelize(model, pg) - model = gemini_zero_dpp(model, pg, args.placement) # optimizer - #optimizer = GeminiAdamOptimizer(model, lr=1e-7, initial_scale=2**5) - optimizer = GeminiAdamOptimizer(model, lr=LEARNING_RATE, initial_scale=2**5) + optimizer = HybridAdam(model.parameters(), lr=LEARNING_RATE, initial_scale=2**5) + model, optimizer, _, _, _ = booster.boost(model, optimizer) + else: model = PaLM(num_tokens=256, dim=512, depth=8) model = AutoregressiveWrapper(model, max_seq_len=2048) diff --git a/tests/kit/model_zoo/registry.py b/tests/kit/model_zoo/registry.py index 
7470327a65b6..6cc4c8ef370d 100644 --- a/tests/kit/model_zoo/registry.py +++ b/tests/kit/model_zoo/registry.py @@ -2,7 +2,7 @@ from dataclasses import dataclass from typing import Callable -__all__ = ['ModelZooRegistry', 'ModelAttributem', 'model_zoo'] +__all__ = ['ModelZooRegistry', 'ModelAttribute', 'model_zoo'] @dataclass @@ -37,7 +37,7 @@ def register(self, >>> model_zoo = ModelZooRegistry() >>> model_zoo.register('resnet18', resnet18, resnet18_data_gen) >>> # Run the model - >>> data = resnresnet18_data_gen() # do not input any argument + >>> data = resnet18_data_gen() # do not input any argument >>> model = resnet18() # do not input any argument >>> out = model(**data) diff --git a/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_batch_norm_handler.py b/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_batch_norm_handler.py index b47b3508ad1b..c3ceef4c7adf 100644 --- a/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_batch_norm_handler.py +++ b/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_batch_norm_handler.py @@ -27,7 +27,7 @@ def check_bn_module_handler(rank, world_size, port): # the index of bn node in computation graph node_index = 1 # the total number of bn strategies without sync bn mode - # TODO: add sync bn stategies after related passes ready + # TODO: add sync bn strategies after related passes ready strategy_number = 4 numerical_test_for_node_strategy(model=model, device_mesh=device_mesh, diff --git a/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_output_handler.py b/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_output_handler.py index 5259455d2179..1703d5ded2f2 100644 --- a/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_output_handler.py +++ b/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_output_handler.py @@ -43,14 +43,14 @@ def test_output_handler(output_option): output_strategies_vector = StrategiesVector(output_node) # build handler - otuput_handler = OutputHandler(node=output_node, + output_handler = OutputHandler(node=output_node, device_mesh=device_mesh, strategies_vector=output_strategies_vector, output_option=output_option) - otuput_handler.register_strategy(compute_resharding_cost=False) + output_handler.register_strategy(compute_resharding_cost=False) # check operation data mapping - mapping = otuput_handler.get_operation_data_mapping() + mapping = output_handler.get_operation_data_mapping() for name, op_data in mapping.items(): op_data: OperationData @@ -59,7 +59,7 @@ def test_output_handler(output_option): assert mapping['output'].name == "output" assert mapping['output'].type == OperationDataType.OUTPUT - strategy_name_list = [val.name for val in otuput_handler.strategies_vector] + strategy_name_list = [val.name for val in output_handler.strategies_vector] if output_option == 'distributed': assert "Distributed Output" in strategy_name_list else: diff --git a/tests/test_tensor/test_dtensor/test_layout_converter.py b/tests/test_tensor/test_dtensor/test_layout_converter.py index 5f56decb5e5d..5c3da5f2b9ff 100644 --- a/tests/test_tensor/test_dtensor/test_layout_converter.py +++ b/tests/test_tensor/test_dtensor/test_layout_converter.py @@ -137,7 +137,7 @@ def check_layout_converting(rank, world_size, port): assert comm_action_sequence[2].shard_dim == 0 assert comm_action_sequence[2].logical_process_axis == 1 - # checkout chached_spec_pairs_transform_path + # checkout cached_spec_pairs_transform_path assert 
layout_converter.cached_solution[('[R, S01, R]', '[S01, R, R]')][0] == transform_path assert layout_converter.cached_solution[('[R, S01, R]', '[S01, R, R]')][1] == comm_action_sequence
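
Editor's note: the palm README's "New API" section and the rewritten OPT demo in this patch both follow the same Booster workflow. The sketch below is a minimal, self-contained illustration of that workflow and is not code from this PR: the toy model, random dataset, hyperparameters, and output path are placeholders, and it assumes a CUDA machine with the Booster API available, launched via `torchrun --standalone --nproc_per_node=1`.

```python
# Minimal sketch of the Booster workflow used by the updated examples.
# Assumptions: ColossalAI with the Booster API installed, a CUDA device,
# and a torchrun launch; the model and data below are placeholders only.
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset

import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin
from colossalai.nn import HybridAdam


def main():
    colossalai.launch_from_torch(config={})

    # toy model and random data, stand-ins for OPT/PaLM and their datasets
    model = nn.Sequential(nn.Linear(32, 64), nn.ReLU(), nn.Linear(64, 2))
    criterion = nn.CrossEntropyLoss()
    optimizer = HybridAdam(model.parameters(), lr=1e-3)

    plugin = GeminiPlugin(placement_policy='cpu', initial_scale=2**5)
    booster = Booster(plugin=plugin)

    dataset = TensorDataset(torch.randn(256, 32), torch.randint(0, 2, (256,)))
    dataloader = plugin.prepare_dataloader(dataset, batch_size=16, shuffle=True, drop_last=True)

    # boost() wraps the model, optimizer, criterion and dataloader for the chosen plugin
    model, optimizer, criterion, dataloader, _ = booster.boost(model, optimizer, criterion, dataloader)

    model.train()
    for inputs, labels in dataloader:
        outputs = model(inputs.cuda())
        loss = criterion(outputs, labels.cuda())
        booster.backward(loss, optimizer)  # replaces loss.backward()
        optimizer.step()
        optimizer.zero_grad()

    booster.save_model(model, "output_model.bin")


if __name__ == "__main__":
    main()
```

The call to `booster.backward(loss, optimizer)` matters because plugins such as Gemini and LowLevelZero scale the loss and manage gradients themselves; a plain `loss.backward()` would bypass that, which is why the OPT and PaLM training loops in this patch route the backward pass through the booster as well.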