From 4a145f24bd20be5aa45eddff17a6b9c8d103f04e Mon Sep 17 00:00:00 2001 From: zhurunhua <1281592874@qq.com> Date: Fri, 19 Jul 2024 17:20:33 +0800 Subject: [PATCH 1/2] cannot access local variable 'default_conversation' where it is not associated with a value; set default value for 'default_conversation' --- applications/Colossal-LLaMA/prepare_sft_dataset.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/applications/Colossal-LLaMA/prepare_sft_dataset.py b/applications/Colossal-LLaMA/prepare_sft_dataset.py index a857d6c0c696..fb63d005f014 100644 --- a/applications/Colossal-LLaMA/prepare_sft_dataset.py +++ b/applications/Colossal-LLaMA/prepare_sft_dataset.py @@ -10,7 +10,7 @@ import os from multiprocessing import cpu_count -from colossal_llama.dataset.conversation import LLaMA2_Conv +from colossal_llama.dataset.conversation import (LLaMA2_Conv, LLaMA3_Conv) from colossal_llama.dataset.spliced_and_tokenized_dataset import supervised_tokenize_sft from datasets import dataset_dict, load_dataset from transformers import AddedToken, AutoTokenizer @@ -75,6 +75,8 @@ def main(): # Prepare to the tokenizer. 
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir) + default_conversation = LLaMA3_Conv + # Fix split issue: https://github.com/huggingface/transformers/issues/23833 if args.llama_version == 2: tokenizer.add_tokens(AddedToken("", normalized=False, special=True), special_tokens=True) From 232fa331cbfa5331f70aeac2ece8e1baa95d2b8a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 19 Jul 2024 09:38:56 +0000 Subject: [PATCH 2/2] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- applications/Colossal-LLaMA/prepare_sft_dataset.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/Colossal-LLaMA/prepare_sft_dataset.py b/applications/Colossal-LLaMA/prepare_sft_dataset.py index fb63d005f014..fe57907601f6 100644 --- a/applications/Colossal-LLaMA/prepare_sft_dataset.py +++ b/applications/Colossal-LLaMA/prepare_sft_dataset.py @@ -10,7 +10,7 @@ import os from multiprocessing import cpu_count -from colossal_llama.dataset.conversation import (LLaMA2_Conv, LLaMA3_Conv) +from colossal_llama.dataset.conversation import LLaMA2_Conv, LLaMA3_Conv from colossal_llama.dataset.spliced_and_tokenized_dataset import supervised_tokenize_sft from datasets import dataset_dict, load_dataset from transformers import AddedToken, AutoTokenizer @@ -76,7 +76,7 @@ def main(): tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir) default_conversation = LLaMA3_Conv - + # Fix split issue: https://github.com/huggingface/transformers/issues/23833 if args.llama_version == 2: tokenizer.add_tokens(AddedToken("", normalized=False, special=True), special_tokens=True)