applications/Chat/coati/dataset/sft_dataset.py (6 additions, 5 deletions)

@@ -78,14 +78,14 @@ def __getitem__(self, idx):
         # return dict(self.prompts[idx], self.prompts[idx])
 
 
-def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
+def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer, max_length: int) -> Dict:
     """Tokenize a list of strings."""
     tokenized_list = [
         tokenizer(
             text,
             return_tensors="pt",
             padding="longest",
-            max_length=tokenizer.model_max_length,
+            max_length=max_length,
             truncation=True,
         ) for text in strings
     ]
@@ -105,10 +105,11 @@ def preprocess(
     sources: Sequence[str],
     targets: Sequence[str],
     tokenizer: transformers.PreTrainedTokenizer,
+    max_length: int,
 ) -> Dict:
     """Preprocess the data by tokenizing."""
     examples = [s + t for s, t in zip(sources, targets)]
-    examples_tokenized, sources_tokenized = [_tokenize_fn(strings, tokenizer) for strings in (examples, sources)]
+    examples_tokenized, sources_tokenized = [_tokenize_fn(strings, tokenizer, max_length) for strings in (examples, sources)]
     input_ids = examples_tokenized["input_ids"]
     labels = copy.deepcopy(input_ids)
     for label, source_len in zip(labels, sources_tokenized["input_ids_lens"]):
@@ -119,7 +120,7 @@ def preprocess(
 class SupervisedDataset(Dataset):
     """Dataset for supervised fine-tuning."""
 
-    def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer, max_datasets_size: int = None):
+    def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer, max_datasets_size: int = None, max_length: int = 512):
         super(SupervisedDataset, self).__init__()
         logger.info("Loading data...")
         list_data_dict = jload(data_path)
@@ -138,7 +139,7 @@ def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer,
         targets = [f"{example['output']}{tokenizer.eos_token}" for example in list_data_dict]
 
         logger.info("Tokenizing inputs... This may take some time...")
-        data_dict = preprocess(sources, targets, tokenizer)
+        data_dict = preprocess(sources, targets, tokenizer, max_length)
 
         self.input_ids = data_dict["input_ids"]
         self.labels = data_dict["labels"]
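The net effect in sft_dataset.py is that truncation now honors a caller-supplied cap instead of tokenizer.model_max_length, which some Hugging Face tokenizers leave at a very large sentinel value, effectively disabling truncation. A minimal standalone sketch of the new behavior; "gpt2" is an arbitrary example tokenizer, not part of this PR:

    # Sketch only: demonstrates an explicit max_length cap, as _tokenize_fn now receives it.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    tokenizer.pad_token = tokenizer.eos_token  # mirrors the setup in train_sft.py

    text = "word " * 1000  # deliberately longer than the cap
    encoded = tokenizer(
        text,
        return_tensors="pt",
        padding="longest",
        max_length=512,   # explicit cap instead of tokenizer.model_max_length
        truncation=True,
    )
    print(encoded["input_ids"].shape)  # torch.Size([1, 512])
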
applications/Chat/examples/train_sft.py (6 additions, 3 deletions)

@@ -71,6 +71,7 @@ def train(args):
     else:
         raise ValueError(f'Unsupported model "{args.model}"')
     tokenizer.pad_token = tokenizer.eos_token
+    max_len = args.max_len
     if args.model == 'llama':
         tokenizer = prepare_llama_tokenizer_and_embedding(tokenizer, model)
 
@@ -99,13 +100,14 @@ def train(args):
         train_data = load_dataset(args.dataset, 'super_natural_instructions', split='train')
         eval_data = load_dataset(args.dataset, 'super_natural_instructions', split='test')
 
-        train_dataset = SFTDataset(train_data, tokenizer)
-        eval_dataset = SFTDataset(eval_data, tokenizer)
+        train_dataset = SFTDataset(train_data, tokenizer, max_len)
+        eval_dataset = SFTDataset(eval_data, tokenizer, max_len)
 
     else:
         train_dataset = SupervisedDataset(tokenizer=tokenizer,
                                           data_path=args.dataset,
-                                          max_datasets_size=args.max_datasets_size)
+                                          max_datasets_size=args.max_datasets_size,
+                                          max_length=max_len)
         eval_dataset = None
     data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
 
@@ -176,6 +178,7 @@ def train(args):
     parser.add_argument('--need_optim_ckpt', type=bool, default=False)
    parser.add_argument('--max_epochs', type=int, default=3)
    parser.add_argument('--batch_size', type=int, default=4)
+    parser.add_argument('--max_len', type=int, default=512)
    parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank")
    parser.add_argument('--log_interval', type=int, default=100, help="how many steps to log")
    parser.add_argument('--lr', type=float, default=5e-6)
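Taken together, the sequence-length cap is now configurable end to end: argparse defines --max_len (default 512), train() reads it into max_len, and both dataset paths receive it. A hypothetical invocation; the dataset path is a placeholder and the remaining flags keep their defaults (a distributed launcher would pass the same arguments):

    python train_sft.py --model llama --dataset /path/to/data.json --max_len 512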