5 changes: 5 additions & 0 deletions applications/ChatGPT/chatgpt/models/llama/__init__.py
@@ -0,0 +1,5 @@
from .llama_actor import LlamaActor
from .llama_critic import LlamaCritic
from .llama_rm import LlamaRM

__all__ = ['LlamaActor', 'LlamaCritic', 'LlamaRM']
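
With these re-exports in place, downstream code can import all three LLaMA models from the package root. A minimal import sketch (not part of the diff; assumes the applications/ChatGPT package is installed as `chatgpt`, matching this repo's layout):

from chatgpt.models.llama import LlamaActor, LlamaCritic, LlamaRM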
38 changes: 38 additions & 0 deletions applications/ChatGPT/chatgpt/models/llama/llama_actor.py
@@ -0,0 +1,38 @@
from typing import Optional

from transformers import LlamaConfig, LlamaForCausalLM

from ..base import Actor


class LlamaActor(Actor):
"""
Llama Actor model.

Args:
pretrained (str): Pretrained model name or path.
config (LlamaConfig): Model config.
checkpoint (bool): Enable gradient checkpointing.
lora_rank (int): Rank of the LoRA decomposition; 0 disables LoRA.
lora_train_bias (str): Which bias parameters to train: 'none', 'all' or 'lora_only'.
"""

def __init__(self,
pretrained: Optional[str] = None,
config: Optional[LlamaConfig] = None,
checkpoint: bool = False,
lora_rank: int = 0,
lora_train_bias: str = 'none') -> None:

if pretrained is not None:
model = LlamaForCausalLM.from_pretrained(pretrained)
elif config is not None:
model = LlamaForCausalLM(config)
else:
model = LlamaForCausalLM(LlamaConfig())

if checkpoint:
model.gradient_checkpointing_enable()

super().__init__(model, lora_rank, lora_train_bias)
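
For reviewers, a minimal usage sketch (not part of the diff; the config values, checkpoint path, and LoRA settings are illustrative):

from transformers import LlamaConfig
from chatgpt.models.llama import LlamaActor

# Tiny randomly initialized actor for smoke testing (config values are arbitrary).
actor = LlamaActor(config=LlamaConfig(hidden_size=256, num_hidden_layers=2, num_attention_heads=4),
                   lora_rank=16)

# Loading real weights with gradient checkpointing instead (path is illustrative):
# actor = LlamaActor(pretrained='path/to/llama-7b-hf', checkpoint=True, lora_rank=16)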
42 changes: 42 additions & 0 deletions applications/ChatGPT/chatgpt/models/llama/llama_critic.py
@@ -0,0 +1,42 @@
from typing import Optional

import torch.nn as nn
from transformers import LlamaConfig, LlamaForCausalLM

from ..base import Critic


class LlamaCritic(Critic):
"""
Llama Critic model.

Args:
pretrained (str): Pretrained model name or path.
config (LlamaConfig): Model config.
checkpoint (bool): Enable gradient checkpointing.
lora_rank (int): Rank of the LoRA decomposition; 0 disables LoRA.
lora_train_bias (str): Which bias parameters to train: 'none', 'all' or 'lora_only'.
"""

def __init__(self,
pretrained: Optional[str] = None,
config: Optional[LlamaConfig] = None,
checkpoint: bool = False,
lora_rank: int = 0,
lora_train_bias: str = 'none',
**kwargs) -> None:

if pretrained is not None:
model = LlamaForCausalLM.from_pretrained(pretrained)
elif config is not None:
model = LlamaForCausalLM(config)
else:
model = LlamaForCausalLM(LlamaConfig())

if checkpoint:
model.gradient_checkpointing_enable()

# Scalar value head: maps the final hidden state to a scalar value estimate.
value_head = nn.Linear(model.config.hidden_size, 1)

super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)
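
The critic follows the same construction pattern as the actor but scores sequences through its value head. A minimal sketch (not part of the diff; the forward signature is assumed from the base Critic):

import torch
from transformers import LlamaConfig
from chatgpt.models.llama import LlamaCritic

# Randomly initialized critic for smoke testing (config values are arbitrary).
critic = LlamaCritic(config=LlamaConfig(hidden_size=256, num_hidden_layers=2, num_attention_heads=4))
sequences = torch.randint(0, 32000, (2, 16))  # batch of 2 token sequences of length 16
values = critic(sequences)                    # assumed: value estimates via the value head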
41 changes: 41 additions & 0 deletions applications/ChatGPT/chatgpt/models/llama/llama_rm.py
@@ -0,0 +1,41 @@
from typing import Optional

import torch.nn as nn
from transformers import LlamaConfig, LlamaForCausalLM

from ..base import RewardModel


class LlamaRM(RewardModel):
"""
Llama Reward model.

Args:
pretrained (str): Pretrained model name or path.
config (LlamaConfig): Model config.
checkpoint (bool): Enable gradient checkpointing.
lora_rank (int): Rank of the LoRA decomposition; 0 disables LoRA.
lora_train_bias (str): Which bias parameters to train: 'none', 'all' or 'lora_only'.
"""

def __init__(self,
pretrained: Optional[str] = None,
config: Optional[LlamaConfig] = None,
checkpoint: bool = False,
lora_rank: int = 0,
lora_train_bias: str = 'none') -> None:

if pretrained is not None:
model = LlamaForCausalLM.from_pretrained(pretrained)
elif config is not None:
model = LlamaForCausalLM(config)
else:
model = LlamaForCausalLM(LlamaConfig())

if checkpoint:
model.gradient_checkpointing_enable()

# Scalar value head: maps the final hidden state to a single reward score.
value_head = nn.Linear(model.config.hidden_size, 1)
value_head.weight.data.normal_(mean=0.0, std=1 / (model.config.hidden_size + 1))

super().__init__(model, value_head, lora_rank, lora_train_bias)
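
A minimal scoring sketch for the reward model (not part of the diff; the forward signature is assumed from the base RewardModel, shared with the repo's other reward models):

import torch
from transformers import LlamaConfig
from chatgpt.models.llama import LlamaRM

# Tiny randomly initialized reward model for smoke testing (config values are arbitrary).
rm = LlamaRM(config=LlamaConfig(hidden_size=256, num_hidden_layers=2, num_attention_heads=4))
input_ids = torch.randint(0, 32000, (2, 16))  # LlamaConfig's vocab_size defaults to 32000
attention_mask = torch.ones_like(input_ids)
rewards = rm(input_ids, attention_mask)       # assumed: one scalar reward per sequence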