conda create -n tttenv python=3.12
conda activate tttenv
pip install -r requirements.txt
git clone --depth 1 https://github.com/hiyouga/LlamaFactory.git
cd LlamaFactory
pip install -e .
pip install -r requirements/metrics.txt
bash bbh_run_fewshot_pipeline_simple.sh
bash arc_run_fewshot_pipeline_simple.sh
bash mmlu_run_fewshot_pipeline_simple.sh
bash password_run_fewshot_pipeline_simple.sh
bash run_fewshot_pipeline_adapter.sh --model_path /path/to/model --dataset bbh
# dataset-specific entrypoints
bash bbh_run_fewshot_pipeline_adapter.sh --model_path /path/to/model
bash arc_run_fewshot_pipeline_adapter.sh --model_path /path/to/model
bash password_run_fewshot_pipeline_adapter.sh --model_path /path/to/model
bash mmlu_run_fewshot_pipeline_adapter.sh --model_path /path/to/model
# dual-pool GPU settings (default infer=0, train=auto from remaining GPUs)
bash run_fewshot_pipeline_adapter.sh \
--model_path /path/to/model \
--dataset bbh \
--inference_gpu_id 0 \
--train_gpu_ids 1,2,3

Auto split fallback policy (when train_gpu_ids is not provided):
- 4+ GPUs: inference=0, training=remaining GPUs
- 3 GPUs: inference=0, training=1,2
- 2 GPUs: inference=0, training=1
- 1 GPU: inference=0, training=0 (serial mode)