Merged

m #192

Changes from all commits (132 commits)
32e7f99
[kernel] update triton init #4740 (#4740)
oahzxl Sep 18, 2023
b5f9e37
[legacy] clean up legacy code (#4743)
ver217 Sep 18, 2023
3c6b831
[format] applied code formatting on changed files in pull request 474…
github-actions[bot] Sep 18, 2023
079bf3c
[misc] update pre-commit and run all files (#4752)
ver217 Sep 19, 2023
10513f2
[doc] explain suitable use case for each plugin
ppt0011 Sep 19, 2023
a04337b
[doc] put individual plugin explanation in front
ppt0011 Sep 19, 2023
e10d9f0
[doc] add model examples for each plugin
ppt0011 Sep 19, 2023
4d7537b
[doc] put native colossalai plugins first in description section
ppt0011 Sep 20, 2023
07c2e3d
Merge pull request #4757 from ppt0011/main
ppt0011 Sep 20, 2023
7b9b864
[chat]: update rm, add wandb and fix bugs (#4471)
cwher Sep 20, 2023
c0a0337
[shardformer] fix master param sync for hybrid plugin/rewrite unwrapp…
Sep 20, 2023
df66741
[bug] fix get_default_parser in examples (#4764)
Sep 21, 2023
66f3926
[doc] clean up outdated docs (#4765)
ver217 Sep 21, 2023
493a5ef
[doc] add shardformer doc to sidebar (#4768)
Sep 21, 2023
901ab1e
[chat]: add lora merge weights config (#4766)
cwher Sep 21, 2023
3e05c07
[lazy] support torch 2.0 (#4763)
ver217 Sep 21, 2023
1e0e080
[bug] Fix the version check bug in colossalai run when generating the…
littsk Sep 22, 2023
946ab56
[feature] add gptq for inference (#4754)
Xu-Kai Sep 22, 2023
ce7ade3
[inference] chatglm2 infer demo (#4724)
CjhHa1 Sep 22, 2023
4146f1c
[release] update version (#4775)
ver217 Sep 22, 2023
74aa7d9
initial commit: add colossal llama 2 (#4784)
TongLi3701 Sep 24, 2023
ce77785
[feature] ColossalEval: Evaluation Pipeline for LLMs (#4786)
chengeharrison Sep 24, 2023
d512a4d
[doc] add llama2 domain-specific solution news (#4789)
binmakeswell Sep 25, 2023
26cd6d8
[fix] fix weekly runing example (#4787)
flybird11111 Sep 25, 2023
a2db755
[doc] polish shardformer doc (#4779)
Sep 26, 2023
64a08b2
[checkpointio] support unsharded checkpointIO for hybrid parallel (#4…
Sep 26, 2023
bd01467
update readme
TongLi3701 Sep 26, 2023
4965c0d
[lazy] support from_pretrained (#4801)
ver217 Sep 26, 2023
8cbce61
update
TongLi3701 Sep 26, 2023
62b6af1
Merge pull request #4805 from TongLi3701/docs/fix
Desperado-Jia Sep 26, 2023
b6cf0ac
[hotfix] change llama2 Colossal-LLaMA-2 script filename (#4800)
Chandler-Bing Sep 26, 2023
a227063
[misc] add last_epoch in CosineAnnealingWarmupLR (#4778)
hova88 Sep 26, 2023
da15fdb
[doc] add lazy init docs (#4808)
ver217 Sep 27, 2023
54b3ad8
[hotfix] fix norm type error in zero optimizer (#4795)
littsk Sep 27, 2023
11f1e42
[hotfix] Correct several erroneous code comments (#4794)
littsk Sep 27, 2023
fb46d05
[format] applied code formatting on changed files in pull request 459…
github-actions[bot] Sep 27, 2023
bbbcac2
fix format (#4815)
TongLi3701 Sep 27, 2023
be400a0
[chat] fix gemini strategy (#4698)
flybird11111 Sep 27, 2023
1fa8c5e
Update Qwen-7B results (#4821)
chengeharrison Sep 27, 2023
822051d
[doc] update slack link (#4823)
binmakeswell Sep 27, 2023
c3bef20
add autotune (#4822)
Xu-Kai Sep 28, 2023
ed06731
update Colossal (#4832)
TongLi3701 Sep 28, 2023
3a74eb4
[Infer] Colossal-Inference serving example w/ TorchServe (single GPU …
yuanheng-zhao Oct 2, 2023
573f270
[Infer] Serving example w/ ray-serve (multiple GPU case) (#4841)
yuanheng-zhao Oct 2, 2023
013a4be
[inference]fix import bug and delete down useless init (#4830)
CjhHa1 Oct 4, 2023
d1fcc0f
[infer] fix test bug (#4838)
Xu-Kai Oct 4, 2023
db40e08
[test] modify model supporting part of low_level_zero plugin (includi…
Oct 5, 2023
c97a352
fix: typo in comment of low_level_zero plugin
shawlleyw Oct 5, 2023
81ee91f
Merge pull request #4858 from Shawlleyw/main
ppt0011 Oct 6, 2023
ad23460
Merge pull request #4856 from KKZ20/test/model_support_for_low_level_…
ppt0011 Oct 6, 2023
cb3a25a
[checkpointio] hotfix torch 2.0 compatibility (#4824)
ver217 Oct 7, 2023
eef96e0
polish code for gptq (#4793)
littsk Sep 25, 2023
07ed155
[NFC] polish colossalai/inference/quant/gptq/cai_gptq/__init__.py cod…
MichelleMa8 Sep 27, 2023
cd6a962
[NFC] polish code style (#4799)
Camille7777 Sep 27, 2023
8aed02b
[nfc] fix minor typo in README (#4846)
blagoySimandov Oct 7, 2023
6a21f96
[doc] update advanced tutorials, training gpt with hybrid parallelism…
flybird11111 Oct 10, 2023
3043d5d
Update modelscope link in README.md
Camille7777 Oct 10, 2023
d6c4b9b
Update main README.md
Camille7777 Oct 10, 2023
afe10a8
Update README.md
Camille7777 Oct 10, 2023
652adc2
Update README.md
Camille7777 Oct 10, 2023
08a9f76
[Pipeline Inference] Sync pipeline inference branch to main (#4820)
FoolPlayer Oct 11, 2023
fdec650
fix test llama (#4884)
Xu-Kai Oct 11, 2023
1dcaf24
[doc] add reminder for issue encountered with hybrid adam
ppt0011 Oct 11, 2023
ffd9a3c
[hotfix] fix bug in sequence parallel test (#4887)
littsk Oct 11, 2023
c1fab95
Merge pull request #4889 from ppt0011/main
ppt0011 Oct 12, 2023
df63564
[gemini] support amp o3 for gemini (#4872)
ver217 Oct 12, 2023
83b52c5
[feature] Add clip_grad_norm for hybrid_parallel_plugin (#4837)
littsk Oct 12, 2023
39f2582
[hotfix] fix lr scheduler bug in torch 2.0 (#4864)
Oct 12, 2023
77a9328
[inference] add llama2 support (#4898)
Xu-Kai Oct 13, 2023
a0684e7
[feature] support no master weights option for low level zero plugin …
KKZ20 Oct 13, 2023
611a5a8
[inference] Add smmoothquant for llama (#4904)
Xu-Kai Oct 16, 2023
7768afb
Update flash_attention_patch.py
Orion-Zheng Oct 13, 2023
4f68b3f
[kernel] support pure fp16 for cpu adam and update gemini optim tests…
ver217 Oct 16, 2023
a41cf88
[format] applied code formatting on changed files in pull request 490…
github-actions[bot] Oct 17, 2023
21ba89c
[gemini] support gradient accumulation (#4869)
Oct 17, 2023
1f5d2e8
[hotfix] fix torch 2.0 compatibility (#4936)
ver217 Oct 18, 2023
c7aa319
[test] add no master test for low level zero plugin (#4934)
KKZ20 Oct 18, 2023
486d06a
[format] applied code formatting on changed files in pull request 482…
github-actions[bot] Oct 18, 2023
1100910
[nfc] fix some typo with colossalai/ docs/ etc. (#4920)
digger-yu Oct 18, 2023
3a41e83
[Refactor] Integrated some lightllm kernels into token-attention (#4…
tiandiao123 Oct 19, 2023
b8e770c
[test] merge old components to test to model zoo (#4945)
ver217 Oct 20, 2023
785802e
[inference] add reference and fix some bugs (#4937)
Xu-Kai Oct 20, 2023
c6cd629
[Inference]ADD Bench Chatglm2 script (#4963)
CjhHa1 Oct 24, 2023
1db6727
[Pipeline inference] Combine kvcache with pipeline inference (#4938)
FoolPlayer Oct 27, 2023
4e4a10c
updated c++17 compiler flags (#4983)
kurisusnowdeng Oct 27, 2023
cf579ff
[Inference] Dynamic Batching Inference, online and offline (#4953)
CjhHa1 Oct 30, 2023
459a88c
[Kernels]Updated Triton kernels into 2.1.0 and adding flash-decoding …
tiandiao123 Oct 30, 2023
abe071b
fix ColossalEval (#4992)
chengeharrison Oct 31, 2023
4f0234f
[doc]Update doc for colossal-inference (#4989)
tiandiao123 Oct 31, 2023
be82b5d
[hotfix] Fix the bug where process groups were not being properly rel…
littsk Oct 31, 2023
c040d70
[hotfix] fix the bug of repeatedly storing param group (#4951)
Oct 31, 2023
335cb10
[doc] add supported feature diagram for hybrid parallel plugin (#4996)
ppt0011 Oct 31, 2023
b6696be
[Pipeline Inference] Merge pp with tp (#4993)
FoolPlayer Nov 1, 2023
8993c8a
[release] update version (#4995)
ver217 Nov 1, 2023
dc003c3
[moe] merge moe into main (#4978)
oahzxl Nov 2, 2023
d99b2c9
[hotfix] fix grad accumulation plus clipping for gemini (#5002)
Nov 2, 2023
1a3315e
[hotfix] Add layer norm gradients all-reduce for sequence parallel (#…
littsk Nov 3, 2023
c36e782
[format] applied code formatting on changed files in pull request 492…
github-actions[bot] Nov 6, 2023
ef4c14a
[Inference] Fix bug in ChatGLM2 Tensor Parallelism (#5014)
CjhHa1 Nov 7, 2023
67f5331
[misc] add code owners (#5024)
ver217 Nov 8, 2023
f71e63b
[moe] support optimizer checkpoint (#5015)
oahzxl Nov 8, 2023
239cd92
Support mtbench (#5025)
chengeharrison Nov 9, 2023
7244412
[moe]: fix ep/tp tests, add hierarchical all2all (#4982)
cwher Nov 9, 2023
a448938
[shardformer] Fix serialization error with Tensor Parallel state savi…
imgaojun Nov 9, 2023
576a2f7
[gemini] gemini support tensor parallelism. (#4942)
flybird11111 Nov 10, 2023
70885d7
[hotfix] Suport extra_kwargs in ShardConfig (#5031)
KKZ20 Nov 10, 2023
43ad0d9
fix wrong EOS token in ColossalChat
Orion-Zheng Nov 14, 2023
28052a7
[Kernels]Update triton kernels into 2.1.0 (#5046)
tiandiao123 Nov 16, 2023
b2ad0d9
[pipeline,shardformer] Fix p2p efficiency in pipeline, allow skipping…
zeyugao Nov 16, 2023
3e02154
[gemini] gemini support extra-dp (#5043)
flybird11111 Nov 16, 2023
97cd0cd
[shardformer] fix llama error when transformers upgraded. (#5055)
flybird11111 Nov 16, 2023
3c08f17
[hotfix]: modify create_ep_hierarchical_group and add test (#5032)
cwher Nov 17, 2023
bc09b95
[exampe] fix llama example' loss error when using gemini plugin (#5060)
flybird11111 Nov 18, 2023
fd6482a
[inference] Refactor inference architecture (#5057)
Xu-Kai Nov 19, 2023
bce9197
[Kernels]added flash-decoidng of triton (#5063)
tiandiao123 Nov 20, 2023
8d56c9c
[misc] remove outdated submodule (#5070)
ver217 Nov 20, 2023
e5ce4c8
[npu] add npu support for gemini and zero (#5067)
ver217 Nov 20, 2023
0c7d8be
[hotfix/hybridengine] fix bug when tp*pp size = 1 (#5069)
FoolPlayer Nov 20, 2023
fb103cf
[inference] update examples and engine (#5073)
Xu-Kai Nov 20, 2023
8921a73
[format] applied code formatting on changed files in pull request 506…
github-actions[bot] Nov 20, 2023
4e3959d
[hotfix/hybridengine] Fix init model with random parameters in benchm…
FoolPlayer Nov 20, 2023
1cd7efc
[inference] refactor examples and fix schedule (#5077)
ver217 Nov 21, 2023
dce05da
fix thrust-transform-reduce error (#5078)
imgaojun Nov 21, 2023
fd3567e
[nfc] fix typo in docs/ (#4972)
digger-yu Nov 21, 2023
0d48230
[nfc] fix typo and author name (#5089)
digger-yu Nov 22, 2023
4ccb9de
[gemini]fix gemini optimzer, saving Shardformer in Gemini got list as…
flybird11111 Nov 22, 2023
75af66c
[Hotfix] Fix model policy matching strategy in ShardFormer (#5064)
KKZ20 Nov 22, 2023
aae4966
[shardformer]fix flash attention, when mask is casual, just don't unp…
flybird11111 Nov 22, 2023
3acbf6d
[npu] add npu support for hybrid plugin and llama (#5090)
oahzxl Nov 22, 2023
e53e729
[Feature] Add document retrieval QA (#5020)
YeAnbang Nov 23, 2023
68fcaa2
remove duplicate import (#5100)
oahzxl Nov 23, 2023
2bdf76f
fix typo change lazy_iniy to lazy_init (#5099)
digger-yu Nov 24, 2023
22 changes: 0 additions & 22 deletions .flake8

This file was deleted.

1 change: 1 addition & 0 deletions .github/CODEOWNERS
@@ -0,0 +1 @@
* @hpcaitech/colossalai-qa
2 changes: 1 addition & 1 deletion .github/ISSUE_TEMPLATE/config.yml
@@ -1,7 +1,7 @@
blank_issues_enabled: true
contact_links:
- name: ❓ Simple question - Slack Chat
url: https://join.slack.com/t/colossalaiworkspace/shared_invite/zt-z7b26eeb-CBp7jouvu~r0~lcFzX832w
url: https://github.com/hpcaitech/public_assets/tree/main/colossalai/contact/slack
about: This issue tracker is not for technical support. Please use our Slack chat, and ask the community for help.
- name: ❓ Simple question - WeChat
url: https://github.com/hpcaitech/ColossalAI/blob/main/docs/images/WeChat.png
3 changes: 2 additions & 1 deletion .github/workflows/build_on_pr.yml
@@ -141,7 +141,7 @@ jobs:
runs-on: [self-hosted, gpu]
container:
image: hpcaitech/pytorch-cuda:1.12.0-11.3.0
options: --gpus all --rm -v /data/scratch/cifar-10:/data/scratch/cifar-10
options: --gpus all --rm -v /data/scratch/cifar-10:/data/scratch/cifar-10 -v /data/scratch/llama-tiny:/data/scratch/llama-tiny
timeout-minutes: 60
defaults:
run:
@@ -214,6 +214,7 @@ jobs:
NCCL_SHM_DISABLE: 1
LD_LIBRARY_PATH: /github/home/.tensornvme/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64
TESTMON_CORE_PKGS: /__w/ColossalAI/ColossalAI/requirements/requirements.txt,/__w/ColossalAI/ColossalAI/requirements/requirements-test.txt
LLAMA_PATH: /data/scratch/llama-tiny

- name: Store Testmon Cache
run: |
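These CI edits mount a tiny LLaMA checkpoint into the test container and expose it as LLAMA_PATH; the same change recurs in the scheduled and compatibility workflows below. A minimal sketch of how a test might consume the variable (the test body and skip condition are assumptions, not code from this PR):

```python
import os

import pytest
from transformers import LlamaForCausalLM

LLAMA_PATH = os.environ.get("LLAMA_PATH")


@pytest.mark.skipif(LLAMA_PATH is None, reason="tiny llama checkpoint not mounted")
def test_load_tiny_llama():
    # load the pre-mounted checkpoint instead of downloading weights in CI
    model = LlamaForCausalLM.from_pretrained(LLAMA_PATH)
    assert model.config.model_type == "llama"
```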
3 changes: 2 additions & 1 deletion .github/workflows/build_on_schedule.yml
@@ -13,7 +13,7 @@ jobs:
runs-on: [self-hosted, 8-gpu]
container:
image: hpcaitech/pytorch-cuda:1.12.0-11.3.0
options: --gpus all --rm -v /data/scratch/cifar-10:/data/scratch/cifar-10
options: --gpus all --rm -v /data/scratch/cifar-10:/data/scratch/cifar-10 -v /data/scratch/llama-tiny:/data/scratch/llama-tiny
timeout-minutes: 40
steps:
- name: Check GPU Availability # ensure all GPUs have enough memory
@@ -64,6 +64,7 @@ jobs:
env:
DATA: /data/scratch/cifar-10
LD_LIBRARY_PATH: /github/home/.tensornvme/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64
LLAMA_PATH: /data/scratch/llama-tiny

- name: Notify Lark
id: message-preparation
3 changes: 2 additions & 1 deletion .github/workflows/compatiblity_test_on_dispatch.yml
@@ -50,7 +50,7 @@ jobs:
matrix: ${{fromJson(needs.matrix_preparation.outputs.matrix)}}
container:
image: ${{ matrix.container }}
options: --gpus all --rm -v /data/scratch/cifar-10:/data/scratch/cifar-10
options: --gpus all --rm -v /data/scratch/cifar-10:/data/scratch/cifar-10 -v /data/scratch/llama-tiny:/data/scratch/llama-tiny
timeout-minutes: 120
steps:
- name: Install dependencies
@@ -92,3 +92,4 @@ jobs:
DATA: /data/scratch/cifar-10
NCCL_SHM_DISABLE: 1
LD_LIBRARY_PATH: /github/home/.tensornvme/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64
LLAMA_PATH: /data/scratch/llama-tiny
3 changes: 2 additions & 1 deletion .github/workflows/compatiblity_test_on_pr.yml
@@ -41,7 +41,7 @@ jobs:
matrix: ${{fromJson(needs.matrix_preparation.outputs.matrix)}}
container:
image: ${{ matrix.container }}
options: --gpus all --rm -v /data/scratch/cifar-10:/data/scratch/cifar-10
options: --gpus all --rm -v /data/scratch/cifar-10:/data/scratch/cifar-10 -v /data/scratch/llama-tiny:/data/scratch/llama-tiny
timeout-minutes: 120
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-run-test-${{ matrix.container }}
@@ -87,3 +87,4 @@ jobs:
DATA: /data/scratch/cifar-10
NCCL_SHM_DISABLE: 1
LD_LIBRARY_PATH: /github/home/.tensornvme/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64
LLAMA_PATH: /data/scratch/llama-tiny
3 changes: 2 additions & 1 deletion .github/workflows/compatiblity_test_on_schedule.yml
@@ -38,7 +38,7 @@ jobs:
matrix: ${{fromJson(needs.matrix_preparation.outputs.matrix)}}
container:
image: ${{ matrix.container }}
options: --gpus all --rm -v /data/scratch/cifar-10:/data/scratch/cifar-10
options: --gpus all --rm -v /data/scratch/cifar-10:/data/scratch/cifar-10 -v /data/scratch/llama-tiny:/data/scratch/llama-tiny
timeout-minutes: 120
steps:
- name: Install dependencies
@@ -85,6 +85,7 @@ jobs:
DATA: /data/scratch/cifar-10
NCCL_SHM_DISABLE: 1
LD_LIBRARY_PATH: /github/home/.tensornvme/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64
LLAMA_PATH: /data/scratch/llama-tiny

- name: Notify Lark
id: message-preparation
2 changes: 1 addition & 1 deletion .github/workflows/doc_test_on_pr.yml
@@ -89,7 +89,7 @@ jobs:
- name: Install ColossalAI
run: |
source activate pytorch
pip install -v .
CUDA_EXT=1 pip install -v .

- name: Test the Doc
run: |
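Prefixing the install with CUDA_EXT=1 asks the setup script to compile CUDA kernels at install time rather than deferring to runtime builds; the same change recurs in the example-check workflows below. A hedged sketch of the common way such a flag is read in a setup.py (illustrative pattern only, not ColossalAI's actual build script):

```python
import os

# opt-in flag: `CUDA_EXT=1 pip install -v .` compiles CUDA extensions up front
build_cuda_ext = os.environ.get("CUDA_EXT", "0") == "1"

ext_modules = []
if build_cuda_ext:
    # CUDA extension definitions would be appended here and built by pip
    pass
```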
2 changes: 1 addition & 1 deletion .github/workflows/doc_test_on_schedule.yml
@@ -32,7 +32,7 @@ jobs:

- name: Install ColossalAI
run: |
pip install -v .
CUDA_EXT=1 pip install -v .

- name: Install Doc Test Requirements
run: |
2 changes: 1 addition & 1 deletion .github/workflows/example_check_on_dispatch.yml
@@ -53,7 +53,7 @@ jobs:
uses: actions/checkout@v3
- name: Install Colossal-AI
run: |
pip install -v .
CUDA_EXT=1 pip install -v .
- name: Test the example
run: |
dir=${{ matrix.directory }}
2 changes: 1 addition & 1 deletion .github/workflows/example_check_on_pr.yml
@@ -88,7 +88,7 @@ jobs:

- name: Install Colossal-AI
run: |
pip install -v .
CUDA_EXT=1 pip install -v .

- name: Test the example
run: |
2 changes: 1 addition & 1 deletion .github/workflows/example_check_on_schedule.yml
@@ -42,7 +42,7 @@ jobs:

- name: Install Colossal-AI
run: |
pip install -v .
CUDA_EXT=1 pip install -v .

- name: Traverse all files
run: |
4 changes: 3 additions & 1 deletion .github/workflows/release_test_pypi_before_merge.yml
@@ -27,7 +27,9 @@ jobs:
echo $new_version > ./version.txt
echo "version=$new_version" >> $GITHUB_OUTPUT

- run: python setup.py sdist build
- run: |
pip install --upgrade pip
python setup.py sdist build

# publish to PyPI if executed on the main branch
- name: Publish package to PyPI
2 changes: 1 addition & 1 deletion .github/workflows/run_chatgpt_examples.yml
@@ -49,5 +49,5 @@ jobs:
NCCL_SHM_DISABLE: 1
MAX_JOBS: 8
SFT_DATASET: /data/scratch/github_actions/chat/data.json
PROMPT_PATH: /data/scratch/github_actions/chat/prompts_en.jsonl
PROMPT_DATASET: /data/scratch/github_actions/chat/prompts_en.jsonl
PRETRAIN_DATASET: /data/scratch/github_actions/chat/alpaca_data.json
54 changes: 54 additions & 0 deletions .github/workflows/run_colossalqa_unit_tests.yml
@@ -0,0 +1,54 @@
name: Run colossalqa unit tests

on:
pull_request:
types: [synchronize, opened, reopened]
paths:
- 'applications/ColossalQA/colossalqa/**'
- 'applications/ColossalQA/requirements.txt'
- 'applications/ColossalQA/setup.py'
- 'applications/ColossalQA/tests/**'
- 'applications/ColossalQA/pytest.ini'

jobs:
tests:
name: Run colossalqa unit tests
if: |
github.event.pull_request.draft == false &&
github.base_ref == 'main' &&
github.event.pull_request.base.repo.full_name == 'hpcaitech/ColossalAI'
runs-on: [self-hosted, gpu]
container:
image: hpcaitech/pytorch-cuda:1.12.0-11.3.0
volumes:
- /data/scratch/test_data_colossalqa:/data/scratch/test_data_colossalqa
- /data/scratch/llama-tiny:/data/scratch/llama-tiny
options: --gpus all --rm
timeout-minutes: 30
defaults:
run:
shell: bash
steps:
- name: Checkout ColossalAI
uses: actions/checkout@v2

- name: Install colossalqa
run: |
cd applications/ColossalQA
pip install -e .

- name: Execute Unit Testing
run: |
cd applications/ColossalQA
pytest tests/
env:
NCCL_SHM_DISABLE: 1
MAX_JOBS: 8
ZH_MODEL_PATH: bigscience/bloom-560m
ZH_MODEL_NAME: bloom
EN_MODEL_PATH: bigscience/bloom-560m
EN_MODEL_NAME: bloom
TEST_DATA_PATH_EN: /data/scratch/test_data_colossalqa/companies.txt
TEST_DATA_PATH_ZH: /data/scratch/test_data_colossalqa/companies_zh.txt
TEST_DOCUMENT_LOADER_DATA_PATH: /data/scratch/test_data_colossalqa/tests/*
SQL_FILE_PATH: /data/scratch/test_data_colossalqa/sql_file_path
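The new workflow wires its test fixtures through environment variables. A sketch of how a ColossalQA test could pick them up (the variable names come from the workflow above; the reading code itself is a hypothetical illustration):

```python
import os

# model identifiers and data paths injected by the CI job
zh_model_path = os.environ["ZH_MODEL_PATH"]     # e.g. bigscience/bloom-560m
en_data_path = os.environ["TEST_DATA_PATH_EN"]  # mounted test corpus

with open(en_data_path, encoding="utf-8") as f:
    documents = f.read().splitlines()
```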
12 changes: 6 additions & 6 deletions .github/workflows/scripts/check_doc_i18n.py
@@ -22,13 +22,13 @@ def compare_dirs(dir1, dir2):

# If the corresponding item doesn't exist in the second directory, the directories are different
if not os.path.exists(item_path2):
print(f'Found mismatch: {item_path1}, {item_path2}')
print(f"Found mismatch: {item_path1}, {item_path2}")
return False

# If the corresponding item is a directory, we compare the two directories recursively
if os.path.isdir(item_path1) and os.path.isdir(item_path2):
if not compare_dirs(item_path1, item_path2):
print(f'Found mismatch: {item_path1}, {item_path2}')
print(f"Found mismatch: {item_path1}, {item_path2}")
return False

# both are files
@@ -37,16 +37,16 @@ def compare_dirs(dir1, dir2):

# If the corresponding item is not a file or a directory, the directories are different
else:
print(f'Found mismatch: {item_path1}, {item_path2}')
print(f"Found mismatch: {item_path1}, {item_path2}")
return False

# If all items are the same, the directories are the same
return True


if __name__ == '__main__':
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--directory', help="The directory where the multi-language source files are kept.")
parser.add_argument("-d", "--directory", help="The directory where the multi-language source files are kept.")
args = parser.parse_args()

i18n_folders = os.listdir(args.directory)
@@ -56,7 +56,7 @@ def compare_dirs(dir1, dir2):
for i in range(1, len(i18n_folders)):
dir1 = i18n_folders[0]
dir2 = i18n_folders[i]
print(f'comparing {dir1} vs {dir2}')
print(f"comparing {dir1} vs {dir2}")
match = compare_dirs(i18n_folders[0], i18n_folders[i])

if not match:
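compare_dirs recursively checks that two locale trees contain the same files. A hypothetical direct call (the folder names are assumed for illustration, and the script must be importable as a module):

```python
# hypothetical usage; locale folder names are assumed, not taken from this PR
from check_doc_i18n import compare_dirs

if not compare_dirs("docs/source/en", "docs/source/zh-Hans"):
    raise SystemExit("i18n doc trees are out of sync")
```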
@@ -4,24 +4,24 @@

def check_inputs(input_list):
for path in input_list:
real_path = os.path.join('examples', path)
real_path = os.path.join("examples", path)
if not os.path.exists(real_path):
return False
return True


def main():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--fileNameList', type=str, help="List of file names")
parser.add_argument("-f", "--fileNameList", type=str, help="List of file names")
args = parser.parse_args()
name_list = args.fileNameList.split(",")
is_correct = check_inputs(name_list)

if is_correct:
print('success')
print("success")
else:
print('failure')
print("failure")


if __name__ == '__main__':
if __name__ == "__main__":
main()
10 changes: 5 additions & 5 deletions .github/workflows/scripts/example_checks/check_example_weekly.py
@@ -17,21 +17,21 @@ def show_files(path, all_files):


def join(input_list, sep=None):
return (sep or ' ').join(input_list)
return (sep or " ").join(input_list)


def main():
contents = show_files('examples/', [])
contents = show_files("examples/", [])
all_loc = []
for file_loc in contents:
split_loc = file_loc.split('/')
split_loc = file_loc.split("/")
# must have two sub-folder levels after examples folder, such as examples/images/vit is acceptable, examples/images/README.md is not, examples/requirements.txt is not.
if len(split_loc) >= 4:
re_loc = '/'.join(split_loc[1:3])
re_loc = "/".join(split_loc[1:3])
if re_loc not in all_loc:
all_loc.append(re_loc)
print(all_loc)


if __name__ == '__main__':
if __name__ == "__main__":
main()
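The depth rule above only accepts paths with two sub-folder levels under examples/. A small self-contained illustration (input paths invented for the example):

```python
# paths invented to illustrate the depth rule in check_example_weekly.py
for p in ["examples/images/vit/train.py", "examples/images/README.md", "examples/requirements.txt"]:
    parts = p.split("/")
    print(p, "->", "/".join(parts[1:3]) if len(parts) >= 4 else "skipped")
# examples/images/vit/train.py -> images/vit
# examples/images/README.md -> skipped
# examples/requirements.txt -> skipped
```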
@@ -3,7 +3,7 @@

def main():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--fileNameList', type=str, help="The list of changed files")
parser.add_argument("-f", "--fileNameList", type=str, help="The list of changed files")
args = parser.parse_args()
name_list = args.fileNameList.split(":")
folder_need_check = set()
@@ -15,10 +15,10 @@ def main():
# - application
# - file
if loc.split("/")[0] == "examples" and len(loc.split("/")) >= 4:
folder_need_check.add('/'.join(loc.split("/")[1:3]))
folder_need_check.add("/".join(loc.split("/")[1:3]))
# Output the result using print. Then the shell can get the values.
print(list(folder_need_check))


if __name__ == '__main__':
if __name__ == "__main__":
main()