diff --git a/.github/workflows/inference-test.yml b/.github/workflows/inference-test.yml
index a4b210f..bc4813a 100644
--- a/.github/workflows/inference-test.yml
+++ b/.github/workflows/inference-test.yml
@@ -48,3 +48,28 @@ jobs:
         uses: actions/setup-python@v3
         with:
           python-version: ${{ matrix.python }}
+
+  tests:
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.9", "3.10", "3.11", "3.12"]
+        os: ["ubuntu-latest", "macos-13", "windows-latest"]
+    runs-on: ${{ matrix.os }}
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+          cache: "pip"
+          cache-dependency-path: "setup.py"
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install torch --index-url https://download.pytorch.org/whl/cpu
+          pip install setuptools transformers pytest
+          pip install -e .
+      - name: Test with pytest
+        run: |
+          pytest test.py --durations=99
diff --git a/test.py b/test.py
new file mode 100644
index 0000000..5b009b7
--- /dev/null
+++ b/test.py
@@ -0,0 +1,23 @@
+import pytest
+import torch
+from transformers import AutoModelForCausalLM
+
+
+MODELS_TO_TEST = [
+    "hf-internal-testing/tiny-random-OPTForCausalLM",
+    "hf-internal-testing/tiny-random-GPT2LMHeadModel",
+    "hf-internal-testing/tiny-random-gpt_neo",
+]
+
+
+@pytest.mark.parametrize("model_id", MODELS_TO_TEST)
+@pytest.mark.parametrize("useless", [False, True])
+def test_inference(model_id, useless):
+    if useless:
+        # do nothing
+        pass
+
+    model = AutoModelForCausalLM.from_pretrained(model_id)
+    model.eval()
+    inputs = torch.tensor([[0, 1, 2, 3, 4]])
+    model.generate(inputs, do_sample=True, num_return_sequences=1)