From 1d3e44273cc0ba4fdb88ce0e0b82e90d2db16a1d Mon Sep 17 00:00:00 2001 From: Abdullah Elkady Date: Wed, 4 Oct 2023 19:13:19 -0400 Subject: [PATCH 1/2] Update generate finish reason in test --- tests/async/test_async_generate.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/async/test_async_generate.py b/tests/async/test_async_generate.py index 0ca5d6304..327862499 100644 --- a/tests/async/test_async_generate.py +++ b/tests/async/test_async_generate.py @@ -57,7 +57,9 @@ async def test_raise_ex(async_client): @pytest.mark.asyncio async def test_async_generate_stream(async_client): - res = await async_client.generate("Hey!", max_tokens=5, stream=True) + res = await async_client.generate( + "Hey!", max_tokens=5, stream=True, temperature=0 + ) # setting temp=0 to avoid random finish reasons final_text = "" async for token in res: assert isinstance(token.text, str) @@ -67,10 +69,10 @@ async def test_async_generate_stream(async_client): final_text += token.text assert res.id != None - assert res.finish_reason, "COMPLETE" + assert res.finish_reason, "MAX_TOKENS" assert isinstance(res.generations, Generations) - assert res.generations[0].finish_reason == "COMPLETE" + assert res.generations[0].finish_reason == "MAX_TOKENS" assert res.generations[0].prompt == "Hey!" 
assert res.generations[0].text == final_text assert res.generations[0].id != None From f510caef786a7b24bd5c37a972a32b2638329715 Mon Sep 17 00:00:00 2001 From: Abdullah Elkady Date: Wed, 4 Oct 2023 19:22:21 -0400 Subject: [PATCH 2/2] Remove finish reason check from SDK --- tests/async/test_async_generate.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/async/test_async_generate.py b/tests/async/test_async_generate.py index 327862499..c909e0adc 100644 --- a/tests/async/test_async_generate.py +++ b/tests/async/test_async_generate.py @@ -57,9 +57,7 @@ async def test_raise_ex(async_client): @pytest.mark.asyncio async def test_async_generate_stream(async_client): - res = await async_client.generate( - "Hey!", max_tokens=5, stream=True, temperature=0 - ) # setting temp=0 to avoid random finish reasons + res = await async_client.generate("Hey!", max_tokens=5, stream=True) final_text = "" async for token in res: assert isinstance(token.text, str) @@ -69,10 +67,8 @@ async def test_async_generate_stream(async_client): final_text += token.text assert res.id != None - assert res.finish_reason, "MAX_TOKENS" assert isinstance(res.generations, Generations) - assert res.generations[0].finish_reason == "MAX_TOKENS" assert res.generations[0].prompt == "Hey!" assert res.generations[0].text == final_text assert res.generations[0].id != None