From d52d0baed6096f8abd44435d5e9ef560dec4c84d Mon Sep 17 00:00:00 2001
From: vizsatiz
Date: Thu, 28 Aug 2025 23:00:13 +0530
Subject: [PATCH] fix: response_formatter doesn't work well in vllm + phi4

---
 flo_ai/flo_ai/llm/openai_vllm.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/flo_ai/flo_ai/llm/openai_vllm.py b/flo_ai/flo_ai/llm/openai_vllm.py
index 47a87d0a..78f071fa 100644
--- a/flo_ai/flo_ai/llm/openai_vllm.py
+++ b/flo_ai/flo_ai/llm/openai_vllm.py
@@ -37,14 +37,14 @@ async def generate(
         if messages and messages[0]['role'] == 'system':
             messages[0]['content'] = (
                 messages[0]['content']
-                + '\n\nPlease provide your response in JSON format according to the specified schema.'
+                + f'\n\nPlease provide your response in JSON format according to the specified schema. \n\n {output_schema}'
             )
         else:
             messages.insert(
                 0,
                 {
                     'role': 'system',
-                    'content': 'Please provide your response in JSON format according to the specified schema.',
+                    'content': f'Please provide your response in JSON format according to the specified schema.\n \n {output_schema}',
                 },
             )
 