diff --git a/commit/api/api_explorer.py b/commit/api/api_explorer.py
index d320d39..82e88da 100644
--- a/commit/api/api_explorer.py
+++ b/commit/api/api_explorer.py
@@ -13,7 +13,7 @@ def get_apis_for_project(project_branch: str):
     apis = json.loads(branch_doc.whitelisted_apis).get("apis", []) if branch_doc.whitelisted_apis else []
     documentation = json.loads(branch_doc.documentation).get("apis", []) if branch_doc.documentation else []
 
-    # print('documentation', len(documentation))
+    print('documentation', len(documentation))
     for api in apis:
         # find the documentation for the api whose function_name equals to name and path same as path
         for doc in documentation:
diff --git a/commit/api/generate_documentation.py b/commit/api/generate_documentation.py
index 675f64a..fbb04be 100644
--- a/commit/api/generate_documentation.py
+++ b/commit/api/generate_documentation.py
@@ -6,7 +6,7 @@
 
 
 def generate_docs_for_apis(api_definitions):
-    max_tokens_per_request = 10000 # This is a safe limit to avoid hitting the max token limit
+    max_tokens_per_request = 1800 # This is a safe limit to avoid hitting the max token limit
     chunks = chunk_data(api_definitions, max_tokens_per_request)
 
     all_docs = []
@@ -84,7 +84,6 @@ def generate_docs_for_chunk(api_chunk):
     cleaned_response = clean_response(response_text)
 
     try:
-        print(f"Type of cleaned_response: {type(cleaned_response)}")
         # Check if cleaned_response is already a list (or the expected type)
         if isinstance(cleaned_response, list):
             return cleaned_response
diff --git a/commit/commit/doctype/open_ai_settings/open_ai_settings.py b/commit/commit/doctype/open_ai_settings/open_ai_settings.py
index 28c0f83..825edff 100644
--- a/commit/commit/doctype/open_ai_settings/open_ai_settings.py
+++ b/commit/commit/doctype/open_ai_settings/open_ai_settings.py
@@ -26,10 +26,9 @@ def open_ai_call(message):
     response = client.chat.completions.create(
         model="gpt-3.5-turbo",
         messages=message,
-        max_tokens=2500, # Adjust max tokens as needed
-        n=1,
-        stop=None,
-        temperature=0.5,
+        max_tokens=3900,
+        temperature=0.3, # Lower temperature for more deterministic output
+        stop=["Function Name:", "\n\n"] # Stop sequence to separate functions
     )
     return response.choices[0].message.content
\ No newline at end of file