diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml
index 1168bd9..9608e75 100644
--- a/.github/workflows/python-app.yml
+++ b/.github/workflows/python-app.yml
@@ -5,7 +5,7 @@ name: Python application
 
 on:
   push:
-    branches: [ "main" ]
+    branches: [ "main", "dev" ]
   pull_request:
     branches: [ "main" ]
 
@@ -26,14 +26,10 @@ jobs:
     - name: Install dependencies
       run: |
         python -m pip install --upgrade pip
+        pip install poetry
+        poetry lock
+        poetry install
         pip install flake8 pytest
-        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
-    - name: Lint with flake8
-      run: |
-        # stop the build if there are Python syntax errors or undefined names
-        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
-        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
-        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
     - name: Test with pytest
       run: |
-        pytest
+        poetry run pytest
diff --git a/src/api/controllers/__init__.py b/src/api/controllers/__init__.py
new file mode 100644
index 0000000..21b4703
--- /dev/null
+++ b/src/api/controllers/__init__.py
@@ -0,0 +1,3 @@
+from .chat import new_message as chat_new_message
+
+__all__ = ["chat_new_message"]
diff --git a/src/api/controllers/chat.py b/src/api/controllers/chat.py
new file mode 100644
index 0000000..3f8423e
--- /dev/null
+++ b/src/api/controllers/chat.py
@@ -0,0 +1,23 @@
+from src.infrastructure.database import MongoDB, get_service_details
+from src.infrastructure.config import LLM
+from src.services import CustomChat
+
+
+async def new_message(
+    db: MongoDB,
+    model: LLM,
+    message: str,
+    service_name: str,
+) -> str:
+
+    if service_details := get_service_details(service_name, db):
+        prompt = service_details["prompt"]
+    else:
+        prompt = "You are a helpful assistant. Be kind!"
+
+    chat = CustomChat(
+        model=model,
+        sys_prompt=prompt
+    )
+    response = await chat(message)
+    return response
diff --git a/src/api/models/__init__.py b/src/api/models/__init__.py
new file mode 100644
index 0000000..f19d09f
--- /dev/null
+++ b/src/api/models/__init__.py
@@ -0,0 +1,3 @@
+from .api import APIResponse, APIRequest
+
+__all__ = ["APIResponse", "APIRequest"]
diff --git a/src/api/models/api.py b/src/api/models/api.py
new file mode 100644
index 0000000..508623a
--- /dev/null
+++ b/src/api/models/api.py
@@ -0,0 +1,14 @@
+from typing import Optional
+from pydantic import BaseModel
+
+
+class APIResponse(BaseModel):
+    status_code: int
+    status_message: Optional[str] = None
+    response: Optional[dict] = None
+
+
+class APIRequest(BaseModel):
+    message: str
+    user_id: str
+    service_name: Optional[str] = None
diff --git a/src/api/routes/chat.py b/src/api/routes/chat.py
new file mode 100644
index 0000000..898b5df
--- /dev/null
+++ b/src/api/routes/chat.py
@@ -0,0 +1,44 @@
+from fastapi import APIRouter, status, Request, Depends
+
+from src.api.models import APIResponse, APIRequest
+from src.api.controllers import chat_new_message
+
+
+router = APIRouter(
+    prefix="/chat",
+    tags=["chat"],
+    # dependencies=[Depends(validate_user)]
+)
+
+
+@router.get("/", status_code=status.HTTP_200_OK)
+async def router_test() -> APIResponse:
+    return APIResponse(
+        status_code=200,
+        status_message="-- CHAT ROUTER WORKING! --"
+    )
+
+
+@router.post("/new_message", status_code=status.HTTP_200_OK)
+async def new_message(api_request: APIRequest, req: Request) -> APIResponse:
+    try:
+        response = await chat_new_message(
+            req.app.database,
+            req.app.llm,
+            api_request.message,
+            api_request.service_name
+        )
+
+        return APIResponse(
+            status_code=200,
+            response={
+                "user": api_request.message,
+                "ai": response
+            }
+        )
+
+    except Exception as e:
+        return APIResponse(
+            status_code=500,
+            status_message=f"Error: {e}"
+        )
diff --git a/src/infrastructure/config/__init__.py b/src/infrastructure/config/__init__.py
new file mode 100644
index 0000000..13c85f9
--- /dev/null
+++ b/src/infrastructure/config/__init__.py
@@ -0,0 +1,4 @@
+from .settings import settings
+from .llm import LLM
+
+__all__ = ["settings", "LLM"]
diff --git a/src/infrastructure/config/llm.py b/src/infrastructure/config/llm.py
new file mode 100644
index 0000000..6740112
--- /dev/null
+++ b/src/infrastructure/config/llm.py
@@ -0,0 +1,31 @@
+from .settings import settings
+
+from langchain_ollama.llms import OllamaLLM
+from langchain_openai import ChatOpenAI
+
+
+class LLM:
+    def __new__(cls, model_name: str):
+        try:
+            if model_name == "ollama":
+                return OllamaLLM(
+                    model=settings.MODEL_NAME,
+                    base_url=settings.MODEL_URL,
+                    temperature=settings.MODEL_TEMPERATURE
+                )
+
+            elif model_name == "openai":
+                return ChatOpenAI(
+                    model=settings.MODEL_NAME,
+                    base_url=settings.MODEL_URL,
+                    temperature=settings.MODEL_TEMPERATURE,
+                    api_key=settings.MODEL_API_KEY
+                )
+            # More models can be added here
+
+            raise ValueError(f"Model {model_name} not supported")
+
+        except Exception as e:
+            raise ValueError(
+                f"Problem instantiating the model {model_name}: {e}"
+            )
diff --git a/src/infrastructure/config/settings.py b/src/infrastructure/config/settings.py
new file mode 100644
index 0000000..d6a2512
--- /dev/null
+++ b/src/infrastructure/config/settings.py
@@ -0,0 +1,33 @@
+from pydantic_settings import BaseSettings
+
+
+class Settings(BaseSettings):
+
+    # MongoDB
+    MONGO_USER: str
+    MONGO_PASSWORD: str
+    MONGO_HOST: str = "localhost"
+    MONGO_PORT: str = "27017"
+    MONGO_DB: str
+
+    # ChromaDB
+    CHROMA_HOST: str = "localhost"
+    CHROMA_PORT: str = "8000"
+    CHROMA_DB: str
+
+    # General Settings
+    TIMEZONE: str = "America/Sao_Paulo"
+
+    # LLM
+    MODEL: str = "ollama"
+    MODEL_NAME: str = "llama3"
+    MODEL_URL: str = "http://localhost:11434"
+    MODEL_TEMPERATURE: float = 0.2
+    MODEL_API_KEY: str = ''
+
+    class Config:
+        env_file = ".env"
+        extra = "ignore"
+
+
+settings = Settings()
diff --git a/src/services/__init__.py b/src/services/__init__.py
new file mode 100644
index 0000000..575337e
--- /dev/null
+++ b/src/services/__init__.py
@@ -0,0 +1,3 @@
+from .custom_chat.chat import CustomChat
+
+__all__ = ["CustomChat"]
diff --git a/src/services/custom_chat/chat.py b/src/services/custom_chat/chat.py
new file mode 100644
index 0000000..1c0e646
--- /dev/null
+++ b/src/services/custom_chat/chat.py
@@ -0,0 +1,23 @@
+from src.infrastructure.config import LLM
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.tools import BaseTool
+from typing import List
+
+
+class CustomChat:
+    def __init__(self, model: LLM, sys_prompt: str):
+        self.model = model
+        self.sys_prompt = sys_prompt
+        self._prompt_template = ChatPromptTemplate.from_messages([
+            ("system", self.sys_prompt),
+            ("user", "{input}")
+        ])
+
+    async def __call__(self, user_input: str):
+        # TO DO: add bind_tools
+        self.chain = self._prompt_template | self.model
+        response = self.chain.invoke({"input": user_input})
+        return response
+
+    def add_tools(self, tools: List[BaseTool]):
+        pass
diff --git a/src/services/custom_chat/tools.py b/src/services/custom_chat/tools.py
new file mode 100644
index 0000000..8971ea1
--- /dev/null
+++ b/src/services/custom_chat/tools.py
@@ -0,0 +1,4 @@
+"""
+TO DO:
+    - Pode-se adicionar tools para dar um bind_tools no modelo.
+"""