Closed
21 commits
3ff98cf
Initial Commit for Mistral Connector with Integration Tests for Chat …
nmoeller Jun 24, 2024
a71fb38
Merge branch 'main' into issue-6499-Mistral-Ai-Connector
nmoeller Jun 24, 2024
f57151e
Added Env Variables to Pipeline, Changed Env Prefix and Integration T…
nmoeller Jun 25, 2024
dacf454
Merge branch 'issue-6499-Mistral-Ai-Connector' of https://github.com/…
nmoeller Jun 25, 2024
43e5488
added first unit tests
nmoellerms Jun 25, 2024
a18a7da
fixed precommits
nmoellerms Jun 25, 2024
d148b30
Moved Function Calling to BaseClass, Integrated Feedback
nmoellerms Jun 28, 2024
7bb2e39
Merge remote-tracking branch 'origin/main' into issue-6499-Mistral-Ai…
nmoellerms Jun 28, 2024
59d2811
migrated to FunctionChoiceBehavior
nmoellerms Jun 28, 2024
1f0ca82
Merge branch 'main' into issue-6499-Mistral-Ai-Connector
nmoeller Jun 28, 2024
6af18b1
Merge remote-tracking branch 'origin/main' into issue-6499-Mistral-Ai…
nmoeller Jun 29, 2024
2e8fea7
adding mistral to new pyproject style
nmoeller Jun 29, 2024
d498df1
Added Embedding Support
nmoeller Jun 29, 2024
74ec2d0
Parametrized Embedding Tests
nmoeller Jun 29, 2024
d694a23
forwarding open ai invoke to chat message method
nmoeller Jun 30, 2024
570f54e
added tests for FunctionBehaviorFlow with Mistral
nmoeller Jun 30, 2024
730f332
fixed integration test
nmoeller Jun 30, 2024
212200a
added more test cases for concept
nmoellerms Jul 1, 2024
e9cdf09
added ollama to test abstraction concept
nmoellerms Jul 1, 2024
f2110d9
added mistral env to ci/cd & fixed NoneInvoke TestCase
nmoellerms Jul 1, 2024
5d97bb2
Merge branch 'main' into issue-6499-Mistral-Ai-Connector
nmoeller Jul 2, 2024
4 changes: 4 additions & 0 deletions .github/workflows/python-integration-tests.yml
@@ -96,6 +96,8 @@ jobs:
AZURE_KEY_VAULT_CLIENT_ID: ${{secrets.AZURE_KEY_VAULT_CLIENT_ID}}
AZURE_KEY_VAULT_CLIENT_SECRET: ${{secrets.AZURE_KEY_VAULT_CLIENT_SECRET}}
ACA_POOL_MANAGEMENT_ENDPOINT: ${{secrets.ACA_POOL_MANAGEMENT_ENDPOINT}}
MISTRALAI_API_KEY: ${{secrets.MISTRALAI_API_KEY}}
MISTRALAI_CHAT_MODEL_ID: ${{ vars.MISTRALAI_CHAT_MODEL_ID }}
run: |
if ${{ matrix.os == 'ubuntu-latest' }}; then
docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
@@ -163,6 +165,8 @@ jobs:
AZURE_KEY_VAULT_CLIENT_ID: ${{secrets.AZURE_KEY_VAULT_CLIENT_ID}}
AZURE_KEY_VAULT_CLIENT_SECRET: ${{secrets.AZURE_KEY_VAULT_CLIENT_SECRET}}
ACA_POOL_MANAGEMENT_ENDPOINT: ${{secrets.ACA_POOL_MANAGEMENT_ENDPOINT}}
MISTRALAI_API_KEY: ${{secrets.MISTRALAI_API_KEY}}
MISTRALAI_CHAT_MODEL_ID: ${{ vars.MISTRALAI_CHAT_MODEL_ID }}
run: |
if ${{ matrix.os == 'ubuntu-latest' }}; then
docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
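The two variables added above are only wired into CI here; to run the Mistral integration tests locally, the same names presumably have to be present in the developer's environment. A minimal sketch, assuming MistralAIChatCompletion also accepts explicit api_key and ai_model_id arguments (the constructor signature is an assumption based on the other Semantic Kernel connectors, not confirmed by this diff):

import os

from semantic_kernel.connectors.ai.mistral_ai import MistralAIChatCompletion

# Assumed: these names mirror the workflow env block above; the service is
# expected to fall back to them when no explicit arguments are passed.
chat_service = MistralAIChatCompletion(
    service_id="mistral-chat",
    ai_model_id=os.environ["MISTRALAI_CHAT_MODEL_ID"],
    api_key=os.environ["MISTRALAI_API_KEY"],
)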
1,556 changes: 783 additions & 773 deletions python/poetry.lock

Large diffs are not rendered by default.

8 changes: 6 additions & 2 deletions python/pyproject.toml
@@ -66,8 +66,9 @@ redis = { version = "^4.6.0", optional = true}
usearch = { version = "^2.9", optional = true}
pyarrow = { version = ">=12.0.1,<17.0.0", optional = true}
weaviate-client = { version = ">=3.18,<5.0", optional = true}
# mistralai
mistralai = { version = "^0.4.1", optional = true}

# Groups are for development only (installed through Poetry)
[tool.poetry.group.dev.dependencies]
pre-commit = ">=3.7.1"
ruff = ">=0.4.5"
@@ -128,16 +129,19 @@ usearch = "^2.9"
pyarrow = ">=12.0.1,<17.0.0"
# weaviate
weaviate-client = ">=3.18,<5.0"
# mistralai
mistralai = "^0.4.1"

# Extras are exposed to pip, this allows a user to easily add the right dependencies to their environment
[tool.poetry.extras]
all = ["google-generativeai", "transformers", "sentence-transformers", "qdrant-client", "chromadb", "pymilvus", "milvus", "weaviate-client", "pinecone-client", "psycopg", "redis", "azure-ai-inference", "azure-search-documents", "azure-core", "azure-identity", "azure-cosmos", "usearch", "pyarrow", "ipykernel", "motor"]
all = ["google-generativeai", "transformers", "sentence-transformers", "qdrant-client", "chromadb", "pymilvus", "milvus", "weaviate-client", "pinecone-client", "psycopg", "redis", "azure-ai-inference", "azure-search-documents", "azure-core", "azure-identity", "azure-cosmos", "usearch", "pyarrow", "ipykernel", "motor", "mistralai"]

azure = ["azure-ai-inference", "azure-search-documents", "azure-core", "azure-identity", "azure-cosmos", "msgraph-sdk"]
chromadb = ["chromadb"]
google = ["google-generativeai"]
hugging_face = ["transformers", "sentence-transformers"]
milvus = ["pymilvus", "milvus"]
mistralai = ["mistralai"]
mongo = ["motor"]
notebooks = ["ipykernel"]
pinecone = ["pinecone-client"]
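With the mistralai extra registered above, downstream users can opt into the connector's dependency at install time; a sketch of the intended flow (assuming the package keeps its usual PyPI name, semantic-kernel):

# pip install semantic-kernel[mistralai]

# Once the extra is installed, the connector import should resolve:
from semantic_kernel.connectors.ai.mistral_ai import MistralAIChatCompletion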
174 changes: 174 additions & 0 deletions
@@ -0,0 +1,174 @@
# Copyright (c) Microsoft. All rights reserved.

import asyncio
import os
from functools import reduce
from typing import TYPE_CHECKING

from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior
from semantic_kernel.connectors.ai.mistral_ai.services.mistral_ai_chat_completion import (
MistralAIChatCompletion,
MistralAIChatPromptExecutionSettings,
)
from semantic_kernel.contents import ChatHistory
from semantic_kernel.contents.chat_message_content import ChatMessageContent
from semantic_kernel.contents.function_call_content import FunctionCallContent
from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent
from semantic_kernel.core_plugins import MathPlugin, TimePlugin
from semantic_kernel.functions import KernelArguments

if TYPE_CHECKING:
from semantic_kernel.functions import KernelFunction


system_message = """
You are a chat bot. Your name is Mosscap and
you have one goal: figure out what people need.
Your full name, should you need to know it, is
Splendid Speckled Mosscap. You communicate
effectively, but you tend to answer with long
flowery prose. You are also a math wizard,
especially for adding and subtracting.
You also excel at joke telling, where your tone is often sarcastic.
Once you have the answer I am looking for,
you will return a full answer to me as soon as possible.
"""

kernel = Kernel()

kernel.add_service(MistralAIChatCompletion(service_id="chat", ai_model_id="mistral-large-latest"))

plugins_directory = os.path.join(os.path.dirname(__file__), "../../../../prompt_template_samples/")
# Adding plugins to the kernel.
# The joke function in the FunPlugin is a prompt-based (semantic) function and has function calling disabled:
# kernel.import_plugin_from_prompt_directory("chat", plugins_directory, "FunPlugin")
# The math plugin is a core (native) plugin and has function calling enabled.
kernel.add_plugin(MathPlugin(), plugin_name="math")
kernel.add_plugin(TimePlugin(), plugin_name="time")

chat_function = kernel.add_function(
prompt="{{$chat_history}}{{$user_input}}",
plugin_name="ChatBot",
function_name="Chat",
)
# Enabling or disabling function calling is done by setting the function_choice_behavior
# in the prompt execution settings below. With FunctionChoiceBehavior.Auto() the model
# decides which function to use, if any. To restrict the model to a specific function,
# filter on its fully qualified name, in the format 'PluginName-FunctionName' (e.g. 'math-Add').
# If the model or API version does not support this, you will get an error.

# Note: the number of responses for auto invoking tool calls is limited to 1.
# If configured to be greater than one, this value will be overridden to 1.
execution_settings = MistralAIChatPromptExecutionSettings(
service_id="chat",
max_tokens=2000,
temperature=0.7,
top_p=0.8,
function_choice_behavior=FunctionChoiceBehavior.Auto()
)
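
# A hypothetical variant (not part of this sample): restricting the model to the
# single 'math-Add' function via a filter, assuming FunctionChoiceBehavior supports
# the same "included_functions" filter shape as the OpenAI connector:
#
# execution_settings.function_choice_behavior = FunctionChoiceBehavior.Auto(
#     filters={"included_functions": ["math-Add"]}
# )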

history = ChatHistory()

history.add_system_message(system_message)
history.add_user_message("Hi there, who are you?")
history.add_assistant_message("I am Mosscap, a chat bot. I'm trying to figure out what people need.")

arguments = KernelArguments(settings=execution_settings)


def print_tool_calls(message: ChatMessageContent) -> None:
# A helper method to pretty print the tool calls from the message.
# This is only triggered if auto invoke tool calls is disabled.
items = message.items
formatted_tool_calls = []
for i, item in enumerate(items, start=1):
if isinstance(item, FunctionCallContent):
tool_call_id = item.id
function_name = item.name
function_arguments = item.arguments
formatted_str = (
f"tool_call {i} id: {tool_call_id}\n"
f"tool_call {i} function name: {function_name}\n"
f"tool_call {i} arguments: {function_arguments}"
)
formatted_tool_calls.append(formatted_str)
print("Tool calls:\n" + "\n\n".join(formatted_tool_calls))


async def handle_streaming(
kernel: Kernel,
chat_function: "KernelFunction",
arguments: KernelArguments,
) -> None:
response = kernel.invoke_stream(
chat_function,
return_function_results=False,
arguments=arguments,
)

print("Mosscap:> ", end="")
streamed_chunks: list[StreamingChatMessageContent] = []
async for message in response:
if not execution_settings.function_choice_behavior.auto_invoke_kernel_functions and isinstance(
message[0], StreamingChatMessageContent
):
streamed_chunks.append(message[0])
else:
print(str(message[0]), end="")

if streamed_chunks:
streaming_chat_message = reduce(lambda first, second: first + second, streamed_chunks)
print("Auto tool calls is disabled, printing returned tool calls...")
print_tool_calls(streaming_chat_message)

print("\n")


async def chat() -> bool:
try:
user_input = input("User:> ")
except KeyboardInterrupt:
print("\n\nExiting chat...")
return False
except EOFError:
print("\n\nExiting chat...")
return False

if user_input == "exit":
print("\n\nExiting chat...")
return False
arguments["user_input"] = user_input
arguments["chat_history"] = history

stream = True
if stream:
await handle_streaming(kernel, chat_function, arguments=arguments)
else:
result = await kernel.invoke(chat_function, arguments=arguments)

# If tools are used, and auto invoke tool calls is False, the response will be of type
# ChatMessageContent with information about the tool calls, which need to be sent
# back to the model to get the final response.
function_calls = [item for item in result.value[-1].items if isinstance(item, FunctionCallContent)]
if not execution_settings.function_choice_behavior.auto_invoke_kernel_functions and len(function_calls) > 0:
print_tool_calls(result.value[-1])
return True

print(f"Mosscap:> {result}")
return True


async def main() -> None:
chatting = True
print(
"Welcome to the chat bot!\
\n Type 'exit' to exit.\
\n Try a math question to see the function calling in action (i.e. what is 3+3?)."
)
while chatting:
chatting = await chat()


if __name__ == "__main__":
asyncio.run(main())
71 changes: 71 additions & 0 deletions python/samples/concepts/chat_completion/chat_mistral_api.py
@@ -0,0 +1,71 @@
# Copyright (c) Microsoft. All rights reserved.

import asyncio

from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.mistral_ai import MistralAIChatCompletion
from semantic_kernel.contents import ChatHistory
from semantic_kernel.functions import KernelArguments

system_message = """
You are a chat bot. Your name is Mosscap and
you have one goal: figure out what people need.
Your full name, should you need to know it, is
Splendid Speckled Mosscap. You communicate
effectively, but you tend to answer with long
flowery prose.
"""

kernel = Kernel()

service_id = "mistral-ai-chat"
kernel.add_service(MistralAIChatCompletion(service_id=service_id, ai_model_id="mistral-small-latest"))

settings = kernel.get_prompt_execution_settings_from_service_id(service_id)
settings.max_tokens = 2000
settings.temperature = 0.7
settings.top_p = 0.8

chat_function = kernel.add_function(
plugin_name="ChatBot",
function_name="Chat",
prompt="{{$chat_history}}{{$user_input}}",
template_format="semantic-kernel",
prompt_execution_settings=settings,
)

chat_history = ChatHistory(system_message=system_message)
chat_history.add_user_message("Hi there, who are you?")
chat_history.add_assistant_message("I am Mosscap, a chat bot. I'm trying to figure out what people need.")
chat_history.add_user_message("I want to find a hotel in Seattle with free wifi and a pool.")


async def chat() -> bool:
try:
user_input = input("User:> ")
except KeyboardInterrupt:
print("\n\nExiting chat...")
return False
except EOFError:
print("\n\nExiting chat...")
return False

if user_input == "exit":
print("\n\nExiting chat...")
return False

answer = await kernel.invoke(chat_function, KernelArguments(user_input=user_input, chat_history=chat_history))
chat_history.add_user_message(user_input)
chat_history.add_assistant_message(str(answer))
print(f"Mosscap:> {answer}")
return True


async def main() -> None:
chatting = True
while chatting:
chatting = await chat()


if __name__ == "__main__":
asyncio.run(main())
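
The sample routes the call through a kernel function; since Semantic Kernel chat services share a common base class, the service can presumably also be invoked directly. A minimal sketch, assuming get_chat_message_contents is available on MistralAIChatCompletion as it is on the other connectors (the method name and settings class usage are assumptions, not confirmed by this diff):

import asyncio

from semantic_kernel.connectors.ai.mistral_ai.services.mistral_ai_chat_completion import (
    MistralAIChatCompletion,
    MistralAIChatPromptExecutionSettings,
)
from semantic_kernel.contents import ChatHistory


async def direct_call() -> None:
    # Constructor arguments mirror the sample above; get_chat_message_contents is
    # assumed to be available, as on the other Semantic Kernel chat services.
    service = MistralAIChatCompletion(service_id="mistral-ai-chat", ai_model_id="mistral-small-latest")
    history = ChatHistory()
    history.add_user_message("Hi there, who are you?")
    settings = MistralAIChatPromptExecutionSettings(max_tokens=2000, temperature=0.7)
    responses = await service.get_chat_message_contents(chat_history=history, settings=settings)
    print(responses[0])


asyncio.run(direct_call())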