diff --git a/python/samples/concepts/auto_function_calling/nexus_raven.py b/python/samples/concepts/auto_function_calling/nexus_raven.py
new file mode 100644
index 000000000000..0d270d067c7b
--- /dev/null
+++ b/python/samples/concepts/auto_function_calling/nexus_raven.py
@@ -0,0 +1,409 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import ast
+import asyncio
+import json
+import math
+from collections.abc import AsyncGenerator
+from html import escape
+from typing import Annotated, Any, Literal
+
+from huggingface_hub import AsyncInferenceClient
+from pydantic import HttpUrl
+
+from semantic_kernel import Kernel
+from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase
+from semantic_kernel.connectors.ai.open_ai import (
+    OpenAIChatCompletion,
+)
+from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import (
+    OpenAIChatPromptExecutionSettings,
+)
+from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
+from semantic_kernel.connectors.ai.text_completion_client_base import TextCompletionClientBase
+from semantic_kernel.contents import (
+    ChatHistory,
+    ChatMessageContent,
+    FunctionCallContent,
+    StreamingChatMessageContent,
+    TextContent,
+)
+from semantic_kernel.contents.function_result_content import FunctionResultContent
+from semantic_kernel.filters.auto_function_invocation.auto_function_invocation_context import (
+    AutoFunctionInvocationContext,
+)
+from semantic_kernel.filters.filter_types import FilterTypes
+from semantic_kernel.functions import KernelArguments, kernel_function
+
+kernel = Kernel()
+
+
+@kernel.filter(FilterTypes.AUTO_FUNCTION_INVOCATION)
+async def auto_function_invocation_filter(context: AutoFunctionInvocationContext, next):
+    """A filter that will be called for each function call in the response."""
+    print("\033[92m\n Function called by Nexus Raven model\033[0m")
+    print(f"    \033[96mFunction: {context.function.fully_qualified_name}")
+    print(f"    Arguments: {context.arguments}")
+    await next(context)
+    print(f"    Result: {context.function_result}\n\033[0m")
+
+
+#########################################################################
+# Step 0: Define a custom AI Service, with Prompt Execution settings. ###
+# This uses the huggingface_hub package, so install that if needed.   ###
+#########################################################################
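+
+# Implementation note: the service below speaks both the text- and chat-completion
+# protocols. The text path calls the raw Hugging Face endpoint; the chat path
+# parses the model's "Call: ..." output into function call contents and executes
+# them through the kernel.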
+
+
+class NexusRavenPromptExecutionSettings(PromptExecutionSettings):
+    do_sample: bool = True
+    max_new_tokens: int | None = None
+    stop_sequences: Any = None
+    temperature: float | None = None
+    top_p: float | None = None
+
+    def prepare_settings_dict(self, **kwargs) -> dict[str, Any]:
+        """Prepare the settings dictionary."""
+        return self.model_dump(
+            include={"max_new_tokens", "temperature", "top_p", "do_sample", "stop_sequences"},
+            exclude_unset=False,
+            exclude_none=True,
+            by_alias=True,
+        )
+
+
+class NexusRavenCompletion(TextCompletionClientBase, ChatCompletionClientBase):
+    """To use this class, you should have installed the ``huggingface_hub`` package,
+    and have the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API
+    token, or pass the token as a named parameter to the constructor."""
+
+    client: AsyncInferenceClient
+
+    def __init__(
+        self,
+        service_id: str,
+        ai_model_id: str,
+        endpoint_url: HttpUrl,
+        api_token: str | None = None,
+        client: AsyncInferenceClient | None = None,
+    ):
+        if not client:
+            client = AsyncInferenceClient(model=endpoint_url, token=api_token)
+        super().__init__(service_id=service_id, ai_model_id=ai_model_id, client=client)
+
+    async def get_chat_message_contents(
+        self,
+        chat_history: "ChatHistory",
+        settings: "PromptExecutionSettings",
+        **kwargs: Any,
+    ) -> list["ChatMessageContent"]:
+        """Creates a chat message content with a function call content within.
+
+        Uses the text content and parses it into a function call content."""
+        result = await self.get_text_contents(
+            prompt=chat_history.messages[-1].content,
+            settings=settings,
+        )
+        messages = []
+        for part in result[0].text.split(";"):
+            try:
+                function_call, function_result = await self._execute_function_calls(part, chat_history, **kwargs)
+                if function_call:
+                    messages.extend(
+                        [
+                            ChatMessageContent(
+                                role="assistant", items=[function_call], metadata={"ai_model_id": self.ai_model_id}
+                            ),
+                            ChatMessageContent(
+                                role="tool",
+                                items=[function_result],
+                                name="nexus",
+                                metadata={"ai_model_id": self.ai_model_id},
+                            ),
+                        ]
+                    )
+                else:
+                    messages.append(ChatMessageContent(role="assistant", content=part, ai_model_id=self.ai_model_id))
+            except Exception as e:
+                messages.append(
+                    ChatMessageContent(
+                        role="assistant",
+                        items=[
+                            TextContent(
+                                text=f"An error occurred while executing the function call: {e}",
+                                ai_model_id=self.ai_model_id,
+                            )
+                        ],
+                    )
+                )
+                return messages
+        return messages
+
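+    # Raven can emit nested calls such as
+    # add(input=cylinder_volume(radius=20, height=3), amount=-200).
+    # The helper below parses that expression with ast, walks it breadth-first to
+    # build a call stack with dependencies, then repeatedly executes the calls whose
+    # arguments are fully resolved and substitutes each result into its parent call.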
+    async def _execute_function_calls(
+        self, result: str, chat_history: ChatHistory, **kwargs: Any
+    ) -> tuple[FunctionCallContent, FunctionResultContent] | None:
+        function_call = result.strip().split("\n")[0].strip("Call:").strip()
+        if not function_call:
+            return None
+        parsed_fc = ast.parse(function_call, mode="eval")
+        if not isinstance(parsed_fc.body, ast.Call):
+            return None
+        idx = 0
+        call_stack = {}
+        queue = []
+        current = parsed_fc.body
+        queue.append((idx, parsed_fc.body))
+        idx += 1
+        while queue:
+            current_idx, current = queue.pop(0)
+            dependent_on = []
+            args = {}
+            for keyword in current.keywords:
+                if isinstance(keyword.value, ast.Call):
+                    queue.append((idx, keyword.value))
+                    dependent_on.append(idx)
+                    args[keyword.arg] = (idx, keyword.value)
+                    idx += 1
+                else:
+                    args[keyword.arg] = keyword.value.value
+            call = {
+                "idx": current_idx,
+                "func": current.func.id.replace("_", "-", 1),
+                "args": args,
+                "dependent_on": dependent_on,
+                "fcc": None,
+                "result": None,
+            }
+            call_stack[current_idx] = call
+        while any(call["result"] is None for call in call_stack.values()):
+            await asyncio.gather(
+                *[
+                    self._execute_function_call(call, chat_history, kwargs.get("kernel"))
+                    for call in call_stack.values()
+                    if not any(isinstance(arg, tuple) for arg in call["args"].values()) and call["result"] is None
+                ]
+            )
+            for call in call_stack.values():
+                if call["result"] is None:
+                    for name, arg in call["args"].items():
+                        if isinstance(arg, tuple) and call_stack[arg[0]]["result"] is not None:
+                            function_result: FunctionResultContent = call_stack[arg[0]]["result"]
+                            call["args"][name] = function_result.result
+        return call_stack[0]["fcc"], call_stack[0]["result"]
+
+    async def _execute_function_call(
+        self, call_def: dict[str, Any], chat_history: ChatHistory, kernel: Kernel
+    ) -> FunctionResultContent:
+        """Execute a function call."""
+        call_def["fcc"] = FunctionCallContent(
+            name=call_def["func"], arguments=json.dumps(call_def["args"]), id=str(call_def["idx"])
+        )
+        result = await kernel.invoke_function_call(call_def["fcc"], chat_history)
+        if not result:
+            call_def["result"] = chat_history.messages[-1].items[0]
+        else:
+            call_def["result"] = result.function_result
+
+    async def get_text_contents(self, prompt: str, settings: NexusRavenPromptExecutionSettings) -> list[TextContent]:
+        result = await self.client.text_generation(prompt, **settings.prepare_settings_dict(), stream=False)
+        return [TextContent(text=result.strip(), ai_model_id=self.ai_model_id)]
+
+    async def get_streaming_text_contents(self, prompt: str, settings: NexusRavenPromptExecutionSettings):
+        raise NotImplementedError("Streaming text contents not implemented.")
+
+    def get_streaming_chat_message_contents(
+        self,
+        chat_history: "ChatHistory",
+        settings: "PromptExecutionSettings",
+        **kwargs: Any,
+    ) -> AsyncGenerator[list["StreamingChatMessageContent"], Any]:
+        raise NotImplementedError("Streaming chat message contents not implemented.")
+
+    def get_prompt_execution_settings_class(self) -> type[PromptExecutionSettings]:
+        return NexusRavenPromptExecutionSettings
+
+
+##########################################################
+# Step 1: Define the functions you want to articulate. ###
+##########################################################
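+
+# The parameter annotations and docstrings below are not just documentation:
+# Step 2 renders them into the OPTION blocks that Nexus Raven chooses from.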
+
+
+class MathPlugin:
+    @kernel_function
+    def cylinder_volume(
+        self,
+        radius: Annotated[float, "The radius of the base of the cylinder."],
+        height: Annotated[float, "The height of the cylinder."],
+    ):
+        """Calculate the volume of a cylinder."""
+        if radius < 0 or height < 0:
+            raise ValueError("Radius and height must be non-negative.")
+
+        return math.pi * (radius**2) * height
+
+    @kernel_function
+    def add(
+        self,
+        input: Annotated[float, "the first number to add"],
+        amount: Annotated[float, "the second number to add"],
+    ) -> Annotated[float, "the output is a number"]:
+        """Returns the addition result of the values provided."""
+        return MathPlugin.calculator(input, amount, "add")
+
+    @kernel_function
+    def subtract(
+        self,
+        input: Annotated[float, "the first number"],
+        amount: Annotated[float, "the number to subtract"],
+    ) -> float:
+        """Returns the difference of numbers provided."""
+        return MathPlugin.calculator(input, amount, "subtract")
+
+    @kernel_function
+    def multiply(
+        self,
+        input: Annotated[float, "the first number"],
+        amount: Annotated[float, "the number to multiply with"],
+    ) -> float:
+        """Returns the product of numbers provided."""
+        return MathPlugin.calculator(input, amount, "multiply")
+
+    @kernel_function
+    def divide(
+        self,
+        input: Annotated[float, "the first number"],
+        amount: Annotated[float, "the number to divide by"],
+    ) -> float:
+        """Returns the quotient of numbers provided."""
+        return MathPlugin.calculator(input, amount, "divide")
+
+    @staticmethod
+    def calculator(
+        input_a: float,
+        input_b: float,
+        operation: Literal["add", "subtract", "multiply", "divide"],
+    ):
+        """Computes a calculation."""
+        match operation:
+            case "add":
+                return input_a + input_b
+            case "subtract":
+                return input_a - input_b
+            case "multiply":
+                return input_a * input_b
+            case "divide":
+                return input_a / input_b
+
+
+#############################################################
+# Step 2: Let's define some utils for building the prompt ###
+#############################################################
+
+
+@kernel_function
+def format_functions_for_prompt():
+    filters = {"excluded_plugins": ["kernel"]}
+    functions = kernel.get_list_of_function_metadata(filters)
+    formatted_functions = []
+    for func in functions:
+        args_strings = []
+        for arg in func.parameters:
+            arg_string = f"{arg.name}: {arg.type_}"
+            if arg.default_value:
+                arg_string += f" = {arg.default_value}"
+            args_strings.append(arg_string)
+        func_string = f"{func.fully_qualified_name.replace('-', '_')}({', '.join(args_strings)})"
+        formatted_functions.append(
+            escape(
+                f"OPTION:\n{func_string}\n\n{func.description}\n"
+            )
+        )
+    return formatted_functions
+
+
+#########################################################################
+# Step 3: Let's define the two prompts, one for Nexus, one for OpenAI ###
+# and add everything to the kernel!                                   ###
+#########################################################################
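+
+# The "function_call" prompt lists every registered function as an OPTION block and
+# stops generation at "\nReflection:"/"\nThought:", so the model returns only the
+# "Call: ..." line that NexusRavenCompletion parses above.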
+
+kernel.add_function(
+    "kernel",
+    function_name="function_call",
+    prompt="""{{chat_history}}<human>:
+{{kernel-format_functions_for_prompt}}
+\n\nUser Query: Question: {{user_query}}
+Please pick a function from the above options that best answers the user query and fill in the appropriate arguments.<human_end>""",  # noqa: E501
+    template_format="handlebars",
+    prompt_execution_settings=NexusRavenPromptExecutionSettings(
+        service_id="nexus",
+        temperature=0.001,
+        max_new_tokens=500,
+        do_sample=False,
+        stop_sequences=["\nReflection:", "\nThought:"],
+    ),
+)
+kernel.add_function(
+    "kernel",
+    function_name="chat",
+    prompt="""You are a chatbot that gets fed questions and answers, you write out the response to the question based on the answer, but you do not supply underlying math formulas nor do you try to do math yourself, just a nice sentence that repeats the question and gives the answer. {{chat_history}}""",  # noqa: E501
+    template_format="handlebars",
+    prompt_execution_settings=OpenAIChatPromptExecutionSettings(
+        service_id="openai",
+        temperature=0.0,
+        max_tokens=1000,
+    ),
+)
+kernel.add_plugin(MathPlugin(), "math")
+kernel.add_function("kernel", format_functions_for_prompt)
+kernel.add_service(
+    NexusRavenCompletion(service_id="nexus", ai_model_id="raven", endpoint_url="http://nexusraven.nexusflow.ai")
+)
+kernel.add_service(OpenAIChatCompletion(service_id="openai"))
+
+############################################
+# Step 4: The main function and a runner ###
+############################################
+
+
+async def run_question(user_input: str, chat_history: ChatHistory):
+    arguments = KernelArguments(
+        user_query=user_input,
+        chat_history=chat_history,
+        kernel=kernel,
+    )
+    result = await kernel.invoke(plugin_name="kernel", function_name="function_call", arguments=arguments)
+    chat_history.add_user_message(user_input)
+    for msg in result.value:
+        chat_history.add_message(msg)
+    final_result = await kernel.invoke(plugin_name="kernel", function_name="chat", arguments=arguments)
+    chat_history.add_message(final_result.value[0])
+
+
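+# Two-stage flow: Nexus Raven picks and executes the function calls locally, the
+# results land in the chat history, and the OpenAI "chat" function phrases the
+# final natural-language answer.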
+async def main():
+    chat_history = ChatHistory()
+    user_input_example = (
+        "my cake is 3 centimeters high and 20 centimeters in radius, can you subtract 200 from that number?"
+    )
+    print("Welcome to the chatbot!")
+    print(
+        "This chatbot uses local function calling with Nexus Raven, and OpenAI for the final answer; "
+        "it has some math skills, so feel free to ask anything about that."
+    )
+    print(f'For example: "{user_input_example}".')
+    print("You can type 'exit' to quit the chatbot.")
+    while True:
+        try:
+            user_input = input("What is your question: ")
+        except Exception:
+            break
+        if user_input == "exit":
+            break
+        if not user_input:
+            user_input = user_input_example
+
+        await run_question(user_input, chat_history)
+        print(chat_history.messages[-1].content)
+    print("Thanks for chatting with me!")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/semantic_kernel/contents/chat_history.py b/python/semantic_kernel/contents/chat_history.py
index 2d7ef35d5a79..d635b94c61c4 100644
--- a/python/semantic_kernel/contents/chat_history.py
+++ b/python/semantic_kernel/contents/chat_history.py
@@ -263,8 +263,8 @@ def from_rendered_prompt(cls, rendered_prompt: str) -> "ChatHistory":
         prompt = rendered_prompt.strip()
         try:
             xml_prompt = XML(text=f"<{prompt_tag}>{prompt}")
-        except ParseError:
-            logger.info(f"Could not parse prompt {prompt} as xml, treating as text")
+        except ParseError as exc:
+            logger.info(f"Could not parse prompt {prompt} as xml, treating as text, error was: {exc}")
             return cls(messages=[ChatMessageContent(role=AuthorRole.USER, content=unescape(prompt))])
         if xml_prompt.text and xml_prompt.text.strip():
             messages.append(ChatMessageContent(role=AuthorRole.SYSTEM, content=unescape(xml_prompt.text.strip())))
diff --git a/python/semantic_kernel/contents/chat_message_content.py b/python/semantic_kernel/contents/chat_message_content.py
index 51394d0ce116..54244d4baff7 100644
--- a/python/semantic_kernel/contents/chat_message_content.py
+++ b/python/semantic_kernel/contents/chat_message_content.py
@@ -296,5 +296,5 @@ def _parse_items(self) -> str | list[dict[str, Any]]:
         if len(self.items) == 1 and isinstance(self.items[0], TextContent):
             return self.items[0].text
         if len(self.items) == 1 and isinstance(self.items[0], FunctionResultContent):
-            return self.items[0].result
+            return str(self.items[0].result)
         return [item.to_dict() for item in self.items]
diff --git a/python/semantic_kernel/contents/function_result_content.py b/python/semantic_kernel/contents/function_result_content.py
index 06395f30a5d9..b9b5a35f06b3 100644
--- a/python/semantic_kernel/contents/function_result_content.py
+++ b/python/semantic_kernel/contents/function_result_content.py
@@ -4,9 +4,10 @@
 from typing import TYPE_CHECKING, Any, ClassVar, Literal, TypeVar
 from xml.etree.ElementTree import Element  # nosec
 
-from pydantic import Field, field_validator
+from pydantic import Field
 
 from semantic_kernel.contents.const import FUNCTION_RESULT_CONTENT_TAG, TEXT_CONTENT_TAG, ContentTypes
+from semantic_kernel.contents.image_content import ImageContent
 from semantic_kernel.contents.kernel_content import KernelContent
 from semantic_kernel.contents.text_content import TextContent
 from semantic_kernel.contents.utils.author_role import AuthorRole
@@ -47,7 +48,7 @@ class FunctionResultContent(KernelContent):
     tag: ClassVar[str] = FUNCTION_RESULT_CONTENT_TAG
     id: str
     name: str | None = None
-    result: str
+    result: Any
     encoding: str | None = None
 
     @cached_property
@@ -60,13 +61,6 @@ def plugin_name(self) -> str | None:
         """Get the plugin name."""
         return self.split_name()[0]
 
-    @field_validator("result", mode="before")
-    @classmethod
-    def _validate_result(cls, result: Any):
-        if not isinstance(result, str):
-            result = str(result)
-        return result
-
     def __str__(self) -> str:
         """Return the text of the response."""
         return self.result
@@ -95,11 +89,31 @@ def from_function_call_content_and_result(
         metadata: dict[str, Any] = {},
     ) -> _T:
         """Create an instance from a FunctionCallContent and a result."""
+        from semantic_kernel.contents.chat_message_content import ChatMessageContent
+        from semantic_kernel.functions.function_result import FunctionResult
+
         if function_call_content.metadata:
             metadata.update(function_call_content.metadata)
+        inner_content = result
+        if isinstance(result, FunctionResult):
+            result = result.value
+        if isinstance(result, TextContent):
+            res = result.text
+        elif isinstance(result, ChatMessageContent):
+            if isinstance(result.items[0], TextContent):
+                res = result.items[0].text
+            elif isinstance(result.items[0], ImageContent):
+                res = result.items[0].data_uri
+            elif isinstance(result.items[0], FunctionResultContent):
+                res = result.items[0].result
+            else:
+                res = str(result)
+        else:
+            res = result
         return cls(
             id=function_call_content.id or "unknown",
-            result=str(result),
+            inner_content=inner_content,
+            result=res,
             name=function_call_content.name,
             ai_model_id=function_call_content.ai_model_id,
             metadata=metadata,
diff --git a/python/semantic_kernel/core_plugins/math_plugin.py b/python/semantic_kernel/core_plugins/math_plugin.py
index 87c211368904..48035d3a9a64 100644
--- a/python/semantic_kernel/core_plugins/math_plugin.py
+++ b/python/semantic_kernel/core_plugins/math_plugin.py
@@ -29,21 +29,13 @@ def add(
         amount = int(amount)
         return MathPlugin.add_or_subtract(input, amount, add=True)
 
-    @kernel_function(
-        description="Subtracts value to a value",
-        name="Subtract",
-    )
+    @kernel_function(name="Subtract")
     def subtract(
         self,
         input: Annotated[int, "the first number"],
         amount: Annotated[int, "the number to subtract"],
     ) -> int:
-        """Returns the difference of numbers provided.
-
-        :param initial_value_text: Initial value as string to subtract the specified amount
-        :param context: Contains the context to get the numbers from
-        :return: The resulting subtraction as a string
-        """
+        """Returns the difference of numbers provided."""
         if isinstance(input, str):
             input = int(input)
         if isinstance(amount, str):
@@ -52,11 +44,5 @@ def subtract(
 
     @staticmethod
     def add_or_subtract(input: int, amount: int, add: bool) -> int:
-        """Helper function to perform addition or subtraction based on the add flag.
-
-        :param initial_value_text: Initial value as string to add or subtract the specified amount
-        :param context: Contains the context to get the numbers from
-        :param add: If True, performs addition, otherwise performs subtraction
-        :return: The resulting sum or subtraction as a string
-        """
+        """Helper function to perform addition or subtraction based on the add flag."""
         return input + amount if add else input - amount
diff --git a/python/semantic_kernel/functions/kernel_function_from_prompt.py b/python/semantic_kernel/functions/kernel_function_from_prompt.py
index dd28e8a1637d..c83fba398eae 100644
--- a/python/semantic_kernel/functions/kernel_function_from_prompt.py
+++ b/python/semantic_kernel/functions/kernel_function_from_prompt.py
@@ -167,18 +167,11 @@ async def _invoke_internal(self, context: FunctionInvocationContext) -> None:
 
         if isinstance(prompt_render_result.ai_service, ChatCompletionClientBase):
             chat_history = ChatHistory.from_rendered_prompt(prompt_render_result.rendered_prompt)
-
-            # pass the kernel in for auto function calling
-            kwargs: dict[str, Any] = {}
-            if hasattr(prompt_render_result.execution_settings, "function_choice_behavior"):
-                kwargs["kernel"] = context.kernel
-                kwargs["arguments"] = context.arguments
-
             try:
                 chat_message_contents = await prompt_render_result.ai_service.get_chat_message_contents(
                     chat_history=chat_history,
                     settings=prompt_render_result.execution_settings,
-                    **kwargs,
+                    **{"kernel": context.kernel, "arguments": context.arguments},
                 )
             except Exception as exc:
                 raise FunctionExecutionException(f"Error occurred while invoking function {self.name}: {exc}") from exc
@@ -211,18 +204,11 @@ async def _invoke_internal_stream(self, context: FunctionInvocationContext) -> N
         prompt_render_result = await self._render_prompt(context)
 
         if isinstance(prompt_render_result.ai_service, ChatCompletionClientBase):
-            # pass the kernel in for auto function calling
-            kwargs: dict[str, Any] = {}
-            if hasattr(prompt_render_result.execution_settings, "function_choice_behavior"):
-                kwargs["kernel"] = context.kernel
-                kwargs["arguments"] = context.arguments
-
             chat_history = ChatHistory.from_rendered_prompt(prompt_render_result.rendered_prompt)
-
             value: AsyncGenerator = prompt_render_result.ai_service.get_streaming_chat_message_contents(
                 chat_history=chat_history,
                 settings=prompt_render_result.execution_settings,
-                **kwargs,
+                **{"kernel": context.kernel, "arguments": context.arguments},
             )
         elif isinstance(prompt_render_result.ai_service, TextCompletionClientBase):
             value = prompt_render_result.ai_service.get_streaming_text_contents(
diff --git a/python/semantic_kernel/kernel.py b/python/semantic_kernel/kernel.py
index da92feaca68d..da54baadb429 100644
--- a/python/semantic_kernel/kernel.py
+++ b/python/semantic_kernel/kernel.py
@@ -36,6 +36,7 @@
 from semantic_kernel.reliability.kernel_reliability_extension import KernelReliabilityExtension
 from semantic_kernel.services.ai_service_selector import AIServiceSelector
 from semantic_kernel.services.kernel_services_extension import KernelServicesExtension
+from semantic_kernel.utils.naming import generate_random_ascii_name
 
 if TYPE_CHECKING:
     from semantic_kernel.connectors.ai.function_choice_behavior import (
@@ -128,6 +129,8 @@ async def invoke_stream(
         """
         if arguments is None:
             arguments = KernelArguments(**kwargs)
+        else:
+            arguments.update(kwargs)
         if not function:
             if not function_name or not plugin_name:
                 raise KernelFunctionNotFoundError("No function(s) or function- and plugin-name provided")
@@ -207,9 +210,9 @@ async def invoke(
 
     async def invoke_prompt(
         self,
-        function_name: str,
-        plugin_name: str,
         prompt: str,
+        function_name: str | None = None,
+        plugin_name: str | None = None,
         arguments: KernelArguments | None = None,
         template_format: Literal[
             "semantic-kernel",
@@ -221,9 +224,9 @@ async def invoke_prompt(
         """Invoke a function from the provided prompt.
 
         Args:
-            function_name (str): The name of the function
-            plugin_name (str): The name of the plugin
             prompt (str): The prompt to use
+            function_name (str): The name of the function, optional
+            plugin_name (str): The name of the plugin, optional
             arguments (KernelArguments | None): The arguments to pass to the function(s), optional
             template_format (str | None): The format of the prompt template
             kwargs (dict[str, Any]): arguments that can be used instead of supplying KernelArguments
@@ -237,7 +240,7 @@ async def invoke_prompt(
             raise TemplateSyntaxError("The prompt is either null or empty.")
 
         function = KernelFunctionFromPrompt(
-            function_name=function_name,
+            function_name=function_name or generate_random_ascii_name(),
             plugin_name=plugin_name,
             prompt=prompt,
             template_format=template_format,
@@ -246,9 +249,9 @@ async def invoke_prompt(
 
     async def invoke_prompt_stream(
         self,
-        function_name: str,
-        plugin_name: str,
         prompt: str,
+        function_name: str | None = None,
+        plugin_name: str | None = None,
         arguments: KernelArguments | None = None,
         template_format: Literal[
             "semantic-kernel",
@@ -261,9 +264,9 @@ async def invoke_prompt_stream(
         """Invoke a function from the provided prompt and stream the results.
 
         Args:
-            function_name (str): The name of the function
-            plugin_name (str): The name of the plugin
             prompt (str): The prompt to use
+            function_name (str): The name of the function, optional
+            plugin_name (str): The name of the plugin, optional
             arguments (KernelArguments | None): The arguments to pass to the function(s), optional
             template_format (str | None): The format of the prompt template
             return_function_results (bool): If True, the function results are yielded as a list[FunctionResult]
@@ -280,7 +283,7 @@ async def invoke_prompt_stream(
         from semantic_kernel.functions.kernel_function_from_prompt import KernelFunctionFromPrompt
 
         function = KernelFunctionFromPrompt(
-            function_name=function_name,
+            function_name=function_name or generate_random_ascii_name(),
             plugin_name=plugin_name,
             prompt=prompt,
             template_format=template_format,
@@ -314,13 +317,13 @@ async def invoke_function_call(
         self,
         function_call: FunctionCallContent,
         chat_history: ChatHistory,
-        arguments: "KernelArguments",
+        arguments: "KernelArguments | None" = None,
         function_call_count: int | None = None,
         request_index: int | None = None,
         function_behavior: "FunctionChoiceBehavior" = None,  # type: ignore
     ) -> "AutoFunctionInvocationContext | None":
         """Processes the provided FunctionCallContent and updates the chat history."""
-        args_cloned = copy(arguments)
+        args_cloned = copy(arguments) if arguments else KernelArguments()
         try:
             parsed_args = function_call.to_kernel_arguments()
             if parsed_args:
@@ -383,8 +386,8 @@ async def invoke_function_call(
             arguments=args_cloned,
             chat_history=chat_history,
             function_result=FunctionResult(function=function_to_call.metadata, value=None),
-            function_count=function_call_count,
-            request_sequence_index=request_index,
+            function_count=function_call_count or 0,
+            request_sequence_index=request_index or 0,
         )
         if function_call.index is not None:
             invocation_context.function_sequence_index = function_call.index