Tool/Function Calling

The library supports tool/function calling across all providers with a unified interface.

Basic Tool Calling#

from primfunctions.events import Event, StartEvent, TextEvent, TextToSpeechEvent
from primfunctions.context import Context
from voicerun_completions import generate_chat_completion


async def handler(event: Event, context: Context):
    """Basic tool-calling handler using a plain-dict tool specification.

    Greets the caller on session start; on each text event, sends the user
    message to the model with a weather tool available, then either reports
    the requested tool call or speaks the model's text reply.
    """
    if isinstance(event, StartEvent):
        yield TextToSpeechEvent(
            text="I can check the weather for you. Just ask me about any location.",
            voice="kore"
        )
    if isinstance(event, TextEvent):
        user_message = event.data.get("text", "N/A")

        # JSON-Schema function spec (same shape across providers).
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Get the current weather for a location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {
                                "type": "string",
                                "description": "The city and state, e.g. San Francisco, CA"
                            },
                            "unit": {
                                "type": "string",
                                "enum": ["celsius", "fahrenheit"],
                                "description": "Temperature unit"
                            }
                        },
                        "required": ["location"]
                    }
                }
            }
        ]

        response = await generate_chat_completion({
            "provider": "anthropic",
            "api_key": context.variables.get("ANTHROPIC_API_KEY"),
            "model": "claude-haiku-4-5",
            "messages": [{"role": "user", "content": user_message}],
            "tools": tools,
            "tool_choice": "auto"  # Let model decide
        })

        # Check if model wants to call a tool
        if response.message.tool_calls:
            for tool_call in response.message.tool_calls:
                # Execute the tool here
                # Then add result to conversation and call again
                yield TextToSpeechEvent(
                    text=f"I need to call {tool_call.function.name} with {tool_call.function.arguments}",
                    voice="kore"
                )
        elif response.message.content:
            yield TextToSpeechEvent(
                text=response.message.content,
                voice="kore"
            )

Defining Tools as Objects#

For better IDE support and type checking, define tools using ToolDefinition and FunctionDefinition objects instead of dictionaries:

from primfunctions.events import Event, StartEvent, TextEvent, TextToSpeechEvent
from primfunctions.context import Context
from voicerun_completions import (
    generate_chat_completion,
    ChatCompletionRequest,
    CompletionsProvider,
    ToolDefinition,
    FunctionDefinition,
    UserMessage,
)


async def handler(event: Event, context: Context):
    """Weather tool-calling handler built from typed request/tool objects.

    Functionally equivalent to the dict-based example, but uses
    ToolDefinition / FunctionDefinition / ChatCompletionRequest for IDE
    support and static type checking.
    """
    if isinstance(event, StartEvent):
        yield TextToSpeechEvent(
            text="I can check the weather for you. Just ask me about any location.",
            voice="kore"
        )
    if isinstance(event, TextEvent):
        user_message = event.data.get("text", "N/A")

        tools = [
            ToolDefinition(
                type="function",
                function=FunctionDefinition(
                    name="get_weather",
                    description="Get the current weather for a location",
                    # The parameters field stays a plain JSON Schema dict
                    # even in the typed API.
                    parameters={
                        "type": "object",
                        "properties": {
                            "location": {
                                "type": "string",
                                "description": "The city and state, e.g. San Francisco, CA"
                            },
                            "unit": {
                                "type": "string",
                                "enum": ["celsius", "fahrenheit"],
                                "description": "Temperature unit"
                            }
                        },
                        "required": ["location"]
                    }
                )
            )
        ]

        request = ChatCompletionRequest(
            provider=CompletionsProvider.ANTHROPIC,
            api_key=context.variables.get("ANTHROPIC_API_KEY"),
            model="claude-haiku-4-5",
            messages=[UserMessage(content=user_message)],
            tools=tools,
            tool_choice="auto"
        )
        response = await generate_chat_completion(request)

        if response.message.tool_calls:
            for tool_call in response.message.tool_calls:
                yield TextToSpeechEvent(
                    text=f"I need to call {tool_call.function.name} with {tool_call.function.arguments}",
                    voice="kore"
                )
        elif response.message.content:
            yield TextToSpeechEvent(
                text=response.message.content,
                voice="kore"
            )

Tool Calling with Results#

from primfunctions.events import Event, StartEvent, TextEvent, TextToSpeechEvent
from primfunctions.context import Context
from voicerun_completions import (
    generate_chat_completion,
    ConversationHistory,
    UserMessage,
    ToolResultMessage,
    deserialize_conversation,
)


async def get_weather(location: str) -> dict:
    """Stub weather lookup.

    Your actual weather API call goes here; the stub returns a fixed reading.
    """
    return {"temperature": 72, "condition": "sunny"}


async def handler(event: Event, context: Context):
    """Multi-turn tool-calling handler with persisted conversation history.

    Flow per text event: restore history, ask the model (tools available),
    execute any requested tools, feed the results back in a second call,
    then persist the updated history and speak the final reply.
    """
    if isinstance(event, StartEvent):
        yield TextToSpeechEvent(
            text="I can check the weather for multiple locations.",
            voice="kore"
        )
    if isinstance(event, TextEvent):
        user_message = event.data.get("text", "N/A")

        # Deserialize conversation history from context
        messages: ConversationHistory = deserialize_conversation(context.get_completion_messages())
        messages.append(UserMessage(content=user_message))

        tools = [
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Get weather for a location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {"type": "string"}
                        },
                        "required": ["location"]
                    }
                }
            }
        ]

        # First call - model decides whether to use a tool
        response = await generate_chat_completion({
            "provider": "anthropic",
            "api_key": context.variables.get("ANTHROPIC_API_KEY"),
            "model": "claude-haiku-4-5",
            "messages": messages,
            "tools": tools,
            "tool_choice": "auto"
        })

        # Add the assistant's message (including any tool calls) to the history
        messages.append(response.message)

        # Execute tools and add results
        if response.message.tool_calls:
            for tool_call in response.message.tool_calls:
                # NOTE(review): assumes function.arguments is already parsed
                # into a dict (some SDKs return a JSON string) — confirm
                # against the library's tool-call type.
                result = await get_weather(tool_call.function.arguments["location"])
                messages.append(ToolResultMessage(
                    tool_call_id=tool_call.id,
                    name=tool_call.function.name,
                    content=result
                ))

            # Second call - model composes a reply from the tool results.
            # Scoped inside the tool-call branch: when no tools ran, the first
            # response is already final, and an unconditional second call would
            # waste an API round-trip and append a duplicate assistant turn.
            response = await generate_chat_completion({
                "provider": "anthropic",
                "api_key": context.variables.get("ANTHROPIC_API_KEY"),
                "model": "claude-haiku-4-5",
                "messages": messages,
                "tools": tools,
                "tool_choice": "auto"
            })
            messages.append(response.message)

        # Store updated conversation
        context.set_completion_messages(messages)

        if response.message.content:
            yield TextToSpeechEvent(
                text=response.message.content,
                voice="kore"
            )

Streaming with Tool Calls#

from primfunctions.events import Event, StartEvent, TextEvent, TextToSpeechEvent
from primfunctions.context import Context
from voicerun_completions import generate_chat_completion_stream


async def handler(event: Event, context: Context):
    """Streaming tool-calling handler.

    Speaks each complete sentence as it arrives from the stream, and
    surfaces tool-call chunks for execution as they complete.
    """
    if isinstance(event, StartEvent):
        yield TextToSpeechEvent(
            text="I can check the weather and respond in real-time.",
            voice="kore"
        )
    if isinstance(event, TextEvent):
        user_message = event.data.get("text", "N/A")

        stream = await generate_chat_completion_stream(
            request={
                "provider": "anthropic",
                "api_key": context.variables.get("ANTHROPIC_API_KEY"),
                "model": "claude-haiku-4-5",
                "messages": [{"role": "user", "content": user_message}],
                "tools": [
                    {
                        "type": "function",
                        "function": {
                            "name": "get_weather",
                            "description": "Get weather for a location",
                            "parameters": {
                                "type": "object",
                                "properties": {
                                    "location": {"type": "string"}
                                },
                                "required": ["location"]
                            }
                        }
                    }
                ],
                "tool_choice": "auto"
            },
            # Emit whole, cleaned sentences so each chunk is TTS-ready.
            stream_options={"stream_sentences": True, "clean_sentences": True}
        )

        async for chunk in stream:
            if chunk.type == "content_sentence":
                yield TextToSpeechEvent(
                    text=chunk.sentence,
                    voice="kore"
                )
            elif chunk.type == "tool_call":
                # Complete tool call received
                # Execute tool here
                pass
            elif chunk.type == "response":
                # Final aggregated response once the stream is exhausted.
                complete_response = chunk.response

Tool Choice Options#

# Let the model decide (default) "tool_choice": "auto" # Force the model to call a specific tool "tool_choice": "get_weather" # Don't allow tool calls "tool_choice": "none" # Require the model to call at least one tool "tool_choice": "required"

OpenAI Strict Mode#

For OpenAI models, you can enable strict parameter validation:

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get weather for a location",
            "parameters": {...},
            "strict": True  # Enable strict mode (OpenAI only)
        }
    }
]

Tool Parameter Schemas#

Tool parameter schemas use JSON Schema format. The same schema support applies to both tool parameters and response_schema. See JSON Schema Support for cross-provider compatibility details, Google sanitization behavior, and recommendations.

Next Steps#

tools · functions · agents