Examples
from primfunctions.events import Event, StartEvent, TextEvent, TextToSpeechEvent
from primfunctions.context import Context
from voicerun_completions import (
generate_chat_completion,
ChatCompletionRequest,
CompletionsProvider,
RetryConfiguration,
FallbackRequest,
ToolDefinition,
FunctionDefinition,
ConversationHistory,
SystemMessage,
UserMessage,
ToolResultMessage,
deserialize_conversation,
)
async def get_weather(location: str) -> dict:
    """Mock weather lookup used by the tool-calling example.

    Always reports a fixed 72°F sunny reading for the requested location.
    """
    return {
        "temperature": 72,
        "condition": "sunny",
        "location": location,
    }
async def execute_tool(name: str, arguments: dict) -> dict:
    """Dispatch a model-issued tool call to its local implementation.

    Raises ValueError for tool names this example does not implement.
    """
    if name != "get_weather":
        raise ValueError(f"Unknown tool: {name}")
    return await get_weather(arguments["location"])
async def handler(event: Event, context: Context):
    """Example voice handler: two-turn tool calling with provider fallbacks.

    StartEvent  -> speak a greeting.
    TextEvent   -> run a chat completion that may request tool calls,
                   execute those calls locally, send the results back for a
                   second completion, persist the conversation, and speak
                   the model's final answer.
    """
    if isinstance(event, StartEvent):
        yield TextToSpeechEvent(
            text="I can check the weather for multiple locations.",
            voice="kore"
        )
    if isinstance(event, TextEvent):
        # "N/A" is the fallback when the event carries no transcript text.
        user_message = event.data.get("text", "N/A")
        # Deserialize conversation history from context
        messages: ConversationHistory = deserialize_conversation(context.get_completion_messages())
        messages.append(UserMessage(content=user_message))
        # Single tool advertised to the model; parameters use JSON Schema.
        tools = [
            ToolDefinition(
                type="function",
                function=FunctionDefinition(
                    name="get_weather",
                    description="Get weather for a location",
                    parameters={
                        "type": "object",
                        "properties": {
                            "location": {"type": "string"}
                        },
                        "required": ["location"]
                    }
                )
            )
        ]
        # Primary provider is Anthropic with retry/backoff; OpenAI and
        # Google are tried in order if the primary (and retries) fail.
        request = ChatCompletionRequest(
            provider=CompletionsProvider.ANTHROPIC,
            api_key=context.variables.get("ANTHROPIC_API_KEY"),
            model="claude-haiku-4-5",
            messages=messages,
            tools=tools,
            tool_choice="auto",
            temperature=0.7,
            max_tokens=500,
            timeout=30.0,
            retry=RetryConfiguration(
                enabled=True,
                max_retries=3,
                retry_delay=1.0,
                backoff_multiplier=2.0
            ),
            fallbacks=[
                FallbackRequest(
                    provider=CompletionsProvider.OPENAI,
                    api_key=context.variables.get("OPENAI_API_KEY"),
                    model="gpt-4.1-mini"
                ),
                FallbackRequest(
                    provider=CompletionsProvider.GOOGLE,
                    api_key=context.variables.get("GEMINI_API_KEY"),
                    model="gemini-2.5-flash"
                )
            ]
        )
        # First turn: Model calls tools
        response = await generate_chat_completion(request)
        messages.append(response.message)
        # Execute tools and pass results back to the model
        if response.message.tool_calls:
            for tool_call in response.message.tool_calls:
                result = await execute_tool(tool_call.function.name, tool_call.function.arguments)
                # Each result is tied back to its call via tool_call_id.
                messages.append(ToolResultMessage(
                    tool_call_id=tool_call.id,
                    name=tool_call.function.name,
                    content=result
                ))
            # Second turn: Model uses tool results
            request.messages = messages
            response = await generate_chat_completion(request)
            messages.append(response.message)
        # Store updated conversation
        context.set_completion_messages(messages)
        # Speak the final assistant message, if the model produced text.
        if response.message.content:
            yield TextToSpeechEvent(
                text=response.message.content,
                voice="kore"
            )
from primfunctions.events import Event, StartEvent, TextEvent, TextToSpeechEvent
from primfunctions.context import Context
from voicerun_completions import (
generate_chat_completion_stream,
ConversationHistory,
SystemMessage,
UserMessage,
ToolResultMessage,
deserialize_conversation,
)
async def get_weather(location: str) -> dict:
    """Mock weather function: fixed sunny/72°F reading for any location."""
    report = {"temperature": 72, "condition": "sunny"}
    report["location"] = location
    return report
async def execute_tool(name: str, arguments: dict) -> dict:
    """Route a tool call by name to the matching async implementation."""
    dispatch = {
        "get_weather": lambda args: get_weather(args["location"]),
    }
    if name not in dispatch:
        raise ValueError(f"Unknown tool: {name}")
    return await dispatch[name](arguments)
async def handler(event: Event, context: Context):
    """Streaming variant of the tool-calling example.

    Sentences are spoken as they arrive from the stream; tool calls are
    collected during the first streamed turn, executed locally, and their
    results drive a second streamed turn whose sentences are also spoken.
    Uses plain dicts for the request/tool definitions instead of the
    typed request objects.
    """
    if isinstance(event, StartEvent):
        yield TextToSpeechEvent(
            text="I can check the weather for multiple locations.",
            voice="kore"
        )
    if isinstance(event, TextEvent):
        # "N/A" is the fallback when the event carries no transcript text.
        user_message = event.data.get("text", "N/A")
        # Deserialize conversation history from context
        messages: ConversationHistory = deserialize_conversation(context.get_completion_messages())
        messages.append(UserMessage(content=user_message))
        # Tool schema as a raw dict (JSON Schema parameters).
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Get weather for a location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {"type": "string"}
                        },
                        "required": ["location"]
                    }
                }
            }
        ]
        # Dict-form request: Anthropic primary, retry with backoff,
        # then OpenAI and Google as ordered fallbacks.
        request = {
            "provider": "anthropic",
            "api_key": context.variables.get("ANTHROPIC_API_KEY"),
            "model": "claude-haiku-4-5",
            "messages": messages,
            "tools": tools,
            "tool_choice": "auto",
            "temperature": 0.7,
            "max_tokens": 500,
            "timeout": 30.0,
            "retry": {
                "max_retries": 3,
                "retry_delay": 1.0,
                "backoff_multiplier": 2.0
            },
            "fallbacks": [
                {
                    "provider": "openai",
                    "api_key": context.variables.get("OPENAI_API_KEY"),
                    "model": "gpt-4.1-mini"
                },
                {
                    "provider": "google",
                    "api_key": context.variables.get("GEMINI_API_KEY"),
                    "model": "gemini-2.5-flash"
                }
            ]
        }
        # First turn: Stream to capture tool calls
        stream = await generate_chat_completion_stream(
            request=request,
            stream_options={"stream_sentences": True, "clean_sentences": True}
        )
        tool_calls = []
        async for chunk in stream:
            if chunk.type == "content_sentence":
                # Speak each completed sentence as soon as it streams in.
                yield TextToSpeechEvent(
                    text=chunk.sentence,
                    voice="kore"
                )
            elif chunk.type == "tool_call":
                tool_calls.append(chunk.tool_call)
            elif chunk.type == "response":
                # Final chunk carries the complete assistant message.
                messages.append(chunk.response.message)
        # Execute tools and pass results back to the model
        if tool_calls:
            for tool_call in tool_calls:
                result = await execute_tool(tool_call.function.name, tool_call.function.arguments)
                # Each result is tied back to its call via tool_call_id.
                messages.append(ToolResultMessage(
                    tool_call_id=tool_call.id,
                    name=tool_call.function.name,
                    content=result
                ))
            # Second turn: Stream the model's response using tool results
            request["messages"] = messages
            stream = await generate_chat_completion_stream(
                request=request,
                stream_options={"stream_sentences": True, "clean_sentences": True}
            )
            async for chunk in stream:
                if chunk.type == "content_sentence":
                    yield TextToSpeechEvent(
                        text=chunk.sentence,
                        voice="kore"
                    )
                elif chunk.type == "response":
                    messages.append(chunk.response.message)
        # Store updated conversation
        context.set_completion_messages(messages)
- Explore the API Reference for complete documentation of every request option
- Browse the other documentation sections for feature-specific guides