Function Calling
Quick Start
Function calling lets a model return structured arguments for functions you define, so your application can execute them and feed the results back for a final answer. The complete example below uses parallel function calling, where the model requests several calls in a single turn.
Full Example
from openai import OpenAI
import json

client = OpenAI(
    api_key="YOUR_API_KEY",
    base_url="https://api.haimaker.ai/v1"
)

# Example function that could be your backend API or an external API
def get_current_weather(location, unit="fahrenheit"):
    """Get the current weather in a given location"""
    if "tokyo" in location.lower():
        return json.dumps({"location": "Tokyo", "temperature": "10", "unit": "celsius"})
    elif "san francisco" in location.lower():
        return json.dumps({"location": "San Francisco", "temperature": "72", "unit": "fahrenheit"})
    elif "paris" in location.lower():
        return json.dumps({"location": "Paris", "temperature": "22", "unit": "celsius"})
    else:
        return json.dumps({"location": location, "temperature": "unknown"})
def run_conversation():
    # Step 1: Send the conversation and available functions to the model
    messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_current_weather",
                "description": "Get the current weather in a given location",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "The city and state, e.g. San Francisco, CA",
                        },
                        "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                    },
                    "required": ["location"],
                },
            },
        }
    ]
    response = client.chat.completions.create(
        model="openai/gpt-4o",
        messages=messages,
        tools=tools,
        tool_choice="auto",
    )
    print("\nFirst LLM Response:\n", response)
    response_message = response.choices[0].message
    tool_calls = response_message.tool_calls
    print("\nNumber of tool calls:", len(tool_calls) if tool_calls else 0)

    # Step 2: Check if the model wanted to call a function
    if tool_calls:
        available_functions = {
            "get_current_weather": get_current_weather,
        }
        messages.append(response_message)

        # Step 3: Execute each function call and send results back to model
        for tool_call in tool_calls:
            function_name = tool_call.function.name
            function_to_call = available_functions[function_name]
            function_args = json.loads(tool_call.function.arguments)
            function_response = function_to_call(
                location=function_args.get("location"),
                unit=function_args.get("unit"),
            )
            messages.append(
                {
                    "tool_call_id": tool_call.id,
                    "role": "tool",
                    "name": function_name,
                    "content": function_response,
                }
            )

        # Step 4: Get final response from model with function results
        second_response = client.chat.completions.create(
            model="openai/gpt-4o",
            messages=messages,
        )
        print("\nSecond LLM response:\n", second_response)
        return second_response

run_conversation()
Step-by-Step Explanation
Step 1: Send the Conversation and Available Functions to the Model
messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        },
    }
]

response = client.chat.completions.create(
    model="openai/gpt-4o",
    messages=messages,
    tools=tools,
    tool_choice="auto",
)
Expected Output
The model returns a separate tool call for each location:
{
  "id": "chatcmpl-...",
  "choices": [
    {
      "finish_reason": "tool_calls",
      "index": 0,
      "message": {
        "content": null,
        "role": "assistant",
        "tool_calls": [
          {
            "id": "call_...",
            "function": {
              "arguments": "{\"location\": \"San Francisco\", \"unit\": \"celsius\"}",
              "name": "get_current_weather"
            },
            "type": "function"
          },
          {
            "id": "call_...",
            "function": {
              "arguments": "{\"location\": \"Tokyo\", \"unit\": \"celsius\"}",
              "name": "get_current_weather"
            },
            "type": "function"
          },
          {
            "id": "call_...",
            "function": {
              "arguments": "{\"location\": \"Paris\", \"unit\": \"celsius\"}",
              "name": "get_current_weather"
            },
            "type": "function"
          }
        ]
      }
    }
  ],
  "model": "gpt-4o",
  "usage": {"completion_tokens": 77, "prompt_tokens": 88, "total_tokens": 165}
}
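Note the "finish_reason": "tool_calls". Checking it (or, equivalently, the presence of message.tool_calls, as the full example does) is how client code distinguishes a tool-call turn from a final text answer. A minimal sketch, where handle_tool_calls is a hypothetical stand-in for the dispatch loop shown in Step 2:

choice = response.choices[0]
if choice.finish_reason == "tool_calls":
    # The model is requesting tool execution before it can answer
    handle_tool_calls(choice.message.tool_calls)  # hypothetical helper; see Step 2
else:
    # The model answered directly without calling any tools
    print(choice.message.content)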
Step 2: Parse and Execute Functions
if tool_calls:
    available_functions = {
        "get_current_weather": get_current_weather,
    }
    messages.append(response_message)
    for tool_call in tool_calls:
        function_name = tool_call.function.name
        function_to_call = available_functions[function_name]
        function_args = json.loads(tool_call.function.arguments)
        function_response = function_to_call(
            location=function_args.get("location"),
            unit=function_args.get("unit"),
        )
        messages.append(
            {
                "tool_call_id": tool_call.id,
                "role": "tool",
                "name": function_name,
                "content": function_response,
            }
        )
Step 3: Send Function Results Back to Model
second_response = client.chat.completions.create(
    model="openai/gpt-4o",
    messages=messages,
)
print("Final Response:", second_response.choices[0].message.content)
Expected Output
The current weather in San Francisco is 72°F, in Tokyo it's 10°C, and in Paris it's 22°C.
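The dispatch loop in Step 2 trusts the model's output: it assumes every requested function exists and that the arguments parse as valid JSON. In production neither is guaranteed, so it is worth guarding the dispatch step. A minimal defensive sketch; execute_tool_call and its error payloads are illustrative, not part of the API:

def execute_tool_call(tool_call, available_functions):
    """Run one tool call defensively, returning a string for the tool message content."""
    function_to_call = available_functions.get(tool_call.function.name)
    if function_to_call is None:
        # Report the unknown function back to the model instead of raising KeyError
        return json.dumps({"error": f"Unknown function: {tool_call.function.name}"})
    try:
        function_args = json.loads(tool_call.function.arguments)
    except json.JSONDecodeError:
        # The model produced malformed argument JSON; report it rather than crash
        return json.dumps({"error": "Could not parse function arguments"})
    return function_to_call(**function_args)

The returned string drops straight into the "content" field of the role "tool" message, exactly as in Step 2.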
cURL Example
curl https://api.haimaker.ai/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer YOUR_API_KEY" \
  -d '{
    "model": "openai/gpt-4o",
    "messages": [
      {"role": "user", "content": "What'\''s the weather like in Boston?"}
    ],
    "tools": [
      {
        "type": "function",
        "function": {
          "name": "get_current_weather",
          "description": "Get the current weather in a given location",
          "parameters": {
            "type": "object",
            "properties": {
              "location": {
                "type": "string",
                "description": "The city and state, e.g. San Francisco, CA"
              },
              "unit": {
                "type": "string",
                "enum": ["celsius", "fahrenheit"]
              }
            },
            "required": ["location"]
          }
        }
      }
    ],
    "tool_choice": "auto"
  }'
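Like the Python client, the REST request above returns all tool calls in a single response. If your deployment also supports OpenAI-style streaming (an assumption worth verifying for your model), tool-call arguments arrive as incremental deltas that must be accumulated by index before they can be parsed. A minimal sketch:

# Assumes OpenAI-style streaming of tool-call deltas; verify provider support.
stream = client.chat.completions.create(
    model="openai/gpt-4o",
    messages=messages,
    tools=tools,
    tool_choice="auto",
    stream=True,
)

calls = {}  # index -> accumulated id, name, and argument fragments
for chunk in stream:
    if not chunk.choices:
        continue
    for tc in chunk.choices[0].delta.tool_calls or []:
        entry = calls.setdefault(tc.index, {"id": None, "name": "", "arguments": ""})
        if tc.id:
            entry["id"] = tc.id
        if tc.function and tc.function.name:
            entry["name"] = tc.function.name
        if tc.function and tc.function.arguments:
            entry["arguments"] += tc.function.arguments  # JSON arrives in fragments

for call in calls.values():
    print(call["name"], json.loads(call["arguments"]))  # complete JSON only after the stream ends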
Using Different Models
Function calling works with multiple providers. Just change the model name:
Anthropic Claude
response = client.chat.completions.create(
    model="anthropic/claude-3-7-sonnet-latest",
    messages=messages,
    tools=tools,
    tool_choice="auto",
)
Google Gemini
response = client.chat.completions.create(
    model="gemini/gemini-1.5-pro",
    messages=messages,
    tools=tools,
    tool_choice="auto",
)
xAI Grok
response = client.chat.completions.create(
    model="xai/grok-2-latest",
    messages=messages,
    tools=tools,
    tool_choice="auto",
)
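Every example above passes tool_choice="auto", which leaves it to the model whether to call a tool at all. The OpenAI-compatible request format also accepts a forced choice naming one specific function; this sketch assumes the gateway passes that option through unchanged (provider support may vary):

# Force the model to call get_current_weather instead of answering directly.
# The tool_choice object follows the OpenAI spec; support can vary by provider.
response = client.chat.completions.create(
    model="openai/gpt-4o",
    messages=messages,
    tools=tools,
    tool_choice={"type": "function", "function": {"name": "get_current_weather"}},
)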